diff --git a/cmd/kubevpn/cmds/clone.go b/cmd/kubevpn/cmds/clone.go index 55fd2bf0..e9c0388b 100644 --- a/cmd/kubevpn/cmds/clone.go +++ b/cmd/kubevpn/cmds/clone.go @@ -20,6 +20,7 @@ import ( "github.com/wencaiwulue/kubevpn/v2/pkg/handler" pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh" "github.com/wencaiwulue/kubevpn/v2/pkg/util" + "github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl" ) // CmdClone multiple cluster operate, can start up one deployment to another cluster @@ -76,7 +77,14 @@ func CmdClone(f cmdutil.Factory) *cobra.Command { PreRunE: func(cmd *cobra.Command, args []string) (err error) { util.InitLoggerForClient(false) // startup daemon process and sudo process - return daemon.StartupDaemon(cmd.Context()) + err = daemon.StartupDaemon(cmd.Context()) + if err != nil { + return err + } + if transferImage { + err = regctl.TransferImageWithRegctl(cmd.Context(), config.OriginImage, config.Image) + } + return err }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { diff --git a/cmd/kubevpn/cmds/connect.go b/cmd/kubevpn/cmds/connect.go index 8486cb15..d4579de2 100644 --- a/cmd/kubevpn/cmds/connect.go +++ b/cmd/kubevpn/cmds/connect.go @@ -21,6 +21,7 @@ import ( "github.com/wencaiwulue/kubevpn/v2/pkg/handler" pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh" "github.com/wencaiwulue/kubevpn/v2/pkg/util" + "github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl" ) func CmdConnect(f cmdutil.Factory) *cobra.Command { @@ -69,7 +70,10 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command { if err != nil { return err } - return nil + if transferImage { + err = regctl.TransferImageWithRegctl(cmd.Context(), config.OriginImage, config.Image) + } + return err }, RunE: func(cmd *cobra.Command, args []string) error { bytes, ns, err := util.ConvertToKubeConfigBytes(f) diff --git a/cmd/kubevpn/cmds/dev.go b/cmd/kubevpn/cmds/dev.go index 84ee47ad..f278ed97 100644 --- a/cmd/kubevpn/cmds/dev.go +++ b/cmd/kubevpn/cmds/dev.go @@ -18,6 +18,7 @@ import ( 
"github.com/wencaiwulue/kubevpn/v2/pkg/handler" pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh" "github.com/wencaiwulue/kubevpn/v2/pkg/util" + "github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl" ) func CmdDev(f cmdutil.Factory) *cobra.Command { @@ -103,6 +104,12 @@ func CmdDev(f cmdutil.Factory) *cobra.Command { if err != nil { return err } + if transferImage { + err = regctl.TransferImageWithRegctl(cmd.Context(), config.OriginImage, config.Image) + if err != nil { + return err + } + } return pkgssh.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false) }, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/kubevpn/cmds/proxy.go b/cmd/kubevpn/cmds/proxy.go index bb0b10a8..a006c216 100644 --- a/cmd/kubevpn/cmds/proxy.go +++ b/cmd/kubevpn/cmds/proxy.go @@ -20,6 +20,7 @@ import ( "github.com/wencaiwulue/kubevpn/v2/pkg/handler" pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh" "github.com/wencaiwulue/kubevpn/v2/pkg/util" + "github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl" ) func CmdProxy(f cmdutil.Factory) *cobra.Command { @@ -91,6 +92,9 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command { if err = daemon.StartupDaemon(cmd.Context()); err != nil { return err } + if transferImage { + err = regctl.TransferImageWithRegctl(cmd.Context(), config.OriginImage, config.Image) + } return err }, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/go.mod b/go.mod index e2cbd8c0..5b4ffd2c 100644 --- a/go.mod +++ b/go.mod @@ -12,6 +12,7 @@ require ( github.com/docker/cli v26.0.0+incompatible github.com/docker/docker v26.1.4+incompatible github.com/docker/go-connections v0.5.0 + github.com/docker/go-units v0.5.0 github.com/docker/libcontainer v2.2.1+incompatible github.com/envoyproxy/go-control-plane v0.12.0 github.com/fsnotify/fsnotify v1.7.0 @@ -32,6 +33,7 @@ require ( github.com/opencontainers/image-spec v1.1.0 github.com/pkg/errors v0.9.1 github.com/prometheus-community/pro-bing v0.4.0 + github.com/regclient/regclient v0.8.0 
github.com/schollz/progressbar/v3 v3.14.2 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.8.1 @@ -44,6 +46,7 @@ require ( golang.org/x/oauth2 v0.21.0 golang.org/x/sync v0.10.0 golang.org/x/sys v0.29.0 + golang.org/x/term v0.28.0 golang.org/x/text v0.21.0 golang.org/x/time v0.6.0 golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 @@ -121,7 +124,7 @@ require ( github.com/docker/docker-credential-helpers v0.8.1 // indirect github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect github.com/docker/go-metrics v0.0.1 // indirect - github.com/docker/go-units v0.5.0 // indirect + github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/ebitengine/purego v0.7.0 // indirect github.com/emicklei/go-restful/v3 v3.12.0 // indirect @@ -191,7 +194,7 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/klauspost/compress v1.17.7 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect @@ -250,6 +253,7 @@ require ( github.com/tinylib/msgp v1.1.9 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect + github.com/ulikunitz/xz v0.5.12 // indirect github.com/vishvananda/netns v0.0.4 // indirect github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0 // indirect github.com/x448/float16 v0.8.4 // indirect @@ -277,7 +281,6 @@ require ( go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect golang.org/x/mod v0.21.0 // indirect - golang.org/x/term v0.28.0 // indirect golang.org/x/tools v0.26.0 // indirect golang.org/x/xerrors 
v0.0.0-20240716161551-93cc26a95ae9 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 5f34bfa1..7e091877 100644 --- a/go.sum +++ b/go.sum @@ -446,8 +446,8 @@ github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4 github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= -github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -537,6 +537,8 @@ github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= +github.com/olareg/olareg v0.1.1 h1:Ui7q93zjcoF+U9U71sgqgZWByDoZOpqHitUXEu2xV+g= +github.com/olareg/olareg v0.1.1/go.mod h1:w8NP4SWrHHtxsFaUiv1lnCnYPm4sN1seCd2h7FK/dc0= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod 
h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= @@ -623,6 +625,8 @@ github.com/quic-go/quic-go v0.46.0 h1:uuwLClEEyk1DNvchH8uCByQVjo3yKL9opKulExNDs7 github.com/quic-go/quic-go v0.46.0/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/regclient/regclient v0.8.0 h1:xNAMDlADcyMvFAlGXoqDOxlSUBG4mqWBFgjQqVTP8Og= +github.com/regclient/regclient v0.8.0/go.mod h1:h9+Y6dBvqBkdlrj6EIhbTOv0xUuIFl7CdI1bZvEB42g= github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 h1:Qp27Idfgi6ACvFQat5+VJvlYToylpM/hcyLBI3WaKPA= github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052/go.mod h1:uvX/8buq8uVeiZiFht+0lqSLBHF+uGV8BrTv8W/SIwk= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -709,6 +713,8 @@ github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFA github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= +github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= diff --git a/pkg/daemon/action/connect-fork.go b/pkg/daemon/action/connect-fork.go index 2faf97fe..c0b6d6de 100644 --- a/pkg/daemon/action/connect-fork.go +++ 
b/pkg/daemon/action/connect-fork.go @@ -41,16 +41,7 @@ func (svr *Server) ConnectFork(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectF Lock: &svr.Lock, ImagePullSecretName: req.ImagePullSecretName, } - var sshConf = ssh.ParseSshFromRPC(req.SshJump) - var transferImage = req.TransferImage - defaultlog.Default().SetOutput(io.Discard) - if transferImage { - err = ssh.TransferImage(ctx, sshConf, config.OriginImage, req.Image, out) - if err != nil { - return err - } - } file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes)) if err != nil { return err @@ -74,7 +65,7 @@ func (svr *Server) ConnectFork(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectF }() var path string - path, err = ssh.SshJump(sshCtx, sshConf, flags, false) + path, err = ssh.SshJump(sshCtx, ssh.ParseSshFromRPC(req.SshJump), flags, false) if err != nil { return err } diff --git a/pkg/daemon/action/connect.go b/pkg/daemon/action/connect.go index 7db4acd5..93d7be88 100644 --- a/pkg/daemon/action/connect.go +++ b/pkg/daemon/action/connect.go @@ -61,16 +61,7 @@ func (svr *Server) Connect(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServe Lock: &svr.Lock, ImagePullSecretName: req.ImagePullSecretName, } - var sshConf = ssh.ParseSshFromRPC(req.SshJump) - var transferImage = req.TransferImage - golog.Default().SetOutput(io.Discard) - if transferImage { - err := ssh.TransferImage(ctx, sshConf, config.OriginImage, req.Image, out) - if err != nil { - return err - } - } file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes)) if err != nil { return err @@ -92,7 +83,7 @@ func (svr *Server) Connect(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServe } }() var path string - path, err = ssh.SshJump(sshCtx, sshConf, flags, false) + path, err = ssh.SshJump(sshCtx, ssh.ParseSshFromRPC(req.SshJump), flags, false) if err != nil { return err } diff --git a/pkg/ssh/image.go b/pkg/ssh/image.go index ee85b804..e645c123 100644 --- a/pkg/ssh/image.go +++ b/pkg/ssh/image.go @@ -185,7 
+185,7 @@ func PullImage(ctx context.Context, platform *v1.Platform, cli *client.Client, d Platform: plat, }) if err != nil { - log.Errorf("Failed to pull image %s, err: %s, please make sure image is exist and can be pulled from local", img, err) + log.Errorf("Failed to pull image %s: %v", img, err) return err } defer readCloser.Close() diff --git a/pkg/util/regctl/ascii/lines.go b/pkg/util/regctl/ascii/lines.go new file mode 100644 index 00000000..33e175a6 --- /dev/null +++ b/pkg/util/regctl/ascii/lines.go @@ -0,0 +1,75 @@ +package ascii + +import ( + "bytes" + "fmt" + "io" + + "golang.org/x/term" +) + +type Lines struct { + atStart bool + buf []byte + lines int + out io.Writer + width int +} + +func NewLines(w io.Writer) *Lines { + width := 0 + if wFd, ok := w.(interface{ Fd() uintptr }); ok && term.IsTerminal(int(wFd.Fd())) { + w, _, err := term.GetSize(int(wFd.Fd())) + if err == nil { + width = w + } + } + + return &Lines{ + buf: []byte{}, + out: w, + width: width, + } +} + +func (b *Lines) Add(add []byte) { + b.buf = append(b.buf, add...) 
+} + +func (b *Lines) Del() { + b.buf = b.buf[:0] +} + +func (b *Lines) Flush() { + b.Clear() + _, err := b.out.Write(b.buf) + if err != nil { + return + } + b.lines = bytes.Count(b.buf, []byte("\n")) + if b.width > 0 { + for _, line := range bytes.Split(b.buf, []byte("\n")) { + if len(line) > b.width { + b.lines += (len(line) - 1) / b.width + } + } + } + b.buf = b.buf[:0] + b.atStart = false +} + +func (b *Lines) Clear() { + if !b.atStart { + b.Return() + } + fmt.Fprintf(b.out, "\033[0J") + b.atStart = true + b.lines = 0 +} + +func (b *Lines) Return() { + if !b.atStart && b.lines > 0 { + fmt.Fprintf(b.out, "\033[%dF", b.lines) + } + b.atStart = true +} diff --git a/pkg/util/regctl/ascii/progress.go b/pkg/util/regctl/ascii/progress.go new file mode 100644 index 00000000..c5a7f2d6 --- /dev/null +++ b/pkg/util/regctl/ascii/progress.go @@ -0,0 +1,63 @@ +package ascii + +import ( + "fmt" + "io" + + "golang.org/x/term" +) + +type ProgressBar struct { + Width, Min, Max int + Start, Done, Active, Pending, End byte + Out io.Writer +} + +func NewProgressBar(w io.Writer) *ProgressBar { + width := 0 + if wFd, ok := w.(interface{ Fd() uintptr }); ok && term.IsTerminal(int(wFd.Fd())) { + w, _, err := term.GetSize(int(wFd.Fd())) + if err == nil { + width = w + } + } + + return &ProgressBar{ + Width: width, + Min: 10, + Max: 40, + Out: w, + Start: '[', + Done: '=', + Active: '>', + Pending: ' ', + End: ']', + } +} + +func (p *ProgressBar) Generate(pct float64, pre, post string) []byte { + if pct < 0 { + pct = 0 + } else if pct > 1 { + pct = 1 + } + curWidth := p.Width - (len(pre) + len(post) + 2) + if curWidth < p.Min { + curWidth = p.Min + } else if curWidth > p.Max { + curWidth = p.Max + } + buf := make([]byte, curWidth) + + doneLen := int(float64(curWidth) * pct) + for i := 0; i < doneLen; i++ { + buf[i] = p.Done + } + if doneLen < curWidth { + buf[doneLen] = p.Active + } + for i := doneLen + 1; i < curWidth; i++ { + buf[i] = p.Pending + } + return 
[]byte(fmt.Sprintf("%s%c%s%c%s\n", pre, p.Start, buf, p.End, post)) +} diff --git a/pkg/util/regctl/image.go b/pkg/util/regctl/image.go new file mode 100644 index 00000000..89f50401 --- /dev/null +++ b/pkg/util/regctl/image.go @@ -0,0 +1,144 @@ +package regctl + +import ( + "fmt" + "sort" + "sync" + "time" + + "github.com/docker/go-units" + "github.com/regclient/regclient/types" + + "github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl/ascii" +) + +type ImageProgress struct { + mu sync.Mutex + Start time.Time + Entries map[string]*ImageProgressEntry + AsciiOut *ascii.Lines + Bar *ascii.ProgressBar + changed bool +} + +type ImageProgressEntry struct { + kind types.CallbackKind + instance string + state types.CallbackState + start, last time.Time + cur, total int64 + bps []float64 +} + +func (ip *ImageProgress) Callback(kind types.CallbackKind, instance string, state types.CallbackState, cur, total int64) { + // track kind/instance + ip.mu.Lock() + defer ip.mu.Unlock() + ip.changed = true + now := time.Now() + if e, ok := ip.Entries[kind.String()+":"+instance]; ok { + e.state = state + diff := now.Sub(e.last) + bps := float64(cur-e.cur) / diff.Seconds() + e.state = state + e.last = now + e.cur = cur + e.total = total + if len(e.bps) >= 10 { + e.bps = append(e.bps[1:], bps) + } else { + e.bps = append(e.bps, bps) + } + } else { + ip.Entries[kind.String()+":"+instance] = &ImageProgressEntry{ + kind: kind, + instance: instance, + state: state, + start: now, + last: now, + cur: cur, + total: total, + bps: []float64{}, + } + } +} + +func (ip *ImageProgress) Display(final bool) { + ip.mu.Lock() + defer ip.mu.Unlock() + if !ip.changed && !final { + return // skip since no changes since last display and not the final display + } + var manifestTotal, manifestFinished, sum, skipped, queued int64 + // sort entry keys by start time + keys := make([]string, 0, len(ip.Entries)) + for k := range ip.Entries { + keys = append(keys, k) + } + sort.Slice(keys, func(a, b int) bool { + if 
ip.Entries[keys[a]].state != ip.Entries[keys[b]].state { + return ip.Entries[keys[a]].state > ip.Entries[keys[b]].state + } else if ip.Entries[keys[a]].state != types.CallbackActive { + return ip.Entries[keys[a]].last.Before(ip.Entries[keys[b]].last) + } else { + return ip.Entries[keys[a]].cur > ip.Entries[keys[b]].cur + } + }) + startCount, startLimit := 0, 2 + finishedCount, finishedLimit := 0, 2 + // hide old finished entries + for i := len(keys) - 1; i >= 0; i-- { + e := ip.Entries[keys[i]] + if e.kind != types.CallbackManifest && e.state == types.CallbackFinished { + finishedCount++ + if finishedCount > finishedLimit { + e.state = types.CallbackArchived + } + } + } + for _, k := range keys { + e := ip.Entries[k] + switch e.kind { + case types.CallbackManifest: + manifestTotal++ + if e.state == types.CallbackFinished || e.state == types.CallbackSkipped { + manifestFinished++ + } + default: + // show progress bars + if !final && (e.state == types.CallbackActive || (e.state == types.CallbackStarted && startCount < startLimit) || e.state == types.CallbackFinished) { + if e.state == types.CallbackStarted { + startCount++ + } + pre := e.instance + " " + if len(pre) > 15 { + pre = pre[:14] + " " + } + pct := float64(e.cur) / float64(e.total) + post := fmt.Sprintf(" %4.2f%% %s/%s", pct*100, units.HumanSize(float64(e.cur)), units.HumanSize(float64(e.total))) + ip.AsciiOut.Add(ip.Bar.Generate(pct, pre, post)) + } + // track stats + if e.state == types.CallbackSkipped { + skipped += e.total + } else if e.total > 0 { + sum += e.cur + queued += e.total - e.cur + } + } + } + // show stats summary + ip.AsciiOut.Add([]byte(fmt.Sprintf("Manifests: %d/%d | Blobs: %s copied, %s skipped", + manifestFinished, manifestTotal, + units.HumanSize(float64(sum)), + units.HumanSize(float64(skipped))))) + if queued > 0 { + ip.AsciiOut.Add([]byte(fmt.Sprintf(", %s queued", + units.HumanSize(float64(queued))))) + } + ip.AsciiOut.Add([]byte(fmt.Sprintf(" | Elapsed: %ds\n", 
int64(time.Since(ip.Start).Seconds())))) + ip.AsciiOut.Flush() + if !final { + ip.AsciiOut.Return() + } +} diff --git a/pkg/util/regctl/regctl.go b/pkg/util/regctl/regctl.go new file mode 100644 index 00000000..934fb7a7 --- /dev/null +++ b/pkg/util/regctl/regctl.go @@ -0,0 +1,67 @@ +package regctl + +import ( + "context" + "fmt" + "log/slog" + "os" + "time" + + "github.com/regclient/regclient" + "github.com/regclient/regclient/types/ref" + + "github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl/ascii" +) + +func TransferImageWithRegctl(ctx context.Context, imageSource, imageTarget string) error { + rc := regclient.New( + regclient.WithDockerCerts(), + regclient.WithDockerCreds(), + regclient.WithSlog(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelWarn}))), + ) + // create a reference for an image + src, err := ref.New(imageSource) + if err != nil { + _, _ = os.Stdout.Write([]byte(fmt.Sprintf("failed to create ref: %v\n", err))) + return err + } + defer rc.Close(ctx, src) + dst, err := ref.New(imageTarget) + if err != nil { + _, _ = os.Stdout.Write([]byte(fmt.Sprintf("failed to create ref: %v\n", err))) + return err + } + defer rc.Close(ctx, dst) + + // check for a tty and attach progress reporter + done := make(chan bool) + var progress = &ImageProgress{ + Start: time.Now(), + Entries: map[string]*ImageProgressEntry{}, + AsciiOut: ascii.NewLines(os.Stdout), + Bar: ascii.NewProgressBar(os.Stdout), + } + progressFreq := time.Millisecond * 250 + ticker := time.NewTicker(progressFreq) + defer ticker.Stop() + go func() { + for { + select { + case <-done: + ticker.Stop() + return + case <-ticker.C: + progress.Display(false) + } + } + }() + var opts []regclient.ImageOpts + opts = append(opts, regclient.ImageWithCallback(progress.Callback)) + + err = rc.ImageCopy(ctx, src, dst, opts...) 
+ + close(done) + progress.Display(true) + + return err +} diff --git a/vendor/github.com/docker/libtrust/CONTRIBUTING.md b/vendor/github.com/docker/libtrust/CONTRIBUTING.md new file mode 100644 index 00000000..05be0f8a --- /dev/null +++ b/vendor/github.com/docker/libtrust/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to libtrust + +Want to hack on libtrust? Awesome! Here are instructions to get you +started. + +libtrust is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read +[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). + +Happy hacking! diff --git a/vendor/github.com/docker/libtrust/LICENSE b/vendor/github.com/docker/libtrust/LICENSE new file mode 100644 index 00000000..27448585 --- /dev/null +++ b/vendor/github.com/docker/libtrust/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/docker/libtrust/MAINTAINERS b/vendor/github.com/docker/libtrust/MAINTAINERS new file mode 100644 index 00000000..9768175f --- /dev/null +++ b/vendor/github.com/docker/libtrust/MAINTAINERS @@ -0,0 +1,3 @@ +Solomon Hykes +Josh Hawn (github: jlhawn) +Derek McGowan (github: dmcgowan) diff --git a/vendor/github.com/docker/libtrust/README.md b/vendor/github.com/docker/libtrust/README.md new file mode 100644 index 00000000..dcffb31a --- /dev/null +++ b/vendor/github.com/docker/libtrust/README.md @@ -0,0 +1,22 @@ +# libtrust + +> **WARNING** this library is no longer actively developed, and will be integrated +> in the [docker/distribution][https://www.github.com/docker/distribution] +> repository in future. + +Libtrust is library for managing authentication and authorization using public key cryptography. + +Authentication is handled using the identity attached to the public key. +Libtrust provides multiple methods to prove possession of the private key associated with an identity. + - TLS x509 certificates + - Signature verification + - Key Challenge + +Authorization and access control is managed through a distributed trust graph. +Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access. + +## Copyright and license + +Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license. +Docs released under Creative commons. 
+ diff --git a/vendor/github.com/docker/libtrust/certificates.go b/vendor/github.com/docker/libtrust/certificates.go new file mode 100644 index 00000000..3dcca33c --- /dev/null +++ b/vendor/github.com/docker/libtrust/certificates.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "io/ioutil" + "math/big" + "net" + "time" +) + +type certTemplateInfo struct { + commonName string + domains []string + ipAddresses []net.IP + isCA bool + clientAuth bool + serverAuth bool +} + +func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { + // Generate a certificate template which is valid from the past week to + // 10 years from now. The usage of the certificate depends on the + // specified fields in the given certTempInfo object. + var ( + keyUsage x509.KeyUsage + extKeyUsage []x509.ExtKeyUsage + ) + + if info.isCA { + keyUsage = x509.KeyUsageCertSign + } + + if info.clientAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if info.serverAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + return &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: info.commonName, + }, + NotBefore: time.Now().Add(-time.Hour * 24 * 7), + NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), + DNSNames: info.domains, + IPAddresses: info.ipAddresses, + IsCA: info.isCA, + KeyUsage: keyUsage, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: info.isCA, + } +} + +func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { + pubCertTemplate := generateCertTemplate(subInfo) + privCertTemplate := generateCertTemplate(issInfo) + + certDER, err := x509.CreateCertificate( + rand.Reader, pubCertTemplate, privCertTemplate, + pub.CryptoPublicKey(), priv.CryptoPrivateKey(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create certificate: %s", err) + } 
+ + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %s", err) + } + + return +} + +// GenerateSelfSignedServerCert creates a self-signed certificate for the +// given key which is to be used for TLS servers with the given domains and +// IP addresses. +func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + domains: domains, + ipAddresses: ipAddresses, + serverAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateSelfSignedClientCert creates a self-signed certificate for the +// given key which is to be used for TLS clients. +func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + clientAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateCACert creates a certificate which can be used as a trusted +// certificate authority. +func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { + subjectInfo := &certTemplateInfo{ + commonName: trustedKey.KeyID(), + isCA: true, + } + issuerInfo := &certTemplateInfo{ + commonName: signer.KeyID(), + } + + return generateCert(trustedKey, signer, subjectInfo, issuerInfo) +} + +// GenerateCACertPool creates a certificate authority pool to be used for a +// TLS configuration. 
Any self-signed certificates issued by the specified +// trusted keys will be verified during a TLS handshake +func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, trustedKey := range trustedKeys { + cert, err := GenerateCACert(signer, trustedKey) + if err != nil { + return nil, fmt.Errorf("failed to generate CA certificate: %s", err) + } + + certPool.AddCert(cert) + } + + return certPool, nil +} + +// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + certificates := []*x509.Certificate{} + var block *pem.Block + block, b = pem.Decode(b) + for ; block != nil; block, b = pem.Decode(b) { + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } else { + return nil, fmt.Errorf("invalid pem block type: %s", block.Type) + } + } + + return certificates, nil +} + +// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". 
+func LoadCertificatePool(filename string) (*x509.CertPool, error) { + certs, err := LoadCertificateBundle(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} diff --git a/vendor/github.com/docker/libtrust/doc.go b/vendor/github.com/docker/libtrust/doc.go new file mode 100644 index 00000000..ec5d2159 --- /dev/null +++ b/vendor/github.com/docker/libtrust/doc.go @@ -0,0 +1,9 @@ +/* +Package libtrust provides an interface for managing authentication and +authorization using public key cryptography. Authentication is handled +using the identity attached to the public key and verified through TLS +x509 certificates, a key challenge, or signature. Authorization and +access control is managed through a trust graph distributed between +both remote trust servers and locally cached and managed data. +*/ +package libtrust diff --git a/vendor/github.com/docker/libtrust/ec_key.go b/vendor/github.com/docker/libtrust/ec_key.go new file mode 100644 index 00000000..00bbe4b3 --- /dev/null +++ b/vendor/github.com/docker/libtrust/ec_key.go @@ -0,0 +1,428 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * EC DSA PUBLIC KEY + */ + +// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital +// signature algorithms. 
+type ecPublicKey struct { + *ecdsa.PublicKey + curveName string + signatureAlgorithm *signatureAlgorithm + extended map[string]interface{} +} + +func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { + curve := cryptoPublicKey.Curve + + switch { + case curve == elliptic.P256(): + return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil + case curve == elliptic.P384(): + return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil + case curve == elliptic.P521(): + return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil + default: + return nil, errors.New("unsupported elliptic curve") + } +} + +// KeyType returns the key type for elliptic curve keys, i.e., "EC". +func (k *ecPublicKey) KeyType() string { + return "EC" +} + +// CurveName returns the elliptic curve identifier. +// Possible values are "P-256", "P-384", and "P-521". +func (k *ecPublicKey) CurveName() string { + return k.curveName +} + +// KeyID returns a distinct identifier which is unique to this Public Key. +func (k *ecPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *ecPublicKey) String() string { + return fmt.Sprintf("EC Public Key <%s>", k.KeyID()) +} + +// Verify verifyies the signature of the data in the io.Reader using this +// PublicKey. The alg parameter should identify the digital signature +// algorithm which was used to produce the signature and should be supported +// by this public key. Returns a nil error if the signature is valid. +func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // For EC keys there is only one supported signature algorithm depending + // on the curve parameters. 
+ if k.signatureAlgorithm.HeaderParam() != alg { + return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg) + } + + // signature is the concatenation of (r, s), base64Url encoded. + sigLength := len(signature) + expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3) + if sigLength != expectedOctetLength { + return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength) + } + + rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:] + r := new(big.Int).SetBytes(rBytes) + s := new(big.Int).SetBytes(sBytes) + + hasher := k.signatureAlgorithm.HashID().New() + _, err := io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + if !ecdsa.Verify(k.PublicKey, hash, r, s) { + return errors.New("invalid signature") + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *ecPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["crv"] = k.CurveName() + + xBytes := k.X.Bytes() + yBytes := k.Y.Bytes() + octetLength := (k.Params().BitSize + 7) >> 3 + // MUST include leading zeros in the output so that x, y are each + // *octetLength* bytes long. + xBuf := make([]byte, octetLength-len(xBytes), octetLength) + yBuf := make([]byte, octetLength-len(yBytes), octetLength) + xBuf = append(xBuf, xBytes...) + yBuf = append(yBuf, yBytes...) 
+ + jwk["x"] = joseBase64UrlEncode(xBuf) + jwk["y"] = joseBase64UrlEncode(yBuf) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *ecPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { + // JWK key type (kty) has already been determined to be "EC". + // Need to extract 'crv', 'x', 'y', and 'kid' and check for + // consistency. + + // Get the curve identifier value. + crv, err := stringFromMap(jwk, "crv") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) + } + + var ( + curve elliptic.Curve + sigAlg *signatureAlgorithm + ) + + switch { + case crv == "P-256": + curve = elliptic.P256() + sigAlg = es256 + case crv == "P-384": + curve = elliptic.P384() + sigAlg = es384 + case crv == "P-521": + curve = elliptic.P521() + sigAlg = es512 + default: + return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) + } + + // Get the X and Y coordinates for the public key point. 
+ xB64Url, err := stringFromMap(jwk, "x") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + x, err := parseECCoordinate(xB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + + yB64Url, err := stringFromMap(jwk, "y") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + y, err := parseECCoordinate(yB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + + key := &ecPublicKey{ + PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, + curveName: crv, signatureAlgorithm: sigAlg, + } + + // Key ID is optional too, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) + } + } + + key.extended = jwk + + return key, nil +} + +/* + * EC DSA PRIVATE KEY + */ + +// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature +// algorithms. +type ecPrivateKey struct { + ecPublicKey + *ecdsa.PrivateKey +} + +func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { + publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) + if err != nil { + return nil, err + } + + return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *ecPrivateKey) PublicKey() PublicKey { + return &k.ecPublicKey +} + +func (k *ecPrivateKey) String() string { + return fmt.Sprintf("EC Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the elliptic curve private key. 
If the specified hashing algorithm is +// supported by this key, that hash function is used to generate the signature +// otherwise the the default hashing algorithm for this key is used. Returns +// the signature and the name of the JWK signature algorithm used, e.g., +// "ES256", "ES384", "ES512". +func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { + // Generate a signature of the data using the internal alg. + // The given hashId is only a suggestion, and since EC keys only support + // on signature/hash algorithm given the curve name, we disregard it for + // the elliptic curve JWK signature implementation. + hasher := k.signatureAlgorithm.HashID().New() + _, err = io.Copy(hasher, data) + if err != nil { + return nil, "", fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash) + if err != nil { + return nil, "", fmt.Errorf("error producing signature: %s", err) + } + rBytes, sBytes := r.Bytes(), s.Bytes() + octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3 + // MUST include leading zeros in the output + rBuf := make([]byte, octetLength-len(rBytes), octetLength) + sBuf := make([]byte, octetLength-len(sBytes), octetLength) + + rBuf = append(rBuf, rBytes...) + sBuf = append(sBuf, sBytes...) + + signature = append(rBuf, sBuf...) + alg = k.signatureAlgorithm.HeaderParam() + + return +} + +// CryptoPrivateKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey { + return k.PrivateKey +} + +func (k *ecPrivateKey) toMap() map[string]interface{} { + jwk := k.ecPublicKey.toMap() + + dBytes := k.D.Bytes() + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). 
This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := k.ecPublicKey.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + // Create a buffer with the necessary zero-padding. + dBuf := make([]byte, octetLength-len(dBytes), octetLength) + dBuf = append(dBuf, dBytes...) + + jwk["d"] = joseBase64UrlEncode(dBuf) + + return jwk +} + +// MarshalJSON serializes this Private Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Private Key to DER-encoded PKIX format. +func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err) + } + k.extended["keyID"] = k.KeyID() // For display purposes. + return createPemBlock("EC PRIVATE KEY", derBytes, k.extended) +} + +func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) { + dB64Url, err := stringFromMap(jwk, "d") + if err != nil { + return nil, fmt.Errorf("JWK EC Private Key: %s", err) + } + + // JWK key type (kty) has already been determined to be "EC". + // Need to extract the public key information, then extract the private + // key value 'd'. 
+ publicKey, err := ecPublicKeyFromMap(jwk) + if err != nil { + return nil, err + } + + d, err := parseECPrivateParam(dB64Url, publicKey.Curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err) + } + + key := &ecPrivateKey{ + ecPublicKey: *publicKey, + PrivateKey: &ecdsa.PrivateKey{ + PublicKey: *publicKey.PublicKey, + D: d, + }, + } + + return key, nil +} + +/* + * Key Generation Functions. + */ + +func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) { + k = new(ecPrivateKey) + k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return nil, err + } + + k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256. +func GenerateECP256PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P256()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-256 key: %s", err) + } + + k.curveName = "P-256" + k.signatureAlgorithm = es256 + + return k, nil +} + +// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384. +func GenerateECP384PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P384()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-384 key: %s", err) + } + + k.curveName = "P-384" + k.signatureAlgorithm = es384 + + return k, nil +} + +// GenerateECP521PrivateKey generates aß key pair using elliptic curve P-521. 
+func GenerateECP521PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P521()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-521 key: %s", err) + } + + k.curveName = "P-521" + k.signatureAlgorithm = es512 + + return k, nil +} diff --git a/vendor/github.com/docker/libtrust/filter.go b/vendor/github.com/docker/libtrust/filter.go new file mode 100644 index 00000000..5b2b4fca --- /dev/null +++ b/vendor/github.com/docker/libtrust/filter.go @@ -0,0 +1,50 @@ +package libtrust + +import ( + "path/filepath" +) + +// FilterByHosts filters the list of PublicKeys to only those which contain a +// 'hosts' pattern which matches the given host. If *includeEmpty* is true, +// then keys which do not specify any hosts are also returned. +func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) { + filtered := make([]PublicKey, 0, len(keys)) + + for _, pubKey := range keys { + var hosts []string + switch v := pubKey.GetExtendedField("hosts").(type) { + case []string: + hosts = v + case []interface{}: + for _, value := range v { + h, ok := value.(string) + if !ok { + continue + } + hosts = append(hosts, h) + } + } + + if len(hosts) == 0 { + if includeEmpty { + filtered = append(filtered, pubKey) + } + continue + } + + // Check if any hosts match pattern + for _, hostPattern := range hosts { + match, err := filepath.Match(hostPattern, host) + if err != nil { + return nil, err + } + + if match { + filtered = append(filtered, pubKey) + continue + } + } + } + + return filtered, nil +} diff --git a/vendor/github.com/docker/libtrust/hash.go b/vendor/github.com/docker/libtrust/hash.go new file mode 100644 index 00000000..a2df787d --- /dev/null +++ b/vendor/github.com/docker/libtrust/hash.go @@ -0,0 +1,56 @@ +package libtrust + +import ( + "crypto" + _ "crypto/sha256" // Registrer SHA224 and SHA256 + _ "crypto/sha512" // Registrer SHA384 and SHA512 + "fmt" +) + +type signatureAlgorithm struct { + algHeaderParam 
string + hashID crypto.Hash +} + +func (h *signatureAlgorithm) HeaderParam() string { + return h.algHeaderParam +} + +func (h *signatureAlgorithm) HashID() crypto.Hash { + return h.hashID +} + +var ( + rs256 = &signatureAlgorithm{"RS256", crypto.SHA256} + rs384 = &signatureAlgorithm{"RS384", crypto.SHA384} + rs512 = &signatureAlgorithm{"RS512", crypto.SHA512} + es256 = &signatureAlgorithm{"ES256", crypto.SHA256} + es384 = &signatureAlgorithm{"ES384", crypto.SHA384} + es512 = &signatureAlgorithm{"ES512", crypto.SHA512} +) + +func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) { + switch { + case alg == "RS256": + return rs256, nil + case alg == "RS384": + return rs384, nil + case alg == "RS512": + return rs512, nil + default: + return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg) + } +} + +func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm { + switch { + case hashID == crypto.SHA512: + return rs512 + case hashID == crypto.SHA384: + return rs384 + case hashID == crypto.SHA256: + fallthrough + default: + return rs256 + } +} diff --git a/vendor/github.com/docker/libtrust/jsonsign.go b/vendor/github.com/docker/libtrust/jsonsign.go new file mode 100644 index 00000000..cb2ca9a7 --- /dev/null +++ b/vendor/github.com/docker/libtrust/jsonsign.go @@ -0,0 +1,657 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "sort" + "time" + "unicode" +) + +var ( + // ErrInvalidSignContent is used when the content to be signed is invalid. + ErrInvalidSignContent = errors.New("invalid sign content") + + // ErrInvalidJSONContent is used when invalid json is encountered. + ErrInvalidJSONContent = errors.New("invalid json content") + + // ErrMissingSignatureKey is used when the specified signature key + // does not exist in the JSON content. 
+ ErrMissingSignatureKey = errors.New("missing signature key") +) + +type jsHeader struct { + JWK PublicKey `json:"jwk,omitempty"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c,omitempty"` +} + +type jsSignature struct { + Header jsHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected,omitempty"` +} + +type jsSignaturesSorted []jsSignature + +func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } +func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } + +func (jsbkid jsSignaturesSorted) Less(i, j int) bool { + ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() + si, sj := jsbkid[i].Signature, jsbkid[j].Signature + + if ki == kj { + return si < sj + } + + return ki < kj +} + +type signKey struct { + PrivateKey + Chain []*x509.Certificate +} + +// JSONSignature represents a signature of a json object. +type JSONSignature struct { + payload string + signatures []jsSignature + indent string + formatLength int + formatTail []byte +} + +func newJSONSignature() *JSONSignature { + return &JSONSignature{ + signatures: make([]jsSignature, 0, 1), + } +} + +// Payload returns the encoded payload of the signature. This +// payload should not be signed directly +func (js *JSONSignature) Payload() ([]byte, error) { + return joseBase64UrlDecode(js.payload) +} + +func (js *JSONSignature) protectedHeader() (string, error) { + protected := map[string]interface{}{ + "formatLength": js.formatLength, + "formatTail": joseBase64UrlEncode(js.formatTail), + "time": time.Now().UTC().Format(time.RFC3339), + } + protectedBytes, err := json.Marshal(protected) + if err != nil { + return "", err + } + + return joseBase64UrlEncode(protectedBytes), nil +} + +func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { + buf := make([]byte, len(js.payload)+len(protectedHeader)+1) + copy(buf, protectedHeader) + buf[len(protectedHeader)] = '.' 
+ copy(buf[len(protectedHeader)+1:], js.payload) + return buf, nil +} + +// Sign adds a signature using the given private key. +func (js *JSONSignature) Sign(key PrivateKey) error { + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + js.signatures = append(js.signatures, jsSignature{ + Header: jsHeader{ + JWK: key.PublicKey(), + Algorithm: algorithm, + }, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// SignWithChain adds a signature using the given private key +// and setting the x509 chain. The public key of the first element +// in the chain must be the public key corresponding with the sign key. +func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { + // Ensure key.Chain[0] is public key for key + //key.Chain.PublicKey + //key.PublicKey().CryptoPublicKey() + + // Verify chain + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + header := jsHeader{ + Chain: make([]string, len(chain)), + Algorithm: algorithm, + } + + for i, cert := range chain { + header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) + } + + js.signatures = append(js.signatures, jsSignature{ + Header: header, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// Verify verifies all the signatures and returns the list of +// public keys used to sign. Any x509 chains are not checked. 
+func (js *JSONSignature) Verify() ([]PublicKey, error) { + keys := make([]PublicKey, len(js.signatures)) + for i, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + } else if signature.Header.JWK != nil { + publicKey = signature.Header.JWK + } else { + return nil, errors.New("missing public key") + } + + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + + keys[i] = publicKey + } + return keys, nil +} + +// VerifyChains verifies all the signatures and the chains associated +// with each signature and returns the list of verified chains. +// Signatures without an x509 chain are not checked. 
+func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { + chains := make([][]*x509.Certificate, 0, len(js.signatures)) + for _, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + intermediates := x509.NewCertPool() + if len(signature.Header.Chain) > 1 { + intermediateChain := signature.Header.Chain[1:] + for i := range intermediateChain { + certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) + if err != nil { + return nil, err + } + intermediate, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + intermediates.AddCert(intermediate) + } + } + + verifyOptions := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: ca, + } + + verifiedChains, err := cert.Verify(verifyOptions) + if err != nil { + return nil, err + } + chains = append(chains, verifiedChains...) 
+ + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + } + + } + return chains, nil +} + +// JWS returns JSON serialized JWS according to +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 +func (js *JSONSignature) JWS() ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("missing signature") + } + + sort.Sort(jsSignaturesSorted(js.signatures)) + + jsonMap := map[string]interface{}{ + "payload": js.payload, + "signatures": js.signatures, + } + + return json.MarshalIndent(jsonMap, "", " ") +} + +func notSpace(r rune) bool { + return !unicode.IsSpace(r) +} + +func detectJSONIndent(jsonContent []byte) (indent string) { + if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { + quoteIndex := bytes.IndexRune(jsonContent[1:], '"') + if quoteIndex > 0 { + indent = string(jsonContent[2 : quoteIndex+1]) + } + } + return +} + +type jsParsedHeader struct { + JWK json.RawMessage `json:"jwk"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c"` +} + +type jsParsedSignature struct { + Header jsParsedHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected"` +} + +// ParseJWS parses a JWS serialized JSON object into a Json Signature. 
+func ParseJWS(content []byte) (*JSONSignature, error) { + type jsParsed struct { + Payload string `json:"payload"` + Signatures []jsParsedSignature `json:"signatures"` + } + parsed := &jsParsed{} + err := json.Unmarshal(content, parsed) + if err != nil { + return nil, err + } + if len(parsed.Signatures) == 0 { + return nil, errors.New("missing signatures") + } + payload, err := joseBase64UrlDecode(parsed.Payload) + if err != nil { + return nil, err + } + + js, err := NewJSONSignature(payload) + if err != nil { + return nil, err + } + js.signatures = make([]jsSignature, len(parsed.Signatures)) + for i, signature := range parsed.Signatures { + header := jsHeader{ + Algorithm: signature.Header.Algorithm, + } + if signature.Header.Chain != nil { + header.Chain = signature.Header.Chain + } + if signature.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) + if err != nil { + return nil, err + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signature.Signature, + Protected: signature.Protected, + } + } + + return js, nil +} + +// NewJSONSignature returns a new unsigned JWS from a json byte array. +// JSONSignature will need to be signed before serializing or storing. +// Optionally, one or more signatures can be provided as byte buffers, +// containing serialized JWS signatures, to assemble a fully signed JWS +// package. It is the callers responsibility to ensure uniqueness of the +// provided signatures. 
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { + var dataMap map[string]interface{} + err := json.Unmarshal(content, &dataMap) + if err != nil { + return nil, err + } + + js := newJSONSignature() + js.indent = detectJSONIndent(content) + + js.payload = joseBase64UrlEncode(content) + + // Find trailing } and whitespace, put in protected header + closeIndex := bytes.LastIndexFunc(content, notSpace) + if content[closeIndex] != '}' { + return nil, ErrInvalidJSONContent + } + lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) + if content[lastRuneIndex] == ',' { + return nil, ErrInvalidJSONContent + } + js.formatLength = lastRuneIndex + 1 + js.formatTail = content[js.formatLength:] + + if len(signatures) > 0 { + for _, signature := range signatures { + var parsedJSig jsParsedSignature + + if err := json.Unmarshal(signature, &parsedJSig); err != nil { + return nil, err + } + + // TODO(stevvooe): A lot of the code below is repeated in + // ParseJWS. It will require more refactoring to fix that. + jsig := jsSignature{ + Header: jsHeader{ + Algorithm: parsedJSig.Header.Algorithm, + }, + Signature: parsedJSig.Signature, + Protected: parsedJSig.Protected, + } + + if parsedJSig.Header.Chain != nil { + jsig.Header.Chain = parsedJSig.Header.Chain + } + + if parsedJSig.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) + if err != nil { + return nil, err + } + jsig.Header.JWK = publicKey + } + + js.signatures = append(js.signatures, jsig) + } + } + + return js, nil +} + +// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or +// struct. JWS will need to be signed before serializing or storing. 
+func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { + switch content.(type) { + case map[string]interface{}: + case struct{}: + default: + return nil, errors.New("invalid data type") + } + + js := newJSONSignature() + js.indent = " " + + payload, err := json.MarshalIndent(content, "", js.indent) + if err != nil { + return nil, err + } + js.payload = joseBase64UrlEncode(payload) + + // Remove '\n}' from formatted section, put in protected header + js.formatLength = len(payload) - 2 + js.formatTail = payload[js.formatLength:] + + return js, nil +} + +func readIntFromMap(key string, m map[string]interface{}) (int, bool) { + value, ok := m[key] + if !ok { + return 0, false + } + switch v := value.(type) { + case int: + return v, true + case float64: + return int(v), true + default: + return 0, false + } +} + +func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { + value, ok := m[key] + if !ok { + return "", false + } + v, ok = value.(string) + return +} + +// ParsePrettySignature parses a formatted signature into a +// JSON signature. If the signatures are missing the format information +// an error is thrown. The formatted signature must be created by +// the same method as format signature. 
func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) {
	var contentMap map[string]json.RawMessage
	err := json.Unmarshal(content, &contentMap)
	if err != nil {
		return nil, fmt.Errorf("error unmarshalling content: %s", err)
	}
	sigMessage, ok := contentMap[signatureKey]
	if !ok {
		return nil, ErrMissingSignatureKey
	}

	var signatureBlocks []jsParsedSignature
	err = json.Unmarshal([]byte(sigMessage), &signatureBlocks)
	if err != nil {
		return nil, fmt.Errorf("error unmarshalling signatures: %s", err)
	}

	js := newJSONSignature()
	js.signatures = make([]jsSignature, len(signatureBlocks))

	for i, signatureBlock := range signatureBlocks {
		// Each protected header carries the format information
		// (formatLength/formatTail) needed to reconstruct the exact payload
		// bytes; all signature blocks must agree on it.
		protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected)
		if err != nil {
			return nil, fmt.Errorf("base64 decode error: %s", err)
		}
		var protectedHeader map[string]interface{}
		err = json.Unmarshal(protectedBytes, &protectedHeader)
		if err != nil {
			return nil, fmt.Errorf("error unmarshalling protected header: %s", err)
		}

		formatLength, ok := readIntFromMap("formatLength", protectedHeader)
		if !ok {
			return nil, errors.New("missing formatted length")
		}
		encodedTail, ok := readStringFromMap("formatTail", protectedHeader)
		if !ok {
			return nil, errors.New("missing formatted tail")
		}
		formatTail, err := joseBase64UrlDecode(encodedTail)
		if err != nil {
			return nil, fmt.Errorf("base64 decode error on tail: %s", err)
		}
		if js.formatLength == 0 {
			js.formatLength = formatLength
		} else if js.formatLength != formatLength {
			return nil, errors.New("conflicting format length")
		}
		if len(js.formatTail) == 0 {
			js.formatTail = formatTail
		} else if bytes.Compare(js.formatTail, formatTail) != 0 { // NOTE(review): !bytes.Equal is the modern spelling; kept to match upstream.
			return nil, errors.New("conflicting format tail")
		}

		header := jsHeader{
			Algorithm: signatureBlock.Header.Algorithm,
			Chain:     signatureBlock.Header.Chain,
		}
		if signatureBlock.Header.JWK != nil {
			publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK))
			if err != nil {
				return nil, fmt.Errorf("error unmarshalling public key: %s", err)
			}
			header.JWK = publicKey
		}
		js.signatures[i] = jsSignature{
			Header:    header,
			Signature: signatureBlock.Signature,
			Protected: signatureBlock.Protected,
		}
	}
	if js.formatLength > len(content) {
		return nil, errors.New("invalid format length")
	}
	// Rebuild the original payload: the first formatLength bytes of content
	// followed by the preserved format tail.
	formatted := make([]byte, js.formatLength+len(js.formatTail))
	copy(formatted, content[:js.formatLength])
	copy(formatted[js.formatLength:], js.formatTail)
	js.indent = detectJSONIndent(formatted)
	js.payload = joseBase64UrlEncode(formatted)

	return js, nil
}

// PrettySignature formats a json signature into an easy to read
// single json serialized object.
func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
	if len(js.signatures) == 0 {
		return nil, errors.New("no signatures")
	}
	payload, err := joseBase64UrlDecode(js.payload)
	if err != nil {
		return nil, err
	}
	// Truncate to the format length: the trailing "}" (and whitespace) is
	// re-added below after the signatures member is spliced in.
	payload = payload[:js.formatLength]

	sort.Sort(jsSignaturesSorted(js.signatures))

	var marshalled []byte
	var marshallErr error
	if js.indent != "" {
		marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
	} else {
		marshalled, marshallErr = json.Marshal(js.signatures)
	}
	if marshallErr != nil {
		return nil, marshallErr
	}

	// Splice the signatures into the truncated payload object as a trailing
	// "<signatureKey>": [...] member, then re-close the object.
	buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
	buf.Write(payload)
	buf.WriteByte(',')
	if js.indent != "" {
		buf.WriteByte('\n')
		buf.WriteString(js.indent)
		buf.WriteByte('"')
		buf.WriteString(signatureKey)
		buf.WriteString("\": ")
		buf.Write(marshalled)
		buf.WriteByte('\n')
	} else {
		buf.WriteByte('"')
		buf.WriteString(signatureKey)
		buf.WriteString("\":")
		buf.Write(marshalled)
	}
	buf.WriteByte('}')

	return buf.Bytes(), nil
}

// Signatures provides the signatures on this JWS as opaque blobs, sorted by
// keyID. These blobs can be stored and reassembled with payloads. Internally,
// they are simply marshaled json web signatures but implementations should
// not rely on this.
func (js *JSONSignature) Signatures() ([][]byte, error) {
	sort.Sort(jsSignaturesSorted(js.signatures))

	var sb [][]byte
	for _, jsig := range js.signatures {
		p, err := json.Marshal(jsig)
		if err != nil {
			return nil, err
		}

		sb = append(sb, p)
	}

	return sb, nil
}

// Merge combines the signatures from one or more other signatures into the
// method receiver. If the payloads differ for any argument, an error will be
// returned and the receiver will not be modified.
func (js *JSONSignature) Merge(others ...*JSONSignature) error {
	merged := js.signatures
	for _, other := range others {
		if js.payload != other.payload {
			return fmt.Errorf("payloads differ from merge target")
		}
		merged = append(merged, other.signatures...)
	}

	js.signatures = merged
	return nil
}
diff --git a/vendor/github.com/docker/libtrust/key.go b/vendor/github.com/docker/libtrust/key.go
new file mode 100644
index 00000000..73642db2
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/key.go
@@ -0,0 +1,253 @@
package libtrust

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/rsa"
	"crypto/x509"
	"encoding/json"
	"encoding/pem"
	"errors"
	"fmt"
	"io"
)

// PublicKey is a generic interface for a Public Key.
type PublicKey interface {
	// KeyType returns the key type for this key. For elliptic curve keys,
	// this value should be "EC". For RSA keys, this value should be "RSA".
	KeyType() string
	// KeyID returns a distinct identifier which is unique to this Public Key.
	// The format generated by this library is a base32 encoding of a 240 bit
	// hash of the public key data divided into 12 groups like so:
	// ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
	KeyID() string
	// Verify verifyies the signature of the data in the io.Reader using this
	// Public Key. The alg parameter should identify the digital signature
	// algorithm which was used to produce the signature and should be
	// supported by this public key. Returns a nil error if the signature
	// is valid.
	Verify(data io.Reader, alg string, signature []byte) error
	// CryptoPublicKey returns the internal object which can be used as a
	// crypto.PublicKey for use with other standard library operations. The type
	// is either *rsa.PublicKey or *ecdsa.PublicKey
	CryptoPublicKey() crypto.PublicKey
	// These public keys can be serialized to the standard JSON encoding for
	// JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
	// Algorithms.
	MarshalJSON() ([]byte, error)
	// These keys can also be serialized to the standard PEM encoding.
	PEMBlock() (*pem.Block, error)
	// The string representation of a key is its key type and ID.
	String() string
	AddExtendedField(string, interface{})
	GetExtendedField(string) interface{}
}

// PrivateKey is a generic interface for a Private Key.
type PrivateKey interface {
	// A PrivateKey contains all fields and methods of a PublicKey of the
	// same type. The MarshalJSON method also outputs the private key as a
	// JSON Web Key, and the PEMBlock method outputs the private key as a
	// PEM block.
	PublicKey
	// PublicKey returns the PublicKey associated with this PrivateKey.
	PublicKey() PublicKey
	// Sign signs the data read from the io.Reader using a signature algorithm
	// supported by the private key. If the specified hashing algorithm is
	// supported by this key, that hash function is used to generate the
	// signature otherwise the the default hashing algorithm for this key is
	// used. Returns the signature and identifier of the algorithm used.
	Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
	// CryptoPrivateKey returns the internal object which can be used as a
	// crypto.PublicKey for use with other standard library operations. The
	// type is either *rsa.PublicKey or *ecdsa.PublicKey
	CryptoPrivateKey() crypto.PrivateKey
}

// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
// key is of an unsupported type.
func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
	switch cryptoPublicKey := cryptoPublicKey.(type) {
	case *ecdsa.PublicKey:
		return fromECPublicKey(cryptoPublicKey)
	case *rsa.PublicKey:
		return fromRSAPublicKey(cryptoPublicKey), nil
	default:
		return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
	}
}

// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
// key is of an unsupported type.
func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
	switch cryptoPrivateKey := cryptoPrivateKey.(type) {
	case *ecdsa.PrivateKey:
		return fromECPrivateKey(cryptoPrivateKey)
	case *rsa.PrivateKey:
		return fromRSAPrivateKey(cryptoPrivateKey), nil
	default:
		return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
	}
}

// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
// PublicKey or an error if there is a problem with the encoding.
func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) {
	pemBlock, _ := pem.Decode(data)
	if pemBlock == nil {
		return nil, errors.New("unable to find PEM encoded data")
	} else if pemBlock.Type != "PUBLIC KEY" {
		return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
	}

	return pubKeyFromPEMBlock(pemBlock)
}

// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of
// PEM blocks appended one after the other and returns a slice of PublicKey
// objects that it finds.
func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) {
	pubKeys := []PublicKey{}

	// Decode PEM blocks one at a time until the input is exhausted.
	for {
		var pemBlock *pem.Block
		pemBlock, data = pem.Decode(data)
		if pemBlock == nil {
			break
		} else if pemBlock.Type != "PUBLIC KEY" {
			return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
		}

		pubKey, err := pubKeyFromPEMBlock(pemBlock)
		if err != nil {
			return nil, err
		}

		pubKeys = append(pubKeys, pubKey)
	}

	return pubKeys, nil
}

// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust
// PrivateKey or an error if there is a problem with the encoding.
func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) {
	pemBlock, _ := pem.Decode(data)
	if pemBlock == nil {
		return nil, errors.New("unable to find PEM encoded data")
	}

	var key PrivateKey

	switch {
	case pemBlock.Type == "RSA PRIVATE KEY":
		rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes)
		if err != nil {
			return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err)
		}
		key = fromRSAPrivateKey(rsaPrivateKey)
	case pemBlock.Type == "EC PRIVATE KEY":
		ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes)
		if err != nil {
			return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err)
		}
		key, err = fromECPrivateKey(ecPrivateKey)
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type)
	}

	// Carry any PEM headers over onto the public half as extended fields.
	addPEMHeadersToKey(pemBlock, key.PublicKey())

	return key, nil
}

// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic
// Public Key to be used with libtrust.
func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) {
	jwk := make(map[string]interface{})

	err := json.Unmarshal(data, &jwk)
	if err != nil {
		return nil, fmt.Errorf(
			"decoding JWK Public Key JSON data: %s\n", err,
		)
	}

	// Get the Key Type value.
	kty, err := stringFromMap(jwk, "kty")
	if err != nil {
		return nil, fmt.Errorf("JWK Public Key type: %s", err)
	}

	switch {
	case kty == "EC":
		// Call out to unmarshal EC public key.
		return ecPublicKeyFromMap(jwk)
	case kty == "RSA":
		// Call out to unmarshal RSA public key.
		return rsaPublicKeyFromMap(jwk)
	default:
		return nil, fmt.Errorf(
			"JWK Public Key type not supported: %q\n", kty,
		)
	}
}

// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set
// and returns a slice of Public Key objects.
func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) {
	rawKeys, err := loadJSONKeySetRaw(data)
	if err != nil {
		return nil, err
	}

	pubKeys := make([]PublicKey, 0, len(rawKeys))

	for _, rawKey := range rawKeys {
		pubKey, err := UnmarshalPublicKeyJWK(rawKey)
		if err != nil {
			return nil, err
		}
		pubKeys = append(pubKeys, pubKey)
	}

	return pubKeys, nil
}

// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic
// Private Key to be used with libtrust.
func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) {
	jwk := make(map[string]interface{})

	err := json.Unmarshal(data, &jwk)
	if err != nil {
		return nil, fmt.Errorf(
			"decoding JWK Private Key JSON data: %s\n", err,
		)
	}

	// Get the Key Type value.
	kty, err := stringFromMap(jwk, "kty")
	if err != nil {
		return nil, fmt.Errorf("JWK Private Key type: %s", err)
	}

	switch {
	case kty == "EC":
		// Call out to unmarshal EC private key.
		return ecPrivateKeyFromMap(jwk)
	case kty == "RSA":
		// Call out to unmarshal RSA private key.
		return rsaPrivateKeyFromMap(jwk)
	default:
		return nil, fmt.Errorf(
			"JWK Private Key type not supported: %q\n", kty,
		)
	}
}
diff --git a/vendor/github.com/docker/libtrust/key_files.go b/vendor/github.com/docker/libtrust/key_files.go
new file mode 100644
index 00000000..c526de54
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/key_files.go
@@ -0,0 +1,255 @@
package libtrust

import (
	"encoding/json"
	"encoding/pem"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"strings"
)

var (
	// ErrKeyFileDoesNotExist indicates that the private key file does not exist.
	ErrKeyFileDoesNotExist = errors.New("key file does not exist")
)

// readKeyFileBytes reads the named file, translating os.IsNotExist into
// ErrKeyFileDoesNotExist so callers can treat a missing file specially.
func readKeyFileBytes(filename string) ([]byte, error) {
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		if os.IsNotExist(err) {
			err = ErrKeyFileDoesNotExist
		} else {
			err = fmt.Errorf("unable to read key file %s: %s", filename, err)
		}

		return nil, err
	}

	return data, nil
}

/*
	Loading and Saving of Public and Private Keys in either PEM or JWK format.
*/

// LoadKeyFile opens the given filename and attempts to read a Private Key
// encoded in either PEM or JWK format (if .json or .jwk file extension).
func LoadKeyFile(filename string) (PrivateKey, error) {
	contents, err := readKeyFileBytes(filename)
	if err != nil {
		return nil, err
	}

	var key PrivateKey

	// The file extension selects the decoder: JWK for .json/.jwk, PEM otherwise.
	if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
		key, err = UnmarshalPrivateKeyJWK(contents)
		if err != nil {
			return nil, fmt.Errorf("unable to decode private key JWK: %s", err)
		}
	} else {
		key, err = UnmarshalPrivateKeyPEM(contents)
		if err != nil {
			return nil, fmt.Errorf("unable to decode private key PEM: %s", err)
		}
	}

	return key, nil
}

// LoadPublicKeyFile opens the given filename and attempts to read a Public Key
// encoded in either PEM or JWK format (if .json or .jwk file extension).
func LoadPublicKeyFile(filename string) (PublicKey, error) {
	contents, err := readKeyFileBytes(filename)
	if err != nil {
		return nil, err
	}

	var key PublicKey

	// The file extension selects the decoder: JWK for .json/.jwk, PEM otherwise.
	if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
		key, err = UnmarshalPublicKeyJWK(contents)
		if err != nil {
			return nil, fmt.Errorf("unable to decode public key JWK: %s", err)
		}
	} else {
		key, err = UnmarshalPublicKeyPEM(contents)
		if err != nil {
			return nil, fmt.Errorf("unable to decode public key PEM: %s", err)
		}
	}

	return key, nil
}

// SaveKey saves the given key to a file using the provided filename.
// This process will overwrite any existing file at the provided location.
func SaveKey(filename string, key PrivateKey) error {
	var encodedKey []byte
	var err error

	if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
		// Encode in JSON Web Key format.
		encodedKey, err = json.MarshalIndent(key, "", " ")
		if err != nil {
			return fmt.Errorf("unable to encode private key JWK: %s", err)
		}
	} else {
		// Encode in PEM format.
		pemBlock, err := key.PEMBlock()
		if err != nil {
			return fmt.Errorf("unable to encode private key PEM: %s", err)
		}
		encodedKey = pem.EncodeToMemory(pemBlock)
	}

	// 0600: private key material must not be group/world readable.
	err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600))
	if err != nil {
		return fmt.Errorf("unable to write private key file %s: %s", filename, err)
	}

	return nil
}

// SavePublicKey saves the given public key to the file.
func SavePublicKey(filename string, key PublicKey) error {
	var encodedKey []byte
	var err error

	if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
		// Encode in JSON Web Key format.
		encodedKey, err = json.MarshalIndent(key, "", " ")
		if err != nil {
			return fmt.Errorf("unable to encode public key JWK: %s", err)
		}
	} else {
		// Encode in PEM format.
		pemBlock, err := key.PEMBlock()
		if err != nil {
			return fmt.Errorf("unable to encode public key PEM: %s", err)
		}
		encodedKey = pem.EncodeToMemory(pemBlock)
	}

	err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644))
	if err != nil {
		return fmt.Errorf("unable to write public key file %s: %s", filename, err)
	}

	return nil
}

// Public Key Set files

// jwkSet is the on-disk JSON Web Key Set wrapper ({"keys": [...]}).
type jwkSet struct {
	Keys []json.RawMessage `json:"keys"`
}

// LoadKeySetFile loads a key set
func LoadKeySetFile(filename string) ([]PublicKey, error) {
	if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
		return loadJSONKeySetFile(filename)
	}

	// Must be a PEM format file
	return loadPEMKeySetFile(filename)
}

// loadJSONKeySetRaw decodes a JWK set into its raw member keys without
// parsing the individual keys.
func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) {
	if len(data) == 0 {
		// This is okay, just return an empty slice.
		return []json.RawMessage{}, nil
	}

	keySet := jwkSet{}

	err := json.Unmarshal(data, &keySet)
	if err != nil {
		return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
	}

	return keySet.Keys, nil
}

// loadJSONKeySetFile reads filename as a JWK set; a missing file yields an
// empty set rather than an error.
func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
	contents, err := readKeyFileBytes(filename)
	if err != nil && err != ErrKeyFileDoesNotExist {
		return nil, err
	}

	return UnmarshalPublicKeyJWKSet(contents)
}

// loadPEMKeySetFile reads filename as a PEM bundle; a missing file yields an
// empty set rather than an error.
func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
	data, err := readKeyFileBytes(filename)
	if err != nil && err != ErrKeyFileDoesNotExist {
		return nil, err
	}

	return UnmarshalPublicKeyPEMBundle(data)
}

// AddKeySetFile adds a key to a key set
func AddKeySetFile(filename string, key PublicKey) error {
	if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
		return addKeySetJSONFile(filename, key)
	}

	// Must be a PEM format file
	return addKeySetPEMFile(filename, key)
}

// addKeySetJSONFile appends key to the JWK set in filename by rewriting the
// whole file (read-modify-write; not safe against concurrent writers).
func addKeySetJSONFile(filename string, key PublicKey) error {
	encodedKey, err := json.Marshal(key)
	if err != nil {
		return fmt.Errorf("unable to encode trusted client key: %s", err)
	}

	contents, err := readKeyFileBytes(filename)
	if err != nil && err != ErrKeyFileDoesNotExist {
		return err
	}

	rawEntries, err := loadJSONKeySetRaw(contents)
	if err != nil {
		return err
	}

	rawEntries = append(rawEntries, json.RawMessage(encodedKey))
	entriesWrapper := jwkSet{Keys: rawEntries}

	encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ")
	if err != nil {
		return fmt.Errorf("unable to encode trusted client keys: %s", err)
	}

	err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
	if err != nil {
		return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
	}

	return nil
}

// addKeySetPEMFile appends key as a PEM block to filename.
func addKeySetPEMFile(filename string, key PublicKey) error {
	// Encode to PEM, open file for appending, write PEM.
	file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
	if err != nil {
		return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
	}
	defer file.Close()

	pemBlock, err := key.PEMBlock()
	if err != nil {
		return fmt.Errorf("unable to encoded trusted key: %s", err)
	}

	_, err = file.Write(pem.EncodeToMemory(pemBlock))
	if err != nil {
		return fmt.Errorf("unable to write trusted keys file: %s", err)
	}

	return nil
}
diff --git a/vendor/github.com/docker/libtrust/key_manager.go b/vendor/github.com/docker/libtrust/key_manager.go
new file mode 100644
index 00000000..9a98ae35
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/key_manager.go
@@ -0,0 +1,175 @@
package libtrust

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path"
	"sync"
)

// ClientKeyManager manages client keys on the filesystem
type ClientKeyManager struct {
	key        PrivateKey
	clientFile string
	clientDir  string

	// clientLock guards clients.
	clientLock sync.RWMutex
	clients    []PublicKey

	// configLock guards configs.
	configLock sync.Mutex
	configs    []*tls.Config
}

// NewClientKeyManager loads a new manager from a set of key files
// and managed by the given private key.
func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) {
	m := &ClientKeyManager{
		key:        trustKey,
		clientFile: clientFile,
		clientDir:  clientDir,
	}
	if err := m.loadKeys(); err != nil {
		return nil, err
	}
	// TODO Start watching file and directory

	return m, nil
}

// loadKeys reads the authorized-keys file and directory and replaces the
// cached client key list under clientLock.
func (c *ClientKeyManager) loadKeys() (err error) {
	// Load authorized keys file
	var clients []PublicKey
	if c.clientFile != "" {
		clients, err = LoadKeySetFile(c.clientFile)
		if err != nil {
			return fmt.Errorf("unable to load authorized keys: %s", err)
		}
	}

	// Add clients from authorized keys directory
	files, err := ioutil.ReadDir(c.clientDir)
	if err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("unable to open authorized keys directory: %s", err)
	}
	for _, f := range files {
		if !f.IsDir() {
			publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name()))
			if err != nil {
				return fmt.Errorf("unable to load authorized key file: %s", err)
			}
			clients = append(clients, publicKey)
		}
	}

	c.clientLock.Lock()
	c.clients = clients
	c.clientLock.Unlock()

	return nil
}

// RegisterTLSConfig registers a tls configuration to manager
// such that any changes to the keys may be reflected in
// the tls client CA pool
func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error {
	c.clientLock.RLock()
	certPool, err := GenerateCACertPool(c.key, c.clients)
	if err != nil {
		return fmt.Errorf("CA pool generation error: %s", err)
	}
	c.clientLock.RUnlock()

	tlsConfig.ClientCAs = certPool

	c.configLock.Lock()
	c.configs = append(c.configs, tlsConfig)
	c.configLock.Unlock()

	return nil
}

// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for
// libtrust identity authentication for the domain specified
func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) {
	tlsConfig := newTLSConfig()

	tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
	if err := clients.RegisterTLSConfig(tlsConfig); err != nil {
		return nil, err
	}

	// Generate cert
	ips, domains, err := parseAddr(addr)
	if err != nil {
		return nil, err
	}
	// add domain that it expects clients to use
	domains = append(domains, domain)
	x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips)
	if err != nil {
		return nil, fmt.Errorf("certificate generation error: %s", err)
	}
	tlsConfig.Certificates = []tls.Certificate{{
		Certificate: [][]byte{x509Cert.Raw},
		PrivateKey:  trustKey.CryptoPrivateKey(),
		Leaf:        x509Cert,
	}}

	return tlsConfig, nil
}

// NewCertAuthTLSConfig creates a tls.Config for the server to use for
// certificate authentication
func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) {
	tlsConfig := newTLSConfig()

	cert, err := tls.LoadX509KeyPair(certPath, keyPath)
	if err != nil {
		return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err)
	}
	tlsConfig.Certificates = []tls.Certificate{cert}

	// Verify client certificates against a CA?
	if caPath != "" {
		certPool := x509.NewCertPool()
		file, err := ioutil.ReadFile(caPath)
		if err != nil {
			return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
		}
		certPool.AppendCertsFromPEM(file)

		tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
		tlsConfig.ClientCAs = certPool
	}

	return tlsConfig, nil
}

// newTLSConfig returns the baseline TLS configuration shared by the
// constructors above.
func newTLSConfig() *tls.Config {
	return &tls.Config{
		NextProtos: []string{"http/1.1"},
		// Avoid fallback on insecure SSL protocols
		MinVersion: tls.VersionTLS10,
	}
}

// parseAddr parses an address into an array of IPs and domains
func parseAddr(addr string) ([]net.IP, []string, error) {
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		return nil, nil, err
	}
	var domains []string
	var ips []net.IP
	ip := net.ParseIP(host)
	if ip != nil {
		ips = []net.IP{ip}
	} else {
		domains = []string{host}
	}
	return ips, domains, nil
}
diff --git a/vendor/github.com/docker/libtrust/rsa_key.go b/vendor/github.com/docker/libtrust/rsa_key.go
new file mode 100644
index 00000000..dac4cacf
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/rsa_key.go
@@ -0,0 +1,427 @@
package libtrust

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/json"
	"encoding/pem"
	"errors"
	"fmt"
	"io"
	"math/big"
)

/*
 * RSA DSA PUBLIC KEY
 */

// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
type rsaPublicKey struct {
	*rsa.PublicKey
	// extended carries additional JWK fields round-tripped with the key.
	extended map[string]interface{}
}

// fromRSAPublicKey wraps a crypto *rsa.PublicKey in the libtrust JWK type.
func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
	return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
}

// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
func (k *rsaPublicKey) KeyType() string {
	return "RSA"
}

// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *rsaPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *rsaPublicKey) String() string { + return fmt.Sprintf("RSA Public Key <%s>", k.KeyID()) +} + +// Verify verifyies the signature of the data in the io.Reader using this Public Key. +// The alg parameter should be the name of the JWA digital signature algorithm +// which was used to produce the signature and should be supported by this +// public key. Returns a nil error if the signature is valid. +func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // Verify the signature of the given date, return non-nil error if valid. + sigAlg, err := rsaSignatureAlgorithmByName(alg) + if err != nil { + return fmt.Errorf("unable to verify Signature: %s", err) + } + + hasher := sigAlg.HashID().New() + _, err = io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature) + if err != nil { + return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err) + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *rsaPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) + jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// RSA keys. 
func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) {
	return json.Marshal(k.toMap())
}

// PEMBlock serializes this Public Key to DER-encoded PKIX format.
func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) {
	derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
	if err != nil {
		return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err)
	}
	k.extended["kid"] = k.KeyID() // For display purposes.
	return createPemBlock("PUBLIC KEY", derBytes, k.extended)
}

// AddExtendedField stores an arbitrary field to be serialized with the key.
func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) {
	k.extended[field] = value
}

// GetExtendedField returns a previously stored extended field, or nil when
// the field is not present.
func (k *rsaPublicKey) GetExtendedField(field string) interface{} {
	v, ok := k.extended[field]
	if !ok {
		return nil
	}
	return v
}

// rsaPublicKeyFromMap builds an rsaPublicKey from a decoded JWK map,
// validating the optional "kid" against the derived key ID and rejecting
// maps that contain private key material.
func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) {
	// JWK key type (kty) has already been determined to be "RSA".
	// Need to extract 'n', 'e', and 'kid' and check for
	// consistency.

	// Get the modulus parameter N.
	nB64Url, err := stringFromMap(jwk, "n")
	if err != nil {
		return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
	}

	n, err := parseRSAModulusParam(nB64Url)
	if err != nil {
		return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
	}

	// Get the public exponent E.
	eB64Url, err := stringFromMap(jwk, "e")
	if err != nil {
		return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
	}

	e, err := parseRSAPublicExponentParam(eB64Url)
	if err != nil {
		return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
	}

	key := &rsaPublicKey{
		PublicKey: &rsa.PublicKey{N: n, E: e},
	}

	// Key ID is optional, but if it exists, it should match the key.
	_, ok := jwk["kid"]
	if ok {
		kid, err := stringFromMap(jwk, "kid")
		if err != nil {
			return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err)
		}
		if kid != key.KeyID() {
			return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid)
		}
	}

	if _, ok := jwk["d"]; ok {
		return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent")
	}

	// Keep the remaining JWK fields as extended data on the key.
	key.extended = jwk

	return key, nil
}

/*
 * RSA DSA PRIVATE KEY
 */

// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms.
type rsaPrivateKey struct {
	rsaPublicKey
	*rsa.PrivateKey
}

// fromRSAPrivateKey wraps a crypto *rsa.PrivateKey, embedding its public half.
func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey {
	return &rsaPrivateKey{
		*fromRSAPublicKey(&cryptoPrivateKey.PublicKey),
		cryptoPrivateKey,
	}
}

// PublicKey returns the Public Key data associated with this Private Key.
func (k *rsaPrivateKey) PublicKey() PublicKey {
	return &k.rsaPublicKey
}

// String describes the key as its type plus key ID.
func (k *rsaPrivateKey) String() string {
	return fmt.Sprintf("RSA Private Key <%s>", k.KeyID())
}

// Sign signs the data read from the io.Reader using a signature algorithm supported
// by the RSA private key. If the specified hashing algorithm is supported by
// this key, that hash function is used to generate the signature otherwise the
// the default hashing algorithm for this key is used. Returns the signature
// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
// "RS512".
func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
	// Generate a signature of the data using the internal alg.
	sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
	hasher := sigAlg.HashID().New()

	_, err = io.Copy(hasher, data)
	if err != nil {
		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
	}
	hash := hasher.Sum(nil)

	signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
	if err != nil {
		return nil, "", fmt.Errorf("error producing signature: %s", err)
	}

	alg = sigAlg.HeaderParam()

	return
}

// CryptoPrivateKey returns the internal object which can be used as a
// crypto.PublicKey for use with other standard library operations. The type
// is either *rsa.PublicKey or *ecdsa.PublicKey
func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
	return k.PrivateKey
}

// toMap extends the public JWK map with the private RSA parameters
// (d, p, q, the precomputed CRT values, and any additional primes).
func (k *rsaPrivateKey) toMap() map[string]interface{} {
	k.Precompute() // Make sure the precomputed values are stored.
	jwk := k.rsaPublicKey.toMap()

	jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
	jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
	jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
	jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
	jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
	jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())

	otherPrimes := k.Primes[2:]

	if len(otherPrimes) > 0 {
		otherPrimesInfo := make([]interface{}, len(otherPrimes))
		for i, r := range otherPrimes {
			otherPrimeInfo := make(map[string]string, 3)
			otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
			crtVal := k.Precomputed.CRTValues[i]
			otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
			otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
			otherPrimesInfo[i] = otherPrimeInfo
		}
		jwk["oth"] = otherPrimesInfo
	}

	return jwk
}

// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Private Key to DER-encoded PKIX format. +func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) { + derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey) + k.extended["keyID"] = k.KeyID() // For display purposes. + return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended) +} + +func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) { + // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that + // only the private key exponent 'd' is REQUIRED, the others are just for + // signature/decryption optimizations and SHOULD be included when the JWK + // is produced. We MAY choose to accept a JWK which only includes 'd', but + // we're going to go ahead and not choose to accept it without the extra + // fields. Only the 'oth' field will be optional (for multi-prime keys). + privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err) + } + firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) + } + + var oth interface{} + if _, ok := jwk["oth"]; ok { + oth = jwk["oth"] + delete(jwk, "oth") 
+ } + + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract the public key information, then extract the private + // key values. + publicKey, err := rsaPublicKeyFromMap(jwk) + if err != nil { + return nil, err + } + + privateKey := &rsa.PrivateKey{ + PublicKey: *publicKey.PublicKey, + D: privateExponent, + Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor}, + Precomputed: rsa.PrecomputedValues{ + Dp: firstFactorCRT, + Dq: secondFactorCRT, + Qinv: crtCoeff, + }, + } + + if oth != nil { + // Should be an array of more JSON objects. + otherPrimesInfo, ok := oth.([]interface{}) + if !ok { + return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array") + } + numOtherPrimeFactors := len(otherPrimesInfo) + if numOtherPrimeFactors == 0 { + return nil, errors.New("JWK RSA Privake Key: Invalid other primes info: must be absent or non-empty") + } + otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors) + productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor) + crtValues := make([]rsa.CRTValue, numOtherPrimeFactors) + + for i, val := range otherPrimesInfo { + otherPrimeinfo, ok := val.(map[string]interface{}) + if !ok { + return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object") + } + + otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) + } + + crtValue := crtValues[i] + crtValue.Exp = otherFactorCRT + crtValue.Coeff = otherCrtCoeff + crtValue.R = productOfPrimes + otherPrimeFactors[i] = otherPrimeFactor + 
productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor) + } + + privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...) + privateKey.Precomputed.CRTValues = crtValues + } + + key := &rsaPrivateKey{ + rsaPublicKey: *publicKey, + PrivateKey: privateKey, + } + + return key, nil +} + +/* + * Key Generation Functions. + */ + +func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { + k = new(rsaPrivateKey) + k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + + k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. +func GenerateRSA2048PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(2048) + if err != nil { + return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. +func GenerateRSA3072PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(3072) + if err != nil { + return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. 
+func GenerateRSA4096PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(4096) + if err != nil { + return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) + } + + return k, nil +} diff --git a/vendor/github.com/docker/libtrust/util.go b/vendor/github.com/docker/libtrust/util.go new file mode 100644 index 00000000..a5a101d3 --- /dev/null +++ b/vendor/github.com/docker/libtrust/util.go @@ -0,0 +1,363 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/elliptic" + "crypto/tls" + "crypto/x509" + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net/url" + "os" + "path/filepath" + "strings" + "time" +) + +// LoadOrCreateTrustKey will load a PrivateKey from the specified path +func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) { + if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { + return nil, err + } + + trustKey, err := LoadKeyFile(trustKeyPath) + if err == ErrKeyFileDoesNotExist { + trustKey, err = GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("error generating key: %s", err) + } + + if err := SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("error saving key file: %s", err) + } + + dir, file := filepath.Split(trustKeyPath) + if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { + return nil, fmt.Errorf("error saving public key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("error loading key file: %s", err) + } + return trustKey, nil +} + +// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity +// based authentication from the specified dockerUrl, the rootConfigPath and +// the server name to which it is connecting. +// If trustUnknownHosts is true it will automatically add the host to the +// known-hosts.json in rootConfigPath. 
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + trustKeyPath := filepath.Join(rootConfigPath, "key.json") + knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") + + u, err := url.Parse(dockerUrl) + if err != nil { + return nil, fmt.Errorf("unable to parse machine url") + } + + if u.Scheme == "unix" { + return nil, nil + } + + addr := u.Host + proto := "tcp" + + trustKey, err := LoadOrCreateTrustKey(trustKeyPath) + if err != nil { + return nil, fmt.Errorf("unable to load trust key: %s", err) + } + + knownHosts, err := LoadKeySetFile(knownHostsPath) + if err != nil { + return nil, fmt.Errorf("could not load trusted hosts file: %s", err) + } + + allowedHosts, err := FilterByHosts(knownHosts, addr, false) + if err != nil { + return nil, fmt.Errorf("error filtering hosts: %s", err) + } + + certPool, err := GenerateCACertPool(trustKey, allowedHosts) + if err != nil { + return nil, fmt.Errorf("Could not create CA pool: %s", err) + } + + tlsConfig.ServerName = serverName + tlsConfig.RootCAs = certPool + + x509Cert, err := GenerateSelfSignedClientCert(trustKey) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + tlsConfig.InsecureSkipVerify = true + + testConn, err := tls.Dial(proto, addr, tlsConfig) + if err != nil { + return nil, fmt.Errorf("tls Handshake error: %s", err) + } + + opts := x509.VerifyOptions{ + Roots: tlsConfig.RootCAs, + CurrentTime: time.Now(), + DNSName: tlsConfig.ServerName, + Intermediates: x509.NewCertPool(), + } + + certs := testConn.ConnectionState().PeerCertificates + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + + if _, err := certs[0].Verify(opts); err != nil { 
+ if _, ok := err.(x509.UnknownAuthorityError); ok { + if trustUnknownHosts { + pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) + if err != nil { + return nil, fmt.Errorf("error extracting public key from cert: %s", err) + } + + pubKey.AddExtendedField("hosts", []string{addr}) + + if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { + return nil, fmt.Errorf("error adding machine to known hosts: %s", err) + } + } else { + return nil, fmt.Errorf("unable to connect. unknown host: %s", addr) + } + } + } + + testConn.Close() + tlsConfig.InsecureSkipVerify = false + + return tlsConfig, nil +} + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters omitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. 
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + s = strings.Replace(s, "\n", "", -1) + s = strings.Replace(s, " ", "", -1) + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func keyIDEncode(b []byte) string { + s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") + var buf bytes.Buffer + var i int + for i = 0; i < len(s)/4-1; i++ { + start := i * 4 + end := start + 4 + buf.WriteString(s[start:end] + ":") + } + buf.WriteString(s[i*4:]) + return buf.String() +} + +func keyIDFromCryptoKey(pubKey PublicKey) string { + // Generate and return a 'libtrust' fingerprint of the public key. + // For an RSA key this should be: + // SHA256(DER encoded ASN1) + // Then truncated to 240 bits and encoded into 12 base32 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) + if err != nil { + return "" + } + hasher := crypto.SHA256.New() + hasher.Write(derBytes) + return keyIDEncode(hasher.Sum(nil)[:30]) +} + +func stringFromMap(m map[string]interface{}, key string) (string, error) { + val, ok := m[key] + if !ok { + return "", fmt.Errorf("%q value not specified", key) + } + + str, ok := val.(string) + if !ok { + return "", fmt.Errorf("%q value must be a string", key) + } + delete(m, key) + + return str, nil +} + +func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { + curveByteLen := (curve.Params().BitSize + 7) >> 3 + + cBytes, err := joseBase64UrlDecode(cB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + cByteLength := len(cBytes) + if cByteLength != curveByteLen { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) + } + 
return new(big.Int).SetBytes(cBytes), nil +} + +func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { + dBytes, err := joseBase64UrlDecode(dB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := curve.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + dByteLength := len(dBytes) + + if dByteLength != octetLength { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) + } + + return new(big.Int).SetBytes(dBytes), nil +} + +func parseRSAModulusParam(nB64Url string) (*big.Int, error) { + nBytes, err := joseBase64UrlDecode(nB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(nBytes), nil +} + +func serializeRSAPublicExponentParam(e int) []byte { + // We MUST use the minimum number of octets to represent E. + // E is supposed to be 65537 for performance and security reasons + // and is what golang's rsa package generates, but it might be + // different if imported from some other generator. 
+ buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, uint32(e)) + var i int + for i = 0; i < 8; i++ { + if buf[i] != 0 { + break + } + } + return buf[i:] +} + +func parseRSAPublicExponentParam(eB64Url string) (int, error) { + eBytes, err := joseBase64UrlDecode(eB64Url) + if err != nil { + return 0, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + // Only the minimum number of bytes were used to represent E, but + // binary.BigEndian.Uint32 expects at least 4 bytes, so we need + // to add zero padding if necassary. + byteLen := len(eBytes) + buf := make([]byte, 4-byteLen, 4) + eBytes = append(buf, eBytes...) + + return int(binary.BigEndian.Uint32(eBytes)), nil +} + +func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) { + b64Url, err := stringFromMap(m, key) + if err != nil { + return nil, err + } + + paramBytes, err := joseBase64UrlDecode(b64Url) + if err != nil { + return nil, fmt.Errorf("invaled base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(paramBytes), nil +} + +func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) { + pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}} + for k, v := range headers { + switch val := v.(type) { + case string: + pemBlock.Headers[k] = val + case []string: + if k == "hosts" { + pemBlock.Headers[k] = strings.Join(val, ",") + } else { + // Return error, non-encodable type + } + default: + // Return error, non-encodable type + } + } + + return pemBlock, nil +} + +func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) { + cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err) + } + + pubKey, err := FromCryptoPublicKey(cryptoPublicKey) + if err != nil { + return nil, err + } + + addPEMHeadersToKey(pemBlock, pubKey) + + return pubKey, nil +} + +func addPEMHeadersToKey(pemBlock 
*pem.Block, pubKey PublicKey) { + for key, value := range pemBlock.Headers { + var safeVal interface{} + if key == "hosts" { + safeVal = strings.Split(value, ",") + } else { + safeVal = value + } + pubKey.AddExtendedField(key, safeVal) + } +} diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index a2295380..4528059c 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -1,5 +1,5 @@ -# This is an example goreleaser.yaml file with some sane defaults. -# Make sure to check the documentation at http://goreleaser.com +version: 2 + before: hooks: - ./gen.sh @@ -99,7 +99,7 @@ archives: checksum: name_template: 'checksums.txt' snapshot: - name_template: "{{ .Tag }}-next" + version_template: "{{ .Tag }}-next" changelog: sort: asc filters: diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 1f72cdde..de264c85 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,27 @@ This package provides various compression algorithms. # changelog +* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) + * gzhttp: Add TransportAlwaysDecompress option. 
https://github.com/klauspost/compress/pull/978 + * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 + * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982 + * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007 + * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996 + +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + * Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 @@ -55,6 +76,10 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * s2: Do 2 overlapping match checks 
https://github.com/klauspost/compress/pull/839 * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +
+ See changes to v1.16.x + * July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 @@ -77,7 +102,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 @@ -93,6 +118,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746 +
See changes to v1.15.x @@ -131,7 +157,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635 * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 @@ -334,7 +360,7 @@ While the release has been extensively tested, it is recommended to testing when * s2: Fix binaries. * Feb 25, 2021 (v1.11.8) - * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) @@ -513,7 +539,7 @@ While the release has been extensively tested, it is recommended to testing when * Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. * Feb 19, 2016: Handle small payloads faster in level 1-3. * Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. 
New default level is 5. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. * Feb 14, 2016: Snappy: Merge upstream changes. * Feb 14, 2016: Snappy: Fix aggressive skipping. * Feb 14, 2016: Snappy: Update benchmark. @@ -560,6 +586,8 @@ the stateless compress described below. For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). +To disable all assembly add `-tags=noasm`. This works across all packages. + # Stateless compression This package offers stateless compression as a special option for gzip/deflate. diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index cc05d0f7..0c7dd4ff 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -15,7 +15,7 @@ const ( // It is possible, but by no way guaranteed that corrupt data will // return an error. // It is up to the caller to verify integrity of the returned data. -// Use a predefined Scrach to set maximum acceptable output size. +// Use a predefined Scratch to set maximum acceptable output size. 
func Decompress(b []byte, s *Scratch) ([]byte, error) { s, err := s.prepare(b) if err != nil { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 54bd08b2..0f56b02d 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 0 { - fmt.Fprintf(w, "%d errros in base, stopping\n", errs) + fmt.Fprintf(w, "%d errors in base, stopping\n", errs) continue } // Ensure that all combinations are covered. @@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 20 { - fmt.Fprintf(w, "%d errros, stopping\n", errs) + fmt.Fprintf(w, "%d errors, stopping\n", errs) break } } diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 2aa6a95a..2754bac6 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -51,7 +51,7 @@ func emitCopy(dst []byte, offset, length int) int { i := 0 // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. 
The // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because + // length emitted down below is a little lower (at 60 = 64 - 4), because // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 9f17ce60..9c28840c 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -554,6 +554,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { if debugDecoder { printf("Compression modes: 0b%b", compMode) } + if compMode&3 != 0 { + return errors.New("corrupt block: reserved bits not zero") + } for i := uint(0); i < 3; i++ { mode := seqCompMode((compMode >> (6 - i*2)) & 3) if debugDecoder { @@ -595,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { printf("RLE set to 0x%x, code: %v", symb, v) } case compModeFSE: - println("Reading table for", tableIndex(i)) + if debugDecoder { + println("Reading table for", tableIndex(i)) + } if seq.fse == nil || seq.fse.preDefined { seq.fse = fseDecoderPool.Get().(*fseDecoder) } diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 2cfe925a..32a7f401 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -427,6 +427,16 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { return nil } +// encodeRLE will encode an RLE block. 
+func (b *blockEnc) encodeRLE(val byte, length uint32) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(length) + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, val) +} + // fuzzFseEncoder can be used to fuzz the FSE encoder. func fuzzFseEncoder(data []byte) int { if len(data) > maxSequences || len(data) < 2 { @@ -479,6 +489,16 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if len(b.sequences) == 0 { return b.encodeLits(b.literals, rawAllLits) } + if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { + // Check common RLE cases. + seq := b.sequences[0] + if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { + // Offset == 1 and 0 or 1 literals. + b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) + return nil + } + } + // We want some difference to at least account for the headers. saved := b.size - len(b.literals) - (b.size >> 6) if saved < 16 { diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f04aaa21..bbca1723 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -82,7 +82,7 @@ var ( // can run multiple concurrent stateless decodes. It is even possible to // use stateless decodes while a stream is being decoded. // -// The Reset function can be used to initiate a new stream, which is will considerably +// The Reset function can be used to initiate a new stream, which will considerably // reduce the allocations normally caused by NewReader. 
func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { initPredefined() diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index 8d5567fe..b7b83164 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -273,6 +273,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { enc.Encode(&block, b) addValues(&remain, block.literals) litTotal += len(block.literals) + if len(block.sequences) == 0 { + continue + } seqs += len(block.sequences) block.genCodes() addHist(&ll, block.coders.llEnc.Histogram()) @@ -286,6 +289,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { if offset == 0 { continue } + if int(offset) >= len(o.History) { + continue + } if offset > 3 { newOffsets[offset-3]++ } else { @@ -336,6 +342,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { if seqs/nUsed < 512 { // Use 512 as minimum. nUsed = seqs / 512 + if nUsed == 0 { + nUsed = 1 + } } copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { hist := dst.Histogram() @@ -358,6 +367,28 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { fakeLength += v hist[i] = uint32(v) } + + // Ensure we aren't trying to represent RLE. 
+ if maxCount == fakeLength { + for i := range hist { + if uint8(i) == maxSym { + fakeLength++ + maxSym++ + hist[i+1] = 1 + if maxSym > 1 { + break + } + } + if hist[0] == 0 { + fakeLength++ + hist[i] = 1 + if maxSym > 1 { + break + } + } + } + } + dst.HistogramFinished(maxSym, maxCount) dst.reUsed = false dst.useRLE = false diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 87f42879..4613724e 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -135,8 +135,20 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { break } + // Add block to history s := e.addBlock(src) blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + if len(src) < minNonLiteralBlockSize { blk.extraLits = len(src) blk.literals = blk.literals[:len(src)] diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 20d25b0e..84a79fde 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -102,9 +102,20 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { e.cur = e.maxMatchOff break } - + // Add block to history s := e.addBlock(src) blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + if len(src) < minNonLiteralBlockSize { blk.extraLits = len(src) blk.literals = 
blk.literals[:len(src)] @@ -168,9 +179,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -199,12 +210,12 @@ encodeLoop: // Index match start+1 (long) -> s - 1 index0 := s + repOff - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -230,9 +241,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -259,11 +270,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -697,9 +708,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. 
@@ -727,12 +738,12 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index match start+1 (long) -> s - 1 - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -761,9 +772,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -790,11 +801,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index a154c18f..d36be7bd 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -138,9 +138,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. 
@@ -166,11 +166,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -798,9 +798,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -826,11 +826,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 72af7ef0..8f8223cd 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -6,6 +6,7 @@ package zstd import ( "crypto/rand" + "errors" "fmt" "io" "math" @@ -149,6 +150,9 @@ func (e *Encoder) ResetContentSize(w io.Writer, size int64) { // and write CRC if requested. 
func (e *Encoder) Write(p []byte) (n int, err error) { s := &e.state + if s.eofWritten { + return 0, ErrEncoderClosed + } for len(p) > 0 { if len(p)+len(s.filling) < e.o.blockSize { if e.o.crc { @@ -202,7 +206,7 @@ func (e *Encoder) nextBlock(final bool) error { return nil } if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) + s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) var n2 int n2, s.err = s.w.Write(s.current) if s.err != nil { @@ -288,6 +292,9 @@ func (e *Encoder) nextBlock(final bool) error { s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.nInput += int64(len(s.current)) s.wg.Add(1) + if final { + s.eofWritten = true + } go func(src []byte) { if debugEncoder { println("Adding block,", len(src), "bytes, final:", final) @@ -303,9 +310,6 @@ func (e *Encoder) nextBlock(final bool) error { blk := enc.Block() enc.Encode(blk, src) blk.last = final - if final { - s.eofWritten = true - } // Wait for pending writes. s.wWg.Wait() if s.writeErr != nil { @@ -401,12 +405,20 @@ func (e *Encoder) Flush() error { if len(s.filling) > 0 { err := e.nextBlock(false) if err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return err } } s.wg.Wait() s.wWg.Wait() if s.err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return s.err } return s.writeErr @@ -422,6 +434,9 @@ func (e *Encoder) Close() error { } err := e.nextBlock(true) if err != nil { + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return err } if s.frameContentSize > 0 { @@ -459,6 +474,11 @@ func (e *Encoder) Close() error { } _, s.err = s.w.Write(frame) } + if s.err == nil { + s.err = ErrEncoderClosed + return nil + } + return s.err } @@ -469,6 +489,15 @@ func (e *Encoder) Close() error { // Data compressed with EncodeAll can be decoded with the Decoder, // using either a stream or DecodeAll. 
func (e *Encoder) EncodeAll(src, dst []byte) []byte { + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + e.encoders <- enc + }() + return e.encodeAll(enc, src, dst) +} + +func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { if len(src) == 0 { if e.o.fullZero { // Add frame header. @@ -491,13 +520,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } return dst } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. - // If a non-single block is needed the encoder will reset again. - e.encoders <- enc - }() + // Use single segments when above minimum window and below window size. single := len(src) <= e.o.windowSize && len(src) > MinWindowSize if e.o.single != nil { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 53e160f7..e47af66e 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error { } return err } - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + if debugDecoder { + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + } windowLog := 10 + (wd >> 3) windowBase := uint64(1) << windowLog windowAdd := (windowBase / 8) * uint64(wd&0x7) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s index 17901e08..ae7d4d32 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -162,12 +162,12 @@ finalize: MOVD h, ret+24(FP) RET -// func writeBlocks(d *Digest, b []byte) int +// func writeBlocks(s *Digest, b []byte) int TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 LDP ·primes+0(SB), (prime1, prime2) // Load state. 
Assume v[1-4] are stored contiguously. - MOVD d+0(FP), digest + MOVD s+0(FP), digest LDP 0(digest), (v1, v2) LDP 16(digest), (v3, v4) diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s index 9a7655c0..0782b86e 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -5,7 +5,6 @@ #include "textflag.h" // func matchLen(a []byte, b []byte) int -// Requires: BMI TEXT ·matchLen(SB), NOSPLIT, $0-56 MOVQ a_base+0(FP), AX MOVQ b_base+24(FP), CX @@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56 JB matchlen_match4_standalone matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone #ifdef GOAMD64_v3 TZCNTQ BX, BX #else BSFQ BX, BX #endif - SARQ $0x03, BX + SHRL $0x03, BX LEAL (SI)(BX*1), SI JMP gen_match_len_end diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 8adabd82..c59f17e0 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) default: - return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) } s.seqSize += ctx.litRemain @@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { return io.ErrUnexpectedEOF } - return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) } if ctx.litRemain < 0 { diff --git 
a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 5b06174b..f5591fa1 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -1814,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -2376,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition @@ -2896,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -3560,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 4be7cc73..066bef2a 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -88,6 +88,10 @@ var ( // Close has been called. ErrDecoderClosed = errors.New("decoder used after Close") + // ErrEncoderClosed will be returned if the Encoder was used after + // Close has been called. 
+ ErrEncoderClosed = errors.New("encoder used after Close") + // ErrDecoderNilInput is returned when a nil Reader was provided // and an operation other than Reset/DecodeAll/Close was attempted. ErrDecoderNilInput = errors.New("nil input provided as reader") diff --git a/vendor/github.com/regclient/regclient/.dockerignore b/vendor/github.com/regclient/regclient/.dockerignore new file mode 100644 index 00000000..cbcf085e --- /dev/null +++ b/vendor/github.com/regclient/regclient/.dockerignore @@ -0,0 +1,15 @@ +* +!.git/ +!build/root.tgz +!cmd/ +!config/ +!internal/ +!mod/ +!pkg/ +!regclient/ +!scheme/ +!types/ +!vendor/ +!go.* +!*.go +!Makefile \ No newline at end of file diff --git a/vendor/github.com/regclient/regclient/.gitignore b/vendor/github.com/regclient/regclient/.gitignore new file mode 100644 index 00000000..24207e4e --- /dev/null +++ b/vendor/github.com/regclient/regclient/.gitignore @@ -0,0 +1,5 @@ +artifacts/ +bin/ +output/ +vendor/ +.regctl_conf_ci.json diff --git a/vendor/github.com/regclient/regclient/.markdownlint.yml b/vendor/github.com/regclient/regclient/.markdownlint.yml new file mode 100644 index 00000000..01f625f5 --- /dev/null +++ b/vendor/github.com/regclient/regclient/.markdownlint.yml @@ -0,0 +1,19 @@ +# all lists use a `-` +MD004: + style: dash + +# allow tabs in code blocks (for Go) +MD010: + code_blocks: false + +# disable line length, prefer one sentence per line for PRs +MD013: false + +# emphasis with underscore (`_emphasis_`) +MD049: + style: "underscore" + +# bold with asterisk (`**bold**`) +MD050: + style: "asterisk" + \ No newline at end of file diff --git a/vendor/github.com/regclient/regclient/.osv-scanner.toml b/vendor/github.com/regclient/regclient/.osv-scanner.toml new file mode 100644 index 00000000..b3d1f7a2 --- /dev/null +++ b/vendor/github.com/regclient/regclient/.osv-scanner.toml @@ -0,0 +1 @@ +GoVersionOverride = "1.23.4" diff --git a/vendor/github.com/regclient/regclient/.version-bump.lock 
b/vendor/github.com/regclient/regclient/.version-bump.lock new file mode 100644 index 00000000..d3276884 --- /dev/null +++ b/vendor/github.com/regclient/regclient/.version-bump.lock @@ -0,0 +1,52 @@ +{"name":"docker-arg-alpine-digest","key":"docker.io/library/alpine:3.21.0","version":"sha256:21dc6063fd678b478f57c0e13f47560d0ea4eeba26dfc947b2a4f81f686b9f45"} +{"name":"docker-arg-alpine-tag","key":"docker.io/library/alpine","version":"3.21.0"} +{"name":"docker-arg-ecr","key":"https://github.com/awslabs/amazon-ecr-credential-helper.git:main","version":"b9e7404a33c30f6b0f4c3585b12aa5c33dc7f715"} +{"name":"docker-arg-gcr","key":"https://github.com/GoogleCloudPlatform/docker-credential-gcr.git","version":"v2.1.26"} +{"name":"docker-arg-go-digest","key":"docker.io/library/golang:1.23.4-alpine","version":"sha256:6c5c9590f169f77c8046e45c611d3b28fe477789acd8d3762d23d4744de69812"} +{"name":"docker-arg-go-tag","key":"docker.io/library/golang","version":"1.23.4"} +{"name":"docker-arg-lunajson","key":"https://github.com/grafi-tt/lunajson.git:master","version":"3d10600874527d71519b33ecbb314eb93ccd1df6"} +{"name":"docker-arg-semver","key":"https://github.com/kikito/semver.lua.git:master","version":"af495adc857d51fd1507a112be18523828a1da0d"} +{"name":"gha-alpine-digest","key":"docker.io/library/alpine:3.21.0","version":"sha256:21dc6063fd678b478f57c0e13f47560d0ea4eeba26dfc947b2a4f81f686b9f45"} +{"name":"gha-alpine-tag-base","key":"docker.io/library/alpine","version":"3"} +{"name":"gha-alpine-tag-comment","key":"docker.io/library/alpine","version":"3.21.0"} +{"name":"gha-cosign-version","key":"https://github.com/sigstore/cosign.git","version":"v2.4.1"} +{"name":"gha-golang-matrix","key":"golang-matrix","version":"[\"1.21\", \"1.22\", \"1.23\"]"} +{"name":"gha-golang-release","key":"golang-latest","version":"1.23"} +{"name":"gha-syft-version","key":"docker.io/anchore/syft","version":"v1.17.0"} 
+{"name":"gha-uses-commit","key":"https://github.com/actions/checkout.git:v4.2.2","version":"11bd71901bbe5b1630ceea73d27597364c9af683"} +{"name":"gha-uses-commit","key":"https://github.com/actions/setup-go.git:v5.1.0","version":"41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed"} +{"name":"gha-uses-commit","key":"https://github.com/actions/stale.git:v9.0.0","version":"28ca1036281a5e5922ead5184a1bbf96e5fc984e"} +{"name":"gha-uses-commit","key":"https://github.com/actions/upload-artifact.git:v4.4.3","version":"b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882"} +{"name":"gha-uses-commit","key":"https://github.com/anchore/sbom-action.git:v0.17.8","version":"55dc4ee22412511ee8c3142cbea40418e6cec693"} +{"name":"gha-uses-commit","key":"https://github.com/docker/build-push-action.git:v6.10.0","version":"48aba3b46d1b1fec4febb7c5d0c644b249a11355"} +{"name":"gha-uses-commit","key":"https://github.com/docker/login-action.git:v3.3.0","version":"9780b0c442fbb1117ed29e0efdff1e18412f7567"} +{"name":"gha-uses-commit","key":"https://github.com/docker/setup-buildx-action.git:v3.7.1","version":"c47758b77c9736f4b2ef4073d4d51994fabfe349"} +{"name":"gha-uses-commit","key":"https://github.com/regclient/actions.git:main","version":"ce5fd131e371ffcdd7508b478cb223b3511a9183"} +{"name":"gha-uses-commit","key":"https://github.com/sigstore/cosign-installer.git:v3.7.0","version":"dc72c7d5c4d10cd6bcb8cf6e3fd625a9e5e537da"} +{"name":"gha-uses-commit","key":"https://github.com/softprops/action-gh-release.git:v2.1.0","version":"01570a1f39cb168c169c802c3bceb9e93fb10974"} +{"name":"gha-uses-semver","key":"https://github.com/actions/checkout.git","version":"v4.2.2"} +{"name":"gha-uses-semver","key":"https://github.com/actions/setup-go.git","version":"v5.1.0"} +{"name":"gha-uses-semver","key":"https://github.com/actions/stale.git","version":"v9.0.0"} +{"name":"gha-uses-semver","key":"https://github.com/actions/upload-artifact.git","version":"v4.4.3"} 
+{"name":"gha-uses-semver","key":"https://github.com/anchore/sbom-action.git","version":"v0.17.8"} +{"name":"gha-uses-semver","key":"https://github.com/docker/build-push-action.git","version":"v6.10.0"} +{"name":"gha-uses-semver","key":"https://github.com/docker/login-action.git","version":"v3.3.0"} +{"name":"gha-uses-semver","key":"https://github.com/docker/setup-buildx-action.git","version":"v3.7.1"} +{"name":"gha-uses-semver","key":"https://github.com/sigstore/cosign-installer.git","version":"v3.7.0"} +{"name":"gha-uses-semver","key":"https://github.com/softprops/action-gh-release.git","version":"v2.1.0"} +{"name":"go-mod-golang-release","key":"golang-oldest","version":"1.21"} +{"name":"makefile-ci-distribution","key":"docker.io/library/registry","version":"2.8.3"} +{"name":"makefile-ci-zot","key":"ghcr.io/project-zot/zot-linux-amd64","version":"v2.1.1"} +{"name":"makefile-go-vulncheck","key":"https://go.googlesource.com/vuln.git","version":"v1.1.3"} +{"name":"makefile-gomajor","key":"https://github.com/icholy/gomajor.git","version":"v0.14.0"} +{"name":"makefile-gosec","key":"https://github.com/securego/gosec.git","version":"v2.21.4"} +{"name":"makefile-markdown-lint","key":"docker.io/davidanson/markdownlint-cli2","version":"v0.15.0"} +{"name":"makefile-osv-scanner","key":"https://github.com/google/osv-scanner.git","version":"v1.9.1"} +{"name":"makefile-staticcheck","key":"https://github.com/dominikh/go-tools.git","version":"v0.5.1"} +{"name":"makefile-syft-container-digest","key":"anchore/syft:v1.17.0","version":"sha256:f1099806495b4d2300adf03887bdfb69230c36a5e077061a12ee292bcd9bfd62"} +{"name":"makefile-syft-container-tag","key":"anchore/syft","version":"v1.17.0"} +{"name":"makefile-syft-version","key":"docker.io/anchore/syft","version":"v1.17.0"} +{"name":"osv-golang-release","key":"docker.io/library/golang","version":"1.23.4"} 
+{"name":"shell-alpine-digest","key":"docker.io/library/alpine:3.21.0","version":"sha256:21dc6063fd678b478f57c0e13f47560d0ea4eeba26dfc947b2a4f81f686b9f45"} +{"name":"shell-alpine-tag-base","key":"docker.io/library/alpine","version":"3"} +{"name":"shell-alpine-tag-comment","key":"docker.io/library/alpine","version":"3.21.0"} diff --git a/vendor/github.com/regclient/regclient/.version-bump.yml b/vendor/github.com/regclient/regclient/.version-bump.yml new file mode 100644 index 00000000..e7421d5f --- /dev/null +++ b/vendor/github.com/regclient/regclient/.version-bump.yml @@ -0,0 +1,330 @@ +files: + "build/Dockerfile*": + processors: + - docker-arg-alpine-tag + - docker-arg-alpine-digest + - docker-arg-go-tag + - docker-arg-go-digest + - docker-arg-ecr + - docker-arg-gcr + - docker-arg-lunajson + - docker-arg-semver + "build/oci-image.sh": + processors: + - shell-alpine-tag-base + - shell-alpine-tag-comment + - shell-alpine-digest + ".github/workflows/*.yml": + processors: + - gha-golang-matrix + - gha-golang-release + - gha-uses-vx + - gha-uses-semver + - gha-uses-commit + - gha-syft-version + - gha-cosign-version + - gha-alpine-tag-base + - gha-alpine-tag-comment + - gha-alpine-digest + "Makefile": + processors: + - makefile-gomajor + - makefile-go-vulncheck + - makefile-markdown-lint + - makefile-gosec + - makefile-osv-scanner + - makefile-staticcheck + - makefile-syft-version + - makefile-syft-container-tag + - makefile-syft-container-digest + - makefile-ci-distribution + - makefile-ci-zot + "go.mod": + processors: + - go-mod-golang-release + ".osv-scanner.toml": + processors: + - osv-golang-release + +x-processor-tmpl: + git-commit: &git-commit + key: "{{ .SourceArgs.url }}:{{ .SourceArgs.ref }}" + scan: "regexp" + source: "git-commit" + filter: + expr: "^{{ .SourceArgs.ref }}$" + git-tag-semver: &git-tag-semver + key: "{{ .SourceArgs.url }}" + scan: "regexp" + source: "git-tag" + filter: + expr: '^v?\d+\.\d+\.\d+$' + sort: + method: "semver" + registry-digest: 
®istry-digest + key: "{{ .SourceArgs.image }}" + scan: "regexp" + source: "registry-digest" + registry-tag-semver: ®istry-tag-semver + key: "{{ .SourceArgs.repo }}" + scan: "regexp" + source: "registry-tag" + filter: + expr: '^v?\d+\.\d+\.\d+$' + sort: + method: "semver" + +processors: + docker-arg-alpine-tag: + <<: *registry-tag-semver + scanArgs: + regexp: '^ARG ALPINE_VER=(?Pv?\d+\.\d+\.\d+)@(?Psha256:[0-9a-f]+)\s*$' + sourceArgs: + repo: "docker.io/library/alpine" + docker-arg-alpine-digest: + <<: *registry-digest + scanArgs: + regexp: '^ARG ALPINE_VER=(?Pv?\d+\.\d+\.\d+)@(?Psha256:[0-9a-f]+)\s*$' + sourceArgs: + image: "docker.io/library/alpine:{{.ScanMatch.Tag}}" + docker-arg-go-tag: + <<: *registry-tag-semver + scanArgs: + regexp: '^ARG GO_VER=(?P[a-z0-9\-\.]+)-alpine@(?Psha256:[0-9a-f]+)\s*$' + sourceArgs: + repo: "docker.io/library/golang" + docker-arg-go-digest: + <<: *registry-digest + scanArgs: + regexp: '^ARG GO_VER=(?P[a-z0-9\-\.]+)@(?Psha256:[0-9a-f]+)\s*$' + sourceArgs: + image: "docker.io/library/golang:{{.ScanMatch.Tag}}" + docker-arg-ecr: + <<: *git-commit + scanArgs: + regexp: '^ARG ECR_HELPER_VER=(?P[0-9a-f]+)\s*$' + sourceArgs: + url: "https://github.com/awslabs/amazon-ecr-credential-helper.git" + ref: main + docker-arg-gcr: + <<: *git-tag-semver + scanArgs: + regexp: '^ARG GCR_HELPER_VER=(?Pv?\d+\.\d+\.\d+)\s*$' + sourceArgs: + url: "https://github.com/GoogleCloudPlatform/docker-credential-gcr.git" + docker-arg-lunajson: + <<: *git-commit + scanArgs: + regexp: '^ARG LUNAJSON_COMMIT=(?P[0-9a-f]+)\s*$' + sourceArgs: + url: "https://github.com/grafi-tt/lunajson.git" + ref: master + docker-arg-semver: + <<: *git-commit + scanArgs: + regexp: '^ARG SEMVER_COMMIT=(?P[0-9a-f]+)\s*$' + sourceArgs: + url: "https://github.com/kikito/semver.lua.git" + ref: master + + gha-alpine-digest: + <<: *registry-digest + scanArgs: + regexp: '^\s*ALPINE_DIGEST: "(?Psha256:[0-9a-f]+)"\s*#\s*(?P\d+\.\d+\.\d+)\s*$' + sourceArgs: + image: "docker.io/library/alpine:{{ 
.ScanMatch.Tag }}" + gha-alpine-tag-base: + <<: *registry-tag-semver + scanArgs: + regexp: '^\s*ALPINE_NAME: "alpine:(?Pv?\d+)"\s*$' + sourceArgs: + repo: "docker.io/library/alpine" + # only return the major version number in the tag to support detecting a change in the base image + template: '{{ index ( split .Version "." ) 0 }}' + gha-alpine-tag-comment: + <<: *registry-tag-semver + scanArgs: + regexp: '^\s*ALPINE_DIGEST: "(?Psha256:[0-9a-f]+)"\s*#\s*(?Pv?\d+\.\d+\.\d+)\s*$' + sourceArgs: + repo: "docker.io/library/alpine" + gha-cosign-version: + <<: *git-tag-semver + scanArgs: + regexp: '^\s*cosign-release: "(?Pv?[0-9\.]+)"\s*$' + sourceArgs: + url: "https://github.com/sigstore/cosign.git" + gha-golang-matrix: + <<: *registry-tag-semver + key: "golang-matrix" + scanArgs: + regexp: '^\s*gover: (?P\[["0-9, \.]+\])\s*$' + sourceArgs: + repo: "docker.io/library/golang" + filter: + expr: '^v?\d+\.\d+$' + template: '["{{ index .VerMap ( index .VerList 2 ) }}", "{{ index .VerMap ( index .VerList 1 ) }}", "{{ index .VerMap ( index .VerList 0 ) }}"]' + gha-golang-release: + <<: *registry-tag-semver + key: "golang-latest" + scanArgs: + regexp: '^\s*RELEASE_GO_VER: "(?Pv?[0-9\.]+)"\s*$' + sourceArgs: + repo: "docker.io/library/golang" + filter: + expr: '^v?\d+\.\d+$' + gha-syft-version: + <<: *registry-tag-semver + scanArgs: + regexp: '^\s*syft-version: "(?Pv?[0-9\.]+)"\s*$' + sourceArgs: + repo: "docker.io/anchore/syft" + gha-uses-vx: + <<: *git-tag-semver + scanArgs: + regexp: '^\s+-?\s+uses: (?P[^@/]+/[^@/]+)[^@]*@(?P[0-9a-f]+)\s+#\s+(?Pv?\d+)\s*$' + sourceArgs: + url: "https://github.com/{{ .ScanMatch.Repo }}.git" + filter: + expr: '^v?\d+$' + gha-uses-semver: + <<: *git-tag-semver + scanArgs: + regexp: '^\s+-?\s+uses: (?P[^@/]+/[^@/]+)[^@]*@(?P[0-9a-f]+)\s+#\s+(?Pv?\d+\.\d+\.\d+)\s*$' + sourceArgs: + url: "https://github.com/{{ .ScanMatch.Repo }}.git" + gha-uses-commit: + <<: *git-commit + scanArgs: + regexp: '^\s+-?\s+uses: 
(?P[^@/]+/[^@/]+)[^@]*@(?P[0-9a-f]+)\s+#\s+(?P[\w\d\.]+)\s*$' + sourceArgs: + url: "https://github.com/{{ .ScanMatch.Repo }}.git" + ref: "{{ .ScanMatch.Ref }}" + + go-mod-golang-release: + <<: *registry-tag-semver + key: "golang-oldest" + scanArgs: + regexp: '^go (?P[0-9\.]+)\s*$' + sourceArgs: + repo: "docker.io/library/golang" + filter: + expr: '^\d+\.\d+$' + template: '{{ index .VerMap ( index .VerList 2 ) }}' + + makefile-ci-distribution: + <<: *registry-tag-semver + scanArgs: + regexp: '^CI_DISTRIBUTION_VER\?=(?Pv?[0-9\.]+)\s*$' + sourceArgs: + repo: "docker.io/library/registry" + makefile-ci-zot: + <<: *registry-tag-semver + scanArgs: + regexp: '^CI_ZOT_VER\?=(?Pv?[0-9\.]+)\s*$' + sourceArgs: + repo: "ghcr.io/project-zot/zot-linux-amd64" + makefile-gomajor: + <<: *git-tag-semver + scanArgs: + regexp: '^GOMAJOR_VER\?=(?Pv?[0-9\.]+)\s*$' + sourceArgs: + url: "https://github.com/icholy/gomajor.git" + makefile-gosec: + <<: *git-tag-semver + scanArgs: + regexp: '^GOSEC_VER\?=(?Pv?[0-9\.]+)\s*$' + sourceArgs: + url: "https://github.com/securego/gosec.git" + makefile-go-vulncheck: + <<: *git-tag-semver + scanArgs: + regexp: '^GO_VULNCHECK_VER\?=(?Pv?[0-9\.]+)\s*$' + sourceArgs: + url: "https://go.googlesource.com/vuln.git" + makefile-markdown-lint: + <<: *registry-tag-semver + scanArgs: + regexp: '^MARKDOWN_LINT_VER\?=(?Pv?[0-9\.]+)\s*$' + sourceArgs: + repo: "docker.io/davidanson/markdownlint-cli2" + makefile-osv-scanner: + <<: *git-tag-semver + scanArgs: + regexp: '^OSV_SCANNER_VER\?=(?Pv?[0-9\.]+)\s*$' + sourceArgs: + url: "https://github.com/google/osv-scanner.git" + makefile-staticcheck: + <<: *git-tag-semver + scanArgs: + regexp: '^STATICCHECK_VER\?=(?Pv?[0-9\.]+)\s*$' + sourceArgs: + url: "https://github.com/dominikh/go-tools.git" + filter: + # repo also has dated tags, ignore versions without a preceding "v" + expr: '^v\d+\.\d+\.\d+$' + makefile-syft-container-tag: + <<: *registry-tag-semver + scanArgs: + regexp: 
'^SYFT_CONTAINER\?=(?P[^:]*):(?Pv?[0-9\.]+)@(?Psha256:[0-9a-f]+)\s*$' + sourceArgs: + repo: "{{ .ScanMatch.Repo }}" + makefile-syft-container-digest: + <<: *registry-digest + scanArgs: + regexp: '^SYFT_CONTAINER\?=(?P[^:]*):(?Pv?[0-9\.]+)@(?Psha256:[0-9a-f]+)\s*$' + sourceArgs: + image: "{{ .ScanMatch.Image }}:{{.ScanMatch.Tag}}" + makefile-syft-version: + <<: *registry-tag-semver + scanArgs: + regexp: '^SYFT_VERSION\?=(?Pv[0-9\.]+)\s*$' + sourceArgs: + repo: "docker.io/anchore/syft" + + osv-golang-release: + <<: *registry-tag-semver + scanArgs: + regexp: '^GoVersionOverride = "(?Pv?[0-9\.]+)"\s*$' + sourceArgs: + repo: "docker.io/library/golang" + + shell-alpine-tag-base: + <<: *registry-tag-semver + scanArgs: + regexp: '^\s*ALPINE_NAME="alpine:(?Pv?\d+)"\s*$' + sourceArgs: + repo: "docker.io/library/alpine" + # only return the major version number in the tag to support detecting a change in the base image + template: '{{ index ( split .Version "." ) 0 }}' + shell-alpine-tag-comment: + <<: *registry-tag-semver + scanArgs: + regexp: '^\s*ALPINE_DIGEST="(?Psha256:[0-9a-f]+)"\s*#\s*(?Pv?\d+\.\d+\.\d+)\s*$' + sourceArgs: + repo: "docker.io/library/alpine" + shell-alpine-digest: + <<: *registry-digest + scanArgs: + regexp: '^\s*ALPINE_DIGEST="(?Psha256:[0-9a-f]+)"\s*#\s*(?P\d+\.\d+\.\d+)\s*$' + sourceArgs: + image: "docker.io/library/alpine:{{ .ScanMatch.Tag }}" + +scans: + regexp: + type: "regexp" + +sources: + git-commit: + type: "git" + args: + type: "commit" + git-tag: + type: "git" + args: + type: "tag" + registry-digest: + type: "registry" + registry-tag: + type: "registry" + args: + type: "tag" diff --git a/vendor/github.com/regclient/regclient/CODE_OF_CONDUCT.md b/vendor/github.com/regclient/regclient/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..5498720f --- /dev/null +++ b/vendor/github.com/regclient/regclient/CODE_OF_CONDUCT.md @@ -0,0 +1,134 @@ + +# Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make 
participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +- Demonstrating empathy and kindness toward other people +- Being respectful of differing opinions, viewpoints, and experiences +- Giving and gracefully accepting constructive feedback +- Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +- Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +- The use of sexualized language or imagery, and sexual attention or advances of + any kind +- Trolling, insulting or derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or email address, + without their explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. 
+ +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at + or slack (I'm found on the CNCF, Docker, OCI, and OpenSSF +slacks). +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. 
This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. 
+ +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/vendor/github.com/regclient/regclient/CONTRIBUTING.md b/vendor/github.com/regclient/regclient/CONTRIBUTING.md new file mode 100644 index 00000000..d1d67753 --- /dev/null +++ b/vendor/github.com/regclient/regclient/CONTRIBUTING.md @@ -0,0 +1,74 @@ +# Contributing + +## Reporting security issues + +Please see [SECURITY.md](security.md) for the process to report security issues. + +## Reporting other issues + +Please search for similar issues and if none are seen, report an issue at [github.com/regclient/regclient/issues](https://github.com/regclient/regclient/issues) + +## Code style + +This project attempts to follow these principles: + +- Code is canonical Go, following styles and patterns commonly used by the Go community. +- Dependencies outside of the Go standard library should be minimized. +- Dependencies should be pinned to a specific digest and tracked by Go or version-check. +- Unit tests are strongly encouraged with a focus on test coverage of the successful path and common errors. +- Linters and other style formatting tools are used, please run `make all` before committing any changes. + +## Pull requests + +PRs are welcome following the below guides: + +- For anything beyond a minor fix, opening an issue is suggested to discuss possible solutions. +- Changes should be rebased on the main branch. +- Changes should be squashed to a single commit per logical change. + +All changes must be signed (`git commit -s`) to indicate you agree to the [Developer Certificate or Origin](https://developercertificate.org/): + +```text +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
+ +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +The sign-off will include the following message in your commit: + +```text +Signed-off-by: Your Name +``` + +This needs to be your real name, no aliases please. diff --git a/vendor/github.com/regclient/regclient/LICENSE b/vendor/github.com/regclient/regclient/LICENSE new file mode 100644 index 00000000..6e12b19d --- /dev/null +++ b/vendor/github.com/regclient/regclient/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2020 The regclient Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/regclient/regclient/Makefile b/vendor/github.com/regclient/regclient/Makefile new file mode 100644 index 00000000..2bb73230 --- /dev/null +++ b/vendor/github.com/regclient/regclient/Makefile @@ -0,0 +1,259 @@ +COMMANDS?=regctl regsync regbot +BINARIES?=$(addprefix bin/,$(COMMANDS)) +IMAGES?=$(addprefix docker-,$(COMMANDS)) +ARTIFACT_PLATFORMS?=linux-amd64 linux-arm64 linux-ppc64le linux-s390x darwin-amd64 darwin-arm64 windows-amd64.exe +ARTIFACTS?=$(foreach cmd,$(addprefix artifacts/,$(COMMANDS)),$(addprefix $(cmd)-,$(ARTIFACT_PLATFORMS))) +TEST_PLATFORMS?=linux/386,linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x +VCS_REPO?="https://github.com/regclient/regclient.git" +VCS_REF?=$(shell git rev-list -1 HEAD) +ifneq ($(shell git status --porcelain 2>/dev/null),) + VCS_REF := $(VCS_REF)-dirty +endif +VCS_VERSION?=$(shell vcs_describe="$$(git describe --all)"; \ + vcs_version="(devel)"; \ + if [ "$${vcs_describe}" != "$${vcs_describe#tags/}" ]; then \ + vcs_version="$${vcs_describe#tags/}"; \ + elif [ "$${vcs_describe}" != "$${vcs_describe#heads/}" ]; then \ + vcs_version="$${vcs_describe#heads/}"; \ + if [ "main" = "$${vcs_version}" ]; then vcs_version=edge; fi; \ + fi; \ + echo "$${vcs_version}" | sed -r 's#/+#-#g') +VCS_TAG?=$(shell git describe --tags --abbrev=0 2>/dev/null || true) +LD_FLAGS?=-s -w 
-extldflags -static -buildid= -X \"github.com/regclient/regclient/internal/version.vcsTag=$(VCS_TAG)\" +GO_BUILD_FLAGS?=-trimpath -ldflags "$(LD_FLAGS)" +DOCKERFILE_EXT?=$(shell if docker build --help 2>/dev/null | grep -q -- '--progress'; then echo ".buildkit"; fi) +DOCKER_ARGS?=--build-arg "VCS_REF=$(VCS_REF)" --build-arg "VCS_VERSION=$(VCS_VERSION)" +GOPATH?=$(shell go env GOPATH) +PWD:=$(shell pwd) +VER_BUMP?=$(shell command -v version-bump 2>/dev/null) +VER_BUMP_CONTAINER?=sudobmitch/version-bump:edge +ifeq "$(strip $(VER_BUMP))" '' + VER_BUMP=docker run --rm \ + -v "$(shell pwd)/:$(shell pwd)/" -w "$(shell pwd)" \ + -u "$(shell id -u):$(shell id -g)" \ + $(VER_BUMP_CONTAINER) +endif +MARKDOWN_LINT_VER?=v0.15.0 +GOMAJOR_VER?=v0.14.0 +GOSEC_VER?=v2.21.4 +GO_VULNCHECK_VER?=v1.1.3 +OSV_SCANNER_VER?=v1.9.1 +SYFT?=$(shell command -v syft 2>/dev/null) +SYFT_CMD_VER:=$(shell [ -x "$(SYFT)" ] && echo "v$$($(SYFT) version | awk '/^Version: / {print $$2}')" || echo "0") +SYFT_VERSION?=v1.17.0 +SYFT_CONTAINER?=anchore/syft:v1.17.0@sha256:f1099806495b4d2300adf03887bdfb69230c36a5e077061a12ee292bcd9bfd62 +ifneq "$(SYFT_CMD_VER)" "$(SYFT_VERSION)" + SYFT=docker run --rm \ + -v "$(shell pwd)/:$(shell pwd)/" -w "$(shell pwd)" \ + -u "$(shell id -u):$(shell id -g)" \ + $(SYFT_CONTAINER) +endif +STATICCHECK_VER?=v0.5.1 +CI_DISTRIBUTION_VER?=2.8.3 +CI_ZOT_VER?=v2.1.1 + +.PHONY: .FORCE +.FORCE: + +.PHONY: all +all: fmt goimports vet test lint binaries ## Full build of Go binaries (including fmt, vet, test, and lint) + +.PHONY: fmt +fmt: ## go fmt + go fmt ./... + +goimports: $(GOPATH)/bin/goimports + $(GOPATH)/bin/goimports -w -format-only -local github.com/regclient . + +.PHONY: vet +vet: ## go vet + go vet ./... + +.PHONY: test +test: ## go test + go test -cover -race ./... 
+ +.PHONY: lint +lint: lint-go lint-goimports lint-md lint-gosec ## Run all linting + +.PHONY: lint-go +lint-go: $(GOPATH)/bin/staticcheck .FORCE ## Run linting for Go + $(GOPATH)/bin/staticcheck -checks all ./... + +lint-goimports: $(GOPATH)/bin/goimports + @if [ -n "$$($(GOPATH)/bin/goimports -l -format-only -local github.com/regclient .)" ]; then \ + echo $(GOPATH)/bin/goimports -d -format-only -local github.com/regclient .; \ + $(GOPATH)/bin/goimports -d -format-only -local github.com/regclient .; \ + exit 1; \ + fi + +# excluding types/platform pending resultion to https://github.com/securego/gosec/issues/1116 +.PHONY: lint-gosec +lint-gosec: $(GOPATH)/bin/gosec .FORCE ## Run gosec + $(GOPATH)/bin/gosec -terse -exclude-dir types/platform ./... + +.PHONY: lint-md +lint-md: .FORCE ## Run linting for markdown + docker run --rm -v "$(PWD):/workdir:ro" davidanson/markdownlint-cli2:$(MARKDOWN_LINT_VER) \ + "**/*.md" "#vendor" + +.PHONY: vulnerability-scan +vulnerability-scan: osv-scanner vulncheck-go ## Run all vulnerability scanners + +.PHONY: osv-scanner +osv-scanner: $(GOPATH)/bin/osv-scanner .FORCE ## Run OSV Scanner + $(GOPATH)/bin/osv-scanner scan --config .osv-scanner.toml -r --experimental-licenses="Apache-2.0,BSD-3-Clause,MIT,CC-BY-SA-4.0,UNKNOWN" . + +.PHONY: vulncheck-go +vulncheck-go: $(GOPATH)/bin/govulncheck .FORCE ## Run govulncheck + $(GOPATH)/bin/govulncheck ./... + +.PHONY: vendor +vendor: ## Vendor Go modules + go mod vendor + +.PHONY: binaries +binaries: $(BINARIES) ## Build Go binaries + +bin/%: .FORCE + CGO_ENABLED=0 go build ${GO_BUILD_FLAGS} -o bin/$* ./cmd/$* + +.PHONY: docker +docker: $(IMAGES) ## Build Docker images + +docker-%: .FORCE + docker build -t regclient/$* -f build/Dockerfile.$*$(DOCKERFILE_EXT) $(DOCKER_ARGS) . + docker build -t regclient/$*:alpine -f build/Dockerfile.$*$(DOCKERFILE_EXT) --target release-alpine $(DOCKER_ARGS) . 
+ +.PHONY: oci-image +oci-image: $(addprefix oci-image-,$(COMMANDS)) ## Build reproducible images to an OCI Layout + +oci-image-%: bin/regctl .FORCE + PATH="$(PWD)/bin:$(PATH)" build/oci-image.sh -r scratch -i "$*" -p "$(TEST_PLATFORMS)" + PATH="$(PWD)/bin:$(PATH)" build/oci-image.sh -r alpine -i "$*" -p "$(TEST_PLATFORMS)" -b "alpine:3" + +.PHONY: test-docker +test-docker: $(addprefix test-docker-,$(COMMANDS)) ## Build multi-platform docker images (but do not tag) + +test-docker-%: + docker buildx build --platform="$(TEST_PLATFORMS)" -f build/Dockerfile.$*.buildkit . + docker buildx build --platform="$(TEST_PLATFORMS)" -f build/Dockerfile.$*.buildkit --target release-alpine . + +.PHONY: ci +ci: ci-distribution ci-zot ## Run CI tests against self hosted registries + +.PHONY: ci-distribution +ci-distribution: + docker run --rm -d -p 5000 \ + --label regclient-ci=true --name regclient-ci-distribution \ + -e "REGISTRY_STORAGE_DELETE_ENABLED=true" \ + docker.io/library/registry:$(CI_DISTRIBUTION_VER) + ./build/ci-test.sh -t localhost:$$(docker port regclient-ci-distribution 5000 | head -1 | cut -f2 -d:)/test-ci + docker stop regclient-ci-distribution + +.PHONY: ci-zot +ci-zot: + docker run --rm -d -p 5000 \ + --label regclient-ci=true --name regclient-ci-zot \ + -v "$$(pwd)/build/zot-config.json:/etc/zot/config.json:ro" \ + ghcr.io/project-zot/zot-linux-amd64:$(CI_ZOT_VER) + ./build/ci-test.sh -t localhost:$$(docker port regclient-ci-zot 5000 | head -1 | cut -f2 -d:)/test-ci + docker stop regclient-ci-zot + +.PHONY: artifacts +artifacts: $(ARTIFACTS) ## Generate artifacts + +.PHONY: artifact-pre +artifact-pre: + mkdir -p artifacts + +artifacts/%: artifact-pre .FORCE + @set -e; \ + target="$*"; \ + command="$${target%%-*}"; \ + platform_ext="$${target#*-}"; \ + platform="$${platform_ext%.*}"; \ + export GOOS="$${platform%%-*}"; \ + export GOARCH="$${platform#*-}"; \ + echo export GOOS=$${GOOS}; \ + echo export GOARCH=$${GOARCH}; \ + echo go build ${GO_BUILD_FLAGS} -o 
"$@" ./cmd/$${command}/; \ + CGO_ENABLED=0 go build ${GO_BUILD_FLAGS} -o "$@" ./cmd/$${command}/; \ + $(SYFT) scan -q "file:$@" --source-name "$${command}" -o cyclonedx-json >"artifacts/$${command}-$${platform}.cyclonedx.json"; \ + $(SYFT) scan -q "file:$@" --source-name "$${command}" -o spdx-json >"artifacts/$${command}-$${platform}.spdx.json" + +.PHONY: plugin-user +plugin-user: + mkdir -p ${HOME}/.docker/cli-plugins/ + cp docker-plugin/docker-regclient ${HOME}/.docker/cli-plugins/docker-regctl + +.PHONY: plugin-host +plugin-host: + sudo cp docker-plugin/docker-regclient /usr/libexec/docker/cli-plugins/docker-regctl + +.PHONY: util-golang-major +util-golang-major: $(GOPATH)/bin/gomajor ## check for major dependency updates + $(GOPATH)/bin/gomajor list + +.PHONY: util-golang-update +util-golang-update: ## update go module versions + go get -u -t ./... + go mod tidy + [ ! -d vendor ] || go mod vendor + +.PHONY: util-release-preview +util-release-preview: $(GOPATH)/bin/gorelease ## preview changes for next release + git checkout main + ./.github/release.sh -d + gorelease + +.PHONY: util-release-run +util-release-run: ## generate a new release + git checkout main + ./.github/release.sh + +.PHONY: util-version-check +util-version-check: ## check all dependencies for updates + $(VER_BUMP) check + +.PHONY: util-version-update +util-version-update: ## update versions on all dependencies + $(VER_BUMP) update + +$(GOPATH)/bin/gomajor: .FORCE + @[ -f "$(GOPATH)/bin/gomajor" ] \ + && [ "$$($(GOPATH)/bin/gomajor version | grep '^version' | cut -f 2 -d ' ')" = "$(GOMAJOR_VER)" ] \ + || go install github.com/icholy/gomajor@$(GOMAJOR_VER) + +$(GOPATH)/bin/goimports: .FORCE + @[ -f "$(GOPATH)/bin/goimports" ] \ + || go install golang.org/x/tools/cmd/goimports@latest + +$(GOPATH)/bin/gorelease: .FORCE + @[ -f "$(GOPATH)/bin/gorelease" ] \ + || go install golang.org/x/exp/cmd/gorelease@latest + +$(GOPATH)/bin/gosec: .FORCE + @[ -f $(GOPATH)/bin/gosec ] \ + && [ 
"$$($(GOPATH)/bin/gosec -version | grep '^Version' | cut -f 2 -d ' ')" = "$(GOSEC_VER)" ] \ + || go install -ldflags '-X main.Version=$(GOSEC_VER) -X main.GitTag=$(GOSEC_VER)' \ + github.com/securego/gosec/v2/cmd/gosec@$(GOSEC_VER) + +$(GOPATH)/bin/staticcheck: .FORCE + @[ -f $(GOPATH)/bin/staticcheck ] \ + && [ "$$($(GOPATH)/bin/staticcheck -version | cut -f 3 -d ' ' | tr -d '()')" = "$(STATICCHECK_VER)" ] \ + || go install "honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VER)" + +$(GOPATH)/bin/govulncheck: .FORCE + @[ $$(go version -m $(GOPATH)/bin/govulncheck | \ + awk -F ' ' '{ if ($$1 == "mod" && $$2 == "golang.org/x/vuln") { printf "%s\n", $$3 } }') = "$(GO_VULNCHECK_VER)" ] \ + || CGO_ENABLED=0 go install "golang.org/x/vuln/cmd/govulncheck@$(GO_VULNCHECK_VER)" + +$(GOPATH)/bin/osv-scanner: .FORCE + @[ -f $(GOPATH)/bin/osv-scanner ] \ + && [ "$$(osv-scanner --version | awk -F ': ' '{ if ($$1 == "osv-scanner version") { printf "%s\n", $$2 } }')" = "$(OSV_SCANNER_VER)" ] \ + || CGO_ENABLED=0 go install "github.com/google/osv-scanner/cmd/osv-scanner@$(OSV_SCANNER_VER)" + +.PHONY: help +help: # Display help + @awk -F ':|##' '/^[^\t].+?:.*?##/ { printf "\033[36m%-30s\033[0m %s\n", $$1, $$NF }' $(MAKEFILE_LIST) diff --git a/vendor/github.com/regclient/regclient/README.md b/vendor/github.com/regclient/regclient/README.md new file mode 100644 index 00000000..6bf58ce8 --- /dev/null +++ b/vendor/github.com/regclient/regclient/README.md @@ -0,0 +1,95 @@ +# regclient + +[![Go Workflow Status](https://img.shields.io/github/actions/workflow/status/regclient/regclient/go.yml?branch=main&label=Go%20build)](https://github.com/regclient/regclient/actions/workflows/go.yml) +[![Docker Workflow Status](https://img.shields.io/github/actions/workflow/status/regclient/regclient/docker.yml?branch=main&label=Docker%20build)](https://github.com/regclient/regclient/actions/workflows/docker.yml) +[![Dependency Workflow 
Status](https://img.shields.io/github/actions/workflow/status/regclient/regclient/version-check.yml?branch=main&label=Dependency%20check)](https://github.com/regclient/regclient/actions/workflows/version-check.yml) +[![Vulnerability Workflow Status](https://img.shields.io/github/actions/workflow/status/regclient/regclient/vulnscans.yml?branch=main&label=Vulnerability%20check)](https://github.com/regclient/regclient/actions/workflows/vulnscans.yml) + +[![Go Reference](https://pkg.go.dev/badge/github.com/regclient/regclient.svg)](https://pkg.go.dev/github.com/regclient/regclient) +![License](https://img.shields.io/github/license/regclient/regclient) +[![Go Report Card](https://goreportcard.com/badge/github.com/regclient/regclient)](https://goreportcard.com/report/github.com/regclient/regclient) +[![GitHub Downloads](https://img.shields.io/github/downloads/regclient/regclient/total?label=GitHub%20downloads)](https://github.com/regclient/regclient/releases) + +Client interface for the registry API. +This includes `regctl` for a command line interface to manage registries. + +![regctl demo](docs/demo.gif) + +## regclient Features + +- Provides a client interface to interacting with registries. +- Images may be inspected without pulling the layers, allowing quick access to the image manifest and configuration. +- Tags may be listed for a repository. +- Repositories may be listed from a registry (if supported). +- Copying an image only pulls layers when needed, allowing images to be quickly retagged or promoted across repositories. +- Multi-platform images are supported, allowing all platforms to be copied between registries. +- Digest tags used by projects like sigstore/cosign are supported, allowing signature, attestation, and SBOM metadata to be copied with the image. +- OCI subject/referrers is supported for the standardized replacement of the "digest tags". +- Digests may be queried for a tag without pulling the manifest. 
+- Rate limits may be queried from the registry without pulling an image (useful for Docker Hub). +- Images may be imported and exported to both OCI and Docker formatted tar files. +- OCI Layout is supported for copying images to and from a local directory. +- Delete APIs have been provided for tags, manifests, and blobs (the tag deletion will only delete a single tag even if multiple tags point to the same digest). +- Registry logins are imported from docker when available +- Self signed, insecure, and http-only registries are all supported. +- Requests will retry and fall back to chunked uploads when network issues are encountered. + +## regctl Features + +`regctl` is a CLI interface to the `regclient` library. +In addition to the features listed for `regclient`, `regctl` adds the following abilities: + +- Formatting output with templates. +- Push and pull arbitrary artifacts. + +## regsync features + +`regsync` is an image mirroring tool. +It will copy images between two locations with the following additional features: + +- Uses a yaml configuration. +- The `regclient` copy is used to only pull needed layers, supporting multi-platform, and additional metadata. +- Can use user's docker configuration for registry credentials. +- Ability to run on a cron schedule, one time synchronization, or only check for stale images. +- Ability to backup previous target image before overwriting. +- Ability to postpone mirror step when rate limit is below a threshold. +- Ability to mirror multiple images concurrently. + +## regbot features + +`regbot` is a scripting tool on top of the `regclient` API with the following features: + +- Runs user provided scripts based on a yaml configuration. +- Scripts are written in Lua and executed directly in Go. +- Can run on a cron schedule or a one time execution. +- Dry-run option can be used for testing. 
+- Built-in functions include: + - Repository list + - Tag list + - Image manifest (either head or get, and optional resolving multi-platform reference) + - Image config (this includes the creation time, labels, and other details shown in a `docker image inspect`) + - Image rate limit and a wait function to delay the script when rate limit remaining is below a threshold + - Image copy + - Manifest delete + - Tag delete + +## Development Status + +This project is in active development. +Various Go APIs may change, but efforts will be made to provide aliases and stubs for any removed API. + +## Installing + +See the [installation options](docs/install.md). + +## Usage + +See the [project documentation](docs/README.md). + +## Contributors + + + contributor list + + + diff --git a/vendor/github.com/regclient/regclient/SECURITY.md b/vendor/github.com/regclient/regclient/SECURITY.md new file mode 100644 index 00000000..50508cb4 --- /dev/null +++ b/vendor/github.com/regclient/regclient/SECURITY.md @@ -0,0 +1,5 @@ +# Reporting security issues + +Please report security issues directly in GitHub at or alternatively email . + +We will typically respond within 7 working days of your report. If the issue is confirmed as a vulnerability, we will open a Security Advisory and acknowledge your contributions as part of it. This project follows a 90 day disclosure timeline. 
diff --git a/vendor/github.com/regclient/regclient/blob.go b/vendor/github.com/regclient/regclient/blob.go new file mode 100644 index 00000000..4bea4679 --- /dev/null +++ b/vendor/github.com/regclient/regclient/blob.go @@ -0,0 +1,264 @@ +package regclient + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "log/slog" + "time" + + "github.com/regclient/regclient/internal/pqueue" + "github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/blob" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/warning" +) + +const blobCBFreq = time.Millisecond * 100 + +type blobOpt struct { + callback func(kind types.CallbackKind, instance string, state types.CallbackState, cur, total int64) +} + +// BlobOpts define options for the Image* commands. +type BlobOpts func(*blobOpt) + +// BlobWithCallback provides progress data to a callback function. +func BlobWithCallback(callback func(kind types.CallbackKind, instance string, state types.CallbackState, cur, total int64)) BlobOpts { + return func(opts *blobOpt) { + opts.callback = callback + } +} + +// BlobCopy copies a blob between two locations. +// If the blob already exists in the target, the copy is skipped. +// A server side cross repository blob mount is attempted. 
+func (rc *RegClient) BlobCopy(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d descriptor.Descriptor, opts ...BlobOpts) error { + if !refSrc.IsSetRepo() { + return fmt.Errorf("refSrc is not set: %s%.0w", refSrc.CommonName(), errs.ErrInvalidReference) + } + if !refTgt.IsSetRepo() { + return fmt.Errorf("refTgt is not set: %s%.0w", refTgt.CommonName(), errs.ErrInvalidReference) + } + var opt blobOpt + for _, optFn := range opts { + optFn(&opt) + } + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + tDesc := d + tDesc.URLs = []string{} // ignore URLs when pushing to target + if opt.callback != nil { + opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackStarted, 0, d.Size) + } + // for the same repository, there's nothing to copy + if ref.EqualRepository(refSrc, refTgt) { + if opt.callback != nil { + opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackSkipped, 0, d.Size) + } + rc.slog.Debug("Blob copy skipped, same repo", + slog.String("src", refSrc.Reference), + slog.String("tgt", refTgt.Reference), + slog.String("digest", string(d.Digest))) + return nil + } + // check if layer already exists + if _, err := rc.BlobHead(ctx, refTgt, tDesc); err == nil { + if opt.callback != nil { + opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackSkipped, 0, d.Size) + } + rc.slog.Debug("Blob copy skipped, already exists", + slog.String("src", refSrc.Reference), + slog.String("tgt", refTgt.Reference), + slog.String("digest", string(d.Digest))) + return nil + } + // acquire throttle for both src and tgt to avoid deadlocks + tList := []*pqueue.Queue[reqmeta.Data]{} + schemeSrcAPI, err := rc.schemeGet(refSrc.Scheme) + if err != nil { + return err + } + schemeTgtAPI, err := rc.schemeGet(refTgt.Scheme) + if err != nil { + return err + } + if tSrc, ok := schemeSrcAPI.(scheme.Throttler); ok { + tList = append(tList, tSrc.Throttle(refSrc, 
false)...) + } + if tTgt, ok := schemeTgtAPI.(scheme.Throttler); ok { + tList = append(tList, tTgt.Throttle(refTgt, true)...) + } + if len(tList) > 0 { + ctxMulti, done, err := pqueue.AcquireMulti[reqmeta.Data](ctx, reqmeta.Data{Kind: reqmeta.Blob, Size: d.Size}, tList...) + if err != nil { + return err + } + if done != nil { + defer done() + } + ctx = ctxMulti + } + + // try mounting blob from the source repo is the registry is the same + if ref.EqualRegistry(refSrc, refTgt) { + err := rc.BlobMount(ctx, refSrc, refTgt, d) + if err == nil { + if opt.callback != nil { + opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackSkipped, 0, d.Size) + } + rc.slog.Debug("Blob copy performed server side with registry mount", + slog.String("src", refSrc.Reference), + slog.String("tgt", refTgt.Reference), + slog.String("digest", string(d.Digest))) + return nil + } + rc.slog.Warn("Failed to mount blob", + slog.String("src", refSrc.Reference), + slog.String("tgt", refTgt.Reference), + slog.String("err", err.Error())) + } + // fast options failed, download layer from source and push to target + blobIO, err := rc.BlobGet(ctx, refSrc, d) + if err != nil { + if !errors.Is(err, context.Canceled) { + rc.slog.Warn("Failed to retrieve blob", + slog.String("src", refSrc.Reference), + slog.String("digest", string(d.Digest)), + slog.String("err", err.Error())) + } + return err + } + if opt.callback != nil { + opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackStarted, 0, d.Size) + ticker := time.NewTicker(blobCBFreq) + done := make(chan bool) + defer func() { + close(done) + ticker.Stop() + if ctx.Err() == nil { + opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackFinished, d.Size, d.Size) + } + }() + go func() { + for { + select { + case <-done: + return + case <-ticker.C: + offset, err := blobIO.Seek(0, io.SeekCurrent) + if err == nil && offset > 0 { + opt.callback(types.CallbackBlob, d.Digest.String(), types.CallbackActive, offset, d.Size) + } + 
} + } + }() + } + defer blobIO.Close() + if _, err := rc.BlobPut(ctx, refTgt, blobIO.GetDescriptor(), blobIO); err != nil { + if !errors.Is(err, context.Canceled) { + rc.slog.Warn("Failed to push blob", + slog.String("src", refSrc.Reference), + slog.String("tgt", refTgt.Reference), + slog.String("err", err.Error())) + } + return err + } + return nil +} + +// BlobDelete removes a blob from the registry. +// This method should only be used to repair a damaged registry. +// Typically a server side garbage collection should be used to purge unused blobs. +func (rc *RegClient) BlobDelete(ctx context.Context, r ref.Ref, d descriptor.Descriptor) error { + if !r.IsSetRepo() { + return fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return err + } + return schemeAPI.BlobDelete(ctx, r, d) +} + +// BlobGet retrieves a blob, returning a reader. +// This reader must be closed to free up resources that limit concurrent pulls. +func (rc *RegClient) BlobGet(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) { + data, err := d.GetData() + if err == nil { + return blob.NewReader(blob.WithDesc(d), blob.WithRef(r), blob.WithReader(bytes.NewReader(data))), nil + } + if !r.IsSetRepo() { + return nil, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return nil, err + } + return schemeAPI.BlobGet(ctx, r, d) +} + +// BlobGetOCIConfig retrieves an OCI config from a blob, automatically extracting the JSON. 
+func (rc *RegClient) BlobGetOCIConfig(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.OCIConfig, error) { + if !r.IsSetRepo() { + return nil, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + b, err := rc.BlobGet(ctx, r, d) + if err != nil { + return nil, err + } + return b.ToOCIConfig() +} + +// BlobHead is used to verify if a blob exists and is accessible. +func (rc *RegClient) BlobHead(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) { + if !r.IsSetRepo() { + return nil, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return nil, err + } + return schemeAPI.BlobHead(ctx, r, d) +} + +// BlobMount attempts to perform a server side copy/mount of the blob between repositories. +func (rc *RegClient) BlobMount(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d descriptor.Descriptor) error { + if !refSrc.IsSetRepo() { + return fmt.Errorf("ref is not set: %s%.0w", refSrc.CommonName(), errs.ErrInvalidReference) + } + if !refTgt.IsSetRepo() { + return fmt.Errorf("ref is not set: %s%.0w", refTgt.CommonName(), errs.ErrInvalidReference) + } + schemeAPI, err := rc.schemeGet(refSrc.Scheme) + if err != nil { + return err + } + return schemeAPI.BlobMount(ctx, refSrc, refTgt, d) +} + +// BlobPut uploads a blob to a repository. +// Descriptor is optional, leave size and digest to zero value if unknown. +// Reader must also be an [io.Seeker] to support chunked upload fallback. +// +// This will attempt an anonymous blob mount first which some registries may support. +// It will then try doing a full put of the blob without chunking (most widely supported). +// If the full put fails, it will fall back to a chunked upload (useful for flaky networks). 
+func (rc *RegClient) BlobPut(ctx context.Context, r ref.Ref, d descriptor.Descriptor, rdr io.Reader) (descriptor.Descriptor, error) { + if !r.IsSetRepo() { + return descriptor.Descriptor{}, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return descriptor.Descriptor{}, err + } + return schemeAPI.BlobPut(ctx, r, d, rdr) +} diff --git a/vendor/github.com/regclient/regclient/config/credhelper.go b/vendor/github.com/regclient/regclient/config/credhelper.go new file mode 100644 index 00000000..b926f993 --- /dev/null +++ b/vendor/github.com/regclient/regclient/config/credhelper.go @@ -0,0 +1,97 @@ +package config + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "os/exec" + "strings" +) + +// credHelper wraps a command that manages user credentials. +type credHelper struct { + prog string + env map[string]string +} + +func newCredHelper(prog string, env map[string]string) *credHelper { + return &credHelper{prog: prog, env: env} +} + +func (ch *credHelper) run(arg string, input io.Reader) ([]byte, error) { + //#nosec G204 only untrusted arg is a hostname which the executed command should not trust + cmd := exec.Command(ch.prog, arg) + cmd.Env = os.Environ() + if ch.env != nil { + for k, v := range ch.env { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v)) + } + } + cmd.Stderr = os.Stderr + cmd.Stdin = input + return cmd.Output() +} + +type credStore struct { + ServerURL string `json:"ServerURL"` + Username string `json:"Username"` + Secret string `json:"Secret"` +} + +// get requests a credential from the helper for a given host. 
+func (ch *credHelper) get(host *Host) error { + hostname := host.Hostname + if host.CredHost != "" { + hostname = host.CredHost + } + hostIn := strings.NewReader(hostname) + credOut := credStore{ + Username: host.User, + Secret: host.Pass, + } + outB, err := ch.run("get", hostIn) + if err != nil { + outS := strings.TrimSpace(string(outB)) + return fmt.Errorf("error getting credentials, output: %s, error: %w", outS, err) + } + err = json.NewDecoder(bytes.NewReader(outB)).Decode(&credOut) + if err != nil { + return fmt.Errorf("error reading credentials: %w", err) + } + if credOut.Username == tokenUser { + host.User = "" + host.Pass = "" + host.Token = credOut.Secret + } else { + host.User = credOut.Username + host.Pass = credOut.Secret + host.Token = "" + } + return nil +} + +// list returns a list of hosts supported by the credential helper. +func (ch *credHelper) list() ([]Host, error) { + credList := map[string]string{} + outB, err := ch.run("list", bytes.NewReader([]byte{})) + if err != nil { + outS := strings.TrimSpace(string(outB)) + return nil, fmt.Errorf("error getting credential list, output: %s, error: %w", outS, err) + } + err = json.NewDecoder(bytes.NewReader(outB)).Decode(&credList) + if err != nil { + return nil, fmt.Errorf("error reading credential list: %w", err) + } + hostList := []Host{} + for host, user := range credList { + h := HostNewName(host) + h.User = user + h.CredHelper = ch.prog + hostList = append(hostList, *h) + } + return hostList, nil +} + +// TODO: store method not implemented diff --git a/vendor/github.com/regclient/regclient/config/docker.go b/vendor/github.com/regclient/regclient/config/docker.go new file mode 100644 index 00000000..0918db9e --- /dev/null +++ b/vendor/github.com/regclient/regclient/config/docker.go @@ -0,0 +1,154 @@ +package config + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "strings" + + "github.com/regclient/regclient/internal/conffile" +) + +const ( + // dockerEnv is 
the environment variable used to look for Docker's config.json. + dockerEnv = "DOCKER_CONFIG" + // dockerDir is the directory name for Docker's config (inside the users home directory). + dockerDir = ".docker" + // dockerConfFile is the name of Docker's config file. + dockerConfFile = "config.json" + // dockerHelperPre is the prefix of docker credential helpers. + dockerHelperPre = "docker-credential-" +) + +// dockerConfig is used to parse the ~/.docker/config.json +type dockerConfig struct { + AuthConfigs map[string]dockerAuthConfig `json:"auths"` + HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` + DetachKeys string `json:"detachKeys,omitempty"` + CredentialsStore string `json:"credsStore,omitempty"` + CredentialHelpers map[string]string `json:"credHelpers,omitempty"` + Proxies map[string]dockerProxyConfig `json:"proxies,omitempty"` +} + +// dockerProxyConfig contains proxy configuration settings +type dockerProxyConfig struct { + HTTPProxy string `json:"httpProxy,omitempty"` + HTTPSProxy string `json:"httpsProxy,omitempty"` + NoProxy string `json:"noProxy,omitempty"` + FTPProxy string `json:"ftpProxy,omitempty"` + AllProxy string `json:"allProxy,omitempty"` +} + +// dockerAuthConfig contains the auths +type dockerAuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} + +// DockerLoad returns a slice of hosts from the users docker config. 
+func DockerLoad() ([]Host, error) { + cf := conffile.New(conffile.WithDirName(dockerDir, dockerConfFile), conffile.WithEnvDir(dockerEnv, dockerConfFile)) + return dockerParse(cf) +} + +// DockerLoadFile returns a slice of hosts from a named docker config file. +func DockerLoadFile(fname string) ([]Host, error) { + cf := conffile.New(conffile.WithFullname(fname)) + return dockerParse(cf) +} + +// dockerParse parses a docker config into a slice of Hosts. +func dockerParse(cf *conffile.File) ([]Host, error) { + rdr, err := cf.Open() + if err != nil && errors.Is(err, fs.ErrNotExist) { + return []Host{}, nil + } else if err != nil { + return nil, err + } + defer rdr.Close() + dc := dockerConfig{} + if err := json.NewDecoder(rdr).Decode(&dc); err != nil && !errors.Is(err, io.EOF) { + return nil, err + } + hosts := []Host{} + for name, auth := range dc.AuthConfigs { + h, err := dockerAuthToHost(name, dc, auth) + if err != nil { + continue + } + hosts = append(hosts, h) + } + // also include default entries for credential helpers + for name, helper := range dc.CredentialHelpers { + h := HostNewName(name) + h.CredHelper = dockerHelperPre + helper + if _, ok := dc.AuthConfigs[h.Name]; ok { + continue // skip fields with auth config + } + hosts = append(hosts, *h) + } + // add credStore entries + if dc.CredentialsStore != "" { + ch := newCredHelper(dockerHelperPre+dc.CredentialsStore, map[string]string{}) + csHosts, err := ch.list() + if err == nil { + hosts = append(hosts, csHosts...) + } + } + return hosts, nil +} + +// dockerAuthToHost parses an auth entry from a docker config into a Host. 
+func dockerAuthToHost(name string, conf dockerConfig, auth dockerAuthConfig) (Host, error) { + helper := "" + if conf.CredentialHelpers != nil && conf.CredentialHelpers[name] != "" { + helper = dockerHelperPre + conf.CredentialHelpers[name] + } + // parse base64 auth into user/pass + if auth.Auth != "" { + var err error + auth.Username, auth.Password, err = decodeAuth(auth.Auth) + if err != nil { + return Host{}, err + } + } + if (auth.Username == "" || auth.Password == "") && auth.IdentityToken == "" && helper == "" { + return Host{}, fmt.Errorf("no credentials found for %s", name) + } + + h := HostNewName(name) + h.User = auth.Username + h.Pass = auth.Password + h.Token = auth.IdentityToken + h.CredHelper = helper + return *h, nil +} + +// decodeAuth extracts a base64 encoded user:pass into the username and password. +func decodeAuth(authStr string) (string, string, error) { + if authStr == "" { + return "", "", nil + } + decoded, err := base64.StdEncoding.DecodeString(authStr) + if err != nil { + return "", "", err + } + userPass := strings.SplitN(string(decoded), ":", 2) + if len(userPass) != 2 { + return "", "", fmt.Errorf("invalid auth configuration file") + } + return userPass[0], strings.Trim(userPass[1], "\x00"), nil +} diff --git a/vendor/github.com/regclient/regclient/config/host.go b/vendor/github.com/regclient/regclient/config/host.go new file mode 100644 index 00000000..77ae57bf --- /dev/null +++ b/vendor/github.com/regclient/regclient/config/host.go @@ -0,0 +1,524 @@ +// Package config is used for all regclient configuration settings. +package config + +import ( + "encoding/json" + "fmt" + "io" + "log/slog" + "strings" + "time" + + "github.com/regclient/regclient/internal/timejson" +) + +// TLSConf specifies whether TLS is enabled and verified for a host. +type TLSConf int + +const ( + // TLSUndefined indicates TLS is not passed, defaults to Enabled. + TLSUndefined TLSConf = iota + // TLSEnabled uses TLS (https) for the connection. 
+ TLSEnabled + // TLSInsecure uses TLS but does not verify CA. + TLSInsecure + // TLSDisabled does not use TLS (http). + TLSDisabled +) + +const ( + // DockerRegistry is the name resolved in docker images on Hub. + DockerRegistry = "docker.io" + // DockerRegistryAuth is the name provided in docker's config for Hub. + DockerRegistryAuth = "https://index.docker.io/v1/" + // DockerRegistryDNS is the host to connect to for Hub. + DockerRegistryDNS = "registry-1.docker.io" + // defaultExpire is the default time to expire a credential and force re-authentication. + defaultExpire = time.Hour * 1 + // defaultCredHelperRetry is the time to refresh a credential from a failed credential helper command. + defaultCredHelperRetry = time.Second * 5 + // defaultConcurrent is the default number of concurrent registry connections. + defaultConcurrent = 3 + // defaultReqPerSec is the default maximum frequency to send requests to a registry. + defaultReqPerSec = 0 + // tokenUser is the username returned by credential helpers that indicates the password is an identity token. + tokenUser = "" +) + +// MarshalJSON converts TLSConf to a json string using MarshalText. +func (t TLSConf) MarshalJSON() ([]byte, error) { + s, err := t.MarshalText() + if err != nil { + return []byte(""), err + } + return json.Marshal(string(s)) +} + +// MarshalText converts TLSConf to a string. +func (t TLSConf) MarshalText() ([]byte, error) { + var s string + switch t { + default: + s = "" + case TLSEnabled: + s = "enabled" + case TLSInsecure: + s = "insecure" + case TLSDisabled: + s = "disabled" + } + return []byte(s), nil +} + +// UnmarshalJSON converts TLSConf from a json string. +func (t *TLSConf) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + return t.UnmarshalText([]byte(s)) +} + +// UnmarshalText converts TLSConf from a string. 
+func (t *TLSConf) UnmarshalText(b []byte) error { + switch strings.ToLower(string(b)) { + default: + return fmt.Errorf("unknown TLS value \"%s\"", b) + case "": + *t = TLSUndefined + case "enabled": + *t = TLSEnabled + case "insecure": + *t = TLSInsecure + case "disabled": + *t = TLSDisabled + } + return nil +} + +// Host defines settings for connecting to a registry. +type Host struct { + Name string `json:"-" yaml:"registry,omitempty"` // Name of the registry (required) (yaml configs pass this as a field, json provides this from the object key) + TLS TLSConf `json:"tls,omitempty" yaml:"tls"` // TLS setting: enabled (default), disabled, insecure + RegCert string `json:"regcert,omitempty" yaml:"regcert"` // public pem cert of registry + ClientCert string `json:"clientCert,omitempty" yaml:"clientCert"` // public pem cert for client (mTLS) + ClientKey string `json:"clientKey,omitempty" yaml:"clientKey"` // private pem cert for client (mTLS) + Hostname string `json:"hostname,omitempty" yaml:"hostname"` // hostname of registry, default is the registry name + User string `json:"user,omitempty" yaml:"user"` // username, not used with credHelper + Pass string `json:"pass,omitempty" yaml:"pass"` // password, not used with credHelper + Token string `json:"token,omitempty" yaml:"token"` // token, experimental for specific APIs + CredHelper string `json:"credHelper,omitempty" yaml:"credHelper"` // credential helper command for requesting logins + CredExpire timejson.Duration `json:"credExpire,omitempty" yaml:"credExpire"` // time until credential expires + CredHost string `json:"credHost,omitempty" yaml:"credHost"` // used when a helper hostname doesn't match Hostname + PathPrefix string `json:"pathPrefix,omitempty" yaml:"pathPrefix"` // used for mirrors defined within a repository namespace + Mirrors []string `json:"mirrors,omitempty" yaml:"mirrors"` // list of other Host Names to use as mirrors + Priority uint `json:"priority,omitempty" yaml:"priority"` // priority when 
sorting mirrors, higher priority attempted first + RepoAuth bool `json:"repoAuth,omitempty" yaml:"repoAuth"` // tracks a separate auth per repo + API string `json:"api,omitempty" yaml:"api"` // Deprecated: registry API to use + APIOpts map[string]string `json:"apiOpts,omitempty" yaml:"apiOpts"` // options for APIs + BlobChunk int64 `json:"blobChunk,omitempty" yaml:"blobChunk"` // size of each blob chunk + BlobMax int64 `json:"blobMax,omitempty" yaml:"blobMax"` // threshold to switch to chunked upload, -1 to disable, 0 for regclient.blobMaxPut + ReqPerSec float64 `json:"reqPerSec,omitempty" yaml:"reqPerSec"` // requests per second + ReqConcurrent int64 `json:"reqConcurrent,omitempty" yaml:"reqConcurrent"` // concurrent requests, default is defaultConcurrent(3) + Scheme string `json:"scheme,omitempty" yaml:"scheme"` // Deprecated: use TLS instead + credRefresh time.Time `json:"-" yaml:"-"` // internal use, when to refresh credentials +} + +// Cred defines a user credential for accessing a registry. +type Cred struct { + User, Password, Token string +} + +// HostNew creates a default Host entry. +func HostNew() *Host { + h := Host{ + TLS: TLSEnabled, + APIOpts: map[string]string{}, + ReqConcurrent: int64(defaultConcurrent), + ReqPerSec: float64(defaultReqPerSec), + } + return &h +} + +// HostNewDefName creates a host using provided defaults and hostname. 
+func HostNewDefName(def *Host, name string) *Host { + var h Host + if def == nil { + h = *HostNew() + } else { + h = *def + // configure required defaults + if h.TLS == TLSUndefined { + h.TLS = TLSEnabled + } + if h.APIOpts == nil { + h.APIOpts = map[string]string{} + } + if h.ReqConcurrent == 0 { + h.ReqConcurrent = int64(defaultConcurrent) + } + if h.ReqPerSec == 0 { + h.ReqPerSec = float64(defaultReqPerSec) + } + // copy any fields that are not passed by value + if len(h.APIOpts) > 0 { + orig := h.APIOpts + h.APIOpts = map[string]string{} + for k, v := range orig { + h.APIOpts[k] = v + } + } + if h.Mirrors != nil { + orig := h.Mirrors + h.Mirrors = make([]string, len(orig)) + copy(h.Mirrors, orig) + } + } + // configure host + origName := name + // Docker Hub is a special case + if name == DockerRegistryAuth || name == DockerRegistryDNS || name == DockerRegistry { + h.Name = DockerRegistry + h.Hostname = DockerRegistryDNS + h.CredHost = DockerRegistryAuth + return &h + } + // handle http/https prefix + i := strings.Index(name, "://") + if i > 0 { + scheme := name[:i] + name = name[i+3:] + if scheme == "http" { + h.TLS = TLSDisabled + } + } + // trim any repository path + i = strings.Index(name, "/") + if i > 0 { + name = name[:i] + } + h.Name = name + h.Hostname = name + if origName != name { + h.CredHost = origName + } + return &h +} + +// HostNewName creates a default Host with a hostname. +func HostNewName(name string) *Host { + return HostNewDefName(nil, name) +} + +// GetCred returns the credential, fetching from a credential helper if needed. 
+func (host *Host) GetCred() Cred { + // refresh from credHelper if needed + if host.CredHelper != "" && (host.credRefresh.IsZero() || time.Now().After(host.credRefresh)) { + host.refreshHelper() + } + return Cred{User: host.User, Password: host.Pass, Token: host.Token} +} + +func (host *Host) refreshHelper() { + if host.CredHelper == "" { + return + } + if host.CredExpire <= 0 { + host.CredExpire = timejson.Duration(defaultExpire) + } + // run a cred helper, calling get method + ch := newCredHelper(host.CredHelper, map[string]string{}) + err := ch.get(host) + if err != nil { + host.credRefresh = time.Now().Add(defaultCredHelperRetry) + } else { + host.credRefresh = time.Now().Add(time.Duration(host.CredExpire)) + } +} + +// IsZero returns true if the struct is set to the zero value or the result of [HostNew]. +func (host Host) IsZero() bool { + if host.Name != "" || + (host.TLS != TLSUndefined && host.TLS != TLSEnabled) || + host.RegCert != "" || + host.ClientCert != "" || + host.ClientKey != "" || + host.Hostname != "" || + host.User != "" || + host.Pass != "" || + host.Token != "" || + host.CredHelper != "" || + host.CredExpire != 0 || + host.CredHost != "" || + host.PathPrefix != "" || + len(host.Mirrors) != 0 || + host.Priority != 0 || + host.RepoAuth || + len(host.APIOpts) != 0 || + host.BlobChunk != 0 || + host.BlobMax != 0 || + (host.ReqPerSec != 0 && host.ReqPerSec != float64(defaultReqPerSec)) || + (host.ReqConcurrent != 0 && host.ReqConcurrent != int64(defaultConcurrent)) || + !host.credRefresh.IsZero() { + return false + } + return true +} + +// Merge adds fields from a new config host entry. 
+func (host *Host) Merge(newHost Host, log *slog.Logger) error { + name := newHost.Name + if name == "" { + name = host.Name + } + if log == nil { + log = slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{})) + } + + // merge the existing and new config host + if host.Name == "" { + // only set the name if it's not initialized, this shouldn't normally change + host.Name = newHost.Name + } + + if newHost.CredHelper == "" && (newHost.Pass != "" || host.Token != "") { + // unset existing cred helper for user/pass or token + host.CredHelper = "" + host.CredExpire = 0 + } + if newHost.CredHelper != "" && newHost.User == "" && newHost.Pass == "" && newHost.Token == "" { + // unset existing user/pass/token for cred helper + host.User = "" + host.Pass = "" + host.Token = "" + } + + if newHost.User != "" { + if host.User != "" && host.User != newHost.User { + log.Warn("Changing login user for registry", + slog.String("orig", host.User), + slog.String("new", newHost.User), + slog.String("host", name)) + } + host.User = newHost.User + } + + if newHost.Pass != "" { + if host.Pass != "" && host.Pass != newHost.Pass { + log.Warn("Changing login password for registry", + slog.String("host", name)) + } + host.Pass = newHost.Pass + } + + if newHost.Token != "" { + if host.Token != "" && host.Token != newHost.Token { + log.Warn("Changing login token for registry", + slog.String("host", name)) + } + host.Token = newHost.Token + } + + if newHost.CredHelper != "" { + if host.CredHelper != "" && host.CredHelper != newHost.CredHelper { + log.Warn("Changing credential helper for registry", + slog.String("host", name), + slog.String("orig", host.CredHelper), + slog.String("new", newHost.CredHelper)) + } + host.CredHelper = newHost.CredHelper + } + + if newHost.CredExpire != 0 { + if host.CredExpire != 0 && host.CredExpire != newHost.CredExpire { + log.Warn("Changing credential expire for registry", + slog.String("host", name), + slog.Any("orig", host.CredExpire), + 
slog.Any("new", newHost.CredExpire)) + } + host.CredExpire = newHost.CredExpire + } + + if newHost.CredHost != "" { + if host.CredHost != "" && host.CredHost != newHost.CredHost { + log.Warn("Changing credential host for registry", + slog.String("host", name), + slog.String("orig", host.CredHost), + slog.String("new", newHost.CredHost)) + } + host.CredHost = newHost.CredHost + } + + if newHost.TLS != TLSUndefined { + if host.TLS != TLSUndefined && host.TLS != newHost.TLS { + tlsOrig, _ := host.TLS.MarshalText() + tlsNew, _ := newHost.TLS.MarshalText() + log.Warn("Changing TLS settings for registry", + slog.String("orig", string(tlsOrig)), + slog.String("new", string(tlsNew)), + slog.String("host", name)) + } + host.TLS = newHost.TLS + } + + if newHost.RegCert != "" { + if host.RegCert != "" && host.RegCert != newHost.RegCert { + log.Warn("Changing certificate settings for registry", + slog.String("orig", host.RegCert), + slog.String("new", newHost.RegCert), + slog.String("host", name)) + } + host.RegCert = newHost.RegCert + } + + if newHost.ClientCert != "" { + if host.ClientCert != "" && host.ClientCert != newHost.ClientCert { + log.Warn("Changing client certificate settings for registry", + slog.String("orig", host.ClientCert), + slog.String("new", newHost.ClientCert), + slog.String("host", name)) + } + host.ClientCert = newHost.ClientCert + } + + if newHost.ClientKey != "" { + if host.ClientKey != "" && host.ClientKey != newHost.ClientKey { + log.Warn("Changing client certificate key settings for registry", + slog.String("host", name)) + } + host.ClientKey = newHost.ClientKey + } + + if newHost.Hostname != "" { + if host.Hostname != "" && host.Hostname != newHost.Hostname { + log.Warn("Changing hostname settings for registry", + slog.String("orig", host.Hostname), + slog.String("new", newHost.Hostname), + slog.String("host", name)) + } + host.Hostname = newHost.Hostname + } + + if newHost.PathPrefix != "" { + newHost.PathPrefix = strings.Trim(newHost.PathPrefix, 
"/") // leading and trailing / are not needed + if host.PathPrefix != "" && host.PathPrefix != newHost.PathPrefix { + log.Warn("Changing path prefix settings for registry", + slog.String("orig", host.PathPrefix), + slog.String("new", newHost.PathPrefix), + slog.String("host", name)) + } + host.PathPrefix = newHost.PathPrefix + } + + if len(newHost.Mirrors) > 0 { + if len(host.Mirrors) > 0 && !stringSliceEq(host.Mirrors, newHost.Mirrors) { + log.Warn("Changing mirror settings for registry", + slog.Any("orig", host.Mirrors), + slog.Any("new", newHost.Mirrors), + slog.String("host", name)) + } + host.Mirrors = newHost.Mirrors + } + + if newHost.Priority != 0 { + if host.Priority != 0 && host.Priority != newHost.Priority { + log.Warn("Changing priority settings for registry", + slog.Uint64("orig", uint64(host.Priority)), + slog.Uint64("new", uint64(newHost.Priority)), + slog.String("host", name)) + } + host.Priority = newHost.Priority + } + + if newHost.RepoAuth { + host.RepoAuth = newHost.RepoAuth + } + + // TODO: eventually delete + if newHost.API != "" { + log.Warn("API field has been deprecated", + slog.String("api", newHost.API), + slog.String("host", name)) + } + + if len(newHost.APIOpts) > 0 { + if len(host.APIOpts) > 0 { + merged := copyMapString(host.APIOpts) + for k, v := range newHost.APIOpts { + if host.APIOpts[k] != "" && host.APIOpts[k] != v { + log.Warn("Changing APIOpts setting for registry", + slog.String("orig", host.APIOpts[k]), + slog.String("new", newHost.APIOpts[k]), + slog.String("opt", k), + slog.String("host", name)) + } + merged[k] = v + } + host.APIOpts = merged + } else { + host.APIOpts = newHost.APIOpts + } + } + + if newHost.BlobChunk > 0 { + if host.BlobChunk != 0 && host.BlobChunk != newHost.BlobChunk { + log.Warn("Changing blobChunk settings for registry", + slog.Int64("orig", host.BlobChunk), + slog.Int64("new", newHost.BlobChunk), + slog.String("host", name)) + } + host.BlobChunk = newHost.BlobChunk + } + + if newHost.BlobMax != 0 { + 
if host.BlobMax != 0 && host.BlobMax != newHost.BlobMax { + log.Warn("Changing blobMax settings for registry", + slog.Int64("orig", host.BlobMax), + slog.Int64("new", newHost.BlobMax), + slog.String("host", name)) + } + host.BlobMax = newHost.BlobMax + } + + if newHost.ReqPerSec != 0 { + if host.ReqPerSec != 0 && host.ReqPerSec != newHost.ReqPerSec { + log.Warn("Changing reqPerSec settings for registry", + slog.Float64("orig", host.ReqPerSec), + slog.Float64("new", newHost.ReqPerSec), + slog.String("host", name)) + } + host.ReqPerSec = newHost.ReqPerSec + } + + if newHost.ReqConcurrent > 0 { + if host.ReqConcurrent != 0 && host.ReqConcurrent != newHost.ReqConcurrent { + log.Warn("Changing reqPerSec settings for registry", + slog.Int64("orig", host.ReqConcurrent), + slog.Int64("new", newHost.ReqConcurrent), + slog.String("host", name)) + } + host.ReqConcurrent = newHost.ReqConcurrent + } + + return nil +} + +func copyMapString(src map[string]string) map[string]string { + copy := map[string]string{} + for k, v := range src { + copy[k] = v + } + return copy +} + +func stringSliceEq(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if v != b[i] { + return false + } + } + return true +} diff --git a/vendor/github.com/regclient/regclient/image.go b/vendor/github.com/regclient/regclient/image.go new file mode 100644 index 00000000..9f2cf88b --- /dev/null +++ b/vendor/github.com/regclient/regclient/image.go @@ -0,0 +1,1916 @@ +package regclient + +import ( + "archive/tar" + "compress/gzip" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log/slog" + "net/url" + "path/filepath" + "strings" + "sync" + "time" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + digest "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/pkg/archive" + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/blob" + 
"github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/docker/schema2" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/mediatype" + v1 "github.com/regclient/regclient/types/oci/v1" + "github.com/regclient/regclient/types/platform" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/warning" +) + +const ( + dockerManifestFilename = "manifest.json" + ociLayoutVersion = "1.0.0" + ociIndexFilename = "index.json" + ociLayoutFilename = "oci-layout" + annotationRefName = "org.opencontainers.image.ref.name" + annotationImageName = "io.containerd.image.name" +) + +// used by import/export to match docker tar expected format +type dockerTarManifest struct { + Config string + RepoTags []string + Layers []string + Parent digest.Digest `json:",omitempty"` + LayerSources map[digest.Digest]descriptor.Descriptor `json:",omitempty"` +} + +type tarFileHandler func(header *tar.Header, trd *tarReadData) error +type tarReadData struct { + tr *tar.Reader + name string + handleAdded bool + handlers map[string]tarFileHandler + links map[string][]string + processed map[string]bool + finish []func() error + // data processed from various handlers + manifests map[digest.Digest]manifest.Manifest + ociIndex v1.Index + ociManifest manifest.Manifest + dockerManifestFound bool + dockerManifestList []dockerTarManifest + dockerManifest schema2.Manifest +} +type tarWriteData struct { + tw *tar.Writer + dirs map[string]bool + files map[string]bool + // uid, gid int + mode int64 + timestamp time.Time +} + +type imageOpt struct { + callback func(kind types.CallbackKind, instance string, state types.CallbackState, cur, total int64) + checkBaseDigest string + checkBaseRef string + checkSkipConfig bool + child bool + exportCompress bool + exportRef ref.Ref + fastCheck bool + forceRecursive bool + importName string + includeExternal bool + digestTags 
bool + platform string + platforms []string + referrerConfs []scheme.ReferrerConfig + referrerSrc ref.Ref + referrerTgt ref.Ref + tagList []string + mu sync.Mutex + seen map[string]*imageSeen + finalFn []func(context.Context) error +} + +type imageSeen struct { + done chan struct{} + err error +} + +// ImageOpts define options for the Image* commands. +type ImageOpts func(*imageOpt) + +// ImageWithCallback provides progress data to a callback function. +func ImageWithCallback(callback func(kind types.CallbackKind, instance string, state types.CallbackState, cur, total int64)) ImageOpts { + return func(opts *imageOpt) { + opts.callback = callback + } +} + +// ImageWithCheckBaseDigest provides a base digest to compare in ImageCheckBase. +func ImageWithCheckBaseDigest(d string) ImageOpts { + return func(opts *imageOpt) { + opts.checkBaseDigest = d + } +} + +// ImageWithCheckBaseRef provides a base reference to compare in ImageCheckBase. +func ImageWithCheckBaseRef(r string) ImageOpts { + return func(opts *imageOpt) { + opts.checkBaseRef = r + } +} + +// ImageWithCheckSkipConfig skips the configuration check in ImageCheckBase. +func ImageWithCheckSkipConfig() ImageOpts { + return func(opts *imageOpt) { + opts.checkSkipConfig = true + } +} + +// ImageWithChild attempts to copy every manifest and blob even if parent manifests already exist in ImageCopy. +func ImageWithChild() ImageOpts { + return func(opts *imageOpt) { + opts.child = true + } +} + +// ImageWithExportCompress adds gzip compression to tar export output in ImageExport. +func ImageWithExportCompress() ImageOpts { + return func(opts *imageOpt) { + opts.exportCompress = true + } +} + +// ImageWithExportRef overrides the image name embedded in the export file in ImageExport. +func ImageWithExportRef(r ref.Ref) ImageOpts { + return func(opts *imageOpt) { + opts.exportRef = r + } +} + +// ImageWithFastCheck skips check for referrers when manifest has already been copied in ImageCopy. 
+func ImageWithFastCheck() ImageOpts { + return func(opts *imageOpt) { + opts.fastCheck = true + } +} + +// ImageWithForceRecursive attempts to copy every manifest and blob even if parent manifests already exist in ImageCopy. +func ImageWithForceRecursive() ImageOpts { + return func(opts *imageOpt) { + opts.forceRecursive = true + } +} + +// ImageWithImportName selects the name of the image to import when multiple images are included in ImageImport. +func ImageWithImportName(name string) ImageOpts { + return func(opts *imageOpt) { + opts.importName = name + } +} + +// ImageWithIncludeExternal attempts to copy every manifest and blob even if parent manifests already exist in ImageCopy. +func ImageWithIncludeExternal() ImageOpts { + return func(opts *imageOpt) { + opts.includeExternal = true + } +} + +// ImageWithDigestTags looks for "sha-.*" tags in the repo to copy with any manifest in ImageCopy. +// These are used by some artifact systems like sigstore/cosign. +func ImageWithDigestTags() ImageOpts { + return func(opts *imageOpt) { + opts.digestTags = true + } +} + +// ImageWithPlatform requests specific platforms from a manifest list in ImageCheckBase. +func ImageWithPlatform(p string) ImageOpts { + return func(opts *imageOpt) { + opts.platform = p + } +} + +// ImageWithPlatforms only copies specific platforms from a manifest list in ImageCopy. +// This will result in a failure on many registries that validate manifests. +// Use the empty string to indicate images without a platform definition should be copied. +func ImageWithPlatforms(p []string) ImageOpts { + return func(opts *imageOpt) { + opts.platforms = p + } +} + +// ImageWithReferrers recursively recursively includes referrer images in ImageCopy. 
+func ImageWithReferrers(rOpts ...scheme.ReferrerOpts) ImageOpts { + return func(opts *imageOpt) { + if opts.referrerConfs == nil { + opts.referrerConfs = []scheme.ReferrerConfig{} + } + rConf := scheme.ReferrerConfig{} + for _, rOpt := range rOpts { + rOpt(&rConf) + } + opts.referrerConfs = append(opts.referrerConfs, rConf) + } +} + +// ImageWithReferrerSrc specifies an alternate repository to pull referrers from. +func ImageWithReferrerSrc(src ref.Ref) ImageOpts { + return func(opts *imageOpt) { + opts.referrerSrc = src + } +} + +// ImageWithReferrerTgt specifies an alternate repository to pull referrers from. +func ImageWithReferrerTgt(tgt ref.Ref) ImageOpts { + return func(opts *imageOpt) { + opts.referrerTgt = tgt + } +} + +// ImageCheckBase returns nil if the base image is unchanged. +// A base image mismatch returns an error that wraps errs.ErrMismatch. +func (rc *RegClient) ImageCheckBase(ctx context.Context, r ref.Ref, opts ...ImageOpts) error { + var opt imageOpt + for _, optFn := range opts { + optFn(&opt) + } + var m manifest.Manifest + var err error + + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + // if the base name is not provided, check image for base annotations + if opt.checkBaseRef == "" { + m, err = rc.ManifestGet(ctx, r) + if err != nil { + return err + } + ma, ok := m.(manifest.Annotator) + if !ok { + return fmt.Errorf("image does not support annotations, base image must be provided%.0w", errs.ErrMissingAnnotation) + } + annot, err := ma.GetAnnotations() + if err != nil { + return err + } + if baseName, ok := annot[types.AnnotationBaseImageName]; ok { + opt.checkBaseRef = baseName + } else { + return fmt.Errorf("image does not have a base annotation, base image must be provided%.0w", errs.ErrMissingAnnotation) + } + if baseDig, ok := annot[types.AnnotationBaseImageDigest]; ok { + opt.checkBaseDigest = baseDig + } + } + baseR, err := 
ref.New(opt.checkBaseRef) + if err != nil { + return err + } + defer rc.Close(ctx, baseR) + + // if the digest is available, check if that matches the base name + if opt.checkBaseDigest != "" { + baseMH, err := rc.ManifestHead(ctx, baseR, WithManifestRequireDigest()) + if err != nil { + return err + } + expectDig, err := digest.Parse(opt.checkBaseDigest) + if err != nil { + return err + } + if baseMH.GetDescriptor().Digest == expectDig { + rc.slog.Debug("base image digest matches", + slog.String("name", baseR.CommonName()), + slog.String("digest", baseMH.GetDescriptor().Digest.String())) + return nil + } else { + rc.slog.Debug("base image digest changed", + slog.String("name", baseR.CommonName()), + slog.String("digest", baseMH.GetDescriptor().Digest.String()), + slog.String("expected", expectDig.String())) + return fmt.Errorf("base digest changed, %s, expected %s, received %s%.0w", + baseR.CommonName(), expectDig.String(), baseMH.GetDescriptor().Digest.String(), errs.ErrMismatch) + } + } + + // if the digest is not available, compare layers of each manifest + if m == nil { + m, err = rc.ManifestGet(ctx, r) + if err != nil { + return err + } + } + if m.IsList() && opt.platform != "" { + p, err := platform.Parse(opt.platform) + if err != nil { + return err + } + d, err := manifest.GetPlatformDesc(m, &p) + if err != nil { + return err + } + rp := r + rp.Digest = d.Digest.String() + m, err = rc.ManifestGet(ctx, rp) + if err != nil { + return err + } + } + if m.IsList() { + // loop through each platform + ml, ok := m.(manifest.Indexer) + if !ok { + return fmt.Errorf("manifest list is not an Indexer") + } + dl, err := ml.GetManifestList() + if err != nil { + return err + } + rp := r + for _, d := range dl { + rp.Digest = d.Digest.String() + optP := append(opts, ImageWithPlatform(d.Platform.String())) + err = rc.ImageCheckBase(ctx, rp, optP...) 
+ if err != nil { + return fmt.Errorf("platform %s mismatch: %w", d.Platform.String(), err) + } + } + return nil + } + img, ok := m.(manifest.Imager) + if !ok { + return fmt.Errorf("manifest must be an image") + } + layers, err := img.GetLayers() + if err != nil { + return err + } + baseM, err := rc.ManifestGet(ctx, baseR) + if err != nil { + return err + } + if baseM.IsList() && opt.platform != "" { + p, err := platform.Parse(opt.platform) + if err != nil { + return err + } + d, err := manifest.GetPlatformDesc(baseM, &p) + if err != nil { + return err + } + rp := baseR + rp.Digest = d.Digest.String() + baseM, err = rc.ManifestGet(ctx, rp) + if err != nil { + return err + } + } + baseImg, ok := baseM.(manifest.Imager) + if !ok { + return fmt.Errorf("base image manifest must be an image") + } + baseLayers, err := baseImg.GetLayers() + if err != nil { + return err + } + if len(baseLayers) <= 0 { + return fmt.Errorf("base image has no layers") + } + for i := range baseLayers { + if i >= len(layers) { + return fmt.Errorf("image has fewer layers than base image") + } + if !layers[i].Same(baseLayers[i]) { + rc.slog.Debug("image layer changed", + slog.Int("layer", i), + slog.String("expected", layers[i].Digest.String()), + slog.String("digest", baseLayers[i].Digest.String())) + return fmt.Errorf("base layer changed, %s[%d], expected %s, received %s%.0w", + baseR.CommonName(), i, layers[i].Digest.String(), baseLayers[i].Digest.String(), errs.ErrMismatch) + } + } + + if opt.checkSkipConfig { + return nil + } + + // if the layers match, compare the config history + confDesc, err := img.GetConfig() + if err != nil { + return err + } + conf, err := rc.BlobGetOCIConfig(ctx, r, confDesc) + if err != nil { + return err + } + confOCI := conf.GetConfig() + baseConfDesc, err := baseImg.GetConfig() + if err != nil { + return err + } + baseConf, err := rc.BlobGetOCIConfig(ctx, baseR, baseConfDesc) + if err != nil { + return err + } + baseConfOCI := baseConf.GetConfig() + for i := 
range baseConfOCI.History { + if i >= len(confOCI.History) { + return fmt.Errorf("image has fewer history entries than base image") + } + if baseConfOCI.History[i].Author != confOCI.History[i].Author || + baseConfOCI.History[i].Comment != confOCI.History[i].Comment || + !baseConfOCI.History[i].Created.Equal(*confOCI.History[i].Created) || + baseConfOCI.History[i].CreatedBy != confOCI.History[i].CreatedBy || + baseConfOCI.History[i].EmptyLayer != confOCI.History[i].EmptyLayer { + rc.slog.Debug("image history changed", + slog.Int("index", i), + slog.Any("expected", confOCI.History[i]), + slog.Any("history", baseConfOCI.History[i])) + return fmt.Errorf("base history changed, %s[%d], expected %v, received %v%.0w", + baseR.CommonName(), i, confOCI.History[i], baseConfOCI.History[i], errs.ErrMismatch) + } + } + + rc.slog.Debug("base image layers and history matches", + slog.String("base", baseR.CommonName())) + return nil +} + +// ImageConfig returns the OCI config of a given image. +// Use [ImageWithPlatform] to select a platform from an Index or Manifest List. 
+func (rc *RegClient) ImageConfig(ctx context.Context, r ref.Ref, opts ...ImageOpts) (*blob.BOCIConfig, error) { + opt := imageOpt{ + platform: "local", + } + for _, optFn := range opts { + optFn(&opt) + } + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + p, err := platform.Parse(opt.platform) + if err != nil { + return nil, fmt.Errorf("failed to parse platform %s: %w", opt.platform, err) + } + m, err := rc.ManifestGet(ctx, r, WithManifestPlatform(p)) + if err != nil { + return nil, fmt.Errorf("failed to get manifest: %w", err) + } + for m.IsList() { + mi, ok := m.(manifest.Indexer) + if !ok { + return nil, fmt.Errorf("unsupported manifest type: %s", m.GetDescriptor().MediaType) + } + ml, err := mi.GetManifestList() + if err != nil { + return nil, fmt.Errorf("failed to get manifest list: %w", err) + } + d, err := descriptor.DescriptorListSearch(ml, descriptor.MatchOpt{Platform: &p}) + if err != nil { + return nil, fmt.Errorf("failed to find platform in manifest list: %w", err) + } + m, err = rc.ManifestGet(ctx, r, WithManifestDesc(d)) + if err != nil { + return nil, fmt.Errorf("failed to get manifest: %w", err) + } + } + mi, ok := m.(manifest.Imager) + if !ok { + return nil, fmt.Errorf("unsupported manifest type: %s", m.GetDescriptor().MediaType) + } + d, err := mi.GetConfig() + if err != nil { + return nil, fmt.Errorf("failed to get image config: %w", err) + } + if d.MediaType != mediatype.OCI1ImageConfig && d.MediaType != mediatype.Docker2ImageConfig { + return nil, fmt.Errorf("unsupported config media type %s: %w", d.MediaType, errs.ErrUnsupportedMediaType) + } + return rc.BlobGetOCIConfig(ctx, r, d) +} + +// ImageCopy copies an image. +// This will retag an image in the same repository, only pushing and pulling the top level manifest. +// On the same registry, it will attempt to use cross-repository blob mounts to avoid pulling blobs. 
+// Blobs are only pulled when they don't exist on the target and a blob mount fails. +// Referrers are optionally copied recursively. +func (rc *RegClient) ImageCopy(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, opts ...ImageOpts) error { + opt := imageOpt{ + seen: map[string]*imageSeen{}, + finalFn: []func(context.Context) error{}, + } + for _, optFn := range opts { + optFn(&opt) + } + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + // block GC from running (in OCIDir) during the copy + schemeTgtAPI, err := rc.schemeGet(refTgt.Scheme) + if err != nil { + return err + } + if tgtGCLocker, isGCLocker := schemeTgtAPI.(scheme.GCLocker); isGCLocker { + tgtGCLocker.GCLock(refTgt) + defer tgtGCLocker.GCUnlock(refTgt) + } + // run the copy of manifests and blobs recursively + err = rc.imageCopyOpt(ctx, refSrc, refTgt, descriptor.Descriptor{}, opt.child, []digest.Digest{}, &opt) + if err != nil { + return err + } + // run any final functions, digest-tags and referrers that detected loops are retried here + for _, fn := range opt.finalFn { + err := fn(ctx) + if err != nil { + return err + } + } + return nil +} + +// imageCopyOpt is a thread safe copy of a manifest and nested content. 
+func (rc *RegClient) imageCopyOpt(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d descriptor.Descriptor, child bool, parents []digest.Digest, opt *imageOpt) (err error) { + var mSrc, mTgt manifest.Manifest + var sDig digest.Digest + refTgtRepo := refTgt.SetTag("").CommonName() + seenCB := func(error) {} + defer func() { + if seenCB != nil { + seenCB(err) + } + }() + // if digest is provided and we are already copying it, wait + if d.Digest != "" { + sDig = d.Digest + } else if refSrc.Digest != "" { + sDig = digest.Digest(refSrc.Digest) + } + if sDig != "" { + if seenCB, err = imageSeenOrWait(ctx, opt, refTgtRepo, refTgt.Tag, sDig, parents); seenCB == nil { + return err + } + } + // check target with head request + mTgt, err = rc.ManifestHead(ctx, refTgt, WithManifestRequireDigest()) + var urlError *url.Error + if err != nil && errors.As(err, &urlError) { + return fmt.Errorf("failed to access target registry: %w", err) + } + // for non-recursive copies, compare to source digest + if err == nil && (opt.fastCheck || (!opt.forceRecursive && opt.referrerConfs == nil && !opt.digestTags)) { + if sDig == "" { + mSrc, err = rc.ManifestHead(ctx, refSrc, WithManifestRequireDigest()) + if err != nil { + return fmt.Errorf("copy failed, error getting source: %w", err) + } + sDig = mSrc.GetDescriptor().Digest + if seenCB, err = imageSeenOrWait(ctx, opt, refTgtRepo, refTgt.Tag, sDig, parents); seenCB == nil { + return err + } + } + if sDig == mTgt.GetDescriptor().Digest { + if opt.callback != nil { + opt.callback(types.CallbackManifest, d.Digest.String(), types.CallbackSkipped, mTgt.GetDescriptor().Size, mTgt.GetDescriptor().Size) + } + return nil + } + } + // when copying/updating digest tags or referrers, only the source digest is needed for an image + if mTgt != nil && mSrc == nil && !opt.forceRecursive && sDig == "" { + mSrc, err = rc.ManifestHead(ctx, refSrc, WithManifestRequireDigest()) + if err != nil { + return fmt.Errorf("copy failed, error getting source: %w", 
err) + } + sDig = mSrc.GetDescriptor().Digest + if seenCB, err = imageSeenOrWait(ctx, opt, refTgtRepo, refTgt.Tag, sDig, parents); seenCB == nil { + return err + } + } + // get the source manifest when a copy is needed or recursion into the content is needed + if sDig == "" || mTgt == nil || sDig != mTgt.GetDescriptor().Digest || opt.forceRecursive || mTgt.IsList() { + mSrc, err = rc.ManifestGet(ctx, refSrc, WithManifestDesc(d)) + if err != nil { + return fmt.Errorf("copy failed, error getting source: %w", err) + } + if sDig == "" { + sDig = mSrc.GetDescriptor().Digest + if seenCB, err = imageSeenOrWait(ctx, opt, refTgtRepo, refTgt.Tag, sDig, parents); seenCB == nil { + return err + } + } + } + // setup vars for a copy + mOpts := []ManifestOpts{} + if child { + mOpts = append(mOpts, WithManifestChild()) + } + bOpt := []BlobOpts{} + if opt.callback != nil { + bOpt = append(bOpt, BlobWithCallback(opt.callback)) + } + waitCh := make(chan error) + waitCount := 0 + ctx, cancel := context.WithCancel(ctx) + defer cancel() + parentsNew := make([]digest.Digest, len(parents)+1) + copy(parentsNew, parents) + parentsNew[len(parentsNew)-1] = sDig + if opt.callback != nil { + opt.callback(types.CallbackManifest, d.Digest.String(), types.CallbackStarted, 0, d.Size) + } + // process entries in an index + if mSrcIndex, ok := mSrc.(manifest.Indexer); ok && mSrc.IsSet() && !ref.EqualRepository(refSrc, refTgt) { + // manifest lists need to recursively copy nested images by digest + dList, err := mSrcIndex.GetManifestList() + if err != nil { + return err + } + for _, dEntry := range dList { + // skip copy of platforms not specifically included + if len(opt.platforms) > 0 { + match, err := imagePlatformInList(dEntry.Platform, opt.platforms) + if err != nil { + return err + } + if !match { + rc.slog.Debug("Platform excluded from copy", + slog.Any("platform", dEntry.Platform)) + continue + } + } + dEntry := dEntry + waitCount++ + go func() { + var err error + rc.slog.Debug("Copy 
platform", + slog.Any("platform", dEntry.Platform), + slog.String("digest", dEntry.Digest.String())) + entrySrc := refSrc.SetDigest(dEntry.Digest.String()) + entryTgt := refTgt.SetDigest(dEntry.Digest.String()) + switch dEntry.MediaType { + case mediatype.Docker1Manifest, mediatype.Docker1ManifestSigned, + mediatype.Docker2Manifest, mediatype.Docker2ManifestList, + mediatype.OCI1Manifest, mediatype.OCI1ManifestList: + // known manifest media type + err = rc.imageCopyOpt(ctx, entrySrc, entryTgt, dEntry, true, parentsNew, opt) + case mediatype.Docker2ImageConfig, mediatype.OCI1ImageConfig, + mediatype.Docker2Layer, mediatype.Docker2LayerGzip, mediatype.Docker2LayerZstd, + mediatype.OCI1Layer, mediatype.OCI1LayerGzip, mediatype.OCI1LayerZstd, + mediatype.BuildkitCacheConfig: + // known blob media type + err = rc.imageCopyBlob(ctx, entrySrc, entryTgt, dEntry, opt, bOpt...) + default: + // unknown media type, first try an image copy + err = rc.imageCopyOpt(ctx, entrySrc, entryTgt, dEntry, true, parentsNew, opt) + if err != nil { + // fall back to trying to copy a blob + err = rc.imageCopyBlob(ctx, entrySrc, entryTgt, dEntry, opt, bOpt...) 
+ } + } + waitCh <- err + }() + } + } + + // If source is image, copy blobs + if mSrcImg, ok := mSrc.(manifest.Imager); ok && mSrc.IsSet() && !ref.EqualRepository(refSrc, refTgt) { + // copy the config + cd, err := mSrcImg.GetConfig() + if err != nil { + // docker schema v1 does not have a config object, ignore if it's missing + if !errors.Is(err, errs.ErrUnsupportedMediaType) { + rc.slog.Warn("Failed to get config digest from manifest", + slog.String("ref", refSrc.Reference), + slog.String("err", err.Error())) + return fmt.Errorf("failed to get config digest for %s: %w", refSrc.CommonName(), err) + } + } else { + waitCount++ + go func() { + rc.slog.Info("Copy config", + slog.String("source", refSrc.Reference), + slog.String("target", refTgt.Reference), + slog.String("digest", cd.Digest.String())) + err := rc.imageCopyBlob(ctx, refSrc, refTgt, cd, opt, bOpt...) + if err != nil && !errors.Is(err, context.Canceled) { + rc.slog.Warn("Failed to copy config", + slog.String("source", refSrc.Reference), + slog.String("target", refTgt.Reference), + slog.String("digest", cd.Digest.String()), + slog.String("err", err.Error())) + } + waitCh <- err + }() + } + + // copy filesystem layers + l, err := mSrcImg.GetLayers() + if err != nil { + return err + } + for _, layerSrc := range l { + if len(layerSrc.URLs) > 0 && !opt.includeExternal { + // skip blobs where the URLs are defined, these aren't hosted and won't be pulled from the source + rc.slog.Debug("Skipping external layer", + slog.String("source", refSrc.Reference), + slog.String("target", refTgt.Reference), + slog.String("layer", layerSrc.Digest.String()), + slog.Any("external-urls", layerSrc.URLs)) + continue + } + waitCount++ + layerSrc := layerSrc + go func() { + rc.slog.Info("Copy layer", + slog.String("source", refSrc.Reference), + slog.String("target", refTgt.Reference), + slog.String("layer", layerSrc.Digest.String())) + err := rc.imageCopyBlob(ctx, refSrc, refTgt, layerSrc, opt, bOpt...) 
+ if err != nil && !errors.Is(err, context.Canceled) { + rc.slog.Warn("Failed to copy layer", + slog.String("source", refSrc.Reference), + slog.String("target", refTgt.Reference), + slog.String("layer", layerSrc.Digest.String()), + slog.String("err", err.Error())) + } + waitCh <- err + }() + } + } + + // check for any errors and abort early if found + err = nil + done := false + for !done && waitCount > 0 { + if err == nil { + select { + case err = <-waitCh: + if err != nil { + cancel() + } + default: + done = true // happy path + } + } else { + if errors.Is(err, context.Canceled) { + // try to find a better error message than context canceled + err = <-waitCh + } else { + <-waitCh + } + } + if !done { + waitCount-- + } + } + if err != nil { + rc.slog.Debug("child manifest copy failed", + slog.String("err", err.Error()), + slog.String("sDig", sDig.String())) + return err + } + + // copy referrers + referrerTags := []string{} + if opt.referrerConfs != nil { + referrerOpts := []scheme.ReferrerOpts{} + rSubject := refSrc + referrerSrc := refSrc + referrerTgt := refTgt + if opt.referrerSrc.IsSet() { + referrerOpts = append(referrerOpts, scheme.WithReferrerSource(opt.referrerSrc)) + referrerSrc = opt.referrerSrc + } + if opt.referrerTgt.IsSet() { + referrerTgt = opt.referrerTgt + } + if sDig != "" { + rSubject = rSubject.SetDigest(sDig.String()) + } + rl, err := rc.ReferrerList(ctx, rSubject, referrerOpts...) + if err != nil { + return err + } + if !rl.Source.IsSet() || ref.EqualRepository(refSrc, rl.Source) { + referrerTags = append(referrerTags, rl.Tags...) + } + descList := []descriptor.Descriptor{} + if len(opt.referrerConfs) == 0 { + descList = rl.Descriptors + } else { + for _, rConf := range opt.referrerConfs { + rlFilter := scheme.ReferrerFilter(rConf, rl) + descList = append(descList, rlFilter.Descriptors...) 
+ } + } + for _, rDesc := range descList { + opt.mu.Lock() + seen := opt.seen[":"+rDesc.Digest.String()] + opt.mu.Unlock() + if seen != nil { + continue // skip referrers that have been seen + } + referrerSrc := referrerSrc.SetDigest(rDesc.Digest.String()) + referrerTgt := referrerTgt.SetDigest(rDesc.Digest.String()) + rDesc := rDesc + waitCount++ + go func() { + err := rc.imageCopyOpt(ctx, referrerSrc, referrerTgt, rDesc, true, parentsNew, opt) + if errors.Is(err, errs.ErrLoopDetected) { + // if a loop is detected, push the referrers copy to the end + opt.mu.Lock() + opt.finalFn = append(opt.finalFn, func(ctx context.Context) error { + return rc.imageCopyOpt(ctx, referrerSrc, referrerTgt, rDesc, true, []digest.Digest{}, opt) + }) + opt.mu.Unlock() + waitCh <- nil + } else { + if err != nil && !errors.Is(err, context.Canceled) { + rc.slog.Warn("Failed to copy referrer", + slog.String("digest", rDesc.Digest.String()), + slog.String("src", referrerSrc.CommonName()), + slog.String("tgt", referrerTgt.CommonName())) + } + waitCh <- err + } + }() + } + } + + // lookup digest tags to include artifacts with image + if opt.digestTags { + // load tag listing for digest tag copy + opt.mu.Lock() + if opt.tagList == nil { + tl, err := rc.TagList(ctx, refSrc) + if err != nil { + opt.mu.Unlock() + rc.slog.Warn("Failed to list tags for digest-tag copy", + slog.String("source", refSrc.Reference), + slog.String("err", err.Error())) + return err + } + tags, err := tl.GetTags() + if err != nil { + opt.mu.Unlock() + rc.slog.Warn("Failed to list tags for digest-tag copy", + slog.String("source", refSrc.Reference), + slog.String("err", err.Error())) + return err + } + if tags == nil { + tags = []string{} + } + opt.tagList = tags + } + opt.mu.Unlock() + prefix := fmt.Sprintf("%s-%s", sDig.Algorithm(), sDig.Encoded()) + for _, tag := range opt.tagList { + if strings.HasPrefix(tag, prefix) { + // skip referrers that were copied above + found := false + for _, referrerTag := range 
referrerTags { + if referrerTag == tag { + found = true + break + } + } + if found { + continue + } + refTagSrc := refSrc.SetTag(tag) + refTagTgt := refTgt.SetTag(tag) + tag := tag + waitCount++ + go func() { + err := rc.imageCopyOpt(ctx, refTagSrc, refTagTgt, descriptor.Descriptor{}, false, parentsNew, opt) + if errors.Is(err, errs.ErrLoopDetected) { + // if a loop is detected, push the digest tag copy back to the end + opt.mu.Lock() + opt.finalFn = append(opt.finalFn, func(ctx context.Context) error { + return rc.imageCopyOpt(ctx, refTagSrc, refTagTgt, descriptor.Descriptor{}, false, []digest.Digest{}, opt) + }) + opt.mu.Unlock() + waitCh <- nil + } else { + if err != nil && !errors.Is(err, context.Canceled) { + rc.slog.Warn("Failed to copy digest-tag", + slog.String("tag", tag), + slog.String("src", refTagSrc.CommonName()), + slog.String("tgt", refTagTgt.CommonName())) + } + waitCh <- err + } + }() + } + } + } + + // wait for background tasks to finish + err = nil + for waitCount > 0 { + if err == nil { + err = <-waitCh + if err != nil { + cancel() + } + } else { + if errors.Is(err, context.Canceled) { + // try to find a better error message than context canceled + err = <-waitCh + } else { + <-waitCh + } + } + waitCount-- + } + if err != nil { + return err + } + + // push manifest + if mTgt == nil || sDig != mTgt.GetDescriptor().Digest || opt.forceRecursive { + err = rc.ManifestPut(ctx, refTgt, mSrc, mOpts...) 
+ if err != nil { + if !errors.Is(err, context.Canceled) { + rc.slog.Warn("Failed to push manifest", + slog.String("target", refTgt.Reference), + slog.String("err", err.Error())) + } + return err + } + if opt.callback != nil { + opt.callback(types.CallbackManifest, d.Digest.String(), types.CallbackFinished, d.Size, d.Size) + } + } else { + if opt.callback != nil { + opt.callback(types.CallbackManifest, d.Digest.String(), types.CallbackSkipped, d.Size, d.Size) + } + } + if seenCB != nil { + seenCB(nil) + seenCB = nil + } + + return nil +} + +func (rc *RegClient) imageCopyBlob(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d descriptor.Descriptor, opt *imageOpt, bOpt ...BlobOpts) error { + seenCB, err := imageSeenOrWait(ctx, opt, refTgt.SetTag("").CommonName(), "", d.Digest, []digest.Digest{}) + if seenCB == nil { + return err + } + err = rc.BlobCopy(ctx, refSrc, refTgt, d, bOpt...) + seenCB(err) + return err +} + +// imageSeenOrWait returns either a callback to report the error when the digest hasn't been seen before +// or it will wait for the previous copy to run and return the error from that copy +func imageSeenOrWait(ctx context.Context, opt *imageOpt, repo, tag string, dig digest.Digest, parents []digest.Digest) (func(error), error) { + var seenNew *imageSeen + key := repo + "/" + tag + ":" + dig.String() + opt.mu.Lock() + seen := opt.seen[key] + if seen == nil { + seenNew = &imageSeen{ + done: make(chan struct{}), + } + opt.seen[key] = seenNew + } + opt.mu.Unlock() + if seen != nil { + // quick check for the previous copy already done + select { + case <-seen.done: + return nil, seen.err + default: + } + // look for loops in parents + for _, p := range parents { + if key == repo+"/"+tag+":"+p.String() { + return nil, errs.ErrLoopDetected + } + } + // wait for copy to finish or context to cancel + done := ctx.Done() + select { + case <-seen.done: + return nil, seen.err + case <-done: + return nil, ctx.Err() + } + } else { + return func(err error) { + 
seenNew.err = err + close(seenNew.done) + // on failures, delete the history to allow a retry + if err != nil { + opt.mu.Lock() + delete(opt.seen, key) + opt.mu.Unlock() + } + }, nil + } +} + +// ImageExport exports an image to an output stream. +// The format is compatible with "docker load" if a single image is selected and not a manifest list. +// The ref must include a tag for exporting to docker (defaults to latest), and may also include a digest. +// The export is also formatted according to [OCI Layout] which supports multi-platform images. +// A tar file will be sent to outStream. +// +// Resulting filesystem: +// - oci-layout: created at top level, can be done at the start +// - index.json: created at top level, single descriptor with org.opencontainers.image.ref.name annotation pointing to the tag +// - manifest.json: created at top level, based on every layer added, only works for a single arch image +// - blobs/$algo/$hash: each content addressable object (manifest, config, or layer), created recursively +// +// [OCI Layout]: https://github.com/opencontainers/image-spec/blob/master/image-layout.md +func (rc *RegClient) ImageExport(ctx context.Context, r ref.Ref, outStream io.Writer, opts ...ImageOpts) error { + if !r.IsSet() { + return fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + var ociIndex v1.Index + + var opt imageOpt + for _, optFn := range opts { + optFn(&opt) + } + if opt.exportRef.IsZero() { + opt.exportRef = r + } + + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + // create tar writer object + out := outStream + if opt.exportCompress { + gzOut := gzip.NewWriter(out) + defer gzOut.Close() + out = gzOut + } + tw := tar.NewWriter(out) + defer tw.Close() + twd := &tarWriteData{ + tw: tw, + dirs: map[string]bool{}, + files: map[string]bool{}, + mode: 0644, + } + + // retrieve image manifest + m, err := 
rc.ManifestGet(ctx, r) + if err != nil { + rc.slog.Warn("Failed to get manifest", + slog.String("ref", r.CommonName()), + slog.String("err", err.Error())) + return err + } + + // build/write oci-layout + ociLayout := v1.ImageLayout{Version: ociLayoutVersion} + err = twd.tarWriteFileJSON(ociLayoutFilename, ociLayout) + if err != nil { + return err + } + + // create a manifest descriptor + mDesc := m.GetDescriptor() + if mDesc.Annotations == nil { + mDesc.Annotations = map[string]string{} + } + mDesc.Annotations[annotationImageName] = opt.exportRef.CommonName() + mDesc.Annotations[annotationRefName] = opt.exportRef.Tag + + // generate/write an OCI index + ociIndex.Versioned = v1.IndexSchemaVersion + ociIndex.Manifests = []descriptor.Descriptor{mDesc} // initialize with the descriptor to the manifest list + err = twd.tarWriteFileJSON(ociIndexFilename, ociIndex) + if err != nil { + return err + } + + // append to docker manifest with tag, config filename, each layer filename, and layer descriptors + if mi, ok := m.(manifest.Imager); ok { + conf, err := mi.GetConfig() + if err != nil { + return err + } + if err = conf.Digest.Validate(); err != nil { + return err + } + refTag := opt.exportRef.ToReg() + if refTag.Digest != "" { + refTag.Digest = "" + } + if refTag.Tag == "" { + refTag.Tag = "latest" + } + dockerManifest := dockerTarManifest{ + RepoTags: []string{refTag.CommonName()}, + Config: tarOCILayoutDescPath(conf), + Layers: []string{}, + LayerSources: map[digest.Digest]descriptor.Descriptor{}, + } + dl, err := mi.GetLayers() + if err != nil { + return err + } + for _, d := range dl { + if err = d.Digest.Validate(); err != nil { + return err + } + dockerManifest.Layers = append(dockerManifest.Layers, tarOCILayoutDescPath(d)) + dockerManifest.LayerSources[d.Digest] = d + } + + // marshal manifest and write manifest.json + err = twd.tarWriteFileJSON(dockerManifestFilename, []dockerTarManifest{dockerManifest}) + if err != nil { + return err + } + } + + // recursively 
include manifests and nested blobs + err = rc.imageExportDescriptor(ctx, r, mDesc, twd) + if err != nil { + return err + } + + return nil +} + +// imageExportDescriptor pulls a manifest or blob, outputs to a tar file, and recursively processes any nested manifests or blobs +func (rc *RegClient) imageExportDescriptor(ctx context.Context, r ref.Ref, desc descriptor.Descriptor, twd *tarWriteData) error { + if err := desc.Digest.Validate(); err != nil { + return err + } + tarFilename := tarOCILayoutDescPath(desc) + if twd.files[tarFilename] { + // blob has already been imported into tar, skip + return nil + } + switch desc.MediaType { + case mediatype.Docker1Manifest, mediatype.Docker1ManifestSigned, mediatype.Docker2Manifest, mediatype.OCI1Manifest: + // Handle single platform manifests + // retrieve manifest + m, err := rc.ManifestGet(ctx, r, WithManifestDesc(desc)) + if err != nil { + return err + } + mi, ok := m.(manifest.Imager) + if !ok { + return fmt.Errorf("manifest doesn't support image methods%.0w", errs.ErrUnsupportedMediaType) + } + // write manifest body by digest + mBody, err := m.RawBody() + if err != nil { + return err + } + err = twd.tarWriteHeader(tarFilename, int64(len(mBody))) + if err != nil { + return err + } + _, err = twd.tw.Write(mBody) + if err != nil { + return err + } + + // add config + confD, err := mi.GetConfig() + // ignore unsupported media type errors + if err != nil && !errors.Is(err, errs.ErrUnsupportedMediaType) { + return err + } + if err == nil { + err = rc.imageExportDescriptor(ctx, r, confD, twd) + if err != nil { + return err + } + } + + // loop over layers + layerDL, err := mi.GetLayers() + // ignore unsupported media type errors + if err != nil && !errors.Is(err, errs.ErrUnsupportedMediaType) { + return err + } + if err == nil { + for _, layerD := range layerDL { + err = rc.imageExportDescriptor(ctx, r, layerD, twd) + if err != nil { + return err + } + } + } + + case mediatype.Docker2ManifestList, mediatype.OCI1ManifestList: 
+ // handle OCI index and Docker manifest list + // retrieve manifest + m, err := rc.ManifestGet(ctx, r, WithManifestDesc(desc)) + if err != nil { + return err + } + mi, ok := m.(manifest.Indexer) + if !ok { + return fmt.Errorf("manifest doesn't support index methods%.0w", errs.ErrUnsupportedMediaType) + } + // write manifest body by digest + mBody, err := m.RawBody() + if err != nil { + return err + } + err = twd.tarWriteHeader(tarFilename, int64(len(mBody))) + if err != nil { + return err + } + _, err = twd.tw.Write(mBody) + if err != nil { + return err + } + // recurse over entries in the list/index + mdl, err := mi.GetManifestList() + if err != nil { + return err + } + for _, md := range mdl { + err = rc.imageExportDescriptor(ctx, r, md, twd) + if err != nil { + return err + } + } + + default: + // get blob + blobR, err := rc.BlobGet(ctx, r, desc) + if err != nil { + return err + } + defer blobR.Close() + // write blob by digest + err = twd.tarWriteHeader(tarFilename, int64(desc.Size)) + if err != nil { + return err + } + size, err := io.Copy(twd.tw, blobR) + if err != nil { + return fmt.Errorf("failed to export blob %s: %w", desc.Digest.String(), err) + } + if size != desc.Size { + return fmt.Errorf("blob size mismatch, descriptor %d, received %d", desc.Size, size) + } + } + + return nil +} + +// ImageImport pushes an image from a tar file (ImageExport) to a registry. 
+func (rc *RegClient) ImageImport(ctx context.Context, r ref.Ref, rs io.ReadSeeker, opts ...ImageOpts) error { + if !r.IsSetRepo() { + return fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + var opt imageOpt + for _, optFn := range opts { + optFn(&opt) + } + + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + trd := &tarReadData{ + name: opt.importName, + handlers: map[string]tarFileHandler{}, + links: map[string][]string{}, + processed: map[string]bool{}, + finish: []func() error{}, + manifests: map[digest.Digest]manifest.Manifest{}, + } + + // add handler for oci-layout, index.json, and manifest.json + rc.imageImportOCIAddHandler(ctx, r, trd) + rc.imageImportDockerAddHandler(trd) + + // process tar file looking for oci-layout and index.json, load manifests/blobs on success + err := trd.tarReadAll(rs) + + if err != nil && errors.Is(err, errs.ErrNotFound) && trd.dockerManifestFound { + // import failed but manifest.json found, fall back to manifest.json processing + // add handlers for the docker manifest layers + rc.imageImportDockerAddLayerHandlers(ctx, r, trd) + // reprocess the tar looking for manifest.json files + err = trd.tarReadAll(rs) + if err != nil { + return fmt.Errorf("failed to import layers from docker tar: %w", err) + } + // push docker manifest + m, err := manifest.New(manifest.WithOrig(trd.dockerManifest)) + if err != nil { + return err + } + err = rc.ManifestPut(ctx, r, m) + if err != nil { + return err + } + } else if err != nil { + // unhandled error from tar read + return err + } else { + // successful load of OCI blobs, now push manifest and tag + err = rc.imageImportOCIPushManifests(ctx, r, trd) + if err != nil { + return err + } + } + return nil +} + +func (rc *RegClient) imageImportBlob(ctx context.Context, r ref.Ref, desc descriptor.Descriptor, trd *tarReadData) error { + // skip if blob already exists + 
_, err := rc.BlobHead(ctx, r, desc) + if err == nil { + return nil + } + // upload blob + _, err = rc.BlobPut(ctx, r, desc, trd.tr) + if err != nil { + return err + } + return nil +} + +// imageImportDockerAddHandler processes tar files generated by docker. +func (rc *RegClient) imageImportDockerAddHandler(trd *tarReadData) { + trd.handlers[dockerManifestFilename] = func(header *tar.Header, trd *tarReadData) error { + err := trd.tarReadFileJSON(&trd.dockerManifestList) + if err != nil { + return err + } + trd.dockerManifestFound = true + return nil + } +} + +// imageImportDockerAddLayerHandlers imports the docker layers when OCI import fails and docker manifest found. +func (rc *RegClient) imageImportDockerAddLayerHandlers(ctx context.Context, r ref.Ref, trd *tarReadData) { + // remove handlers for OCI + delete(trd.handlers, ociLayoutFilename) + delete(trd.handlers, ociIndexFilename) + + index := 0 + if trd.name != "" { + found := false + tags := []string{} + for i, entry := range trd.dockerManifestList { + tags = append(tags, entry.RepoTags...) 
+ for _, tag := range entry.RepoTags { + if tag == trd.name { + index = i + found = true + break + } + } + if found { + break + } + } + if !found { + rc.slog.Warn("Could not find requested name", + slog.Any("tags", tags), + slog.String("name", trd.name)) + return + } + } + + // make a docker v2 manifest from first json array entry (can only tag one image) + trd.dockerManifest.SchemaVersion = 2 + trd.dockerManifest.MediaType = mediatype.Docker2Manifest + trd.dockerManifest.Layers = make([]descriptor.Descriptor, len(trd.dockerManifestList[index].Layers)) + + // add handler for config + trd.handlers[filepath.ToSlash(filepath.Clean(trd.dockerManifestList[index].Config))] = func(header *tar.Header, trd *tarReadData) error { + // upload blob, digest is unknown + d, err := rc.BlobPut(ctx, r, descriptor.Descriptor{Size: header.Size}, trd.tr) + if err != nil { + return err + } + // save the resulting descriptor to the manifest + if od, ok := trd.dockerManifestList[index].LayerSources[d.Digest]; ok { + trd.dockerManifest.Config = od + } else { + d.MediaType = mediatype.Docker2ImageConfig + trd.dockerManifest.Config = d + } + return nil + } + // add handlers for each layer + for i, layerFile := range trd.dockerManifestList[index].Layers { + func(i int) { + trd.handlers[filepath.ToSlash(filepath.Clean(layerFile))] = func(header *tar.Header, trd *tarReadData) error { + // ensure blob is compressed + rdrUC, err := archive.Decompress(trd.tr) + if err != nil { + return err + } + gzipR, err := archive.Compress(rdrUC, archive.CompressGzip) + if err != nil { + return err + } + defer gzipR.Close() + // upload blob, digest and size is unknown + d, err := rc.BlobPut(ctx, r, descriptor.Descriptor{}, gzipR) + if err != nil { + return err + } + // save the resulting descriptor in the appropriate layer + if od, ok := trd.dockerManifestList[index].LayerSources[d.Digest]; ok { + trd.dockerManifest.Layers[i] = od + } else { + d.MediaType = mediatype.Docker2LayerGzip + 
trd.dockerManifest.Layers[i] = d + } + return nil + } + }(i) + } + trd.handleAdded = true +} + +// imageImportOCIAddHandler adds handlers for oci-layout and index.json found in OCI layout tar files. +func (rc *RegClient) imageImportOCIAddHandler(ctx context.Context, r ref.Ref, trd *tarReadData) { + // add handler for oci-layout, index.json, and manifest.json + var err error + var foundLayout, foundIndex bool + + // common handler code when both oci-layout and index.json have been processed + ociHandler := func(trd *tarReadData) error { + // no need to process docker manifest.json when OCI layout is available + delete(trd.handlers, dockerManifestFilename) + // create a manifest from the index + trd.ociManifest, err = manifest.New(manifest.WithOrig(trd.ociIndex)) + if err != nil { + return err + } + // start recursively processing manifests starting with the index + // there's no need to push the index.json by digest, it will be pushed by tag if needed + err = rc.imageImportOCIHandleManifest(ctx, r, trd.ociManifest, trd, false, false) + if err != nil { + return err + } + return nil + } + trd.handlers[ociLayoutFilename] = func(header *tar.Header, trd *tarReadData) error { + var ociLayout v1.ImageLayout + err := trd.tarReadFileJSON(&ociLayout) + if err != nil { + return err + } + if ociLayout.Version != ociLayoutVersion { + // unknown version, ignore + rc.slog.Warn("Unsupported oci-layout version", + slog.String("version", ociLayout.Version)) + return nil + } + foundLayout = true + if foundIndex { + err = ociHandler(trd) + if err != nil { + return err + } + } + return nil + } + trd.handlers[ociIndexFilename] = func(header *tar.Header, trd *tarReadData) error { + err := trd.tarReadFileJSON(&trd.ociIndex) + if err != nil { + return err + } + foundIndex = true + if foundLayout { + err = ociHandler(trd) + if err != nil { + return err + } + } + return nil + } +} + +// imageImportOCIHandleManifest recursively processes index and manifest entries from an OCI layout tar. 
+func (rc *RegClient) imageImportOCIHandleManifest(ctx context.Context, r ref.Ref, m manifest.Manifest, trd *tarReadData, push bool, child bool) error { + // cache the manifest to avoid needing to pull again later, this is used if index.json is a wrapper around some other manifest + trd.manifests[m.GetDescriptor().Digest] = m + + handleManifest := func(d descriptor.Descriptor, child bool) error { + if err := d.Digest.Validate(); err != nil { + return err + } + filename := tarOCILayoutDescPath(d) + if !trd.processed[filename] && trd.handlers[filename] == nil { + trd.handlers[filename] = func(header *tar.Header, trd *tarReadData) error { + b, err := io.ReadAll(trd.tr) + if err != nil { + return err + } + switch d.MediaType { + case mediatype.Docker1Manifest, mediatype.Docker1ManifestSigned, + mediatype.Docker2Manifest, mediatype.Docker2ManifestList, + mediatype.OCI1Manifest, mediatype.OCI1ManifestList: + // known manifest media types + md, err := manifest.New(manifest.WithDesc(d), manifest.WithRaw(b)) + if err != nil { + return err + } + return rc.imageImportOCIHandleManifest(ctx, r, md, trd, true, child) + case mediatype.Docker2ImageConfig, mediatype.OCI1ImageConfig, + mediatype.Docker2Layer, mediatype.Docker2LayerGzip, mediatype.Docker2LayerZstd, + mediatype.OCI1Layer, mediatype.OCI1LayerGzip, mediatype.OCI1LayerZstd, + mediatype.BuildkitCacheConfig: + // known blob media types + return rc.imageImportBlob(ctx, r, d, trd) + default: + // attempt manifest import, fall back to blob import + md, err := manifest.New(manifest.WithDesc(d), manifest.WithRaw(b)) + if err == nil { + return rc.imageImportOCIHandleManifest(ctx, r, md, trd, true, child) + } + return rc.imageImportBlob(ctx, r, d, trd) + } + } + } + return nil + } + + if !push { + mi, ok := m.(manifest.Indexer) + if !ok { + return fmt.Errorf("manifest doesn't support image methods%.0w", errs.ErrUnsupportedMediaType) + } + // for root index, add handler for matching reference (or only reference) + dl, err := 
mi.GetManifestList() + if err != nil { + return err + } + // locate the digest in the index + var d descriptor.Descriptor + if len(dl) == 1 { + d = dl[0] + } else if r.Digest != "" { + d.Digest = digest.Digest(r.Digest) + } else if trd.name != "" { + for _, cur := range dl { + if cur.Annotations[annotationRefName] == trd.name { + d = cur + break + } + } + if d.Digest.String() == "" { + return fmt.Errorf("could not find requested tag in index.json, %s", trd.name) + } + } else { + if r.Tag == "" { + r.Tag = "latest" + } + // if more than one digest is in the index, use the first matching tag + for _, cur := range dl { + if cur.Annotations[annotationRefName] == r.Tag { + d = cur + break + } + } + if d.Digest.String() == "" { + return fmt.Errorf("could not find requested tag in index.json, %s", r.Tag) + } + } + err = handleManifest(d, false) + if err != nil { + return err + } + // add a finish step to tag the selected digest + trd.finish = append(trd.finish, func() error { + mRef, ok := trd.manifests[d.Digest] + if !ok { + return fmt.Errorf("could not find manifest to tag, ref: %s, digest: %s", r.CommonName(), d.Digest) + } + return rc.ManifestPut(ctx, r, mRef) + }) + } else if m.IsList() { + // for index/manifest lists, add handlers for each embedded manifest + mi, ok := m.(manifest.Indexer) + if !ok { + return fmt.Errorf("manifest doesn't support index methods%.0w", errs.ErrUnsupportedMediaType) + } + dl, err := mi.GetManifestList() + if err != nil { + return err + } + for _, d := range dl { + err = handleManifest(d, true) + if err != nil { + return err + } + } + } else { + // else if a single image/manifest + mi, ok := m.(manifest.Imager) + if !ok { + return fmt.Errorf("manifest doesn't support image methods%.0w", errs.ErrUnsupportedMediaType) + } + // add handler for the config descriptor if it's defined + cd, err := mi.GetConfig() + if err == nil { + if err = cd.Digest.Validate(); err != nil { + return err + } + filename := tarOCILayoutDescPath(cd) + if 
!trd.processed[filename] && trd.handlers[filename] == nil { + func(cd descriptor.Descriptor) { + trd.handlers[filename] = func(header *tar.Header, trd *tarReadData) error { + return rc.imageImportBlob(ctx, r, cd, trd) + } + }(cd) + } + } + // add handlers for each layer + layers, err := mi.GetLayers() + if err != nil { + return err + } + for _, d := range layers { + if err = d.Digest.Validate(); err != nil { + return err + } + filename := tarOCILayoutDescPath(d) + if !trd.processed[filename] && trd.handlers[filename] == nil { + func(d descriptor.Descriptor) { + trd.handlers[filename] = func(header *tar.Header, trd *tarReadData) error { + return rc.imageImportBlob(ctx, r, d, trd) + } + }(d) + } + } + } + // add a finish func to push the manifest, this gets skipped for the index.json + if push { + trd.finish = append(trd.finish, func() error { + mRef := r + mRef.Digest = string(m.GetDescriptor().Digest) + _, err := rc.ManifestHead(ctx, mRef) + if err == nil { + return nil + } + opts := []ManifestOpts{} + if child { + opts = append(opts, WithManifestChild()) + } + return rc.ManifestPut(ctx, mRef, m, opts...) + }) + } + trd.handleAdded = true + return nil +} + +// imageImportOCIPushManifests uploads manifests after OCI blobs were successfully loaded. 
+func (rc *RegClient) imageImportOCIPushManifests(_ context.Context, _ ref.Ref, trd *tarReadData) error { + // run finish handlers in reverse order to upload nested manifests + for i := len(trd.finish) - 1; i >= 0; i-- { + err := trd.finish[i]() + if err != nil { + return err + } + } + return nil +} + +func imagePlatformInList(target *platform.Platform, list []string) (bool, error) { + // special case for an unset platform + if target == nil || target.OS == "" { + for _, entry := range list { + if entry == "" { + return true, nil + } + } + return false, nil + } + for _, entry := range list { + if entry == "" { + continue + } + plat, err := platform.Parse(entry) + if err != nil { + return false, err + } + if platform.Match(*target, plat) { + return true, nil + } + } + return false, nil +} + +// tarReadAll processes the tar file in a loop looking for matching filenames in the list of handlers. +// Handlers for filenames are added at the top level, and by manifest imports. +func (trd *tarReadData) tarReadAll(rs io.ReadSeeker) error { + // return immediately if nothing to do + if len(trd.handlers) == 0 { + return nil + } + for { + // reset back to beginning of tar file + _, err := rs.Seek(0, 0) + if err != nil { + return err + } + dr, err := archive.Decompress(rs) + if err != nil { + return err + } + trd.tr = tar.NewReader(dr) + trd.handleAdded = false + // loop over each entry of the tar file + for { + header, err := trd.tr.Next() + if err == io.EOF { + break + } else if err != nil { + return err + } + name := filepath.ToSlash(filepath.Clean(header.Name)) + // track symlinks + if header.Typeflag == tar.TypeSymlink || header.Typeflag == tar.TypeLink { + // normalize target relative to root of tar + target := header.Linkname + if !filepath.IsAbs(target) { + target, err = filepath.Rel(filepath.Dir(name), target) + if err != nil { + return err + } + } + target = filepath.ToSlash(filepath.Clean("/" + target)[1:]) + // track and set handleAdded if an existing handler points 
to the target + if trd.linkAdd(name, target) && !trd.handleAdded { + list, err := trd.linkList(target) + if err != nil { + return err + } + for _, src := range append(list, name) { + if trd.handlers[src] != nil { + trd.handleAdded = true + } + } + } + } else { + // loop through filename and symlinks to file in search of handlers + list, err := trd.linkList(name) + if err != nil { + return err + } + list = append(list, name) + trdUsed := false + for _, entry := range list { + if trd.handlers[entry] != nil { + // trd cannot be reused, force the loop to run again + if trdUsed { + trd.handleAdded = true + break + } + trdUsed = true + // run handler + err = trd.handlers[entry](header, trd) + if err != nil { + return err + } + delete(trd.handlers, entry) + trd.processed[entry] = true + // return if last handler processed + if len(trd.handlers) == 0 { + return nil + } + } + } + } + } + // if entire file read without adding a new handler, fail + if !trd.handleAdded { + return fmt.Errorf("unable to read all files from tar: %w", errs.ErrNotFound) + } + } +} + +func (trd *tarReadData) linkAdd(src, tgt string) bool { + for _, entry := range trd.links[tgt] { + if entry == src { + return false + } + } + trd.links[tgt] = append(trd.links[tgt], src) + return true +} + +func (trd *tarReadData) linkList(tgt string) ([]string, error) { + list := trd.links[tgt] + for _, entry := range list { + if entry == tgt { + return nil, fmt.Errorf("symlink loop encountered for %s", tgt) + } + list = append(list, trd.links[entry]...) + } + return list, nil +} + +// tarReadFileJSON reads the current tar entry and unmarshals json into provided interface. 
+func (trd *tarReadData) tarReadFileJSON(data interface{}) error { + b, err := io.ReadAll(trd.tr) + if err != nil { + return err + } + err = json.Unmarshal(b, data) + if err != nil { + return err + } + return nil +} + +var errTarFileExists = errors.New("tar file already exists") + +func (td *tarWriteData) tarWriteHeader(filename string, size int64) error { + dirName := filepath.ToSlash(filepath.Dir(filename)) + if !td.dirs[dirName] && dirName != "." { + dirSplit := strings.Split(dirName, "/") + for i := range dirSplit { + dirJoin := strings.Join(dirSplit[:i+1], "/") + if !td.dirs[dirJoin] && dirJoin != "" { + header := tar.Header{ + Format: tar.FormatPAX, + Typeflag: tar.TypeDir, + Name: dirJoin + "/", + Size: 0, + Mode: td.mode | 0511, + ModTime: td.timestamp, + AccessTime: td.timestamp, + ChangeTime: td.timestamp, + } + err := td.tw.WriteHeader(&header) + if err != nil { + return err + } + td.dirs[dirJoin] = true + } + } + } + if td.files[filename] { + return fmt.Errorf("%w: %s", errTarFileExists, filename) + } + td.files[filename] = true + header := tar.Header{ + Format: tar.FormatPAX, + Typeflag: tar.TypeReg, + Name: filename, + Size: size, + Mode: td.mode | 0400, + ModTime: td.timestamp, + AccessTime: td.timestamp, + ChangeTime: td.timestamp, + } + return td.tw.WriteHeader(&header) +} + +func (td *tarWriteData) tarWriteFileJSON(filename string, data interface{}) error { + dataJSON, err := json.Marshal(data) + if err != nil { + return err + } + err = td.tarWriteHeader(filename, int64(len(dataJSON))) + if err != nil { + return err + } + _, err = td.tw.Write(dataJSON) + if err != nil { + return err + } + return nil +} + +func tarOCILayoutDescPath(d descriptor.Descriptor) string { + return fmt.Sprintf("blobs/%s/%s", d.Digest.Algorithm(), d.Digest.Encoded()) +} diff --git a/vendor/github.com/regclient/regclient/internal/auth/auth.go b/vendor/github.com/regclient/regclient/internal/auth/auth.go new file mode 100644 index 00000000..e05488f8 --- /dev/null +++ 
b/vendor/github.com/regclient/regclient/internal/auth/auth.go @@ -0,0 +1,834 @@ +// Package auth is used for HTTP authentication +package auth + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "log/slog" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/regclient/regclient/types/errs" +) + +type charLU byte + +var charLUs [256]charLU + +var defaultClientID = "regclient" + +// minTokenLife tokens are required to last at least 60 seconds to support older docker clients +var minTokenLife = 60 + +// tokenBuffer is used to renew a token before it expires to account for time to process requests on the server +var tokenBuffer = time.Second * 5 + +const ( + isSpace charLU = 1 << iota + isToken +) + +func init() { + for c := 0; c < 256; c++ { + charLUs[c] = 0 + if strings.ContainsRune(" \t\r\n", rune(c)) { + charLUs[c] |= isSpace + } + if (rune('a') <= rune(c) && rune(c) <= rune('z')) || (rune('A') <= rune(c) && rune(c) <= rune('Z') || (rune('0') <= rune(c) && rune(c) <= rune('9')) || strings.ContainsRune("-._~+/", rune(c))) { + charLUs[c] |= isToken + } + } +} + +// CredsFn is passed to lookup credentials for a given hostname, response is a username and password or empty strings +type CredsFn func(host string) Cred + +// Cred is returned by the CredsFn. +// If Token is provided and auth method is bearer, it will attempt to use it as a refresh token. +// Else if user and password are provided, they are attempted with all auth methods. +// Else if neither are provided and auth method is bearer, an anonymous login is attempted. +type Cred struct { + User, Password string // clear text username and password + Token string // refresh token only used for bearer auth +} + +// challenge is the extracted contents of the WWW-Authenticate header. 
+type challenge struct { + authType string + params map[string]string +} + +// handler handles a challenge for a host to return an auth header +type handler interface { + AddScope(scope string) error + ProcessChallenge(challenge) error + GenerateAuth() (string, error) +} + +// handlerBuild is used to make a new handler for a specific authType and URL +type handlerBuild func(client *http.Client, clientID, host string, credFn CredsFn, slog *slog.Logger) handler + +// Opts configures options for NewAuth +type Opts func(*Auth) + +// Auth is used to handle authentication requests. +type Auth struct { + httpClient *http.Client + clientID string + credsFn CredsFn + hbs map[string]handlerBuild // handler builders based on authType + hs map[string]map[string]handler // handlers based on url and authType + authTypes []string + slog *slog.Logger + mu sync.Mutex +} + +// NewAuth creates a new Auth +func NewAuth(opts ...Opts) *Auth { + a := &Auth{ + httpClient: &http.Client{}, + clientID: defaultClientID, + credsFn: DefaultCredsFn, + hbs: map[string]handlerBuild{}, + hs: map[string]map[string]handler{}, + authTypes: []string{}, + slog: slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{})), + } + + for _, opt := range opts { + opt(a) + } + + if len(a.authTypes) == 0 { + a.addDefaultHandlers() + } + + return a +} + +// WithCreds provides a user/pass lookup for a url +func WithCreds(f CredsFn) Opts { + return func(a *Auth) { + if f != nil { + a.credsFn = f + } + } +} + +// WithHTTPClient uses a specific http client with requests +func WithHTTPClient(h *http.Client) Opts { + return func(a *Auth) { + if h != nil { + a.httpClient = h + } + } +} + +// WithClientID uses a client ID with request headers +func WithClientID(clientID string) Opts { + return func(a *Auth) { + a.clientID = clientID + } +} + +// WithHandler includes a handler for a specific auth type +func WithHandler(authType string, hb handlerBuild) Opts { + return func(a *Auth) { + lcat := 
strings.ToLower(authType) + a.hbs[lcat] = hb + a.authTypes = append(a.authTypes, lcat) + } +} + +// WithDefaultHandlers includes a Basic and Bearer handler, this is automatically added with "WithHandler" is not called +func WithDefaultHandlers() Opts { + return func(a *Auth) { + a.addDefaultHandlers() + } +} + +// WithLog injects a Logger +func WithLog(slog *slog.Logger) Opts { + return func(a *Auth) { + a.slog = slog + } +} + +// AddScope extends an existing auth with additional scopes. +// This is used to pre-populate scopes with the Docker convention rather than +// depend on the registry to respond with the correct http status and headers. +func (a *Auth) AddScope(host, scope string) error { + a.mu.Lock() + defer a.mu.Unlock() + success := false + if a.hs[host] == nil { + return ErrNoNewChallenge + } + for _, at := range a.authTypes { + if a.hs[host][at] != nil { + err := a.hs[host][at].AddScope(scope) + if err == nil { + success = true + } else if err != ErrNoNewChallenge { + return err + } + } + } + if !success { + return ErrNoNewChallenge + } + a.slog.Debug("Auth scope added", + slog.String("host", host), + slog.String("scope", scope)) + return nil +} + +// HandleResponse parses the 401 response, extracting the WWW-Authenticate +// header and verifying the requirement is different from what was included in +// the last request +func (a *Auth) HandleResponse(resp *http.Response) error { + a.mu.Lock() + defer a.mu.Unlock() + // verify response is an access denied + if resp.StatusCode != http.StatusUnauthorized { + return ErrUnsupported + } + + // extract host and auth header + host := resp.Request.URL.Host + cl, err := ParseAuthHeaders(resp.Header.Values("WWW-Authenticate")) + if err != nil { + return err + } + a.slog.Debug("Auth request parsed", + slog.Any("challenge", cl)) + if len(cl) < 1 { + return ErrEmptyChallenge + } + goodChallenge := false + // loop over the received challenge(s) + for _, c := range cl { + if _, ok := a.hbs[c.authType]; !ok { + 
a.slog.Warn("Unsupported auth type", + slog.String("authtype", c.authType)) + continue + } + // setup a handler for the host and auth type + if _, ok := a.hs[host]; !ok { + a.hs[host] = map[string]handler{} + } + if _, ok := a.hs[host][c.authType]; !ok { + h := a.hbs[c.authType](a.httpClient, a.clientID, host, a.credsFn, a.slog) + if h == nil { + continue + } + a.hs[host][c.authType] = h + } + // process the challenge with that handler + err := a.hs[host][c.authType].ProcessChallenge(c) + if err == nil { + goodChallenge = true + } else if err == ErrNoNewChallenge { + // handle race condition when another request updates the challenge + // detect that by seeing the current auth header is different + prevAH := resp.Request.Header.Get("Authorization") + ah, err := a.hs[host][c.authType].GenerateAuth() + if err == nil && prevAH != ah { + goodChallenge = true + } + } else { + return err + } + } + if !goodChallenge { + return ErrUnauthorized + } + + return nil +} + +// UpdateRequest adds Authorization headers to a request +func (a *Auth) UpdateRequest(req *http.Request) error { + a.mu.Lock() + defer a.mu.Unlock() + host := req.URL.Host + if a.hs[host] == nil { + return nil + } + var err error + var ah string + for _, at := range a.authTypes { + if a.hs[host][at] != nil { + ah, err = a.hs[host][at].GenerateAuth() + if err != nil { + a.slog.Debug("Failed to generate auth", + slog.String("err", err.Error()), + slog.String("host", host), + slog.String("authtype", at)) + continue + } + req.Header.Set("Authorization", ah) + break + } + } + if err != nil { + return err + } + return nil +} + +func (a *Auth) addDefaultHandlers() { + if _, ok := a.hbs["basic"]; !ok { + a.hbs["basic"] = NewBasicHandler + a.authTypes = append(a.authTypes, "basic") + } + if _, ok := a.hbs["bearer"]; !ok { + a.hbs["bearer"] = NewBearerHandler + a.authTypes = append(a.authTypes, "bearer") + } +} + +// DefaultCredsFn is used to return no credentials when auth is not configured with a CredsFn +// This 
avoids the need to check for nil pointers +func DefaultCredsFn(h string) Cred { + return Cred{} +} + +// ParseAuthHeaders extracts the scheme and realm from WWW-Authenticate headers +func ParseAuthHeaders(ahl []string) ([]challenge, error) { + var cl []challenge + for _, ah := range ahl { + c, err := parseAuthHeader(ah) + if err != nil { + return nil, fmt.Errorf("failed to parse challenge header: %s, %w", ah, err) + } + cl = append(cl, c...) + } + return cl, nil +} + +// parseAuthHeader parses a single header line for WWW-Authenticate +// Example values: +// Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push" +// Basic realm="GitHub Package Registry" +func parseAuthHeader(ah string) ([]challenge, error) { + var cl []challenge + var c *challenge + var eb, atb, kb, vb []byte // eb is element bytes, atb auth type, kb key, vb value + state := "string" + + for _, b := range []byte(ah) { + switch state { + case "string": + if len(eb) == 0 { + // beginning of string + if b == '"' { // TODO: Invalid? 
+ state = "quoted" + } else if charLUs[b]&isToken != 0 { + // read any token + eb = append(eb, b) + } else if charLUs[b]&isSpace != 0 { + // ignore leading whitespace + } else { + // unknown leading char + return nil, ErrParseFailure + } + } else { + if charLUs[b]&isToken != 0 { + // read any token + eb = append(eb, b) + } else if b == '=' && len(atb) > 0 { + // equals when authtype is defined makes this a key + kb = eb + eb = []byte{} + state = "value" + } else if charLUs[b]&isSpace != 0 { + // space ends the element + atb = eb + eb = []byte{} + c = &challenge{authType: strings.ToLower(string(atb)), params: map[string]string{}} + cl = append(cl, *c) + } else { + // unknown char + return nil, ErrParseFailure + } + } + + case "value": + if charLUs[b]&isToken != 0 { + // read any token + vb = append(vb, b) + } else if b == '"' && len(vb) == 0 { + // quoted value + state = "quoted" + } else if charLUs[b]&isSpace != 0 || b == ',' { + // space or comma ends the value + c.params[strings.ToLower(string(kb))] = string(vb) + kb = []byte{} + vb = []byte{} + if b == ',' { + state = "string" + } else { + state = "endvalue" + } + } else { + // unknown char + return nil, ErrParseFailure + } + + case "quoted": + if b == '"' { + // end quoted string + c.params[strings.ToLower(string(kb))] = string(vb) + kb = []byte{} + vb = []byte{} + state = "endvalue" + } else if b == '\\' { + state = "escape" + } else { + // all other bytes in a quoted string are taken as-is + vb = append(vb, b) + } + + case "endvalue": + if charLUs[b]&isSpace != 0 { + // ignore leading whitespace + } else if b == ',' { + // expect a comma separator, return to start of a string + state = "string" + } else { + // unknown char + return nil, ErrParseFailure + } + + case "escape": + vb = append(vb, b) + state = "quoted" + + default: + return nil, ErrParseFailure + } + } + + // process any content left at end of string, and handle any unfinished sections + switch state { + case "string": + if len(eb) != 0 { + atb = 
eb + c = &challenge{authType: strings.ToLower(string(atb)), params: map[string]string{}} + cl = append(cl, *c) + } + case "value": + if len(vb) != 0 { + c.params[strings.ToLower(string(kb))] = string(vb) + } + case "quoted", "escape": + return nil, ErrParseFailure + } + + return cl, nil +} + +// basicHandler supports Basic auth type requests +type basicHandler struct { + realm string + host string + credsFn CredsFn +} + +// NewBasicHandler creates a new BasicHandler +func NewBasicHandler(client *http.Client, clientID, host string, credsFn CredsFn, slog *slog.Logger) handler { + return &basicHandler{ + realm: "", + host: host, + credsFn: credsFn, + } +} + +// AddScope is not valid for BasicHandler +func (b *basicHandler) AddScope(scope string) error { + return ErrNoNewChallenge +} + +// ProcessChallenge for BasicHandler is a noop +func (b *basicHandler) ProcessChallenge(c challenge) error { + if _, ok := c.params["realm"]; !ok { + return ErrInvalidChallenge + } + if b.realm != c.params["realm"] { + b.realm = c.params["realm"] + return nil + } + return ErrNoNewChallenge +} + +// GenerateAuth for BasicHandler generates base64 encoded user/pass for a host +func (b *basicHandler) GenerateAuth() (string, error) { + cred := b.credsFn(b.host) + if cred.User == "" || cred.Password == "" { + return "", fmt.Errorf("no credentials available: %w", errs.ErrHTTPUnauthorized) + } + auth := base64.StdEncoding.EncodeToString([]byte(cred.User + ":" + cred.Password)) + return fmt.Sprintf("Basic %s", auth), nil +} + +// bearerHandler supports Bearer auth type requests +type bearerHandler struct { + client *http.Client + clientID string + realm, service string + host string + credsFn CredsFn + scopes []string + token bearerToken + slog *slog.Logger +} + +// bearerToken is the json response to the Bearer request +type bearerToken struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + 
RefreshToken string `json:"refresh_token"` + Scope string `json:"scope"` +} + +// NewBearerHandler creates a new BearerHandler +func NewBearerHandler(client *http.Client, clientID, host string, credsFn CredsFn, slog *slog.Logger) handler { + return &bearerHandler{ + client: client, + clientID: clientID, + host: host, + credsFn: credsFn, + realm: "", + service: "", + scopes: []string{}, + slog: slog, + } +} + +// AddScope appends a new scope if it doesn't already exist +func (b *bearerHandler) AddScope(scope string) error { + if b.scopeExists(scope) { + if b.token.Token == "" || !b.isExpired() { + return ErrNoNewChallenge + } + return nil + } + return b.addScope(scope) +} + +func (b *bearerHandler) addScope(scope string) error { + replaced := false + for i, cur := range b.scopes { + // extend an existing scope with more actions + if strings.HasPrefix(scope, cur+",") { + b.scopes[i] = scope + replaced = true + break + } + } + if !replaced { + b.scopes = append(b.scopes, scope) + } + // delete any scope specific or invalid tokens + b.token.Token = "" + b.token.RefreshToken = "" + return nil +} + +// ProcessChallenge handles WWW-Authenticate header for bearer tokens +// Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push" +func (b *bearerHandler) ProcessChallenge(c challenge) error { + if _, ok := c.params["realm"]; !ok { + return ErrInvalidChallenge + } + if _, ok := c.params["service"]; !ok { + c.params["service"] = "" + } + if _, ok := c.params["scope"]; !ok { + c.params["scope"] = "" + } + + existingScope := b.scopeExists(c.params["scope"]) + + if b.realm == c.params["realm"] && b.service == c.params["service"] && existingScope && (b.token.Token == "" || !b.isExpired()) { + return ErrNoNewChallenge + } + + if b.realm == "" { + b.realm = c.params["realm"] + } else if b.realm != c.params["realm"] { + return ErrInvalidChallenge + } + if b.service == "" { + b.service = c.params["service"] + } else if 
b.service != c.params["service"] { + return ErrInvalidChallenge + } + if !existingScope { + return b.addScope(c.params["scope"]) + } + return nil +} + +// GenerateAuth for BasicHandler generates base64 encoded user/pass for a host +func (b *bearerHandler) GenerateAuth() (string, error) { + // if unexpired token already exists, return it + if b.token.Token != "" && !b.isExpired() { + return fmt.Sprintf("Bearer %s", b.token.Token), nil + } + + // attempt to post with oauth form, this also uses refresh tokens + if err := b.tryPost(); err == nil { + return fmt.Sprintf("Bearer %s", b.token.Token), nil + } else if err != ErrUnauthorized { + return "", fmt.Errorf("failed to request auth token (post): %w%.0w", err, errs.ErrHTTPUnauthorized) + } + + // attempt a get (with basic auth if user/pass available) + if err := b.tryGet(); err == nil { + return fmt.Sprintf("Bearer %s", b.token.Token), nil + } else if err != ErrUnauthorized { + return "", fmt.Errorf("failed to request auth token (get): %w%.0w", err, errs.ErrHTTPUnauthorized) + } + + return "", ErrUnauthorized +} + +// isExpired returns true when token issue date is either 0, token has expired, +// or will expire within buffer time +func (b *bearerHandler) isExpired() bool { + if b.token.IssuedAt.IsZero() { + return true + } + expireSec := b.token.IssuedAt.Add(time.Duration(b.token.ExpiresIn) * time.Second) + expireSec = expireSec.Add(tokenBuffer * -1) + return time.Now().After(expireSec) +} + +// tryGet requests a new token with a GET request +func (b *bearerHandler) tryGet() error { + cred := b.credsFn(b.host) + req, err := http.NewRequest("GET", b.realm, nil) + if err != nil { + return err + } + + reqParams := req.URL.Query() + reqParams.Add("client_id", b.clientID) + reqParams.Add("offline_token", "true") + if b.service != "" { + reqParams.Add("service", b.service) + } + + for _, s := range b.scopes { + reqParams.Add("scope", s) + } + + if cred.User != "" && cred.Password != "" { + reqParams.Add("account", 
cred.User) + req.SetBasicAuth(cred.User, cred.Password) + } + + req.Header.Add("User-Agent", b.clientID) + req.URL.RawQuery = reqParams.Encode() + + resp, err := b.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + return b.validateResponse(resp) +} + +// tryPost requests a new token via a POST request +func (b *bearerHandler) tryPost() error { + cred := b.credsFn(b.host) + form := url.Values{} + if len(b.scopes) > 0 { + form.Set("scope", strings.Join(b.scopes, " ")) + } + if b.service != "" { + form.Set("service", b.service) + } + form.Set("client_id", b.clientID) + if b.token.RefreshToken != "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", b.token.RefreshToken) + } else if cred.Token != "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", cred.Token) + } else if cred.User != "" && cred.Password != "" { + form.Set("grant_type", "password") + form.Set("username", cred.User) + form.Set("password", cred.Password) + } + + req, err := http.NewRequest("POST", b.realm, strings.NewReader(form.Encode())) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + req.Header.Add("User-Agent", b.clientID) + + resp, err := b.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + return b.validateResponse(resp) +} + +// scopeExists check if the scope already exists within the list of scopes +func (b *bearerHandler) scopeExists(search string) bool { + if search == "" { + return true + } + for _, scope := range b.scopes { + // allow scopes with additional actions, search for pull should match pull,push + if scope == search || strings.HasPrefix(scope, search+",") { + return true + } + } + return false +} + +// validateResponse extracts the returned token +func (b *bearerHandler) validateResponse(resp *http.Response) error { + if resp.StatusCode != 200 { + return ErrUnauthorized + } + + // decode response and if 
successful, update token + decoder := json.NewDecoder(resp.Body) + decoded := bearerToken{} + if err := decoder.Decode(&decoded); err != nil { + return err + } + b.token = decoded + + if b.token.ExpiresIn < minTokenLife { + b.token.ExpiresIn = minTokenLife + } + + // If token is already expired, it was sent with a zero value or + // there may be a clock skew between the client and auth server. + // Also handle cases of remote time in the future. + // But if remote time is slightly in the past, leave as is so token + // expires here before the server. + if b.isExpired() || b.token.IssuedAt.After(time.Now()) { + b.token.IssuedAt = time.Now().UTC() + } + + // AccessToken and Token should be the same and we use Token elsewhere + if b.token.AccessToken != "" { + b.token.Token = b.token.AccessToken + } + + return nil +} + +// jwtHubHandler supports JWT auth type requests. +type jwtHubHandler struct { + client *http.Client + clientID string + realm string + host string + credsFn CredsFn + jwt string +} + +type jwtHubPost struct { + User string `json:"username"` + Pass string `json:"password"` +} +type jwtHubResp struct { + Detail string `json:"detail"` + Token string `json:"token"` + RefreshToken string `json:"refresh_token"` +} + +// NewJWTHubHandler creates a new JWTHandler for Docker Hub. 
+func NewJWTHubHandler(client *http.Client, clientID, host string, credsFn CredsFn, slog *slog.Logger) handler { + // JWT handler is only tested against Hub, and the API is Hub specific + if host == "hub.docker.com" { + return &jwtHubHandler{ + client: client, + clientID: clientID, + host: host, + credsFn: credsFn, + realm: "https://hub.docker.com/v2/users/login", + } + } + return nil +} + +// AddScope is not valid for JWTHubHandler +func (j *jwtHubHandler) AddScope(scope string) error { + return ErrNoNewChallenge +} + +// ProcessChallenge handles WWW-Authenticate header for JWT auth on Docker Hub +func (j *jwtHubHandler) ProcessChallenge(c challenge) error { + cred := j.credsFn(j.host) + // use token if provided + if cred.Token != "" { + j.jwt = cred.Token + return nil + } + + // send a login request to hub + bodyBytes, err := json.Marshal(jwtHubPost{ + User: cred.User, + Pass: cred.Password, + }) + if err != nil { + return err + } + + req, err := http.NewRequest("POST", j.realm, bytes.NewReader(bodyBytes)) + if err != nil { + return err + } + req.Header.Add("Content-Type", "application/json") + req.Header.Add("Accept", "application/json") + req.Header.Add("User-Agent", j.clientID) + + resp, err := j.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + body, _ := io.ReadAll(resp.Body) + + if resp.StatusCode != 200 || resp.StatusCode >= 300 { + return ErrUnauthorized + } + + var bodyParsed jwtHubResp + err = json.Unmarshal(body, &bodyParsed) + if err != nil { + return err + } + j.jwt = bodyParsed.Token + + return nil +} + +// GenerateAuth for JWTHubHandler adds JWT header +func (j *jwtHubHandler) GenerateAuth() (string, error) { + if len(j.jwt) > 0 { + return fmt.Sprintf("JWT %s", j.jwt), nil + } + return "", ErrUnauthorized +} diff --git a/vendor/github.com/regclient/regclient/internal/auth/error.go b/vendor/github.com/regclient/regclient/internal/auth/error.go new file mode 100644 index 00000000..2ba7e3b8 --- /dev/null +++ 
b/vendor/github.com/regclient/regclient/internal/auth/error.go @@ -0,0 +1,40 @@ +package auth + +import ( + "github.com/regclient/regclient/types/errs" +) + +var ( + // ErrEmptyChallenge indicates an issue with the received challenge in the WWW-Authenticate header + // + // Deprecated: replace with [errs.ErrEmptyChallenge]. + ErrEmptyChallenge = errs.ErrEmptyChallenge + // ErrInvalidChallenge indicates an issue with the received challenge in the WWW-Authenticate header + // + // Deprecated: replace with [errs.ErrInvalidChallenge]. + ErrInvalidChallenge = errs.ErrInvalidChallenge + // ErrNoNewChallenge indicates a challenge update did not result in any change + // + // Deprecated: replace with [errs.ErrNoNewChallenge]. + ErrNoNewChallenge = errs.ErrNoNewChallenge + // ErrNotFound indicates no credentials found for basic auth + // + // Deprecated: replace with [errs.ErrNotFound]. + ErrNotFound = errs.ErrNotFound + // ErrNotImplemented returned when method has not been implemented yet + // + // Deprecated: replace with [errs.ErrNotImplemented]. + ErrNotImplemented = errs.ErrNotImplemented + // ErrParseFailure indicates the WWW-Authenticate header could not be parsed + // + // Deprecated: replace with [errs.ErrParseFailure]. + ErrParseFailure = errs.ErrParsingFailed + // ErrUnauthorized request was not authorized + // + // Deprecated: replace with [errs.ErrUnauthorized]. + ErrUnauthorized = errs.ErrHTTPUnauthorized + // ErrUnsupported indicates the request was unsupported + // + // Deprecated: replace with [errs.ErrUnsupported]. + ErrUnsupported = errs.ErrUnsupported +) diff --git a/vendor/github.com/regclient/regclient/internal/cache/cache.go b/vendor/github.com/regclient/regclient/internal/cache/cache.go new file mode 100644 index 00000000..4bc4523e --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/cache/cache.go @@ -0,0 +1,182 @@ +//go:build go1.18 +// +build go1.18 + +// Package cache is used to store values with limits. 
+// Items are automatically pruned when too many entries are stored, or values become stale. +package cache + +import ( + "sort" + "sync" + "time" + + "github.com/regclient/regclient/types/errs" +) + +type Cache[k comparable, v any] struct { + mu sync.Mutex + minAge time.Duration + maxAge time.Duration + minCount int + maxCount int + timer *time.Timer + entries map[k]*Entry[v] +} + +type Entry[v any] struct { + used time.Time + value v +} + +type sortKeys[k comparable] struct { + keys []k + lessFn func(a, b k) bool +} + +type conf struct { + minAge time.Duration + maxCount int +} + +type cacheOpts func(*conf) + +func WithAge(age time.Duration) cacheOpts { + return func(c *conf) { + c.minAge = age + } +} + +func WithCount(count int) cacheOpts { + return func(c *conf) { + c.maxCount = count + } +} + +func New[k comparable, v any](opts ...cacheOpts) Cache[k, v] { + c := conf{} + for _, opt := range opts { + opt(&c) + } + maxAge := c.minAge + (c.minAge / 10) + minCount := 0 + if c.maxCount > 0 { + minCount = int(float64(c.maxCount) * 0.9) + } + return Cache[k, v]{ + minAge: c.minAge, + maxAge: maxAge, + minCount: minCount, + maxCount: c.maxCount, + entries: map[k]*Entry[v]{}, + } +} + +func (c *Cache[k, v]) Delete(key k) { + if c == nil { + return + } + c.mu.Lock() + defer c.mu.Unlock() + delete(c.entries, key) + if len(c.entries) == 0 && c.timer != nil { + c.timer.Stop() + c.timer = nil + } +} + +func (c *Cache[k, v]) Set(key k, val v) { + if c == nil { + return + } + c.mu.Lock() + defer c.mu.Unlock() + c.entries[key] = &Entry[v]{ + used: time.Now(), + value: val, + } + if len(c.entries) > c.maxCount { + c.pruneLocked() + } else if c.timer == nil { + // prune resets the timer, so this is only needed if the prune wasn't triggered + c.timer = time.AfterFunc(c.maxAge, c.prune) + } +} + +func (c *Cache[k, v]) Get(key k) (v, error) { + if c == nil { + var val v + return val, errs.ErrNotFound + } + c.mu.Lock() + defer c.mu.Unlock() + if e, ok := c.entries[key]; ok { + if 
e.used.Add(c.minAge).Before(time.Now()) { + // entry expired + go c.prune() + } else { + c.entries[key].used = time.Now() + return e.value, nil + } + } + var val v + return val, errs.ErrNotFound +} + +func (c *Cache[k, v]) prune() { + c.mu.Lock() + defer c.mu.Unlock() + c.pruneLocked() +} + +func (c *Cache[k, v]) pruneLocked() { + // sort key list by last used date + keyList := make([]k, 0, len(c.entries)) + for key := range c.entries { + keyList = append(keyList, key) + } + sk := sortKeys[k]{ + keys: keyList, + lessFn: func(a, b k) bool { + return c.entries[a].used.Before(c.entries[b].used) + }, + } + sort.Sort(&sk) + // prune entries + now := time.Now() + cutoff := now.Add(c.minAge * -1) + nextTime := now + delCount := len(keyList) - c.minCount + for i, key := range keyList { + if i < delCount || c.entries[key].used.Before(cutoff) { + delete(c.entries, key) + } else { + nextTime = c.entries[key].used + break + } + } + // set next timer + if len(c.entries) > 0 { + dur := nextTime.Sub(now) + c.maxAge + if c.timer == nil { + // this shouldn't be possible + c.timer = time.AfterFunc(dur, c.prune) + } else { + c.timer.Reset(dur) + } + } else if c.timer != nil { + c.timer.Stop() + c.timer = nil + } +} + +func (sk *sortKeys[k]) Len() int { + return len(sk.keys) +} + +func (sk *sortKeys[k]) Less(i, j int) bool { + return sk.lessFn(sk.keys[i], sk.keys[j]) +} + +func (sk *sortKeys[k]) Swap(i, j int) { + sk.keys[i], sk.keys[j] = sk.keys[j], sk.keys[i] +} diff --git a/vendor/github.com/regclient/regclient/internal/conffile/conffile.go b/vendor/github.com/regclient/regclient/internal/conffile/conffile.go new file mode 100644 index 00000000..433645a8 --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/conffile/conffile.go @@ -0,0 +1,135 @@ +// Package conffile wraps the read and write of configuration files +package conffile + +import ( + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" +) + +type File struct { + perms int + fullname string +} + +type 
Opt func(*File) + +// New returns a new File +func New(opts ...Opt) *File { + f := File{perms: 0600} + for _, fn := range opts { + fn(&f) + } + if f.fullname == "" { + return nil + } + return &f +} + +// WithDirName determines the filename from a subdirectory in the user's HOME +// e.g. dir=".app", name="config.json", sets the fullname to "$HOME/.app/config.json" +func WithDirName(dir, name string) Opt { + return func(f *File) { + f.fullname = filepath.Join(homedir(), dir, name) + } +} + +// WithEnvFile sets the fullname to the environment value if defined +func WithEnvFile(envVar string) Opt { + return func(f *File) { + val := os.Getenv(envVar) + if val != "" { + f.fullname = val + } + } +} + +// WithEnvDir sets the fullname to the environment value + filename if the environment variable is defined +func WithEnvDir(envVar, name string) Opt { + return func(f *File) { + val := os.Getenv(envVar) + if val != "" { + f.fullname = filepath.Join(val, name) + } + } +} + +// WithFullname specifies the filename +func WithFullname(fullname string) Opt { + return func(f *File) { + f.fullname = fullname + } +} + +// WithPerms specifies the permissions to create a file with (default 0600) +func WithPerms(perms int) Opt { + return func(f *File) { + f.perms = perms + } +} + +func (f *File) Name() string { + return f.fullname +} + +func (f *File) Open() (io.ReadCloser, error) { + return os.Open(f.fullname) +} + +func (f *File) Write(rdr io.Reader) error { + // create temp file/open + dir := filepath.Dir(f.fullname) + if err := os.MkdirAll(dir, 0700); err != nil { + return err + } + tmp, err := os.CreateTemp(dir, filepath.Base(f.fullname)) + if err != nil { + return err + } + tmpStat, err := tmp.Stat() + if err != nil { + return err + } + tmpName := tmpStat.Name() + tmpFullname := filepath.Join(dir, tmpName) + defer os.Remove(tmpFullname) + + // copy from rdr to temp file + _, err = io.Copy(tmp, rdr) + errC := tmp.Close() + if err != nil { + return fmt.Errorf("failed to write 
config: %w", err) + } + if errC != nil { + return fmt.Errorf("failed to close config: %w", errC) + } + + // adjust file ownership/permissions + mode := os.FileMode(0600) + uid := os.Getuid() + gid := os.Getgid() + // adjust defaults based on existing file if available + stat, err := os.Stat(f.fullname) + if err == nil { + // adjust mode to existing file + if stat.Mode().IsRegular() { + mode = stat.Mode() + } + uid, gid, _ = getFileOwner(stat) + } else if !errors.Is(err, fs.ErrNotExist) { + return err + } + + // update mode and owner of temp file + if err := os.Chmod(tmpFullname, mode); err != nil { + return err + } + if uid > 0 && gid > 0 { + _ = os.Chown(tmpFullname, uid, gid) + } + // move temp file to target filename + return os.Rename(tmpFullname, f.fullname) +} diff --git a/vendor/github.com/regclient/regclient/internal/conffile/conffile_unix.go b/vendor/github.com/regclient/regclient/internal/conffile/conffile_unix.go new file mode 100644 index 00000000..1b1dca47 --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/conffile/conffile_unix.go @@ -0,0 +1,20 @@ +//go:build !windows +// +build !windows + +package conffile + +import ( + "io/fs" + "syscall" +) + +const homeEnv = "HOME" + +func getFileOwner(stat fs.FileInfo) (int, int, error) { + var uid, gid int + if sysstat, ok := stat.Sys().(*syscall.Stat_t); ok { + uid = int(sysstat.Uid) + gid = int(sysstat.Gid) + } + return uid, gid, nil +} diff --git a/vendor/github.com/regclient/regclient/internal/conffile/conffile_windows.go b/vendor/github.com/regclient/regclient/internal/conffile/conffile_windows.go new file mode 100644 index 00000000..eb2f5558 --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/conffile/conffile_windows.go @@ -0,0 +1,14 @@ +//go:build windows +// +build windows + +package conffile + +import ( + "io/fs" +) + +const homeEnv = "USERPROFILE" + +func getFileOwner(stat fs.FileInfo) (int, int, error) { + return 0, 0, nil +} diff --git 
a/vendor/github.com/regclient/regclient/internal/conffile/homedir.go b/vendor/github.com/regclient/regclient/internal/conffile/homedir.go new file mode 100644 index 00000000..782cddc6 --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/conffile/homedir.go @@ -0,0 +1,17 @@ +package conffile + +import ( + "os" + "os/user" +) + +func homedir() string { + home := os.Getenv(homeEnv) + if home == "" { + u, err := user.Current() + if err == nil { + home = u.HomeDir + } + } + return home +} diff --git a/vendor/github.com/regclient/regclient/internal/httplink/httplink.go b/vendor/github.com/regclient/regclient/internal/httplink/httplink.go new file mode 100644 index 00000000..b747cfd2 --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/httplink/httplink.go @@ -0,0 +1,196 @@ +// Package httplink parses the Link header from HTTP responses according to RFC5988 +package httplink + +import ( + "fmt" + "strings" + + "github.com/regclient/regclient/types/errs" +) + +type Links []Link +type Link struct { + URI string + Param map[string]string +} + +type charLU byte + +var charLUs [256]charLU + +const ( + isSpace charLU = 1 << iota + isToken + isAlphaNum +) + +func init() { + for c := 0; c < 256; c++ { + charLUs[c] = 0 + if strings.ContainsRune(" \t\r\n", rune(c)) { + charLUs[c] |= isSpace + } + if (rune('a') <= rune(c) && rune(c) <= rune('z')) || (rune('A') <= rune(c) && rune(c) <= rune('Z') || (rune('0') <= rune(c) && rune(c) <= rune('9'))) { + charLUs[c] |= isAlphaNum | isToken + } + if strings.ContainsRune("!#$%&'()*+-./:<=>?@[]^_`{|}~", rune(c)) { + charLUs[c] |= isToken + } + } +} + +// Parse reads "Link" http headers into an array of Link structs. +// Header array should be the output of resp.Header.Values("link"). 
+func Parse(headers []string) (Links, error) { + links := []Link{} + for _, h := range headers { + state := "init" + var ub, pnb, pvb []byte + parms := map[string]string{} + endLink := func() { + links = append(links, Link{ + URI: string(ub), + Param: parms, + }) + // reset state + ub, pnb, pvb = []byte{}, []byte{}, []byte{} + parms = map[string]string{} + } + endParm := func() { + if _, ok := parms[string(pnb)]; !ok { + parms[string(pnb)] = string(pvb) + } + // reset parm + pnb, pvb = []byte{}, []byte{} + } + for i, b := range []byte(h) { + switch state { + case "init": + if b == '<' { + state = "uriQuoted" + } else if charLUs[b]&isToken != 0 { + state = "uri" + ub = append(ub, b) + } else if charLUs[b]&isSpace != 0 || b == ',' { + // noop + } else { + // unknown character + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) + } + case "uri": + // parse tokens until space or comma + if charLUs[b]&isToken != 0 { + ub = append(ub, b) + } else if charLUs[b]&isSpace != 0 { + state = "fieldSep" + } else if b == ';' { + state = "parmName" + } else if b == ',' { + state = "init" + endLink() + } else { + // unknown character + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) + } + case "uriQuoted": + // parse tokens until quote + if b == '>' { + state = "fieldSep" + } else { + ub = append(ub, b) + } + case "fieldSep": + if b == ';' { + state = "parmName" + } else if b == ',' { + state = "init" + endLink() + } else if charLUs[b]&isSpace != 0 { + // noop + } else { + // unknown character + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) + } + case "parmName": + if len(pnb) > 0 && b == '=' { + state = "parmValue" + } else if len(pnb) > 0 && b == '*' { + state = "parmNameStar" + } else if charLUs[b]&isAlphaNum != 0 { + pnb = append(pnb, b) + } else if len(pnb) == 0 && charLUs[b]&isSpace != 0 { + // noop + } else { + // unknown 
character + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) + } + case "parmNameStar": + if b == '=' { + state = "parmValue" + } else { + // unknown character + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) + } + case "parmValue": + if len(pvb) == 0 { + if charLUs[b]&isToken != 0 { + pvb = append(pvb, b) + } else if b == '"' { + state = "parmValueQuoted" + } else { + // unknown character + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) + } + } else { + if charLUs[b]&isToken != 0 { + pvb = append(pvb, b) + } else if charLUs[b]&isSpace != 0 { + state = "fieldSep" + endParm() + } else if b == ';' { + state = "parmName" + endParm() + } else if b == ',' { + state = "init" + endParm() + endLink() + } else { + // unknown character + return nil, fmt.Errorf("unknown character in position %d of %s: %w", i, h, errs.ErrParsingFailed) + } + } + case "parmValueQuoted": + if b == '"' { + state = "fieldSep" + endParm() + } else { + pvb = append(pvb, b) + } + } + } + // check for valid state at end of header + switch state { + case "parmValue": + endParm() + endLink() + case "uri", "fieldSep": + endLink() + case "init": + // noop + default: + return nil, fmt.Errorf("unexpected end state %s for header %s: %w", state, h, errs.ErrParsingFailed) + } + } + + return links, nil +} + +// Get returns a link with a specific parm value, e.g. 
rel="next" +func (links Links) Get(parm, val string) (Link, error) { + for _, link := range links { + if link.Param != nil && link.Param[parm] == val { + return link, nil + } + } + return Link{}, errs.ErrNotFound +} diff --git a/vendor/github.com/regclient/regclient/internal/limitread/limitread.go b/vendor/github.com/regclient/regclient/internal/limitread/limitread.go new file mode 100644 index 00000000..77115ef0 --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/limitread/limitread.go @@ -0,0 +1,29 @@ +// Package limitread provides a reader that will error if the limit is ever exceeded +package limitread + +import ( + "fmt" + "io" + + "github.com/regclient/regclient/types/errs" +) + +type LimitRead struct { + Reader io.Reader + Limit int64 +} + +func (lr *LimitRead) Read(p []byte) (int, error) { + if lr.Limit < 0 { + return 0, fmt.Errorf("read limit exceeded%.0w", errs.ErrSizeLimitExceeded) + } + if int64(len(p)) > lr.Limit+1 { + p = p[0 : lr.Limit+1] + } + n, err := lr.Reader.Read(p) + lr.Limit -= int64(n) + if lr.Limit < 0 { + return n, fmt.Errorf("read limit exceeded%.0w", errs.ErrSizeLimitExceeded) + } + return n, err +} diff --git a/vendor/github.com/regclient/regclient/internal/pqueue/pqueue.go b/vendor/github.com/regclient/regclient/internal/pqueue/pqueue.go new file mode 100644 index 00000000..4d493f2f --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/pqueue/pqueue.go @@ -0,0 +1,286 @@ +// Package pqueue implements a priority queue. +package pqueue + +import ( + "context" + "fmt" + "sync" +) + +type Queue[T any] struct { + mu sync.Mutex + max int + next func(queued, active []*T) int + active []*T + queued []*T + wait []*chan struct{} +} + +// Opts is used to configure a new priority queue. +type Opts[T any] struct { + Max int // maximum concurrent entries, defaults to 1. + Next func(queued, active []*T) int // function to lookup index of next queued entry to release, defaults to oldest entry. 
+} + +// New creates a new priority queue. +func New[T any](opts Opts[T]) *Queue[T] { + if opts.Max <= 0 { + opts.Max = 1 + } + return &Queue[T]{ + max: opts.Max, + next: opts.Next, + } +} + +// Acquire adds a new entry to the queue and returns once it is ready. +// The returned function must be called when the queued job completes to release the next entry. +// If there is any error, the returned function will be nil. +func (q *Queue[T]) Acquire(ctx context.Context, e T) (func(), error) { + if q == nil { + return func() {}, nil + } + found, err := q.checkContext(ctx) + if err != nil { + return nil, err + } + if found { + return func() {}, nil + } + q.mu.Lock() + if len(q.active)+len(q.queued) < q.max { + q.active = append(q.active, &e) + q.mu.Unlock() + return q.releaseFn(&e), nil + } + // limit reached, add to queue and wait + w := make(chan struct{}, 1) + q.queued = append(q.queued, &e) + q.wait = append(q.wait, &w) + q.mu.Unlock() + // wait on both context and queue + select { + case <-ctx.Done(): + // context abort, remove queued entry + q.mu.Lock() + for i := range q.queued { + if q.queued[i] == &e { + if len(q.queued) >= i+1 { + q.queued = q.queued[:i] + q.wait = q.wait[:i] + } else { + q.queued = append(q.queued[:i], q.queued[i+1:]...) + q.wait = append(q.wait[:i], q.wait[i+1:]...) + } + q.mu.Unlock() + return nil, ctx.Err() + } + } + q.mu.Unlock() + // queued entry found, assume race condition with context and entry being released, release next entry + q.release(&e) + return nil, ctx.Err() + case <-w: + return q.releaseFn(&e), nil + } +} + +// TryAcquire attempts to add an entry on to the list of active entries. +// If the returned function is nil, the queue was not available. +// If the returned function is not nil, it must be called when the job is complete to release the next entry. 
+func (q *Queue[T]) TryAcquire(ctx context.Context, e T) (func(), error) { + if q == nil { + return func() {}, nil + } + found, err := q.checkContext(ctx) + if err != nil { + return nil, err + } + if found { + return func() {}, nil + } + q.mu.Lock() + defer q.mu.Unlock() + if len(q.active)+len(q.queued) < q.max { + q.active = append(q.active, &e) + return q.releaseFn(&e), nil + } + return nil, nil +} + +// release next entry or noop. +func (q *Queue[T]) release(prev *T) { + q.mu.Lock() + defer q.mu.Unlock() + // remove prev entry from active list + for i := range q.active { + if q.active[i] == prev { + if i == len(q.active)+1 { + q.active = q.active[:i] + } else { + q.active = append(q.active[:i], q.active[i+1:]...) + } + break + } + } + // skip checks when at limit or nothing queued + if len(q.queued) == 0 { + if len(q.active) == 0 { + // free up slices if this was the last active entry + q.active = nil + q.queued = nil + q.wait = nil + } + return + } + if len(q.active) >= q.max { + return + } + i := 0 + if q.next != nil && len(q.queued) > 1 { + i = q.next(q.queued, q.active) + // validate response + if i < 0 { + i = 0 + } + if i >= len(q.queued) { + i = len(q.queued) - 1 + } + } + // release queued entry, move to active list, and remove from queued/wait lists + close(*q.wait[i]) + q.active = append(q.active, q.queued[i]) + if i == len(q.queued)-1 { + q.queued = q.queued[:i] + q.wait = q.wait[:i] + } else { + q.queued = append(q.queued[:i], q.queued[i+1:]...) + q.wait = append(q.wait[:i], q.wait[i+1:]...) + } +} + +// releaseFn is a convenience wrapper around [release]. +func (q *Queue[T]) releaseFn(prev *T) func() { + return func() { + q.release(prev) + } +} + +// TODO: is there a way to make a different context key for each generic type? +type ctxType int + +var ctxKey ctxType + +type valMulti[T any] struct { + qList []*Queue[T] +} + +// AcquireMulti is used to simultaneously lock multiple queues without the risk of deadlock. 
+// The returned context needs to be used on calls to [Acquire] or [TryAcquire] which will immediately succeed since the resource is already acquired. +// Attempting to acquire other resources with [Acquire], [TryAcquire], or [AcquireMulti] using the returned context and will fail for being outside of the transaction. +// The returned function must be called to release the resources. +// The returned function is not thread safe, ensure no other simultaneous calls to [Acquire] or [TryAcquire] using the returned context have finished before it is called. +func AcquireMulti[T any](ctx context.Context, e T, qList ...*Queue[T]) (context.Context, func(), error) { + // verify context not already holding locks + qCtx := ctx.Value(ctxKey) + if qCtx != nil { + if qCtxVal, ok := qCtx.(*valMulti[T]); !ok || qCtxVal.qList != nil { + return ctx, nil, fmt.Errorf("context already used by another AcquireMulti request") + } + } + // delete nil entries + for i := len(qList) - 1; i >= 0; i-- { + if qList[i] == nil { + if i == len(qList)-1 { + qList = qList[:i] + } else { + qList = append(qList[:i], qList[i+1:]...) + } + } + } + // empty/nil list is a noop + if len(qList) == 0 { + return ctx, func() {}, nil + } + // dedup entries from the list + for i := len(qList) - 2; i >= 0; i-- { + for j := len(qList) - 1; j > i; j-- { + if qList[i] == qList[j] { + qList[j] = qList[len(qList)-1] + qList = qList[:len(qList)-1] + } + } + } + // Loop through queues to acquire, waiting on the first, and attempting the remaining. + // If any of the remaining entries cannot be immediately acquired, reset and make it the new queue to wait on. 
+ lockI := 0 + doneList := make([]func(), len(qList)) + for { + acquired := true + i := 0 + done, err := qList[lockI].Acquire(ctx, e) + if err != nil { + return ctx, nil, err + } + doneList[lockI] = done + for i < len(qList) { + if i != lockI { + doneList[i], err = qList[i].TryAcquire(ctx, e) + if doneList[i] == nil || err != nil { + acquired = false + break + } + } + i++ + } + if err == nil && acquired { + break + } + // cleanup on failed attempt + if lockI > i { + doneList[lockI]() + } + // track blocking index for a retry + lockI = i + for i > 0 { + i-- + doneList[i]() + } + // abort on errors + if err != nil { + return ctx, nil, err + } + } + // success, update context + ctxVal := valMulti[T]{qList: qList} + newCtx := context.WithValue(ctx, ctxKey, &ctxVal) + cleanup := func() { + ctxVal.qList = nil + // dequeue in reverse order to minimize chance of another AcquireMulti being freed and immediately blocking on the next queue + for i := len(doneList) - 1; i >= 0; i-- { + doneList[i]() + } + } + return newCtx, cleanup, nil +} + +func (q *Queue[T]) checkContext(ctx context.Context) (bool, error) { + qCtx := ctx.Value(ctxKey) + if qCtx == nil { + return false, nil + } + qCtxVal, ok := qCtx.(*valMulti[T]) + if !ok { + return false, nil // another type is using the context, treat it as unset + } + if qCtxVal.qList == nil { + return false, nil + } + for _, cur := range qCtxVal.qList { + if cur == q { + // instance already locked + return true, nil + } + } + return true, fmt.Errorf("cannot acquire new locks during a transaction") +} diff --git a/vendor/github.com/regclient/regclient/internal/reghttp/http.go b/vendor/github.com/regclient/regclient/internal/reghttp/http.go new file mode 100644 index 00000000..e461a9c3 --- /dev/null +++ b/vendor/github.com/regclient/regclient/internal/reghttp/http.go @@ -0,0 +1,969 @@ +// Package reghttp is used for HTTP requests to a registry +package reghttp + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + 
"encoding/pem" + "errors" + "fmt" + "io" + "log/slog" + "net/http" + "net/url" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "time" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/regclient/regclient/config" + "github.com/regclient/regclient/internal/auth" + "github.com/regclient/regclient/internal/pqueue" + "github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/warning" +) + +var defaultDelayInit, _ = time.ParseDuration("0.1s") +var defaultDelayMax, _ = time.ParseDuration("30s") +var warnRegexp = regexp.MustCompile(`^299\s+-\s+"([^"]+)"`) + +const ( + DefaultRetryLimit = 5 // number of times a request will be retried + backoffResetCount = 5 // number of successful requests needed to reduce the backoff +) + +// Client is an HTTP client wrapper. +// It handles features like authentication, retries, backoff delays, TLS settings. 
+type Client struct { + httpClient *http.Client // upstream [http.Client], this is wrapped per repository for an auth handler on redirects + getConfigHost func(string) *config.Host // call-back to get the [config.Host] for a specific registry + host map[string]*clientHost // host specific settings, wrap access with a mutex lock + rootCAPool [][]byte // list of root CAs for configuring the http.Client transport + rootCADirs []string // list of directories for additional root CAs + retryLimit int // number of retries before failing a request, this applies to each host, and each request + delayInit time.Duration // how long to initially delay requests on a failure + delayMax time.Duration // maximum time to delay a request + slog *slog.Logger // logging for tracing and failures + userAgent string // user agent to specify in http request headers + mu sync.Mutex // mutex to prevent data races +} + +type clientHost struct { + config *config.Host // config entry + httpClient *http.Client // modified http client for registry specific settings + userAgent string // user agent to specify in http request headers + slog *slog.Logger // logging for tracing and failures + auth map[string]*auth.Auth // map of auth handlers by repository + backoffCur int // current count of backoffs for this host + backoffLast time.Time // time the last request was released, this may be in the future if there is a queue, or zero if no delay is needed + backoffReset int // count of successful requests when a backoff is experienced, once [backoffResetCount] is reached, [backoffCur] is reduced by one and this is reset to 0 + reqFreq time.Duration // how long between submitting requests for this host + reqNext time.Time // time to release the next request + throttle *pqueue.Queue[reqmeta.Data] // limit concurrent requests to the host + mu sync.Mutex // mutex to prevent data races +} + +// Req is a request to send to a registry. 
+type Req struct { + MetaKind reqmeta.Kind // kind of request for the priority queue + Host string // registry name, hostname and mirrors will be looked up from host configuration + Method string // http method to call + DirectURL *url.URL // url to query, overrides repository, path, and query + Repository string // repository to scope the request + Path string // path of the request within a repository + Query url.Values // url query parameters + BodyLen int64 // length of body to send + BodyBytes []byte // bytes of the body, overridden by BodyFunc + BodyFunc func() (io.ReadCloser, error) // function to return a new body + Headers http.Header // headers to send in the request + NoPrefix bool // do not include the repository prefix + NoMirrors bool // do not send request to a mirror + ExpectLen int64 // expected size of the returned body + TransactLen int64 // size of an overall transaction for the priority queue + IgnoreErr bool // ignore http errors and do not trigger backoffs +} + +// Resp is used to handle the result of a request. +type Resp struct { + ctx context.Context + client *Client + req *Req + resp *http.Response + mirror string + done bool + reader io.Reader + readCur, readMax int64 + retryCount int + throttleDone func() +} + +// Opts is used to configure client options. +type Opts func(*Client) + +// NewClient returns a client for handling requests. +func NewClient(opts ...Opts) *Client { + c := Client{ + httpClient: &http.Client{}, + host: map[string]*clientHost{}, + retryLimit: DefaultRetryLimit, + delayInit: defaultDelayInit, + delayMax: defaultDelayMax, + slog: slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{})), + rootCAPool: [][]byte{}, + rootCADirs: []string{}, + } + for _, opt := range opts { + opt(&c) + } + return &c +} + +// WithCerts adds certificates. +func WithCerts(certs [][]byte) Opts { + return func(c *Client) { + c.rootCAPool = append(c.rootCAPool, certs...) 
+ } +} + +// WithCertDirs adds directories to check for host specific certs. +func WithCertDirs(dirs []string) Opts { + return func(c *Client) { + c.rootCADirs = append(c.rootCADirs, dirs...) + } +} + +// WithCertFiles adds certificates by filename. +func WithCertFiles(files []string) Opts { + return func(c *Client) { + for _, f := range files { + //#nosec G304 command is run by a user accessing their own files + cert, err := os.ReadFile(f) + if err != nil { + c.slog.Warn("Failed to read certificate", + slog.String("err", err.Error()), + slog.String("file", f)) + } else { + c.rootCAPool = append(c.rootCAPool, cert) + } + } + } +} + +// WithConfigHostFn adds the callback to request a [config.Host] struct. +// The function must normalize the hostname for Docker Hub support. +func WithConfigHostFn(gch func(string) *config.Host) Opts { + return func(c *Client) { + c.getConfigHost = gch + } +} + +// WithDelay initial time to wait between retries (increased with exponential backoff). +func WithDelay(delayInit time.Duration, delayMax time.Duration) Opts { + return func(c *Client) { + if delayInit > 0 { + c.delayInit = delayInit + } + // delayMax must be at least delayInit, if 0 initialize to 30x delayInit + if delayMax > c.delayInit { + c.delayMax = delayMax + } else if delayMax > 0 { + c.delayMax = c.delayInit + } else { + c.delayMax = c.delayInit * 30 + } + } +} + +// WithHTTPClient uses a specific http client with retryable requests. +func WithHTTPClient(hc *http.Client) Opts { + return func(c *Client) { + c.httpClient = hc + } +} + +// WithRetryLimit restricts the number of retries (defaults to 5). +func WithRetryLimit(rl int) Opts { + return func(c *Client) { + if rl > 0 { + c.retryLimit = rl + } + } +} + +// WithLog injects a slog Logger configuration. +func WithLog(slog *slog.Logger) Opts { + return func(c *Client) { + c.slog = slog + } +} + +// WithTransport uses a specific http transport with retryable requests. 
+func WithTransport(t *http.Transport) Opts { + return func(c *Client) { + c.httpClient = &http.Client{Transport: t} + } +} + +// WithUserAgent sets a user agent header. +func WithUserAgent(ua string) Opts { + return func(c *Client) { + c.userAgent = ua + } +} + +// Do runs a request, returning the response result. +func (c *Client) Do(ctx context.Context, req *Req) (*Resp, error) { + resp := &Resp{ + ctx: ctx, + client: c, + req: req, + readCur: 0, + readMax: req.ExpectLen, + } + err := resp.next() + return resp, err +} + +// next sends requests until a mirror responds or all requests fail. +func (resp *Resp) next() error { + var err error + c := resp.client + req := resp.req + // lookup reqHost entry + reqHost := c.getHost(req.Host) + // create sorted list of mirrors, based on backoffs, upstream, and priority + hosts := make([]*clientHost, 0, 1+len(reqHost.config.Mirrors)) + if !req.NoMirrors { + for _, m := range reqHost.config.Mirrors { + hosts = append(hosts, c.getHost(m)) + } + } + hosts = append(hosts, reqHost) + sort.Slice(hosts, sortHostsCmp(hosts, reqHost.config.Name)) + // loop over requests to mirrors and retries + curHost := 0 + for { + backoff := false + dropHost := false + retryHost := false + if len(hosts) == 0 { + if err != nil { + return err + } + return errs.ErrAllRequestsFailed + } + if curHost >= len(hosts) { + curHost = 0 + } + h := hosts[curHost] + resp.mirror = h.config.Name + // there is an intentional extra retry in this check to allow for auth requests + if resp.retryCount > c.retryLimit { + return errs.ErrRetryLimitExceeded + } + resp.retryCount++ + + // check that context isn't canceled/done + ctxErr := resp.ctx.Err() + if ctxErr != nil { + return ctxErr + } + // wait for other concurrent requests to this host + throttleDone, throttleErr := h.throttle.Acquire(resp.ctx, reqmeta.Data{ + Kind: req.MetaKind, + Size: req.BodyLen + req.ExpectLen + req.TransactLen, + }) + if throttleErr != nil { + return throttleErr + } + + // try each host in 
a closure to handle all the backoff/dropHost from one place + loopErr := func() error { + var err error + if req.Method == "HEAD" && h.config.APIOpts != nil { + var disableHead bool + disableHead, err = strconv.ParseBool(h.config.APIOpts["disableHead"]) + if err == nil && disableHead { + dropHost = true + return fmt.Errorf("head requests disabled for host \"%s\": %w", h.config.Name, errs.ErrUnsupportedAPI) + } + } + + // build the url + var u url.URL + if req.DirectURL != nil { + u = *req.DirectURL + } else { + u = url.URL{ + Host: h.config.Hostname, + Scheme: "https", + } + path := strings.Builder{} + path.WriteString("/v2") + if h.config.PathPrefix != "" && !req.NoPrefix { + path.WriteString("/" + h.config.PathPrefix) + } + if req.Repository != "" { + path.WriteString("/" + req.Repository) + } + path.WriteString("/" + req.Path) + u.Path = path.String() + if h.config.TLS == config.TLSDisabled { + u.Scheme = "http" + } + if req.Query != nil { + u.RawQuery = req.Query.Encode() + } + } + // close previous response + if resp.resp != nil && resp.resp.Body != nil { + _ = resp.resp.Body.Close() + } + // delay for backoff if needed + bu := resp.backoffGet() + if !bu.IsZero() && bu.After(time.Now()) { + sleepTime := time.Until(bu) + c.slog.Debug("Sleeping for backoff", + slog.String("Host", h.config.Name), + slog.Duration("Duration", sleepTime)) + select { + case <-resp.ctx.Done(): + return errs.ErrCanceled + case <-time.After(sleepTime): + } + } + var httpReq *http.Request + httpReq, err = http.NewRequestWithContext(resp.ctx, req.Method, u.String(), nil) + if err != nil { + dropHost = true + return err + } + if req.BodyFunc != nil { + body, err := req.BodyFunc() + if err != nil { + dropHost = true + return err + } + httpReq.Body = body + httpReq.GetBody = req.BodyFunc + httpReq.ContentLength = req.BodyLen + } else if len(req.BodyBytes) > 0 { + body := io.NopCloser(bytes.NewReader(req.BodyBytes)) + httpReq.Body = body + httpReq.GetBody = func() (io.ReadCloser, error) { 
return body, nil } + httpReq.ContentLength = req.BodyLen + } + if len(req.Headers) > 0 { + httpReq.Header = req.Headers.Clone() + } + if c.userAgent != "" && httpReq.Header.Get("User-Agent") == "" { + httpReq.Header.Add("User-Agent", c.userAgent) + } + if resp.readCur > 0 && resp.readMax > 0 { + if req.Headers.Get("Range") == "" { + httpReq.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", resp.readCur, resp.readMax)) + } else { + // TODO: support Seek within a range request + dropHost = true + return fmt.Errorf("unable to resume a connection within a range request") + } + } + + hAuth := h.getAuth(req.Repository) + if hAuth != nil { + // include docker generated scope to emulate docker clients + if req.Repository != "" { + scope := "repository:" + req.Repository + ":pull" + if req.Method != "HEAD" && req.Method != "GET" { + scope = scope + ",push" + } + _ = hAuth.AddScope(h.config.Hostname, scope) + } + // add auth headers + err = hAuth.UpdateRequest(httpReq) + if err != nil { + if errors.Is(err, errs.ErrHTTPUnauthorized) { + dropHost = true + } else { + backoff = true + } + return err + } + } + + // delay for the rate limit + if h.reqFreq > 0 { + sleep := time.Duration(0) + h.mu.Lock() + if time.Now().Before(h.reqNext) { + sleep = time.Until(h.reqNext) + h.reqNext = h.reqNext.Add(h.reqFreq) + } else { + h.reqNext = time.Now().Add(h.reqFreq) + } + h.mu.Unlock() + if sleep > 0 { + time.Sleep(sleep) + } + } + + // send request + hc := h.getHTTPClient(req.Repository) + resp.resp, err = hc.Do(httpReq) + + if err != nil { + c.slog.Debug("Request failed", + slog.String("URL", u.String()), + slog.String("err", err.Error())) + backoff = true + return err + } + + statusCode := resp.resp.StatusCode + if statusCode < 200 || statusCode >= 300 { + switch statusCode { + case http.StatusUnauthorized: + // if auth can be done, retry same host without delay, otherwise drop/backoff + if hAuth != nil { + err = hAuth.HandleResponse(resp.resp) + } else { + err = fmt.Errorf("authentication 
handler unavailable") + } + if err != nil { + if errors.Is(err, errs.ErrEmptyChallenge) || errors.Is(err, errs.ErrNoNewChallenge) || errors.Is(err, errs.ErrHTTPUnauthorized) { + c.slog.Debug("Failed to handle auth request", + slog.String("URL", u.String()), + slog.String("Err", err.Error())) + } else { + c.slog.Warn("Failed to handle auth request", + slog.String("URL", u.String()), + slog.String("Err", err.Error())) + } + dropHost = true + } else { + err = fmt.Errorf("authentication required") + retryHost = true + } + return err + case http.StatusNotFound: + // if not found, drop mirror for this req, but other requests don't need backoff + dropHost = true + case http.StatusRequestedRangeNotSatisfiable: + // if range request error (blob push), drop mirror for this req, but other requests don't need backoff + dropHost = true + case http.StatusTooManyRequests, http.StatusRequestTimeout, http.StatusGatewayTimeout, http.StatusBadGateway, http.StatusInternalServerError: + // server is likely overloaded, backoff but still retry + backoff = true + default: + // all other errors indicate a bigger issue, don't retry and set backoff + backoff = true + dropHost = true + } + errHTTP := HTTPError(resp.resp.StatusCode) + errBody, _ := io.ReadAll(resp.resp.Body) + _ = resp.resp.Body.Close() + return fmt.Errorf("request failed: %w: %s", errHTTP, errBody) + } + + resp.reader = resp.resp.Body + resp.done = false + // set variables from headers if found + clHeader := resp.resp.Header.Get("Content-Length") + if resp.readCur == 0 && clHeader != "" { + cl, parseErr := strconv.ParseInt(clHeader, 10, 64) + if parseErr != nil { + c.slog.Debug("failed to parse content-length header", + slog.String("err", parseErr.Error()), + slog.String("header", clHeader)) + } else if resp.readMax > 0 { + if resp.readMax != cl { + return fmt.Errorf("unexpected content-length, expected %d, received %d", resp.readMax, cl) + } + } else { + resp.readMax = cl + } + } + // verify Content-Range header when range 
request used, fail if missing + if httpReq.Header.Get("Range") != "" && resp.resp.Header.Get("Content-Range") == "" { + dropHost = true + _ = resp.resp.Body.Close() + return fmt.Errorf("range request not supported by server") + } + return nil + }() + // return on success + if loopErr == nil { + resp.throttleDone = throttleDone + return nil + } + // backoff, dropHost, and/or go to next host in the list + if backoff { + if req.IgnoreErr { + // don't set a backoff, immediately drop the host when errors ignored + dropHost = true + } else { + boErr := resp.backoffSet() + if boErr != nil { + // reached backoff limit + dropHost = true + } + } + } + throttleDone() + // when error does not allow retries, abort with the last known err value + if err != nil && errors.Is(loopErr, errs.ErrNotRetryable) { + return err + } + err = loopErr + if dropHost { + hosts = append(hosts[:curHost], hosts[curHost+1:]...) + } else if !retryHost { + curHost++ + } + } +} + +// GetThrottle returns the current [pqueue.Queue] for a host used to throttle connections. +// This can be used to acquire multiple throttles before performing a request across multiple hosts. +func (c *Client) GetThrottle(host string) *pqueue.Queue[reqmeta.Data] { + ch := c.getHost(host) + return ch.throttle +} + +// HTTPResponse returns the [http.Response] from the last request. +func (resp *Resp) HTTPResponse() *http.Response { + return resp.resp +} + +// Read provides a retryable read from the body of the response. +func (resp *Resp) Read(b []byte) (int, error) { + if resp.done { + return 0, io.EOF + } + if resp.resp == nil { + return 0, errs.ErrNotFound + } + // perform the read + i, err := resp.reader.Read(b) + resp.readCur += int64(i) + if err == io.EOF || err == io.ErrUnexpectedEOF { + if resp.resp.Request.Method == "HEAD" || resp.readCur >= resp.readMax { + resp.backoffReset() + resp.done = true + } else { + // short read, retry? 
+ resp.client.slog.Debug("EOF before reading all content, retrying", + slog.Int64("curRead", resp.readCur), + slog.Int64("contentLen", resp.readMax)) + // retry + respErr := resp.backoffSet() + if respErr == nil { + respErr = resp.next() + } + // unrecoverable EOF + if respErr != nil { + resp.client.slog.Warn("Failed to recover from short read", + slog.String("err", respErr.Error())) + resp.done = true + return i, err + } + // retry successful, no EOF + return i, nil + } + } + + if err == nil { + return i, nil + } + return i, err +} + +// Close frees up resources from the request. +func (resp *Resp) Close() error { + if resp.throttleDone != nil { + resp.throttleDone() + resp.throttleDone = nil + } + if resp.resp == nil { + return errs.ErrNotFound + } + if !resp.done { + resp.backoffReset() + } + resp.done = true + return resp.resp.Body.Close() +} + +// Seek provides a limited ability seek within the request response. +func (resp *Resp) Seek(offset int64, whence int) (int64, error) { + newOffset := resp.readCur + switch whence { + case io.SeekStart: + newOffset = offset + case io.SeekCurrent: + newOffset += offset + case io.SeekEnd: + if resp.readMax <= 0 { + return resp.readCur, fmt.Errorf("seek from end is not supported") + } else if resp.readMax+offset < 0 { + return resp.readCur, fmt.Errorf("seek past beginning of the file is not supported") + } + newOffset = resp.readMax + offset + default: + return resp.readCur, fmt.Errorf("unknown value of whence: %d", whence) + } + if newOffset != resp.readCur { + resp.readCur = newOffset + // rerun the request to restart + resp.retryCount-- // do not count a seek as a retry + err := resp.next() + if err != nil { + return resp.readCur, err + } + } + return resp.readCur, nil +} + +func (resp *Resp) backoffGet() time.Time { + c := resp.client + ch := c.getHost(resp.mirror) + ch.mu.Lock() + defer ch.mu.Unlock() + if ch.backoffCur > 0 { + delay := c.delayInit << ch.backoffCur + if delay > c.delayMax { + delay = c.delayMax + } + 
next := ch.backoffLast.Add(delay) + now := time.Now() + if now.After(next) { + next = now + } + ch.backoffLast = next + return next + } + // reset a stale "retry-after" time + if !ch.backoffLast.IsZero() && ch.backoffLast.Before(time.Now()) { + ch.backoffLast = time.Time{} + } + return ch.backoffLast +} + +func (resp *Resp) backoffSet() error { + c := resp.client + ch := c.getHost(resp.mirror) + ch.mu.Lock() + defer ch.mu.Unlock() + // check rate limit header and use that directly if possible + if resp.resp != nil && resp.resp.Header.Get("Retry-After") != "" { + ras := resp.resp.Header.Get("Retry-After") + ra, _ := time.ParseDuration(ras + "s") + if ra > 0 { + next := time.Now().Add(ra) + if ch.backoffLast.Before(next) { + ch.backoffLast = next + } + return nil + } + } + // Else track the number of backoffs and fail when the limit is exceeded. + // New requests always get at least one try, but fail fast if the server has been throwing errors. + ch.backoffCur++ + if ch.backoffLast.IsZero() { + ch.backoffLast = time.Now() + } + if ch.backoffCur >= c.retryLimit { + return fmt.Errorf("%w: backoffs %d", errs.ErrBackoffLimit, ch.backoffCur) + } + + return nil +} + +func (resp *Resp) backoffReset() { + c := resp.client + ch := c.getHost(resp.mirror) + ch.mu.Lock() + defer ch.mu.Unlock() + if ch.backoffCur > 0 { + ch.backoffReset++ + // If enough successful requests are seen, lower the backoffCur count. + // This requires multiple successful requests of a flaky server, but quickly drops when above the retry limit. + if ch.backoffReset > backoffResetCount || ch.backoffCur > c.retryLimit { + ch.backoffReset = 0 + ch.backoffCur-- + if ch.backoffCur == 0 { + // reset the last time to the zero value + ch.backoffLast = time.Time{} + } + } + } +} + +// getHost looks up or creates a clientHost for a given registry. 
+func (c *Client) getHost(host string) *clientHost { + c.mu.Lock() + defer c.mu.Unlock() + if h, ok := c.host[host]; ok { + return h + } + var conf *config.Host + if c.getConfigHost != nil { + conf = c.getConfigHost(host) + } else { + conf = config.HostNewName(host) + } + if conf.Name != host { + if h, ok := c.host[conf.Name]; ok { + return h + } + } + h := &clientHost{ + config: conf, + userAgent: c.userAgent, + slog: c.slog, + auth: map[string]*auth.Auth{}, + } + if h.config.ReqPerSec > 0 { + h.reqFreq = time.Duration(float64(time.Second) / h.config.ReqPerSec) + } + if h.config.ReqConcurrent > 0 { + h.throttle = pqueue.New(pqueue.Opts[reqmeta.Data]{Max: int(h.config.ReqConcurrent), Next: reqmeta.DataNext}) + } + // copy the http client and configure registry specific settings + hc := *c.httpClient + h.httpClient = &hc + if h.httpClient.Transport == nil { + h.httpClient.Transport = http.DefaultTransport.(*http.Transport).Clone() + } + // configure transport for insecure requests and root certs + if h.config.TLS == config.TLSInsecure || len(c.rootCAPool) > 0 || len(c.rootCADirs) > 0 || h.config.RegCert != "" || (h.config.ClientCert != "" && h.config.ClientKey != "") { + t, ok := h.httpClient.Transport.(*http.Transport) + if ok { + var tlsc *tls.Config + if t.TLSClientConfig != nil { + tlsc = t.TLSClientConfig.Clone() + } else { + //#nosec G402 the default TLS 1.2 minimum version is allowed to support older registries + tlsc = &tls.Config{} + } + if h.config.TLS == config.TLSInsecure { + tlsc.InsecureSkipVerify = true + } else { + rootPool, err := makeRootPool(c.rootCAPool, c.rootCADirs, h.config.Hostname, h.config.RegCert) + if err != nil { + c.slog.Warn("failed to setup CA pool", + slog.String("err", err.Error())) + } else { + tlsc.RootCAs = rootPool + } + } + if h.config.ClientCert != "" && h.config.ClientKey != "" { + cert, err := tls.X509KeyPair([]byte(h.config.ClientCert), []byte(h.config.ClientKey)) + if err != nil { + c.slog.Warn("failed to configure client 
certs", + slog.String("err", err.Error())) + } else { + tlsc.Certificates = []tls.Certificate{cert} + } + } + t.TLSClientConfig = tlsc + h.httpClient.Transport = t + } + } + // wrap the transport for logging and to handle warning headers + h.httpClient.Transport = &wrapTransport{c: c, orig: h.httpClient.Transport} + + c.host[conf.Name] = h + if conf.Name != host { + // save another reference for faster lookups + c.host[host] = h + } + return h +} + +// getHTTPClient returns a client specific to the repo being queried. +// Repository specific authentication needs a dedicated CheckRedirect handler. +func (ch *clientHost) getHTTPClient(repo string) *http.Client { + hc := *ch.httpClient + hc.CheckRedirect = ch.checkRedirect(repo, hc.CheckRedirect) + return &hc +} + +// checkRedirect wraps http.CheckRedirect to inject auth headers to specific hosts in the redirect chain +func (ch *clientHost) checkRedirect(repo string, orig func(req *http.Request, via []*http.Request) error) func(req *http.Request, via []*http.Request) error { + return func(req *http.Request, via []*http.Request) error { + // fail on too many redirects + if len(via) >= 10 { + return errors.New("stopped after 10 redirects") + } + // add auth headers if appropriate for the target host + hAuth := ch.getAuth(repo) + err := hAuth.UpdateRequest(req) + if err != nil { + return err + } + // wrap original redirect check + if orig != nil { + return orig(req, via) + } + return nil + } +} + +// getAuth returns an auth, which may be repository specific. 
+func (ch *clientHost) getAuth(repo string) *auth.Auth { + ch.mu.Lock() + defer ch.mu.Unlock() + if !ch.config.RepoAuth { + repo = "" // without RepoAuth, unset the provided repo + } + if _, ok := ch.auth[repo]; !ok { + ch.auth[repo] = auth.NewAuth( + auth.WithLog(ch.slog), + auth.WithHTTPClient(ch.httpClient), + auth.WithCreds(ch.AuthCreds()), + auth.WithClientID(ch.userAgent), + ) + } + return ch.auth[repo] +} + +func (ch *clientHost) AuthCreds() func(h string) auth.Cred { + if ch == nil || ch.config == nil { + return auth.DefaultCredsFn + } + return func(h string) auth.Cred { + hCred := ch.config.GetCred() + return auth.Cred{User: hCred.User, Password: hCred.Password, Token: hCred.Token} + } +} + +type wrapTransport struct { + c *Client + orig http.RoundTripper +} + +func (wt *wrapTransport) RoundTrip(req *http.Request) (*http.Response, error) { + resp, err := wt.orig.RoundTrip(req) + // copy headers to censor auth field + reqHead := req.Header.Clone() + if reqHead.Get("Authorization") != "" { + reqHead.Set("Authorization", "[censored]") + } + if err != nil { + wt.c.slog.Debug("reg http request", + slog.String("req-method", req.Method), + slog.String("req-url", req.URL.String()), + slog.Any("req-headers", reqHead), + slog.String("err", err.Error())) + } else { + // extract any warnings + for _, wh := range resp.Header.Values("Warning") { + if match := warnRegexp.FindStringSubmatch(wh); len(match) == 2 { + // TODO(bmitch): pass other fields (registry hostname) with structured logging + warning.Handle(req.Context(), wt.c.slog, match[1]) + } + } + wt.c.slog.Log(req.Context(), types.LevelTrace, "reg http request", + slog.String("req-method", req.Method), + slog.String("req-url", req.URL.String()), + slog.Any("req-headers", reqHead), + slog.String("resp-status", resp.Status), + slog.Any("resp-headers", resp.Header)) + } + return resp, err +} + +// HTTPError returns an error based on the status code. 
func HTTPError(statusCode int) error {
	switch statusCode {
	case 401:
		return fmt.Errorf("%w [http %d]", errs.ErrHTTPUnauthorized, statusCode)
	case 403:
		// 403 is reported with the same sentinel as 401 so callers can retry auth
		return fmt.Errorf("%w [http %d]", errs.ErrHTTPUnauthorized, statusCode)
	case 404:
		return fmt.Errorf("%w [http %d]", errs.ErrNotFound, statusCode)
	case 429:
		return fmt.Errorf("%w [http %d]", errs.ErrHTTPRateLimit, statusCode)
	default:
		return fmt.Errorf("%w: %s [http %d]", errs.ErrHTTPStatus, http.StatusText(statusCode), statusCode)
	}
}

// makeRootPool builds the CA pool for a host from the system pool, explicit PEM
// blocks, per-host "*.crt" files under each rootCADir, and an optional inline
// host certificate.
func makeRootPool(rootCAPool [][]byte, rootCADirs []string, hostname string, hostcert string) (*x509.CertPool, error) {
	pool, err := x509.SystemCertPool()
	if err != nil {
		return nil, err
	}
	for _, ca := range rootCAPool {
		if ok := pool.AppendCertsFromPEM(ca); !ok {
			return nil, fmt.Errorf("failed to load ca: %s", ca)
		}
	}
	for _, dir := range rootCADirs {
		hostDir := filepath.Join(dir, hostname)
		files, err := os.ReadDir(hostDir)
		if err != nil {
			// a missing per-host directory is not an error, skip it
			if !os.IsNotExist(err) {
				return nil, fmt.Errorf("failed to read directory %s: %w", hostDir, err)
			}
			continue
		}
		for _, f := range files {
			if f.IsDir() {
				continue
			}
			if strings.HasSuffix(f.Name(), ".crt") {
				f := filepath.Join(hostDir, f.Name())
				//#nosec G304 file from a known directory and extension read by the user running the command on their own host
				cert, err := os.ReadFile(f)
				if err != nil {
					return nil, fmt.Errorf("failed to read %s: %w", f, err)
				}
				if ok := pool.AppendCertsFromPEM(cert); !ok {
					return nil, fmt.Errorf("failed to import cert from %s", f)
				}
			}
		}
	}
	if hostcert != "" {
		if ok := pool.AppendCertsFromPEM([]byte(hostcert)); !ok {
			// try to parse the certificate and generate a useful error
			block, _ := pem.Decode([]byte(hostcert))
			if block == nil {
				err = fmt.Errorf("pem.Decode is nil")
			} else {
				_, err = x509.ParseCertificate(block.Bytes)
			}
			return nil, fmt.Errorf("failed to load host specific ca (registry: %s): %w: %s", hostname, err, hostcert)
		}
	}
	return pool, nil
}

// sortHostsCmp returns a less function to sort the host list of mirrors.
func sortHostsCmp(hosts []*clientHost, upstream string) func(i, j int) bool {
	now := time.Now()
	// sort by backoff first, then priority descending, then upstream name last
	return func(i, j int) bool {
		if now.Before(hosts[i].backoffLast) || now.Before(hosts[j].backoffLast) {
			return hosts[i].backoffLast.Before(hosts[j].backoffLast)
		}
		if hosts[i].config.Priority != hosts[j].config.Priority {
			return hosts[i].config.Priority < hosts[j].config.Priority
		}
		return hosts[i].config.Name != upstream
	}
}
diff --git a/vendor/github.com/regclient/regclient/internal/reqmeta/data.go b/vendor/github.com/regclient/regclient/internal/reqmeta/data.go
new file mode 100644
index 00000000..2a7b94a9
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/internal/reqmeta/data.go
@@ -0,0 +1,88 @@
// Package reqmeta provides metadata on requests for prioritizing with a pqueue.
package reqmeta

// Data describes a single request for scheduling purposes.
type Data struct {
	Kind Kind  // request category, see the Kind constants
	Size int64 // request size in bytes when known
}

// Kind categorizes a request for prioritization.
type Kind int

const (
	Unknown Kind = iota
	Head
	Manifest
	Query
	Blob
)

const (
	smallLimit = 4194304 // 4MiB
	largePct   = 0.9     // anything above 90% of largest queued entry size is large
)

// DataNext picks the index of the next queued request to run, balancing small
// requests, large blobs, and queue age; it returns -1 when the queue is empty.
func DataNext(queued, active []*Data) int {
	if len(queued) == 0 {
		return -1
	}
	// After removing one small entry, split remaining requests 50/50 between large and old (truncated int division always rounds down).
	// If len active = 2, this function returns the 3rd entry (+1), minus 1 for the small, divide by 2 to split with old = goal of 1.
	largeGoal := len(active) / 2
	largeI := 0
	var largeSize int64
	if largeGoal > 0 {
		// find the largest queued blob requests
		for i, cur := range queued {
			if cur.Kind == Blob && cur.Size > largeSize {
				largeI = i
				largeSize = cur.Size
			}
		}
	}
	// NOTE(review): the 0.9 literal duplicates the largePct constant above — confirm and consolidate upstream
	largeCutoff := int64(float64(largeSize) * 0.9)
	// count active requests by type
	small := 0
	large := 0
	old := 0
	for _, cur := range active {
		if cur.Kind != Blob && cur.Size <= smallLimit {
			small++
		} else if cur.Kind == Blob && largeSize > 0 && cur.Size >= largeCutoff {
			large++
		} else {
			old++
		}
	}
	// if there is at least one active, and none are small, return the best small entry if available.
	if len(active) > 0 && small == 0 {
		var sizeI int64
		bestI := -1
		kindI := Unknown
		for i, cur := range queued {
			// the small search skips blobs and large requests
			if cur.Kind == Blob || cur.Size > smallLimit {
				continue
			}
			// the best small entry is the:
			// - first one found if no other matches
			// - one with a better Kind (Head > Manifest > Query)
			// - one with the same kind but smaller request
			if bestI < 0 ||
				(cur.Kind != Unknown && (kindI == Unknown || cur.Kind < kindI)) ||
				(cur.Kind == kindI && cur.Size > 0 && (cur.Size < sizeI || sizeI <= 0)) {
				bestI = i
				kindI = cur.Kind
				sizeI = cur.Size
			}
		}
		if bestI >= 0 {
			return bestI
		}
	}
	// Prefer the biggest of these blobs to minimize the size of the last running blob.
	if largeGoal > 0 && large < largeGoal && largeSize > 0 {
		return largeI
	}
	// enough small and large, or none available, so return the oldest queued entry to avoid starvation.
	return 0
}
diff --git a/vendor/github.com/regclient/regclient/internal/sloghandle/logrus.go b/vendor/github.com/regclient/regclient/internal/sloghandle/logrus.go
new file mode 100644
index 00000000..4f7ec4bf
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/internal/sloghandle/logrus.go
@@ -0,0 +1,126 @@
//go:build !wasm
// +build !wasm

// Package sloghandle provides a transition handler for migrating from logrus to slog.
package sloghandle

import (
	"context"
	"log/slog"
	"strings"

	"github.com/sirupsen/logrus"

	"github.com/regclient/regclient/types"
)

// Logrus wraps a logrus logger in a slog.Handler implementation.
func Logrus(logger *logrus.Logger) *logrusHandler {
	return &logrusHandler{
		logger: logger,
	}
}

type logrusHandler struct {
	logger *logrus.Logger
	attrs  []slog.Attr // accumulated attrs from WithAttrs
	groups []string    // accumulated group names from WithGroup
}

// Enabled reports whether the wrapped logrus logger would emit at this slog level.
func (h *logrusHandler) Enabled(_ context.Context, level slog.Level) bool {
	ll := h.logger.GetLevel()
	if curLevel, ok := logrusToSlog[ll]; ok {
		return level >= curLevel
	}
	return true
}

// Handle forwards a slog record to logrus, mapping attrs to logrus fields.
func (h *logrusHandler) Handle(ctx context.Context, r slog.Record) error {
	log := logrus.NewEntry(h.logger).WithContext(ctx)
	if !r.Time.IsZero() {
		log = log.WithTime(r.Time)
	}
	fields := logrus.Fields{}
	for _, a := range h.attrs {
		if a.Key != "" {
			fields[a.Key] = a.Value
		}
	}
	r.Attrs(func(a slog.Attr) bool {
		if a.Key != "" {
			fields[a.Key] = a.Value
		}
		return true
	})
	if len(fields) > 0 {
		log = log.WithFields(fields)
	}
	log.Log(slogToLogrus(r.Level), r.Message)
	return nil
}

// WithAttrs returns a copy of the handler with attrs appended, prefixing keys
// with the joined group names.
func (h *logrusHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
	ret := h.clone()
	prefix := ""
	if len(h.groups) > 0 {
		prefix = strings.Join(h.groups, ":") + ":"
	}
	for _, a := range attrs {
		if a.Key == "" {
			continue
		}
		ret.attrs = append(ret.attrs, slog.Attr{
			Key:   prefix + a.Key,
			Value: a.Value,
		})
	}
	return ret
}

// WithGroup returns a copy of the handler with the group name appended.
func (h *logrusHandler) WithGroup(name string) slog.Handler {
	if name == "" {
		return h
	}
	ret := h.clone()
	ret.groups = append(ret.groups, name)
	return ret
}

// clone deep copies the handler so derived handlers do not share slices.
func (h *logrusHandler) clone() *logrusHandler {
	attrs := make([]slog.Attr, len(h.attrs))
	copy(attrs, h.attrs)
	groups := make([]string, len(h.groups))
	copy(groups, h.groups)
	return &logrusHandler{
		logger: h.logger,
		attrs:  attrs,
		groups: groups,
	}
}

var logrusToSlog = map[logrus.Level]slog.Level{
	logrus.TraceLevel: types.LevelTrace,
	logrus.DebugLevel: slog.LevelDebug,
	logrus.InfoLevel:  slog.LevelInfo,
	logrus.WarnLevel:  slog.LevelWarn,
	logrus.ErrorLevel: slog.LevelError,
	logrus.FatalLevel: slog.LevelError + 4,
	logrus.PanicLevel: slog.LevelError + 8,
}

// slogToLogrus maps an slog level to the nearest logrus level.
func slogToLogrus(level slog.Level) logrus.Level {
	if level <= types.LevelTrace {
		return logrus.TraceLevel
	} else if level <= slog.LevelDebug {
		return logrus.DebugLevel
	} else if level <= slog.LevelInfo {
		return logrus.InfoLevel
	} else if level <= slog.LevelWarn {
		return logrus.WarnLevel
	} else if level <= slog.LevelError {
		return logrus.ErrorLevel
	} else if level <= slog.LevelError+4 {
		return logrus.FatalLevel
	} else {
		return logrus.PanicLevel
	}
}
diff --git a/vendor/github.com/regclient/regclient/internal/strparse/strparse.go b/vendor/github.com/regclient/regclient/internal/strparse/strparse.go
new file mode 100644
index 00000000..f67fdafe
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/internal/strparse/strparse.go
@@ -0,0 +1,91 @@
// Package strparse is used to parse strings
package strparse

import (
	"fmt"

	"github.com/regclient/regclient/types/errs"
)

// SplitCSKV splits a comma separated key=value list into a map
// (a small state machine handling double quotes and backslash escapes).
func SplitCSKV(s string) (map[string]string, error) {
	state := "key"
	key := ""
	val := ""
	result := map[string]string{}
	// procKV commits the current key/value pair and resets for the next entry
	procKV := func() {
		if key != "" {
			result[key] = val
		}
		state = "key"
		key = ""
		val = ""
	}
	for _, c := range s {
		switch state {
		case "key":
			switch c {
			case '"':
				state = "keyQuote"
			case '\\':
				state = "keyEscape"
			case '=':
				state = "val"
			case ',':
				procKV()
			default:
				key = key + string(c)
			}
		case "keyQuote":
			switch c {
			case '"':
				state = "key"
			case '\\':
				state = "keyEscapeQuote"
			default:
				key = key + string(c)
			}
		case "keyEscape":
			key = key + string(c)
			state = "key"
		case "keyEscapeQuote":
			key = key + string(c)
			state = "keyQuote"
		case "val":
			switch c {
			case '"':
				state = "valQuote"
			case ',':
				procKV()
			case '\\':
				state = "valEscape"
			default:
				val = val + string(c)
			}
		case "valQuote":
			switch c {
			case '"':
				state = "val"
			case '\\':
				state = "valEscapeQuote"
			default:
				val = val + string(c)
			}
		case "valEscape":
			val = val + string(c)
			state = "val"
		case "valEscapeQuote":
			val = val + string(c)
			state = "valQuote"
		default:
			return nil, fmt.Errorf("unhandled state: %s", state)
		}
	}
	// ending inside a quote or escape means the input was malformed
	switch state {
	case "val", "key":
		procKV()
	default:
		return nil, fmt.Errorf("string parsing failed, end state: %s%.0w", state, errs.ErrParsingFailed)
	}
	return result, nil
}
diff --git a/vendor/github.com/regclient/regclient/internal/timejson/timejson.go b/vendor/github.com/regclient/regclient/internal/timejson/timejson.go
new file mode 100644
index 00000000..32570897
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/internal/timejson/timejson.go
@@ -0,0 +1,41 @@
// Package timejson extends time methods with marshal/unmarshal for json
package timejson

import (
	"encoding/json"
	"errors"
	"time"
)

var errInvalid = errors.New("invalid duration")

// Duration is an alias to time.Duration
// Implementation taken from https://stackoverflow.com/questions/48050945/how-to-unmarshal-json-into-durations
type Duration time.Duration

// MarshalJSON converts a duration to json
func (d Duration) MarshalJSON() ([]byte, error) {
	return json.Marshal(time.Duration(d).String())
}

// UnmarshalJSON converts json to a duration
// (accepts either a JSON number of nanoseconds or a ParseDuration string).
func (d *Duration) UnmarshalJSON(b []byte) error {
	var v interface{}
	if err := json.Unmarshal(b, &v); err != nil {
		return err
	}
	switch value := v.(type) {
	case float64:
		*d = Duration(time.Duration(value))
		return nil
	case string:
		timeDur, err := time.ParseDuration(value)
		if err != nil {
			return err
		}
		*d = Duration(timeDur)
		return nil
	default:
		return errInvalid
	}
}
diff --git a/vendor/github.com/regclient/regclient/internal/units/size.go b/vendor/github.com/regclient/regclient/internal/units/size.go
new file mode 100644
index 00000000..772e4873
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/internal/units/size.go
@@ -0,0 +1,57 @@
// Package units is taken from https://github.com/docker/go-units
package units

// Copyright 2015 Docker, Inc.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

// https://www.apache.org/licenses/LICENSE-2.0

// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
	"fmt"
)

// NOTE(review): "decimapAbbrs" is spelled this way upstream; identifier left unchanged.
var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}

// getSizeAndUnit repeatedly divides size by base, returning the scaled size
// and the matching unit suffix.
func getSizeAndUnit(size float64, base float64, unitList []string) (float64, string) {
	i := 0
	unitsLimit := len(unitList) - 1
	for size >= base && i < unitsLimit {
		size = size / base
		i++
	}
	return size, unitList[i]
}

// CustomSize returns a human-readable approximation of a size using custom format.
func CustomSize(format string, size float64, base float64, unitList []string) string {
	size, unit := getSizeAndUnit(size, base, unitList)
	return fmt.Sprintf(format, size, unit)
}

// HumanSizeWithPrecision allows the size to be in any precision.
func HumanSizeWithPrecision(size float64, width, precision int) string {
	size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs)
	return fmt.Sprintf("%*.*f%s", width, precision, size, unit)
}

// HumanSize returns a human-readable approximation of a size
// with a width of 5 (eg. "2.746MB", "796.0KB").
func HumanSize(size float64) string {
	return HumanSizeWithPrecision(size, 5, 3)
}

// BytesSize returns a human-readable size in bytes, kibibytes,
// mebibytes, gibibytes, or tebibytes (eg. "44.2kiB", "17.6MiB").
func BytesSize(size float64) string {
	return CustomSize("%5.3f%s", size, 1024.0, binaryAbbrs)
}
diff --git a/vendor/github.com/regclient/regclient/internal/version/version.go b/vendor/github.com/regclient/regclient/internal/version/version.go
new file mode 100644
index 00000000..5e5715e5
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/internal/version/version.go
@@ -0,0 +1,32 @@
// Package version returns details on the Go and Git repo used in the build
package version

import (
	"bytes"
	"fmt"
	"text/tabwriter"
)

const (
	stateClean    = "clean"
	stateDirty    = "dirty"
	unknown       = "unknown"
	biVCSDate     = "vcs.time"
	biVCSCommit   = "vcs.revision"
	biVCSModified = "vcs.modified"
)

// MarshalPretty renders the version info as an aligned, human-readable table.
func (i Info) MarshalPretty() ([]byte, error) {
	buf := &bytes.Buffer{}
	tw := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
	fmt.Fprintf(tw, "VCSTag:\t%s\n", i.VCSTag)
	fmt.Fprintf(tw, "VCSRef:\t%s\n", i.VCSRef)
	fmt.Fprintf(tw, "VCSCommit:\t%s\n", i.VCSCommit)
	fmt.Fprintf(tw, "VCSState:\t%s\n", i.VCSState)
	fmt.Fprintf(tw, "VCSDate:\t%s\n", i.VCSDate)
	fmt.Fprintf(tw, "Platform:\t%s\n", i.Platform)
	fmt.Fprintf(tw, "GoVer:\t%s\n", i.GoVer)
	fmt.Fprintf(tw, "GoCompiler:\t%s\n", i.GoCompiler)
	err := tw.Flush()
	return buf.Bytes(), err
}
diff --git a/vendor/github.com/regclient/regclient/internal/version/version_buildinfo.go b/vendor/github.com/regclient/regclient/internal/version/version_buildinfo.go
new file mode 100644
index 00000000..bf94cd6a
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/internal/version/version_buildinfo.go
@@ -0,0 +1,75 @@
//go:build go1.18
// +build go1.18

package version

import (
	"fmt"
	"runtime"
	"runtime/debug"
	"time"
)

var vcsTag = ""

type Info struct {
	GoVer      string           `json:"goVersion"`       // go version
	GoCompiler string           `json:"goCompiler"`      // go compiler
	Platform   string           `json:"platform"`        // os/arch
	VCSCommit  string           `json:"vcsCommit"`       // commit sha
	VCSDate    string           `json:"vcsDate"`         // commit date in RFC3339 format
	VCSRef     string           `json:"vcsRef"`          // commit sha + dirty if state is not clean
	VCSState   string           `json:"vcsState"`        // clean or dirty
	VCSTag     string           `json:"vcsTag"`          // tag is not available from Go
	Debug      *debug.BuildInfo `json:"debug,omitempty"` // build info debugging data
}

// GetInfo populates Info from the runtime and the embedded Go build info.
func GetInfo() Info {
	i := Info{
		GoVer:     unknown,
		Platform:  unknown,
		VCSCommit: unknown,
		VCSDate:   unknown,
		VCSRef:    unknown,
		VCSState:  unknown,
		VCSTag:    vcsTag,
	}

	i.GoVer = runtime.Version()
	i.GoCompiler = runtime.Compiler
	i.Platform = fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)

	if bi, ok := debug.ReadBuildInfo(); ok && bi != nil {
		i.Debug = bi
		if i.VCSTag == "" {
			i.VCSTag = bi.Main.Version
		}
		date := biSetting(bi, biVCSDate)
		if t, err := time.Parse(time.RFC3339, date); err == nil {
			i.VCSDate = t.UTC().Format(time.RFC3339)
		}
		i.VCSCommit = biSetting(bi, biVCSCommit)
		i.VCSRef = i.VCSCommit
		modified := biSetting(bi, biVCSModified)
		if modified == "true" {
			i.VCSState = stateDirty
			i.VCSRef += "-" + stateDirty
		} else if modified == "false" {
			i.VCSState = stateClean
		}
	}

	return i
}

// biSetting looks up a single key in the build info settings, returning
// "unknown" when missing.
func biSetting(bi *debug.BuildInfo, key string) string {
	if bi == nil {
		return unknown
	}
	for _, setting := range bi.Settings {
		if setting.Key == key {
			return setting.Value
		}
	}
	return unknown
}
diff --git a/vendor/github.com/regclient/regclient/internal/version/version_old.go b/vendor/github.com/regclient/regclient/internal/version/version_old.go
new file mode 100644
index 00000000..83f35dec
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/internal/version/version_old.go
@@ -0,0 +1,38 @@
//go:build !go1.18
// +build !go1.18

package version

import (
	"fmt"
	"runtime"
)

type Info struct {
	GoVer      string `json:"goVersion"`  // go version
	GoCompiler string `json:"goCompiler"` // go compiler
	Platform   string `json:"platform"`   // os/arch
	VCSCommit  string `json:"vcsCommit"`  // commit sha
	VCSDate    string `json:"vcsDate"`    // commit date in RFC3339 format
	VCSRef     string `json:"vcsRef"`     // commit sha + dirty if state is not clean
	VCSState   string `json:"vcsState"`   // clean or dirty
	VCSTag     string `json:"vcsTag"`     // tag
}

// GetInfo returns runtime details only; VCS data needs go1.18+ build info.
func GetInfo() Info {
	i := Info{
		GoVer:     unknown,
		Platform:  unknown,
		VCSCommit: unknown,
		VCSDate:   unknown,
		VCSRef:    unknown,
		VCSState:  unknown,
		VCSTag:    "",
	}

	i.GoVer = runtime.Version()
	i.GoCompiler = runtime.Compiler
	i.Platform = fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)

	return i
}
diff --git a/vendor/github.com/regclient/regclient/manifest.go b/vendor/github.com/regclient/regclient/manifest.go
new file mode 100644
index 00000000..285f72e5
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/manifest.go
@@ -0,0 +1,206 @@
package regclient

import (
	"context"
	"fmt"
	"log/slog"

	"github.com/regclient/regclient/scheme"
	"github.com/regclient/regclient/types/descriptor"
	"github.com/regclient/regclient/types/errs"
	"github.com/regclient/regclient/types/manifest"
	"github.com/regclient/regclient/types/platform"
	"github.com/regclient/regclient/types/ref"
	"github.com/regclient/regclient/types/warning"
)

// manifestOpt holds the merged settings from the ManifestOpts functions.
type manifestOpt struct {
	d             descriptor.Descriptor
	platform      *platform.Platform
	schemeOpts    []scheme.ManifestOpts
	requireDigest bool
}

// ManifestOpts define options for the Manifest* commands.
type ManifestOpts func(*manifestOpt)

// WithManifest passes a manifest to ManifestDelete.
func WithManifest(m manifest.Manifest) ManifestOpts {
	return func(opts *manifestOpt) {
		opts.schemeOpts = append(opts.schemeOpts, scheme.WithManifest(m))
	}
}

// WithManifestCheckReferrers checks for referrers field on ManifestDelete.
// This will update the client managed referrer listing.
func WithManifestCheckReferrers() ManifestOpts {
	return func(opts *manifestOpt) {
		opts.schemeOpts = append(opts.schemeOpts, scheme.WithManifestCheckReferrers())
	}
}

// WithManifestChild for ManifestPut indicates the manifest is not the top level manifest being copied.
// This is used by the ocidir scheme to determine what entries to include in the index.json.
func WithManifestChild() ManifestOpts {
	return func(opts *manifestOpt) {
		opts.schemeOpts = append(opts.schemeOpts, scheme.WithManifestChild())
	}
}

// WithManifestDesc includes the descriptor for ManifestGet.
// This is used to automatically extract a Data field if available.
func WithManifestDesc(d descriptor.Descriptor) ManifestOpts {
	return func(opts *manifestOpt) {
		opts.d = d
	}
}

// WithManifestPlatform resolves the platform specific manifest on Get and Head requests.
// This causes an additional GET query to a registry when an Index or Manifest List is encountered.
// This option is ignored if the retrieved manifest is not an Index or Manifest List.
func WithManifestPlatform(p platform.Platform) ManifestOpts {
	return func(opts *manifestOpt) {
		opts.platform = &p
	}
}

// WithManifestRequireDigest falls back from a HEAD to a GET request when digest headers aren't received.
+func WithManifestRequireDigest() ManifestOpts { + return func(opts *manifestOpt) { + opts.requireDigest = true + } +} + +// ManifestDelete removes a manifest, including all tags pointing to that registry. +// The reference must include the digest to delete (see TagDelete for deleting a tag). +// All tags pointing to the manifest will be deleted. +func (rc *RegClient) ManifestDelete(ctx context.Context, r ref.Ref, opts ...ManifestOpts) error { + if !r.IsSet() { + return fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + opt := manifestOpt{schemeOpts: []scheme.ManifestOpts{}} + for _, fn := range opts { + fn(&opt) + } + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return err + } + return schemeAPI.ManifestDelete(ctx, r, opt.schemeOpts...) +} + +// ManifestGet retrieves a manifest. +func (rc *RegClient) ManifestGet(ctx context.Context, r ref.Ref, opts ...ManifestOpts) (manifest.Manifest, error) { + if !r.IsSet() { + return nil, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + opt := manifestOpt{schemeOpts: []scheme.ManifestOpts{}} + for _, fn := range opts { + fn(&opt) + } + if opt.d.Digest != "" { + r.Digest = opt.d.Digest.String() + data, err := opt.d.GetData() + if err == nil { + return manifest.New( + manifest.WithDesc(opt.d), + manifest.WithRaw(data), + manifest.WithRef(r), + ) + } + } + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return nil, err + } + m, err := schemeAPI.ManifestGet(ctx, r) + if err != nil { + return m, err + } + if opt.platform != nil && !m.IsList() { + rc.slog.Debug("ignoring platform option, image is not an index", + slog.String("platform", opt.platform.String()), + slog.String("ref", r.CommonName())) + } + // this will loop to handle a nested index + for opt.platform != nil && m.IsList() { + 
d, err := manifest.GetPlatformDesc(m, opt.platform) + if err != nil { + return m, err + } + r = r.SetDigest(d.Digest.String()) + m, err = schemeAPI.ManifestGet(ctx, r) + if err != nil { + return m, err + } + } + return m, err +} + +// ManifestHead queries for the existence of a manifest and returns metadata (digest, media-type, size). +func (rc *RegClient) ManifestHead(ctx context.Context, r ref.Ref, opts ...ManifestOpts) (manifest.Manifest, error) { + if !r.IsSet() { + return nil, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + opt := manifestOpt{schemeOpts: []scheme.ManifestOpts{}} + for _, fn := range opts { + fn(&opt) + } + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return nil, err + } + m, err := schemeAPI.ManifestHead(ctx, r) + if err != nil { + return m, err + } + if opt.platform != nil && !m.IsList() { + rc.slog.Debug("ignoring platform option, image is not an index", + slog.String("platform", opt.platform.String()), + slog.String("ref", r.CommonName())) + } + // this will loop to handle a nested index + for opt.platform != nil && m.IsList() { + if !m.IsSet() { + m, err = schemeAPI.ManifestGet(ctx, r) + } + d, err := manifest.GetPlatformDesc(m, opt.platform) + if err != nil { + return m, err + } + r = r.SetDigest(d.Digest.String()) + m, err = schemeAPI.ManifestHead(ctx, r) + if err != nil { + return m, err + } + } + if opt.requireDigest && m.GetDescriptor().Digest.String() == "" { + m, err = schemeAPI.ManifestGet(ctx, r) + } + return m, err +} + +// ManifestPut pushes a manifest. +// Any descriptors referenced by the manifest typically need to be pushed first. 
func (rc *RegClient) ManifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest, opts ...ManifestOpts) error {
	// a tag or digest is not required here, only the repository must be set
	if !r.IsSetRepo() {
		return fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference)
	}
	opt := manifestOpt{schemeOpts: []scheme.ManifestOpts{}}
	for _, fn := range opts {
		fn(&opt)
	}
	schemeAPI, err := rc.schemeGet(r.Scheme)
	if err != nil {
		return err
	}
	return schemeAPI.ManifestPut(ctx, r, m, opt.schemeOpts...)
}
diff --git a/vendor/github.com/regclient/regclient/ping.go b/vendor/github.com/regclient/regclient/ping.go
new file mode 100644
index 00000000..4298f7ac
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/ping.go
@@ -0,0 +1,18 @@
package regclient

import (
	"context"

	"github.com/regclient/regclient/types/ping"
	"github.com/regclient/regclient/types/ref"
)

// Ping verifies access to a registry or equivalent.
func (rc *RegClient) Ping(ctx context.Context, r ref.Ref) (ping.Result, error) {
	schemeAPI, err := rc.schemeGet(r.Scheme)
	if err != nil {
		return ping.Result{}, err
	}

	return schemeAPI.Ping(ctx, r)
}
diff --git a/vendor/github.com/regclient/regclient/pkg/archive/archive.go b/vendor/github.com/regclient/regclient/pkg/archive/archive.go
new file mode 100644
index 00000000..d183afd4
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/pkg/archive/archive.go
@@ -0,0 +1,2 @@
// Package archive is used to read and write tar files
package archive
diff --git a/vendor/github.com/regclient/regclient/pkg/archive/compress.go b/vendor/github.com/regclient/regclient/pkg/archive/compress.go
new file mode 100644
index 00000000..84199943
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/pkg/archive/compress.go
@@ -0,0 +1,160 @@
package archive

import (
	"bufio"
	"bytes"
	"compress/bzip2"
	"compress/gzip"
	"errors"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
	"github.com/ulikunitz/xz"
)

// CompressType identifies the detected compression type
type CompressType int

const (
	CompressNone  CompressType = iota // uncompressed or unable to detect compression
	CompressBzip2                     // bzip2
	CompressGzip                      // gzip
	CompressXz                        // xz
	CompressZstd                      // zstd
)

// compressHeaders are used to detect the compression type
var compressHeaders = map[CompressType][]byte{
	CompressBzip2: []byte("\x42\x5A\x68"),
	CompressGzip:  []byte("\x1F\x8B\x08"),
	CompressXz:    []byte("\xFD\x37\x7A\x58\x5A\x00"),
	CompressZstd:  []byte("\x28\xB5\x2F\xFD"),
}

// Compress wraps a reader so its contents are compressed with the requested
// algorithm; CompressNone passes the stream through unchanged.
func Compress(r io.Reader, oComp CompressType) (io.ReadCloser, error) {
	switch oComp {
	// note, bzip2 compression is not supported
	case CompressGzip:
		return writeToRead(r, newGzipWriter)
	case CompressXz:
		return writeToRead(r, xz.NewWriter)
	case CompressZstd:
		return writeToRead(r, newZstdWriter)
	case CompressNone:
		return io.NopCloser(r), nil
	default:
		return nil, ErrUnknownType
	}
}

// newGzipWriter generates a writer and an always nil error.
func newGzipWriter(w io.Writer) (io.WriteCloser, error) {
	return gzip.NewWriter(w), nil
}

// newZstdWriter generates a writer with the default options.
func newZstdWriter(w io.Writer) (io.WriteCloser, error) {
	return zstd.NewWriter(w)
}

// writeToRead uses a pipe + goroutine + copy to switch from a writer to a reader.
func writeToRead[wc io.WriteCloser](src io.Reader, newWriterFn func(io.Writer) (wc, error)) (io.ReadCloser, error) {
	pr, pw := io.Pipe()
	go func() {
		// buffer output to avoid lots of small reads
		bw := bufio.NewWriterSize(pw, 2<<16)
		dest, err := newWriterFn(bw)
		if err != nil {
			_ = pw.CloseWithError(err)
			return
		}
		if _, err := io.Copy(dest, src); err != nil {
			_ = pw.CloseWithError(err)
		}
		if err := dest.Close(); err != nil {
			_ = pw.CloseWithError(err)
		}
		if err := bw.Flush(); err != nil {
			_ = pw.CloseWithError(err)
		}
		_ = pw.Close()
	}()
	return pr, nil
}

// Decompress extracts gzip and bzip streams
func Decompress(r io.Reader) (io.Reader, error) {
	// create bufio to peek on first few bytes
	br := bufio.NewReader(r)
	head, err := br.Peek(10)
	if err != nil && !errors.Is(err, io.EOF) {
		return br, fmt.Errorf("failed to detect compression: %w", err)
	}

	// compare peeked data against known compression types
	switch DetectCompression(head) {
	case CompressBzip2:
		return bzip2.NewReader(br), nil
	case CompressGzip:
		return gzip.NewReader(br)
	case CompressXz:
		return xz.NewReader(br)
	case CompressZstd:
		return zstd.NewReader(br)
	default:
		// unknown header, assume the stream is uncompressed
		return br, nil
	}
}

// DetectCompression identifies the compression type based on the first few bytes
func DetectCompression(head []byte) CompressType {
	for c, b := range compressHeaders {
		if bytes.HasPrefix(head, b) {
			return c
		}
	}
	return CompressNone
}

// String implements fmt.Stringer via MarshalText.
func (ct CompressType) String() string {
	mt, err := ct.MarshalText()
	if err != nil {
		return "unknown"
	}
	return string(mt)
}

// MarshalText implements encoding.TextMarshaler.
func (ct CompressType) MarshalText() ([]byte, error) {
	switch ct {
	case CompressNone:
		return []byte("none"), nil
	case CompressBzip2:
		return []byte("bzip2"), nil
	case CompressGzip:
		return []byte("gzip"), nil
	case CompressXz:
		return []byte("xz"), nil
	case CompressZstd:
		return []byte("zstd"), nil
	}
	return nil, fmt.Errorf("unknown compression type")
}

// UnmarshalText implements encoding.TextUnmarshaler.
func (ct *CompressType) UnmarshalText(text []byte) error {
	switch string(text) {
	case "none":
		*ct = CompressNone
	case "bzip2":
		*ct = CompressBzip2
	case "gzip":
		*ct = CompressGzip
	case "xz":
		*ct = CompressXz
	case "zstd":
		*ct = CompressZstd
	default:
		return fmt.Errorf("unknown compression type %s", string(text))
	}
	return nil
}
diff --git a/vendor/github.com/regclient/regclient/pkg/archive/errors.go b/vendor/github.com/regclient/regclient/pkg/archive/errors.go
new file mode 100644
index 00000000..b3198900
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/pkg/archive/errors.go
@@ -0,0 +1,13 @@
package archive

import "errors"

var (
	// ErrNotImplemented used for routines that need to be developed still
	ErrNotImplemented = errors.New("this archive routine is not implemented yet")
	// ErrUnknownType used for unknown compression types
	ErrUnknownType = errors.New("unknown compression type")
	// ErrXzUnsupported because there isn't a Go package for this and I'm
	// avoiding dependencies on external binaries
	ErrXzUnsupported = errors.New("xz compression is currently unsupported")
)
diff --git a/vendor/github.com/regclient/regclient/pkg/archive/tar.go b/vendor/github.com/regclient/regclient/pkg/archive/tar.go
new file mode 100644
index 00000000..b6a18ce3
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/pkg/archive/tar.go
@@ -0,0 +1,170 @@
package archive

import (
	"archive/tar"
	"compress/gzip"
	"context"
	"fmt"
	"io"
	"io/fs"
	"math"
	"os"
	"path/filepath"
	"time"
)

// TarOpts configures options for Create/Extract tar
type TarOpts func(*tarOpts)

// TODO: add support for compressed files with bzip
type tarOpts struct {
	// allowRelative bool // allow relative paths outside of target folder
	compress string
}

// TarCompressGzip option to use gzip compression on tar files
func TarCompressGzip(to *tarOpts) {
	to.compress = "gzip"
}

// TarUncompressed option to tar
(noop) +func TarUncompressed(to *tarOpts) { +} + +// TODO: add option for full path or to adjust the relative path + +// Tar creation +func Tar(ctx context.Context, path string, w io.Writer, opts ...TarOpts) error { + to := tarOpts{} + for _, opt := range opts { + opt(&to) + } + + twOut := w + if to.compress == "gzip" { + gw := gzip.NewWriter(w) + defer gw.Close() + twOut = gw + } + + tw := tar.NewWriter(twOut) + defer tw.Close() + + // walk the path performing a recursive tar + err := filepath.Walk(path, func(file string, fi os.FileInfo, err error) error { + // return any errors filepath encounters accessing the file + if err != nil { + return err + } + + // TODO: handle symlinks, security attributes, hard links + // TODO: add options for file owner and timestamps + // TODO: add options to override time, or disable access/change stamps + + // adjust for relative path + relPath, err := filepath.Rel(path, file) + if err != nil || relPath == "." { + return nil + } + + header, err := tar.FileInfoHeader(fi, relPath) + if err != nil { + return err + } + + header.Format = tar.FormatPAX + header.Name = filepath.ToSlash(relPath) + header.AccessTime = time.Time{} + header.ChangeTime = time.Time{} + header.ModTime = header.ModTime.Truncate(time.Second) + + if err = tw.WriteHeader(header); err != nil { + return err + } + + // open file and copy contents into tar writer + if header.Typeflag == tar.TypeReg && header.Size > 0 { + //#nosec G304 filename is limited to provided path directory + f, err := os.Open(file) + if err != nil { + return err + } + if _, err = io.Copy(tw, f); err != nil { + return err + } + err = f.Close() + if err != nil { + return fmt.Errorf("failed to close file: %w", err) + } + } + return nil + }) + return err +} + +// Extract Tar +func Extract(ctx context.Context, path string, r io.Reader, opts ...TarOpts) error { + to := tarOpts{} + for _, opt := range opts { + opt(&to) + } + + // verify path exists + fi, err := os.Stat(path) + if err != nil { + return 
err + } + if !fi.IsDir() { + return fmt.Errorf("extract path must be a directory: \"%s\"", path) + } + + // decompress + rd, err := Decompress(r) + if err != nil { + return err + } + + rt := tar.NewReader(rd) + for { + hdr, err := rt.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + // join a cleaned version of the filename with the path + fn := filepath.Join(path, filepath.Clean("/"+hdr.Name)) + switch hdr.Typeflag { + case tar.TypeDir: + if hdr.Mode < 0 || hdr.Mode > math.MaxUint32 { + return fmt.Errorf("integer conversion overflow/underflow (file mode = %d)", hdr.Mode) + } + err = os.MkdirAll(fn, fs.FileMode(hdr.Mode)) + if err != nil { + return err + } + case tar.TypeReg: + // TODO: configure file mode, creation timestamp, etc + //#nosec G304 filename is limited to provided path directory + fh, err := os.Create(fn) + if err != nil { + return err + } + n, err := io.CopyN(fh, rt, hdr.Size) + errC := fh.Close() + if err != nil { + return err + } + if errC != nil { + return fmt.Errorf("failed to close file: %w", errC) + } + if n != hdr.Size { + return fmt.Errorf("size mismatch extracting \"%s\", expected %d, extracted %d", hdr.Name, hdr.Size, n) + } + // TODO: handle other tar types (symlinks, etc) + } + } + + return nil +} diff --git a/vendor/github.com/regclient/regclient/referrer.go b/vendor/github.com/regclient/regclient/referrer.go new file mode 100644 index 00000000..2c168124 --- /dev/null +++ b/vendor/github.com/regclient/regclient/referrer.go @@ -0,0 +1,57 @@ +package regclient + +import ( + "context" + "fmt" + + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/platform" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/referrer" + "github.com/regclient/regclient/types/warning" +) + +// ReferrerList retrieves a list of referrers to a manifest. 
+// The descriptor list should contain manifests that each have a subject field matching the requested ref. +func (rc *RegClient) ReferrerList(ctx context.Context, rSubject ref.Ref, opts ...scheme.ReferrerOpts) (referrer.ReferrerList, error) { + if !rSubject.IsSet() { + return referrer.ReferrerList{}, fmt.Errorf("ref is not set: %s%.0w", rSubject.CommonName(), errs.ErrInvalidReference) + } + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + // set the digest on the subject reference + config := scheme.ReferrerConfig{} + for _, opt := range opts { + opt(&config) + } + if rSubject.Digest == "" || config.Platform != "" { + mo := []ManifestOpts{WithManifestRequireDigest()} + if config.Platform != "" { + p, err := platform.Parse(config.Platform) + if err != nil { + return referrer.ReferrerList{}, fmt.Errorf("failed to lookup referrer platform: %w", err) + } + mo = append(mo, WithManifestPlatform(p)) + } + m, err := rc.ManifestHead(ctx, rSubject, mo...) + if err != nil { + return referrer.ReferrerList{}, fmt.Errorf("failed to get digest for subject: %w", err) + } + rSubject = rSubject.SetDigest(m.GetDescriptor().Digest.String()) + } + // lookup the scheme for the appropriate ref + var r ref.Ref + if config.SrcRepo.IsSet() { + r = config.SrcRepo + } else { + r = rSubject + } + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return referrer.ReferrerList{}, err + } + return schemeAPI.ReferrerList(ctx, rSubject, opts...) +} diff --git a/vendor/github.com/regclient/regclient/regclient.go b/vendor/github.com/regclient/regclient/regclient.go new file mode 100644 index 00000000..c1a34218 --- /dev/null +++ b/vendor/github.com/regclient/regclient/regclient.go @@ -0,0 +1,273 @@ +// Package regclient is used to access OCI registries. 
+package regclient + +import ( + "io" + "log/slog" + "time" + + "fmt" + + "github.com/regclient/regclient/config" + "github.com/regclient/regclient/internal/version" + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/scheme/ocidir" + "github.com/regclient/regclient/scheme/reg" +) + +const ( + // DefaultUserAgent sets the header on http requests. + DefaultUserAgent = "regclient/regclient" + // DockerCertDir default location for docker certs. + DockerCertDir = "/etc/docker/certs.d" + // DockerRegistry is the well known name of Docker Hub, "docker.io". + DockerRegistry = config.DockerRegistry + // DockerRegistryAuth is the name of Docker Hub seen in docker's config.json. + DockerRegistryAuth = config.DockerRegistryAuth + // DockerRegistryDNS is the actual registry DNS name for Docker Hub. + DockerRegistryDNS = config.DockerRegistryDNS +) + +// RegClient is used to access OCI distribution-spec registries. +type RegClient struct { + hosts map[string]*config.Host + hostDefault *config.Host + regOpts []reg.Opts + schemes map[string]scheme.API + slog *slog.Logger + userAgent string +} + +// Opt functions are used by [New] to create a [*RegClient]. +type Opt func(*RegClient) + +// New returns a registry client. 
+func New(opts ...Opt) *RegClient { + var rc = RegClient{ + hosts: map[string]*config.Host{}, + userAgent: DefaultUserAgent, + regOpts: []reg.Opts{}, + schemes: map[string]scheme.API{}, + slog: slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{})), + } + + info := version.GetInfo() + if info.VCSTag != "" { + rc.userAgent = fmt.Sprintf("%s (%s)", rc.userAgent, info.VCSTag) + } else { + rc.userAgent = fmt.Sprintf("%s (%s)", rc.userAgent, info.VCSRef) + } + + // inject Docker Hub settings + _ = rc.hostSet(*config.HostNewName(config.DockerRegistryAuth)) + + for _, opt := range opts { + opt(&rc) + } + + // configure regOpts + hostList := []*config.Host{} + for _, h := range rc.hosts { + hostList = append(hostList, h) + } + rc.regOpts = append(rc.regOpts, + reg.WithConfigHosts(hostList), + reg.WithConfigHostDefault(rc.hostDefault), + reg.WithSlog(rc.slog), + reg.WithUserAgent(rc.userAgent), + ) + + // setup scheme's + rc.schemes["reg"] = reg.New(rc.regOpts...) + rc.schemes["ocidir"] = ocidir.New( + ocidir.WithSlog(rc.slog), + ) + + rc.slog.Debug("regclient initialized", + slog.String("VCSRef", info.VCSRef), + slog.String("VCSTag", info.VCSTag)) + + return &rc +} + +// WithBlobLimit sets the max size for chunked blob uploads which get stored in memory. +// +// Deprecated: replace with WithRegOpts(reg.WithBlobLimit(limit)), see [WithRegOpts] and [reg.WithBlobLimit]. +func WithBlobLimit(limit int64) Opt { + return func(rc *RegClient) { + rc.regOpts = append(rc.regOpts, reg.WithBlobLimit(limit)) + } +} + +// WithBlobSize overrides default blob sizes. +// +// Deprecated: replace with WithRegOpts(reg.WithBlobSize(chunk, max)), see [WithRegOpts] and [reg.WithBlobSize]. +func WithBlobSize(chunk, max int64) Opt { + return func(rc *RegClient) { + rc.regOpts = append(rc.regOpts, reg.WithBlobSize(chunk, max)) + } +} + +// WithCertDir adds a path of certificates to trust similar to Docker's /etc/docker/certs.d. 
+// +// Deprecated: replace with WithRegOpts(reg.WithCertDirs(path)), see [WithRegOpts] and [reg.WithCertDirs]. +func WithCertDir(path ...string) Opt { + return func(rc *RegClient) { + rc.regOpts = append(rc.regOpts, reg.WithCertDirs(path)) + } +} + +// WithConfigHost adds a list of config host settings. +func WithConfigHost(configHost ...config.Host) Opt { + return func(rc *RegClient) { + rc.hostLoad("host", configHost) + } +} + +// WithConfigHostDefault adds default settings for new hosts. +func WithConfigHostDefault(configHost config.Host) Opt { + return func(rc *RegClient) { + rc.hostDefault = &configHost + } +} + +// WithConfigHosts adds a list of config host settings. +// +// Deprecated: replace with [WithConfigHost]. +func WithConfigHosts(configHosts []config.Host) Opt { + return WithConfigHost(configHosts...) +} + +// WithDockerCerts adds certificates trusted by docker in /etc/docker/certs.d. +func WithDockerCerts() Opt { + return WithCertDir(DockerCertDir) +} + +// WithDockerCreds adds configuration from users docker config with registry logins. +// This changes the default value from the config file, and should be added after the config file is loaded. +func WithDockerCreds() Opt { + return func(rc *RegClient) { + configHosts, err := config.DockerLoad() + if err != nil { + rc.slog.Warn("Failed to load docker creds", + slog.String("err", err.Error())) + return + } + rc.hostLoad("docker", configHosts) + } +} + +// WithDockerCredsFile adds configuration from a named docker config file with registry logins. +// This changes the default value from the config file, and should be added after the config file is loaded. +func WithDockerCredsFile(fname string) Opt { + return func(rc *RegClient) { + configHosts, err := config.DockerLoadFile(fname) + if err != nil { + rc.slog.Warn("Failed to load docker creds", + slog.String("err", err.Error())) + return + } + rc.hostLoad("docker-file", configHosts) + } +} + +// WithRegOpts passes through opts to the reg scheme. 
+func WithRegOpts(opts ...reg.Opts) Opt { + return func(rc *RegClient) { + if len(opts) == 0 { + return + } + rc.regOpts = append(rc.regOpts, opts...) + } +} + +// WithRetryDelay specifies the time permitted for retry delays. +// +// Deprecated: replace with WithRegOpts(reg.WithDelay(delayInit, delayMax)), see [WithRegOpts] and [reg.WithDelay]. +func WithRetryDelay(delayInit, delayMax time.Duration) Opt { + return func(rc *RegClient) { + rc.regOpts = append(rc.regOpts, reg.WithDelay(delayInit, delayMax)) + } +} + +// WithRetryLimit specifies the number of retries for non-fatal errors. +// +// Deprecated: replace with WithRegOpts(reg.WithRetryLimit(retryLimit)), see [WithRegOpts] and [reg.WithRetryLimit]. +func WithRetryLimit(retryLimit int) Opt { + return func(rc *RegClient) { + rc.regOpts = append(rc.regOpts, reg.WithRetryLimit(retryLimit)) + } +} + +// WithSlog configures the slog Logger. +func WithSlog(slog *slog.Logger) Opt { + return func(rc *RegClient) { + rc.slog = slog + } +} + +// WithUserAgent specifies the User-Agent http header. 
+func WithUserAgent(ua string) Opt { + return func(rc *RegClient) { + rc.userAgent = ua + } +} + +func (rc *RegClient) hostLoad(src string, hosts []config.Host) { + for _, configHost := range hosts { + if configHost.Name == "" { + if configHost.Pass != "" { + configHost.Pass = "***" + } + if configHost.Token != "" { + configHost.Token = "***" + } + rc.slog.Warn("Ignoring registry config without a name", + slog.Any("entry", configHost)) + continue + } + if configHost.Name == DockerRegistry || configHost.Name == DockerRegistryDNS || configHost.Name == DockerRegistryAuth { + configHost.Name = DockerRegistry + if configHost.Hostname == "" || configHost.Hostname == DockerRegistry || configHost.Hostname == DockerRegistryAuth { + configHost.Hostname = DockerRegistryDNS + } + } + tls, _ := configHost.TLS.MarshalText() + rc.slog.Debug("Loading config", + slog.Int64("blobChunk", configHost.BlobChunk), + slog.Int64("blobMax", configHost.BlobMax), + slog.String("helper", configHost.CredHelper), + slog.String("hostname", configHost.Hostname), + slog.Any("mirrors", configHost.Mirrors), + slog.String("name", configHost.Name), + slog.String("pathPrefix", configHost.PathPrefix), + slog.Bool("repoAuth", configHost.RepoAuth), + slog.String("source", src), + slog.String("tls", string(tls)), + slog.String("user", configHost.User)) + err := rc.hostSet(configHost) + if err != nil { + rc.slog.Warn("Failed to update host config", + slog.String("host", configHost.Name), + slog.String("user", configHost.User), + slog.String("error", err.Error())) + } + } +} + +func (rc *RegClient) hostSet(newHost config.Host) error { + name := newHost.Name + var err error + if _, ok := rc.hosts[name]; !ok { + // merge newHost with default host settings + rc.hosts[name] = config.HostNewDefName(rc.hostDefault, name) + err = rc.hosts[name].Merge(newHost, nil) + } else { + // merge newHost with existing settings + err = rc.hosts[name].Merge(newHost, rc.slog) + } + if err != nil { + return err + } + return nil +} 
diff --git a/vendor/github.com/regclient/regclient/regclient_nowasm.go b/vendor/github.com/regclient/regclient/regclient_nowasm.go
new file mode 100644
index 00000000..d99ca1ed
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/regclient_nowasm.go
@@ -0,0 +1,20 @@
+//go:build !wasm
+// +build !wasm
+
+package regclient
+
+import (
+	"log/slog"
+
+	"github.com/sirupsen/logrus"
+
+	"github.com/regclient/regclient/internal/sloghandle"
+)
+
+// WithLog configures logging with a logrus Logger.
+// Note that regclient has switched to log/slog for logging and may eventually deprecate logrus support.
+func WithLog(log *logrus.Logger) Opt {
+	return func(rc *RegClient) {
+		rc.slog = slog.New(sloghandle.Logrus(log))
+	}
+}
diff --git a/vendor/github.com/regclient/regclient/release.md b/vendor/github.com/regclient/regclient/release.md
new file mode 100644
index 00000000..92649f6b
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/release.md
@@ -0,0 +1,74 @@
+# Release v0.8.0
+
+## Highlights
+
+There are three headline changes in this release: slog support, external referrers, and deprecating legacy packages.
+
+This release switches from logrus to slog.
+Migration methods are included to minimize the impact on existing users.
+Anyone parsing the logging output from regctl, regsync, and regbot will notice the format has changed.
+
+External referrers allow referrers to be pushed and pulled from a separate repository from the subject image.
+This feature requires users to provide the external repository themselves since a registry has no way to communicate this to the user.
+An example use case of this feature are third parties, like security scanners, providing attestations of images they do not control.
+
+Legacy packages have been disabled by default and will eventually be removed.
+To continue using legacy packages until their removal, you may compile with `-tags legacy`.
+ +## Breaking + +- Breaking: Warning handlers switched from `logrus` to `slog` which will only impact those with a custom warning handler. ([PR 847][pr-847]) +- Breaking: Disable legacy packages by default. ([PR 852][pr-852]) + +## Features + +- Feat: Refactor logging to use log/slog. ([PR 847][pr-847]) +- Feat: Switch regbot to slog. ([PR 849][pr-849]) +- Feat: Switch regctl to slog. ([PR 850][pr-850]) +- Feat: Switch regsync to slog. ([PR 851][pr-851]) +- Feat: Move logrus calls into files excluded by wasm. ([PR 853][pr-853]) +- Feat: Allow plus in ocidir path. ([PR 856][pr-856]) +- Feat: Support referrers in an external repository. ([PR 866][pr-866]) +- Feat: Image mod environment variables. ([PR 867][pr-867]) +- Feat: Include source in referrers response. ([PR 870][pr-870]) +- Feat: Add external flag to regctl artifact put. ([PR 873][pr-873]) +- Feat: Copy image with external referrers. ([PR 874][pr-874]) +- Feat: Document community maintained packages. ([PR 878][pr-878]) +- Feat: Support external referrers in regsync. ([PR 881][pr-881]) +- Feat: Support incomplete subject descriptor. ([PR 885][pr-885]) + +## Fixes + +- Fix: Inject release notes by file. ([PR 854][pr-854]) +- Fix: Platform test for darwin/macos should not add variant. ([PR 879][pr-879]) +- Fix: Handle repeated digest in copy with external referrers. ([PR 882][pr-882]) + +## Chores + +- Chore: Improve error message when inspecting artifacts. ([PR 862][pr-862]) +- Chore: Remove unused short arg parameters. 
([PR 877][pr-877]) + +## Contributors + +- @sudo-bmitch + +[pr-847]: https://github.com/regclient/regclient/pull/847 +[pr-849]: https://github.com/regclient/regclient/pull/849 +[pr-850]: https://github.com/regclient/regclient/pull/850 +[pr-851]: https://github.com/regclient/regclient/pull/851 +[pr-852]: https://github.com/regclient/regclient/pull/852 +[pr-853]: https://github.com/regclient/regclient/pull/853 +[pr-854]: https://github.com/regclient/regclient/pull/854 +[pr-856]: https://github.com/regclient/regclient/pull/856 +[pr-862]: https://github.com/regclient/regclient/pull/862 +[pr-866]: https://github.com/regclient/regclient/pull/866 +[pr-867]: https://github.com/regclient/regclient/pull/867 +[pr-870]: https://github.com/regclient/regclient/pull/870 +[pr-873]: https://github.com/regclient/regclient/pull/873 +[pr-874]: https://github.com/regclient/regclient/pull/874 +[pr-877]: https://github.com/regclient/regclient/pull/877 +[pr-878]: https://github.com/regclient/regclient/pull/878 +[pr-879]: https://github.com/regclient/regclient/pull/879 +[pr-881]: https://github.com/regclient/regclient/pull/881 +[pr-882]: https://github.com/regclient/regclient/pull/882 +[pr-885]: https://github.com/regclient/regclient/pull/885 diff --git a/vendor/github.com/regclient/regclient/repo.go b/vendor/github.com/regclient/regclient/repo.go new file mode 100644 index 00000000..c2a2d59f --- /dev/null +++ b/vendor/github.com/regclient/regclient/repo.go @@ -0,0 +1,33 @@ +package regclient + +import ( + "context" + "fmt" + "strings" + + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/repo" +) + +type repoLister interface { + RepoList(ctx context.Context, hostname string, opts ...scheme.RepoOpts) (*repo.RepoList, error) +} + +// RepoList returns a list of repositories on a registry. +// Note the underlying "_catalog" API is not supported on many cloud registries. 
+func (rc *RegClient) RepoList(ctx context.Context, hostname string, opts ...scheme.RepoOpts) (*repo.RepoList, error) { + i := strings.Index(hostname, "/") + if i > 0 { + return nil, fmt.Errorf("invalid hostname: %s%.0w", hostname, errs.ErrParsingFailed) + } + schemeAPI, err := rc.schemeGet("reg") + if err != nil { + return nil, err + } + rl, ok := schemeAPI.(repoLister) + if !ok { + return nil, errs.ErrNotImplemented + } + return rl.RepoList(ctx, hostname, opts...) +} diff --git a/vendor/github.com/regclient/regclient/scheme.go b/vendor/github.com/regclient/regclient/scheme.go new file mode 100644 index 00000000..a18a7af9 --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme.go @@ -0,0 +1,33 @@ +package regclient + +import ( + "context" + "fmt" + + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/ref" +) + +func (rc *RegClient) schemeGet(scheme string) (scheme.API, error) { + s, ok := rc.schemes[scheme] + if !ok { + return nil, fmt.Errorf("%w: unknown scheme \"%s\"", errs.ErrNotImplemented, scheme) + } + return s, nil +} + +// Close is used to free resources associated with a reference. +// With ocidir, this may trigger a garbage collection process. 
+func (rc *RegClient) Close(ctx context.Context, r ref.Ref) error { + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return err + } + // verify Closer api is defined, noop if missing + sc, ok := schemeAPI.(scheme.Closer) + if !ok { + return nil + } + return sc.Close(ctx, r) +} diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/blob.go b/vendor/github.com/regclient/regclient/scheme/ocidir/blob.go new file mode 100644 index 00000000..a06e922e --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/blob.go @@ -0,0 +1,159 @@ +package ocidir + +import ( + "context" + "errors" + "fmt" + "io" + "io/fs" + "log/slog" + "os" + "path" + + "github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/types/blob" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/ref" +) + +// BlobDelete removes a blob from the repository. +// This method does not verify that blobs are unused. +// Calling the [OCIDir.Close] method to trigger the garbage collection is preferred. 
+func (o *OCIDir) BlobDelete(ctx context.Context, r ref.Ref, d descriptor.Descriptor) error { + err := d.Digest.Validate() + if err != nil { + return fmt.Errorf("failed to validate digest %s: %w", d.Digest.String(), err) + } + file := path.Join(r.Path, "blobs", d.Digest.Algorithm().String(), d.Digest.Encoded()) + return os.Remove(file) +} + +// BlobGet retrieves a blob, returning a reader +func (o *OCIDir) BlobGet(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) { + err := d.Digest.Validate() + if err != nil { + return nil, fmt.Errorf("failed to validate digest %s: %w", d.Digest.String(), err) + } + file := path.Join(r.Path, "blobs", d.Digest.Algorithm().String(), d.Digest.Encoded()) + //#nosec G304 users should validate references they attempt to open + fd, err := os.Open(file) + if err != nil { + return nil, err + } + if d.Size <= 0 { + fi, err := fd.Stat() + if err != nil { + _ = fd.Close() + return nil, err + } + d.Size = fi.Size() + } + br := blob.NewReader( + blob.WithRef(r), + blob.WithReader(fd), + blob.WithDesc(d), + ) + o.slog.Debug("retrieved blob", + slog.String("ref", r.CommonName()), + slog.String("file", file)) + return br, nil +} + +// BlobHead verifies the existence of a blob, the reader contains the headers but no body to read +func (o *OCIDir) BlobHead(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) { + err := d.Digest.Validate() + if err != nil { + return nil, fmt.Errorf("failed to validate digest %s: %w", d.Digest.String(), err) + } + file := path.Join(r.Path, "blobs", d.Digest.Algorithm().String(), d.Digest.Encoded()) + //#nosec G304 users should validate references they attempt to open + fd, err := os.Open(file) + if err != nil { + return nil, err + } + defer fd.Close() + if d.Size <= 0 { + fi, err := fd.Stat() + if err != nil { + return nil, err + } + d.Size = fi.Size() + } + br := blob.NewReader( + blob.WithRef(r), + blob.WithDesc(d), + ) + return br, nil +} + +// BlobMount attempts 
to perform a server side copy of the blob +func (o *OCIDir) BlobMount(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d descriptor.Descriptor) error { + return errs.ErrUnsupported +} + +// BlobPut sends a blob to the repository, returns the digest and size when successful +func (o *OCIDir) BlobPut(ctx context.Context, r ref.Ref, d descriptor.Descriptor, rdr io.Reader) (descriptor.Descriptor, error) { + t := o.throttleGet(r, false) + done, err := t.Acquire(ctx, reqmeta.Data{Kind: reqmeta.Blob, Size: d.Size}) + if err != nil { + return d, err + } + defer done() + + err = o.initIndex(r, false) + if err != nil { + return d, err + } + digester := d.DigestAlgo().Digester() + rdr = io.TeeReader(rdr, digester.Hash()) + // write the blob to a tmp file + dir := path.Join(r.Path, "blobs", d.DigestAlgo().String()) + tmpPattern := "*.tmp" + //#nosec G301 defer to user umask settings + err = os.MkdirAll(dir, 0777) + if err != nil && !errors.Is(err, fs.ErrExist) { + return d, fmt.Errorf("failed creating %s: %w", dir, err) + } + tmpFile, err := os.CreateTemp(dir, tmpPattern) + if err != nil { + return d, fmt.Errorf("failed creating blob tmp file: %w", err) + } + fi, err := tmpFile.Stat() + if err != nil { + return d, fmt.Errorf("failed to stat blob tmpfile: %w", err) + } + tmpName := fi.Name() + i, err := io.Copy(tmpFile, rdr) + errC := tmpFile.Close() + if err != nil { + return d, err + } + if errC != nil { + return d, errC + } + // validate result matches descriptor, or update descriptor if it wasn't defined + if d.Digest.Validate() != nil { + d.Digest = digester.Digest() + } else if d.Digest != digester.Digest() { + return d, fmt.Errorf("unexpected digest, expected %s, computed %s", d.Digest, digester.Digest()) + } + if d.Size <= 0 { + d.Size = i + } else if i != d.Size { + return d, fmt.Errorf("unexpected blob length, expected %d, received %d", d.Size, i) + } + file := path.Join(r.Path, "blobs", d.Digest.Algorithm().String(), d.Digest.Encoded()) + err = 
os.Rename(path.Join(dir, tmpName), file) + if err != nil { + return d, fmt.Errorf("failed to write blob (rename tmp file %s to %s): %w", path.Join(dir, tmpName), file, err) + } + o.slog.Debug("pushed blob", + slog.String("ref", r.CommonName()), + slog.String("file", file)) + + o.mu.Lock() + o.refMod(r) + o.mu.Unlock() + return d, nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/close.go b/vendor/github.com/regclient/regclient/scheme/ocidir/close.go new file mode 100644 index 00000000..23eae0d1 --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/close.go @@ -0,0 +1,117 @@ +package ocidir + +import ( + "context" + "fmt" + "log/slog" + "os" + "path" + + "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/ref" +) + +// Close triggers a garbage collection if the underlying path has been modified +func (o *OCIDir) Close(ctx context.Context, r ref.Ref) error { + if !o.gc { + return nil + } + + o.mu.Lock() + defer o.mu.Unlock() + if gc, ok := o.modRefs[r.Path]; !ok || !gc.mod || gc.locks > 0 { + // unmodified or locked, skip gc + return nil + } + + // perform GC + o.slog.Debug("running GC", + slog.String("ref", r.CommonName())) + dl := map[string]bool{} + // recurse through index, manifests, and blob lists, generating a digest list + index, err := o.readIndex(r, true) + if err != nil { + return err + } + im, err := manifest.New(manifest.WithOrig(index)) + if err != nil { + return err + } + err = o.closeProcManifest(ctx, r, im, &dl) + if err != nil { + return err + } + + // go through filesystem digest list, removing entries not seen in recursive pass + blobsPath := path.Join(r.Path, "blobs") + blobDirs, err := os.ReadDir(blobsPath) + if err != nil { + return err + } + for _, blobDir := range blobDirs { + if !blobDir.IsDir() { + // should this warn or delete unexpected files in the blobs folder? 
+ continue + } + digestFiles, err := os.ReadDir(path.Join(blobsPath, blobDir.Name())) + if err != nil { + return err + } + for _, digestFile := range digestFiles { + digest := fmt.Sprintf("%s:%s", blobDir.Name(), digestFile.Name()) + if !dl[digest] { + o.slog.Debug("ocidir garbage collect", + slog.String("digest", digest)) + // delete + err = os.Remove(path.Join(blobsPath, blobDir.Name(), digestFile.Name())) + if err != nil { + return fmt.Errorf("failed to delete %s: %w", path.Join(blobsPath, blobDir.Name(), digestFile.Name()), err) + } + } + } + } + delete(o.modRefs, r.Path) + return nil +} + +func (o *OCIDir) closeProcManifest(ctx context.Context, r ref.Ref, m manifest.Manifest, dl *map[string]bool) error { + if mi, ok := m.(manifest.Indexer); ok { + // go through manifest list, updating dl, and recursively processing nested manifests + ml, err := mi.GetManifestList() + if err != nil { + return err + } + for _, cur := range ml { + cr := r.SetDigest(cur.Digest.String()) + (*dl)[cr.Digest] = true + cm, err := o.manifestGet(ctx, cr) + if err != nil { + // ignore errors in case a manifest has been deleted or sparse copy + o.slog.Debug("could not retrieve manifest", + slog.String("ref", cr.CommonName()), + slog.String("err", err.Error())) + continue + } + err = o.closeProcManifest(ctx, cr, cm, dl) + if err != nil { + return err + } + } + } + if mi, ok := m.(manifest.Imager); ok { + // get config from manifest if it exists + cd, err := mi.GetConfig() + if err == nil { + (*dl)[cd.Digest.String()] = true + } + // finally add all layers to digest list + layers, err := mi.GetLayers() + if err != nil { + return err + } + for _, layer := range layers { + (*dl)[layer.Digest.String()] = true + } + } + return nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/manifest.go b/vendor/github.com/regclient/regclient/scheme/ocidir/manifest.go new file mode 100644 index 00000000..c4cf0ee8 --- /dev/null +++ 
b/vendor/github.com/regclient/regclient/scheme/ocidir/manifest.go @@ -0,0 +1,299 @@ +package ocidir + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "log/slog" + "os" + "path" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/mediatype" + "github.com/regclient/regclient/types/ref" +) + +// ManifestDelete removes a manifest, including all tags that point to that manifest +func (o *OCIDir) ManifestDelete(ctx context.Context, r ref.Ref, opts ...scheme.ManifestOpts) error { + o.mu.Lock() + defer o.mu.Unlock() + + if r.Digest == "" { + return fmt.Errorf("digest required to delete manifest, reference %s%.0w", r.CommonName(), errs.ErrMissingDigest) + } + + mc := scheme.ManifestConfig{} + for _, opt := range opts { + opt(&mc) + } + + // always check for refers with ocidir + if mc.Manifest == nil { + m, err := o.manifestGet(ctx, r) + if err != nil { + return fmt.Errorf("failed to pull manifest for refers: %w", err) + } + mc.Manifest = m + } + if mc.Manifest != nil { + if ms, ok := mc.Manifest.(manifest.Subjecter); ok { + sDesc, err := ms.GetSubject() + if err == nil && sDesc != nil && sDesc.Digest != "" { + // attempt to delete the referrer, but ignore if the referrer entry wasn't found + err = o.referrerDelete(ctx, r, mc.Manifest) + if err != nil && !errors.Is(err, errs.ErrNotFound) && !errors.Is(err, fs.ErrNotExist) { + return err + } + } + } + } + + // get index + changed := false + index, err := o.readIndex(r, true) + if err != nil { + return fmt.Errorf("failed to read index: %w", err) + } + for i := len(index.Manifests) - 1; i >= 0; i-- { + // remove matching entry from index + if r.Digest != "" && index.Manifests[i].Digest.String() == r.Digest { + changed = true + index.Manifests = 
append(index.Manifests[:i], index.Manifests[i+1:]...) + } + } + // push manifest back out + if changed { + err = o.writeIndex(r, index, true) + if err != nil { + return fmt.Errorf("failed to write index: %w", err) + } + } + + // delete from filesystem like a registry would do + d := digest.Digest(r.Digest) + file := path.Join(r.Path, "blobs", d.Algorithm().String(), d.Encoded()) + err = os.Remove(file) + if err != nil { + return fmt.Errorf("failed to delete manifest: %w", err) + } + o.refMod(r) + return nil +} + +// ManifestGet retrieves a manifest from a repository +func (o *OCIDir) ManifestGet(ctx context.Context, r ref.Ref) (manifest.Manifest, error) { + o.mu.Lock() + defer o.mu.Unlock() + return o.manifestGet(ctx, r) +} + +func (o *OCIDir) manifestGet(_ context.Context, r ref.Ref) (manifest.Manifest, error) { + index, err := o.readIndex(r, true) + if err != nil { + return nil, fmt.Errorf("unable to read oci index: %w", err) + } + if r.Digest == "" && r.Tag == "" { + r.Tag = "latest" + } + desc, err := indexGet(index, r) + if err != nil { + if r.Digest != "" { + desc.Digest = digest.Digest(r.Digest) + } else { + return nil, err + } + } + if desc.Digest == "" { + return nil, errs.ErrNotFound + } + if err = desc.Digest.Validate(); err != nil { + return nil, fmt.Errorf("invalid digest in index: %s: %w", string(desc.Digest), err) + } + file := path.Join(r.Path, "blobs", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) + //#nosec G304 users should validate references they attempt to open + fd, err := os.Open(file) + if err != nil { + return nil, fmt.Errorf("failed to open manifest: %w", err) + } + defer fd.Close() + mb, err := io.ReadAll(fd) + if err != nil { + return nil, fmt.Errorf("failed to read manifest: %w", err) + } + if desc.Size == 0 { + desc.Size = int64(len(mb)) + } + o.slog.Debug("retrieved manifest", + slog.String("ref", r.CommonName()), + slog.String("file", file)) + return manifest.New( + manifest.WithRef(r), + manifest.WithDesc(desc), + 
manifest.WithRaw(mb), + ) +} + +// ManifestHead gets metadata about the manifest (existence, digest, mediatype, size) +func (o *OCIDir) ManifestHead(ctx context.Context, r ref.Ref) (manifest.Manifest, error) { + index, err := o.readIndex(r, false) + if err != nil { + return nil, fmt.Errorf("unable to read oci index: %w", err) + } + if r.Digest == "" && r.Tag == "" { + r.Tag = "latest" + } + desc, err := indexGet(index, r) + if err != nil { + if r.Digest != "" { + desc.Digest = digest.Digest(r.Digest) + } else { + return nil, err + } + } + if desc.Digest == "" { + return nil, errs.ErrNotFound + } + if err = desc.Digest.Validate(); err != nil { + return nil, fmt.Errorf("invalid digest in index: %s: %w", string(desc.Digest), err) + } + // verify underlying file exists + file := path.Join(r.Path, "blobs", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) + fi, err := os.Stat(file) + if err != nil || fi.IsDir() { + return nil, errs.ErrNotFound + } + // if missing, set media type on desc + if desc.MediaType == "" { + //#nosec G304 users should validate references they attempt to open + raw, err := os.ReadFile(file) + if err != nil { + return nil, err + } + mt := struct { + MediaType string `json:"mediaType,omitempty"` + SchemaVersion int `json:"schemaVersion,omitempty"` + Signatures []interface{} `json:"signatures,omitempty"` + }{} + err = json.Unmarshal(raw, &mt) + if err != nil { + return nil, err + } + if mt.MediaType != "" { + desc.MediaType = mt.MediaType + desc.Size = int64(len(raw)) + } else if mt.SchemaVersion == 1 && len(mt.Signatures) > 0 { + desc.MediaType = mediatype.Docker1ManifestSigned + } else if mt.SchemaVersion == 1 { + desc.MediaType = mediatype.Docker1Manifest + desc.Size = int64(len(raw)) + } + } + return manifest.New( + manifest.WithRef(r), + manifest.WithDesc(desc), + ) +} + +// ManifestPut sends a manifest to the repository +func (o *OCIDir) ManifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest, opts ...scheme.ManifestOpts) error 
{ + o.mu.Lock() + defer o.mu.Unlock() + return o.manifestPut(ctx, r, m, opts...) +} + +func (o *OCIDir) manifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest, opts ...scheme.ManifestOpts) error { + config := scheme.ManifestConfig{} + for _, opt := range opts { + opt(&config) + } + if !config.Child && r.Digest == "" && r.Tag == "" { + r.Tag = "latest" + } + err := o.initIndex(r, true) + if err != nil { + return err + } + desc := m.GetDescriptor() + if err = desc.Digest.Validate(); err != nil { + return fmt.Errorf("invalid digest for manifest: %s: %w", string(desc.Digest), err) + } + b, err := m.RawBody() + if err != nil { + return fmt.Errorf("could not serialize manifest: %w", err) + } + if r.Tag == "" { + // force digest to match manifest value + r.Digest = desc.Digest.String() + } + if r.Tag != "" { + desc.Annotations = map[string]string{ + aOCIRefName: r.Tag, + } + } + // create manifest CAS file + dir := path.Join(r.Path, "blobs", desc.Digest.Algorithm().String()) + //#nosec G301 defer to user umask settings + err = os.MkdirAll(dir, 0777) + if err != nil && !errors.Is(err, fs.ErrExist) { + return fmt.Errorf("failed creating %s: %w", dir, err) + } + // write to a tmp file, rename after validating + tmpFile, err := os.CreateTemp(dir, desc.Digest.Encoded()+".*.tmp") + if err != nil { + return fmt.Errorf("failed to create manifest tmpfile: %w", err) + } + fi, err := tmpFile.Stat() + if err != nil { + return fmt.Errorf("failed to stat manifest tmpfile: %w", err) + } + tmpName := fi.Name() + _, err = tmpFile.Write(b) + errC := tmpFile.Close() + if err != nil { + return fmt.Errorf("failed to write manifest tmpfile: %w", err) + } + if errC != nil { + return fmt.Errorf("failed to close manifest tmpfile: %w", errC) + } + file := path.Join(dir, desc.Digest.Encoded()) + err = os.Rename(path.Join(dir, tmpName), file) + if err != nil { + return fmt.Errorf("failed to write manifest (rename tmpfile): %w", err) + } + + // verify/update index + err = o.updateIndex(r, 
desc, config.Child, true) + if err != nil { + return err + } + o.refMod(r) + o.slog.Debug("pushed manifest", + slog.String("ref", r.CommonName()), + slog.String("file", file)) + + // update referrers if defined on this manifest + if ms, ok := m.(manifest.Subjecter); ok { + mDesc, err := ms.GetSubject() + if err != nil { + return err + } + if mDesc != nil && mDesc.Digest != "" { + err = o.referrerPut(ctx, r, m) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/ocidir.go b/vendor/github.com/regclient/regclient/scheme/ocidir/ocidir.go new file mode 100644 index 00000000..2dcf189d --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/ocidir.go @@ -0,0 +1,406 @@ +// Package ocidir implements the OCI Image Layout scheme with a directory (not packed in a tar) +package ocidir + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "log/slog" + "os" + "path" + "strings" + "sync" + + "github.com/regclient/regclient/internal/pqueue" + "github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" + v1 "github.com/regclient/regclient/types/oci/v1" + "github.com/regclient/regclient/types/ref" +) + +const ( + imageLayoutFile = "oci-layout" + aOCIRefName = "org.opencontainers.image.ref.name" + aCtrdImageName = "io.containerd.image.name" + defThrottle = 3 +) + +// OCIDir is used for accessing OCI Image Layouts defined as a directory +type OCIDir struct { + slog *slog.Logger + gc bool + modRefs map[string]*ociGC + throttle map[string]*pqueue.Queue[reqmeta.Data] + throttleDef int + mu sync.Mutex +} + +type ociGC struct { + mod bool + locks int +} + +type ociConf struct { + gc bool + slog *slog.Logger + throttle int +} + +// Opts are used for passing options to ocidir +type Opts func(*ociConf) + +// New creates a new OCIDir with options +func 
New(opts ...Opts) *OCIDir { + conf := ociConf{ + slog: slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{})), + gc: true, + throttle: defThrottle, + } + for _, opt := range opts { + opt(&conf) + } + return &OCIDir{ + slog: conf.slog, + gc: conf.gc, + modRefs: map[string]*ociGC{}, + throttle: map[string]*pqueue.Queue[reqmeta.Data]{}, + throttleDef: conf.throttle, + } +} + +// WithGC configures the garbage collection setting +// This defaults to enabled +func WithGC(gc bool) Opts { + return func(c *ociConf) { + c.gc = gc + } +} + +// WithSlog provides a slog logger. +// By default logging is disabled. +func WithSlog(slog *slog.Logger) Opts { + return func(c *ociConf) { + c.slog = slog + } +} + +// WithThrottle provides a number of concurrent write actions (blob/manifest put) +func WithThrottle(count int) Opts { + return func(c *ociConf) { + c.throttle = count + } +} + +// GCLock is used to prevent GC on a ref +func (o *OCIDir) GCLock(r ref.Ref) { + o.mu.Lock() + defer o.mu.Unlock() + if gc, ok := o.modRefs[r.Path]; ok && gc != nil { + gc.locks++ + } else { + o.modRefs[r.Path] = &ociGC{locks: 1} + } +} + +// GCUnlock removes a hold on GC of a ref, this must be done before the ref is closed +func (o *OCIDir) GCUnlock(r ref.Ref) { + o.mu.Lock() + defer o.mu.Unlock() + if gc, ok := o.modRefs[r.Path]; ok && gc != nil && gc.locks > 0 { + gc.locks-- + } +} + +// Throttle is used to limit concurrency +func (o *OCIDir) Throttle(r ref.Ref, put bool) []*pqueue.Queue[reqmeta.Data] { + tList := []*pqueue.Queue[reqmeta.Data]{} + // throttle only applies to put requests + if !put || o.throttleDef <= 0 { + return tList + } + return []*pqueue.Queue[reqmeta.Data]{o.throttleGet(r, false)} +} + +func (o *OCIDir) throttleGet(r ref.Ref, locked bool) *pqueue.Queue[reqmeta.Data] { + if !locked { + o.mu.Lock() + defer o.mu.Unlock() + } + if t, ok := o.throttle[r.Path]; ok { + return t + } + // init a new throttle + o.throttle[r.Path] = pqueue.New(pqueue.Opts[reqmeta.Data]{Max: 
o.throttleDef}) + return o.throttle[r.Path] +} + +func (o *OCIDir) initIndex(r ref.Ref, locked bool) error { + if !locked { + o.mu.Lock() + defer o.mu.Unlock() + } + layoutFile := path.Join(r.Path, imageLayoutFile) + _, err := os.Stat(layoutFile) + if err == nil { + return nil + } + //#nosec G301 defer to user umask settings + err = os.MkdirAll(r.Path, 0777) + if err != nil && !errors.Is(err, fs.ErrExist) { + return fmt.Errorf("failed creating %s: %w", r.Path, err) + } + // create/replace oci-layout file + layout := v1.ImageLayout{ + Version: "1.0.0", + } + lb, err := json.Marshal(layout) + if err != nil { + return fmt.Errorf("cannot marshal layout: %w", err) + } + //#nosec G304 users should validate references they attempt to open + lfh, err := os.Create(layoutFile) + if err != nil { + return fmt.Errorf("cannot create %s: %w", imageLayoutFile, err) + } + defer lfh.Close() + _, err = lfh.Write(lb) + if err != nil { + return fmt.Errorf("cannot write %s: %w", imageLayoutFile, err) + } + return nil +} + +func (o *OCIDir) readIndex(r ref.Ref, locked bool) (v1.Index, error) { + if !locked { + o.mu.Lock() + defer o.mu.Unlock() + } + // validate dir + index := v1.Index{} + err := o.valid(r.Path, true) + if err != nil { + return index, err + } + indexFile := path.Join(r.Path, "index.json") + //#nosec G304 users should validate references they attempt to open + fh, err := os.Open(indexFile) + if err != nil { + return index, fmt.Errorf("%s cannot be open: %w", indexFile, err) + } + defer fh.Close() + ib, err := io.ReadAll(fh) + if err != nil { + return index, fmt.Errorf("%s cannot be read: %w", indexFile, err) + } + err = json.Unmarshal(ib, &index) + if err != nil { + return index, fmt.Errorf("%s cannot be parsed: %w", indexFile, err) + } + return index, nil +} + +func (o *OCIDir) updateIndex(r ref.Ref, d descriptor.Descriptor, child bool, locked bool) error { + if !locked { + o.mu.Lock() + defer o.mu.Unlock() + } + indexChanged := false + index, err := o.readIndex(r, true) 
+ if err != nil { + index = indexCreate() + indexChanged = true + } + if !child { + err := indexSet(&index, r, d) + if err != nil { + return fmt.Errorf("failed to update index: %w", err) + } + indexChanged = true + } + if indexChanged { + err = o.writeIndex(r, index, true) + if err != nil { + return fmt.Errorf("failed to write index: %w", err) + } + } + return nil +} + +func (o *OCIDir) writeIndex(r ref.Ref, i v1.Index, locked bool) error { + if !locked { + o.mu.Lock() + defer o.mu.Unlock() + } + //#nosec G301 defer to user umask settings + err := os.MkdirAll(r.Path, 0777) + if err != nil && !errors.Is(err, fs.ErrExist) { + return fmt.Errorf("failed creating %s: %w", r.Path, err) + } + // create/replace oci-layout file + layout := v1.ImageLayout{ + Version: "1.0.0", + } + lb, err := json.Marshal(layout) + if err != nil { + return fmt.Errorf("cannot marshal layout: %w", err) + } + lfh, err := os.Create(path.Join(r.Path, imageLayoutFile)) + if err != nil { + return fmt.Errorf("cannot create %s: %w", imageLayoutFile, err) + } + defer lfh.Close() + _, err = lfh.Write(lb) + if err != nil { + return fmt.Errorf("cannot write %s: %w", imageLayoutFile, err) + } + // create/replace index.json file + tmpFile, err := os.CreateTemp(r.Path, "index.json.*.tmp") + if err != nil { + return fmt.Errorf("cannot create index tmpfile: %w", err) + } + fi, err := tmpFile.Stat() + if err != nil { + return fmt.Errorf("failed to stat index tmpfile: %w", err) + } + tmpName := fi.Name() + b, err := json.Marshal(i) + if err != nil { + return fmt.Errorf("cannot marshal index: %w", err) + } + _, err = tmpFile.Write(b) + errC := tmpFile.Close() + if err != nil { + return fmt.Errorf("cannot write index: %w", err) + } + if errC != nil { + return fmt.Errorf("cannot close index: %w", errC) + } + indexFile := path.Join(r.Path, "index.json") + err = os.Rename(path.Join(r.Path, tmpName), indexFile) + if err != nil { + return fmt.Errorf("cannot rename tmpfile to index: %w", err) + } + return nil +} + +// 
func valid (dir) (error) // check for `oci-layout` file and `index.json` for read +func (o *OCIDir) valid(dir string, locked bool) error { + if !locked { + o.mu.Lock() + defer o.mu.Unlock() + } + layout := v1.ImageLayout{} + reqVer := "1.0.0" + //#nosec G304 users should validate references they attempt to open + fh, err := os.Open(path.Join(dir, imageLayoutFile)) + if err != nil { + return fmt.Errorf("%s cannot be open: %w", imageLayoutFile, err) + } + defer fh.Close() + lb, err := io.ReadAll(fh) + if err != nil { + return fmt.Errorf("%s cannot be read: %w", imageLayoutFile, err) + } + err = json.Unmarshal(lb, &layout) + if err != nil { + return fmt.Errorf("%s cannot be parsed: %w", imageLayoutFile, err) + } + if layout.Version != reqVer { + return fmt.Errorf("unsupported oci layout version, expected %s, received %s", reqVer, layout.Version) + } + return nil +} + +func (o *OCIDir) refMod(r ref.Ref) { + if gc, ok := o.modRefs[r.Path]; ok && gc != nil { + gc.mod = true + } else { + o.modRefs[r.Path] = &ociGC{mod: true} + } +} + +func indexCreate() v1.Index { + i := v1.Index{ + Versioned: v1.IndexSchemaVersion, + MediaType: mediatype.OCI1ManifestList, + Manifests: []descriptor.Descriptor{}, + Annotations: map[string]string{}, + } + return i +} + +func indexGet(index v1.Index, r ref.Ref) (descriptor.Descriptor, error) { + if r.Digest == "" && r.Tag == "" { + r.Tag = "latest" + } + if r.Digest != "" { + for _, im := range index.Manifests { + if im.Digest.String() == r.Digest { + return im, nil + } + } + } else if r.Tag != "" { + for _, im := range index.Manifests { + if name, ok := im.Annotations[aOCIRefName]; ok && name == r.Tag { + return im, nil + } + } + // fall back to support full image name in annotation + for _, im := range index.Manifests { + if name, ok := im.Annotations[aOCIRefName]; ok && strings.HasSuffix(name, ":"+r.Tag) { + return im, nil + } + } + } + return descriptor.Descriptor{}, errs.ErrNotFound +} + +func indexSet(index *v1.Index, r ref.Ref, d 
descriptor.Descriptor) error { + if index == nil { + return fmt.Errorf("index is nil") + } + if r.Tag != "" { + if d.Annotations == nil { + d.Annotations = map[string]string{} + } + d.Annotations[aOCIRefName] = r.Tag + } + if index.Manifests == nil { + index.Manifests = []descriptor.Descriptor{} + } + pos := -1 + // search for existing + for i := range index.Manifests { + var name string + if index.Manifests[i].Annotations != nil { + name = index.Manifests[i].Annotations[aOCIRefName] + } + if (name == "" && index.Manifests[i].Digest == d.Digest) || (r.Tag != "" && name == r.Tag) { + index.Manifests[i] = d + pos = i + break + } + } + if pos >= 0 { + // existing entry was replaced, remove any dup entries + for i := len(index.Manifests) - 1; i > pos; i-- { + var name string + if index.Manifests[i].Annotations != nil { + name = index.Manifests[i].Annotations[aOCIRefName] + } + // prune entries without any tag and a matching digest + // or entries with a matching tag + if (name == "" && index.Manifests[i].Digest == d.Digest) || (r.Tag != "" && name == r.Tag) { + index.Manifests = append(index.Manifests[:i], index.Manifests[i+1:]...) + } + } + } else { + // existing entry to replace was not found, add the descriptor + index.Manifests = append(index.Manifests, d) + } + return nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/ocidir_nowasm.go b/vendor/github.com/regclient/regclient/scheme/ocidir/ocidir_nowasm.go new file mode 100644 index 00000000..90143945 --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/ocidir_nowasm.go @@ -0,0 +1,20 @@ +//go:build !wasm +// +build !wasm + +package ocidir + +import ( + "log/slog" + + "github.com/sirupsen/logrus" + + "github.com/regclient/regclient/internal/sloghandle" +) + +// WithLog provides a logrus logger. +// By default logging is disabled. 
+func WithLog(log *logrus.Logger) Opts { + return func(c *ociConf) { + c.slog = slog.New(sloghandle.Logrus(log)) + } +} diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/ping.go b/vendor/github.com/regclient/regclient/scheme/ocidir/ping.go new file mode 100644 index 00000000..d83df343 --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/ping.go @@ -0,0 +1,29 @@ +package ocidir + +import ( + "context" + "fmt" + "os" + + "github.com/regclient/regclient/types/ping" + "github.com/regclient/regclient/types/ref" +) + +// Ping for an ocidir verifies access to read the path. +func (o *OCIDir) Ping(ctx context.Context, r ref.Ref) (ping.Result, error) { + ret := ping.Result{} + fd, err := os.Open(r.Path) + if err != nil { + return ret, err + } + defer fd.Close() + fi, err := fd.Stat() + if err != nil { + return ret, err + } + ret.Stat = fi + if !fi.IsDir() { + return ret, fmt.Errorf("failed to access %s: not a directory", r.Path) + } + return ret, nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/referrer.go b/vendor/github.com/regclient/regclient/scheme/ocidir/referrer.go new file mode 100644 index 00000000..7a89d27c --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/ocidir/referrer.go @@ -0,0 +1,160 @@ +package ocidir + +import ( + "context" + "errors" + "fmt" + + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/mediatype" + v1 "github.com/regclient/regclient/types/oci/v1" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/referrer" +) + +// ReferrerList returns a list of referrers to a given reference. +// The reference must include the digest. Use [regclient.ReferrerList] to resolve the platform or tag. 
+func (o *OCIDir) ReferrerList(ctx context.Context, r ref.Ref, opts ...scheme.ReferrerOpts) (referrer.ReferrerList, error) { + o.mu.Lock() + defer o.mu.Unlock() + return o.referrerList(ctx, r, opts...) +} + +func (o *OCIDir) referrerList(ctx context.Context, rSubject ref.Ref, opts ...scheme.ReferrerOpts) (referrer.ReferrerList, error) { + config := scheme.ReferrerConfig{} + for _, opt := range opts { + opt(&config) + } + var r ref.Ref + if config.SrcRepo.IsSet() { + r = config.SrcRepo.SetDigest(rSubject.Digest) + } else { + r = rSubject.SetDigest(rSubject.Digest) + } + rl := referrer.ReferrerList{ + Tags: []string{}, + } + if rSubject.Digest == "" { + return rl, fmt.Errorf("digest required to query referrers %s", rSubject.CommonName()) + } + + // pull referrer list by tag + rlTag, err := referrer.FallbackTag(r) + if err != nil { + return rl, err + } + m, err := o.manifestGet(ctx, rlTag) + if err != nil { + if errors.Is(err, errs.ErrNotFound) { + // empty list, initialize a new manifest + rl.Manifest, err = manifest.New(manifest.WithOrig(v1.Index{ + Versioned: v1.IndexSchemaVersion, + MediaType: mediatype.OCI1ManifestList, + })) + if err != nil { + return rl, err + } + return rl, nil + } + return rl, err + } + ociML, ok := m.GetOrig().(v1.Index) + if !ok { + return rl, fmt.Errorf("manifest is not an OCI index: %s", rlTag.CommonName()) + } + // update referrer list + rl.Subject = rSubject + if config.SrcRepo.IsSet() { + rl.Source = config.SrcRepo + } + rl.Manifest = m + rl.Descriptors = ociML.Manifests + rl.Annotations = ociML.Annotations + rl.Tags = append(rl.Tags, rlTag.Tag) + rl = scheme.ReferrerFilter(config, rl) + + return rl, nil +} + +// referrerDelete deletes a referrer associated with a manifest +func (o *OCIDir) referrerDelete(ctx context.Context, r ref.Ref, m manifest.Manifest) error { + // get refers field + mSubject, ok := m.(manifest.Subjecter) + if !ok { + return fmt.Errorf("manifest does not support subject: %w", errs.ErrUnsupportedMediaType) + } + 
subject, err := mSubject.GetSubject() + if err != nil { + return err + } + // validate/set subject descriptor + if subject == nil || subject.Digest == "" { + return fmt.Errorf("subject is not set%.0w", errs.ErrNotFound) + } + + // get descriptor for subject + rSubject := r.SetDigest(subject.Digest.String()) + + // pull existing referrer list + rl, err := o.referrerList(ctx, rSubject) + if err != nil { + return err + } + err = rl.Delete(m) + if err != nil { + return err + } + + // push updated referrer list by tag + rlTag, err := referrer.FallbackTag(rSubject) + if err != nil { + return err + } + if rl.IsEmpty() { + err = o.tagDelete(ctx, rlTag) + if err == nil { + return nil + } + // if delete is not supported, fall back to pushing empty list + } + return o.manifestPut(ctx, rlTag, rl.Manifest) +} + +// referrerPut pushes a new referrer associated with a given reference +func (o *OCIDir) referrerPut(ctx context.Context, r ref.Ref, m manifest.Manifest) error { + // get subject field + mSubject, ok := m.(manifest.Subjecter) + if !ok { + return fmt.Errorf("manifest does not support subject: %w", errs.ErrUnsupportedMediaType) + } + subject, err := mSubject.GetSubject() + if err != nil { + return err + } + // validate/set subject descriptor + if subject == nil || subject.Digest == "" { + return fmt.Errorf("subject is not set%.0w", errs.ErrNotFound) + } + + // get descriptor for subject + rSubject := r.SetDigest(subject.Digest.String()) + + // pull existing referrer list + rl, err := o.referrerList(ctx, rSubject) + if err != nil { + return err + } + err = rl.Add(m) + if err != nil { + return err + } + + // push updated referrer list by tag + rlTag, err := referrer.FallbackTag(rSubject) + if err != nil { + return err + } + return o.manifestPut(ctx, rlTag, rl.Manifest) +} diff --git a/vendor/github.com/regclient/regclient/scheme/ocidir/tag.go b/vendor/github.com/regclient/regclient/scheme/ocidir/tag.go new file mode 100644 index 00000000..fd34a324 --- /dev/null +++ 
b/vendor/github.com/regclient/regclient/scheme/ocidir/tag.go @@ -0,0 +1,95 @@ +package ocidir + +import ( + "context" + "encoding/json" + "fmt" + "sort" + "strings" + + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/tag" +) + +// TagDelete removes a tag from the repository +func (o *OCIDir) TagDelete(ctx context.Context, r ref.Ref) error { + o.mu.Lock() + defer o.mu.Unlock() + return o.tagDelete(ctx, r) +} + +func (o *OCIDir) tagDelete(_ context.Context, r ref.Ref) error { + if r.Tag == "" { + return errs.ErrMissingTag + } + // get index + index, err := o.readIndex(r, true) + if err != nil { + return fmt.Errorf("failed to read index: %w", err) + } + changed := false + for i, desc := range index.Manifests { + if t, ok := desc.Annotations[aOCIRefName]; ok && t == r.Tag { + // remove matching entry from index + index.Manifests = append(index.Manifests[:i], index.Manifests[i+1:]...) 
+ changed = true + } + } + if !changed { + return fmt.Errorf("failed deleting %s: %w", r.CommonName(), errs.ErrNotFound) + } + // push manifest back out + err = o.writeIndex(r, index, true) + if err != nil { + return fmt.Errorf("failed to write index: %w", err) + } + o.refMod(r) + return nil +} + +// TagList returns a list of tags from the repository +func (o *OCIDir) TagList(ctx context.Context, r ref.Ref, opts ...scheme.TagOpts) (*tag.List, error) { + // get index + index, err := o.readIndex(r, false) + if err != nil { + return nil, err + } + tl := []string{} + for _, desc := range index.Manifests { + if t, ok := desc.Annotations[aOCIRefName]; ok { + if i := strings.LastIndex(t, ":"); i >= 0 { + t = t[i+1:] + } + found := false + for _, cur := range tl { + if cur == t { + found = true + break + } + } + if !found { + tl = append(tl, t) + } + } + } + sort.Strings(tl) + ib, err := json.Marshal(index) + if err != nil { + return nil, err + } + // return listing from index + t, err := tag.New( + tag.WithRaw(ib), + tag.WithRef(r), + tag.WithMT(mediatype.OCI1ManifestList), + tag.WithLayoutIndex(index), + tag.WithTags(tl), + ) + if err != nil { + return nil, err + } + return t, nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/reg/blob.go b/vendor/github.com/regclient/regclient/scheme/reg/blob.go new file mode 100644 index 00000000..f477f887 --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/reg/blob.go @@ -0,0 +1,678 @@ +package reg + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "log/slog" + "net/http" + "net/url" + "strconv" + "strings" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/internal/reghttp" + "github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/types/blob" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" + 
"github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/warning" +) + +var ( + zeroDig = digest.SHA256.FromBytes([]byte{}) +) + +// BlobDelete removes a blob from the repository +func (reg *Reg) BlobDelete(ctx context.Context, r ref.Ref, d descriptor.Descriptor) error { + req := ®http.Req{ + MetaKind: reqmeta.Query, + Host: r.Registry, + Method: "DELETE", + Repository: r.Repository, + Path: "blobs/" + d.Digest.String(), + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return fmt.Errorf("failed to delete blob, digest %s, ref %s: %w", d.Digest.String(), r.CommonName(), err) + } + if resp.HTTPResponse().StatusCode != 202 { + return fmt.Errorf("failed to delete blob, digest %s, ref %s: %w", d.Digest.String(), r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + return nil +} + +// BlobGet retrieves a blob from the repository, returning a blob reader +func (reg *Reg) BlobGet(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) { + // build/send request + req := ®http.Req{ + MetaKind: reqmeta.Blob, + Host: r.Registry, + Method: "GET", + Repository: r.Repository, + Path: "blobs/" + d.Digest.String(), + ExpectLen: d.Size, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil && len(d.URLs) > 0 { + for _, curURL := range d.URLs { + // fallback for external blobs + var u *url.URL + u, err = url.Parse(curURL) + if err != nil { + return nil, fmt.Errorf("failed to parse external url \"%s\": %w", curURL, err) + } + req = ®http.Req{ + MetaKind: reqmeta.Blob, + Host: r.Registry, + Method: "GET", + Repository: r.Repository, + DirectURL: u, + NoMirrors: true, + ExpectLen: d.Size, + } + resp, err = reg.reghttp.Do(ctx, req) + if err == nil { + break + } + } + } + if err != nil { + return nil, fmt.Errorf("failed to get blob, digest %s, ref %s: %w", d.Digest.String(), r.CommonName(), err) + } + if resp.HTTPResponse().StatusCode != 200 { + return nil, fmt.Errorf("failed to get blob, digest %s, 
ref %s: %w", d.Digest.String(), r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + + b := blob.NewReader( + blob.WithRef(r), + blob.WithReader(resp), + blob.WithDesc(d), + blob.WithResp(resp.HTTPResponse()), + ) + return b, nil +} + +// BlobHead is used to verify if a blob exists and is accessible +func (reg *Reg) BlobHead(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) { + // build/send request + req := ®http.Req{ + MetaKind: reqmeta.Head, + Host: r.Registry, + Method: "HEAD", + Repository: r.Repository, + Path: "blobs/" + d.Digest.String(), + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil && len(d.URLs) > 0 { + for _, curURL := range d.URLs { + // fallback for external blobs + var u *url.URL + u, err = url.Parse(curURL) + if err != nil { + return nil, fmt.Errorf("failed to parse external url \"%s\": %w", curURL, err) + } + req = ®http.Req{ + MetaKind: reqmeta.Head, + Host: r.Registry, + Method: "HEAD", + Repository: r.Repository, + DirectURL: u, + NoMirrors: true, + } + resp, err = reg.reghttp.Do(ctx, req) + if err == nil { + break + } + } + } + if err != nil { + return nil, fmt.Errorf("failed to request blob head, digest %s, ref %s: %w", d.Digest.String(), r.CommonName(), err) + } + defer resp.Close() + if resp.HTTPResponse().StatusCode != 200 { + return nil, fmt.Errorf("failed to request blob head, digest %s, ref %s: %w", d.Digest.String(), r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + + b := blob.NewReader( + blob.WithRef(r), + blob.WithDesc(d), + blob.WithResp(resp.HTTPResponse()), + ) + return b, nil +} + +// BlobMount attempts to perform a server side copy/mount of the blob between repositories +func (reg *Reg) BlobMount(ctx context.Context, rSrc ref.Ref, rTgt ref.Ref, d descriptor.Descriptor) error { + putURL, _, err := reg.blobMount(ctx, rTgt, d, rSrc) + // if mount fails and returns an upload location, cancel that upload + if err != nil { + _ = 
reg.blobUploadCancel(ctx, rTgt, putURL) + } + return err +} + +// BlobPut uploads a blob to a repository. +// Descriptor is optional, leave size and digest to zero value if unknown. +// Reader must also be an [io.Seeker] to support chunked upload fallback. +// +// This will attempt an anonymous blob mount first which some registries may support. +// It will then try doing a full put of the blob without chunking (most widely supported). +// If the full put fails, it will fall back to a chunked upload (useful for flaky networks). +func (reg *Reg) BlobPut(ctx context.Context, r ref.Ref, d descriptor.Descriptor, rdr io.Reader) (descriptor.Descriptor, error) { + var putURL *url.URL + var err error + validDesc := (d.Size > 0 && d.Digest.Validate() == nil) || (d.Size == 0 && d.Digest == zeroDig) + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + + // attempt an anonymous blob mount + if validDesc { + putURL, _, err = reg.blobMount(ctx, r, d, ref.Ref{}) + if err == nil { + return d, nil + } + if err != errs.ErrMountReturnedLocation { + putURL = nil + } + } + // fallback to requesting upload URL + if putURL == nil { + putURL, err = reg.blobGetUploadURL(ctx, r, d) + if err != nil { + return d, err + } + } + // send upload as one-chunk + tryPut := validDesc + if tryPut { + host := reg.hostGet(r.Registry) + maxPut := host.BlobMax + if maxPut == 0 { + maxPut = reg.blobMaxPut + } + if maxPut > 0 && d.Size > maxPut { + tryPut = false + } + } + if tryPut { + err = reg.blobPutUploadFull(ctx, r, d, putURL, rdr) + if err == nil { + return d, nil + } + // on failure, attempt to seek back to start to perform a chunked upload + rdrSeek, ok := rdr.(io.ReadSeeker) + if !ok { + _ = reg.blobUploadCancel(ctx, r, putURL) + return d, err + } + offset, errR := rdrSeek.Seek(0, io.SeekStart) + if errR != nil || offset != 0 { + _ = reg.blobUploadCancel(ctx, r, putURL) + return d, err + } + } + // 
send a chunked upload if full upload not possible or too large + d, err = reg.blobPutUploadChunked(ctx, r, d, putURL, rdr) + if err != nil { + _ = reg.blobUploadCancel(ctx, r, putURL) + } + return d, err +} + +func (reg *Reg) blobGetUploadURL(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (*url.URL, error) { + q := url.Values{} + if d.DigestAlgo() != digest.Canonical { + // TODO(bmitch): EXPERIMENTAL parameter, registry support and OCI spec change needed + q.Add(paramBlobDigestAlgo, d.DigestAlgo().String()) + } + // request an upload location + req := ®http.Req{ + MetaKind: reqmeta.Blob, + Host: r.Registry, + NoMirrors: true, + Method: "POST", + Repository: r.Repository, + Path: "blobs/uploads/", + Query: q, + TransactLen: d.Size, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to send blob post, ref %s: %w", r.CommonName(), err) + } + defer resp.Close() + if resp.HTTPResponse().StatusCode != 202 { + return nil, fmt.Errorf("failed to send blob post, ref %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + + // if min size header received, check/adjust host settings + minSizeStr := resp.HTTPResponse().Header.Get(blobChunkMinHeader) + if minSizeStr != "" { + minSize, err := strconv.ParseInt(minSizeStr, 10, 64) + if err != nil { + reg.slog.Warn("Failed to parse chunk size header", + slog.String("size", minSizeStr), + slog.String("err", err.Error())) + } else { + host := reg.hostGet(r.Registry) + if (host.BlobChunk > 0 && minSize > host.BlobChunk) || (host.BlobChunk <= 0 && minSize > reg.blobChunkSize) { + if minSize > reg.blobChunkLimit { + host.BlobChunk = reg.blobChunkLimit + } else { + host.BlobChunk = minSize + } + reg.slog.Debug("Registry requested min chunk size", + slog.Int64("size", host.BlobChunk), + slog.String("host", host.Name)) + } + } + } + // Extract the location into a new putURL based on whether it's relative, fqdn with a scheme, or without a scheme. 
+ location := resp.HTTPResponse().Header.Get("Location") + if location == "" { + return nil, fmt.Errorf("failed to send blob post, ref %s: %w", r.CommonName(), errs.ErrMissingLocation) + } + reg.slog.Debug("Upload location received", + slog.String("location", location)) + + // put url may be relative to the above post URL, so parse in that context + postURL := resp.HTTPResponse().Request.URL + putURL, err := postURL.Parse(location) + if err != nil { + reg.slog.Warn("Location url failed to parse", + slog.String("location", location), + slog.String("err", err.Error())) + return nil, fmt.Errorf("blob upload url invalid, ref %s: %w", r.CommonName(), err) + } + return putURL, nil +} + +func (reg *Reg) blobMount(ctx context.Context, rTgt ref.Ref, d descriptor.Descriptor, rSrc ref.Ref) (*url.URL, string, error) { + // build/send request + query := url.Values{} + query.Set("mount", d.Digest.String()) + ignoreErr := true // ignore errors from anonymous blob mount attempts + if rSrc.Registry == rTgt.Registry && rSrc.Repository != "" { + query.Set("from", rSrc.Repository) + ignoreErr = false + } + + req := ®http.Req{ + MetaKind: reqmeta.Blob, + Host: rTgt.Registry, + NoMirrors: true, + Method: "POST", + Repository: rTgt.Repository, + Path: "blobs/uploads/", + Query: query, + IgnoreErr: ignoreErr, + TransactLen: d.Size, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return nil, "", fmt.Errorf("failed to mount blob, digest %s, ref %s: %w", d.Digest.String(), rTgt.CommonName(), err) + } + defer resp.Close() + + // if min size header received, check/adjust host settings + minSizeStr := resp.HTTPResponse().Header.Get(blobChunkMinHeader) + if minSizeStr != "" { + minSize, err := strconv.ParseInt(minSizeStr, 10, 64) + if err != nil { + reg.slog.Warn("Failed to parse chunk size header", + slog.String("size", minSizeStr), + slog.String("err", err.Error())) + } else { + host := reg.hostGet(rTgt.Registry) + if (host.BlobChunk > 0 && minSize > host.BlobChunk) || 
(host.BlobChunk <= 0 && minSize > reg.blobChunkSize) { + // TODO(bmitch): potential race condition, may need a lock before setting/using values in host + if minSize > reg.blobChunkLimit { + host.BlobChunk = reg.blobChunkLimit + } else { + host.BlobChunk = minSize + } + reg.slog.Debug("Registry requested min chunk size", + slog.Int64("size", host.BlobChunk), + slog.String("host", host.Name)) + } + } + } + // 201 indicates the blob mount succeeded + if resp.HTTPResponse().StatusCode == 201 { + return nil, "", nil + } + // 202 indicates blob mount failed but server ready to receive an upload at location + location := resp.HTTPResponse().Header.Get("Location") + uuid := resp.HTTPResponse().Header.Get("Docker-Upload-UUID") + if resp.HTTPResponse().StatusCode == 202 && location != "" { + postURL := resp.HTTPResponse().Request.URL + putURL, err := postURL.Parse(location) + if err != nil { + reg.slog.Warn("Mount location header failed to parse", + slog.String("digest", d.Digest.String()), + slog.String("target", rTgt.CommonName()), + slog.String("location", location), + slog.String("err", err.Error())) + } else { + return putURL, uuid, errs.ErrMountReturnedLocation + } + } + // all other responses unhandled + return nil, "", fmt.Errorf("failed to mount blob, digest %s, ref %s: %w", d.Digest.String(), rTgt.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) +} + +func (reg *Reg) blobPutUploadFull(ctx context.Context, r ref.Ref, d descriptor.Descriptor, putURL *url.URL, rdr io.Reader) error { + // append digest to request to use the monolithic upload option + if putURL.RawQuery != "" { + putURL.RawQuery = putURL.RawQuery + "&digest=" + url.QueryEscape(d.Digest.String()) + } else { + putURL.RawQuery = "digest=" + url.QueryEscape(d.Digest.String()) + } + + // make a reader function for the blob + readOnce := false + bodyFunc := func() (io.ReadCloser, error) { + // handle attempt to reuse blob reader (e.g. 
on a connection retry or fallback) + if readOnce { + rdrSeek, ok := rdr.(io.ReadSeeker) + if !ok { + return nil, fmt.Errorf("blob source is not a seeker%.0w", errs.ErrNotRetryable) + } + _, err := rdrSeek.Seek(0, io.SeekStart) + if err != nil { + return nil, fmt.Errorf("seek on blob source failed: %w%.0w", err, errs.ErrNotRetryable) + } + } + readOnce = true + return io.NopCloser(rdr), nil + } + // special case for the empty blob + if d.Size == 0 && d.Digest == zeroDig { + bodyFunc = nil + } + + // build/send request + header := http.Header{ + "Content-Type": {"application/octet-stream"}, + } + req := ®http.Req{ + MetaKind: reqmeta.Blob, + Host: r.Registry, + Method: "PUT", + Repository: r.Repository, + DirectURL: putURL, + BodyFunc: bodyFunc, + BodyLen: d.Size, + Headers: header, + NoMirrors: true, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return fmt.Errorf("failed to send blob (put), digest %s, ref %s: %w", d.Digest.String(), r.CommonName(), err) + } + defer resp.Close() + // 201 follows distribution-spec, 204 is listed as possible in the Docker registry spec + if resp.HTTPResponse().StatusCode != 201 && resp.HTTPResponse().StatusCode != 204 { + return fmt.Errorf("failed to send blob (put), digest %s, ref %s: %w", d.Digest.String(), r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + return nil +} + +func (reg *Reg) blobPutUploadChunked(ctx context.Context, r ref.Ref, d descriptor.Descriptor, putURL *url.URL, rdr io.Reader) (descriptor.Descriptor, error) { + host := reg.hostGet(r.Registry) + bufSize := host.BlobChunk + if bufSize <= 0 { + bufSize = reg.blobChunkSize + } + bufBytes := make([]byte, 0, bufSize) + bufRdr := bytes.NewReader(bufBytes) + bufStart := int64(0) + bufChange := false + + // setup buffer and digest pipe + digester := d.DigestAlgo().Digester() + digestRdr := io.TeeReader(rdr, digester.Hash()) + finalChunk := false + chunkStart := int64(0) + chunkSize := 0 + bodyFunc := func() (io.ReadCloser, error) { 
+ // reset to the start on every new read + _, err := bufRdr.Seek(0, io.SeekStart) + if err != nil { + return nil, err + } + return io.NopCloser(bufRdr), nil + } + chunkURL := *putURL + retryLimit := 10 // TODO: pull limit from reghttp + retryCur := 0 + var err error + + for !finalChunk || chunkStart < bufStart+int64(len(bufBytes)) { + bufChange = false + for chunkStart >= bufStart+int64(len(bufBytes)) && !finalChunk { + bufStart += int64(len(bufBytes)) + // reset length if previous read was short + if cap(bufBytes) != len(bufBytes) { + bufBytes = bufBytes[:cap(bufBytes)] + bufChange = true + } + // read a chunk into an input buffer, computing the digest + chunkSize, err = io.ReadFull(digestRdr, bufBytes) + if err == io.EOF || err == io.ErrUnexpectedEOF { + finalChunk = true + } else if err != nil { + return d, fmt.Errorf("failed to send blob chunk, ref %s: %w", r.CommonName(), err) + } + // update length on partial read + if chunkSize != len(bufBytes) { + bufBytes = bufBytes[:chunkSize] + bufChange = true + } + } + if chunkStart > bufStart && chunkStart < bufStart+int64(len(bufBytes)) { + // next chunk is inside the existing buf + bufBytes = bufBytes[chunkStart-bufStart:] + bufStart = chunkStart + chunkSize = len(bufBytes) + bufChange = true + } + if chunkSize > 0 && chunkStart != bufStart { + return d, fmt.Errorf("chunkStart (%d) != bufStart (%d)", chunkStart, bufStart) + } + if bufChange { + // need to recreate the reader on a change to the slice length, + // old reader is looking at the old slice metadata + bufRdr = bytes.NewReader(bufBytes) + } + + if chunkSize > 0 { + // write chunk + header := http.Header{ + "Content-Type": {"application/octet-stream"}, + "Content-Range": {fmt.Sprintf("%d-%d", chunkStart, chunkStart+int64(chunkSize)-1)}, + } + req := ®http.Req{ + MetaKind: reqmeta.Blob, + Host: r.Registry, + Method: "PATCH", + Repository: r.Repository, + DirectURL: &chunkURL, + BodyFunc: bodyFunc, + BodyLen: int64(chunkSize), + Headers: header, + NoMirrors: 
true, + TransactLen: d.Size - int64(chunkSize), + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil && !errors.Is(err, errs.ErrHTTPStatus) && !errors.Is(err, errs.ErrNotFound) { + return d, fmt.Errorf("failed to send blob (chunk), ref %s: http do: %w", r.CommonName(), err) + } + err = resp.Close() + if err != nil { + return d, fmt.Errorf("failed to close request: %w", err) + } + httpResp := resp.HTTPResponse() + // distribution-spec is 202, AWS ECR returns a 201 and rejects the put + if resp.HTTPResponse().StatusCode == 201 { + reg.slog.Debug("Early accept of chunk in PATCH before PUT request", + slog.String("ref", r.CommonName()), + slog.Int64("chunkStart", chunkStart), + slog.Int("chunkSize", chunkSize)) + } else if resp.HTTPResponse().StatusCode >= 400 && resp.HTTPResponse().StatusCode < 500 && + resp.HTTPResponse().Header.Get("Location") != "" && + resp.HTTPResponse().Header.Get("Range") != "" { + retryCur++ + reg.slog.Debug("Recoverable chunk upload error", + slog.String("ref", r.CommonName()), + slog.Int64("chunkStart", chunkStart), + slog.Int("chunkSize", chunkSize), + slog.String("range", resp.HTTPResponse().Header.Get("Range"))) + } else if resp.HTTPResponse().StatusCode != 202 { + retryCur++ + statusResp, statusErr := reg.blobUploadStatus(ctx, r, &chunkURL) + if retryCur > retryLimit || statusErr != nil { + return d, fmt.Errorf("failed to send blob (chunk), ref %s: http status: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + httpResp = statusResp + } else { + // successful request + if retryCur > 0 { + retryCur-- + } + } + rangeEnd, err := blobUploadCurBytes(httpResp) + if err == nil { + chunkStart = rangeEnd + 1 + } else { + chunkStart += int64(chunkSize) + } + location := httpResp.Header.Get("Location") + if location != "" { + reg.slog.Debug("Next chunk upload location received", + slog.String("location", location)) + prevURL := httpResp.Request.URL + parseURL, err := prevURL.Parse(location) + if err != nil { + 
return d, fmt.Errorf("failed to send blob (parse next chunk location), ref %s: %w", r.CommonName(), err) + } + chunkURL = *parseURL + } + } + } + + // compute digest + dOut := digester.Digest() + if d.Digest.Validate() == nil && dOut != d.Digest { + return d, fmt.Errorf("%w, expected %s, computed %s", errs.ErrDigestMismatch, d.Digest.String(), dOut.String()) + } + if d.Size != 0 && chunkStart != d.Size { + return d, fmt.Errorf("blob content size does not match descriptor, expected %d, received %d%.0w", d.Size, chunkStart, errs.ErrMismatch) + } + d.Digest = dOut + d.Size = chunkStart + + // send the final put + // append digest to request to use the monolithic upload option + if chunkURL.RawQuery != "" { + chunkURL.RawQuery = chunkURL.RawQuery + "&digest=" + url.QueryEscape(dOut.String()) + } else { + chunkURL.RawQuery = "digest=" + url.QueryEscape(dOut.String()) + } + + header := http.Header{ + "Content-Type": {"application/octet-stream"}, + } + req := ®http.Req{ + MetaKind: reqmeta.Query, + Host: r.Registry, + Method: "PUT", + Repository: r.Repository, + DirectURL: &chunkURL, + BodyLen: int64(0), + Headers: header, + NoMirrors: true, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return d, fmt.Errorf("failed to send blob (chunk digest), digest %s, ref %s: %w", dOut, r.CommonName(), err) + } + defer resp.Close() + // 201 follows distribution-spec, 204 is listed as possible in the Docker registry spec + if resp.HTTPResponse().StatusCode != 201 && resp.HTTPResponse().StatusCode != 204 { + return d, fmt.Errorf("failed to send blob (chunk digest), digest %s, ref %s: %w", dOut, r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + + return d, nil +} + +// blobUploadCancel stops an upload, releasing resources on the server. 
+func (reg *Reg) blobUploadCancel(ctx context.Context, r ref.Ref, putURL *url.URL) error {
+	if putURL == nil {
+		return fmt.Errorf("failed to cancel upload %s: url undefined", r.CommonName())
+	}
+	req := &reghttp.Req{
+		MetaKind:   reqmeta.Query,
+		Host:       r.Registry,
+		NoMirrors:  true,
+		Method:     "DELETE",
+		Repository: r.Repository,
+		DirectURL:  putURL,
+	}
+	resp, err := reg.reghttp.Do(ctx, req)
+	if err != nil {
+		return fmt.Errorf("failed to cancel upload %s: %w", r.CommonName(), err)
+	}
+	defer resp.Close()
+	if resp.HTTPResponse().StatusCode != 202 {
+		return fmt.Errorf("failed to cancel upload %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode))
+	}
+	return nil
+}
+
+// blobUploadStatus provides a response with headers indicating the progress of an upload
+func (reg *Reg) blobUploadStatus(ctx context.Context, r ref.Ref, putURL *url.URL) (*http.Response, error) {
+	req := &reghttp.Req{
+		MetaKind:   reqmeta.Query,
+		Host:       r.Registry,
+		Method:     "GET",
+		Repository: r.Repository,
+		DirectURL:  putURL,
+		NoMirrors:  true,
+	}
+	resp, err := reg.reghttp.Do(ctx, req)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get upload status: %w", err)
+	}
+	defer resp.Close()
+	if resp.HTTPResponse().StatusCode != 204 {
+		return resp.HTTPResponse(), fmt.Errorf("failed to get upload status: %w", reghttp.HTTPError(resp.HTTPResponse().StatusCode))
+	}
+	return resp.HTTPResponse(), nil
+}
+
+func blobUploadCurBytes(resp *http.Response) (int64, error) {
+	if resp == nil {
+		return 0, fmt.Errorf("missing response")
+	}
+	r := resp.Header.Get("Range")
+	if r == "" {
+		return 0, fmt.Errorf("missing range header")
+	}
+	rSplit := strings.SplitN(r, "-", 2)
+	if len(rSplit) < 2 {
+		return 0, fmt.Errorf("missing offset in range header")
+	}
+	return strconv.ParseInt(rSplit[1], 10, 64)
+}
diff --git a/vendor/github.com/regclient/regclient/scheme/reg/manifest.go b/vendor/github.com/regclient/regclient/scheme/reg/manifest.go
new file mode 100644
index
00000000..c9789da7 --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/reg/manifest.go @@ -0,0 +1,295 @@ +package reg + +import ( + "context" + "errors" + "fmt" + "io" + "log/slog" + "net/http" + "net/url" + "strconv" + + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/internal/limitread" + "github.com/regclient/regclient/internal/reghttp" + "github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/mediatype" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/warning" +) + +// ManifestDelete removes a manifest by reference (digest) from a registry. +// This will implicitly delete all tags pointing to that manifest. +func (reg *Reg) ManifestDelete(ctx context.Context, r ref.Ref, opts ...scheme.ManifestOpts) error { + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + if r.Digest == "" { + return fmt.Errorf("digest required to delete manifest, reference %s%.0w", r.CommonName(), errs.ErrMissingDigest) + } + + mc := scheme.ManifestConfig{} + for _, opt := range opts { + opt(&mc) + } + + if mc.CheckReferrers && mc.Manifest == nil { + m, err := reg.ManifestGet(ctx, r) + if err != nil { + return fmt.Errorf("failed to pull manifest for refers: %w", err) + } + mc.Manifest = m + } + if mc.Manifest != nil { + if mr, ok := mc.Manifest.(manifest.Subjecter); ok { + sDesc, err := mr.GetSubject() + if err == nil && sDesc != nil && sDesc.Digest != "" { + // attempt to delete the referrer, but ignore if the referrer entry wasn't found + err = reg.referrerDelete(ctx, r, mc.Manifest) + if err != nil && !errors.Is(err, errs.ErrNotFound) { + return err + } + } + } + } + rCache := r.SetDigest(r.Digest) + reg.cacheMan.Delete(rCache) + + // build/send 
request + req := ®http.Req{ + MetaKind: reqmeta.Query, + Host: r.Registry, + NoMirrors: true, + Method: "DELETE", + Repository: r.Repository, + Path: "manifests/" + r.Digest, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return fmt.Errorf("failed to delete manifest %s: %w", r.CommonName(), err) + } + defer resp.Close() + if resp.HTTPResponse().StatusCode != 202 { + return fmt.Errorf("failed to delete manifest %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + + return nil +} + +// ManifestGet retrieves a manifest from the registry +func (reg *Reg) ManifestGet(ctx context.Context, r ref.Ref) (manifest.Manifest, error) { + var tagOrDigest string + if r.Digest != "" { + rCache := r.SetDigest(r.Digest) + if m, err := reg.cacheMan.Get(rCache); err == nil { + return m, nil + } + tagOrDigest = r.Digest + } else if r.Tag != "" { + tagOrDigest = r.Tag + } else { + return nil, fmt.Errorf("reference missing tag and digest: %s%.0w", r.CommonName(), errs.ErrMissingTagOrDigest) + } + + // build/send request + headers := http.Header{ + "Accept": []string{ + mediatype.OCI1ManifestList, + mediatype.OCI1Manifest, + mediatype.Docker2ManifestList, + mediatype.Docker2Manifest, + mediatype.Docker1ManifestSigned, + mediatype.Docker1Manifest, + mediatype.OCI1Artifact, + }, + } + req := ®http.Req{ + MetaKind: reqmeta.Manifest, + Host: r.Registry, + Method: "GET", + Repository: r.Repository, + Path: "manifests/" + tagOrDigest, + Headers: headers, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to get manifest %s: %w", r.CommonName(), err) + } + defer resp.Close() + if resp.HTTPResponse().StatusCode != 200 { + return nil, fmt.Errorf("failed to get manifest %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + + // limit length + size, _ := strconv.Atoi(resp.HTTPResponse().Header.Get("Content-Length")) + if size > 0 && reg.manifestMaxPull > 0 && int64(size) > 
reg.manifestMaxPull { + return nil, fmt.Errorf("manifest too large, received %d, limit %d: %s%.0w", size, reg.manifestMaxPull, r.CommonName(), errs.ErrSizeLimitExceeded) + } + rdr := &limitread.LimitRead{ + Reader: resp, + Limit: reg.manifestMaxPull, + } + + // read manifest + rawBody, err := io.ReadAll(rdr) + if err != nil { + return nil, fmt.Errorf("error reading manifest for %s: %w", r.CommonName(), err) + } + + m, err := manifest.New( + manifest.WithRef(r), + manifest.WithHeader(resp.HTTPResponse().Header), + manifest.WithRaw(rawBody), + ) + if err != nil { + return nil, err + } + rCache := r.SetDigest(m.GetDescriptor().Digest.String()) + reg.cacheMan.Set(rCache, m) + return m, nil +} + +// ManifestHead returns metadata on the manifest from the registry +func (reg *Reg) ManifestHead(ctx context.Context, r ref.Ref) (manifest.Manifest, error) { + // build the request + var tagOrDigest string + if r.Digest != "" { + rCache := r.SetDigest(r.Digest) + if m, err := reg.cacheMan.Get(rCache); err == nil { + return m, nil + } + tagOrDigest = r.Digest + } else if r.Tag != "" { + tagOrDigest = r.Tag + } else { + return nil, fmt.Errorf("reference missing tag and digest: %s%.0w", r.CommonName(), errs.ErrMissingTagOrDigest) + } + + // build/send request + headers := http.Header{ + "Accept": []string{ + mediatype.OCI1ManifestList, + mediatype.OCI1Manifest, + mediatype.Docker2ManifestList, + mediatype.Docker2Manifest, + mediatype.Docker1ManifestSigned, + mediatype.Docker1Manifest, + mediatype.OCI1Artifact, + }, + } + req := ®http.Req{ + MetaKind: reqmeta.Head, + Host: r.Registry, + Method: "HEAD", + Repository: r.Repository, + Path: "manifests/" + tagOrDigest, + Headers: headers, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to request manifest head %s: %w", r.CommonName(), err) + } + defer resp.Close() + if resp.HTTPResponse().StatusCode != 200 { + return nil, fmt.Errorf("failed to request manifest head %s: %w", r.CommonName(), 
reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + + return manifest.New( + manifest.WithRef(r), + manifest.WithHeader(resp.HTTPResponse().Header), + ) +} + +// ManifestPut uploads a manifest to a registry +func (reg *Reg) ManifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest, opts ...scheme.ManifestOpts) error { + var tagOrDigest string + if r.Digest != "" { + tagOrDigest = r.Digest + } else if r.Tag != "" { + tagOrDigest = r.Tag + } else { + reg.slog.Warn("Manifest put requires a tag", + slog.String("ref", r.Reference)) + return errs.ErrMissingTag + } + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + + // create the request body + mj, err := m.MarshalJSON() + if err != nil { + reg.slog.Warn("Error marshaling manifest", + slog.String("ref", r.Reference), + slog.String("err", err.Error())) + return fmt.Errorf("error marshalling manifest for %s: %w", r.CommonName(), err) + } + + // limit length + if reg.manifestMaxPush > 0 && int64(len(mj)) > reg.manifestMaxPush { + return fmt.Errorf("manifest too large, calculated %d, limit %d: %s%.0w", len(mj), reg.manifestMaxPush, r.CommonName(), errs.ErrSizeLimitExceeded) + } + + // build/send request + headers := http.Header{ + "Content-Type": []string{manifest.GetMediaType(m)}, + } + q := url.Values{} + if tagOrDigest == r.Tag && m.GetDescriptor().Digest.Algorithm() != digest.Canonical { + // TODO(bmitch): EXPERIMENTAL parameter, registry support and OCI spec change needed + q.Add(paramManifestDigest, m.GetDescriptor().Digest.String()) + } + req := ®http.Req{ + MetaKind: reqmeta.Manifest, + Host: r.Registry, + NoMirrors: true, + Method: "PUT", + Repository: r.Repository, + Path: "manifests/" + tagOrDigest, + Query: q, + Headers: headers, + BodyLen: int64(len(mj)), + BodyBytes: mj, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return fmt.Errorf("failed to put manifest %s: %w", r.CommonName(), 
err) + } + err = resp.Close() + if err != nil { + return fmt.Errorf("failed to close request: %w", err) + } + if resp.HTTPResponse().StatusCode != 201 { + return fmt.Errorf("failed to put manifest %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + + rCache := r.SetDigest(m.GetDescriptor().Digest.String()) + reg.cacheMan.Set(rCache, m) + + // update referrers if defined on this manifest + if mr, ok := m.(manifest.Subjecter); ok { + mDesc, err := mr.GetSubject() + if err != nil { + return err + } + if mDesc != nil && mDesc.Digest.String() != "" { + rSubj := r.SetDigest(mDesc.Digest.String()) + reg.cacheRL.Delete(rSubj) + if mDesc.Digest.String() != resp.HTTPResponse().Header.Get(OCISubjectHeader) { + err = reg.referrerPut(ctx, r, m) + if err != nil { + return err + } + } + } + } + + return nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/reg/ping.go b/vendor/github.com/regclient/regclient/scheme/reg/ping.go new file mode 100644 index 00000000..e4fc0f02 --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/reg/ping.go @@ -0,0 +1,39 @@ +package reg + +import ( + "context" + "fmt" + + "github.com/regclient/regclient/internal/reghttp" + "github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/types/ping" + "github.com/regclient/regclient/types/ref" +) + +// Ping queries the /v2/ API of the registry to verify connectivity and access. 
+func (reg *Reg) Ping(ctx context.Context, r ref.Ref) (ping.Result, error) {
+	ret := ping.Result{}
+	req := &reghttp.Req{
+		MetaKind:  reqmeta.Query,
+		Host:      r.Registry,
+		NoMirrors: true,
+		Method:    "GET",
+		Path:      "",
+	}
+
+	resp, err := reg.reghttp.Do(ctx, req)
+	if resp != nil && resp.HTTPResponse() != nil {
+		ret.Header = resp.HTTPResponse().Header
+	}
+	if err != nil {
+		return ret, fmt.Errorf("failed to ping registry %s: %w", r.Registry, err)
+	}
+	defer resp.Close()
+
+	if resp.HTTPResponse().StatusCode != 200 {
+		return ret, fmt.Errorf("failed to ping registry %s: %w",
+			r.Registry, reghttp.HTTPError(resp.HTTPResponse().StatusCode))
+	}
+
+	return ret, nil
+}
diff --git a/vendor/github.com/regclient/regclient/scheme/reg/referrer.go b/vendor/github.com/regclient/regclient/scheme/reg/referrer.go
new file mode 100644
index 00000000..2e9757cb
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/scheme/reg/referrer.go
@@ -0,0 +1,366 @@
+package reg
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net/url"
+
+	"github.com/regclient/regclient/internal/httplink"
+	"github.com/regclient/regclient/internal/reghttp"
+	"github.com/regclient/regclient/internal/reqmeta"
+	"github.com/regclient/regclient/scheme"
+	"github.com/regclient/regclient/types/errs"
+	"github.com/regclient/regclient/types/manifest"
+	"github.com/regclient/regclient/types/mediatype"
+	v1 "github.com/regclient/regclient/types/oci/v1"
+	"github.com/regclient/regclient/types/ref"
+	"github.com/regclient/regclient/types/referrer"
+	"github.com/regclient/regclient/types/warning"
+)
+
+const OCISubjectHeader = "OCI-Subject"
+
+// ReferrerList returns a list of referrers to a given reference.
+// The reference must include the digest. Use [regclient.ReferrerList] to resolve the platform or tag.
+func (reg *Reg) ReferrerList(ctx context.Context, rSubject ref.Ref, opts ...scheme.ReferrerOpts) (referrer.ReferrerList, error) { + config := scheme.ReferrerConfig{} + for _, opt := range opts { + opt(&config) + } + var r ref.Ref + if config.SrcRepo.IsSet() { + r = config.SrcRepo.SetDigest(rSubject.Digest) + } else { + r = rSubject.SetDigest(rSubject.Digest) + } + rl := referrer.ReferrerList{ + Tags: []string{}, + } + if rSubject.Digest == "" { + return rl, fmt.Errorf("digest required to query referrers %s", rSubject.CommonName()) + } + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + + found := false + // try cache + rl, err := reg.cacheRL.Get(r) + if err == nil { + found = true + } + // try referrers API + if !found { + referrerEnabled, ok := reg.featureGet("referrer", r.Registry, r.Repository) + if !ok || referrerEnabled { + // attempt to call the referrer API + rl, err = reg.referrerListByAPI(ctx, r, config) + if !ok { + // save the referrer API state + reg.featureSet("referrer", r.Registry, r.Repository, err == nil) + } + if err == nil { + if config.MatchOpt.ArtifactType == "" { + // only cache if successful and artifactType is not filtered + reg.cacheRL.Set(r, rl) + } + found = true + } + } + } + // fall back to tag + if !found { + rl, err = reg.referrerListByTag(ctx, r) + if err == nil { + reg.cacheRL.Set(r, rl) + } + } + rl.Subject = rSubject + if config.SrcRepo.IsSet() { + rl.Source = config.SrcRepo + } + if err != nil { + return rl, err + } + + // apply client side filters and return result + rl = scheme.ReferrerFilter(config, rl) + return rl, nil +} + +func (reg *Reg) referrerListByAPI(ctx context.Context, r ref.Ref, config scheme.ReferrerConfig) (referrer.ReferrerList, error) { + rl := referrer.ReferrerList{ + Subject: r, + Tags: []string{}, + } + var link *url.URL + // loop for paging + for { + rlAdd, linkNext, err := reg.referrerListByAPIPage(ctx, r, 
config, link) + if err != nil { + return rl, err + } + if rl.Manifest == nil { + rl = rlAdd + } else { + rl.Descriptors = append(rl.Descriptors, rlAdd.Descriptors...) + } + if linkNext == nil { + break + } + link = linkNext + } + return rl, nil +} + +func (reg *Reg) referrerListByAPIPage(ctx context.Context, r ref.Ref, config scheme.ReferrerConfig, link *url.URL) (referrer.ReferrerList, *url.URL, error) { + rl := referrer.ReferrerList{ + Subject: r, + Tags: []string{}, + } + query := url.Values{} + if config.MatchOpt.ArtifactType != "" { + query.Set("artifactType", config.MatchOpt.ArtifactType) + } + req := ®http.Req{ + MetaKind: reqmeta.Query, + Host: r.Registry, + Method: "GET", + Repository: r.Repository, + } + if link == nil { + req.Path = "referrers/" + r.Digest + req.Query = query + req.IgnoreErr = true + } + if link != nil { + req.DirectURL = link + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return rl, nil, fmt.Errorf("failed to get referrers %s: %w", r.CommonName(), err) + } + defer resp.Close() + if resp.HTTPResponse().StatusCode != 200 { + return rl, nil, fmt.Errorf("failed to get referrers %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + + // read manifest + rawBody, err := io.ReadAll(resp) + if err != nil { + return rl, nil, fmt.Errorf("error reading referrers for %s: %w", r.CommonName(), err) + } + + m, err := manifest.New( + manifest.WithRef(r.SetDigest("")), + manifest.WithHeader(resp.HTTPResponse().Header), + manifest.WithRaw(rawBody), + ) + if err != nil { + return rl, nil, err + } + ociML, ok := m.GetOrig().(v1.Index) + if !ok { + return rl, nil, fmt.Errorf("unexpected manifest type for referrers: %s, %w", m.GetDescriptor().MediaType, errs.ErrUnsupportedMediaType) + } + rl.Manifest = m + rl.Descriptors = ociML.Manifests + rl.Annotations = ociML.Annotations + + // lookup next link + respHead := resp.HTTPResponse().Header + links, err := httplink.Parse((respHead.Values("Link"))) + if err != nil { + 
return rl, nil, err + } + next, err := links.Get("rel", "next") + if err != nil { + // no next link + link = nil + } else { + link = resp.HTTPResponse().Request.URL + if link == nil { + return rl, nil, fmt.Errorf("referrers list failed to get URL of previous request") + } + link, err = link.Parse(next.URI) + if err != nil { + return rl, nil, fmt.Errorf("referrers list failed to parse Link: %w", err) + } + } + + return rl, link, nil +} + +func (reg *Reg) referrerListByTag(ctx context.Context, r ref.Ref) (referrer.ReferrerList, error) { + rl := referrer.ReferrerList{ + Subject: r, + Tags: []string{}, + } + rlTag, err := referrer.FallbackTag(r) + if err != nil { + return rl, err + } + m, err := reg.ManifestGet(ctx, rlTag) + if err != nil { + if errors.Is(err, errs.ErrNotFound) { + // empty list, initialize a new manifest + rl.Manifest, err = manifest.New(manifest.WithOrig(v1.Index{ + Versioned: v1.IndexSchemaVersion, + MediaType: mediatype.OCI1ManifestList, + })) + if err != nil { + return rl, err + } + return rl, nil + } + return rl, err + } + ociML, ok := m.GetOrig().(v1.Index) + if !ok { + return rl, fmt.Errorf("manifest is not an OCI index: %s", rlTag.CommonName()) + } + // return resulting index + rl.Manifest = m + rl.Descriptors = ociML.Manifests + rl.Annotations = ociML.Annotations + rl.Tags = append(rl.Tags, rlTag.Tag) + return rl, nil +} + +// referrerDelete deletes a referrer associated with a manifest +func (reg *Reg) referrerDelete(ctx context.Context, r ref.Ref, m manifest.Manifest) error { + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + // get subject field + mSubject, ok := m.(manifest.Subjecter) + if !ok { + return fmt.Errorf("manifest does not support the subject field: %w", errs.ErrUnsupportedMediaType) + } + subject, err := mSubject.GetSubject() + if err != nil { + return err + } + // validate/set subject descriptor + if subject == nil || 
subject.Digest == "" { + return fmt.Errorf("refers is not set%.0w", errs.ErrNotFound) + } + + // remove from cache + rSubject := r.SetDigest(subject.Digest.String()) + reg.cacheRL.Delete(rSubject) + + // if referrer API is available, nothing to do, return + if reg.referrerPing(ctx, rSubject) { + return nil + } + + // fallback to using tag schema for refers + rl, err := reg.referrerListByTag(ctx, rSubject) + if err != nil { + return err + } + err = rl.Delete(m) + if err != nil { + return err + } + // push updated referrer list by tag + rlTag, err := referrer.FallbackTag(rSubject) + if err != nil { + return err + } + if rl.IsEmpty() { + err = reg.TagDelete(ctx, rlTag) + if err == nil { + return nil + } + // if delete is not supported, fall back to pushing empty list + } + return reg.ManifestPut(ctx, rlTag, rl.Manifest) +} + +// referrerPut pushes a new referrer associated with a manifest +func (reg *Reg) referrerPut(ctx context.Context, r ref.Ref, m manifest.Manifest) error { + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + // get subject field + mSubject, ok := m.(manifest.Subjecter) + if !ok { + return fmt.Errorf("manifest does not support the subject field: %w", errs.ErrUnsupportedMediaType) + } + subject, err := mSubject.GetSubject() + if err != nil { + return err + } + // validate/set subject descriptor + if subject == nil || subject.Digest == "" { + return fmt.Errorf("subject is not set%.0w", errs.ErrNotFound) + } + + // lock to avoid internal race conditions between pulling and pushing tag + reg.muRefTag.Lock() + defer reg.muRefTag.Unlock() + // fallback to using tag schema for refers + rSubject := r.SetDigest(subject.Digest.String()) + rl, err := reg.referrerListByTag(ctx, rSubject) + if err != nil { + return err + } + err = rl.Add(m) + if err != nil { + return err + } + // ensure the referrer list does not have a subject itself (avoiding circular locks) + 
if ms, ok := rl.Manifest.(manifest.Subjecter); ok { + mDesc, err := ms.GetSubject() + if err != nil { + return err + } + if mDesc != nil && mDesc.Digest != "" { + return fmt.Errorf("fallback referrers manifest should not have a subject: %s", rSubject.CommonName()) + } + } + // push updated referrer list by tag + rlTag, err := referrer.FallbackTag(rSubject) + if err != nil { + return err + } + if len(rl.Tags) == 0 { + rl.Tags = []string{rlTag.Tag} + } + err = reg.ManifestPut(ctx, rlTag, rl.Manifest) + if err == nil { + reg.cacheRL.Set(rSubject, rl) + } + return err +} + +// referrerPing verifies the registry supports the referrers API +func (reg *Reg) referrerPing(ctx context.Context, r ref.Ref) bool { + referrerEnabled, ok := reg.featureGet("referrer", r.Registry, r.Repository) + if ok { + return referrerEnabled + } + req := ®http.Req{ + MetaKind: reqmeta.Query, + Host: r.Registry, + Method: "GET", + Repository: r.Repository, + Path: "referrers/" + r.Digest, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + reg.featureSet("referrer", r.Registry, r.Repository, false) + return false + } + _ = resp.Close() + result := resp.HTTPResponse().StatusCode == 200 + reg.featureSet("referrer", r.Registry, r.Repository, result) + return result +} diff --git a/vendor/github.com/regclient/regclient/scheme/reg/reg.go b/vendor/github.com/regclient/regclient/scheme/reg/reg.go new file mode 100644 index 00000000..5f2a27e9 --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/reg/reg.go @@ -0,0 +1,272 @@ +// Package reg implements the OCI registry scheme used by most images (host:port/repo:tag) +package reg + +import ( + "log/slog" + "net/http" + "sync" + "time" + + "github.com/regclient/regclient/config" + "github.com/regclient/regclient/internal/cache" + "github.com/regclient/regclient/internal/pqueue" + "github.com/regclient/regclient/internal/reghttp" + "github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/types/manifest" + 
"github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/referrer" +) + +const ( + // blobChunkMinHeader is returned by registries requesting a minimum chunk size + blobChunkMinHeader = "OCI-Chunk-Min-Length" + // defaultBlobChunk 1M chunks, this is allocated in a memory buffer + defaultBlobChunk = 1024 * 1024 + // defaultBlobChunkLimit 1G chunks, prevents a memory exhaustion attack + defaultBlobChunkLimit = 1024 * 1024 * 1024 + // defaultBlobMax is disabled to support registries without chunked upload support + defaultBlobMax = -1 + // defaultManifestMaxPull limits the largest manifest that will be pulled + defaultManifestMaxPull = 1024 * 1024 * 8 + // defaultManifestMaxPush limits the largest manifest that will be pushed + defaultManifestMaxPush = 1024 * 1024 * 4 + // paramBlobDigestAlgo specifies the query parameter to request a specific digest algorithm. + // TODO(bmitch): EXPERIMENTAL field, registry support and OCI spec update needed + paramBlobDigestAlgo = "digest-algorithm" + // paramManifestDigest specifies the query parameter to specify the digest of a manifest pushed by tag. 
+ // TODO(bmitch): EXPERIMENTAL field, registry support and OCI spec update needed + paramManifestDigest = "digest" +) + +// Reg is used for interacting with remote registry servers +type Reg struct { + reghttp *reghttp.Client + reghttpOpts []reghttp.Opts + slog *slog.Logger + hosts map[string]*config.Host + hostDefault *config.Host + features map[featureKey]*featureVal + blobChunkSize int64 + blobChunkLimit int64 + blobMaxPut int64 + manifestMaxPull int64 + manifestMaxPush int64 + cacheMan *cache.Cache[ref.Ref, manifest.Manifest] + cacheRL *cache.Cache[ref.Ref, referrer.ReferrerList] + muHost sync.Mutex + muRefTag sync.Mutex +} + +type featureKey struct { + kind string + reg string + repo string +} +type featureVal struct { + enabled bool + expire time.Time +} + +var featureExpire = time.Minute * time.Duration(5) + +// Opts provides options to access registries +type Opts func(*Reg) + +// New returns a Reg pointer with any provided options +func New(opts ...Opts) *Reg { + r := Reg{ + reghttpOpts: []reghttp.Opts{}, + blobChunkSize: defaultBlobChunk, + blobChunkLimit: defaultBlobChunkLimit, + blobMaxPut: defaultBlobMax, + manifestMaxPull: defaultManifestMaxPull, + manifestMaxPush: defaultManifestMaxPush, + hosts: map[string]*config.Host{}, + features: map[featureKey]*featureVal{}, + } + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithConfigHostFn(r.hostGet)) + for _, opt := range opts { + opt(&r) + } + r.reghttp = reghttp.NewClient(r.reghttpOpts...) 
+ return &r +} + +// Throttle is used to limit concurrency +func (reg *Reg) Throttle(r ref.Ref, put bool) []*pqueue.Queue[reqmeta.Data] { + tList := []*pqueue.Queue[reqmeta.Data]{} + host := reg.hostGet(r.Registry) + t := reg.reghttp.GetThrottle(r.Registry) + if t != nil { + tList = append(tList, t) + } + if !put { + for _, mirror := range host.Mirrors { + t := reg.reghttp.GetThrottle(mirror) + if t != nil { + tList = append(tList, t) + } + } + } + return tList +} + +func (reg *Reg) hostGet(hostname string) *config.Host { + reg.muHost.Lock() + defer reg.muHost.Unlock() + if _, ok := reg.hosts[hostname]; !ok { + newHost := config.HostNewDefName(reg.hostDefault, hostname) + // check for normalized hostname + if newHost.Name != hostname { + hostname = newHost.Name + if h, ok := reg.hosts[hostname]; ok { + return h + } + } + reg.hosts[hostname] = newHost + } + return reg.hosts[hostname] +} + +// featureGet returns enabled and ok +func (reg *Reg) featureGet(kind, registry, repo string) (bool, bool) { + reg.muHost.Lock() + defer reg.muHost.Unlock() + if v, ok := reg.features[featureKey{kind: kind, reg: registry, repo: repo}]; ok { + if time.Now().Before(v.expire) { + return v.enabled, true + } + } + return false, false +} + +func (reg *Reg) featureSet(kind, registry, repo string, enabled bool) { + reg.muHost.Lock() + reg.features[featureKey{kind: kind, reg: registry, repo: repo}] = &featureVal{enabled: enabled, expire: time.Now().Add(featureExpire)} + reg.muHost.Unlock() +} + +// WithBlobSize overrides default blob sizes +func WithBlobSize(size, max int64) Opts { + return func(r *Reg) { + if size > 0 { + r.blobChunkSize = size + } + if max != 0 { + r.blobMaxPut = max + } + } +} + +// WithBlobLimit overrides default blob limit +func WithBlobLimit(limit int64) Opts { + return func(r *Reg) { + if limit > 0 { + r.blobChunkLimit = limit + } + if r.blobMaxPut > 0 && r.blobMaxPut < limit { + r.blobMaxPut = limit + } + } +} + +// WithCache defines a cache used for various 
requests +func WithCache(timeout time.Duration, count int) Opts { + return func(r *Reg) { + cm := cache.New[ref.Ref, manifest.Manifest](cache.WithAge(timeout), cache.WithCount(count)) + r.cacheMan = &cm + crl := cache.New[ref.Ref, referrer.ReferrerList](cache.WithAge(timeout), cache.WithCount(count)) + r.cacheRL = &crl + } +} + +// WithCerts adds certificates +func WithCerts(certs [][]byte) Opts { + return func(r *Reg) { + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithCerts(certs)) + } +} + +// WithCertDirs adds certificate directories for host specific certs +func WithCertDirs(dirs []string) Opts { + return func(r *Reg) { + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithCertDirs(dirs)) + } +} + +// WithCertFiles adds certificates by filename +func WithCertFiles(files []string) Opts { + return func(r *Reg) { + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithCertFiles(files)) + } +} + +// WithConfigHostDefault provides default settings for hosts. +func WithConfigHostDefault(ch *config.Host) Opts { + return func(r *Reg) { + r.hostDefault = ch + } +} + +// WithConfigHosts adds host configs for credentials +func WithConfigHosts(configHosts []*config.Host) Opts { + return func(r *Reg) { + for _, host := range configHosts { + if host.Name == "" { + continue + } + r.hosts[host.Name] = host + } + } +} + +// WithDelay initial time to wait between retries (increased with exponential backoff) +func WithDelay(delayInit time.Duration, delayMax time.Duration) Opts { + return func(r *Reg) { + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithDelay(delayInit, delayMax)) + } +} + +// WithHTTPClient uses a specific http client with retryable requests +func WithHTTPClient(hc *http.Client) Opts { + return func(r *Reg) { + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithHTTPClient(hc)) + } +} + +// WithManifestMax sets the push and pull limits for manifests +func WithManifestMax(push, pull int64) Opts { + return func(r *Reg) { + r.manifestMaxPush = push + r.manifestMaxPull = 
pull + } +} + +// WithRetryLimit restricts the number of retries (defaults to 5) +func WithRetryLimit(l int) Opts { + return func(r *Reg) { + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithRetryLimit(l)) + } +} + +// WithSlog injects a slog Logger configuration +func WithSlog(slog *slog.Logger) Opts { + return func(r *Reg) { + r.slog = slog + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithLog(slog)) + } +} + +// WithTransport uses a specific http transport with retryable requests +func WithTransport(t *http.Transport) Opts { + return func(r *Reg) { + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithTransport(t)) + } +} + +// WithUserAgent sets a user agent header +func WithUserAgent(ua string) Opts { + return func(r *Reg) { + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithUserAgent(ua)) + } +} diff --git a/vendor/github.com/regclient/regclient/scheme/reg/reg_nowasm.go b/vendor/github.com/regclient/regclient/scheme/reg/reg_nowasm.go new file mode 100644 index 00000000..82a5a82f --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/reg/reg_nowasm.go @@ -0,0 +1,21 @@ +//go:build !wasm +// +build !wasm + +package reg + +import ( + "log/slog" + + "github.com/sirupsen/logrus" + + "github.com/regclient/regclient/internal/reghttp" + "github.com/regclient/regclient/internal/sloghandle" +) + +// WithLog injects a logrus Logger configuration +func WithLog(log *logrus.Logger) Opts { + return func(r *Reg) { + r.slog = slog.New(sloghandle.Logrus(log)) + r.reghttpOpts = append(r.reghttpOpts, reghttp.WithLog(r.slog)) + } +} diff --git a/vendor/github.com/regclient/regclient/scheme/reg/repo.go b/vendor/github.com/regclient/regclient/scheme/reg/repo.go new file mode 100644 index 00000000..b9b0307c --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/reg/repo.go @@ -0,0 +1,79 @@ +package reg + +import ( + "context" + "fmt" + "io" + "log/slog" + "net/http" + "net/url" + "strconv" + + "github.com/regclient/regclient/internal/reghttp" + 
"github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/mediatype" + "github.com/regclient/regclient/types/repo" +) + +// RepoList returns a list of repositories on a registry +// Note the underlying "_catalog" API is not supported on many cloud registries +func (reg *Reg) RepoList(ctx context.Context, hostname string, opts ...scheme.RepoOpts) (*repo.RepoList, error) { + config := scheme.RepoConfig{} + for _, opt := range opts { + opt(&config) + } + + query := url.Values{} + if config.Last != "" { + query.Set("last", config.Last) + } + if config.Limit > 0 { + query.Set("n", strconv.Itoa(config.Limit)) + } + + headers := http.Header{ + "Accept": []string{"application/json"}, + } + req := ®http.Req{ + MetaKind: reqmeta.Query, + Host: hostname, + NoMirrors: true, + Method: "GET", + Path: "_catalog", + NoPrefix: true, + Query: query, + Headers: headers, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to list repositories for %s: %w", hostname, err) + } + defer resp.Close() + if resp.HTTPResponse().StatusCode != 200 { + return nil, fmt.Errorf("failed to list repositories for %s: %w", hostname, reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + + respBody, err := io.ReadAll(resp) + if err != nil { + reg.slog.Warn("Failed to read repo list", + slog.String("err", err.Error()), + slog.String("host", hostname)) + return nil, fmt.Errorf("failed to read repo list for %s: %w", hostname, err) + } + mt := mediatype.Base(resp.HTTPResponse().Header.Get("Content-Type")) + rl, err := repo.New( + repo.WithMT(mt), + repo.WithRaw(respBody), + repo.WithHost(hostname), + repo.WithHeaders(resp.HTTPResponse().Header), + ) + if err != nil { + reg.slog.Warn("Failed to unmarshal repo list", + slog.String("err", err.Error()), + slog.String("body", string(respBody)), + slog.String("host", hostname)) + return nil, fmt.Errorf("failed to parse repo list for %s: %w", 
hostname, err) + } + return rl, nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/reg/tag.go b/vendor/github.com/regclient/regclient/scheme/reg/tag.go new file mode 100644 index 00000000..e24b54be --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/reg/tag.go @@ -0,0 +1,344 @@ +package reg + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log/slog" + "net/http" + "net/url" + "strconv" + "time" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/internal/httplink" + "github.com/regclient/regclient/internal/reghttp" + "github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/docker/schema2" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/mediatype" + v1 "github.com/regclient/regclient/types/oci/v1" + "github.com/regclient/regclient/types/platform" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/tag" + "github.com/regclient/regclient/types/warning" +) + +// TagDelete removes a tag from a repository. +// It first attempts the newer OCI API to delete by tag name (not widely supported). +// If the OCI API fails, it falls back to pushing a unique empty manifest and deleting that. 
+func (reg *Reg) TagDelete(ctx context.Context, r ref.Ref) error { + var tempManifest manifest.Manifest + if r.Tag == "" { + return errs.ErrMissingTag + } + // dedup warnings + if w := warning.FromContext(ctx); w == nil { + ctx = warning.NewContext(ctx, &warning.Warning{Hook: warning.DefaultHook()}) + } + + // attempt to delete the tag directly, available in OCI distribution-spec, and Hub API + req := ®http.Req{ + MetaKind: reqmeta.Query, + Host: r.Registry, + NoMirrors: true, + Method: "DELETE", + Repository: r.Repository, + Path: "manifests/" + r.Tag, + IgnoreErr: true, // do not trigger backoffs if this fails + } + + resp, err := reg.reghttp.Do(ctx, req) + if resp != nil { + defer resp.Close() + } + if err == nil && resp != nil && resp.HTTPResponse().StatusCode == 202 { + return nil + } + // ignore errors, fallback to creating a temporary manifest to replace the tag and deleting that manifest + + // lookup the current manifest media type + curManifest, err := reg.ManifestHead(ctx, r) + if err != nil && errors.Is(err, errs.ErrUnsupportedAPI) { + curManifest, err = reg.ManifestGet(ctx, r) + } + if err != nil { + return err + } + + // create empty image config with single label + // Note, this should be MediaType specific, but it appears that docker uses OCI for the config + now := time.Now() + conf := v1.Image{ + Created: &now, + Config: v1.ImageConfig{ + Labels: map[string]string{ + "delete-tag": r.Tag, + "delete-date": now.String(), + }, + }, + Platform: platform.Platform{ + OS: "linux", + Architecture: "amd64", + }, + History: []v1.History{ + { + Created: &now, + CreatedBy: "# regclient", + Comment: "empty JSON blob", + }, + }, + RootFS: v1.RootFS{ + Type: "layers", + DiffIDs: []digest.Digest{ + descriptor.EmptyDigest, + }, + }, + } + confB, err := json.Marshal(conf) + if err != nil { + return err + } + digester := digest.Canonical.Digester() + confBuf := bytes.NewBuffer(confB) + _, err = confBuf.WriteTo(digester.Hash()) + if err != nil { + return err + } + 
confDigest := digester.Digest() + + // create manifest with config, matching the original tag manifest type + switch manifest.GetMediaType(curManifest) { + case mediatype.OCI1Manifest, mediatype.OCI1ManifestList: + tempManifest, err = manifest.New(manifest.WithOrig(v1.Manifest{ + Versioned: v1.ManifestSchemaVersion, + MediaType: mediatype.OCI1Manifest, + Config: descriptor.Descriptor{ + MediaType: mediatype.OCI1ImageConfig, + Digest: confDigest, + Size: int64(len(confB)), + }, + Layers: []descriptor.Descriptor{ + { + MediaType: mediatype.OCI1Layer, + Size: int64(len(descriptor.EmptyData)), + Digest: descriptor.EmptyDigest, + }, + }, + })) + if err != nil { + return err + } + default: // default to the docker v2 schema + tempManifest, err = manifest.New(manifest.WithOrig(schema2.Manifest{ + Versioned: schema2.ManifestSchemaVersion, + Config: descriptor.Descriptor{ + MediaType: mediatype.Docker2ImageConfig, + Digest: confDigest, + Size: int64(len(confB)), + }, + Layers: []descriptor.Descriptor{ + { + MediaType: mediatype.Docker2LayerGzip, + Size: int64(len(descriptor.EmptyData)), + Digest: descriptor.EmptyDigest, + }, + }, + })) + if err != nil { + return err + } + } + reg.slog.Debug("Sending dummy manifest to replace tag", + slog.String("ref", r.Reference)) + + // push empty layer + _, err = reg.BlobPut(ctx, r, descriptor.Descriptor{Digest: descriptor.EmptyDigest, Size: int64(len(descriptor.EmptyData))}, bytes.NewReader(descriptor.EmptyData)) + if err != nil { + return err + } + + // push config + _, err = reg.BlobPut(ctx, r, descriptor.Descriptor{Digest: confDigest, Size: int64(len(confB))}, bytes.NewReader(confB)) + if err != nil { + return fmt.Errorf("failed sending dummy config to delete %s: %w", r.CommonName(), err) + } + + // push manifest to tag + err = reg.ManifestPut(ctx, r, tempManifest) + if err != nil { + return fmt.Errorf("failed sending dummy manifest to delete %s: %w", r.CommonName(), err) + } + + r.Digest = 
tempManifest.GetDescriptor().Digest.String() + + // delete manifest by digest + reg.slog.Debug("Deleting dummy manifest", + slog.String("ref", r.Reference), + slog.String("digest", r.Digest)) + err = reg.ManifestDelete(ctx, r) + if err != nil { + return fmt.Errorf("failed deleting dummy manifest for %s: %w", r.CommonName(), err) + } + + return nil +} + +// TagList returns a listing to tags from the repository +func (reg *Reg) TagList(ctx context.Context, r ref.Ref, opts ...scheme.TagOpts) (*tag.List, error) { + var config scheme.TagConfig + for _, opt := range opts { + opt(&config) + } + + tl, err := reg.tagListOCI(ctx, r, config) + if err != nil { + return tl, err + } + + for { + // if limit reached, stop searching + if config.Limit > 0 && len(tl.Tags) >= config.Limit { + break + } + tlHead, err := tl.RawHeaders() + if err != nil { + return tl, err + } + links, err := httplink.Parse(tlHead.Values("Link")) + if err != nil { + return tl, err + } + next, err := links.Get("rel", "next") + // if Link header with rel="next" is defined + if err == nil { + link := tl.GetURL() + if link == nil { + return tl, fmt.Errorf("tag list, failed to get URL of previous request") + } + link, err = link.Parse(next.URI) + if err != nil { + return tl, fmt.Errorf("tag list failed to parse Link: %w", err) + } + tlAdd, err := reg.tagListLink(ctx, r, config, link) + if err != nil { + return tl, fmt.Errorf("tag list failed to get Link: %w", err) + } + err = tl.Append(tlAdd) + if err != nil { + return tl, fmt.Errorf("tag list failed to append entries: %w", err) + } + } else { + // do not automatically expand tags with OCI methods, + // OCI registries should send all possible entries up to the specified limit + break + } + } + + return tl, nil +} + +func (reg *Reg) tagListOCI(ctx context.Context, r ref.Ref, config scheme.TagConfig) (*tag.List, error) { + query := url.Values{} + if config.Last != "" { + query.Set("last", config.Last) + } + if config.Limit > 0 { + query.Set("n", 
strconv.Itoa(config.Limit)) + } + headers := http.Header{ + "Accept": []string{"application/json"}, + } + req := ®http.Req{ + MetaKind: reqmeta.Query, + Host: r.Registry, + Method: "GET", + Repository: r.Repository, + Path: "tags/list", + Query: query, + Headers: headers, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to list tags for %s: %w", r.CommonName(), err) + } + defer resp.Close() + if resp.HTTPResponse().StatusCode != 200 { + return nil, fmt.Errorf("failed to list tags for %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + respBody, err := io.ReadAll(resp) + if err != nil { + reg.slog.Warn("Failed to read tag list", + slog.String("err", err.Error()), + slog.String("ref", r.CommonName())) + return nil, fmt.Errorf("failed to read tags for %s: %w", r.CommonName(), err) + } + tl, err := tag.New( + tag.WithRef(r), + tag.WithRaw(respBody), + tag.WithResp(resp.HTTPResponse()), + ) + if err != nil { + reg.slog.Warn("Failed to unmarshal tag list", + slog.String("err", err.Error()), + slog.String("body", string(respBody)), + slog.String("ref", r.CommonName())) + return tl, fmt.Errorf("failed to unmarshal tag list for %s: %w", r.CommonName(), err) + } + + return tl, nil +} + +func (reg *Reg) tagListLink(ctx context.Context, r ref.Ref, _ scheme.TagConfig, link *url.URL) (*tag.List, error) { + headers := http.Header{ + "Accept": []string{"application/json"}, + } + req := ®http.Req{ + MetaKind: reqmeta.Query, + Host: r.Registry, + Method: "GET", + DirectURL: link, + Repository: r.Repository, + Headers: headers, + } + resp, err := reg.reghttp.Do(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to list tags for %s: %w", r.CommonName(), err) + } + defer resp.Close() + if resp.HTTPResponse().StatusCode != 200 { + return nil, fmt.Errorf("failed to list tags for %s: %w", r.CommonName(), reghttp.HTTPError(resp.HTTPResponse().StatusCode)) + } + respBody, err := io.ReadAll(resp) + if err 
!= nil { + reg.slog.Warn("Failed to read tag list", + slog.String("err", err.Error()), + slog.String("ref", r.CommonName())) + return nil, fmt.Errorf("failed to read tags for %s: %w", r.CommonName(), err) + } + tl, err := tag.New( + tag.WithRef(r), + tag.WithRaw(respBody), + tag.WithResp(resp.HTTPResponse()), + ) + if err != nil { + reg.slog.Warn("Failed to unmarshal tag list", + slog.String("err", err.Error()), + slog.String("body", string(respBody)), + slog.String("ref", r.CommonName())) + return tl, fmt.Errorf("failed to unmarshal tag list for %s: %w", r.CommonName(), err) + } + + return tl, nil +} diff --git a/vendor/github.com/regclient/regclient/scheme/scheme.go b/vendor/github.com/regclient/regclient/scheme/scheme.go new file mode 100644 index 00000000..84388a3b --- /dev/null +++ b/vendor/github.com/regclient/regclient/scheme/scheme.go @@ -0,0 +1,234 @@ +// Package scheme defines the interface for various reference schemes. +package scheme + +import ( + "context" + "io" + + "github.com/regclient/regclient/internal/pqueue" + "github.com/regclient/regclient/internal/reqmeta" + "github.com/regclient/regclient/types/blob" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/manifest" + "github.com/regclient/regclient/types/ping" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/referrer" + "github.com/regclient/regclient/types/tag" +) + +// API is used to interface between different methods to store images. +type API interface { + // BlobDelete removes a blob from the repository. + BlobDelete(ctx context.Context, r ref.Ref, d descriptor.Descriptor) error + // BlobGet retrieves a blob, returning a reader. + BlobGet(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) + // BlobHead verifies the existence of a blob, the reader contains the headers but no body to read. 
+ BlobHead(ctx context.Context, r ref.Ref, d descriptor.Descriptor) (blob.Reader, error) + // BlobMount attempts to perform a server side copy of the blob. + BlobMount(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d descriptor.Descriptor) error + // BlobPut sends a blob to the repository, returns the digest and size when successful. + BlobPut(ctx context.Context, r ref.Ref, d descriptor.Descriptor, rdr io.Reader) (descriptor.Descriptor, error) + + // ManifestDelete removes a manifest, including all tags that point to that manifest. + ManifestDelete(ctx context.Context, r ref.Ref, opts ...ManifestOpts) error + // ManifestGet retrieves a manifest from a repository. + ManifestGet(ctx context.Context, r ref.Ref) (manifest.Manifest, error) + // ManifestHead gets metadata about the manifest (existence, digest, mediatype, size). + ManifestHead(ctx context.Context, r ref.Ref) (manifest.Manifest, error) + // ManifestPut sends a manifest to the repository. + ManifestPut(ctx context.Context, r ref.Ref, m manifest.Manifest, opts ...ManifestOpts) error + + // Ping verifies access to a registry or equivalent. + Ping(ctx context.Context, r ref.Ref) (ping.Result, error) + + // ReferrerList returns a list of referrers to a given reference. + ReferrerList(ctx context.Context, r ref.Ref, opts ...ReferrerOpts) (referrer.ReferrerList, error) + + // TagDelete removes a tag from the repository. + TagDelete(ctx context.Context, r ref.Ref) error + // TagList returns a list of tags from the repository. + TagList(ctx context.Context, r ref.Ref, opts ...TagOpts) (*tag.List, error) +} + +// Closer is used to check if a scheme implements the Close API. +type Closer interface { + Close(ctx context.Context, r ref.Ref) error +} + +// GCLocker is used to indicate locking is available for GC management. +type GCLocker interface { + // GCLock a reference to prevent GC from triggering during a put, locks are not exclusive. 
+ GCLock(r ref.Ref) + // GCUnlock a reference to allow GC (once all locks are released). + // The reference should be closed after this step and unlock should only be called once per each Lock call. + GCUnlock(r ref.Ref) +} + +// Throttler is used to indicate the scheme implements Throttle. +type Throttler interface { + Throttle(r ref.Ref, put bool) []*pqueue.Queue[reqmeta.Data] +} + +// ManifestConfig is used by schemes to import [ManifestOpts]. +type ManifestConfig struct { + CheckReferrers bool + Child bool // used when pushing a child of a manifest list, skips indexing in ocidir + Manifest manifest.Manifest +} + +// ManifestOpts is used to set options on manifest APIs. +type ManifestOpts func(*ManifestConfig) + +// WithManifestCheckReferrers is used when deleting a manifest. +// It indicates the manifest should be fetched and referrers should be deleted if defined. +func WithManifestCheckReferrers() ManifestOpts { + return func(config *ManifestConfig) { + config.CheckReferrers = true + } +} + +// WithManifestChild indicates the API call is on a child manifest. +// This is used internally when copying multi-platform manifests. +// This bypasses tracking of an untagged digest in ocidir which is needed for garbage collection. +func WithManifestChild() ManifestOpts { + return func(config *ManifestConfig) { + config.Child = true + } +} + +// WithManifest is used to pass the manifest to a method to avoid an extra GET request. +// This is used on a delete to check for referrers. +func WithManifest(m manifest.Manifest) ManifestOpts { + return func(mc *ManifestConfig) { + mc.Manifest = m + } +} + +// ReferrerConfig is used by schemes to import [ReferrerOpts]. +type ReferrerConfig struct { + MatchOpt descriptor.MatchOpt // filter/sort results + Platform string // get referrers for a specific platform + SrcRepo ref.Ref // repo used to query referrers +} + +// ReferrerOpts is used to set options on referrer APIs. 
+type ReferrerOpts func(*ReferrerConfig) + +// WithReferrerMatchOpt filters results using [descriptor.MatchOpt]. +func WithReferrerMatchOpt(mo descriptor.MatchOpt) ReferrerOpts { + return func(config *ReferrerConfig) { + config.MatchOpt = mo + } +} + +// WithReferrerPlatform gets referrers for a single platform from a multi-platform manifest. +// Note that this is implemented by [regclient.ReferrerList] and not the individual scheme implementations. +func WithReferrerPlatform(p string) ReferrerOpts { + return func(config *ReferrerConfig) { + config.Platform = p + } +} + +// WithReferrerSource pulls referrers from a separate source. +// Note that this is implemented by [regclient.ReferrerList] and not the individual scheme implementations. +func WithReferrerSource(r ref.Ref) ReferrerOpts { + return func(config *ReferrerConfig) { + config.SrcRepo = r + } +} + +// WithReferrerAT filters by a specific artifactType value. +// +// Deprecated: replace with [WithReferrerMatchOpt]. +func WithReferrerAT(at string) ReferrerOpts { + return func(config *ReferrerConfig) { + config.MatchOpt.ArtifactType = at + } +} + +// WithReferrerAnnotations filters by a list of annotations, all of which must match. +// +// Deprecated: replace with [WithReferrerMatchOpt]. +func WithReferrerAnnotations(annotations map[string]string) ReferrerOpts { + return func(config *ReferrerConfig) { + if config.MatchOpt.Annotations == nil { + config.MatchOpt.Annotations = annotations + } else { + for k, v := range annotations { + config.MatchOpt.Annotations[k] = v + } + } + } +} + +// WithReferrerSort orders the resulting referrers listing according to a specified annotation. +// +// Deprecated: replace with [WithReferrerMatchOpt]. +func WithReferrerSort(annotation string, desc bool) ReferrerOpts { + return func(config *ReferrerConfig) { + config.MatchOpt.SortAnnotation = annotation + config.MatchOpt.SortDesc = desc + } +} + +// ReferrerFilter filters the referrer list according to the config. 
+func ReferrerFilter(config ReferrerConfig, rlIn referrer.ReferrerList) referrer.ReferrerList { + return referrer.ReferrerList{ + Subject: rlIn.Subject, + Source: rlIn.Source, + Manifest: rlIn.Manifest, + Annotations: rlIn.Annotations, + Tags: rlIn.Tags, + Descriptors: descriptor.DescriptorListFilter(rlIn.Descriptors, config.MatchOpt), + } +} + +// RepoConfig is used by schemes to import [RepoOpts]. +type RepoConfig struct { + Limit int + Last string +} + +// RepoOpts is used to set options on repo APIs. +type RepoOpts func(*RepoConfig) + +// WithRepoLimit passes a maximum number of repositories to return to the repository list API. +// Registries may ignore this. +func WithRepoLimit(l int) RepoOpts { + return func(config *RepoConfig) { + config.Limit = l + } +} + +// WithRepoLast passes the last received repository for requesting the next batch of repositories. +// Registries may ignore this. +func WithRepoLast(l string) RepoOpts { + return func(config *RepoConfig) { + config.Last = l + } +} + +// TagConfig is used by schemes to import [TagOpts]. +type TagConfig struct { + Limit int + Last string +} + +// TagOpts is used to set options on tag APIs. +type TagOpts func(*TagConfig) + +// WithTagLimit passes a maximum number of tags to return to the tag list API. +// Registries may ignore this. +func WithTagLimit(limit int) TagOpts { + return func(t *TagConfig) { + t.Limit = limit + } +} + +// WithTagLast passes the last received tag for requesting the next batch of tags. +// Registries may ignore this. 
+func WithTagLast(last string) TagOpts { + return func(t *TagConfig) { + t.Last = last + } +} diff --git a/vendor/github.com/regclient/regclient/tag.go b/vendor/github.com/regclient/regclient/tag.go new file mode 100644 index 00000000..6b522804 --- /dev/null +++ b/vendor/github.com/regclient/regclient/tag.go @@ -0,0 +1,40 @@ +package regclient + +import ( + "context" + "fmt" + + "github.com/regclient/regclient/scheme" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/ref" + "github.com/regclient/regclient/types/tag" +) + +// TagDelete deletes a tag from the registry. Since there's no API for this, +// you'd want to normally just delete the manifest. However multiple tags may +// point to the same manifest, so instead you must: +// 1. Make a manifest, for this we put a few labels and timestamps to be unique. +// 2. Push that manifest to the tag. +// 3. Delete the digest for that new manifest that is only used by that tag. +func (rc *RegClient) TagDelete(ctx context.Context, r ref.Ref) error { + if !r.IsSet() { + return fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return err + } + return schemeAPI.TagDelete(ctx, r) +} + +// TagList returns a tag list from a repository +func (rc *RegClient) TagList(ctx context.Context, r ref.Ref, opts ...scheme.TagOpts) (*tag.List, error) { + if !r.IsSetRepo() { + return nil, fmt.Errorf("ref is not set: %s%.0w", r.CommonName(), errs.ErrInvalidReference) + } + schemeAPI, err := rc.schemeGet(r.Scheme) + if err != nil { + return nil, err + } + return schemeAPI.TagList(ctx, r, opts...) 
+} diff --git a/vendor/github.com/regclient/regclient/types/annotations.go b/vendor/github.com/regclient/regclient/types/annotations.go new file mode 100644 index 00000000..c989eb66 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/annotations.go @@ -0,0 +1,72 @@ +// Content in this file comes from OCI +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +const ( + // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339). + AnnotationCreated = "org.opencontainers.image.created" + + // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string). + AnnotationAuthors = "org.opencontainers.image.authors" + + // AnnotationURL is the annotation key for the URL to find more information on the image. + AnnotationURL = "org.opencontainers.image.url" + + // AnnotationDocumentation is the annotation key for the URL to get documentation on the image. + AnnotationDocumentation = "org.opencontainers.image.documentation" + + // AnnotationSource is the annotation key for the URL to get source code for building the image. + AnnotationSource = "org.opencontainers.image.source" + + // AnnotationVersion is the annotation key for the version of the packaged software. + // The version MAY match a label or tag in the source code repository. 
+ // The version MAY be Semantic versioning-compatible. + AnnotationVersion = "org.opencontainers.image.version" + + // AnnotationRevision is the annotation key for the source control revision identifier for the packaged software. + AnnotationRevision = "org.opencontainers.image.revision" + + // AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual. + AnnotationVendor = "org.opencontainers.image.vendor" + + // AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression. + AnnotationLicenses = "org.opencontainers.image.licenses" + + // AnnotationRefName is the annotation key for the name of the reference for a target. + // SHOULD only be considered valid when on descriptors on `index.json` within image layout. + AnnotationRefName = "org.opencontainers.image.ref.name" + + // AnnotationTitle is the annotation key for the human-readable title of the image. + AnnotationTitle = "org.opencontainers.image.title" + + // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image. + AnnotationDescription = "org.opencontainers.image.description" + + // AnnotationBaseImageDigest is the annotation key for the digest of the image's base image. + AnnotationBaseImageDigest = "org.opencontainers.image.base.digest" + + // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. + AnnotationBaseImageName = "org.opencontainers.image.base.name" + + // AnnotationArtifactCreated is the annotation key for the date and time on which the artifact was built, conforming to RFC 3339. + AnnotationArtifactCreated = "org.opencontainers.artifact.created" + + // AnnotationArtifactDescription is the annotation key for the human readable description for the artifact. 
+ AnnotationArtifactDescription = "org.opencontainers.artifact.description" + + // AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing. + AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied" +) diff --git a/vendor/github.com/regclient/regclient/types/blob/blob.go b/vendor/github.com/regclient/regclient/types/blob/blob.go new file mode 100644 index 00000000..86d215cb --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/blob/blob.go @@ -0,0 +1,103 @@ +// Package blob is the underlying type for pushing and pulling blobs. +package blob + +import ( + "io" + "net/http" + + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/types/descriptor" + v1 "github.com/regclient/regclient/types/oci/v1" + "github.com/regclient/regclient/types/ref" +) + +// Blob interface is used for returning blobs. +type Blob interface { + // GetDescriptor returns the descriptor associated with the blob. + GetDescriptor() descriptor.Descriptor + // RawBody returns the raw content of the blob. + RawBody() ([]byte, error) + // RawHeaders returns the headers received from the registry. + RawHeaders() http.Header + // Response returns the response associated with the blob. + Response() *http.Response + + // Digest returns the provided or calculated digest of the blob. + // + // Deprecated: Digest should be replaced by GetDescriptor().Digest. + Digest() digest.Digest + // Length returns the provided or calculated length of the blob. + // + // Deprecated: Length should be replaced by GetDescriptor().Size. + Length() int64 + // MediaType returns the Content-Type header received from the registry. + // + // Deprecated: MediaType should be replaced by GetDescriptor().MediaType. 
+ MediaType() string +} + +type blobConfig struct { + desc descriptor.Descriptor + header http.Header + image *v1.Image + r ref.Ref + rdr io.Reader + resp *http.Response + rawBody []byte +} + +// Opts is used for options to create a new blob. +type Opts func(*blobConfig) + +// WithDesc specifies the descriptor associated with the blob. +func WithDesc(d descriptor.Descriptor) Opts { + return func(bc *blobConfig) { + bc.desc = d + } +} + +// WithHeader defines the headers received when pulling a blob. +func WithHeader(header http.Header) Opts { + return func(bc *blobConfig) { + bc.header = header + } +} + +// WithImage provides the OCI Image config needed for config blobs. +func WithImage(image v1.Image) Opts { + return func(bc *blobConfig) { + bc.image = &image + } +} + +// WithRawBody defines the raw blob contents for OCIConfig. +func WithRawBody(raw []byte) Opts { + return func(bc *blobConfig) { + bc.rawBody = raw + } +} + +// WithReader defines the reader for a new blob. +func WithReader(rc io.Reader) Opts { + return func(bc *blobConfig) { + bc.rdr = rc + } +} + +// WithRef specifies the reference where the blob was pulled from. +func WithRef(r ref.Ref) Opts { + return func(bc *blobConfig) { + bc.r = r + } +} + +// WithResp includes the http response, which is used to extract the headers and reader. 
+func WithResp(resp *http.Response) Opts { + return func(bc *blobConfig) { + bc.resp = resp + if bc.header == nil && resp != nil { + bc.header = resp.Header + } + } +} diff --git a/vendor/github.com/regclient/regclient/types/blob/common.go b/vendor/github.com/regclient/regclient/types/blob/common.go new file mode 100644 index 00000000..44e652ad --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/blob/common.go @@ -0,0 +1,62 @@ +package blob + +import ( + "net/http" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/ref" +) + +// Common was previously an interface. A type alias is provided for upgrades. +type Common = *BCommon + +// BCommon is a common struct for all blobs which includes various shared methods. +type BCommon struct { + r ref.Ref + desc descriptor.Descriptor + blobSet bool + rawHeader http.Header + resp *http.Response +} + +// GetDescriptor returns the descriptor associated with the blob. +func (c *BCommon) GetDescriptor() descriptor.Descriptor { + return c.desc +} + +// Digest returns the provided or calculated digest of the blob. +// +// Deprecated: Digest should be replaced by GetDescriptor().Digest, see [GetDescriptor]. +func (c *BCommon) Digest() digest.Digest { + return c.desc.Digest +} + +// Length returns the provided or calculated length of the blob. +// +// Deprecated: Length should be replaced by GetDescriptor().Size, see [GetDescriptor]. +func (c *BCommon) Length() int64 { + return c.desc.Size +} + +// MediaType returns the Content-Type header received from the registry. +// +// Deprecated: MediaType should be replaced by GetDescriptor().MediaType, see [GetDescriptor]. +func (c *BCommon) MediaType() string { + return c.desc.MediaType +} + +// RawHeaders returns the headers received from the registry. 
+func (c *BCommon) RawHeaders() http.Header { + return c.rawHeader +} + +// Response returns the response associated with the blob. +func (c *BCommon) Response() *http.Response { + return c.resp +} diff --git a/vendor/github.com/regclient/regclient/types/blob/ociconfig.go b/vendor/github.com/regclient/regclient/types/blob/ociconfig.go new file mode 100644 index 00000000..619ea32c --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/blob/ociconfig.go @@ -0,0 +1,127 @@ +package blob + +import ( + "encoding/json" + "fmt" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/regclient/regclient/types/mediatype" + v1 "github.com/regclient/regclient/types/oci/v1" +) + +// OCIConfig was previously an interface. A type alias is provided for upgrading. +type OCIConfig = *BOCIConfig + +// BOCIConfig includes an OCI Image Config struct that may be extracted from or pushed to a blob. +type BOCIConfig struct { + BCommon + rawBody []byte + image v1.Image +} + +// NewOCIConfig creates a new BOCIConfig. +// When created from an existing blob, a BOCIConfig will be created using BReader.ToOCIConfig(). 
+func NewOCIConfig(opts ...Opts) *BOCIConfig { + bc := blobConfig{} + for _, opt := range opts { + opt(&bc) + } + if bc.image != nil && len(bc.rawBody) == 0 { + var err error + bc.rawBody, err = json.Marshal(bc.image) + if err != nil { + bc.rawBody = []byte{} + } + } + if len(bc.rawBody) > 0 { + if bc.image == nil { + bc.image = &v1.Image{} + err := json.Unmarshal(bc.rawBody, bc.image) + if err != nil { + bc.image = nil + } + } + // force descriptor to match raw body, even if we generated the raw body + bc.desc.Digest = bc.desc.DigestAlgo().FromBytes(bc.rawBody) + bc.desc.Size = int64(len(bc.rawBody)) + if bc.desc.MediaType == "" { + bc.desc.MediaType = mediatype.OCI1ImageConfig + } + } + b := BOCIConfig{ + BCommon: BCommon{ + desc: bc.desc, + r: bc.r, + rawHeader: bc.header, + resp: bc.resp, + }, + rawBody: bc.rawBody, + } + if bc.image != nil { + b.image = *bc.image + b.blobSet = true + } + return &b +} + +// GetConfig returns OCI config. +func (oc *BOCIConfig) GetConfig() v1.Image { + return oc.image +} + +// RawBody returns the original body from the request. +func (oc *BOCIConfig) RawBody() ([]byte, error) { + var err error + if !oc.blobSet { + return []byte{}, fmt.Errorf("Blob is not defined") + } + if len(oc.rawBody) == 0 { + oc.rawBody, err = json.Marshal(oc.image) + } + return oc.rawBody, err +} + +// SetConfig updates the config, including raw body and descriptor. +func (oc *BOCIConfig) SetConfig(image v1.Image) { + oc.image = image + oc.rawBody, _ = json.Marshal(oc.image) + if oc.desc.MediaType == "" { + oc.desc.MediaType = mediatype.OCI1ImageConfig + } + oc.desc.Digest = oc.desc.DigestAlgo().FromBytes(oc.rawBody) + oc.desc.Size = int64(len(oc.rawBody)) + oc.blobSet = true +} + +// MarshalJSON passes through the marshalling to the underlying image if rawBody is not available. 
+func (oc *BOCIConfig) MarshalJSON() ([]byte, error) { + if !oc.blobSet { + return []byte{}, fmt.Errorf("Blob is not defined") + } + if len(oc.rawBody) > 0 { + return oc.rawBody, nil + } + return json.Marshal(oc.image) +} + +// UnmarshalJSON extracts json content and populates the content. +func (oc *BOCIConfig) UnmarshalJSON(data []byte) error { + image := v1.Image{} + err := json.Unmarshal(data, &image) + if err != nil { + return err + } + oc.image = image + oc.rawBody = make([]byte, len(data)) + copy(oc.rawBody, data) + if oc.desc.MediaType == "" { + oc.desc.MediaType = mediatype.OCI1ImageConfig + } + oc.desc.Digest = oc.desc.DigestAlgo().FromBytes(oc.rawBody) + oc.desc.Size = int64(len(oc.rawBody)) + oc.blobSet = true + return nil +} diff --git a/vendor/github.com/regclient/regclient/types/blob/reader.go b/vendor/github.com/regclient/regclient/types/blob/reader.go new file mode 100644 index 00000000..632fac20 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/blob/reader.go @@ -0,0 +1,210 @@ +package blob + +import ( + "fmt" + "io" + "strconv" + "sync" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/internal/limitread" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" +) + +// Reader was previously an interface. A type alias is provided for upgrading. +type Reader = *BReader + +// BReader is used to read blobs. +type BReader struct { + BCommon + readBytes int64 + reader io.Reader + origRdr io.Reader + digester digest.Digester + mu sync.Mutex +} + +// NewReader creates a new BReader. 
+func NewReader(opts ...Opts) *BReader { + bc := blobConfig{} + for _, opt := range opts { + opt(&bc) + } + if bc.resp != nil { + // extract headers and reader if other fields not passed + if bc.header == nil { + bc.header = bc.resp.Header + } + if bc.rdr == nil { + bc.rdr = bc.resp.Body + } + } + if bc.header != nil { + // extract fields from header if descriptor not passed + if bc.desc.MediaType == "" { + bc.desc.MediaType = mediatype.Base(bc.header.Get("Content-Type")) + } + if bc.desc.Size == 0 { + cl, _ := strconv.Atoi(bc.header.Get("Content-Length")) + bc.desc.Size = int64(cl) + } + if bc.desc.Digest == "" { + bc.desc.Digest, _ = digest.Parse(bc.header.Get("Docker-Content-Digest")) + } + } + br := BReader{ + BCommon: BCommon{ + r: bc.r, + desc: bc.desc, + rawHeader: bc.header, + resp: bc.resp, + }, + origRdr: bc.rdr, + } + if bc.rdr != nil { + br.blobSet = true + br.digester = br.desc.DigestAlgo().Digester() + rdr := bc.rdr + if br.desc.Size > 0 { + rdr = &limitread.LimitRead{ + Reader: rdr, + Limit: br.desc.Size, + } + } + br.reader = io.TeeReader(rdr, br.digester.Hash()) + } + return &br +} + +// Close attempts to close the reader and populates/validates the digest. +func (r *BReader) Close() error { + if r == nil || r.origRdr == nil { + return nil + } + // attempt to close if available in original reader + bc, ok := r.origRdr.(io.Closer) + if !ok { + return nil + } + return bc.Close() +} + +// RawBody returns the original body from the request. +func (r *BReader) RawBody() ([]byte, error) { + return io.ReadAll(r) +} + +// Read passes through the read operation while computing the digest and tracking the size. 
+func (r *BReader) Read(p []byte) (int, error) { + if r == nil || r.reader == nil { + return 0, fmt.Errorf("blob has no reader: %w", io.ErrUnexpectedEOF) + } + r.mu.Lock() + defer r.mu.Unlock() + size, err := r.reader.Read(p) + r.readBytes = r.readBytes + int64(size) + if err == io.EOF { + // check/save size + if r.desc.Size == 0 { + r.desc.Size = r.readBytes + } else if r.readBytes < r.desc.Size { + err = fmt.Errorf("%w [expected %d, received %d]: %w", errs.ErrShortRead, r.desc.Size, r.readBytes, err) + } else if r.readBytes > r.desc.Size { + err = fmt.Errorf("%w [expected %d, received %d]: %w", errs.ErrSizeLimitExceeded, r.desc.Size, r.readBytes, err) + } + // check/save digest + if r.desc.Digest.Validate() != nil { + r.desc.Digest = r.digester.Digest() + } else if r.desc.Digest != r.digester.Digest() { + err = fmt.Errorf("%w [expected %s, calculated %s]: %w", errs.ErrDigestMismatch, r.desc.Digest.String(), r.digester.Digest().String(), err) + } + } + return size, err +} + +// Seek passes through the seek operation, reseting or invalidating the digest +func (r *BReader) Seek(offset int64, whence int) (int64, error) { + if r == nil || r.origRdr == nil { + return 0, fmt.Errorf("blob has no reader") + } + r.mu.Lock() + defer r.mu.Unlock() + if offset == 0 && whence == io.SeekCurrent { + return r.readBytes, nil + } + // cannot do an arbitrary seek and still digest without a lot more complication + if offset != 0 || whence != io.SeekStart { + return r.readBytes, fmt.Errorf("unable to seek to arbitrary position") + } + rdrSeek, ok := r.origRdr.(io.Seeker) + if !ok { + return r.readBytes, fmt.Errorf("Seek unsupported") + } + o, err := rdrSeek.Seek(offset, whence) + if err != nil || o != 0 { + return r.readBytes, err + } + // reset internal offset and digest calculation + rdr := r.origRdr + if r.desc.Size > 0 { + rdr = &limitread.LimitRead{ + Reader: rdr, + Limit: r.desc.Size, + } + } + r.digester = r.desc.DigestAlgo().Digester() + r.reader = io.TeeReader(rdr, 
r.digester.Hash())
+	r.readBytes = 0
+
+	return 0, nil
+}
+
+// ToOCIConfig converts a BReader to a BOCIConfig.
+func (r *BReader) ToOCIConfig() (*BOCIConfig, error) {
+	if r == nil || !r.blobSet {
+		return nil, fmt.Errorf("blob is not defined")
+	}
+	if r.readBytes != 0 {
+		return nil, fmt.Errorf("unable to convert after read has been performed")
+	}
+	blobBody, err := io.ReadAll(r)
+	errC := r.Close()
+	if err != nil {
+		return nil, fmt.Errorf("error reading image config for %s: %w", r.r.CommonName(), err)
+	}
+	if errC != nil {
+		// wrap the close error (errC); err is guaranteed nil here, wrapping it would render %!w(<nil>)
+		return nil, fmt.Errorf("error closing blob reader: %w", errC)
+	}
+	return NewOCIConfig(
+		WithDesc(r.desc),
+		WithHeader(r.rawHeader),
+		WithRawBody(blobBody),
+		WithRef(r.r),
+		WithResp(r.resp),
+	), nil
+}
+
+// ToTarReader converts a BReader to a BTarReader
+func (r *BReader) ToTarReader() (*BTarReader, error) {
+	if r == nil || !r.blobSet {
+		return nil, fmt.Errorf("blob is not defined")
+	}
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.readBytes != 0 {
+		return nil, fmt.Errorf("unable to convert after read has been performed")
+	}
+	return NewTarReader(
+		WithDesc(r.desc),
+		WithHeader(r.rawHeader),
+		WithRef(r.r),
+		WithResp(r.resp),
+		WithReader(r.reader),
+	), nil
+}
diff --git a/vendor/github.com/regclient/regclient/types/blob/tar.go b/vendor/github.com/regclient/regclient/types/blob/tar.go
new file mode 100644
index 00000000..6f47f4bb
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/blob/tar.go
@@ -0,0 +1,191 @@
+package blob
+
+import (
+	"archive/tar"
+	"errors"
+	"fmt"
+	"io"
+	"path/filepath"
+	"strings"
+
+	"github.com/opencontainers/go-digest"
+
+	"github.com/regclient/regclient/internal/limitread"
+	"github.com/regclient/regclient/pkg/archive"
+	"github.com/regclient/regclient/types/errs"
+)
+
+// TarReader was previously an interface. A type alias is provided for upgrading.
+type TarReader = *BTarReader
+
+// BTarReader is used to read individual files from an image layer.
+type BTarReader struct { + BCommon + origRdr io.Reader + reader io.Reader + digester digest.Digester + tr *tar.Reader +} + +// NewTarReader creates a BTarReader. +// Typically a BTarReader will be created using BReader.ToTarReader(). +func NewTarReader(opts ...Opts) *BTarReader { + bc := blobConfig{} + for _, opt := range opts { + opt(&bc) + } + tr := BTarReader{ + BCommon: BCommon{ + desc: bc.desc, + r: bc.r, + rawHeader: bc.header, + resp: bc.resp, + }, + origRdr: bc.rdr, + } + if bc.rdr != nil { + tr.blobSet = true + tr.digester = tr.desc.DigestAlgo().Digester() + rdr := bc.rdr + if tr.desc.Size > 0 { + rdr = &limitread.LimitRead{ + Reader: rdr, + Limit: tr.desc.Size, + } + } + tr.reader = io.TeeReader(rdr, tr.digester.Hash()) + } + return &tr +} + +// Close attempts to close the reader and populates/validates the digest. +func (tr *BTarReader) Close() error { + // attempt to close if available in original reader + if trc, ok := tr.origRdr.(io.Closer); ok && trc != nil { + return trc.Close() + } + return nil +} + +// GetTarReader returns the tar.Reader for the blob. +func (tr *BTarReader) GetTarReader() (*tar.Reader, error) { + if tr.reader == nil { + return nil, fmt.Errorf("blob has no reader defined") + } + if tr.tr == nil { + dr, err := archive.Decompress(tr.reader) + if err != nil { + return nil, err + } + tr.tr = tar.NewReader(dr) + } + return tr.tr, nil +} + +// RawBody returns the original body from the request. 
+func (tr *BTarReader) RawBody() ([]byte, error) { + if !tr.blobSet { + return []byte{}, fmt.Errorf("Blob is not defined") + } + if tr.tr != nil { + return []byte{}, fmt.Errorf("RawBody cannot be returned after TarReader returned") + } + b, err := io.ReadAll(tr.reader) + if err != nil { + return b, err + } + if tr.digester != nil { + dig := tr.digester.Digest() + tr.digester = nil + if tr.desc.Digest.String() != "" && dig != tr.desc.Digest { + return b, fmt.Errorf("%w, expected %s, received %s", errs.ErrDigestMismatch, tr.desc.Digest.String(), dig.String()) + } + tr.desc.Digest = dig + } + err = tr.Close() + return b, err +} + +// ReadFile parses the tar to find a file. +func (tr *BTarReader) ReadFile(filename string) (*tar.Header, io.Reader, error) { + if strings.HasPrefix(filename, ".wh.") { + return nil, nil, fmt.Errorf(".wh. prefix is reserved for whiteout files") + } + // normalize filenames, + filename = filepath.Clean(filename) + if filename[0] == '/' { + filename = filename[1:] + } + // get reader + rdr, err := tr.GetTarReader() + if err != nil { + return nil, nil, err + } + // loop through files until whiteout or target file is found + whiteout := false + for { + th, err := rdr.Next() + if err != nil { + // break on eof, everything else is an error + if errors.Is(err, io.EOF) { + break + } + return nil, nil, err + } + thFile := filepath.Clean(th.Name) + if thFile[0] == '/' { + thFile = thFile[1:] + } + // found the target file + if thFile == filename { + return th, rdr, nil + } + // check/track whiteout file + name := filepath.Base(th.Name) + if !whiteout && strings.HasPrefix(name, ".wh.") && tarCmpWhiteout(th.Name, filename) { + // continue searching after finding a whiteout file + // a new file may be created in the same layer + whiteout = true + } + } + // EOF encountered + if whiteout { + return nil, nil, errs.ErrFileDeleted + } + if tr.digester != nil { + _, _ = io.Copy(io.Discard, tr.reader) // process/digest any trailing bytes from reader + dig := 
tr.digester.Digest() + tr.digester = nil + if tr.desc.Digest.String() != "" && dig != tr.desc.Digest { + return nil, nil, fmt.Errorf("%w, expected %s, received %s", errs.ErrDigestMismatch, tr.desc.Digest.String(), dig.String()) + } + tr.desc.Digest = dig + } + return nil, nil, errs.ErrFileNotFound +} + +func tarCmpWhiteout(whFile, tgtFile string) bool { + whSplit := strings.Split(whFile, "/") + tgtSplit := strings.Split(tgtFile, "/") + // the -1 handles the opaque whiteout + if len(whSplit)-1 > len(tgtSplit) { + return false + } + // verify the path matches up to the whiteout + for i := range whSplit[:len(whSplit)-1] { + if whSplit[i] != tgtSplit[i] { + return false + } + } + i := len(whSplit) - 1 + // opaque whiteout of entire directory + if whSplit[i] == ".wh..wh..opq" { + return true + } + // compare whiteout name to next path entry + if i > len(tgtSplit)-1 { + return false + } + whName := strings.TrimPrefix(whSplit[i], ".wh.") + return whName == tgtSplit[i] +} diff --git a/vendor/github.com/regclient/regclient/types/callback.go b/vendor/github.com/regclient/regclient/types/callback.go new file mode 100644 index 00000000..c4c2fad9 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/callback.go @@ -0,0 +1,29 @@ +package types + +type CallbackState int + +const ( + CallbackUndef CallbackState = iota + CallbackSkipped + CallbackStarted + CallbackActive + CallbackFinished + CallbackArchived +) + +type CallbackKind int + +const ( + CallbackManifest CallbackKind = iota + CallbackBlob +) + +func (k CallbackKind) String() string { + switch k { + case CallbackBlob: + return "blob" + case CallbackManifest: + return "manifest" + } + return "unknown" +} diff --git a/vendor/github.com/regclient/regclient/types/descriptor.go b/vendor/github.com/regclient/regclient/types/descriptor.go new file mode 100644 index 00000000..ad31e7e4 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/descriptor.go @@ -0,0 +1,27 @@ +package types + +import 
"github.com/regclient/regclient/types/descriptor" + +type ( + // Descriptor is used in manifests to refer to content by media type, size, and digest. + // + // Deprecated: replace with [descriptor.Descriptor]. + Descriptor = descriptor.Descriptor + // MatchOpt defines conditions for a match descriptor. + // + // Deprecated: replace with [descriptor.MatchOpt]. + MatchOpt = descriptor.MatchOpt +) + +var ( + // EmptyData is the content of the empty JSON descriptor. See [mediatype.OCI1Empty]. + // + // Deprecated: replace with [descriptor.EmptyData]. + EmptyData = descriptor.EmptyData + // EmptyDigest is the digest of the empty JSON descriptor. See [mediatype.OCI1Empty]. + // + // Deprecated: replace with [descriptor.EmptyDigest]. + EmptyDigest = descriptor.EmptyDigest + DescriptorListFilter = descriptor.DescriptorListFilter + DescriptorListSearch = descriptor.DescriptorListSearch +) diff --git a/vendor/github.com/regclient/regclient/types/descriptor/descriptor.go b/vendor/github.com/regclient/regclient/types/descriptor/descriptor.go new file mode 100644 index 00000000..14226933 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/descriptor/descriptor.go @@ -0,0 +1,312 @@ +// Package descriptor defines the OCI descriptor data structure used in manifests to reference content addressable data. +package descriptor + +import ( + "fmt" + "sort" + "strings" + "text/tabwriter" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/internal/units" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" + "github.com/regclient/regclient/types/platform" +) + +// Descriptor is used in manifests to refer to content by media type, size, and digest. +type Descriptor struct { + // MediaType describe the type of the content. + MediaType string `json:"mediaType"` + + // Digest uniquely identifies the content. 
+ Digest digest.Digest `json:"digest"` + + // Size in bytes of content. + Size int64 `json:"size"` + + // URLs contains the source URLs of this content. + URLs []string `json:"urls,omitempty"` + + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Data is an embedding of the targeted content. This is encoded as a base64 + // string when marshalled to JSON (automatically, by encoding/json). If + // present, Data can be used directly to avoid fetching the targeted content. + Data []byte `json:"data,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // This should only be used when referring to a manifest. + Platform *platform.Platform `json:"platform,omitempty"` + + // ArtifactType is the media type of the artifact this descriptor refers to. + ArtifactType string `json:"artifactType,omitempty"` + + // digestAlgo is the preferred digest algorithm for when the digest is unset. + digestAlgo digest.Algorithm +} + +var ( + // EmptyData is the content of the empty JSON descriptor. See [mediatype.OCI1Empty]. + EmptyData = []byte("{}") + // EmptyDigest is the digest of the empty JSON descriptor. See [mediatype.OCI1Empty]. 
+ EmptyDigest = digest.SHA256.FromBytes(EmptyData) + mtToOCI map[string]string +) + +func init() { + mtToOCI = map[string]string{ + mediatype.Docker2ManifestList: mediatype.OCI1ManifestList, + mediatype.Docker2Manifest: mediatype.OCI1Manifest, + mediatype.Docker2ImageConfig: mediatype.OCI1ImageConfig, + mediatype.Docker2Layer: mediatype.OCI1Layer, + mediatype.Docker2LayerGzip: mediatype.OCI1LayerGzip, + mediatype.Docker2LayerZstd: mediatype.OCI1LayerZstd, + mediatype.OCI1ManifestList: mediatype.OCI1ManifestList, + mediatype.OCI1Manifest: mediatype.OCI1Manifest, + mediatype.OCI1ImageConfig: mediatype.OCI1ImageConfig, + mediatype.OCI1Layer: mediatype.OCI1Layer, + mediatype.OCI1LayerGzip: mediatype.OCI1LayerGzip, + mediatype.OCI1LayerZstd: mediatype.OCI1LayerZstd, + } +} + +// DigestAlgo returns the algorithm for computing the digest. +// This prefers the algorithm used by the digest when set, falling back to the preferred digest algorithm, and finally the canonical algorithm. +func (d Descriptor) DigestAlgo() digest.Algorithm { + if d.Digest != "" && d.Digest.Validate() == nil { + return d.Digest.Algorithm() + } + if d.digestAlgo != "" && d.digestAlgo.Available() { + return d.digestAlgo + } + return digest.Canonical +} + +// DigestAlgoPrefer sets the preferred digest algorithm for when the digest is unset. 
+func (d *Descriptor) DigestAlgoPrefer(algo digest.Algorithm) error { + if !algo.Available() { + return fmt.Errorf("digest algorithm is not available: %s%.0w", algo.String(), errs.ErrUnsupported) + } + d.digestAlgo = algo + return nil +} + +// GetData decodes the Data field from the descriptor if available +func (d Descriptor) GetData() ([]byte, error) { + // verify length + if int64(len(d.Data)) != d.Size { + return nil, errs.ErrParsingFailed + } + // generate and verify digest + if d.Digest != d.DigestAlgo().FromBytes(d.Data) { + return nil, errs.ErrParsingFailed + } + // return data + return d.Data, nil +} + +// Equal indicates the two descriptors are identical, effectively a DeepEqual. +func (d Descriptor) Equal(d2 Descriptor) bool { + if !d.Same(d2) { + return false + } + if d.MediaType != d2.MediaType { + return false + } + if d.ArtifactType != d2.ArtifactType { + return false + } + if d.Platform == nil || d2.Platform == nil { + if d.Platform != nil || d2.Platform != nil { + return false + } + } else if !platform.Match(*d.Platform, *d2.Platform) { + return false + } + if d.URLs == nil || d2.URLs == nil { + if d.URLs != nil || d2.URLs != nil { + return false + } + } else if len(d.URLs) != len(d2.URLs) { + return false + } else { + for i := range d.URLs { + if d.URLs[i] != d2.URLs[i] { + return false + } + } + } + if d.Annotations == nil || d2.Annotations == nil { + if d.Annotations != nil || d2.Annotations != nil { + return false + } + } else if len(d.Annotations) != len(d2.Annotations) { + return false + } else { + for i := range d.Annotations { + if d.Annotations[i] != d2.Annotations[i] { + return false + } + } + } + return true +} + +// Same indicates two descriptors point to the same CAS object. +// This verifies the digest, media type, and size all match. 
+func (d Descriptor) Same(d2 Descriptor) bool { + if d.Digest != d2.Digest || d.Size != d2.Size { + return false + } + // loosen the check on media type since this can be converted from a build + if d.MediaType != d2.MediaType && (mtToOCI[d.MediaType] != mtToOCI[d2.MediaType] || mtToOCI[d.MediaType] == "") { + return false + } + return true +} + +func (d Descriptor) MarshalPrettyTW(tw *tabwriter.Writer, prefix string) error { + fmt.Fprintf(tw, "%sDigest:\t%s\n", prefix, string(d.Digest)) + fmt.Fprintf(tw, "%sMediaType:\t%s\n", prefix, d.MediaType) + if d.ArtifactType != "" { + fmt.Fprintf(tw, "%sArtifactType:\t%s\n", prefix, d.ArtifactType) + } + switch d.MediaType { + case mediatype.Docker1Manifest, mediatype.Docker1ManifestSigned, + mediatype.Docker2Manifest, mediatype.Docker2ManifestList, + mediatype.OCI1Manifest, mediatype.OCI1ManifestList: + // skip printing size for descriptors to manifests + default: + if d.Size > 100000 { + fmt.Fprintf(tw, "%sSize:\t%s\n", prefix, units.HumanSize(float64(d.Size))) + } else { + fmt.Fprintf(tw, "%sSize:\t%dB\n", prefix, d.Size) + } + } + if p := d.Platform; p != nil && p.OS != "" { + fmt.Fprintf(tw, "%sPlatform:\t%s\n", prefix, p.String()) + if p.OSVersion != "" { + fmt.Fprintf(tw, "%sOSVersion:\t%s\n", prefix, p.OSVersion) + } + if len(p.OSFeatures) > 0 { + fmt.Fprintf(tw, "%sOSFeatures:\t%s\n", prefix, strings.Join(p.OSFeatures, ", ")) + } + } + if len(d.URLs) > 0 { + fmt.Fprintf(tw, "%sURLs:\t%s\n", prefix, strings.Join(d.URLs, ", ")) + } + if d.Annotations != nil { + fmt.Fprintf(tw, "%sAnnotations:\t\n", prefix) + for k, v := range d.Annotations { + fmt.Fprintf(tw, "%s %s:\t%s\n", prefix, k, v) + } + } + return nil +} + +// MatchOpt defines conditions for a match descriptor. 
+type MatchOpt struct { + Platform *platform.Platform // Platform to match including compatible platforms (darwin/arm64 matches linux/arm64) + ArtifactType string // Match ArtifactType in the descriptor + Annotations map[string]string // Match each of the specified annotations and their value, an empty value verifies the key is set + SortAnnotation string // Sort the results by an annotation, string based comparison, descriptors without the annotation are sorted last + SortDesc bool // Set to true to sort in descending order +} + +// Match returns true if the descriptor matches the options, including compatible platforms. +func (d Descriptor) Match(opt MatchOpt) bool { + if opt.ArtifactType != "" && d.ArtifactType != opt.ArtifactType { + return false + } + if len(opt.Annotations) > 0 { + if d.Annotations == nil { + return false + } + for k, v := range opt.Annotations { + if dv, ok := d.Annotations[k]; !ok || (v != "" && v != dv) { + return false + } + } + } + if opt.Platform != nil { + if d.Platform == nil { + return false + } + if !platform.Compatible(*opt.Platform, *d.Platform) { + return false + } + } + return true +} + +// DescriptorListFilter returns a list of descriptors from the list matching the search options. +// When opt.SortAnnotation is set, the order of descriptors with matching annotations is undefined. 
func DescriptorListFilter(dl []Descriptor, opt MatchOpt) []Descriptor {
	ret := []Descriptor{}
	for _, d := range dl {
		if d.Match(opt) {
			ret = append(ret, d)
		}
	}
	if opt.SortAnnotation != "" {
		sort.Slice(ret, func(i, j int) bool {
			// if annotations are not defined, sort to the very end
			if ret[i].Annotations == nil {
				return false
			}
			if _, ok := ret[i].Annotations[opt.SortAnnotation]; !ok {
				return false
			}
			if ret[j].Annotations == nil {
				return true
			}
			if _, ok := ret[j].Annotations[opt.SortAnnotation]; !ok {
				return true
			}
			// else sort by string
			// SortDesc inverts the comparison; entries missing the annotation
			// still sort last in either direction (handled above)
			if strings.Compare(ret[i].Annotations[opt.SortAnnotation], ret[j].Annotations[opt.SortAnnotation]) < 0 {
				return !opt.SortDesc
			}
			return opt.SortDesc
		})
	}
	return ret
}

// DescriptorListSearch returns the first descriptor from the list matching the search options.
// With opt.Platform set, the most compatible platform entry wins; errs.ErrNotFound
// is returned when nothing matches.
func DescriptorListSearch(dl []Descriptor, opt MatchOpt) (Descriptor, error) {
	// pre-filter only when needed; a platform-only search scans dl directly
	if opt.ArtifactType != "" || opt.SortAnnotation != "" || len(opt.Annotations) > 0 {
		dl = DescriptorListFilter(dl, opt)
	}
	var ret Descriptor
	var retPlat platform.Platform
	if len(dl) == 0 {
		return ret, errs.ErrNotFound
	}
	if opt.Platform == nil {
		return dl[0], nil
	}
	found := false
	comp := platform.NewCompare(*opt.Platform)
	for _, d := range dl {
		if d.Platform == nil {
			continue
		}
		// Better reports whether d.Platform is a closer match than the best so far
		if comp.Better(*d.Platform, retPlat) {
			found = true
			ret = d
			retPlat = *d.Platform
		}
	}
	if !found {
		return ret, errs.ErrNotFound
	}
	return ret, nil
}
diff --git a/vendor/github.com/regclient/regclient/types/doc.go b/vendor/github.com/regclient/regclient/types/doc.go
new file mode 100644
index 00000000..828541d3
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/doc.go
@@ -0,0 +1,3 @@
+// Package types defines various types that have no other internal imports
+// This allows them to be used between other packages without creating import loops
+package types
diff --git
a/vendor/github.com/regclient/regclient/types/docker/schema1/manifest.go b/vendor/github.com/regclient/regclient/types/docker/schema1/manifest.go
new file mode 100644
index 00000000..92379730
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/docker/schema1/manifest.go
@@ -0,0 +1,135 @@
// Package schema1 defines the manifest and json marshal/unmarshal for docker schema1
package schema1

import (
	"encoding/json"
	// crypto libraries included for go-digest
	_ "crypto/sha256"
	_ "crypto/sha512"

	"github.com/docker/libtrust"
	"github.com/opencontainers/go-digest"

	"github.com/regclient/regclient/types/docker"
	"github.com/regclient/regclient/types/mediatype"
)

var (
	// ManifestSchemaVersion provides a pre-initialized version structure schema1 manifests.
	ManifestSchemaVersion = docker.Versioned{
		SchemaVersion: 1,
		MediaType:     mediatype.Docker1Manifest,
	}
	// ManifestSignedSchemaVersion provides a pre-initialized version structure schema1 signed manifests.
	ManifestSignedSchemaVersion = docker.Versioned{
		SchemaVersion: 1,
		MediaType:     mediatype.Docker1ManifestSigned,
	}
)

// FSLayer is a container struct for BlobSums defined in an image manifest
type FSLayer struct {
	// BlobSum is the tarsum of the referenced filesystem image layer
	BlobSum digest.Digest `json:"blobSum"`
}

// History stores unstructured v1 compatibility information
type History struct {
	// V1Compatibility is the raw v1 compatibility information
	V1Compatibility string `json:"v1Compatibility"`
}

// Manifest defines the schema v1 docker manifest
type Manifest struct {
	docker.Versioned

	// Name is the name of the image's repository
	Name string `json:"name"`

	// Tag is the tag of the image specified by this manifest
	Tag string `json:"tag"`

	// Architecture is the host architecture on which this image is intended to run
	Architecture string `json:"architecture"`

	// FSLayers is a list of filesystem layer blobSums contained in this image
	FSLayers []FSLayer `json:"fsLayers"`

	// History is a list of unstructured historical data for v1 compatibility
	History []History `json:"history"`
}

// SignedManifest provides an envelope for a signed image manifest, including the format sensitive raw bytes.
type SignedManifest struct {
	Manifest

	// Canonical is the canonical byte representation of the ImageManifest, without any attached signatures.
	// The manifest byte representation cannot change or it will have to be re-signed.
	Canonical []byte `json:"-"`

	// all contains the byte representation of the Manifest including signatures and is returned by Payload()
	all []byte
}

// UnmarshalJSON populates a new SignedManifest struct from JSON data.
func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
	sm.all = make([]byte, len(b))
	// store manifest and signatures in all
	copy(sm.all, b)

	jsig, err := libtrust.ParsePrettySignature(b, "signatures")
	if err != nil {
		return err
	}

	// Resolve the payload in the manifest.
	bytes, err := jsig.Payload()
	if err != nil {
		return err
	}

	// sm.Canonical stores the canonical manifest JSON
	sm.Canonical = make([]byte, len(bytes))
	copy(sm.Canonical, bytes)

	// Unmarshal canonical JSON into Manifest object
	var manifest Manifest
	if err := json.Unmarshal(sm.Canonical, &manifest); err != nil {
		return err
	}

	sm.Manifest = manifest

	return nil
}

// MarshalJSON returns the contents of raw.
// If Raw is nil, marshals the inner contents.
// Applications requiring a marshaled signed manifest should simply use Raw directly, since the content produced by json.Marshal will be compacted and will fail signature checks.
func (sm *SignedManifest) MarshalJSON() ([]byte, error) {
	if len(sm.all) > 0 {
		return sm.all, nil
	}

	// If the raw data is not available, just dump the inner content.
	return json.Marshal(&sm.Manifest)
}

// TODO: verify Payload and Signatures methods are required

// Payload returns the signed content of the signed manifest.
func (sm SignedManifest) Payload() (string, []byte, error) {
	return mediatype.Docker1ManifestSigned, sm.all, nil
}

// Signatures returns the signatures as provided by (*libtrust.JSONSignature).Signatures.
// The byte slices are opaque jws signatures.
func (sm *SignedManifest) Signatures() ([][]byte, error) {
	jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures")
	if err != nil {
		return nil, err
	}

	// Resolve the payload in the manifest.
	return jsig.Signatures()
}
diff --git a/vendor/github.com/regclient/regclient/types/docker/schema2/doc.go b/vendor/github.com/regclient/regclient/types/docker/schema2/doc.go
new file mode 100644
index 00000000..ada3e795
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/docker/schema2/doc.go
@@ -0,0 +1,2 @@
+// Package schema2 contains structs for Docker schema v2 manifests.
+package schema2
diff --git a/vendor/github.com/regclient/regclient/types/docker/schema2/manifest.go b/vendor/github.com/regclient/regclient/types/docker/schema2/manifest.go
new file mode 100644
index 00000000..16bb8cb5
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/docker/schema2/manifest.go
@@ -0,0 +1,29 @@
package schema2

import (
	"github.com/regclient/regclient/types/descriptor"
	"github.com/regclient/regclient/types/docker"
	"github.com/regclient/regclient/types/mediatype"
)

// ManifestSchemaVersion is a pre-configured versioned field for manifests
var ManifestSchemaVersion = docker.Versioned{
	SchemaVersion: 2,
	MediaType:     mediatype.Docker2Manifest,
}

// Manifest defines a schema2 manifest.
type Manifest struct {
	docker.Versioned

	// Config references the image configuration as a blob.
	Config descriptor.Descriptor `json:"config"`

	// Layers lists descriptors for the layers referenced by the
	// configuration.
	Layers []descriptor.Descriptor `json:"layers"`

	// Annotations contains arbitrary metadata for the image index.
	// Note, this is not a defined docker schema2 field.
	Annotations map[string]string `json:"annotations,omitempty"`
}
diff --git a/vendor/github.com/regclient/regclient/types/docker/schema2/manifestlist.go b/vendor/github.com/regclient/regclient/types/docker/schema2/manifestlist.go
new file mode 100644
index 00000000..8fa80471
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/docker/schema2/manifestlist.go
@@ -0,0 +1,25 @@
package schema2

import (
	"github.com/regclient/regclient/types/descriptor"
	"github.com/regclient/regclient/types/docker"
	"github.com/regclient/regclient/types/mediatype"
)

// ManifestListSchemaVersion is a pre-configured versioned field for manifest lists
var ManifestListSchemaVersion = docker.Versioned{
	SchemaVersion: 2,
	MediaType:     mediatype.Docker2ManifestList,
}

// ManifestList references manifests for various platforms.
type ManifestList struct {
	docker.Versioned

	// Manifests lists descriptors in the manifest list
	Manifests []descriptor.Descriptor `json:"manifests"`

	// Annotations contains arbitrary metadata for the image index.
	// Note, this is not a defined docker schema2 field.
	Annotations map[string]string `json:"annotations,omitempty"`
}
diff --git a/vendor/github.com/regclient/regclient/types/docker/versioned.go b/vendor/github.com/regclient/regclient/types/docker/versioned.go
new file mode 100644
index 00000000..9685f2c7
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/docker/versioned.go
@@ -0,0 +1,10 @@
// Package docker defines the common types for all docker schemas
package docker

// Versioned carries the schema version and media type shared by all docker manifest schemas.
type Versioned struct {
	// SchemaVersion is the image manifest schema that this image follows
	SchemaVersion int `json:"schemaVersion"`

	// MediaType is the media type of this schema.
	MediaType string `json:"mediaType,omitempty"`
}
diff --git a/vendor/github.com/regclient/regclient/types/error.go b/vendor/github.com/regclient/regclient/types/error.go
new file mode 100644
index 00000000..8841d015
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/error.go
@@ -0,0 +1,150 @@
package types

import "github.com/regclient/regclient/types/errs"

// Deprecated aliases: every value below simply re-exports the matching
// sentinel from the errs package for backwards compatibility.
var (
	// ErrAllRequestsFailed when there are no mirrors left to try
	//
	// Deprecated: replace with [errs.ErrAllRequestsFailed].
	ErrAllRequestsFailed = errs.ErrAllRequestsFailed
	// ErrAPINotFound if an api is not available for the host
	//
	// Deprecated: replace with [errs.ErrAPINotFound].
	ErrAPINotFound = errs.ErrAPINotFound
	// ErrBackoffLimit maximum backoff attempts reached
	//
	// Deprecated: replace with [errs.ErrBackoffLimit].
	ErrBackoffLimit = errs.ErrBackoffLimit
	// ErrCanceled if the context was canceled
	//
	// Deprecated: replace with [errs.ErrCanceled].
	ErrCanceled = errs.ErrCanceled
	// ErrDigestMismatch if the expected digest wasn't received
	//
	// Deprecated: replace with [errs.ErrDigestMismatch].
	ErrDigestMismatch = errs.ErrDigestMismatch
	// ErrEmptyChallenge indicates an issue with the received challenge in the WWW-Authenticate header
	//
	// Deprecated: replace with [errs.ErrEmptyChallenge].
	ErrEmptyChallenge = errs.ErrEmptyChallenge
	// ErrFileDeleted indicates a requested file has been deleted
	//
	// Deprecated: replace with [errs.ErrFileDeleted].
	ErrFileDeleted = errs.ErrFileDeleted
	// ErrFileNotFound indicates a requested file is not found
	//
	// Deprecated: replace with [errs.ErrFileNotFound].
	ErrFileNotFound = errs.ErrFileNotFound
	// ErrHTTPStatus if the http status code was unexpected
	//
	// Deprecated: replace with [errs.ErrHTTPStatus].
	ErrHTTPStatus = errs.ErrHTTPStatus
	// ErrInvalidChallenge indicates an issue with the received challenge in the WWW-Authenticate header
	//
	// Deprecated: replace with [errs.ErrInvalidChallenge].
	ErrInvalidChallenge = errs.ErrInvalidChallenge
	// ErrInvalidReference indicates the reference to an image has an invalid syntax
	//
	// Deprecated: replace with [errs.ErrInvalidReference].
	ErrInvalidReference = errs.ErrInvalidReference
	// ErrLoopDetected indicates a child node points back to the parent
	//
	// Deprecated: replace with [errs.ErrLoopDetected].
	ErrLoopDetected = errs.ErrLoopDetected
	// ErrManifestNotSet indicates the manifest is not set, it must be pulled with a ManifestGet first
	//
	// Deprecated: replace with [errs.ErrManifestNotSet].
	ErrManifestNotSet = errs.ErrManifestNotSet
	// ErrMissingAnnotation returned when a needed annotation is not found
	//
	// Deprecated: replace with [errs.ErrMissingAnnotation].
	ErrMissingAnnotation = errs.ErrMissingAnnotation
	// ErrMissingDigest returned when image reference does not include a digest
	//
	// Deprecated: replace with [errs.ErrMissingDigest].
	ErrMissingDigest = errs.ErrMissingDigest
	// ErrMissingLocation returned when the location header is missing
	//
	// Deprecated: replace with [errs.ErrMissingLocation].
	ErrMissingLocation = errs.ErrMissingLocation
	// ErrMissingName returned when name missing for host
	//
	// Deprecated: replace with [errs.ErrMissingName].
	ErrMissingName = errs.ErrMissingName
	// ErrMissingTag returned when image reference does not include a tag
	//
	// Deprecated: replace with [errs.ErrMissingTag].
	ErrMissingTag = errs.ErrMissingTag
	// ErrMissingTagOrDigest returned when image reference does not include a tag or digest
	//
	// Deprecated: replace with [errs.ErrMissingTagOrDigest].
	ErrMissingTagOrDigest = errs.ErrMissingTagOrDigest
	// ErrMismatch returned when a comparison detects a difference
	//
	// Deprecated: replace with [errs.ErrMismatch].
	ErrMismatch = errs.ErrMismatch
	// ErrMountReturnedLocation when a blob mount fails but a location header is received
	//
	// Deprecated: replace with [errs.ErrMountReturnedLocation].
	ErrMountReturnedLocation = errs.ErrMountReturnedLocation
	// ErrNoNewChallenge indicates a challenge update did not result in any change
	//
	// Deprecated: replace with [errs.ErrNoNewChallenge].
	ErrNoNewChallenge = errs.ErrNoNewChallenge
	// ErrNotFound isn't there, search for your value elsewhere
	//
	// Deprecated: replace with [errs.ErrNotFound].
	ErrNotFound = errs.ErrNotFound
	// ErrNotImplemented returned when method has not been implemented yet
	//
	// Deprecated: replace with [errs.ErrNotImplemented].
	ErrNotImplemented = errs.ErrNotImplemented
	// ErrNotRetryable indicates the process cannot be retried
	//
	// Deprecated: replace with [errs.ErrNotRetryable].
	ErrNotRetryable = errs.ErrNotRetryable
	// ErrParsingFailed when a string cannot be parsed
	//
	// Deprecated: replace with [errs.ErrParsingFailed].
	ErrParsingFailed = errs.ErrParsingFailed
	// ErrRetryNeeded indicates a request needs to be retried
	//
	// Deprecated: replace with [errs.ErrRetryNeeded].
	ErrRetryNeeded = errs.ErrRetryNeeded
	// ErrShortRead if contents are less than the expected size
	//
	// Deprecated: replace with [errs.ErrShortRead].
	ErrShortRead = errs.ErrShortRead
	// ErrSizeLimitExceeded if contents exceed the size limit
	//
	// Deprecated: replace with [errs.ErrSizeLimitExceeded].
	ErrSizeLimitExceeded = errs.ErrSizeLimitExceeded
	// ErrUnavailable when a requested value is not available
	//
	// Deprecated: replace with [errs.ErrUnavailable].
	ErrUnavailable = errs.ErrUnavailable
	// ErrUnsupported indicates the request was unsupported
	//
	// Deprecated: replace with [errs.ErrUnsupported].
	ErrUnsupported = errs.ErrUnsupported
	// ErrUnsupportedAPI happens when an API is not supported on a registry
	//
	// Deprecated: replace with [errs.ErrUnsupportedAPI].
	ErrUnsupportedAPI = errs.ErrUnsupportedAPI
	// ErrUnsupportedConfigVersion happens when config file version is greater than this command supports
	//
	// Deprecated: replace with [errs.ErrUnsupportedConfigVersion].
	ErrUnsupportedConfigVersion = errs.ErrUnsupportedConfigVersion
	// ErrUnsupportedMediaType returned when media type is unknown or unsupported
	//
	// Deprecated: replace with [errs.ErrUnsupportedMediaType].
	ErrUnsupportedMediaType = errs.ErrUnsupportedMediaType
	// ErrHTTPRateLimit when requests exceed server rate limit
	//
	// Deprecated: replace with [errs.ErrHTTPRateLimit].
	ErrHTTPRateLimit = errs.ErrHTTPRateLimit
	// ErrHTTPUnauthorized when authentication fails
	//
	// Deprecated: replace with [errs.ErrHTTPUnauthorized].
	ErrHTTPUnauthorized = errs.ErrHTTPUnauthorized
)
diff --git a/vendor/github.com/regclient/regclient/types/errs/error.go b/vendor/github.com/regclient/regclient/types/errs/error.go
new file mode 100644
index 00000000..03f1842f
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/errs/error.go
@@ -0,0 +1,89 @@
// Package errs is used for predefined error values.
package errs

import (
	"errors"
	"fmt"
	"io/fs"
)

var (
	// ErrAllRequestsFailed when there are no mirrors left to try
	ErrAllRequestsFailed = errors.New("all requests failed")
	// ErrAPINotFound if an api is not available for the host
	ErrAPINotFound = errors.New("API not found")
	// ErrBackoffLimit maximum backoff attempts reached
	ErrBackoffLimit = errors.New("backoff limit reached")
	// ErrCanceled if the context was canceled
	ErrCanceled = errors.New("context was canceled")
	// ErrDigestMismatch if the expected digest wasn't received
	ErrDigestMismatch = errors.New("digest mismatch")
	// ErrEmptyChallenge indicates an issue with the received challenge in the WWW-Authenticate header
	ErrEmptyChallenge = errors.New("empty challenge header")
	// ErrFileDeleted indicates a requested file has been deleted
	ErrFileDeleted = errors.New("file deleted")
	// ErrFileNotFound indicates a requested file is not found
	// (the %.0w verb wraps fs.ErrNotExist without including it in the message)
	ErrFileNotFound = fmt.Errorf("file not found%.0w", fs.ErrNotExist)
	// ErrHTTPStatus if the http status code was unexpected
	ErrHTTPStatus = errors.New("unexpected http status code")
	// ErrInvalidChallenge indicates an issue with the received challenge in the WWW-Authenticate header
	ErrInvalidChallenge = errors.New("invalid challenge header")
	// ErrInvalidReference indicates the reference to an image has an invalid syntax
	ErrInvalidReference = errors.New("invalid reference")
	// ErrLoopDetected indicates a child node points back to the parent
	ErrLoopDetected = errors.New("loop detected")
	// ErrManifestNotSet indicates the manifest is not set, it must be pulled with a ManifestGet first
	ErrManifestNotSet = errors.New("manifest not set")
	// ErrMissingAnnotation returned when a needed annotation is not found
	ErrMissingAnnotation = errors.New("annotation is missing")
	// ErrMissingDigest returned when image reference does not include a digest
	ErrMissingDigest = errors.New("digest missing from image reference")
	// ErrMissingLocation returned when the location header is missing
	ErrMissingLocation = errors.New("location header missing")
	// ErrMissingName returned when name missing for host
	ErrMissingName = errors.New("name missing")
	// ErrMissingTag returned when image reference does not include a tag
	ErrMissingTag = errors.New("tag missing from image reference")
	// ErrMissingTagOrDigest returned when image reference does not include a tag or digest
	ErrMissingTagOrDigest = errors.New("tag or Digest missing from image reference")
	// ErrMismatch returned when a comparison detects a difference
	ErrMismatch = errors.New("content does not match")
	// ErrMountReturnedLocation when a blob mount fails but a location header is received
	ErrMountReturnedLocation = errors.New("blob mount returned a location to upload")
	// ErrNoNewChallenge indicates a challenge update did not result in any change
	ErrNoNewChallenge = errors.New("no new challenge")
	// ErrNotFound isn't there, search for your value elsewhere
	ErrNotFound = errors.New("not found")
	// ErrNotImplemented returned when method has not been implemented yet
	ErrNotImplemented = errors.New("not implemented")
	// ErrNotRetryable indicates the process cannot be retried
	ErrNotRetryable = errors.New("not retryable")
	// ErrParsingFailed when a string cannot be parsed
	ErrParsingFailed = errors.New("parsing failed")
	// ErrRetryNeeded indicates a request needs to be retried
	ErrRetryNeeded = errors.New("retry needed")
	// ErrRetryLimitExceeded indicates too many retries have occurred
	ErrRetryLimitExceeded = errors.New("retry limit exceeded")
	// ErrShortRead if contents are less than the expected size
	ErrShortRead = errors.New("short read")
	// ErrSizeLimitExceeded if contents exceed the size limit
	ErrSizeLimitExceeded = errors.New("size limit exceeded")
	// ErrUnavailable when a requested value is not available
	ErrUnavailable = errors.New("unavailable")
	// ErrUnsupported indicates the request was unsupported
	ErrUnsupported = errors.New("unsupported")
	// ErrUnsupportedAPI happens when an API is not supported on a registry
	ErrUnsupportedAPI = errors.New("unsupported API")
	// ErrUnsupportedConfigVersion happens when config file version is greater than this command supports
	ErrUnsupportedConfigVersion = errors.New("unsupported config version")
	// ErrUnsupportedMediaType returned when media type is unknown or unsupported
	ErrUnsupportedMediaType = errors.New("unsupported media type")
)

// custom HTTP errors extend the ErrHTTPStatus error
var (
	// ErrHTTPRateLimit when requests exceed server rate limit
	// (%.0w wraps ErrHTTPStatus without adding it to the message)
	ErrHTTPRateLimit = fmt.Errorf("rate limit exceeded%.0w", ErrHTTPStatus)
	// ErrHTTPUnauthorized when authentication fails
	ErrHTTPUnauthorized = fmt.Errorf("unauthorized%.0w", ErrHTTPStatus)
)
diff --git a/vendor/github.com/regclient/regclient/types/manifest/common.go b/vendor/github.com/regclient/regclient/types/manifest/common.go
new file mode 100644
index 00000000..70139b78
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/manifest/common.go
@@ -0,0 +1,127 @@
package manifest

import (
	"net/http"
	"strconv"
	"strings"

	// crypto libraries included for go-digest
	_ "crypto/sha256"
	_ "crypto/sha512"

	digest "github.com/opencontainers/go-digest"

	"github.com/regclient/regclient/types"
	"github.com/regclient/regclient/types/descriptor"
	"github.com/regclient/regclient/types/errs"
	"github.com/regclient/regclient/types/ref"
)

// common holds the fields and accessors shared by every manifest implementation.
type common struct {
	r         ref.Ref               // reference the manifest was pulled from
	desc      descriptor.Descriptor // descriptor (digest, media type, size)
	manifSet  bool                  // true when the manifest body was loaded (false after a HEAD request)
	ratelimit types.RateLimit       // rate limit parsed from registry response headers
	rawHeader http.Header           // headers from the registry response
	rawBody   []byte                // raw manifest bytes as received
}

// GetDigest returns the digest
func (m *common) GetDigest() digest.Digest {
	return m.desc.Digest
}

// GetDescriptor returns the descriptor
func (m *common) GetDescriptor() descriptor.Descriptor {
	return m.desc
}

// GetMediaType returns the media type
func (m *common) GetMediaType() string {
	return m.desc.MediaType
}

// GetRateLimit returns the rate limit when the manifest was pulled from a registry.
// This supports the headers used by Docker Hub.
func (m *common) GetRateLimit() types.RateLimit {
	return m.ratelimit
}

// GetRef returns the reference from the upstream registry
func (m *common) GetRef() ref.Ref {
	return m.r
}

// HasRateLimit indicates if the rate limit is set
func (m *common) HasRateLimit() bool {
	return m.ratelimit.Set
}

// IsList indicates if the manifest is a docker Manifest List or OCI Index
func (m *common) IsList() bool {
	switch m.desc.MediaType {
	case MediaTypeDocker2ManifestList, MediaTypeOCI1ManifestList:
		return true
	default:
		return false
	}
}

// IsSet indicates if the manifest is defined.
// A false indicates this is from a HEAD request, providing the digest, media-type, and other headers, but no body.
func (m *common) IsSet() bool {
	return m.manifSet
}

// RawBody returns the raw body from the manifest if available.
func (m *common) RawBody() ([]byte, error) {
	if len(m.rawBody) == 0 {
		return m.rawBody, errs.ErrManifestNotSet
	}
	return m.rawBody, nil
}

// RawHeaders returns any headers included when manifest was pulled from a registry.
func (m *common) RawHeaders() (http.Header, error) {
	return m.rawHeader, nil
}

// setRateLimit parses the RateLimit-Limit/Remaining/Reset response headers
// (as sent by Docker Hub) into m.ratelimit. Values that fail to parse reset
// the corresponding field to 0; only a parseable Remaining marks the limit as Set.
func (m *common) setRateLimit(header http.Header) {
	// check for rate limit headers
	rlLimit := header.Get("RateLimit-Limit")
	rlRemain := header.Get("RateLimit-Remaining")
	rlReset := header.Get("RateLimit-Reset")
	if rlLimit != "" {
		// header shape is "limit;policy,policy,..." — split policies on ",",
		// then split the leading limit value off with ";"
		lpSplit := strings.Split(rlLimit, ",")
		lSplit := strings.Split(lpSplit[0], ";")
		rlLimitI, err := strconv.Atoi(lSplit[0])
		if err != nil {
			m.ratelimit.Limit = 0
		} else {
			m.ratelimit.Limit = rlLimitI
		}
		if len(lSplit) > 1 {
			m.ratelimit.Policies = lpSplit
		} else if len(lpSplit) > 1 {
			m.ratelimit.Policies = lpSplit[1:]
		}
	}
	if rlRemain != "" {
		rSplit := strings.Split(rlRemain, ";")
		rlRemainI, err := strconv.Atoi(rSplit[0])
		if err != nil {
			m.ratelimit.Remain = 0
		} else {
			m.ratelimit.Remain = rlRemainI
			m.ratelimit.Set = true
		}
	}
	if rlReset != "" {
		rlResetI, err := strconv.Atoi(rlReset)
		if err != nil {
			m.ratelimit.Reset = 0
		} else {
			m.ratelimit.Reset = rlResetI
		}
	}
}
diff --git a/vendor/github.com/regclient/regclient/types/manifest/docker1.go b/vendor/github.com/regclient/regclient/types/manifest/docker1.go
new file mode 100644
index 00000000..e95aa43f
--- /dev/null
+++ b/vendor/github.com/regclient/regclient/types/manifest/docker1.go
@@ -0,0 +1,237 @@
package manifest

import (
	"bytes"
	"encoding/json"
	"fmt"
	"text/tabwriter"

	// crypto libraries included for go-digest
	_ "crypto/sha256"
	_ "crypto/sha512"

	digest "github.com/opencontainers/go-digest"

	"github.com/regclient/regclient/types/descriptor"
	"github.com/regclient/regclient/types/docker/schema1"
	"github.com/regclient/regclient/types/errs"
	"github.com/regclient/regclient/types/mediatype"
	"github.com/regclient/regclient/types/platform"
)

const (
	// MediaTypeDocker1Manifest deprecated media type for docker schema1 manifests.
+ MediaTypeDocker1Manifest = "application/vnd.docker.distribution.manifest.v1+json" + // MediaTypeDocker1ManifestSigned is a deprecated schema1 manifest with jws signing. + MediaTypeDocker1ManifestSigned = "application/vnd.docker.distribution.manifest.v1+prettyjws" +) + +type docker1Manifest struct { + common + schema1.Manifest +} +type docker1SignedManifest struct { + common + schema1.SignedManifest +} + +func (m *docker1Manifest) GetConfig() (descriptor.Descriptor, error) { + return descriptor.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} +func (m *docker1Manifest) GetConfigDigest() (digest.Digest, error) { + return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} +func (m *docker1SignedManifest) GetConfig() (descriptor.Descriptor, error) { + return descriptor.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} +func (m *docker1SignedManifest) GetConfigDigest() (digest.Digest, error) { + return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1Manifest) GetManifestList() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} +func (m *docker1SignedManifest) GetManifestList() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1Manifest) GetLayers() ([]descriptor.Descriptor, error) { + if !m.manifSet { + return []descriptor.Descriptor{}, errs.ErrManifestNotSet + } + + var dl []descriptor.Descriptor + for _, sd := range m.FSLayers { + dl = append(dl, 
descriptor.Descriptor{ + Digest: sd.BlobSum, + }) + } + return dl, nil +} +func (m *docker1SignedManifest) GetLayers() ([]descriptor.Descriptor, error) { + if !m.manifSet { + return []descriptor.Descriptor{}, errs.ErrManifestNotSet + } + + var dl []descriptor.Descriptor + for _, sd := range m.FSLayers { + dl = append(dl, descriptor.Descriptor{ + Digest: sd.BlobSum, + }) + } + return dl, nil +} + +func (m *docker1Manifest) GetOrig() interface{} { + return m.Manifest +} +func (m *docker1SignedManifest) GetOrig() interface{} { + return m.SignedManifest +} + +func (m *docker1Manifest) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} +func (m *docker1SignedManifest) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1Manifest) GetPlatformList() ([]*platform.Platform, error) { + return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} +func (m *docker1SignedManifest) GetPlatformList() ([]*platform.Platform, error) { + return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1Manifest) GetSize() (int64, error) { + return 0, fmt.Errorf("GetSize is not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1SignedManifest) GetSize() (int64, error) { + return 0, fmt.Errorf("GetSize is not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1Manifest) MarshalJSON() ([]byte, error) { + if !m.manifSet { + return []byte{}, errs.ErrManifestNotSet + } + + if len(m.rawBody) > 0 { + return m.rawBody, nil + } + + 
return json.Marshal((m.Manifest)) +} + +func (m *docker1SignedManifest) MarshalJSON() ([]byte, error) { + if !m.manifSet { + return []byte{}, errs.ErrManifestNotSet + } + + return m.SignedManifest.MarshalJSON() +} + +func (m *docker1Manifest) MarshalPretty() ([]byte, error) { + if m == nil { + return []byte{}, nil + } + buf := &bytes.Buffer{} + tw := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) + if m.r.Reference != "" { + fmt.Fprintf(tw, "Name:\t%s\n", m.r.Reference) + } + fmt.Fprintf(tw, "MediaType:\t%s\n", m.desc.MediaType) + fmt.Fprintf(tw, "Digest:\t%s\n", m.desc.Digest.String()) + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Layers:\t\n") + for _, d := range m.FSLayers { + fmt.Fprintf(tw, " Digest:\t%s\n", string(d.BlobSum)) + } + err := tw.Flush() + return buf.Bytes(), err +} +func (m *docker1SignedManifest) MarshalPretty() ([]byte, error) { + if m == nil { + return []byte{}, nil + } + buf := &bytes.Buffer{} + tw := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) + if m.r.Reference != "" { + fmt.Fprintf(tw, "Name:\t%s\n", m.r.Reference) + } + fmt.Fprintf(tw, "MediaType:\t%s\n", m.desc.MediaType) + fmt.Fprintf(tw, "Digest:\t%s\n", m.desc.Digest.String()) + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Layers:\t\n") + for _, d := range m.FSLayers { + fmt.Fprintf(tw, " Digest:\t%s\n", string(d.BlobSum)) + } + err := tw.Flush() + return buf.Bytes(), err +} + +func (m *docker1Manifest) SetConfig(d descriptor.Descriptor) error { + return fmt.Errorf("set methods not supported for for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1SignedManifest) SetConfig(d descriptor.Descriptor) error { + return fmt.Errorf("set methods not supported for for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1Manifest) SetLayers(dl []descriptor.Descriptor) error { + return fmt.Errorf("set methods not supported for for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m 
*docker1SignedManifest) SetLayers(dl []descriptor.Descriptor) error { + return fmt.Errorf("set methods not supported for for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker1Manifest) SetOrig(origIn interface{}) error { + orig, ok := origIn.(schema1.Manifest) + if !ok { + return errs.ErrUnsupportedMediaType + } + if orig.MediaType != mediatype.Docker1Manifest { + // TODO: error? + orig.MediaType = mediatype.Docker1Manifest + } + mj, err := json.Marshal(orig) + if err != nil { + return err + } + m.manifSet = true + m.rawBody = mj + m.desc = descriptor.Descriptor{ + MediaType: mediatype.Docker1Manifest, + Digest: m.desc.DigestAlgo().FromBytes(mj), + Size: int64(len(mj)), + } + m.Manifest = orig + + return nil +} + +func (m *docker1SignedManifest) SetOrig(origIn interface{}) error { + orig, ok := origIn.(schema1.SignedManifest) + if !ok { + return errs.ErrUnsupportedMediaType + } + if orig.MediaType != mediatype.Docker1ManifestSigned { + // TODO: error? 
+ orig.MediaType = mediatype.Docker1ManifestSigned + } + mj, err := json.Marshal(orig) + if err != nil { + return err + } + m.manifSet = true + m.rawBody = mj + m.desc = descriptor.Descriptor{ + MediaType: mediatype.Docker1ManifestSigned, + Digest: m.desc.DigestAlgo().FromBytes(orig.Canonical), + Size: int64(len(orig.Canonical)), + } + m.SignedManifest = orig + + return nil +} diff --git a/vendor/github.com/regclient/regclient/types/manifest/docker2.go b/vendor/github.com/regclient/regclient/types/manifest/docker2.go new file mode 100644 index 00000000..7d04bb72 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/manifest/docker2.go @@ -0,0 +1,351 @@ +package manifest + +import ( + "bytes" + "encoding/json" + "fmt" + "sort" + "text/tabwriter" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + digest "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/internal/units" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/docker/schema2" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" + "github.com/regclient/regclient/types/platform" +) + +const ( + // MediaTypeDocker2Manifest is the media type when pulling manifests from a v2 registry + MediaTypeDocker2Manifest = mediatype.Docker2Manifest + // MediaTypeDocker2ManifestList is the media type when pulling a manifest list from a v2 registry + MediaTypeDocker2ManifestList = mediatype.Docker2ManifestList +) + +type docker2Manifest struct { + common + schema2.Manifest +} +type docker2ManifestList struct { + common + schema2.ManifestList +} + +func (m *docker2Manifest) GetAnnotations() (map[string]string, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Annotations, nil +} +func (m *docker2Manifest) GetConfig() (descriptor.Descriptor, error) { + if !m.manifSet { + return descriptor.Descriptor{}, errs.ErrManifestNotSet + } + return 
m.Config, nil +} +func (m *docker2Manifest) GetConfigDigest() (digest.Digest, error) { + if !m.manifSet { + return digest.Digest(""), errs.ErrManifestNotSet + } + return m.Config.Digest, nil +} +func (m *docker2ManifestList) GetAnnotations() (map[string]string, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Annotations, nil +} +func (m *docker2ManifestList) GetConfig() (descriptor.Descriptor, error) { + return descriptor.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} +func (m *docker2ManifestList) GetConfigDigest() (digest.Digest, error) { + return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker2Manifest) GetManifestList() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} +func (m *docker2ManifestList) GetManifestList() ([]descriptor.Descriptor, error) { + if !m.manifSet { + return []descriptor.Descriptor{}, errs.ErrManifestNotSet + } + return m.Manifests, nil +} + +func (m *docker2Manifest) GetLayers() ([]descriptor.Descriptor, error) { + if !m.manifSet { + return []descriptor.Descriptor{}, errs.ErrManifestNotSet + } + return m.Layers, nil +} +func (m *docker2ManifestList) GetLayers() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("layers are not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *docker2Manifest) GetOrig() interface{} { + return m.Manifest +} +func (m *docker2ManifestList) GetOrig() interface{} { + return m.ManifestList +} + +func (m *docker2Manifest) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, 
errs.ErrUnsupportedMediaType) +} +func (m *docker2ManifestList) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + if p == nil { + return nil, fmt.Errorf("invalid input, platform is nil%.0w", errs.ErrNotFound) + } + d, err := descriptor.DescriptorListSearch(m.Manifests, descriptor.MatchOpt{Platform: p}) + if err != nil { + return nil, fmt.Errorf("platform not found: %s%.0w", *p, err) + } + return &d, nil +} + +func (m *docker2Manifest) GetPlatformList() ([]*platform.Platform, error) { + return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} +func (m *docker2ManifestList) GetPlatformList() ([]*platform.Platform, error) { + dl, err := m.GetManifestList() + if err != nil { + return nil, err + } + return getPlatformList(dl) +} + +// GetSize returns the size in bytes of all layers +func (m *docker2Manifest) GetSize() (int64, error) { + if !m.manifSet { + return 0, errs.ErrManifestNotSet + } + var total int64 + for _, d := range m.Layers { + total += d.Size + } + return total, nil +} + +func (m *docker2Manifest) MarshalJSON() ([]byte, error) { + if !m.manifSet { + return []byte{}, errs.ErrManifestNotSet + } + if len(m.rawBody) > 0 { + return m.rawBody, nil + } + return json.Marshal((m.Manifest)) +} +func (m *docker2ManifestList) MarshalJSON() ([]byte, error) { + if !m.manifSet { + return []byte{}, errs.ErrManifestNotSet + } + if len(m.rawBody) > 0 { + return m.rawBody, nil + } + return json.Marshal((m.ManifestList)) +} + +func (m *docker2Manifest) MarshalPretty() ([]byte, error) { + if m == nil { + return []byte{}, nil + } + buf := &bytes.Buffer{} + tw := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) + if m.r.Reference != "" { + fmt.Fprintf(tw, "Name:\t%s\n", m.r.Reference) + } + fmt.Fprintf(tw, "MediaType:\t%s\n", m.desc.MediaType) + fmt.Fprintf(tw, "Digest:\t%s\n", m.desc.Digest.String()) + if len(m.Annotations) > 0 { + 
fmt.Fprintf(tw, "Annotations:\t\n") + keys := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keys = append(keys, k) + } + sort.Strings(keys) + for _, name := range keys { + val := m.Annotations[name] + fmt.Fprintf(tw, " %s:\t%s\n", name, val) + } + } + var total int64 + for _, d := range m.Layers { + total += d.Size + } + fmt.Fprintf(tw, "Total Size:\t%s\n", units.HumanSize(float64(total))) + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Config:\t\n") + err := m.Config.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Layers:\t\n") + for _, d := range m.Layers { + fmt.Fprintf(tw, "\t\n") + err := d.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + } + err = tw.Flush() + return buf.Bytes(), err +} +func (m *docker2ManifestList) MarshalPretty() ([]byte, error) { + if m == nil { + return []byte{}, nil + } + buf := &bytes.Buffer{} + tw := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) + if m.r.Reference != "" { + fmt.Fprintf(tw, "Name:\t%s\n", m.r.Reference) + } + fmt.Fprintf(tw, "MediaType:\t%s\n", m.desc.MediaType) + fmt.Fprintf(tw, "Digest:\t%s\n", m.desc.Digest.String()) + if len(m.Annotations) > 0 { + fmt.Fprintf(tw, "Annotations:\t\n") + keys := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keys = append(keys, k) + } + sort.Strings(keys) + for _, name := range keys { + val := m.Annotations[name] + fmt.Fprintf(tw, " %s:\t%s\n", name, val) + } + } + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Manifests:\t\n") + for _, d := range m.Manifests { + fmt.Fprintf(tw, "\t\n") + dRef := m.r + if dRef.Reference != "" { + dRef.Digest = d.Digest.String() + fmt.Fprintf(tw, " Name:\t%s\n", dRef.CommonName()) + } + err := d.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + } + err := tw.Flush() + return buf.Bytes(), err +} + +func (m *docker2Manifest) SetAnnotation(key, val string) error { + if !m.manifSet { + return 
errs.ErrManifestNotSet + } + if m.Annotations == nil { + m.Annotations = map[string]string{} + } + if val != "" { + m.Annotations[key] = val + } else { + delete(m.Annotations, key) + } + return m.updateDesc() +} +func (m *docker2ManifestList) SetAnnotation(key, val string) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + if m.Annotations == nil { + m.Annotations = map[string]string{} + } + if val != "" { + m.Annotations[key] = val + } else { + delete(m.Annotations, key) + } + return m.updateDesc() +} + +func (m *docker2Manifest) SetConfig(d descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Config = d + return m.updateDesc() +} + +func (m *docker2Manifest) SetLayers(dl []descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Layers = dl + return m.updateDesc() +} + +func (m *docker2ManifestList) SetManifestList(dl []descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Manifests = dl + return m.updateDesc() +} + +func (m *docker2Manifest) SetOrig(origIn interface{}) error { + orig, ok := origIn.(schema2.Manifest) + if !ok { + return errs.ErrUnsupportedMediaType + } + if orig.MediaType != mediatype.Docker2Manifest { + // TODO: error? + orig.MediaType = mediatype.Docker2Manifest + } + m.manifSet = true + m.Manifest = orig + return m.updateDesc() +} + +func (m *docker2ManifestList) SetOrig(origIn interface{}) error { + orig, ok := origIn.(schema2.ManifestList) + if !ok { + return errs.ErrUnsupportedMediaType + } + if orig.MediaType != mediatype.Docker2ManifestList { + // TODO: error? 
+ orig.MediaType = mediatype.Docker2ManifestList + } + m.manifSet = true + m.ManifestList = orig + return m.updateDesc() +} + +func (m *docker2Manifest) updateDesc() error { + mj, err := json.Marshal(m.Manifest) + if err != nil { + return err + } + m.rawBody = mj + m.desc = descriptor.Descriptor{ + MediaType: mediatype.Docker2Manifest, + Digest: m.desc.DigestAlgo().FromBytes(mj), + Size: int64(len(mj)), + } + return nil +} +func (m *docker2ManifestList) updateDesc() error { + mj, err := json.Marshal(m.ManifestList) + if err != nil { + return err + } + m.rawBody = mj + m.desc = descriptor.Descriptor{ + MediaType: mediatype.Docker2ManifestList, + Digest: m.desc.DigestAlgo().FromBytes(mj), + Size: int64(len(mj)), + } + return nil + +} diff --git a/vendor/github.com/regclient/regclient/types/manifest/manifest.go b/vendor/github.com/regclient/regclient/types/manifest/manifest.go new file mode 100644 index 00000000..dd2a640e --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/manifest/manifest.go @@ -0,0 +1,594 @@ +// Package manifest abstracts the various types of supported manifests. +// Supported types include OCI index and image, and Docker manifest list and manifest. +package manifest + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "strconv" + "strings" + + // Crypto libraries are included for go-digest. 
+ _ "crypto/sha256" + _ "crypto/sha512" + + digest "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/types" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/docker/schema1" + "github.com/regclient/regclient/types/docker/schema2" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" + v1 "github.com/regclient/regclient/types/oci/v1" + "github.com/regclient/regclient/types/platform" + "github.com/regclient/regclient/types/ref" +) + +// Manifest interface is implemented by all supported manifests but +// many calls are only supported by certain underlying media types. +type Manifest interface { + GetDescriptor() descriptor.Descriptor + GetOrig() interface{} + GetRef() ref.Ref + IsList() bool + IsSet() bool + MarshalJSON() ([]byte, error) + RawBody() ([]byte, error) + RawHeaders() (http.Header, error) + SetOrig(interface{}) error + + // Deprecated: GetConfig should be accessed using [Imager] interface. + GetConfig() (descriptor.Descriptor, error) + // Deprecated: GetLayers should be accessed using [Imager] interface. + GetLayers() ([]descriptor.Descriptor, error) + + // Deprecated: GetManifestList should be accessed using [Indexer] interface. + GetManifestList() ([]descriptor.Descriptor, error) + + // Deprecated: GetConfigDigest should be replaced with [GetConfig]. + GetConfigDigest() (digest.Digest, error) + // Deprecated: GetDigest should be replaced with GetDescriptor().Digest, see [GetDescriptor]. + GetDigest() digest.Digest + // Deprecated: GetMediaType should be replaced with GetDescriptor().MediaType, see [GetDescriptor]. + GetMediaType() string + // Deprecated: GetPlatformDesc method should be replaced with [manifest.GetPlatformDesc]. + GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) + // Deprecated: GetPlatformList method should be replaced with [manifest.GetPlatformList]. 
+ GetPlatformList() ([]*platform.Platform, error) + // Deprecated: GetRateLimit method should be replaced with [manifest.GetRateLimit]. + GetRateLimit() types.RateLimit + // Deprecated: HasRateLimit method should be replaced with [manifest.HasRateLimit]. + HasRateLimit() bool +} + +// Annotator is used by manifests that support annotations. +// Note this will work for Docker manifests despite the spec not officially supporting it. +type Annotator interface { + GetAnnotations() (map[string]string, error) + SetAnnotation(key, val string) error +} + +// Indexer is used by manifests that contain a manifest list. +type Indexer interface { + GetManifestList() ([]descriptor.Descriptor, error) + SetManifestList(dl []descriptor.Descriptor) error +} + +// Imager is used by manifests packaging an image. +type Imager interface { + GetConfig() (descriptor.Descriptor, error) + GetLayers() ([]descriptor.Descriptor, error) + SetConfig(d descriptor.Descriptor) error + SetLayers(dl []descriptor.Descriptor) error + GetSize() (int64, error) +} + +// Subjecter is used by manifests that may have a subject field. +type Subjecter interface { + GetSubject() (*descriptor.Descriptor, error) + SetSubject(d *descriptor.Descriptor) error +} + +type manifestConfig struct { + r ref.Ref + desc descriptor.Descriptor + raw []byte + orig interface{} + header http.Header +} +type Opts func(*manifestConfig) + +// New creates a new manifest based on provided options. +// The digest for the manifest will be checked against the descriptor, reference, or headers, depending on which is available first (later digests will be ignored). 
+func New(opts ...Opts) (Manifest, error) { + mc := manifestConfig{} + for _, opt := range opts { + opt(&mc) + } + c := common{ + r: mc.r, + desc: mc.desc, + rawBody: mc.raw, + rawHeader: mc.header, + } + if c.r.Digest != "" && c.desc.Digest == "" { + dig, err := digest.Parse(c.r.Digest) + if err != nil { + return nil, fmt.Errorf("failed to parse digest from ref: %w", err) + } + c.desc.Digest = dig + } + // extract fields from header where available + if mc.header != nil { + if c.desc.MediaType == "" { + c.desc.MediaType = mediatype.Base(mc.header.Get("Content-Type")) + } + if c.desc.Size == 0 { + cl, _ := strconv.Atoi(mc.header.Get("Content-Length")) + c.desc.Size = int64(cl) + } + if c.desc.Digest == "" { + c.desc.Digest, _ = digest.Parse(mc.header.Get("Docker-Content-Digest")) + } + c.setRateLimit(mc.header) + } + if mc.orig != nil { + return fromOrig(c, mc.orig) + } + return fromCommon(c) +} + +// WithDesc specifies the descriptor for the manifest. +func WithDesc(desc descriptor.Descriptor) Opts { + return func(mc *manifestConfig) { + mc.desc = desc + } +} + +// WithHeader provides the headers from the response when pulling the manifest. +func WithHeader(header http.Header) Opts { + return func(mc *manifestConfig) { + mc.header = header + } +} + +// WithOrig provides the original manifest variable. +func WithOrig(orig interface{}) Opts { + return func(mc *manifestConfig) { + mc.orig = orig + } +} + +// WithRaw provides the manifest bytes or HTTP response body. +func WithRaw(raw []byte) Opts { + return func(mc *manifestConfig) { + mc.raw = raw + } +} + +// WithRef provides the reference used to get the manifest. +func WithRef(r ref.Ref) Opts { + return func(mc *manifestConfig) { + mc.r = r + } +} + +// GetDigest returns the digest from the manifest descriptor. +func GetDigest(m Manifest) digest.Digest { + d := m.GetDescriptor() + return d.Digest +} + +// GetMediaType returns the media type from the manifest descriptor. 
+func GetMediaType(m Manifest) string { + d := m.GetDescriptor() + return d.MediaType +} + +// GetPlatformDesc returns the descriptor for a specific platform from an index. +func GetPlatformDesc(m Manifest, p *platform.Platform) (*descriptor.Descriptor, error) { + if p == nil { + return nil, fmt.Errorf("invalid input, platform is nil%.0w", errs.ErrNotFound) + } + mi, ok := m.(Indexer) + if !ok { + return nil, fmt.Errorf("unsupported manifest type: %s", m.GetDescriptor().MediaType) + } + dl, err := mi.GetManifestList() + if err != nil { + return nil, fmt.Errorf("failed to get manifest list: %w", err) + } + d, err := descriptor.DescriptorListSearch(dl, descriptor.MatchOpt{Platform: p}) + if err != nil { + return nil, fmt.Errorf("platform not found: %s%.0w", *p, err) + } + return &d, nil +} + +// GetPlatformList returns the list of platforms from an index. +func GetPlatformList(m Manifest) ([]*platform.Platform, error) { + mi, ok := m.(Indexer) + if !ok { + return nil, fmt.Errorf("unsupported manifest type: %s", m.GetDescriptor().MediaType) + } + dl, err := mi.GetManifestList() + if err != nil { + return nil, fmt.Errorf("failed to get manifest list: %w", err) + } + return getPlatformList(dl) +} + +// GetRateLimit returns the current rate limit seen in headers. 
+func GetRateLimit(m Manifest) types.RateLimit { + rl := types.RateLimit{} + header, err := m.RawHeaders() + if err != nil { + return rl + } + // check for rate limit headers + rlLimit := header.Get("RateLimit-Limit") + rlRemain := header.Get("RateLimit-Remaining") + rlReset := header.Get("RateLimit-Reset") + if rlLimit != "" { + lpSplit := strings.Split(rlLimit, ",") + lSplit := strings.Split(lpSplit[0], ";") + rlLimitI, err := strconv.Atoi(lSplit[0]) + if err != nil { + rl.Limit = 0 + } else { + rl.Limit = rlLimitI + } + if len(lSplit) > 1 { + rl.Policies = lpSplit + } else if len(lpSplit) > 1 { + rl.Policies = lpSplit[1:] + } + } + if rlRemain != "" { + rSplit := strings.Split(rlRemain, ";") + rlRemainI, err := strconv.Atoi(rSplit[0]) + if err != nil { + rl.Remain = 0 + } else { + rl.Remain = rlRemainI + rl.Set = true + } + } + if rlReset != "" { + rlResetI, err := strconv.Atoi(rlReset) + if err != nil { + rl.Reset = 0 + } else { + rl.Reset = rlResetI + } + } + return rl +} + +// HasRateLimit indicates whether the rate limit is set and available. +func HasRateLimit(m Manifest) bool { + rl := GetRateLimit(m) + return rl.Set +} + +// OCIIndexFromAny converts manifest lists to an OCI index. +func OCIIndexFromAny(orig interface{}) (v1.Index, error) { + ociI := v1.Index{ + Versioned: v1.IndexSchemaVersion, + MediaType: mediatype.OCI1ManifestList, + } + switch orig := orig.(type) { + case schema2.ManifestList: + ociI.Manifests = orig.Manifests + ociI.Annotations = orig.Annotations + case v1.Index: + ociI = orig + default: + return ociI, fmt.Errorf("unable to convert %T to OCI index", orig) + } + return ociI, nil +} + +// OCIIndexToAny converts from an OCI index back to the manifest list. 
+func OCIIndexToAny(ociI v1.Index, origP interface{}) error { + // reflect is used to handle both *interface{} and *Manifest + rv := reflect.ValueOf(origP) + for rv.IsValid() && rv.Type().Kind() == reflect.Ptr { + rv = rv.Elem() + } + if !rv.IsValid() { + return fmt.Errorf("invalid manifest output parameter: %T", origP) + } + if !rv.CanSet() { + return fmt.Errorf("manifest output must be a pointer: %T", origP) + } + origR := rv.Interface() + switch orig := (origR).(type) { + case schema2.ManifestList: + orig.Versioned = schema2.ManifestListSchemaVersion + orig.Manifests = ociI.Manifests + orig.Annotations = ociI.Annotations + rv.Set(reflect.ValueOf(orig)) + case v1.Index: + rv.Set(reflect.ValueOf(ociI)) + default: + return fmt.Errorf("unable to convert OCI index to %T", origR) + } + return nil +} + +// OCIManifestFromAny converts an image manifest to an OCI manifest. +func OCIManifestFromAny(orig interface{}) (v1.Manifest, error) { + ociM := v1.Manifest{ + Versioned: v1.ManifestSchemaVersion, + MediaType: mediatype.OCI1Manifest, + } + switch orig := orig.(type) { + case schema2.Manifest: + ociM.Config = orig.Config + ociM.Layers = orig.Layers + ociM.Annotations = orig.Annotations + case v1.Manifest: + ociM = orig + default: + // TODO: consider supporting Docker schema v1 media types + return ociM, fmt.Errorf("unable to convert %T to OCI image", orig) + } + return ociM, nil +} + +// OCIManifestToAny converts an OCI manifest back to the image manifest. 
+func OCIManifestToAny(ociM v1.Manifest, origP interface{}) error { + // reflect is used to handle both *interface{} and *Manifest + rv := reflect.ValueOf(origP) + for rv.IsValid() && rv.Type().Kind() == reflect.Ptr { + rv = rv.Elem() + } + if !rv.IsValid() { + return fmt.Errorf("invalid manifest output parameter: %T", origP) + } + if !rv.CanSet() { + return fmt.Errorf("manifest output must be a pointer: %T", origP) + } + origR := rv.Interface() + switch orig := (origR).(type) { + case schema2.Manifest: + orig.Versioned = schema2.ManifestSchemaVersion + orig.Config = ociM.Config + orig.Layers = ociM.Layers + orig.Annotations = ociM.Annotations + rv.Set(reflect.ValueOf(orig)) + case v1.Manifest: + rv.Set(reflect.ValueOf(ociM)) + default: + // Docker schema v1 will not be supported, can't resign, and no need for unsigned + return fmt.Errorf("unable to convert OCI image to %T", origR) + } + return nil +} + +// FromOrig creates a new manifest from the original upstream manifest type. +// This method should be used if you are creating a new manifest rather than pulling one from a registry. 
+func fromOrig(c common, orig interface{}) (Manifest, error) { + var mt string + var m Manifest + origDigest := c.desc.Digest + + mj, err := json.Marshal(orig) + if err != nil { + return nil, err + } + c.manifSet = true + if len(c.rawBody) == 0 { + c.rawBody = mj + } + if _, ok := orig.(schema1.SignedManifest); !ok { + c.desc.Digest = c.desc.DigestAlgo().FromBytes(mj) + } + if c.desc.Size == 0 { + c.desc.Size = int64(len(mj)) + } + // create manifest based on type + switch mOrig := orig.(type) { + case schema1.Manifest: + mt = mOrig.MediaType + c.desc.MediaType = mediatype.Docker1Manifest + m = &docker1Manifest{ + common: c, + Manifest: mOrig, + } + case schema1.SignedManifest: + mt = mOrig.MediaType + c.desc.MediaType = mediatype.Docker1ManifestSigned + // recompute digest on the canonical data + c.desc.Digest = c.desc.DigestAlgo().FromBytes(mOrig.Canonical) + m = &docker1SignedManifest{ + common: c, + SignedManifest: mOrig, + } + case schema2.Manifest: + mt = mOrig.MediaType + c.desc.MediaType = mediatype.Docker2Manifest + m = &docker2Manifest{ + common: c, + Manifest: mOrig, + } + case schema2.ManifestList: + mt = mOrig.MediaType + c.desc.MediaType = mediatype.Docker2ManifestList + m = &docker2ManifestList{ + common: c, + ManifestList: mOrig, + } + case v1.Manifest: + mt = mOrig.MediaType + c.desc.MediaType = mediatype.OCI1Manifest + m = &oci1Manifest{ + common: c, + Manifest: mOrig, + } + case v1.Index: + mt = mOrig.MediaType + c.desc.MediaType = mediatype.OCI1ManifestList + m = &oci1Index{ + common: c, + Index: orig.(v1.Index), + } + case v1.ArtifactManifest: + mt = mOrig.MediaType + c.desc.MediaType = mediatype.OCI1Artifact + m = &oci1Artifact{ + common: c, + ArtifactManifest: mOrig, + } + default: + return nil, fmt.Errorf("unsupported type to convert to a manifest: %T", orig) + } + // verify media type + err = verifyMT(c.desc.MediaType, mt) + if err != nil { + return nil, err + } + // verify digest didn't change + if origDigest != "" && origDigest != 
c.desc.Digest { + return nil, fmt.Errorf("manifest digest mismatch, expected %s, computed %s%.0w", origDigest, c.desc.Digest, errs.ErrDigestMismatch) + } + return m, nil +} + +// fromCommon is used to create a manifest when the underlying manifest struct is not provided. +func fromCommon(c common) (Manifest, error) { + var err error + var m Manifest + var mt string + origDigest := c.desc.Digest + // extract common data from from rawBody + if len(c.rawBody) > 0 { + c.manifSet = true + // extract media type from body, either explicitly or with duck typing + if c.desc.MediaType == "" { + mt := struct { + MediaType string `json:"mediaType,omitempty"` + SchemaVersion int `json:"schemaVersion,omitempty"` + Signatures []interface{} `json:"signatures,omitempty"` + Manifests []descriptor.Descriptor `json:"manifests,omitempty"` + Layers []descriptor.Descriptor `json:"layers,omitempty"` + }{} + err = json.Unmarshal(c.rawBody, &mt) + if mt.MediaType != "" { + c.desc.MediaType = mt.MediaType + } else if mt.SchemaVersion == 1 && len(mt.Signatures) > 0 { + c.desc.MediaType = mediatype.Docker1ManifestSigned + } else if mt.SchemaVersion == 1 { + c.desc.MediaType = mediatype.Docker1Manifest + } else if len(mt.Manifests) > 0 { + if strings.HasPrefix(mt.Manifests[0].MediaType, "application/vnd.docker.") { + c.desc.MediaType = mediatype.Docker2ManifestList + } else { + c.desc.MediaType = mediatype.OCI1ManifestList + } + } else if len(mt.Layers) > 0 { + if strings.HasPrefix(mt.Layers[0].MediaType, "application/vnd.docker.") { + c.desc.MediaType = mediatype.Docker2Manifest + } else { + c.desc.MediaType = mediatype.OCI1Manifest + } + } + } + // compute digest + if c.desc.MediaType != mediatype.Docker1ManifestSigned { + d := c.desc.DigestAlgo().FromBytes(c.rawBody) + c.desc.Digest = d + c.desc.Size = int64(len(c.rawBody)) + } + } + switch c.desc.MediaType { + case mediatype.Docker1Manifest: + var mOrig schema1.Manifest + if len(c.rawBody) > 0 { + err = json.Unmarshal(c.rawBody, &mOrig) + 
mt = mOrig.MediaType + } + m = &docker1Manifest{common: c, Manifest: mOrig} + case mediatype.Docker1ManifestSigned: + var mOrig schema1.SignedManifest + if len(c.rawBody) > 0 { + err = json.Unmarshal(c.rawBody, &mOrig) + mt = mOrig.MediaType + d := c.desc.DigestAlgo().FromBytes(mOrig.Canonical) + c.desc.Digest = d + c.desc.Size = int64(len(mOrig.Canonical)) + } + m = &docker1SignedManifest{common: c, SignedManifest: mOrig} + case mediatype.Docker2Manifest: + var mOrig schema2.Manifest + if len(c.rawBody) > 0 { + err = json.Unmarshal(c.rawBody, &mOrig) + mt = mOrig.MediaType + } + m = &docker2Manifest{common: c, Manifest: mOrig} + case mediatype.Docker2ManifestList: + var mOrig schema2.ManifestList + if len(c.rawBody) > 0 { + err = json.Unmarshal(c.rawBody, &mOrig) + mt = mOrig.MediaType + } + m = &docker2ManifestList{common: c, ManifestList: mOrig} + case mediatype.OCI1Manifest: + var mOrig v1.Manifest + if len(c.rawBody) > 0 { + err = json.Unmarshal(c.rawBody, &mOrig) + mt = mOrig.MediaType + } + m = &oci1Manifest{common: c, Manifest: mOrig} + case mediatype.OCI1ManifestList: + var mOrig v1.Index + if len(c.rawBody) > 0 { + err = json.Unmarshal(c.rawBody, &mOrig) + mt = mOrig.MediaType + } + m = &oci1Index{common: c, Index: mOrig} + case mediatype.OCI1Artifact: + var mOrig v1.ArtifactManifest + if len(c.rawBody) > 0 { + err = json.Unmarshal(c.rawBody, &mOrig) + mt = mOrig.MediaType + } + m = &oci1Artifact{common: c, ArtifactManifest: mOrig} + default: + return nil, fmt.Errorf("%w: \"%s\"", errs.ErrUnsupportedMediaType, c.desc.MediaType) + } + if err != nil { + return nil, fmt.Errorf("error unmarshaling manifest for %s: %w", c.r.CommonName(), err) + } + // verify media type + err = verifyMT(c.desc.MediaType, mt) + if err != nil { + return nil, err + } + // verify digest didn't change + if origDigest != "" && origDigest != c.desc.Digest { + return nil, fmt.Errorf("manifest digest mismatch, expected %s, computed %s%.0w", origDigest, c.desc.Digest, 
errs.ErrDigestMismatch) + } + return m, nil +} + +func verifyMT(expected, received string) error { + if received != "" && expected != received { + return fmt.Errorf("manifest contains an unexpected media type: expected %s, received %s", expected, received) + } + return nil +} + +func getPlatformList(dl []descriptor.Descriptor) ([]*platform.Platform, error) { + var l []*platform.Platform + for _, d := range dl { + if d.Platform != nil { + l = append(l, d.Platform) + } + } + return l, nil +} diff --git a/vendor/github.com/regclient/regclient/types/manifest/oci1.go b/vendor/github.com/regclient/regclient/types/manifest/oci1.go new file mode 100644 index 00000000..594a765a --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/manifest/oci1.go @@ -0,0 +1,581 @@ +package manifest + +import ( + "bytes" + "encoding/json" + "fmt" + "sort" + "text/tabwriter" + + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + digest "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/internal/units" + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" + v1 "github.com/regclient/regclient/types/oci/v1" + "github.com/regclient/regclient/types/platform" +) + +const ( + // MediaTypeOCI1Manifest OCI v1 manifest media type + MediaTypeOCI1Manifest = mediatype.OCI1Manifest + // MediaTypeOCI1ManifestList OCI v1 manifest list media type + MediaTypeOCI1ManifestList = mediatype.OCI1ManifestList +) + +type oci1Manifest struct { + common + v1.Manifest +} +type oci1Index struct { + common + v1.Index +} + +// oci1Artifact is EXPERIMENTAL +type oci1Artifact struct { + common + v1.ArtifactManifest +} + +func (m *oci1Manifest) GetAnnotations() (map[string]string, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Annotations, nil +} +func (m *oci1Manifest) GetConfig() (descriptor.Descriptor, error) { + if !m.manifSet 
{ + return descriptor.Descriptor{}, errs.ErrManifestNotSet + } + return m.Config, nil +} +func (m *oci1Manifest) GetConfigDigest() (digest.Digest, error) { + if !m.manifSet { + return digest.Digest(""), errs.ErrManifestNotSet + } + return m.Config.Digest, nil +} +func (m *oci1Index) GetAnnotations() (map[string]string, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Annotations, nil +} +func (m *oci1Index) GetConfig() (descriptor.Descriptor, error) { + return descriptor.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} +func (m *oci1Index) GetConfigDigest() (digest.Digest, error) { + return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} +func (m *oci1Artifact) GetAnnotations() (map[string]string, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Annotations, nil +} +func (m *oci1Artifact) GetConfig() (descriptor.Descriptor, error) { + return descriptor.Descriptor{}, fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} +func (m *oci1Artifact) GetConfigDigest() (digest.Digest, error) { + return "", fmt.Errorf("config digest not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *oci1Manifest) GetManifestList() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} +func (m *oci1Index) GetManifestList() ([]descriptor.Descriptor, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Manifests, nil +} +func (m *oci1Artifact) GetManifestList() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("platform descriptor list not available for media type %s%.0w", 
m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *oci1Manifest) GetLayers() ([]descriptor.Descriptor, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Layers, nil +} +func (m *oci1Index) GetLayers() ([]descriptor.Descriptor, error) { + return []descriptor.Descriptor{}, fmt.Errorf("layers are not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} +func (m *oci1Artifact) GetLayers() ([]descriptor.Descriptor, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Blobs, nil +} + +func (m *oci1Manifest) GetOrig() interface{} { + return m.Manifest +} +func (m *oci1Index) GetOrig() interface{} { + return m.Index +} +func (m *oci1Artifact) GetOrig() interface{} { + return m.ArtifactManifest +} + +func (m *oci1Manifest) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} +func (m *oci1Index) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + if p == nil { + return nil, fmt.Errorf("invalid input, platform is nil%.0w", errs.ErrNotFound) + } + d, err := descriptor.DescriptorListSearch(m.Manifests, descriptor.MatchOpt{Platform: p}) + if err != nil { + return nil, fmt.Errorf("platform not found: %s%.0w", *p, err) + } + return &d, nil +} +func (m *oci1Artifact) GetPlatformDesc(p *platform.Platform) (*descriptor.Descriptor, error) { + return nil, fmt.Errorf("platform lookup not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *oci1Manifest) GetPlatformList() ([]*platform.Platform, error) { + return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} +func (m *oci1Index) GetPlatformList() ([]*platform.Platform, error) { + dl, err := 
m.GetManifestList() + if err != nil { + return nil, err + } + return getPlatformList(dl) +} +func (m *oci1Artifact) GetPlatformList() ([]*platform.Platform, error) { + return nil, fmt.Errorf("platform list not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *oci1Manifest) MarshalJSON() ([]byte, error) { + if !m.manifSet { + return []byte{}, errs.ErrManifestNotSet + } + + if len(m.rawBody) > 0 { + return m.rawBody, nil + } + + return json.Marshal((m.Manifest)) +} +func (m *oci1Manifest) GetSubject() (*descriptor.Descriptor, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Manifest.Subject, nil +} +func (m *oci1Index) GetSubject() (*descriptor.Descriptor, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.Index.Subject, nil +} +func (m *oci1Artifact) GetSubject() (*descriptor.Descriptor, error) { + if !m.manifSet { + return nil, errs.ErrManifestNotSet + } + return m.ArtifactManifest.Subject, nil +} + +func (m *oci1Index) MarshalJSON() ([]byte, error) { + if !m.manifSet { + return []byte{}, errs.ErrManifestNotSet + } + + if len(m.rawBody) > 0 { + return m.rawBody, nil + } + + return json.Marshal((m.Index)) +} +func (m *oci1Artifact) MarshalJSON() ([]byte, error) { + if !m.manifSet { + return []byte{}, errs.ErrManifestNotSet + } + + if len(m.rawBody) > 0 { + return m.rawBody, nil + } + + return json.Marshal((m.ArtifactManifest)) +} + +func (m *oci1Manifest) MarshalPretty() ([]byte, error) { + if m == nil { + return []byte{}, nil + } + buf := &bytes.Buffer{} + tw := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) + if m.r.Reference != "" { + fmt.Fprintf(tw, "Name:\t%s\n", m.r.Reference) + } + fmt.Fprintf(tw, "MediaType:\t%s\n", m.desc.MediaType) + if m.ArtifactType != "" { + fmt.Fprintf(tw, "ArtifactType:\t%s\n", m.ArtifactType) + } + fmt.Fprintf(tw, "Digest:\t%s\n", m.desc.Digest.String()) + if len(m.Annotations) > 0 { + fmt.Fprintf(tw, "Annotations:\t\n") + keys := 
make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keys = append(keys, k) + } + sort.Strings(keys) + for _, name := range keys { + val := m.Annotations[name] + fmt.Fprintf(tw, " %s:\t%s\n", name, val) + } + } + var total int64 + for _, d := range m.Layers { + total += d.Size + } + fmt.Fprintf(tw, "Total Size:\t%s\n", units.HumanSize(float64(total))) + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Config:\t\n") + err := m.Config.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Layers:\t\n") + for _, d := range m.Layers { + fmt.Fprintf(tw, "\t\n") + err := d.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + } + if m.Subject != nil { + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Subject:\t\n") + err := m.Subject.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + } + err = tw.Flush() + return buf.Bytes(), err +} +func (m *oci1Index) MarshalPretty() ([]byte, error) { + if m == nil { + return []byte{}, nil + } + buf := &bytes.Buffer{} + tw := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) + if m.r.Reference != "" { + fmt.Fprintf(tw, "Name:\t%s\n", m.r.Reference) + } + fmt.Fprintf(tw, "MediaType:\t%s\n", m.desc.MediaType) + if m.ArtifactType != "" { + fmt.Fprintf(tw, "ArtifactType:\t%s\n", m.ArtifactType) + } + fmt.Fprintf(tw, "Digest:\t%s\n", m.desc.Digest.String()) + if len(m.Annotations) > 0 { + fmt.Fprintf(tw, "Annotations:\t\n") + keys := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keys = append(keys, k) + } + sort.Strings(keys) + for _, name := range keys { + val := m.Annotations[name] + fmt.Fprintf(tw, " %s:\t%s\n", name, val) + } + } + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Manifests:\t\n") + for _, d := range m.Manifests { + fmt.Fprintf(tw, "\t\n") + dRef := m.r + if dRef.Reference != "" { + dRef.Digest = d.Digest.String() + fmt.Fprintf(tw, " Name:\t%s\n", dRef.CommonName()) + } + err := d.MarshalPrettyTW(tw, " 
") + if err != nil { + return []byte{}, err + } + } + if m.Subject != nil { + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Subject:\t\n") + err := m.Subject.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + } + err := tw.Flush() + return buf.Bytes(), err +} +func (m *oci1Artifact) MarshalPretty() ([]byte, error) { + if m == nil { + return []byte{}, nil + } + buf := &bytes.Buffer{} + tw := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) + if m.r.Reference != "" { + fmt.Fprintf(tw, "Name:\t%s\n", m.r.Reference) + } + fmt.Fprintf(tw, "MediaType:\t%s\n", m.desc.MediaType) + fmt.Fprintf(tw, "ArtifactType:\t%s\n", m.ArtifactType) + fmt.Fprintf(tw, "Digest:\t%s\n", m.desc.Digest.String()) + if len(m.Annotations) > 0 { + fmt.Fprintf(tw, "Annotations:\t\n") + keys := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keys = append(keys, k) + } + sort.Strings(keys) + for _, name := range keys { + val := m.Annotations[name] + fmt.Fprintf(tw, " %s:\t%s\n", name, val) + } + } + var total int64 + for _, d := range m.Blobs { + total += d.Size + } + fmt.Fprintf(tw, "Total Size:\t%s\n", units.HumanSize(float64(total))) + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Blobs:\t\n") + for _, d := range m.Blobs { + fmt.Fprintf(tw, "\t\n") + err := d.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + } + if m.Subject != nil { + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Subject:\t\n") + err := m.Subject.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + } + err := tw.Flush() + return buf.Bytes(), err +} + +func (m *oci1Manifest) SetAnnotation(key, val string) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + if m.Annotations == nil { + m.Annotations = map[string]string{} + } + if val != "" { + m.Annotations[key] = val + } else { + delete(m.Annotations, key) + } + return m.updateDesc() +} +func (m *oci1Index) SetAnnotation(key, val string) error { + if !m.manifSet { + return errs.ErrManifestNotSet + 
} + if m.Annotations == nil { + m.Annotations = map[string]string{} + } + if val != "" { + m.Annotations[key] = val + } else { + delete(m.Annotations, key) + } + return m.updateDesc() +} +func (m *oci1Artifact) SetAnnotation(key, val string) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + if m.Annotations == nil { + m.Annotations = map[string]string{} + } + if val != "" { + m.Annotations[key] = val + } else { + delete(m.Annotations, key) + } + return m.updateDesc() +} + +func (m *oci1Artifact) SetConfig(d descriptor.Descriptor) error { + return fmt.Errorf("set config not available for media type %s%.0w", m.desc.MediaType, errs.ErrUnsupportedMediaType) +} + +func (m *oci1Manifest) SetConfig(d descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Config = d + return m.updateDesc() +} + +func (m *oci1Artifact) SetLayers(dl []descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Blobs = dl + return m.updateDesc() +} + +// GetSize returns the size in bytes of all layers +func (m *oci1Manifest) GetSize() (int64, error) { + if !m.manifSet { + return 0, errs.ErrManifestNotSet + } + var total int64 + for _, d := range m.Layers { + total += d.Size + } + return total, nil +} + +// GetSize returns the size in bytes of all layers +func (m *oci1Artifact) GetSize() (int64, error) { + if !m.manifSet { + return 0, errs.ErrManifestNotSet + } + var total int64 + for _, d := range m.Blobs { + total += d.Size + } + return total, nil +} + +func (m *oci1Manifest) SetLayers(dl []descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Layers = dl + return m.updateDesc() +} + +func (m *oci1Index) SetManifestList(dl []descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Manifests = dl + return m.updateDesc() +} + +func (m *oci1Manifest) SetOrig(origIn interface{}) error { + orig, ok := origIn.(v1.Manifest) + if !ok { + return 
errs.ErrUnsupportedMediaType + } + if orig.MediaType != mediatype.OCI1Manifest { + // TODO: error? + orig.MediaType = mediatype.OCI1Manifest + } + m.manifSet = true + m.Manifest = orig + + return m.updateDesc() +} + +func (m *oci1Index) SetOrig(origIn interface{}) error { + orig, ok := origIn.(v1.Index) + if !ok { + return errs.ErrUnsupportedMediaType + } + if orig.MediaType != mediatype.OCI1ManifestList { + // TODO: error? + orig.MediaType = mediatype.OCI1ManifestList + } + m.manifSet = true + m.Index = orig + + return m.updateDesc() +} + +func (m *oci1Artifact) SetSubject(d *descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.ArtifactManifest.Subject = d + return m.updateDesc() +} +func (m *oci1Manifest) SetSubject(d *descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Manifest.Subject = d + return m.updateDesc() +} +func (m *oci1Index) SetSubject(d *descriptor.Descriptor) error { + if !m.manifSet { + return errs.ErrManifestNotSet + } + m.Index.Subject = d + return m.updateDesc() +} + +func (m *oci1Artifact) SetOrig(origIn interface{}) error { + orig, ok := origIn.(v1.ArtifactManifest) + if !ok { + return errs.ErrUnsupportedMediaType + } + if orig.MediaType != mediatype.OCI1Artifact { + // TODO: error? 
+ orig.MediaType = mediatype.OCI1Artifact + } + m.manifSet = true + m.ArtifactManifest = orig + + return m.updateDesc() +} + +func (m *oci1Manifest) updateDesc() error { + mj, err := json.Marshal(m.Manifest) + if err != nil { + return err + } + m.rawBody = mj + m.desc = descriptor.Descriptor{ + MediaType: mediatype.OCI1Manifest, + Digest: m.desc.DigestAlgo().FromBytes(mj), + Size: int64(len(mj)), + } + return nil +} +func (m *oci1Index) updateDesc() error { + mj, err := json.Marshal(m.Index) + if err != nil { + return err + } + m.rawBody = mj + m.desc = descriptor.Descriptor{ + MediaType: mediatype.OCI1ManifestList, + Digest: m.desc.DigestAlgo().FromBytes(mj), + Size: int64(len(mj)), + } + return nil +} +func (m *oci1Artifact) updateDesc() error { + mj, err := json.Marshal(m.ArtifactManifest) + if err != nil { + return err + } + m.rawBody = mj + m.desc = descriptor.Descriptor{ + MediaType: mediatype.OCI1Artifact, + Digest: m.desc.DigestAlgo().FromBytes(mj), + Size: int64(len(mj)), + } + return nil +} diff --git a/vendor/github.com/regclient/regclient/types/mediatype.go b/vendor/github.com/regclient/regclient/types/mediatype.go new file mode 100644 index 00000000..d80acde8 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/mediatype.go @@ -0,0 +1,91 @@ +package types + +import ( + "github.com/regclient/regclient/types/mediatype" +) + +const ( + // MediaTypeDocker1Manifest deprecated media type for docker schema1 manifests. + // + // Deprecated: replace with [mediatype.Docker1Manifest]. + MediaTypeDocker1Manifest = mediatype.Docker1Manifest + // MediaTypeDocker1ManifestSigned is a deprecated schema1 manifest with jws signing. + // + // Deprecated: replace with [mediatype.Docker1ManifestSigned]. + MediaTypeDocker1ManifestSigned = mediatype.Docker1ManifestSigned + // MediaTypeDocker2Manifest is the media type when pulling manifests from a v2 registry. + // + // Deprecated: replace with [mediatype.Docker2Manifest]. 
+ MediaTypeDocker2Manifest = mediatype.Docker2Manifest + // MediaTypeDocker2ManifestList is the media type when pulling a manifest list from a v2 registry. + // + // Deprecated: replace with [mediatype.Docker2ManifestList]. + MediaTypeDocker2ManifestList = mediatype.Docker2ManifestList + // MediaTypeDocker2ImageConfig is for the configuration json object media type. + // + // Deprecated: replace with [mediatype.Docker2ImageConfig]. + MediaTypeDocker2ImageConfig = mediatype.Docker2ImageConfig + // MediaTypeOCI1Artifact EXPERIMENTAL OCI v1 artifact media type. + // + // Deprecated: replace with [mediatype.OCI1Artifact]. + MediaTypeOCI1Artifact = mediatype.OCI1Artifact + // MediaTypeOCI1Manifest OCI v1 manifest media type. + // + // Deprecated: replace with [mediatype.OCI1Manifest]. + MediaTypeOCI1Manifest = mediatype.OCI1Manifest + // MediaTypeOCI1ManifestList OCI v1 manifest list media type. + // + // Deprecated: replace with [mediatype.OCI1ManifestList]. + MediaTypeOCI1ManifestList = mediatype.OCI1ManifestList + // MediaTypeOCI1ImageConfig OCI v1 configuration json object media type. + // + // Deprecated: replace with [mediatype.OCI1ImageConfig]. + MediaTypeOCI1ImageConfig = mediatype.OCI1ImageConfig + // MediaTypeDocker2LayerGzip is the default compressed layer for docker schema2. + // + // Deprecated: replace with [mediatype.Docker2LayerGzip]. + MediaTypeDocker2LayerGzip = mediatype.Docker2LayerGzip + // MediaTypeDocker2ForeignLayer is the default compressed layer for foreign layers in docker schema2. + // + // Deprecated: replace with [mediatype.Docker2ForeignLayer]. + MediaTypeDocker2ForeignLayer = mediatype.Docker2ForeignLayer + // MediaTypeOCI1Layer is the uncompressed layer for OCIv1. + // + // Deprecated: replace with [mediatype.OCI1Layer]. + MediaTypeOCI1Layer = mediatype.OCI1Layer + // MediaTypeOCI1LayerGzip is the gzip compressed layer for OCI v1. + // + // Deprecated: replace with [mediatype.OCI1LayerGzip]. 
+ MediaTypeOCI1LayerGzip = mediatype.OCI1LayerGzip + // MediaTypeOCI1LayerZstd is the zstd compressed layer for OCI v1. + // + // Deprecated: replace with [mediatype.OCI1LayerZstd]. + MediaTypeOCI1LayerZstd = mediatype.OCI1LayerZstd + // MediaTypeOCI1ForeignLayer is the foreign layer for OCI v1. + // + // Deprecated: replace with [mediatype.OCI1ForeignLayer]. + MediaTypeOCI1ForeignLayer = mediatype.OCI1ForeignLayer + // MediaTypeOCI1ForeignLayerGzip is the gzip compressed foreign layer for OCI v1. + // + // Deprecated: replace with [mediatype.OCI1ForeignLayerGzip]. + MediaTypeOCI1ForeignLayerGzip = mediatype.OCI1ForeignLayerGzip + // MediaTypeOCI1ForeignLayerZstd is the zstd compressed foreign layer for OCI v1. + // + // Deprecated: replace with [mediatype.OCI1ForeignLayerZstd]. + MediaTypeOCI1ForeignLayerZstd = mediatype.OCI1ForeignLayerZstd + // MediaTypeOCI1Empty is used for blobs containing the empty JSON data `{}`. + // + // Deprecated: replace with [mediatype.OCI1Empty]. + MediaTypeOCI1Empty = mediatype.OCI1Empty + // MediaTypeBuildkitCacheConfig is used by buildkit cache images. + // + // Deprecated: replace with [mediatype.BuildkitCacheConfig]. + MediaTypeBuildkitCacheConfig = mediatype.BuildkitCacheConfig +) + +var ( + // Base cleans the Content-Type header to return only the lower case base media type. + // + // Deprecated: replace with [mediatype.Base]. + MediaTypeBase = mediatype.Base +) diff --git a/vendor/github.com/regclient/regclient/types/mediatype/mediatype.go b/vendor/github.com/regclient/regclient/types/mediatype/mediatype.go new file mode 100644 index 00000000..b74bb2c2 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/mediatype/mediatype.go @@ -0,0 +1,65 @@ +// Package mediatype defines well known media types. +package mediatype + +import ( + "regexp" + "strings" +) + +const ( + // Docker1Manifest deprecated media type for docker schema1 manifests. 
+ Docker1Manifest = "application/vnd.docker.distribution.manifest.v1+json" + // Docker1ManifestSigned is a deprecated schema1 manifest with jws signing. + Docker1ManifestSigned = "application/vnd.docker.distribution.manifest.v1+prettyjws" + // Docker2Manifest is the media type when pulling manifests from a v2 registry. + Docker2Manifest = "application/vnd.docker.distribution.manifest.v2+json" + // Docker2ManifestList is the media type when pulling a manifest list from a v2 registry. + Docker2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + // Docker2ImageConfig is for the configuration json object media type. + Docker2ImageConfig = "application/vnd.docker.container.image.v1+json" + // OCI1Artifact EXPERIMENTAL OCI v1 artifact media type. + OCI1Artifact = "application/vnd.oci.artifact.manifest.v1+json" + // OCI1Manifest OCI v1 manifest media type. + OCI1Manifest = "application/vnd.oci.image.manifest.v1+json" + // OCI1ManifestList OCI v1 manifest list media type. + OCI1ManifestList = "application/vnd.oci.image.index.v1+json" + // OCI1ImageConfig OCI v1 configuration json object media type. + OCI1ImageConfig = "application/vnd.oci.image.config.v1+json" + // Docker2Layer is the uncompressed layer for docker schema2. + Docker2Layer = "application/vnd.docker.image.rootfs.diff.tar" + // Docker2LayerGzip is the default compressed layer for docker schema2. + Docker2LayerGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip" + // Docker2LayerZstd is the default compressed layer for docker schema2. + Docker2LayerZstd = "application/vnd.docker.image.rootfs.diff.tar.zstd" + // Docker2ForeignLayer is the default compressed layer for foreign layers in docker schema2. + Docker2ForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + // OCI1Layer is the uncompressed layer for OCIv1. + OCI1Layer = "application/vnd.oci.image.layer.v1.tar" + // OCI1LayerGzip is the gzip compressed layer for OCI v1. 
+ OCI1LayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" + // OCI1LayerZstd is the zstd compressed layer for OCI v1. + OCI1LayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" + // OCI1ForeignLayer is the foreign layer for OCI v1. + OCI1ForeignLayer = "application/vnd.oci.image.layer.nondistributable.v1.tar" + // OCI1ForeignLayerGzip is the gzip compressed foreign layer for OCI v1. + OCI1ForeignLayerGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + // OCI1ForeignLayerZstd is the zstd compressed foreign layer for OCI v1. + OCI1ForeignLayerZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" + // OCI1Empty is used for blobs containing the empty JSON data `{}`. + OCI1Empty = "application/vnd.oci.empty.v1+json" + // BuildkitCacheConfig is used by buildkit cache images. + BuildkitCacheConfig = "application/vnd.buildkit.cacheconfig.v0" +) + +// Base cleans the Content-Type header to return only the lower case base media type. +func Base(orig string) string { + base, _, _ := strings.Cut(orig, ";") + return strings.TrimSpace(strings.ToLower(base)) +} + +// Valid returns true if the media type matches the rfc6838 4.2 naming requirements. +func Valid(mt string) bool { + return validateRegexp.MatchString(mt) +} + +var validateRegexp = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9!#$&^_.+-]{0,126}/[A-Za-z0-9][A-Za-z0-9!#$&^_.+-]{0,126}$`) diff --git a/vendor/github.com/regclient/regclient/types/oci/doc.go b/vendor/github.com/regclient/regclient/types/oci/doc.go new file mode 100644 index 00000000..8903fd69 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/oci/doc.go @@ -0,0 +1,18 @@ +// Package oci defiles OCI image-spec types +package oci + +// Contents of this folder refer to types defined at with the following license: + +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. diff --git a/vendor/github.com/regclient/regclient/types/oci/v1/artifact.go b/vendor/github.com/regclient/regclient/types/oci/v1/artifact.go new file mode 100644 index 00000000..8effe168 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/oci/v1/artifact.go @@ -0,0 +1,21 @@ +package v1 + +import "github.com/regclient/regclient/types/descriptor" + +// ArtifactManifest EXPERIMENTAL defines an OCI Artifact +type ArtifactManifest struct { + // MediaType is the media type of the object this schema refers to. + MediaType string `json:"mediaType"` + + // ArtifactType is the media type of the artifact this schema refers to. + ArtifactType string `json:"artifactType,omitempty"` + + // Blobs is a collection of blobs referenced by this manifest. + Blobs []descriptor.Descriptor `json:"blobs,omitempty"` + + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *descriptor.Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the artifact manifest. 
+ Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/regclient/regclient/types/oci/v1/config.go b/vendor/github.com/regclient/regclient/types/oci/v1/config.go new file mode 100644 index 00000000..6448f134 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/oci/v1/config.go @@ -0,0 +1,141 @@ +package v1 + +// Docker specific content in this file is included from +// https://github.com/moby/moby/blob/master/api/types/container/config.go + +import ( + "time" + // crypto libraries included for go-digest + _ "crypto/sha256" + _ "crypto/sha512" + + digest "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/types/platform" +) + +// ImageConfig defines the execution parameters which should be used as a base when running a container using an image. +type ImageConfig struct { + // User defines the username or UID which the process in the container should run as. + User string `json:"User,omitempty"` + + // ExposedPorts a set of ports to expose from a container running this image. + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + + // Env is a list of environment variables to be used in a container. + Env []string `json:"Env,omitempty"` + + // Entrypoint defines a list of arguments to use as the command to execute when the container starts. + Entrypoint []string `json:"Entrypoint,omitempty"` + + // Cmd defines the default arguments to the entrypoint of the container. + Cmd []string `json:"Cmd,omitempty"` + + // Volumes is a set of directories describing where the process is likely write data specific to a container instance. + Volumes map[string]struct{} `json:"Volumes,omitempty"` + + // WorkingDir sets the current working directory of the entrypoint process in the container. + WorkingDir string `json:"WorkingDir,omitempty"` + + // Labels contains arbitrary metadata for the container. 
+ Labels map[string]string `json:"Labels,omitempty"` + + // StopSignal contains the system call signal that will be sent to the container to exit. + StopSignal string `json:"StopSignal,omitempty"` + + // StopTimeout is the time in seconds to stop the container. + // This is a Docker specific extension to the config, and not part of the OCI spec. + StopTimeout *int `json:",omitempty"` + + // ArgsEscaped `[Deprecated]` - This field is present only for legacy + // compatibility with Docker and should not be used by new image builders. + // It is used by Docker for Windows images to indicate that the `Entrypoint` + // or `Cmd` or both, contains only a single element array, that is a + // pre-escaped, and combined into a single string `CommandLine`. If `true` + // the value in `Entrypoint` or `Cmd` should be used as-is to avoid double + // escaping. + ArgsEscaped bool `json:"ArgsEscaped,omitempty"` + + // Healthcheck describes how to check if the container is healthy. + // This is a Docker specific extension to the config, and not part of the OCI spec. + Healthcheck *HealthConfig `json:"Healthcheck,omitempty"` + + // OnBuild lists any ONBUILD steps defined in the Dockerfile. + // This is a Docker specific extension to the config, and not part of the OCI spec. + OnBuild []string `json:"OnBuild,omitempty"` + + // Shell for the shell-form of RUN, CMD, and ENTRYPOINT. + // This is a Docker specific extension to the config, and not part of the OCI spec. + Shell []string `json:"Shell,omitempty"` +} + +// RootFS describes a layer content addresses +type RootFS struct { + // Type is the type of the rootfs. + Type string `json:"type"` + + // DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. + DiffIDs []digest.Digest `json:"diff_ids"` +} + +// History describes the history of a layer. +type History struct { + // Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6. 
+ Created *time.Time `json:"created,omitempty"` + + // CreatedBy is the command which created the layer. + CreatedBy string `json:"created_by,omitempty"` + + // Author is the author of the build point. + Author string `json:"author,omitempty"` + + // Comment is a custom message set when creating the layer. + Comment string `json:"comment,omitempty"` + + // EmptyLayer is used to mark if the history item created a filesystem diff. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Image is the JSON structure which describes some basic information about the image. +// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. +type Image struct { + // Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6. + Created *time.Time `json:"created,omitempty"` + + // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. + Author string `json:"author,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + platform.Platform + + // Config defines the execution parameters which should be used as a base when running a container using the image. + Config ImageConfig `json:"config,omitempty"` + + // RootFS references the layer content addresses used by the image. + RootFS RootFS `json:"rootfs"` + + // History describes the history of each layer. + History []History `json:"history,omitempty"` +} + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +// This is a Docker specific extension to the config, and not part of the OCI spec. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. 
+ // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} diff --git a/vendor/github.com/regclient/regclient/types/oci/v1/doc.go b/vendor/github.com/regclient/regclient/types/oci/v1/doc.go new file mode 100644 index 00000000..2d198388 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/oci/v1/doc.go @@ -0,0 +1,18 @@ +// Package v1 defiles version 1 of OCI image-spec types +package v1 + +// Contents of this folder refer to types defined at with the following license: + +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
diff --git a/vendor/github.com/regclient/regclient/types/oci/v1/index.go b/vendor/github.com/regclient/regclient/types/oci/v1/index.go new file mode 100644 index 00000000..887a5c87 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/oci/v1/index.go @@ -0,0 +1,32 @@ +package v1 + +import ( + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/oci" +) + +// IndexSchemaVersion is a pre-configured versioned field for manifests +var IndexSchemaVersion = oci.Versioned{ + SchemaVersion: 2, +} + +// Index references manifests for various platforms. +// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON. +type Index struct { + oci.Versioned + + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` + MediaType string `json:"mediaType,omitempty"` + + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. + ArtifactType string `json:"artifactType,omitempty"` + + // Manifests references platform specific manifests. + Manifests []descriptor.Descriptor `json:"manifests"` + + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *descriptor.Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the image index. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/regclient/regclient/types/oci/v1/layout.go b/vendor/github.com/regclient/regclient/types/oci/v1/layout.go new file mode 100644 index 00000000..508db627 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/oci/v1/layout.go @@ -0,0 +1,6 @@ +package v1 + +// ImageLayout is the structure in the "oci-layout" file, found in the root of an OCI Image-layout directory. 
+type ImageLayout struct { + Version string `json:"imageLayoutVersion"` +} diff --git a/vendor/github.com/regclient/regclient/types/oci/v1/manifest.go b/vendor/github.com/regclient/regclient/types/oci/v1/manifest.go new file mode 100644 index 00000000..d49c39ae --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/oci/v1/manifest.go @@ -0,0 +1,35 @@ +package v1 + +import ( + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/oci" +) + +// ManifestSchemaVersion is a pre-configured versioned field for manifests +var ManifestSchemaVersion = oci.Versioned{ + SchemaVersion: 2, +} + +// Manifest defines an OCI image +type Manifest struct { + oci.Versioned + + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` + MediaType string `json:"mediaType,omitempty"` + + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. + ArtifactType string `json:"artifactType,omitempty"` + + // Config references a configuration object for a container, by digest. + // The referenced configuration object is a JSON blob that the runtime uses to set up the container. + Config descriptor.Descriptor `json:"config"` + + // Layers is an indexed list of layers referenced by the manifest. + Layers []descriptor.Descriptor `json:"layers"` + + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *descriptor.Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the image manifest. 
+ Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/regclient/regclient/types/oci/version.go b/vendor/github.com/regclient/regclient/types/oci/version.go new file mode 100644 index 00000000..0040374c --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/oci/version.go @@ -0,0 +1,8 @@ +// Package oci defines common settings for all OCI types +package oci + +// Versioned provides a struct with the manifest schemaVersion and mediaType. +type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` +} diff --git a/vendor/github.com/regclient/regclient/types/ping/ping.go b/vendor/github.com/regclient/regclient/types/ping/ping.go new file mode 100644 index 00000000..9417b4a5 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/ping/ping.go @@ -0,0 +1,13 @@ +// Package ping is used for data types with the Ping methods. +package ping + +import ( + "io/fs" + "net/http" +) + +// Result is the response to a ping request. +type Result struct { + Header http.Header // Header is defined for responses from a registry. + Stat fs.FileInfo // Stat is defined for responses from an ocidir. +} diff --git a/vendor/github.com/regclient/regclient/types/platform/compare.go b/vendor/github.com/regclient/regclient/types/platform/compare.go new file mode 100644 index 00000000..53018033 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/compare.go @@ -0,0 +1,213 @@ +package platform + +import ( + "strconv" + "strings" +) + +type compare struct { + host Platform +} + +type CompareOpts func(*compare) + +// NewCompare is used to compare multiple target entries to a host value. 
+func NewCompare(host Platform, opts ...CompareOpts) *compare { + (&host).normalize() + c := compare{ + host: host, + } + for _, optFn := range opts { + optFn(&c) + } + return &c +} + +// Better returns true when the target is compatible and a better match than the previous platform. +// The previous platform value may be the zero value when no previous match has been found. +func (c *compare) Better(target, prev Platform) bool { + if !Compatible(c.host, target) { + return false + } + (&target).normalize() + (&prev).normalize() + if prev.OS != target.OS { + if target.OS == c.host.OS { + return true + } else if prev.OS == c.host.OS { + return false + } + } + if prev.Architecture != target.Architecture { + if target.Architecture == c.host.Architecture { + return true + } else if prev.Architecture == c.host.Architecture { + return false + } + } + if prev.Variant != target.Variant { + if target.Variant == c.host.Variant { + return true + } else if prev.Variant == c.host.Variant { + return false + } + pV := variantVer(prev.Variant) + tV := variantVer(target.Variant) + if tV > pV { + return true + } else if tV < pV { + return false + } + } + if prev.OSVersion != target.OSVersion { + if target.OSVersion == c.host.OSVersion { + return true + } else if prev.OSVersion == c.host.OSVersion { + return false + } + cmp := semverCmp(prev.OSVersion, target.OSVersion) + if cmp != 0 { + return cmp < 0 + } + } + return false +} + +// Compatible indicates if a host can run a specified target platform image. +// This accounts for Docker Desktop for Mac and Windows using a Linux VM. 
+func (c *compare) Compatible(target Platform) bool { + (&target).normalize() + if c.host.OS == "linux" { + return c.host.OS == target.OS && c.host.Architecture == target.Architecture && + variantCompatible(c.host.Variant, target.Variant) + } else if c.host.OS == "windows" { + if target.OS == "windows" { + return c.host.Architecture == target.Architecture && + variantCompatible(c.host.Variant, target.Variant) && + osVerCompatible(c.host.OSVersion, target.OSVersion) + } else if target.OS == "linux" { + return c.host.Architecture == target.Architecture && + variantCompatible(c.host.Variant, target.Variant) + } + return false + } else if c.host.OS == "darwin" { + return (target.OS == "darwin" || target.OS == "linux") && + c.host.Architecture == target.Architecture && + variantCompatible(c.host.Variant, target.Variant) + } else { + return c.host.OS == target.OS && c.host.Architecture == target.Architecture && + variantCompatible(c.host.Variant, target.Variant) && + c.host.OSVersion == target.OSVersion && + strSliceEq(c.host.OSFeatures, target.OSFeatures) && + strSliceEq(c.host.Features, target.Features) + } +} + +// Match indicates if two platforms are the same. +func (c *compare) Match(target Platform) bool { + (&target).normalize() + if c.host.OS != target.OS { + return false + } + if c.host.OS == "linux" { + return c.host.Architecture == target.Architecture && c.host.Variant == target.Variant + } else if c.host.OS == "windows" { + return c.host.Architecture == target.Architecture && c.host.Variant == target.Variant && + osVerSemver(c.host.OSVersion) == osVerSemver(target.OSVersion) + } else { + return c.host.Architecture == target.Architecture && + c.host.Variant == target.Variant && + c.host.OSVersion == target.OSVersion && + strSliceEq(c.host.OSFeatures, target.OSFeatures) && + strSliceEq(c.host.Features, target.Features) + } +} + +// Compatible indicates if a host can run a specified target platform image. 
+// This accounts for Docker Desktop for Mac and Windows using a Linux VM. +func Compatible(host, target Platform) bool { + comp := NewCompare(host) + return comp.Compatible(target) +} + +// Match indicates if two platforms are the same. +func Match(a, b Platform) bool { + comp := NewCompare(a) + return comp.Match(b) +} + +func osVerCompatible(host, target string) bool { + if host == "" { + return true + } + vHost := osVerSemver(host) + vTarget := osVerSemver(target) + return vHost == vTarget +} + +func osVerSemver(platVer string) string { + verParts := strings.Split(platVer, ".") + if len(verParts) < 4 { + return platVer + } + return strings.Join(verParts[0:3], ".") +} + +// return: -1 if ab +func semverCmp(a, b string) int { + aParts := strings.Split(a, ".") + bParts := strings.Split(b, ".") + for i := range aParts { + if len(bParts) < i+1 { + return 1 + } + aInt, aErr := strconv.Atoi(aParts[i]) + bInt, bErr := strconv.Atoi(bParts[i]) + if aErr != nil { + if bErr != nil { + return 0 + } + return -1 + } + if bErr != nil { + return 1 + } + if aInt < bInt { + return -1 + } + if aInt > bInt { + return 1 + } + } + return 0 +} + +func strSliceEq(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +func variantCompatible(host, target string) bool { + vHost := variantVer(host) + vTarget := variantVer(target) + if vHost >= vTarget || (vHost == 1 && target == "") || (host == "" && vTarget == 1) { + return true + } + return false +} + +func variantVer(v string) int { + v = strings.TrimPrefix(v, "v") + ver, err := strconv.Atoi(v) + if err != nil { + return 0 + } + return ver +} diff --git a/vendor/github.com/regclient/regclient/types/platform/cpuinfo.go b/vendor/github.com/regclient/regclient/types/platform/cpuinfo.go new file mode 100644 index 00000000..75fe335a --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/cpuinfo.go @@ -0,0 +1,30 @@ +// Related 
implementations: +// +// +// +// +// + +package platform + +import ( + "runtime" + "sync" +) + +// cpuVariantValue is the variant of the local CPU architecture. +// For example on ARM, v7 and v8. And on AMD64, v1 - v4. +// Don't use this value directly; call cpuVariant() instead. +var cpuVariantValue string + +var cpuVariantOnce sync.Once + +func cpuVariant() string { + cpuVariantOnce.Do(func() { + switch runtime.GOARCH { + case "amd64", "arm", "arm64": + cpuVariantValue = lookupCPUVariant() + } + }) + return cpuVariantValue +} diff --git a/vendor/github.com/regclient/regclient/types/platform/cpuinfo_armx.go b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_armx.go new file mode 100644 index 00000000..60940dfa --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_armx.go @@ -0,0 +1,83 @@ +//go:build arm || arm64 +// +build arm arm64 + +package platform + +import ( + "bufio" + "os" + "runtime" + "strings" +) + +func lookupCPUVariant() string { + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use + // runtime.GOARCH to determine the variants + switch runtime.GOARCH { + case "arm64": + return "v8" + case "arm": + return "v7" + } + return "" + } + + variant := getCPUInfo("Cpu architecture") + + // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") + // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 + if runtime.GOARCH == "arm" && variant == "7" { + model := getCPUInfo("model name") + if strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { + variant = "6" + } + } + + switch strings.ToLower(variant) { + case "8", "aarch64": + variant = "v8" + case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + variant = "v7" + case "6", "6tej": + variant = "v6" + case "5", "5t", "5te", "5tej": + variant = "v5" + case "4", "4t": + variant = "v4" + case "3": + 
variant = "v3" + default: + variant = "" + } + + return variant +} + +// For Linux, the kernel has already detected the ABI, ISA and Features. +// So we don't need to access the ARM registers to detect platform information +// by ourselves. We can just parse these information from /proc/cpuinfo +func getCPUInfo(pattern string) (info string) { + if runtime.GOOS != "linux" { + return "" + } + + cpuinfo, err := os.Open("/proc/cpuinfo") + if err != nil { + return "" + } + defer cpuinfo.Close() + + // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse + // the first core is enough. + scanner := bufio.NewScanner(cpuinfo) + for scanner.Scan() { + newline := scanner.Text() + list := strings.Split(newline, ":") + + if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { + return strings.TrimSpace(list[1]) + } + } + return "" +} diff --git a/vendor/github.com/regclient/regclient/types/platform/cpuinfo_other.go b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_other.go new file mode 100644 index 00000000..5ac63b2e --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_other.go @@ -0,0 +1,8 @@ +//go:build !386 && !amd64 && !amd64p32 && !arm && !arm64 +// +build !386,!amd64,!amd64p32,!arm,!arm64 + +package platform + +func lookupCPUVariant() string { + return "" +} diff --git a/vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.go b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.go new file mode 100644 index 00000000..8e3ba7e5 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.go @@ -0,0 +1,100 @@ +//go:build 386 || amd64 || amd64p32 +// +build 386 amd64 amd64p32 + +package platform + +const ( + ecx1SSE3 = 0 + ecx1SSSE3 = 9 + ecx1FMA = 12 + ecx1CX16 = 13 + ecx1SSE4_1 = 19 + ecx1SSE4_2 = 20 + ecx1MOVBE = 22 + ecx1POPCNT = 23 + ecx1XSAVE = 26 + ecx1OSXSAVE = 27 + ecx1AVX = 28 + ecx1F16C = 29 + + ebx7BMI1 = 3 + ebx7AVX2 = 5 + ebx7BMI2 = 8 + 
ebx7AVX512F = 16 + ebx7AVX512DQ = 17 + ebx7AVX512CD = 28 + ebx7AVX512BW = 30 + ebx7AVX512VL = 31 + + ecxxLAHF = 0 + ecxxLZCNT = 5 + + eaxOSXMM = 1 + eaxOSYMM = 2 + eaxOSOpMask = 5 + eaxOSZMMHi16 = 6 + eaxOSZMMHi256 = 7 +) + +var ( + // GOAMD64=v1 (default): The baseline. Exclusively generates instructions that all 64-bit x86 processors can execute. + // GOAMD64=v2: all v1 instructions, plus CX16, LAHF-SAHF, POPCNT, SSE3, SSE4.1, SSE4.2, SSSE3. + // GOAMD64=v3: all v2 instructions, plus AVX, AVX2, BMI1, BMI2, F16C, FMA, LZCNT, MOVBE, OSXSAVE. + // GOAMD64=v4: all v3 instructions, plus AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL. + ecx1FeaturesV2 = bitSet(ecx1CX16) | bitSet(ecx1POPCNT) | bitSet(ecx1SSE3) | bitSet(ecx1SSE4_1) | bitSet(ecx1SSE4_2) | bitSet(ecx1SSSE3) + ecx1FeaturesV3 = ecx1FeaturesV2 | bitSet(ecx1AVX) | bitSet(ecx1F16C) | bitSet(ecx1FMA) | bitSet(ecx1MOVBE) | bitSet(ecx1OSXSAVE) + ebx7FeaturesV3 = bitSet(ebx7AVX2) | bitSet(ebx7BMI1) | bitSet(ebx7BMI2) + ebx7FeaturesV4 = ebx7FeaturesV3 | bitSet(ebx7AVX512F) | bitSet(ebx7AVX512BW) | bitSet(ebx7AVX512CD) | bitSet(ebx7AVX512DQ) | bitSet(ebx7AVX512VL) + ecxxFeaturesV2 = bitSet(ecxxLAHF) + ecxxFeaturesV3 = ecxxFeaturesV2 | bitSet(ecxxLZCNT) + eaxOSFeaturesV3 = bitSet(eaxOSXMM) | bitSet(eaxOSYMM) + eaxOSFeaturesV4 = eaxOSFeaturesV3 | bitSet(eaxOSOpMask) | bitSet(eaxOSZMMHi16) | bitSet(eaxOSZMMHi256) +) + +// cpuid is implemented in cpuinfo_x86.s. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_x86.s. 
+func xgetbv() (eax, edx uint32) + +func lookupCPUVariant() string { + variant := "v1" + maxID, _, _, _ := cpuid(0, 0) + if maxID < 7 { + return variant + } + _, _, ecx1, _ := cpuid(1, 0) + _, ebx7, _, _ := cpuid(7, 0) + maxX, _, _, _ := cpuid(0x80000000, 0) + _, _, ecxx, _ := cpuid(0x80000001, 0) + + if maxX < 0x80000001 || !bitIsSet(ecx1FeaturesV2, ecx1) || !bitIsSet(ecxxFeaturesV2, ecxx) { + return variant + } + variant = "v2" + + if !bitIsSet(ecx1FeaturesV3, ecx1) || !bitIsSet(ebx7FeaturesV3, ebx7) || !bitIsSet(ecxxFeaturesV3, ecxx) { + return variant + } + // For XGETBV, OSXSAVE bit is required and verified by ecx1FeaturesV3. + eaxOS, _ := xgetbv() + if !bitIsSet(eaxOSFeaturesV3, eaxOS) { + return variant + } + variant = "v3" + + // Darwin support for AVX-512 appears to have issues. + if isDarwin || !bitIsSet(ebx7FeaturesV4, ebx7) || !bitIsSet(eaxOSFeaturesV4, eaxOS) { + return variant + } + variant = "v4" + + return variant +} + +func bitSet(bitpos uint) uint32 { + return 1 << bitpos +} +func bitIsSet(bits, value uint32) bool { + return (value & bits) == bits +} diff --git a/vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.s b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.s new file mode 100644 index 00000000..7d7ba33e --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/cpuinfo_x86.s @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build (386 || amd64 || amd64p32) && gc + +#include "textflag.h" + +// func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuid(SB), NOSPLIT, $0-24 + MOVL eaxArg+0(FP), AX + MOVL ecxArg+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv() (eax, edx uint32) +TEXT ·xgetbv(SB),NOSPLIT,$0-8 + MOVL $0, CX + XGETBV + MOVL AX, eax+0(FP) + MOVL DX, edx+4(FP) + RET diff --git a/vendor/github.com/regclient/regclient/types/platform/os_darwin.go b/vendor/github.com/regclient/regclient/types/platform/os_darwin.go new file mode 100644 index 00000000..161f89ed --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/os_darwin.go @@ -0,0 +1,8 @@ +//go:build darwin +// +build darwin + +package platform + +const isDarwin = true + +var _ = isDarwin diff --git a/vendor/github.com/regclient/regclient/types/platform/os_other.go b/vendor/github.com/regclient/regclient/types/platform/os_other.go new file mode 100644 index 00000000..01b004e6 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/os_other.go @@ -0,0 +1,8 @@ +//go:build !darwin +// +build !darwin + +package platform + +const isDarwin = false + +var _ = isDarwin diff --git a/vendor/github.com/regclient/regclient/types/platform/platform.go b/vendor/github.com/regclient/regclient/types/platform/platform.go new file mode 100644 index 00000000..3137774b --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/platform.go @@ -0,0 +1,185 @@ +// Package platform handles the parsing and comparing of the image platform (e.g. linux/amd64) +package platform + +// Some of the code in the package and all of the inspiration for this comes from . +// Their license is included here: +/* + Copyright The containerd Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import ( + "fmt" + "path" + "regexp" + "strings" + + "github.com/regclient/regclient/internal/strparse" + "github.com/regclient/regclient/types/errs" +) + +var ( + partRE = regexp.MustCompile(`^[A-Za-z0-9_-]+$`) +) + +// Platform specifies a platform where a particular image manifest is applicable. +type Platform struct { + // Architecture field specifies the CPU architecture, for example `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system version, for example `10.0.10586`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for example `ppc64le` to specify a little-endian version of a PowerPC CPU. + Variant string `json:"variant,omitempty"` + + // Features is an optional field specifying an array of strings, each listing a required CPU feature (for example `sse4` or `aes`). 
+ Features []string `json:"features,omitempty"` +} + +// String outputs the platform in the // notation +func (p Platform) String() string { + (&p).normalize() + if p.OS == "" { + return "unknown" + } else { + return path.Join(p.OS, p.Architecture, p.Variant) + } +} + +// Parse converts a platform string into a struct +func Parse(platStr string) (Platform, error) { + // args are a regclient specific way to extend the platform string + platArgs := strings.SplitN(platStr, ",", 2) + // split on slash, validate each component + platSplit := strings.Split(platArgs[0], "/") + for i, part := range platSplit { + if !partRE.MatchString(part) { + return Platform{}, fmt.Errorf("invalid platform component %s in %s%.0w", part, platStr, errs.ErrParsingFailed) + } + platSplit[i] = strings.ToLower(part) + } + plat := &Platform{} + if len(platSplit) == 1 && knownArch(platSplit[0]) { + // special case of architecture only + plat.Architecture = platSplit[0] + } else if len(platSplit) >= 1 { + plat.OS = platSplit[0] + } + if len(platSplit) >= 2 { + plat.Architecture = platSplit[1] + } + if len(platSplit) >= 3 { + plat.Variant = platSplit[2] + } + if len(platArgs) > 1 { + kvMap, err := strparse.SplitCSKV(platArgs[1]) + if err != nil { + return Platform{}, fmt.Errorf("failed to split platform args in %s: %w", platStr, err) + } + for k, v := range kvMap { + k := strings.TrimSpace(k) + v := strings.TrimSpace(v) + switch strings.ToLower(k) { + case "osver", "osversion": + plat.OSVersion = v + default: + return Platform{}, fmt.Errorf("unsupported platform arg type, %s in %s%.0w", k, platStr, errs.ErrParsingFailed) + } + } + } + // gather local platform details + platLocal := Local() + // normalize and extrapolate missing fields + if platStr == "local" { + *plat = platLocal + } else if plat.OS == "local" || plat.OS == "" { + plat.OS = platLocal.OS + } + plat.normalize() + switch plat.OS { + case "linux", "darwin", "windows": + // expand short references to local platform with architecture 
and variant + if Compatible(Platform{OS: platLocal.OS}, Platform{OS: plat.OS}) && len(platSplit) < 2 { + if plat.Architecture == "" { + plat.Architecture = platLocal.Architecture + } + if plat.Architecture == platLocal.Architecture && plat.Variant == "" { + plat.Variant = platLocal.Variant + } + } + } + if plat.OS == "windows" && plat.OS == platLocal.OS && plat.Architecture == platLocal.Architecture && variantCompatible(platLocal.Variant, plat.Variant) && plat.OSVersion == "" { + plat.OSVersion = platLocal.OSVersion + } + + return *plat, nil +} + +// knownArch is a list of known architectures that can be parsed without the OS field. +// Otherwise the OS is required. +func knownArch(arch string) bool { + switch arch { + case "386", "amd64", "i386", "x86_64", "x86-64", + "arm", "armhf", "armel", "arm64", "aarch64", + "mips", "mips64", "mips64le", + "ppc", "ppc64", "ppc64le", + "loong64", + "riscv", "riscv64", + "s390", "s390x", + "sparc", "sparc64", + "wasm": + return true + } + return false +} + +func (p *Platform) normalize() { + switch p.OS { + case "macos": + p.OS = "darwin" + } + switch p.Architecture { + case "i386": + p.Architecture = "386" + p.Variant = "" + case "x86_64", "x86-64", "amd64": + p.Architecture = "amd64" + if p.Variant == "v1" { + p.Variant = "" + } + case "aarch64", "arm64": + p.Architecture = "arm64" + switch p.Variant { + case "8", "v8": + p.Variant = "" + } + case "armhf": + p.Architecture = "arm" + p.Variant = "v7" + case "armel": + p.Architecture = "arm" + p.Variant = "v6" + case "arm": + switch p.Variant { + case "", "7": + p.Variant = "v7" + case "5", "6", "8": + p.Variant = "v" + p.Variant + } + } +} diff --git a/vendor/github.com/regclient/regclient/types/platform/platform_other.go b/vendor/github.com/regclient/regclient/types/platform/platform_other.go new file mode 100644 index 00000000..be99816e --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/platform_other.go @@ -0,0 +1,17 @@ +//go:build !windows +// +build 
!windows + +package platform + +import "runtime" + +// Local retrieves the local platform details +func Local() Platform { + plat := Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + Variant: cpuVariant(), + } + plat.normalize() + return plat +} diff --git a/vendor/github.com/regclient/regclient/types/platform/platform_windows.go b/vendor/github.com/regclient/regclient/types/platform/platform_windows.go new file mode 100644 index 00000000..d0dda6a4 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/platform/platform_windows.go @@ -0,0 +1,24 @@ +//go:build windows +// +build windows + +package platform + +import ( + "fmt" + "runtime" + + "golang.org/x/sys/windows" +) + +// Local retrieves the local platform details +func Local() Platform { + major, minor, build := windows.RtlGetNtVersionNumbers() + plat := Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + Variant: cpuVariant(), + OSVersion: fmt.Sprintf("%d.%d.%d", major, minor, build), + } + plat.normalize() + return plat +} diff --git a/vendor/github.com/regclient/regclient/types/ratelimit.go b/vendor/github.com/regclient/regclient/types/ratelimit.go new file mode 100644 index 00000000..3ecbd608 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/ratelimit.go @@ -0,0 +1,8 @@ +package types + +// RateLimit is returned from some http requests +type RateLimit struct { + Remain, Limit, Reset int + Set bool + Policies []string +} diff --git a/vendor/github.com/regclient/regclient/types/ref/ref.go b/vendor/github.com/regclient/regclient/types/ref/ref.go new file mode 100644 index 00000000..7dbc9684 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/ref/ref.go @@ -0,0 +1,297 @@ +// Package ref is used to define references. +// References default to remote registry references (registry:port/repo:tag). +// Schemes can be included in front of the reference for different reference types. 
+package ref + +import ( + "fmt" + "path" + "regexp" + "strings" + + "github.com/regclient/regclient/types/errs" +) + +const ( + dockerLibrary = "library" + // dockerRegistry is the name resolved in docker images on Hub. + dockerRegistry = "docker.io" + // dockerRegistryLegacy is the name resolved in docker images on Hub. + dockerRegistryLegacy = "index.docker.io" + // dockerRegistryDNS is the host to connect to for Hub. + dockerRegistryDNS = "registry-1.docker.io" +) + +var ( + hostPartS = `(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?)` + hostPortS = `(?:` + hostPartS + `(?:` + regexp.QuoteMeta(`.`) + hostPartS + `)*` + regexp.QuoteMeta(`.`) + `?` + regexp.QuoteMeta(`:`) + `[0-9]+)` + hostDomainS = `(?:` + hostPartS + `(?:(?:` + regexp.QuoteMeta(`.`) + hostPartS + `)+` + regexp.QuoteMeta(`.`) + `?|` + regexp.QuoteMeta(`.`) + `))` + hostUpperS = `(?:[a-zA-Z0-9]*[A-Z][a-zA-Z0-9-]*[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[A-Z][a-zA-Z0-9]*)` + registryS = `(?:` + hostDomainS + `|` + hostPortS + `|` + hostUpperS + `|localhost(?:` + regexp.QuoteMeta(`:`) + `[0-9]+)?)` + repoPartS = `[a-z0-9]+(?:(?:\.|_|__|-+)[a-z0-9]+)*` + pathS = `[/a-zA-Z0-9_\-. ~\+]+` + tagS = `[a-zA-Z0-9_][a-zA-Z0-9._-]{0,127}` + digestS = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}` + schemeRE = regexp.MustCompile(`^([a-z]+)://(.+)$`) + registryRE = regexp.MustCompile(`^(` + registryS + `)$`) + refRE = regexp.MustCompile(`^(?:(` + registryS + `)` + regexp.QuoteMeta(`/`) + `)?` + + `(` + repoPartS + `(?:` + regexp.QuoteMeta(`/`) + repoPartS + `)*)` + + `(?:` + regexp.QuoteMeta(`:`) + `(` + tagS + `))?` + + `(?:` + regexp.QuoteMeta(`@`) + `(` + digestS + `))?$`) + ocidirRE = regexp.MustCompile(`^(` + pathS + `)` + + `(?:` + regexp.QuoteMeta(`:`) + `(` + tagS + `))?` + + `(?:` + regexp.QuoteMeta(`@`) + `(` + digestS + `))?$`) +) + +// Ref is a reference to a registry/repository. +// Direct access to the contents of this struct should not be assumed. 
+type Ref struct { + Scheme string // Scheme is the type of reference, "reg" or "ocidir". + Reference string // Reference is the unparsed string or common name. + Registry string // Registry is the server for the "reg" scheme. + Repository string // Repository is the path on the registry for the "reg" scheme. + Tag string // Tag is a mutable tag for a reference. + Digest string // Digest is an immutable hash for a reference. + Path string // Path is the directory of the OCI Layout for "ocidir". +} + +// New returns a reference based on the scheme (defaulting to "reg"). +func New(parse string) (Ref, error) { + scheme := "" + tail := parse + matchScheme := schemeRE.FindStringSubmatch(parse) + if len(matchScheme) == 3 { + scheme = matchScheme[1] + tail = matchScheme[2] + } + ret := Ref{ + Scheme: scheme, + Reference: parse, + } + switch scheme { + case "": + ret.Scheme = "reg" + matchRef := refRE.FindStringSubmatch(tail) + if matchRef == nil || len(matchRef) < 5 { + if refRE.FindStringSubmatch(strings.ToLower(tail)) != nil { + return Ref{}, fmt.Errorf("%w \"%s\", repo must be lowercase", errs.ErrInvalidReference, tail) + } + return Ref{}, fmt.Errorf("%w \"%s\"", errs.ErrInvalidReference, tail) + } + ret.Registry = matchRef[1] + ret.Repository = matchRef[2] + ret.Tag = matchRef[3] + ret.Digest = matchRef[4] + + // handle localhost use case since it matches the regex for a repo path entry + repoPath := strings.Split(ret.Repository, "/") + if ret.Registry == "" && repoPath[0] == "localhost" { + ret.Registry = repoPath[0] + ret.Repository = strings.Join(repoPath[1:], "/") + } + switch ret.Registry { + case "", dockerRegistryDNS, dockerRegistryLegacy: + ret.Registry = dockerRegistry + } + if ret.Registry == dockerRegistry && !strings.Contains(ret.Repository, "/") { + ret.Repository = dockerLibrary + "/" + ret.Repository + } + if ret.Tag == "" && ret.Digest == "" { + ret.Tag = "latest" + } + if ret.Repository == "" { + return Ref{}, fmt.Errorf("%w \"%s\"", 
errs.ErrInvalidReference, tail) + } + + case "ocidir", "ocifile": + matchPath := ocidirRE.FindStringSubmatch(tail) + if matchPath == nil || len(matchPath) < 2 || matchPath[1] == "" { + return Ref{}, fmt.Errorf("%w, invalid path for scheme \"%s\": %s", errs.ErrInvalidReference, scheme, tail) + } + ret.Path = matchPath[1] + if len(matchPath) > 2 && matchPath[2] != "" { + ret.Tag = matchPath[2] + } + if len(matchPath) > 3 && matchPath[3] != "" { + ret.Digest = matchPath[3] + } + + default: + return Ref{}, fmt.Errorf("%w, unknown scheme \"%s\" in \"%s\"", errs.ErrInvalidReference, scheme, parse) + } + return ret, nil +} + +// NewHost returns a Reg for a registry hostname or equivalent. +// The ocidir schema equivalent is the path. +func NewHost(parse string) (Ref, error) { + scheme := "" + tail := parse + matchScheme := schemeRE.FindStringSubmatch(parse) + if len(matchScheme) == 3 { + scheme = matchScheme[1] + tail = matchScheme[2] + } + ret := Ref{ + Scheme: scheme, + } + + switch scheme { + case "": + ret.Scheme = "reg" + matchReg := registryRE.FindStringSubmatch(tail) + if matchReg == nil || len(matchReg) < 2 { + return Ref{}, fmt.Errorf("%w \"%s\"", errs.ErrParsingFailed, tail) + } + ret.Registry = matchReg[1] + if ret.Registry == "" { + return Ref{}, fmt.Errorf("%w \"%s\"", errs.ErrParsingFailed, tail) + } + + case "ocidir", "ocifile": + matchPath := ocidirRE.FindStringSubmatch(tail) + if matchPath == nil || len(matchPath) < 2 || matchPath[1] == "" { + return Ref{}, fmt.Errorf("%w, invalid path for scheme \"%s\": %s", errs.ErrParsingFailed, scheme, tail) + } + ret.Path = matchPath[1] + + default: + return Ref{}, fmt.Errorf("%w, unknown scheme \"%s\" in \"%s\"", errs.ErrParsingFailed, scheme, parse) + } + return ret, nil +} + +// CommonName outputs a parsable name from a reference. 
+func (r Ref) CommonName() string { + cn := "" + switch r.Scheme { + case "reg": + if r.Registry != "" { + cn = r.Registry + "/" + } + if r.Repository == "" { + return "" + } + cn = cn + r.Repository + if r.Tag != "" { + cn = cn + ":" + r.Tag + } + if r.Digest != "" { + cn = cn + "@" + r.Digest + } + case "ocidir": + cn = fmt.Sprintf("ocidir://%s", r.Path) + if r.Tag != "" { + cn = cn + ":" + r.Tag + } + if r.Digest != "" { + cn = cn + "@" + r.Digest + } + } + return cn +} + +// IsSet returns true if needed values are defined for a specific reference. +func (r Ref) IsSet() bool { + if !r.IsSetRepo() { + return false + } + // Registry requires a tag or digest, OCI Layout doesn't require these. + if r.Scheme == "reg" && r.Tag == "" && r.Digest == "" { + return false + } + return true +} + +// IsSetRepo returns true when the ref includes values for a specific repository. +func (r Ref) IsSetRepo() bool { + switch r.Scheme { + case "reg": + if r.Registry != "" && r.Repository != "" { + return true + } + case "ocidir": + if r.Path != "" { + return true + } + } + return false +} + +// IsZero returns true if ref is unset. +func (r Ref) IsZero() bool { + if r.Scheme == "" && r.Registry == "" && r.Repository == "" && r.Path == "" && r.Tag == "" && r.Digest == "" { + return true + } + return false +} + +// SetDigest returns a ref with the requested digest set. +// The tag will be unset and the reference value will be reset. +func (r Ref) SetDigest(digest string) Ref { + r.Digest = digest + r.Tag = "" + r.Reference = r.CommonName() + return r +} + +// SetTag returns a ref with the requested tag set. +// The digest will be unset and the reference value will be reset. +func (r Ref) SetTag(tag string) Ref { + r.Tag = tag + r.Digest = "" + r.Reference = r.CommonName() + return r +} + +// ToReg converts a reference to a registry like syntax. 
+func (r Ref) ToReg() Ref { + switch r.Scheme { + case "ocidir": + r.Scheme = "reg" + r.Registry = "localhost" + // clean the path to strip leading ".." + r.Repository = path.Clean("/" + r.Path)[1:] + r.Repository = strings.ToLower(r.Repository) + // convert any unsupported characters to "-" in the path + re := regexp.MustCompile(`[^/a-z0-9]+`) + r.Repository = string(re.ReplaceAll([]byte(r.Repository), []byte("-"))) + } + return r +} + +// EqualRegistry compares the registry between two references. +func EqualRegistry(a, b Ref) bool { + if a.Scheme != b.Scheme { + return false + } + switch a.Scheme { + case "reg": + return a.Registry == b.Registry + case "ocidir": + return a.Path == b.Path + case "": + // both undefined + return true + default: + return false + } +} + +// EqualRepository compares the repository between two references. +func EqualRepository(a, b Ref) bool { + if a.Scheme != b.Scheme { + return false + } + switch a.Scheme { + case "reg": + return a.Registry == b.Registry && a.Repository == b.Repository + case "ocidir": + return a.Path == b.Path + case "": + // both undefined + return true + default: + return false + } +} diff --git a/vendor/github.com/regclient/regclient/types/referrer/referrer.go b/vendor/github.com/regclient/regclient/types/referrer/referrer.go new file mode 100644 index 00000000..5a7bc4f3 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/referrer/referrer.go @@ -0,0 +1,165 @@ +// Package referrer is used for responses to the referrers to a manifest +package referrer + +import ( + "bytes" + "fmt" + "sort" + "text/tabwriter" + + "github.com/opencontainers/go-digest" + + "github.com/regclient/regclient/types/descriptor" + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/manifest" + v1 "github.com/regclient/regclient/types/oci/v1" + "github.com/regclient/regclient/types/ref" +) + +// ReferrerList contains the response to a request for referrers to a subject +type ReferrerList struct { 
+ Subject ref.Ref `json:"subject"` // subject queried + Source ref.Ref `json:"source"` // source for referrers, if different from subject + Descriptors []descriptor.Descriptor `json:"descriptors"` // descriptors found in Index + Annotations map[string]string `json:"annotations,omitempty"` // annotations extracted from Index + Manifest manifest.Manifest `json:"-"` // returned OCI Index + Tags []string `json:"-"` // tags matched when fetching referrers +} + +// Add appends an entry to rl.Manifest, used to modify the client managed Index +func (rl *ReferrerList) Add(m manifest.Manifest) error { + rlM, ok := rl.Manifest.GetOrig().(v1.Index) + if !ok { + return fmt.Errorf("referrer list manifest is not an OCI index for %s", rl.Subject.CommonName()) + } + // if entry already exists, return + mDesc := m.GetDescriptor() + for _, d := range rlM.Manifests { + if d.Digest == mDesc.Digest { + return nil + } + } + // update descriptor, pulling up artifact type and annotations + switch mOrig := m.GetOrig().(type) { + case v1.ArtifactManifest: + mDesc.Annotations = mOrig.Annotations + mDesc.ArtifactType = mOrig.ArtifactType + case v1.Manifest: + mDesc.Annotations = mOrig.Annotations + if mOrig.ArtifactType != "" { + mDesc.ArtifactType = mOrig.ArtifactType + } else { + mDesc.ArtifactType = mOrig.Config.MediaType + } + case v1.Index: + mDesc.Annotations = mOrig.Annotations + mDesc.ArtifactType = mOrig.ArtifactType + default: + // other types are not supported + return fmt.Errorf("invalid manifest for referrer \"%t\": %w", m.GetOrig(), errs.ErrUnsupportedMediaType) + } + // append descriptor to index + rlM.Manifests = append(rlM.Manifests, mDesc) + rl.Descriptors = rlM.Manifests + err := rl.Manifest.SetOrig(rlM) + if err != nil { + return err + } + return nil +} + +// Delete removes an entry from rl.Manifest, used to modify the client managed Index +func (rl *ReferrerList) Delete(m manifest.Manifest) error { + rlM, ok := rl.Manifest.GetOrig().(v1.Index) + if !ok { + return 
fmt.Errorf("referrer list manifest is not an OCI index for %s", rl.Subject.CommonName()) + } + // delete matching entries from the list + mDesc := m.GetDescriptor() + found := false + for i := len(rlM.Manifests) - 1; i >= 0; i-- { + if rlM.Manifests[i].Digest == mDesc.Digest { + if i < len(rlM.Manifests)-1 { + rlM.Manifests = append(rlM.Manifests[:i], rlM.Manifests[i+1:]...) + } else { + rlM.Manifests = rlM.Manifests[:i] + } + found = true + } + } + if !found { + return fmt.Errorf("subject not found in referrer list%.0w", errs.ErrNotFound) + } + rl.Descriptors = rlM.Manifests + err := rl.Manifest.SetOrig(rlM) + if err != nil { + return err + } + return nil +} + +// IsEmpty reports if the returned Index contains no manifests +func (rl ReferrerList) IsEmpty() bool { + rlM, ok := rl.Manifest.GetOrig().(v1.Index) + if !ok || len(rlM.Manifests) == 0 { + return true + } + return false +} + +// MarshalPretty is used for printPretty template formatting +func (rl ReferrerList) MarshalPretty() ([]byte, error) { + buf := &bytes.Buffer{} + tw := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) + var rRef ref.Ref + if rl.Subject.IsSet() { + rRef = rl.Subject + fmt.Fprintf(tw, "Subject:\t%s\n", rl.Subject.CommonName()) + } + if rl.Source.IsSet() { + rRef = rl.Source + fmt.Fprintf(tw, "Source:\t%s\n", rl.Source.CommonName()) + } + fmt.Fprintf(tw, "\t\n") + fmt.Fprintf(tw, "Referrers:\t\n") + for _, d := range rl.Descriptors { + fmt.Fprintf(tw, "\t\n") + if rRef.IsSet() { + fmt.Fprintf(tw, " Name:\t%s\n", rRef.SetDigest(d.Digest.String()).CommonName()) + } + err := d.MarshalPrettyTW(tw, " ") + if err != nil { + return []byte{}, err + } + } + if len(rl.Annotations) > 0 { + fmt.Fprintf(tw, "Annotations:\t\n") + keys := make([]string, 0, len(rl.Annotations)) + for k := range rl.Annotations { + keys = append(keys, k) + } + sort.Strings(keys) + for _, name := range keys { + val := rl.Annotations[name] + fmt.Fprintf(tw, " %s:\t%s\n", name, val) + } + } + err := tw.Flush() + return buf.Bytes(), 
err +} + +// FallbackTag returns the ref that should be used when the registry does not support the referrers API +func FallbackTag(r ref.Ref) (ref.Ref, error) { + dig, err := digest.Parse(r.Digest) + if err != nil { + return r, fmt.Errorf("failed to parse digest for referrers: %w", err) + } + rr := r.SetTag(fmt.Sprintf("%s-%s", dig.Algorithm(), stringMax(dig.Hex(), 64))) + return rr, nil +} +func stringMax(s string, max int) string { + if len(s) <= max { + return s + } + return s[:max] +} diff --git a/vendor/github.com/regclient/regclient/types/repo/repolist.go b/vendor/github.com/regclient/regclient/types/repo/repolist.go new file mode 100644 index 00000000..a27aa259 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/repo/repolist.go @@ -0,0 +1,133 @@ +// Package repo handles a list of repositories from a registry +package repo + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "sort" + "strings" + + "github.com/regclient/regclient/types/errs" +) + +// RepoList is the response for a repository listing. +type RepoList struct { + repoCommon + RepoRegistryList +} + +type repoCommon struct { + host string + mt string + orig interface{} + rawHeader http.Header + rawBody []byte +} + +type repoConfig struct { + host string + mt string + raw []byte + header http.Header +} + +type Opts func(*repoConfig) + +// New is used to create a repository listing. 
+func New(opts ...Opts) (*RepoList, error) { + conf := repoConfig{ + mt: "application/json", + } + for _, opt := range opts { + opt(&conf) + } + rl := RepoList{} + rc := repoCommon{ + mt: conf.mt, + rawHeader: conf.header, + rawBody: conf.raw, + host: conf.host, + } + + mt := strings.Split(conf.mt, ";")[0] // "application/json; charset=utf-8" -> "application/json" + switch mt { + case "application/json", "text/plain": + err := json.Unmarshal(conf.raw, &rl) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("%w: media type: %s, hostname: %s", errs.ErrUnsupportedMediaType, conf.mt, conf.host) + } + + rl.repoCommon = rc + return &rl, nil +} + +func WithHeaders(header http.Header) Opts { + return func(c *repoConfig) { + c.header = header + } +} +func WithHost(host string) Opts { + return func(c *repoConfig) { + c.host = host + } +} +func WithMT(mt string) Opts { + return func(c *repoConfig) { + c.mt = mt + } +} +func WithRaw(raw []byte) Opts { + return func(c *repoConfig) { + c.raw = raw + } +} + +// RepoRegistryList is a list of repositories from the _catalog API +type RepoRegistryList struct { + Repositories []string `json:"repositories"` +} + +func (r repoCommon) GetOrig() interface{} { + return r.orig +} + +func (r repoCommon) MarshalJSON() ([]byte, error) { + if len(r.rawBody) > 0 { + return r.rawBody, nil + } + + if r.orig != nil { + return json.Marshal((r.orig)) + } + return []byte{}, fmt.Errorf("JSON marshalling failed: %w", errs.ErrNotFound) +} + +func (r repoCommon) RawBody() ([]byte, error) { + return r.rawBody, nil +} + +func (r repoCommon) RawHeaders() (http.Header, error) { + return r.rawHeader, nil +} + +// GetRepos returns the repositories +func (rl RepoRegistryList) GetRepos() ([]string, error) { + return rl.Repositories, nil +} + +// MarshalPretty is used for printPretty template formatting +func (rl RepoRegistryList) MarshalPretty() ([]byte, error) { + sort.Slice(rl.Repositories, func(i, j int) bool { + return 
strings.Compare(rl.Repositories[i], rl.Repositories[j]) < 0 + }) + buf := &bytes.Buffer{} + for _, tag := range rl.Repositories { + fmt.Fprintf(buf, "%s\n", tag) + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/regclient/regclient/types/slog.go b/vendor/github.com/regclient/regclient/types/slog.go new file mode 100644 index 00000000..99a4f520 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/slog.go @@ -0,0 +1,8 @@ +package types + +import "log/slog" + +const ( + // LevelTrace is used for tracing network requests. + LevelTrace = slog.LevelDebug - 4 +) diff --git a/vendor/github.com/regclient/regclient/types/tag/gcrlist.go b/vendor/github.com/regclient/regclient/types/tag/gcrlist.go new file mode 100644 index 00000000..beec3bd9 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/tag/gcrlist.go @@ -0,0 +1,98 @@ +// Contents in this file are from github.com/google/go-containerregistry + +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tag + +import ( + "encoding/json" + "strconv" + "time" +) + +type gcrRawManifestInfo struct { + Size string `json:"imageSizeBytes"` + MediaType string `json:"mediaType"` + Created string `json:"timeCreatedMs"` + Uploaded string `json:"timeUploadedMs"` + Tags []string `json:"tag"` +} + +// GCRManifestInfo is a Manifests entry is the output of List and Walk. 
+type GCRManifestInfo struct { + Size uint64 `json:"imageSizeBytes"` + MediaType string `json:"mediaType"` + Created time.Time `json:"timeCreatedMs"` + Uploaded time.Time `json:"timeUploadedMs"` + Tags []string `json:"tag"` +} + +func fromUnixMs(ms int64) time.Time { + sec := ms / 1000 + ns := (ms % 1000) * 1000000 + return time.Unix(sec, ns) +} + +func toUnixMs(t time.Time) string { + return strconv.FormatInt(t.UnixNano()/1000000, 10) +} + +// MarshalJSON implements json.Marshaler +func (m GCRManifestInfo) MarshalJSON() ([]byte, error) { + return json.Marshal(gcrRawManifestInfo{ + Size: strconv.FormatUint(m.Size, 10), + MediaType: m.MediaType, + Created: toUnixMs(m.Created), + Uploaded: toUnixMs(m.Uploaded), + Tags: m.Tags, + }) +} + +// UnmarshalJSON implements json.Unmarshaler +func (m *GCRManifestInfo) UnmarshalJSON(data []byte) error { + raw := gcrRawManifestInfo{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + + if raw.Size != "" { + size, err := strconv.ParseUint(raw.Size, 10, 64) + if err != nil { + return err + } + m.Size = size + } + + if raw.Created != "" { + created, err := strconv.ParseInt(raw.Created, 10, 64) + if err != nil { + return err + } + m.Created = fromUnixMs(created) + } + + if raw.Uploaded != "" { + uploaded, err := strconv.ParseInt(raw.Uploaded, 10, 64) + if err != nil { + return err + } + m.Uploaded = fromUnixMs(uploaded) + } + + m.MediaType = raw.MediaType + m.Tags = raw.Tags + + return nil +} diff --git a/vendor/github.com/regclient/regclient/types/tag/tag.go b/vendor/github.com/regclient/regclient/types/tag/tag.go new file mode 100644 index 00000000..e76a9852 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/tag/tag.go @@ -0,0 +1,2 @@ +// Package tag is used for wrapping tag lists +package tag diff --git a/vendor/github.com/regclient/regclient/types/tag/taglist.go b/vendor/github.com/regclient/regclient/types/tag/taglist.go new file mode 100644 index 00000000..02661e24 --- /dev/null +++ 
b/vendor/github.com/regclient/regclient/types/tag/taglist.go @@ -0,0 +1,255 @@ +package tag + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strings" + + "github.com/regclient/regclient/types/errs" + "github.com/regclient/regclient/types/mediatype" + ociv1 "github.com/regclient/regclient/types/oci/v1" + "github.com/regclient/regclient/types/ref" +) + +// List contains a tag list. +// Currently this is a struct but the underlying type could be changed to an interface in the future. +// Using methods is recommended over directly accessing fields. +type List struct { + tagCommon + DockerList + GCRList + LayoutList +} + +type tagCommon struct { + r ref.Ref + mt string + orig interface{} + rawHeader http.Header + rawBody []byte + url *url.URL +} + +// DockerList is returned from registry/2.0 API's. +type DockerList struct { + Name string `json:"name"` + Tags []string `json:"tags"` +} + +// GCRList fields are from gcr.io. +type GCRList struct { + Children []string `json:"child,omitempty"` + Manifests map[string]GCRManifestInfo `json:"manifest,omitempty"` +} + +// LayoutList includes the OCI Index from an OCI Layout. +type LayoutList struct { + Index ociv1.Index +} + +type tagConfig struct { + ref ref.Ref + mt string + raw []byte + header http.Header + index ociv1.Index + tags []string + url *url.URL +} + +// Opts defines options for creating a new tag. +type Opts func(*tagConfig) + +// New creates a tag list from options. +// Tags may be provided directly, or they will be parsed from the raw input based on the media type. 
+func New(opts ...Opts) (*List, error) { + conf := tagConfig{} + for _, opt := range opts { + opt(&conf) + } + if conf.mt == "" { + conf.mt = "application/json" + } + tl := List{} + tc := tagCommon{ + r: conf.ref, + mt: conf.mt, + rawHeader: conf.header, + rawBody: conf.raw, + url: conf.url, + } + if len(conf.tags) > 0 { + tl.Tags = conf.tags + } + if conf.index.Manifests != nil { + tl.LayoutList.Index = conf.index + } + if len(conf.raw) > 0 { + mt := mediatype.Base(conf.mt) + switch mt { + case "application/json", "text/plain": + err := json.Unmarshal(conf.raw, &tl) + if err != nil { + return nil, err + } + case mediatype.OCI1ManifestList: + // noop + default: + return nil, fmt.Errorf("%w: media type: %s, reference: %s", errs.ErrUnsupportedMediaType, conf.mt, conf.ref.CommonName()) + } + } + tl.tagCommon = tc + + return &tl, nil +} + +// WithHeaders includes data from http headers when creating tag list. +func WithHeaders(header http.Header) Opts { + return func(tConf *tagConfig) { + tConf.header = header + } +} + +// WithLayoutIndex include the index from an OCI Layout. +func WithLayoutIndex(index ociv1.Index) Opts { + return func(tConf *tagConfig) { + tConf.index = index + } +} + +// WithMT sets the returned media type on the tag list. +func WithMT(mt string) Opts { + return func(tConf *tagConfig) { + tConf.mt = mt + } +} + +// WithRaw defines the raw response from the tag list request. +func WithRaw(raw []byte) Opts { + return func(tConf *tagConfig) { + tConf.raw = raw + } +} + +// WithRef specifies the reference (repository) associated with the tag list. +func WithRef(ref ref.Ref) Opts { + return func(tConf *tagConfig) { + tConf.ref = ref + } +} + +// WithResp includes the response from an http request. 
+func WithResp(resp *http.Response) Opts { + return func(tConf *tagConfig) { + if len(tConf.raw) == 0 { + body, err := io.ReadAll(resp.Body) + if err == nil { + tConf.raw = body + } + } + if tConf.header == nil { + tConf.header = resp.Header + } + if tConf.mt == "" && resp.Header != nil { + tConf.mt = resp.Header.Get("Content-Type") + } + if tConf.url == nil { + tConf.url = resp.Request.URL + } + } +} + +// WithTags provides the parsed tags for the tag list. +func WithTags(tags []string) Opts { + return func(tConf *tagConfig) { + tConf.tags = tags + } +} + +// Append extends a tag list with another. +func (l *List) Append(add *List) error { + // verify two lists are compatible + if l.mt != add.mt || !ref.EqualRepository(l.r, add.r) || l.Name != add.Name { + return fmt.Errorf("unable to append, lists are incompatible") + } + if add.orig != nil { + l.orig = add.orig + } + if add.rawBody != nil { + l.rawBody = add.rawBody + } + if add.rawHeader != nil { + l.rawHeader = add.rawHeader + } + if add.url != nil { + l.url = add.url + } + l.Tags = append(l.Tags, add.Tags...) + if add.Children != nil { + l.Children = append(l.Children, add.Children...) + } + if add.Manifests != nil { + if l.Manifests == nil { + l.Manifests = add.Manifests + } else { + for k, v := range add.Manifests { + l.Manifests[k] = v + } + } + } + return nil +} + +// GetOrig returns the underlying tag data structure if defined. +func (t tagCommon) GetOrig() interface{} { + return t.orig +} + +// MarshalJSON returns the tag list in json. +func (t tagCommon) MarshalJSON() ([]byte, error) { + if len(t.rawBody) > 0 { + return t.rawBody, nil + } + + if t.orig != nil { + return json.Marshal((t.orig)) + } + return []byte{}, fmt.Errorf("JSON marshalling failed: %w", errs.ErrNotFound) +} + +// RawBody returns the original tag list response. +func (t tagCommon) RawBody() ([]byte, error) { + return t.rawBody, nil +} + +// RawHeaders returns the received http headers. 
+func (t tagCommon) RawHeaders() (http.Header, error) { + return t.rawHeader, nil +} + +// GetURL returns the URL of the request. +func (t tagCommon) GetURL() *url.URL { + return t.url +} + +// GetTags returns the tags from a list. +func (tl DockerList) GetTags() ([]string, error) { + return tl.Tags, nil +} + +// MarshalPretty is used for printPretty template formatting. +func (tl DockerList) MarshalPretty() ([]byte, error) { + sort.Slice(tl.Tags, func(i, j int) bool { + return strings.Compare(tl.Tags[i], tl.Tags[j]) < 0 + }) + buf := &bytes.Buffer{} + for _, tag := range tl.Tags { + fmt.Fprintf(buf, "%s\n", tag) + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/regclient/regclient/types/warning/warning.go b/vendor/github.com/regclient/regclient/types/warning/warning.go new file mode 100644 index 00000000..f14fb333 --- /dev/null +++ b/vendor/github.com/regclient/regclient/types/warning/warning.go @@ -0,0 +1,79 @@ +// Package warning is used to handle HTTP warning headers +package warning + +import ( + "context" + "log/slog" + "sync" +) + +type contextKey string + +var key contextKey = "key" + +type Warning struct { + List []string + Hook *func(context.Context, *slog.Logger, string) + mu sync.Mutex +} + +func (w *Warning) Handle(ctx context.Context, slog *slog.Logger, msg string) { + w.mu.Lock() + defer w.mu.Unlock() + // dedup + for _, entry := range w.List { + if entry == msg { + return + } + } + w.List = append(w.List, msg) + // handle new warning if hook defined + if w.Hook != nil { + (*w.Hook)(ctx, slog, msg) + } +} + +func NewContext(ctx context.Context, w *Warning) context.Context { + return context.WithValue(ctx, key, w) +} + +func FromContext(ctx context.Context) *Warning { + wAny := ctx.Value(key) + if wAny == nil { + return nil + } + w, ok := wAny.(*Warning) + if !ok { + return nil + } + return w +} + +func NewHook(log *slog.Logger) *func(context.Context, *slog.Logger, string) { + hook := func(_ context.Context, _ *slog.Logger, msg string) { 
+ logMsg(log, msg) + } + return &hook +} + +func DefaultHook() *func(context.Context, *slog.Logger, string) { + hook := func(_ context.Context, slog *slog.Logger, msg string) { + logMsg(slog, msg) + } + return &hook +} + +func Handle(ctx context.Context, slog *slog.Logger, msg string) { + // check for context + if w := FromContext(ctx); w != nil { + w.Handle(ctx, slog, msg) + return + } + + // fallback to log + logMsg(slog, msg) +} + +func logMsg(log *slog.Logger, msg string) { + log.Warn("Registry warning message", slog.String("warning", msg)) +} diff --git a/vendor/github.com/ulikunitz/xz/.gitignore b/vendor/github.com/ulikunitz/xz/.gitignore new file mode 100644 index 00000000..eb3d5f51 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/.gitignore @@ -0,0 +1,28 @@ +# .gitignore + +TODO.html +README.html + +lzma/writer.txt +lzma/reader.txt + +cmd/gxz/gxz +cmd/xb/xb + +# test executables +*.test + +# profile files +*.out + +# vim swap file +.*.swp + +# executables on windows +*.exe + +# default compression test file +enwik8* + +# file generated by example +example.xz \ No newline at end of file diff --git a/vendor/github.com/ulikunitz/xz/LICENSE b/vendor/github.com/ulikunitz/xz/LICENSE new file mode 100644 index 00000000..8a7f0877 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2014-2022 Ulrich Kunitz +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +* My name, Ulrich Kunitz, may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md new file mode 100644 index 00000000..56d49275 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/README.md @@ -0,0 +1,88 @@ +# Package xz + +This Go language package supports the reading and writing of xz +compressed streams. It includes also a gxz command for compressing and +decompressing data. The package is completely written in Go and doesn't +have any dependency on any C code. + +The package is currently under development. There might be bugs and APIs +are not considered stable. At this time the package cannot compete with +the xz tool regarding compression speed and size. The algorithms there +have been developed over a long time and are highly optimized. However +there are a number of improvements planned and I'm very optimistic about +parallel compression and decompression. Stay tuned! + +## Using the API + +The following example program shows how to use the API. 
+ +```go +package main + +import ( + "bytes" + "io" + "log" + "os" + + "github.com/ulikunitz/xz" +) + +func main() { + const text = "The quick brown fox jumps over the lazy dog.\n" + var buf bytes.Buffer + // compress text + w, err := xz.NewWriter(&buf) + if err != nil { + log.Fatalf("xz.NewWriter error %s", err) + } + if _, err := io.WriteString(w, text); err != nil { + log.Fatalf("WriteString error %s", err) + } + if err := w.Close(); err != nil { + log.Fatalf("w.Close error %s", err) + } + // decompress buffer and write output to stdout + r, err := xz.NewReader(&buf) + if err != nil { + log.Fatalf("NewReader error %s", err) + } + if _, err = io.Copy(os.Stdout, r); err != nil { + log.Fatalf("io.Copy error %s", err) + } +} +``` + +## Documentation + +You can find the full documentation at [pkg.go.dev](https://pkg.go.dev/github.com/ulikunitz/xz). + +## Using the gxz compression tool + +The package includes a gxz command line utility for compression and +decompression. + +Use following command for installation: + + $ go get github.com/ulikunitz/xz/cmd/gxz + +To test it call the following command. + + $ gxz bigfile + +After some time a much smaller file bigfile.xz will replace bigfile. +To decompress it use the following command. + + $ gxz -d bigfile.xz + +## Security & Vulnerabilities + +The security policy is documented in [SECURITY.md](SECURITY.md). + +The software is not affected by the supply chain attack on the original xz +implementation, [CVE-2024-3094](https://nvd.nist.gov/vuln/detail/CVE-2024-3094). +This implementation doesn't share any files with the original xz implementation +and no patches or pull requests are accepted without a review. + +All security advisories for this project are published under +[github.com/ulikunitz/xz/security/advisories](https://github.com/ulikunitz/xz/security/advisories?state=published). 
diff --git a/vendor/github.com/ulikunitz/xz/SECURITY.md b/vendor/github.com/ulikunitz/xz/SECURITY.md new file mode 100644 index 00000000..1bdc8887 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +Currently the last minor version v0.5.x is supported. + +## Reporting a Vulnerability + +You can privately report a vulnerability following this +[procedure](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability). +Alternatively you can create a Github issue at +. + +In both cases expect a response in at least 7 days. + +## Security Advisories + +All security advisories for this project are published under +[github.com/ulikunitz/xz/security/advisories](https://github.com/ulikunitz/xz/security/advisories?state=published). diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md new file mode 100644 index 00000000..c466ffed --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/TODO.md @@ -0,0 +1,377 @@ +# TODO list + +## Release v0.5.x + +1. Support check flag in gxz command. + +## Release v0.6 + +1. Review encoder and check for lzma improvements under xz. +2. Fix binary tree matcher. +3. Compare compression ratio with xz tool using comparable parameters and optimize parameters +4. rename operation action and make it a simple type of size 8 +5. make maxMatches, wordSize parameters +6. stop searching after a certain length is found (parameter sweetLen) + +## Release v0.7 + +1. Optimize code +2. Do statistical analysis to get linear presets. +3. Test sync.Pool compatability for xz and lzma Writer and Reader +4. Fuzz optimized code. + +## Release v0.8 + +1. Support parallel go routines for writing and reading xz files. +2. Support a ReaderAt interface for xz files with small block sizes. +3. 
Improve compatibility between gxz and xz +4. Provide manual page for gxz + +## Release v0.9 + +1. Improve documentation +2. Fuzz again + +## Release v1.0 + +1. Full functioning gxz +2. Add godoc URL to README.md (godoc.org) +3. Resolve all issues. +4. Define release candidates. +5. Public announcement. + +## Package lzma + +### v0.6 + +* Rewrite Encoder into a simple greedy one-op-at-a-time encoder including + * simple scan at the dictionary head for the same byte + * use the killer byte (requiring matches to get longer, the first test should be the byte that would make the match longer) + +## Optimizations + +* There may be a lot of false sharing in lzma. State; check whether this can be improved by reorganizing the internal structure of it. + +* Check whether batching encoding and decoding improves speed. + +### DAG optimizations + +* Use full buffer to create minimal bit-length above range encoder. +* Might be too slow (see v0.4) + +### Different match finders + +* hashes with 2, 3 characters additional to 4 characters +* binary trees with 2-7 characters (uint64 as key, use uint32 as + + pointers into a an array) + +* rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers + + into an array with bit-steeling for the colors) + +## Release Procedure + +* execute goch -l for all packages; probably with lower param like 0.5. +* check orthography with gospell +* Write release notes in doc/relnotes. +* Update README.md +* xb copyright . in xz directory to ensure all new files have Copyright header +* `VERSION= go generate github.com/ulikunitz/xz/...` to update version files +* Execute test for Linux/amd64, Linux/x86 and Windows/amd64. +* Update TODO.md - write short log entry +* `git checkout master && git merge dev` +* `git tag -a ` +* `git push` + +## Log + +### 2024-04-03 + +Release v0.5.12 updates README.md and SECURITY.md to address the supply chain +attack on the original xz implementation. 
+ +### 2022-12-12 + +Matt Dantay (@bodgit) reported an issue with the LZMA reader. The implementation +returned an error if the dictionary size was less than 4096 byte, but the +recommendation stated the actual used window size should be set to 4096 byte in +that case. It actually was the pull request +[#52](https://github.com/ulikunitz/xz/pull/52). The new patch v0.5.11 will fix +it. + +### 2021-02-02 + +Mituo Heijo has fuzzed xz and found a bug in the function readIndexBody. The +function allocated a slice of records immediately after reading the value +without further checks. Since the number has been too large the make function +did panic. The fix is to check the number against the expected number of records +before allocating the records. + +### 2020-12-17 + +Release v0.5.9 fixes warnings, a typo and adds SECURITY.md. + +One fix is interesting. + +```go +const ( + a byte = 0x1 + b = 0x2 +) +``` + +The constants a and b don't have the same type. Correct is + +```go +const ( + a byte = 0x1 + b byte = 0x2 +) +``` + +### 2020-08-19 + +Release v0.5.8 fixes issue +[issue #35](https://github.com/ulikunitz/xz/issues/35). + +### 2020-02-24 + +Release v0.5.7 supports the check-ID None and fixes +[issue #27](https://github.com/ulikunitz/xz/issues/27). + +### 2019-02-20 + +Release v0.5.6 supports the go.mod file. + +### 2018-10-28 + +Release v0.5.5 fixes issues #19 observing ErrLimit outputs. + +### 2017-06-05 + +Release v0.5.4 fixes issues #15 of another problem with the padding size +check for the xz block header. I removed the check completely. + +### 2017-02-15 + +Release v0.5.3 fixes issue #12 regarding the decompression of an empty +XZ stream. Many thanks to Tomasz Kłak, who reported the issue. + +### 2016-12-02 + +Release v0.5.2 became necessary to allow the decoding of xz files with +4-byte padding in the block header. Many thanks to Greg, who reported +the issue. + +### 2016-07-23 + +Release v0.5.1 became necessary to fix problems with 32-bit platforms. 
+Many thanks to Bruno Brigas, who reported the issue. + +### 2016-07-04 + +Release v0.5 provides improvements to the compressor and provides support for +the decompression of xz files with multiple xz streams. + +### 2016-01-31 + +Another compression rate increase by checking the byte at length of the +best match first, before checking the whole prefix. This makes the +compressor even faster. We have now a large time budget to beat the +compression ratio of the xz tool. For enwik8 we have now over 40 seconds +to reduce the compressed file size for another 7 MiB. + +### 2016-01-30 + +I simplified the encoder. Speed and compression rate increased +dramatically. A high compression rate affects also the decompression +speed. The approach with the buffer and optimizing for operation +compression rate has not been successful. Going for the maximum length +appears to be the best approach. + +### 2016-01-28 + +The release v0.4 is ready. It provides a working xz implementation, +which is rather slow, but works and is interoperable with the xz tool. +It is an important milestone. + +### 2016-01-10 + +I have the first working implementation of an xz reader and writer. I'm +happy about reaching this milestone. + +### 2015-12-02 + +I'm now ready to implement xz because, I have a working LZMA2 +implementation. I decided today that v0.4 will use the slow encoder +using the operations buffer to be able to go back, if I intend to do so. + +### 2015-10-21 + +I have restarted the work on the library. While trying to implement +LZMA2, I discovered that I need to resimplify the encoder and decoder +functions. The option approach is too complicated. Using a limited byte +writer and not caring for written bytes at all and not to try to handle +uncompressed data simplifies the LZMA encoder and decoder much. +Processing uncompressed data and handling limits is a feature of the +LZMA2 format not of LZMA. + +I learned an interesting method from the LZO format. 
If the last copy is +too far away they are moving the head one 2 bytes and not 1 byte to +reduce processing times. + +### 2015-08-26 + +I have now reimplemented the lzma package. The code is reasonably fast, +but can still be optimized. The next step is to implement LZMA2 and then +xz. + +### 2015-07-05 + +Created release v0.3. The version is the foundation for a full xz +implementation that is the target of v0.4. + +### 2015-06-11 + +The gflag package has been developed because I couldn't use flag and +pflag for a fully compatible support of gzip's and lzma's options. It +seems to work now quite nicely. + +### 2015-06-05 + +The overflow issue was interesting to research, however Henry S. Warren +Jr. Hacker's Delight book was very helpful as usual and had the issue +explained perfectly. Fefe's information on his website was based on the +C FAQ and quite bad, because it didn't address the issue of -MININT == +MININT. + +### 2015-06-04 + +It has been a productive day. I improved the interface of lzma. Reader +and lzma. Writer and fixed the error handling. + +### 2015-06-01 + +By computing the bit length of the LZMA operations I was able to +improve the greedy algorithm implementation. By using an 8 MByte buffer +the compression rate was not as good as for xz but already better then +gzip default. + +Compression is currently slow, but this is something we will be able to +improve over time. + +### 2015-05-26 + +Checked the license of ogier/pflag. The binary lzmago binary should +include the license terms for the pflag library. + +I added the endorsement clause as used by Google for the Go sources the +LICENSE file. + +### 2015-05-22 + +The package lzb contains now the basic implementation for creating or +reading LZMA byte streams. It allows the support for the implementation +of the DAG-shortest-path algorithm for the compression function. + +### 2015-04-23 + +Completed yesterday the lzbase classes. 
I'm a little bit concerned that +using the components may require too much code, but on the other hand +there is a lot of flexibility. + +### 2015-04-22 + +Implemented Reader and Writer during the Bayern game against Porto. The +second half gave me enough time. + +### 2015-04-21 + +While showering today morning I discovered that the design for OpEncoder +and OpDecoder doesn't work, because encoding/decoding might depend on +the current status of the dictionary. This is not exactly the right way +to start the day. + +Therefore we need to keep the Reader and Writer design. This time around +we simplify it by ignoring size limits. These can be added by wrappers +around the Reader and Writer interfaces. The Parameters type isn't +needed anymore. + +However I will implement a ReaderState and WriterState type to use +static typing to ensure the right State object is combined with the +right lzbase. Reader and lzbase. Writer. + +As a start I have implemented ReaderState and WriterState to ensure +that the state for reading is only used by readers and WriterState only +used by Writers. + +### 2015-04-20 + +Today I implemented the OpDecoder and tested OpEncoder and OpDecoder. + +### 2015-04-08 + +Came up with a new simplified design for lzbase. I implemented already +the type State that replaces OpCodec. + +### 2015-04-06 + +The new lzma package is now fully usable and lzmago is using it now. The +old lzma package has been completely removed. + +### 2015-04-05 + +Implemented lzma. Reader and tested it. + +### 2015-04-04 + +Implemented baseReader by adapting code form lzma. Reader. + +### 2015-04-03 + +The opCodec has been copied yesterday to lzma2. opCodec has a high +number of dependencies on other files in lzma2. Therefore I had to copy +almost all files from lzma. + +### 2015-03-31 + +Removed only a TODO item. 
+ +However in Francesco Campoy's presentation "Go for Javaneros +(Javaïstes?)" is the the idea that using an embedded field E, all the +methods of E will be defined on T. If E is an interface T satisfies E. + + + +I have never used this, but it seems to be a cool idea. + +### 2015-03-30 + +Finished the type writerDict and wrote a simple test. + +### 2015-03-25 + +I started to implement the writerDict. + +### 2015-03-24 + +After thinking long about the LZMA2 code and several false starts, I +have now a plan to create a self-sufficient lzma2 package that supports +the classic LZMA format as well as LZMA2. The core idea is to support a +baseReader and baseWriter type that support the basic LZMA stream +without any headers. Both types must support the reuse of dictionaries +and the opCodec. + +### 2015-01-10 + +1. Implemented simple lzmago tool +2. Tested tool against large 4.4G file + * compression worked correctly; tested decompression with lzma + * decompression hits a full buffer condition +3. Fixed a bug in the compressor and wrote a test for it +4. Executed full cycle for 4.4 GB file; performance can be improved ;-) + +### 2015-01-11 + +* Release v0.2 because of the working LZMA encoder and decoder diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go new file mode 100644 index 00000000..b30f1ec9 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/bits.go @@ -0,0 +1,79 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "io" +) + +// putUint32LE puts the little-endian representation of x into the first +// four bytes of p. +func putUint32LE(p []byte, x uint32) { + p[0] = byte(x) + p[1] = byte(x >> 8) + p[2] = byte(x >> 16) + p[3] = byte(x >> 24) +} + +// putUint64LE puts the little-endian representation of x into the first +// eight bytes of p. 
+func putUint64LE(p []byte, x uint64) { + p[0] = byte(x) + p[1] = byte(x >> 8) + p[2] = byte(x >> 16) + p[3] = byte(x >> 24) + p[4] = byte(x >> 32) + p[5] = byte(x >> 40) + p[6] = byte(x >> 48) + p[7] = byte(x >> 56) +} + +// uint32LE converts a little endian representation to an uint32 value. +func uint32LE(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | + uint32(p[3])<<24 +} + +// putUvarint puts a uvarint representation of x into the byte slice. +func putUvarint(p []byte, x uint64) int { + i := 0 + for x >= 0x80 { + p[i] = byte(x) | 0x80 + x >>= 7 + i++ + } + p[i] = byte(x) + return i + 1 +} + +// errOverflow indicates an overflow of the 64-bit unsigned integer. +var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer") + +// readUvarint reads a uvarint from the given byte reader. +func readUvarint(r io.ByteReader) (x uint64, n int, err error) { + const maxUvarintLen = 10 + + var s uint + i := 0 + for { + b, err := r.ReadByte() + if err != nil { + return x, i, err + } + i++ + if i > maxUvarintLen { + return x, i, errOverflowU64 + } + if b < 0x80 { + if i == maxUvarintLen && b > 1 { + return x, i, errOverflowU64 + } + return x | uint64(b)< 0 { + k = 4 - k + } + return k +} + +/*** Header ***/ + +// headerMagic stores the magic bytes for the header +var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00} + +// HeaderLen provides the length of the xz file header. +const HeaderLen = 12 + +// Constants for the checksum methods supported by xz. +const ( + None byte = 0x0 + CRC32 byte = 0x1 + CRC64 byte = 0x4 + SHA256 byte = 0xa +) + +// errInvalidFlags indicates that flags are invalid. +var errInvalidFlags = errors.New("xz: invalid flags") + +// verifyFlags returns the error errInvalidFlags if the value is +// invalid. +func verifyFlags(flags byte) error { + switch flags { + case None, CRC32, CRC64, SHA256: + return nil + default: + return errInvalidFlags + } +} + +// flagstrings maps flag values to strings. 
+var flagstrings = map[byte]string{ + None: "None", + CRC32: "CRC-32", + CRC64: "CRC-64", + SHA256: "SHA-256", +} + +// flagString returns the string representation for the given flags. +func flagString(flags byte) string { + s, ok := flagstrings[flags] + if !ok { + return "invalid" + } + return s +} + +// newHashFunc returns a function that creates hash instances for the +// hash method encoded in flags. +func newHashFunc(flags byte) (newHash func() hash.Hash, err error) { + switch flags { + case None: + newHash = newNoneHash + case CRC32: + newHash = newCRC32 + case CRC64: + newHash = newCRC64 + case SHA256: + newHash = sha256.New + default: + err = errInvalidFlags + } + return +} + +// header provides the actual content of the xz file header: the flags. +type header struct { + flags byte +} + +// Errors returned by readHeader. +var errHeaderMagic = errors.New("xz: invalid header magic bytes") + +// ValidHeader checks whether data is a correct xz file header. The +// length of data must be HeaderLen. +func ValidHeader(data []byte) bool { + var h header + err := h.UnmarshalBinary(data) + return err == nil +} + +// String returns a string representation of the flags. +func (h header) String() string { + return flagString(h.flags) +} + +// UnmarshalBinary reads header from the provided data slice. +func (h *header) UnmarshalBinary(data []byte) error { + // header length + if len(data) != HeaderLen { + return errors.New("xz: wrong file header length") + } + + // magic header + if !bytes.Equal(headerMagic, data[:6]) { + return errHeaderMagic + } + + // checksum + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + if uint32LE(data[8:]) != crc.Sum32() { + return errors.New("xz: invalid checksum for file header") + } + + // stream flags + if data[6] != 0 { + return errInvalidFlags + } + flags := data[7] + if err := verifyFlags(flags); err != nil { + return err + } + + h.flags = flags + return nil +} + +// MarshalBinary generates the xz file header. 
+func (h *header) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(h.flags); err != nil { + return nil, err + } + + data = make([]byte, 12) + copy(data, headerMagic) + data[7] = h.flags + + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + putUint32LE(data[8:], crc.Sum32()) + + return data, nil +} + +/*** Footer ***/ + +// footerLen defines the length of the footer. +const footerLen = 12 + +// footerMagic contains the footer magic bytes. +var footerMagic = []byte{'Y', 'Z'} + +// footer represents the content of the xz file footer. +type footer struct { + indexSize int64 + flags byte +} + +// String prints a string representation of the footer structure. +func (f footer) String() string { + return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize) +} + +// Minimum and maximum for the size of the index (backward size). +const ( + minIndexSize = 4 + maxIndexSize = (1 << 32) * 4 +) + +// MarshalBinary converts footer values into an xz file footer. Note +// that the footer value is checked for correctness. +func (f *footer) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(f.flags); err != nil { + return nil, err + } + if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) { + return nil, errors.New("xz: index size out of range") + } + if f.indexSize%4 != 0 { + return nil, errors.New( + "xz: index size not aligned to four bytes") + } + + data = make([]byte, footerLen) + + // backward size (index size) + s := (f.indexSize / 4) - 1 + putUint32LE(data[4:], uint32(s)) + // flags + data[9] = f.flags + // footer magic + copy(data[10:], footerMagic) + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + putUint32LE(data, crc.Sum32()) + + return data, nil +} + +// UnmarshalBinary sets the footer value by unmarshalling an xz file +// footer. 
+func (f *footer) UnmarshalBinary(data []byte) error { + if len(data) != footerLen { + return errors.New("xz: wrong footer length") + } + + // magic bytes + if !bytes.Equal(data[10:], footerMagic) { + return errors.New("xz: footer magic invalid") + } + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + if uint32LE(data) != crc.Sum32() { + return errors.New("xz: footer checksum error") + } + + var g footer + // backward size (index size) + g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4 + + // flags + if data[8] != 0 { + return errInvalidFlags + } + g.flags = data[9] + if err := verifyFlags(g.flags); err != nil { + return err + } + + *f = g + return nil +} + +/*** Block Header ***/ + +// blockHeader represents the content of an xz block header. +type blockHeader struct { + compressedSize int64 + uncompressedSize int64 + filters []filter +} + +// String converts the block header into a string. +func (h blockHeader) String() string { + var buf bytes.Buffer + first := true + if h.compressedSize >= 0 { + fmt.Fprintf(&buf, "compressed size %d", h.compressedSize) + first = false + } + if h.uncompressedSize >= 0 { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize) + first = false + } + for _, f := range h.filters { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "filter %s", f) + first = false + } + return buf.String() +} + +// Masks for the block flags. +const ( + filterCountMask = 0x03 + compressedSizePresent = 0x40 + uncompressedSizePresent = 0x80 + reservedBlockFlags = 0x3C +) + +// errIndexIndicator signals that an index indicator (0x00) has been found +// instead of an expected block header indicator. +var errIndexIndicator = errors.New("xz: found index indicator") + +// readBlockHeader reads the block header. 
+func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) { + var buf bytes.Buffer + buf.Grow(20) + + // block header size + z, err := io.CopyN(&buf, r, 1) + n = int(z) + if err != nil { + return nil, n, err + } + s := buf.Bytes()[0] + if s == 0 { + return nil, n, errIndexIndicator + } + + // read complete header + headerLen := (int(s) + 1) * 4 + buf.Grow(headerLen - 1) + z, err = io.CopyN(&buf, r, int64(headerLen-1)) + n += int(z) + if err != nil { + return nil, n, err + } + + // unmarshal block header + h = new(blockHeader) + if err = h.UnmarshalBinary(buf.Bytes()); err != nil { + return nil, n, err + } + + return h, n, nil +} + +// readSizeInBlockHeader reads the uncompressed or compressed size +// fields in the block header. The present value informs the function +// whether the respective field is actually present in the header. +func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) { + if !present { + return -1, nil + } + x, _, err := readUvarint(r) + if err != nil { + return 0, err + } + if x >= 1<<63 { + return 0, errors.New("xz: size overflow in block header") + } + return int64(x), nil +} + +// UnmarshalBinary unmarshals the block header. 
+func (h *blockHeader) UnmarshalBinary(data []byte) error { + // Check header length + s := data[0] + if data[0] == 0 { + return errIndexIndicator + } + headerLen := (int(s) + 1) * 4 + if len(data) != headerLen { + return fmt.Errorf("xz: data length %d; want %d", len(data), + headerLen) + } + n := headerLen - 4 + + // Check CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[:n]) + if crc.Sum32() != uint32LE(data[n:]) { + return errors.New("xz: checksum error for block header") + } + + // Block header flags + flags := data[1] + if flags&reservedBlockFlags != 0 { + return errors.New("xz: reserved block header flags set") + } + + r := bytes.NewReader(data[2:n]) + + // Compressed size + var err error + h.compressedSize, err = readSizeInBlockHeader( + r, flags&compressedSizePresent != 0) + if err != nil { + return err + } + + // Uncompressed size + h.uncompressedSize, err = readSizeInBlockHeader( + r, flags&uncompressedSizePresent != 0) + if err != nil { + return err + } + + h.filters, err = readFilters(r, int(flags&filterCountMask)+1) + if err != nil { + return err + } + + // Check padding + // Since headerLen is a multiple of 4 we don't need to check + // alignment. + k := r.Len() + // The standard spec says that the padding should have not more + // than 3 bytes. However we found paddings of 4 or 5 in the + // wild. See https://github.com/ulikunitz/xz/pull/11 and + // https://github.com/ulikunitz/xz/issues/15 + // + // The only reasonable approach seems to be to ignore the + // padding size. We still check that all padding bytes are zero. + if !allZeros(data[n-k : n]) { + return errPadding + } + return nil +} + +// MarshalBinary marshals the binary header. 
+func (h *blockHeader) MarshalBinary() (data []byte, err error) { + if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) { + return nil, errors.New("xz: filter count wrong") + } + for i, f := range h.filters { + if i < len(h.filters)-1 { + if f.id() == lzmaFilterID { + return nil, errors.New( + "xz: LZMA2 filter is not the last") + } + } else { + // last filter + if f.id() != lzmaFilterID { + return nil, errors.New("xz: " + + "last filter must be the LZMA2 filter") + } + } + } + + var buf bytes.Buffer + // header size must set at the end + buf.WriteByte(0) + + // flags + flags := byte(len(h.filters) - 1) + if h.compressedSize >= 0 { + flags |= compressedSizePresent + } + if h.uncompressedSize >= 0 { + flags |= uncompressedSizePresent + } + buf.WriteByte(flags) + + p := make([]byte, 10) + if h.compressedSize >= 0 { + k := putUvarint(p, uint64(h.compressedSize)) + buf.Write(p[:k]) + } + if h.uncompressedSize >= 0 { + k := putUvarint(p, uint64(h.uncompressedSize)) + buf.Write(p[:k]) + } + + for _, f := range h.filters { + fp, err := f.MarshalBinary() + if err != nil { + return nil, err + } + buf.Write(fp) + } + + // padding + for i := padLen(int64(buf.Len())); i > 0; i-- { + buf.WriteByte(0) + } + + // crc place holder + buf.Write(p[:4]) + + data = buf.Bytes() + if len(data)%4 != 0 { + panic("data length not aligned") + } + s := len(data)/4 - 1 + if !(1 < s && s <= 255) { + panic("wrong block header size") + } + data[0] = byte(s) + + crc := crc32.NewIEEE() + crc.Write(data[:len(data)-4]) + putUint32LE(data[len(data)-4:], crc.Sum32()) + + return data, nil +} + +// Constants used for marshalling and unmarshalling filters in the xz +// block header. +const ( + minFilters = 1 + maxFilters = 4 + minReservedID = 1 << 62 +) + +// filter represents a filter in the block header. 
+type filter interface { + id() uint64 + UnmarshalBinary(data []byte) error + MarshalBinary() (data []byte, err error) + reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error) + writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error) + // filter must be last filter + last() bool +} + +// readFilter reads a block filter from the block header. At this point +// in time only the LZMA2 filter is supported. +func readFilter(r io.Reader) (f filter, err error) { + br := lzma.ByteReader(r) + + // index + id, _, err := readUvarint(br) + if err != nil { + return nil, err + } + + var data []byte + switch id { + case lzmaFilterID: + data = make([]byte, lzmaFilterLen) + data[0] = lzmaFilterID + if _, err = io.ReadFull(r, data[1:]); err != nil { + return nil, err + } + f = new(lzmaFilter) + default: + if id >= minReservedID { + return nil, errors.New( + "xz: reserved filter id in block stream header") + } + return nil, errors.New("xz: invalid filter id") + } + if err = f.UnmarshalBinary(data); err != nil { + return nil, err + } + return f, err +} + +// readFilters reads count filters. At this point in time only the count +// 1 is supported. +func readFilters(r io.Reader, count int) (filters []filter, err error) { + if count != 1 { + return nil, errors.New("xz: unsupported filter count") + } + f, err := readFilter(r) + if err != nil { + return nil, err + } + return []filter{f}, err +} + +/*** Index ***/ + +// record describes a block in the xz file index. +type record struct { + unpaddedSize int64 + uncompressedSize int64 +} + +// readRecord reads an index record. 
+func readRecord(r io.ByteReader) (rec record, n int, err error) { + u, k, err := readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.unpaddedSize = int64(u) + if rec.unpaddedSize < 0 { + return rec, n, errors.New("xz: unpadded size negative") + } + + u, k, err = readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.uncompressedSize = int64(u) + if rec.uncompressedSize < 0 { + return rec, n, errors.New("xz: uncompressed size negative") + } + + return rec, n, nil +} + +// MarshalBinary converts an index record in its binary encoding. +func (rec *record) MarshalBinary() (data []byte, err error) { + // maximum length of a uvarint is 10 + p := make([]byte, 20) + n := putUvarint(p, uint64(rec.unpaddedSize)) + n += putUvarint(p[n:], uint64(rec.uncompressedSize)) + return p[:n], nil +} + +// writeIndex writes the index, a sequence of records. +func writeIndex(w io.Writer, index []record) (n int64, err error) { + crc := crc32.NewIEEE() + mw := io.MultiWriter(w, crc) + + // index indicator + k, err := mw.Write([]byte{0}) + n += int64(k) + if err != nil { + return n, err + } + + // number of records + p := make([]byte, 10) + k = putUvarint(p, uint64(len(index))) + k, err = mw.Write(p[:k]) + n += int64(k) + if err != nil { + return n, err + } + + // list of records + for _, rec := range index { + p, err := rec.MarshalBinary() + if err != nil { + return n, err + } + k, err = mw.Write(p) + n += int64(k) + if err != nil { + return n, err + } + } + + // index padding + k, err = mw.Write(make([]byte, padLen(int64(n)))) + n += int64(k) + if err != nil { + return n, err + } + + // crc32 checksum + putUint32LE(p, crc.Sum32()) + k, err = w.Write(p[:4]) + n += int64(k) + + return n, err +} + +// readIndexBody reads the index from the reader. It assumes that the +// index indicator has already been read. 
+func readIndexBody(r io.Reader, expectedRecordLen int) (records []record, n int64, err error) { + crc := crc32.NewIEEE() + // index indicator + crc.Write([]byte{0}) + + br := lzma.ByteReader(io.TeeReader(r, crc)) + + // number of records + u, k, err := readUvarint(br) + n += int64(k) + if err != nil { + return nil, n, err + } + recLen := int(u) + if recLen < 0 || uint64(recLen) != u { + return nil, n, errors.New("xz: record number overflow") + } + if recLen != expectedRecordLen { + return nil, n, fmt.Errorf( + "xz: index length is %d; want %d", + recLen, expectedRecordLen) + } + + // list of records + records = make([]record, recLen) + for i := range records { + records[i], k, err = readRecord(br) + n += int64(k) + if err != nil { + return nil, n, err + } + } + + p := make([]byte, padLen(int64(n+1)), 4) + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return nil, n, err + } + if !allZeros(p) { + return nil, n, errors.New("xz: non-zero byte in index padding") + } + + // crc32 + s := crc.Sum32() + p = p[:4] + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return records, n, err + } + if uint32LE(p) != s { + return nil, n, errors.New("xz: wrong checksum for index") + } + + return records, n, nil +} diff --git a/vendor/github.com/ulikunitz/xz/fox-check-none.xz b/vendor/github.com/ulikunitz/xz/fox-check-none.xz new file mode 100644 index 00000000..46043f7d Binary files /dev/null and b/vendor/github.com/ulikunitz/xz/fox-check-none.xz differ diff --git a/vendor/github.com/ulikunitz/xz/fox.xz b/vendor/github.com/ulikunitz/xz/fox.xz new file mode 100644 index 00000000..4b820bd5 Binary files /dev/null and b/vendor/github.com/ulikunitz/xz/fox.xz differ diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go new file mode 100644 index 00000000..dae159db --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go @@ -0,0 
+1,181 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// CyclicPoly provides a cyclic polynomial rolling hash. +type CyclicPoly struct { + h uint64 + p []uint64 + i int +} + +// ror rotates the unsigned 64-bit integer to right. The argument s must be +// less than 64. +func ror(x uint64, s uint) uint64 { + return (x >> s) | (x << (64 - s)) +} + +// NewCyclicPoly creates a new instance of the CyclicPoly structure. The +// argument n gives the number of bytes for which a hash will be executed. +// This number must be positive; the method panics if this isn't the case. +func NewCyclicPoly(n int) *CyclicPoly { + if n < 1 { + panic("argument n must be positive") + } + return &CyclicPoly{p: make([]uint64, 0, n)} +} + +// Len returns the length of the byte sequence for which a hash is generated. +func (r *CyclicPoly) Len() int { + return cap(r.p) +} + +// RollByte hashes the next byte and returns a hash value. The complete becomes +// available after at least Len() bytes have been hashed. +func (r *CyclicPoly) RollByte(x byte) uint64 { + y := hash[x] + if len(r.p) < cap(r.p) { + r.h = ror(r.h, 1) ^ y + r.p = append(r.p, y) + } else { + r.h ^= ror(r.p[r.i], uint(cap(r.p)-1)) + r.h = ror(r.h, 1) ^ y + r.p[r.i] = y + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} + +// Stores the hash for the individual bytes. 
+var hash = [256]uint64{ + 0x2e4fc3f904065142, 0xc790984cfbc99527, + 0x879f95eb8c62f187, 0x3b61be86b5021ef2, + 0x65a896a04196f0a5, 0xc5b307b80470b59e, + 0xd3bff376a70df14b, 0xc332f04f0b3f1701, + 0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53, + 0x1906a10c2c1c0208, 0xfb0c712a03421c0d, + 0x38be311a65c9552b, 0xfee7ee4ca6445c7e, + 0x71aadeded184f21e, 0xd73426fccda23b2d, + 0x29773fb5fb9600b5, 0xce410261cd32981a, + 0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c, + 0xc13e35fc9c73a887, 0xf30ed5c201e76dbc, + 0xa5f10b3910482cea, 0x2945d59be02dfaad, + 0x06ee334ff70571b5, 0xbabf9d8070f44380, + 0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7, + 0x26183cb9f7b1664c, 0xea71dac7da068f21, + 0xea92eca5bd1d0bb7, 0x415595862defcd75, + 0x248a386023c60648, 0x9cf021ab284b3c8a, + 0xfc9372df02870f6c, 0x2b92d693eeb3b3fc, + 0x73e799d139dc6975, 0x7b15ae312486363c, + 0xb70e5454a2239c80, 0x208e3fb31d3b2263, + 0x01f563cabb930f44, 0x2ac4533d2a3240d8, + 0x84231ed1064f6f7c, 0xa9f020977c2a6d19, + 0x213c227271c20122, 0x09fe8a9a0a03d07a, + 0x4236dc75bcaf910c, 0x460a8b2bead8f17e, + 0xd9b27be1aa07055f, 0xd202d5dc4b11c33e, + 0x70adb010543bea12, 0xcdae938f7ea6f579, + 0x3f3d870208672f4d, 0x8e6ccbce9d349536, + 0xe4c0871a389095ae, 0xf5f2a49152bca080, + 0x9a43f9b97269934e, 0xc17b3753cb6f475c, + 0xd56d941e8e206bd4, 0xac0a4f3e525eda00, + 0xa06d5a011912a550, 0x5537ed19537ad1df, + 0xa32fe713d611449d, 0x2a1d05b47c3b579f, + 0x991d02dbd30a2a52, 0x39e91e7e28f93eb0, + 0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97, + 0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44, + 0x0b63d5d801708420, 0x8f227ca8f37ffaec, + 0x0256278670887c24, 0x107e14877dbf540b, + 0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61, + 0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001, + 0x31f601d5d31c48c4, 0x72ff3c0928bcaec7, + 0xd99264421147eb03, 0x535a2d6d38aefcfe, + 0x6ba8b4454a916237, 0xfa39366eaae4719c, + 0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4, + 0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8, + 0xd61c2503fe639144, 0x30ce625441eb92d3, + 0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5, + 0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf, 
+ 0xc7ea4872c96b83ae, 0x6dd5d376f4392382, + 0x1be88681aaa9792f, 0xfef465ee1b6c10d9, + 0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9, + 0x7808e902b3857d0b, 0x171c9c4ea4607972, + 0x58d66274850146df, 0x42b311c10d3981d1, + 0x647fa8c621c41a4c, 0xf472771c66ddfedc, + 0x338d27e3f847b46b, 0x6402ce3da97545ce, + 0x5162db616fc38638, 0x9c83be97bc22a50e, + 0x2d3d7478a78d5e72, 0xe621a9b938fd5397, + 0x9454614eb0f81c45, 0x395fb6e742ed39b6, + 0x77dd9179d06037bf, 0xc478d0fee4d2656d, + 0x35d9d6cb772007af, 0x83a56e92c883f0f6, + 0x27937453250c00a1, 0x27bd6ebc3a46a97d, + 0x9f543bf784342d51, 0xd158f38c48b0ed52, + 0x8dd8537c045f66b4, 0x846a57230226f6d5, + 0x6b13939e0c4e7cdf, 0xfca25425d8176758, + 0x92e5fc6cd52788e6, 0x9992e13d7a739170, + 0x518246f7a199e8ea, 0xf104c2a71b9979c7, + 0x86b3ffaabea4768f, 0x6388061cf3e351ad, + 0x09d9b5295de5bbb5, 0x38bf1638c2599e92, + 0x1d759846499e148d, 0x4c0ff015e5f96ef4, + 0xa41a94cfa270f565, 0x42d76f9cb2326c0b, + 0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a, + 0x337523aabbe6cf8d, 0x646bb14001d42b12, + 0xc178729d138adc74, 0xf900ef4491f24086, + 0xee1a90d334bb5ac4, 0x9755c92247301a50, + 0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9, + 0x0fa8084cf91ac6ff, 0x10d226cf136e6189, + 0xd302057a07d4fb21, 0x5f03800e20a0fcc3, + 0x80118d4ae46bd210, 0x58ab61a522843733, + 0x51edd575c5432a4b, 0x94ee6ff67f9197f7, + 0x765669e0e5e8157b, 0xa5347830737132f0, + 0x3ba485a69f01510c, 0x0b247d7b957a01c3, + 0x1b3d63449fd807dc, 0x0fdc4721c30ad743, + 0x8b535ed3829b2b14, 0xee41d0cad65d232c, + 0xe6a99ed97a6a982f, 0x65ac6194c202003d, + 0x692accf3a70573eb, 0xcc3c02c3e200d5af, + 0x0d419e8b325914a3, 0x320f160f42c25e40, + 0x00710d647a51fe7a, 0x3c947692330aed60, + 0x9288aa280d355a7a, 0xa1806a9b791d1696, + 0x5d60e38496763da1, 0x6c69e22e613fd0f4, + 0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba, + 0x460c17992cbaece1, 0xf7822c5444d3297f, + 0x344a9790c69b74aa, 0xb80a42e6cae09dce, + 0x1b1361eaf2b1e757, 0xd84c1e758e236f01, + 0x88e0b7be347627cc, 0x45246009b7a99490, + 0x8011c6dd3fe50472, 0xc341d682bffb99d7, + 0x2511be93808e2d15, 
0xd5bc13d7fd739840, + 0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157, + 0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0, + 0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc, + 0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e, + 0xa559cce0d9199aac, 0xde39d47ef3723380, + 0xe5b69d848ce42e35, 0xefa24296f8e79f52, + 0x70190b59db9a5afc, 0x26f166cdb211e7bf, + 0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017, + 0xb9059b05e9420d90, 0x2f0da855c9388754, + 0x611d5e9ab77949cc, 0x2912038ac01163f4, + 0x0231df50402b2fba, 0x45660fc4f3245f58, + 0xb91cc97c7c8dac50, 0xb72d2aafe4953427, + 0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2, + 0x1310e1c1a48d21c3, 0xad48a7810cdd8544, + 0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de, + 0xe70cfc8fe1ee9626, 0xef4711b0d8dda442, + 0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93, + 0x9b37db9d0335a39c, 0x494b6f870f5cfebc, + 0x6d1b3c1149dda943, 0x372c943a518c1093, + 0xad27af45e77c09c4, 0x3b6f92b646044604, + 0xac2917909f5fcf4f, 0x2069a60e977e5557, + 0x353a469e71014de5, 0x24be356281f55c15, + 0x2b6d710ba8e9adea, 0x404ad1751c749c29, + 0xed7311bf23d7f185, 0xba4f6976b4acc43e, + 0x32d7198d2bc39000, 0xee667019014d6e01, + 0x494ef3e128d14c83, 0x1f95a152baecd6be, + 0x201648dff1f483a5, 0x68c28550c8384af6, + 0x5fc834a6824a7f48, 0x7cd06cb7365eaf28, + 0xd82bbd95e9b30909, 0x234f0d1694c53f6d, + 0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e, + 0xf8f6b97f5585080a, 0x74236084be57b95b, + 0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b, + 0x4378ffe93e1528c5, 0x94ca92a17118e2d2, +} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go new file mode 100644 index 00000000..b4cf8b75 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go @@ -0,0 +1,14 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package hash provides rolling hashes. + +Rolling hashes have to be used for maintaining the positions of n-byte +sequences in the dictionary buffer. 
+ +The package provides currently the Rabin-Karp rolling hash and a Cyclic +Polynomial hash. Both support the Hashes method to be used with an interface. +*/ +package hash diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go new file mode 100644 index 00000000..5322342e --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go @@ -0,0 +1,66 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// A is the default constant for Robin-Karp rolling hash. This is a random +// prime. +const A = 0x97b548add41d5da1 + +// RabinKarp supports the computation of a rolling hash. +type RabinKarp struct { + A uint64 + // a^n + aOldest uint64 + h uint64 + p []byte + i int +} + +// NewRabinKarp creates a new RabinKarp value. The argument n defines the +// length of the byte sequence to be hashed. The default constant will will be +// used. +func NewRabinKarp(n int) *RabinKarp { + return NewRabinKarpConst(n, A) +} + +// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the +// length of the byte sequence to be hashed. The argument a provides the +// constant used to compute the hash. +func NewRabinKarpConst(n int, a uint64) *RabinKarp { + if n <= 0 { + panic("number of bytes n must be positive") + } + aOldest := uint64(1) + // There are faster methods. For the small n required by the LZMA + // compressor O(n) is sufficient. + for i := 0; i < n; i++ { + aOldest *= a + } + return &RabinKarp{ + A: a, aOldest: aOldest, + p: make([]byte, 0, n), + } +} + +// Len returns the length of the byte sequence. +func (r *RabinKarp) Len() int { + return cap(r.p) +} + +// RollByte computes the hash after x has been added. 
+func (r *RabinKarp) RollByte(x byte) uint64 { + if len(r.p) < cap(r.p) { + r.h += uint64(x) + r.h *= r.A + r.p = append(r.p, x) + } else { + r.h -= uint64(r.p[r.i]) * r.aOldest + r.h += uint64(x) + r.h *= r.A + r.p[r.i] = x + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go new file mode 100644 index 00000000..a9898335 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go @@ -0,0 +1,29 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// Roller provides an interface for rolling hashes. The hash value will become +// valid after hash has been called Len times. +type Roller interface { + Len() int + RollByte(x byte) uint64 +} + +// Hashes computes all hash values for the array p. Note that the state of the +// roller is changed. +func Hashes(r Roller, p []byte) []uint64 { + n := r.Len() + if len(p) < n { + return nil + } + h := make([]uint64, len(p)-n+1) + for i := 0; i < n-1; i++ { + r.RollByte(p[i]) + } + for i := range h { + h[i] = r.RollByte(p[i+n-1]) + } + return h +} diff --git a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go new file mode 100644 index 00000000..f4627ea1 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go @@ -0,0 +1,456 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xlog provides a simple logging package that allows to disable +// certain message categories. It defines a type, Logger, with multiple +// methods for formatting output. 
The package has also a predefined +// 'standard' Logger accessible through helper function Print[f|ln], +// Fatal[f|ln], Panic[f|ln], Warn[f|ln], Print[f|ln] and Debug[f|ln] +// that are easier to use then creating a Logger manually. That logger +// writes to standard error and prints the date and time of each logged +// message, which can be configured using the function SetFlags. +// +// The Fatal functions call os.Exit(1) after the message is output +// unless not suppressed by the flags. The Panic functions call panic +// after the writing the log message unless suppressed. +package xlog + +import ( + "fmt" + "io" + "os" + "runtime" + "sync" + "time" +) + +// The flags define what information is prefixed to each log entry +// generated by the Logger. The Lno* versions allow the suppression of +// specific output. The bits are or'ed together to control what will be +// printed. There is no control over the order of the items printed and +// the format. The full format is: +// +// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message +const ( + Ldate = 1 << iota // the date: 2009-01-23 + Ltime // the time: 01:23:23 + Lmicroseconds // microsecond resolution: 01:23:23.123123 + Llongfile // full file name and line number: /a/b/c/d.go:23 + Lshortfile // final file name element and line number: d.go:23 + Lnopanic // suppresses output from Panic[f|ln] but not the panic call + Lnofatal // suppresses output from Fatal[f|ln] but not the exit + Lnowarn // suppresses output from Warn[f|ln] + Lnoprint // suppresses output from Print[f|ln] + Lnodebug // suppresses output from Debug[f|ln] + // initial values for the standard logger + Lstdflags = Ldate | Ltime | Lnodebug +) + +// A Logger represents an active logging object that generates lines of +// output to an io.Writer. Each logging operation if not suppressed +// makes a single call to the Writer's Write method. 
A Logger can be +// used simultaneously from multiple goroutines; it guarantees to +// serialize access to the Writer. +type Logger struct { + mu sync.Mutex // ensures atomic writes; and protects the following + // fields + prefix string // prefix to write at beginning of each line + flag int // properties + out io.Writer // destination for output + buf []byte // for accumulating text to write +} + +// New creates a new Logger. The out argument sets the destination to +// which the log output will be written. The prefix appears at the +// beginning of each log line. The flag argument defines the logging +// properties. +func New(out io.Writer, prefix string, flag int) *Logger { + return &Logger{out: out, prefix: prefix, flag: flag} +} + +// std is the standard logger used by the package scope functions. +var std = New(os.Stderr, "", Lstdflags) + +// itoa converts the integer to ASCII. A negative widths will avoid +// zero-padding. The function supports only non-negative integers. +func itoa(buf *[]byte, i int, wid int) { + var u = uint(i) + if u == 0 && wid <= 1 { + *buf = append(*buf, '0') + return + } + var b [32]byte + bp := len(b) + for ; u > 0 || wid > 0; u /= 10 { + bp-- + wid-- + b[bp] = byte(u%10) + '0' + } + *buf = append(*buf, b[bp:]...) +} + +// formatHeader puts the header into the buf field of the buffer. +func (l *Logger) formatHeader(t time.Time, file string, line int) { + l.buf = append(l.buf, l.prefix...) 
+ if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 { + if l.flag&Ldate != 0 { + year, month, day := t.Date() + itoa(&l.buf, year, 4) + l.buf = append(l.buf, '-') + itoa(&l.buf, int(month), 2) + l.buf = append(l.buf, '-') + itoa(&l.buf, day, 2) + l.buf = append(l.buf, ' ') + } + if l.flag&(Ltime|Lmicroseconds) != 0 { + hour, min, sec := t.Clock() + itoa(&l.buf, hour, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, min, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, sec, 2) + if l.flag&Lmicroseconds != 0 { + l.buf = append(l.buf, '.') + itoa(&l.buf, t.Nanosecond()/1e3, 6) + } + l.buf = append(l.buf, ' ') + } + } + if l.flag&(Lshortfile|Llongfile) != 0 { + if l.flag&Lshortfile != 0 { + short := file + for i := len(file) - 1; i > 0; i-- { + if file[i] == '/' { + short = file[i+1:] + break + } + } + file = short + } + l.buf = append(l.buf, file...) + l.buf = append(l.buf, ':') + itoa(&l.buf, line, -1) + l.buf = append(l.buf, ": "...) + } +} + +func (l *Logger) output(calldepth int, now time.Time, s string) error { + var file string + var line int + if l.flag&(Lshortfile|Llongfile) != 0 { + l.mu.Unlock() + var ok bool + _, file, line, ok = runtime.Caller(calldepth) + if !ok { + file = "???" + line = 0 + } + l.mu.Lock() + } + l.buf = l.buf[:0] + l.formatHeader(now, file, line) + l.buf = append(l.buf, s...) + if len(s) == 0 || s[len(s)-1] != '\n' { + l.buf = append(l.buf, '\n') + } + _, err := l.out.Write(l.buf) + return err +} + +// Output writes the string s with the header controlled by the flags to +// the l.out writer. A newline will be appended if s doesn't end in a +// newline. Calldepth is used to recover the PC, although all current +// calls of Output use the call depth 2. Access to the function is serialized. +func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprint(v...) 
+ return l.output(calldepth+1, now, s) +} + +// Outputf works like output but formats the output like Printf. +func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintf(format, v...) + return l.output(calldepth+1, now, s) +} + +// Outputln works like output but formats the output like Println. +func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintln(v...) + return l.output(calldepth+1, now, s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panic(v ...interface{}) { + l.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panic(v ...interface{}) { + std.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicf(format string, v ...interface{}) { + l.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicf(format string, v ...interface{}) { + std.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicln(v ...interface{}) { + l.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. 
The printing +// might be suppressed by the flag Lnopanic. +func Panicln(v ...interface{}) { + std.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatal(v ...interface{}) { + l.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatal(v ...interface{}) { + std.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalf(format string, v ...interface{}) { + l.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalf(format string, v ...interface{}) { + std.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalln(format string, v ...interface{}) { + l.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalln(format string, v ...interface{}) { + std.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warn(v ...interface{}) { + l.Output(2, Lnowarn, v...) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func Warn(v ...interface{}) { + std.Output(2, Lnowarn, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. 
+func (l *Logger) Warnf(format string, v ...interface{}) { + l.Outputf(2, Lnowarn, format, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func Warnf(format string, v ...interface{}) { + std.Outputf(2, Lnowarn, format, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnln(v ...interface{}) { + l.Outputln(2, Lnowarn, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func Warnln(v ...interface{}) { + std.Outputln(2, Lnowarn, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Print(v ...interface{}) { + l.Output(2, Lnoprint, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func Print(v ...interface{}) { + std.Output(2, Lnoprint, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Printf(format string, v ...interface{}) { + l.Outputf(2, Lnoprint, format, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func Printf(format string, v ...interface{}) { + std.Outputf(2, Lnoprint, format, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func (l *Logger) Println(v ...interface{}) { + l.Outputln(2, Lnoprint, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func Println(v ...interface{}) { + std.Outputln(2, Lnoprint, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debug(v ...interface{}) { + l.Output(2, Lnodebug, v...) 
+} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func Debug(v ...interface{}) { + std.Output(2, Lnodebug, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugf(format string, v ...interface{}) { + l.Outputf(2, Lnodebug, format, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func Debugf(format string, v ...interface{}) { + std.Outputf(2, Lnodebug, format, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugln(v ...interface{}) { + l.Outputln(2, Lnodebug, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func Debugln(v ...interface{}) { + std.Outputln(2, Lnodebug, v...) +} + +// Flags returns the current flags used by the logger. +func (l *Logger) Flags() int { + l.mu.Lock() + defer l.mu.Unlock() + return l.flag +} + +// Flags returns the current flags used by the standard logger. +func Flags() int { + return std.Flags() +} + +// SetFlags sets the flags of the logger. +func (l *Logger) SetFlags(flag int) { + l.mu.Lock() + defer l.mu.Unlock() + l.flag = flag +} + +// SetFlags sets the flags for the standard logger. +func SetFlags(flag int) { + std.SetFlags(flag) +} + +// Prefix returns the prefix used by the logger. +func (l *Logger) Prefix() string { + l.mu.Lock() + defer l.mu.Unlock() + return l.prefix +} + +// Prefix returns the prefix used by the standard logger of the package. +func Prefix() string { + return std.Prefix() +} + +// SetPrefix sets the prefix for the logger. +func (l *Logger) SetPrefix(prefix string) { + l.mu.Lock() + defer l.mu.Unlock() + l.prefix = prefix +} + +// SetPrefix sets the prefix of the standard logger of the package. 
+func SetPrefix(prefix string) { + std.SetPrefix(prefix) +} + +// SetOutput sets the output of the logger. +func (l *Logger) SetOutput(w io.Writer) { + l.mu.Lock() + defer l.mu.Unlock() + l.out = w +} + +// SetOutput sets the output for the standard logger of the package. +func SetOutput(w io.Writer) { + std.SetOutput(w) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go new file mode 100644 index 00000000..2b39da6f --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -0,0 +1,522 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "unicode" +) + +// node represents a node in the binary tree. +type node struct { + // x is the search value + x uint32 + // p parent node + p uint32 + // l left child + l uint32 + // r right child + r uint32 +} + +// wordLen is the number of bytes represented by the v field of a node. +const wordLen = 4 + +// binTree supports the identification of the next operation based on a +// binary tree. +// +// Nodes will be identified by their index into the ring buffer. +type binTree struct { + dict *encoderDict + // ring buffer of nodes + node []node + // absolute offset of the entry for the next node. Position 4 + // byte larger. + hoff int64 + // front position in the node ring buffer + front uint32 + // index of the root node + root uint32 + // current x value + x uint32 + // preallocated array + data []byte +} + +// null represents the nonexistent index. We can't use zero because it +// would always exist or we would need to decrease the index for each +// reference. +const null uint32 = 1<<32 - 1 + +// newBinTree initializes the binTree structure. The capacity defines +// the size of the buffer and defines the maximum distance for which +// matches will be found. 
+func newBinTree(capacity int) (t *binTree, err error) { + if capacity < 1 { + return nil, errors.New( + "newBinTree: capacity must be larger than zero") + } + if int64(capacity) >= int64(null) { + return nil, errors.New( + "newBinTree: capacity must less 2^{32}-1") + } + t = &binTree{ + node: make([]node, capacity), + hoff: -int64(wordLen), + root: null, + data: make([]byte, maxMatchLen), + } + return t, nil +} + +func (t *binTree) SetDict(d *encoderDict) { t.dict = d } + +// WriteByte writes a single byte into the binary tree. +func (t *binTree) WriteByte(c byte) error { + t.x = (t.x << 8) | uint32(c) + t.hoff++ + if t.hoff < 0 { + return nil + } + v := t.front + if int64(v) < t.hoff { + // We are overwriting old nodes stored in the tree. + t.remove(v) + } + t.node[v].x = t.x + t.add(v) + t.front++ + if int64(t.front) >= int64(len(t.node)) { + t.front = 0 + } + return nil +} + +// Writes writes a sequence of bytes into the binTree structure. +func (t *binTree) Write(p []byte) (n int, err error) { + for _, c := range p { + t.WriteByte(c) + } + return len(p), nil +} + +// add puts the node v into the tree. The node must not be part of the +// tree before. +func (t *binTree) add(v uint32) { + vn := &t.node[v] + // Set left and right to null indices. + vn.l, vn.r = null, null + // If the binary tree is empty make v the root. + if t.root == null { + t.root = v + vn.p = null + return + } + x := vn.x + p := t.root + // Search for the right leave link and add the new node. + for { + pn := &t.node[p] + if x <= pn.x { + if pn.l == null { + pn.l = v + vn.p = p + return + } + p = pn.l + } else { + if pn.r == null { + pn.r = v + vn.p = p + return + } + p = pn.r + } + } +} + +// parent returns the parent node index of v and the pointer to v value +// in the parent. 
+func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) { + if t.root == v { + return null, &t.root + } + p = t.node[v].p + if t.node[p].l == v { + ptr = &t.node[p].l + } else { + ptr = &t.node[p].r + } + return +} + +// Remove node v. +func (t *binTree) remove(v uint32) { + vn := &t.node[v] + p, ptr := t.parent(v) + l, r := vn.l, vn.r + if l == null { + // Move the right child up. + *ptr = r + if r != null { + t.node[r].p = p + } + return + } + if r == null { + // Move the left child up. + *ptr = l + t.node[l].p = p + return + } + + // Search the in-order predecessor u. + un := &t.node[l] + ur := un.r + if ur == null { + // In order predecessor is l. Move it up. + un.r = r + t.node[r].p = l + un.p = p + *ptr = l + return + } + var u uint32 + for { + // Look for the max value in the tree where l is root. + u = ur + ur = t.node[u].r + if ur == null { + break + } + } + // replace u with ul + un = &t.node[u] + ul := un.l + up := un.p + t.node[up].r = ul + if ul != null { + t.node[ul].p = up + } + + // replace v by u + un.l, un.r = l, r + t.node[l].p = u + t.node[r].p = u + *ptr = u + un.p = p +} + +// search looks for the node that have the value x or for the nodes that +// brace it. The node highest in the tree with the value x will be +// returned. All other nodes with the same value live in left subtree of +// the returned node. +func (t *binTree) search(v uint32, x uint32) (a, b uint32) { + a, b = null, null + if v == null { + return + } + for { + vn := &t.node[v] + if x <= vn.x { + if x == vn.x { + return v, v + } + b = v + if vn.l == null { + return + } + v = vn.l + } else { + a = v + if vn.r == null { + return + } + v = vn.r + } + } +} + +// max returns the node with maximum value in the subtree with v as +// root. +func (t *binTree) max(v uint32) uint32 { + if v == null { + return null + } + for { + r := t.node[v].r + if r == null { + return v + } + v = r + } +} + +// min returns the node with the minimum value in the subtree with v as +// root. 
+func (t *binTree) min(v uint32) uint32 { + if v == null { + return null + } + for { + l := t.node[v].l + if l == null { + return v + } + v = l + } +} + +// pred returns the in-order predecessor of node v. +func (t *binTree) pred(v uint32) uint32 { + if v == null { + return null + } + u := t.max(t.node[v].l) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].r == v { + return p + } + v = p + } +} + +// succ returns the in-order successor of node v. +func (t *binTree) succ(v uint32) uint32 { + if v == null { + return null + } + u := t.min(t.node[v].r) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].l == v { + return p + } + v = p + } +} + +// xval converts the first four bytes of a into an 32-bit unsigned +// integer in big-endian order. +func xval(a []byte) uint32 { + var x uint32 + switch len(a) { + default: + x |= uint32(a[3]) + fallthrough + case 3: + x |= uint32(a[2]) << 8 + fallthrough + case 2: + x |= uint32(a[1]) << 16 + fallthrough + case 1: + x |= uint32(a[0]) << 24 + case 0: + } + return x +} + +// dumpX converts value x into a four-letter string. +func dumpX(x uint32) string { + a := make([]byte, 4) + for i := 0; i < 4; i++ { + c := byte(x >> uint((3-i)*8)) + if unicode.IsGraphic(rune(c)) { + a[i] = c + } else { + a[i] = '.' + } + } + return string(a) +} + +/* +// dumpNode writes a representation of the node v into the io.Writer. +func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) { + if v == null { + return + } + + vn := &t.node[v] + + t.dumpNode(w, vn.r, indent+2) + + for i := 0; i < indent; i++ { + fmt.Fprint(w, " ") + } + if vn.p == null { + fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x)) + } else { + fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p) + } + + t.dumpNode(w, vn.l, indent+2) +} + +// dump prints a representation of the binary tree into the writer. 
+func (t *binTree) dump(w io.Writer) error { + bw := bufio.NewWriter(w) + t.dumpNode(bw, t.root, 0) + return bw.Flush() +} +*/ + +func (t *binTree) distance(v uint32) int { + dist := int(t.front) - int(v) + if dist <= 0 { + dist += len(t.node) + } + return dist +} + +type matchParams struct { + rep [4]uint32 + // length when match will be accepted + nAccept int + // nodes to check + check int + // finish if length get shorter + stopShorter bool +} + +func (t *binTree) match(m match, distIter func() (int, bool), p matchParams, +) (r match, checked int, accepted bool) { + buf := &t.dict.buf + for { + if checked >= p.check { + return m, checked, true + } + dist, ok := distIter() + if !ok { + return m, checked, false + } + checked++ + if m.n > 0 { + i := buf.rear - dist + m.n - 1 + if i < 0 { + i += len(buf.data) + } else if i >= len(buf.data) { + i -= len(buf.data) + } + if buf.data[i] != t.data[m.n-1] { + if p.stopShorter { + return m, checked, false + } + continue + } + } + n := buf.matchLen(dist, t.data) + switch n { + case 0: + if p.stopShorter { + return m, checked, false + } + continue + case 1: + if uint32(dist-minDistance) != p.rep[0] { + continue + } + } + if n < m.n || (n == m.n && int64(dist) >= m.distance) { + continue + } + m = match{int64(dist), n} + if n >= p.nAccept { + return m, checked, true + } + } +} + +func (t *binTree) NextOp(rep [4]uint32) operation { + // retrieve maxMatchLen data + n, _ := t.dict.buf.Peek(t.data[:maxMatchLen]) + if n == 0 { + panic("no data in buffer") + } + t.data = t.data[:n] + + var ( + m match + x, u, v uint32 + iterPred, iterSucc func() (int, bool) + ) + p := matchParams{ + rep: rep, + nAccept: maxMatchLen, + check: 32, + } + i := 4 + iterSmall := func() (dist int, ok bool) { + i-- + if i <= 0 { + return 0, false + } + return i, true + } + m, checked, accepted := t.match(m, iterSmall, p) + if accepted { + goto end + } + p.check -= checked + x = xval(t.data) + u, v = t.search(t.root, x) + if u == v && len(t.data) == 4 { + 
iter := func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u, v = t.search(t.node[u].l, x) + if u != v { + u = null + } + return dist, true + } + m, _, _ = t.match(m, iter, p) + goto end + } + p.stopShorter = true + iterSucc = func() (dist int, ok bool) { + if v == null { + return 0, false + } + dist = t.distance(v) + v = t.succ(v) + return dist, true + } + m, checked, accepted = t.match(m, iterSucc, p) + if accepted { + goto end + } + p.check -= checked + iterPred = func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u = t.pred(u) + return dist, true + } + m, _, _ = t.match(m, iterPred, p) +end: + if m.n == 0 { + return lit{t.data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go new file mode 100644 index 00000000..20109170 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go @@ -0,0 +1,47 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +/* Naming conventions follows the CodeReviewComments in the Go Wiki. */ + +// ntz32Const is used by the functions NTZ and NLZ. +const ntz32Const = 0x04d7651f + +// ntz32Table is a helper table for de Bruijn algorithm by Danny Dubé. +// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26. +var ntz32Table = [32]int8{ + 0, 1, 2, 24, 3, 19, 6, 25, + 22, 4, 20, 10, 16, 7, 12, 26, + 31, 23, 18, 5, 21, 9, 15, 11, + 30, 17, 8, 14, 29, 13, 28, 27, +} + +/* +// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer. +func ntz32(x uint32) int { + if x == 0 { + return 32 + } + x = (x & -x) * ntz32Const + return int(ntz32Table[x>>27]) +} +*/ + +// nlz32 computes the number of leading zeros for an unsigned 32-bit integer. 
+func nlz32(x uint32) int { + // Smear left most bit to the right + x |= x >> 1 + x |= x >> 2 + x |= x >> 4 + x |= x >> 8 + x |= x >> 16 + // Use ntz mechanism to calculate nlz. + x++ + if x == 0 { + return 0 + } + x *= ntz32Const + return 32 - int(ntz32Table[x>>27]) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/breader.go b/vendor/github.com/ulikunitz/xz/lzma/breader.go new file mode 100644 index 00000000..9dfdf28b --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/breader.go @@ -0,0 +1,39 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// breader provides the ReadByte function for a Reader. It doesn't read +// more data from the reader than absolutely necessary. +type breader struct { + io.Reader + // helper slice to save allocations + p []byte +} + +// ByteReader converts an io.Reader into an io.ByteReader. +func ByteReader(r io.Reader) io.ByteReader { + br, ok := r.(io.ByteReader) + if !ok { + return &breader{r, make([]byte, 1)} + } + return br +} + +// ReadByte read byte function. +func (r *breader) ReadByte() (c byte, err error) { + n, err := r.Reader.Read(r.p) + if n < 1 { + if err == nil { + err = errors.New("breader.ReadByte: no data") + } + return 0, err + } + return r.p[0], nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/vendor/github.com/ulikunitz/xz/lzma/buffer.go new file mode 100644 index 00000000..af41d5b2 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/buffer.go @@ -0,0 +1,171 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" +) + +// buffer provides a circular buffer of bytes. If the front index equals +// the rear index the buffer is empty. 
As a consequence front cannot be +// equal rear for a full buffer. So a full buffer has a length that is +// one byte less the the length of the data slice. +type buffer struct { + data []byte + front int + rear int +} + +// newBuffer creates a buffer with the given size. +func newBuffer(size int) *buffer { + return &buffer{data: make([]byte, size+1)} +} + +// Cap returns the capacity of the buffer. +func (b *buffer) Cap() int { + return len(b.data) - 1 +} + +// Resets the buffer. The front and rear index are set to zero. +func (b *buffer) Reset() { + b.front = 0 + b.rear = 0 +} + +// Buffered returns the number of bytes buffered. +func (b *buffer) Buffered() int { + delta := b.front - b.rear + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// Available returns the number of bytes available for writing. +func (b *buffer) Available() int { + delta := b.rear - 1 - b.front + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// addIndex adds a non-negative integer to the index i and returns the +// resulting index. The function takes care of wrapping the index as +// well as potential overflow situations. +func (b *buffer) addIndex(i int, n int) int { + // subtraction of len(b.data) prevents overflow + i += n - len(b.data) + if i < 0 { + i += len(b.data) + } + return i +} + +// Read reads bytes from the buffer into p and returns the number of +// bytes read. The function never returns an error but might return less +// data than requested. +func (b *buffer) Read(p []byte) (n int, err error) { + n, err = b.Peek(p) + b.rear = b.addIndex(b.rear, n) + return n, err +} + +// Peek reads bytes from the buffer into p without changing the buffer. +// Peek will never return an error but might return less data than +// requested. 
+func (b *buffer) Peek(p []byte) (n int, err error) { + m := b.Buffered() + n = len(p) + if m < n { + n = m + p = p[:n] + } + k := copy(p, b.data[b.rear:]) + if k < n { + copy(p[k:], b.data) + } + return n, nil +} + +// Discard skips the n next bytes to read from the buffer, returning the +// bytes discarded. +// +// If Discards skips fewer than n bytes, it returns an error. +func (b *buffer) Discard(n int) (discarded int, err error) { + if n < 0 { + return 0, errors.New("buffer.Discard: negative argument") + } + m := b.Buffered() + if m < n { + n = m + err = errors.New( + "buffer.Discard: discarded less bytes then requested") + } + b.rear = b.addIndex(b.rear, n) + return n, err +} + +// ErrNoSpace indicates that there is insufficient space for the Write +// operation. +var ErrNoSpace = errors.New("insufficient space") + +// Write puts data into the buffer. If less bytes are written than +// requested ErrNoSpace is returned. +func (b *buffer) Write(p []byte) (n int, err error) { + m := b.Available() + n = len(p) + if m < n { + n = m + p = p[:m] + err = ErrNoSpace + } + k := copy(b.data[b.front:], p) + if k < n { + copy(b.data, p[k:]) + } + b.front = b.addIndex(b.front, n) + return n, err +} + +// WriteByte writes a single byte into the buffer. The error ErrNoSpace +// is returned if no single byte is available in the buffer for writing. +func (b *buffer) WriteByte(c byte) error { + if b.Available() < 1 { + return ErrNoSpace + } + b.data[b.front] = c + b.front = b.addIndex(b.front, 1) + return nil +} + +// prefixLen returns the length of the common prefix of a and b. +func prefixLen(a, b []byte) int { + if len(a) > len(b) { + a, b = b, a + } + for i, c := range a { + if b[i] != c { + return i + } + } + return len(a) +} + +// matchLen returns the length of the common prefix for the given +// distance from the rear and the byte slice p. 
+func (b *buffer) matchLen(distance int, p []byte) int { + var n int + i := b.rear - distance + if i < 0 { + if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i { + return n + } + p = p[n:] + i = 0 + } + n += prefixLen(p, b.data[i:]) + return n +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go new file mode 100644 index 00000000..f27e31a4 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go @@ -0,0 +1,37 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// ErrLimit indicates that the limit of the LimitedByteWriter has been +// reached. +var ErrLimit = errors.New("limit reached") + +// LimitedByteWriter provides a byte writer that can be written until a +// limit is reached. The field N provides the number of remaining +// bytes. +type LimitedByteWriter struct { + BW io.ByteWriter + N int64 +} + +// WriteByte writes a single byte to the limited byte writer. It returns +// ErrLimit if the limit has been reached. If the byte is successfully +// written the field N of the LimitedByteWriter will be decremented by +// one. +func (l *LimitedByteWriter) WriteByte(c byte) error { + if l.N <= 0 { + return ErrLimit + } + if err := l.BW.WriteByte(c); err != nil { + return err + } + l.N-- + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go new file mode 100644 index 00000000..3765484e --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -0,0 +1,277 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// decoder decodes a raw LZMA stream without any header. 
+type decoder struct { + // dictionary; the rear pointer of the buffer will be used for + // reading the data. + Dict *decoderDict + // decoder state + State *state + // range decoder + rd *rangeDecoder + // start stores the head value of the dictionary for the LZMA + // stream + start int64 + // size of uncompressed data + size int64 + // end-of-stream encountered + eos bool + // EOS marker found + eosMarker bool +} + +// newDecoder creates a new decoder instance. The parameter size provides +// the expected byte size of the decompressed data. If the size is +// unknown use a negative value. In that case the decoder will look for +// a terminating end-of-stream marker. +func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) { + rd, err := newRangeDecoder(br) + if err != nil { + return nil, err + } + d = &decoder{ + State: state, + Dict: dict, + rd: rd, + size: size, + start: dict.pos(), + } + return d, nil +} + +// Reopen restarts the decoder with a new byte reader and a new size. Reopen +// resets the Decompressed counter to zero. +func (d *decoder) Reopen(br io.ByteReader, size int64) error { + var err error + if d.rd, err = newRangeDecoder(br); err != nil { + return err + } + d.start = d.Dict.pos() + d.size = size + d.eos = false + return nil +} + +// decodeLiteral decodes a single literal from the LZMA stream. +func (d *decoder) decodeLiteral() (op operation, err error) { + litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head) + match := d.Dict.byteAt(int(d.State.rep[0]) + 1) + s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState) + if err != nil { + return nil, err + } + return lit{s}, nil +} + +// errEOS indicates that an EOS marker has been found. +var errEOS = errors.New("EOS marker found") + +// readOp decodes the next operation from the compressed stream. It +// returns the operation. If an explicit end of stream marker is +// identified the eos error is returned. 
+func (d *decoder) readOp() (op operation, err error) { + // Value of the end of stream (EOS) marker + const eosDist = 1<<32 - 1 + + state, state2, posState := d.State.states(d.Dict.head) + + b, err := d.State.isMatch[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // literal + op, err := d.decodeLiteral() + if err != nil { + return nil, err + } + d.State.updateStateLiteral() + return op, nil + } + b, err = d.State.isRep[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // simple match + d.State.rep[3], d.State.rep[2], d.State.rep[1] = + d.State.rep[2], d.State.rep[1], d.State.rep[0] + + d.State.updateStateMatch() + // The length decoder returns the length offset. + n, err := d.State.lenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + // The dist decoder returns the distance offset. The actual + // distance is 1 higher. + d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n) + if err != nil { + return nil, err + } + if d.State.rep[0] == eosDist { + d.eosMarker = true + return nil, errEOS + } + op = match{n: int(n) + minMatchLen, + distance: int64(d.State.rep[0]) + minDistance} + return op, nil + } + b, err = d.State.isRepG0[state].Decode(d.rd) + if err != nil { + return nil, err + } + dist := d.State.rep[0] + if b == 0 { + // rep match 0 + b, err = d.State.isRepG0Long[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + d.State.updateStateShortRep() + op = match{n: 1, distance: int64(dist) + minDistance} + return op, nil + } + } else { + b, err = d.State.isRepG1[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[1] + } else { + b, err = d.State.isRepG2[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[2] + } else { + dist = d.State.rep[3] + d.State.rep[3] = d.State.rep[2] + } + d.State.rep[2] = d.State.rep[1] + } + d.State.rep[1] = d.State.rep[0] + d.State.rep[0] = dist + } + 
n, err := d.State.repLenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + d.State.updateStateRep() + op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance} + return op, nil +} + +// apply takes the operation and transforms the decoder dictionary accordingly. +func (d *decoder) apply(op operation) error { + var err error + switch x := op.(type) { + case match: + err = d.Dict.writeMatch(x.distance, x.n) + case lit: + err = d.Dict.WriteByte(x.b) + default: + panic("op is neither a match nor a literal") + } + return err +} + +// decompress fills the dictionary unless no space for new data is +// available. If the end of the LZMA stream has been reached io.EOF will +// be returned. +func (d *decoder) decompress() error { + if d.eos { + return io.EOF + } + for d.Dict.Available() >= maxMatchLen { + op, err := d.readOp() + switch err { + case nil: + // break + case errEOS: + d.eos = true + if !d.rd.possiblyAtEnd() { + return errDataAfterEOS + } + if d.size >= 0 && d.size != d.Decompressed() { + return errSize + } + return io.EOF + case io.EOF: + d.eos = true + return io.ErrUnexpectedEOF + default: + return err + } + if err = d.apply(op); err != nil { + return err + } + if d.size >= 0 && d.Decompressed() >= d.size { + d.eos = true + if d.Decompressed() > d.size { + return errSize + } + if !d.rd.possiblyAtEnd() { + switch _, err = d.readOp(); err { + case nil: + return errSize + case io.EOF: + return io.ErrUnexpectedEOF + case errEOS: + break + default: + return err + } + } + return io.EOF + } + } + return nil +} + +// Errors that may be returned while decoding data. +var ( + errDataAfterEOS = errors.New("lzma: data after end of stream marker") + errSize = errors.New("lzma: wrong uncompressed data size") +) + +// Read reads data from the buffer. If no more data is available io.EOF is +// returned. +func (d *decoder) Read(p []byte) (n int, err error) { + var k int + for { + // Read of decoder dict never returns an error. 
+ k, err = d.Dict.Read(p[n:]) + if err != nil { + panic(fmt.Errorf("dictionary read error %s", err)) + } + if k == 0 && d.eos { + return n, io.EOF + } + n += k + if n >= len(p) { + return n, nil + } + if err = d.decompress(); err != nil && err != io.EOF { + return n, err + } + } +} + +// Decompressed returns the number of bytes decompressed by the decoder. +func (d *decoder) Decompressed() int64 { + return d.Dict.pos() - d.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go new file mode 100644 index 00000000..d5b814f0 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -0,0 +1,128 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// decoderDict provides the dictionary for the decoder. The whole +// dictionary is used as reader buffer. +type decoderDict struct { + buf buffer + head int64 +} + +// newDecoderDict creates a new decoder dictionary. The whole dictionary +// will be used as reader buffer. +func newDecoderDict(dictCap int) (d *decoderDict, err error) { + // lower limit supports easy test cases + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New("lzma: dictCap out of range") + } + d = &decoderDict{buf: *newBuffer(dictCap)} + return d, nil +} + +// Reset clears the dictionary. The read buffer is not changed, so the +// buffered data can still be read. +func (d *decoderDict) Reset() { + d.head = 0 +} + +// WriteByte writes a single byte into the dictionary. It is used to +// write literals into the dictionary. +func (d *decoderDict) WriteByte(c byte) error { + if err := d.buf.WriteByte(c); err != nil { + return err + } + d.head++ + return nil +} + +// pos returns the position of the dictionary head. 
+func (d *decoderDict) pos() int64 { return d.head } + +// dictLen returns the actual length of the dictionary. +func (d *decoderDict) dictLen() int { + capacity := d.buf.Cap() + if d.head >= int64(capacity) { + return capacity + } + return int(d.head) +} + +// byteAt returns a byte stored in the dictionary. If the distance is +// non-positive or exceeds the current length of the dictionary the zero +// byte is returned. +func (d *decoderDict) byteAt(dist int) byte { + if !(0 < dist && dist <= d.dictLen()) { + return 0 + } + i := d.buf.front - dist + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// writeMatch writes the match at the top of the dictionary. The given +// distance must point in the current dictionary and the length must not +// exceed the maximum length 273 supported in LZMA. +// +// The error value ErrNoSpace indicates that no space is available in +// the dictionary for writing. You need to read from the dictionary +// first. +func (d *decoderDict) writeMatch(dist int64, length int) error { + if !(0 < dist && dist <= int64(d.dictLen())) { + return errors.New("writeMatch: distance out of range") + } + if !(0 < length && length <= maxMatchLen) { + return errors.New("writeMatch: length out of range") + } + if length > d.buf.Available() { + return ErrNoSpace + } + d.head += int64(length) + + i := d.buf.front - int(dist) + if i < 0 { + i += len(d.buf.data) + } + for length > 0 { + var p []byte + if i >= d.buf.front { + p = d.buf.data[i:] + i = 0 + } else { + p = d.buf.data[i:d.buf.front] + i = d.buf.front + } + if len(p) > length { + p = p[:length] + } + if _, err := d.buf.Write(p); err != nil { + panic(fmt.Errorf("d.buf.Write returned error %s", err)) + } + length -= len(p) + } + return nil +} + +// Write writes the given bytes into the dictionary and advances the +// head. 
+func (d *decoderDict) Write(p []byte) (n int, err error) { + n, err = d.buf.Write(p) + d.head += int64(n) + return n, err +} + +// Available returns the number of available bytes for writing into the +// decoder dictionary. +func (d *decoderDict) Available() int { return d.buf.Available() } + +// Read reads data from the buffer contained in the decoder dictionary. +func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) } diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go new file mode 100644 index 00000000..76b71310 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go @@ -0,0 +1,38 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// directCodec allows the encoding and decoding of values with a fixed number +// of bits. The number of bits must be in the range [1,32]. +type directCodec byte + +// Bits returns the number of bits supported by this codec. +func (dc directCodec) Bits() int { + return int(dc) +} + +// Encode uses the range encoder to encode a value with the fixed number of +// bits. The most-significant bit is encoded first. +func (dc directCodec) Encode(e *rangeEncoder, v uint32) error { + for i := int(dc) - 1; i >= 0; i-- { + if err := e.DirectEncodeBit(v >> uint(i)); err != nil { + return err + } + } + return nil +} + +// Decode uses the range decoder to decode a value with the given number of +// given bits. The most-significant bit is decoded first. 
+func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) { + for i := int(dc) - 1; i >= 0; i-- { + x, err := d.DirectDecodeBit() + if err != nil { + return 0, err + } + v = (v << 1) | x + } + return v, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go new file mode 100644 index 00000000..b447d8ec --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go @@ -0,0 +1,140 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// Constants used by the distance codec. +const ( + // minimum supported distance + minDistance = 1 + // maximum supported distance, value is used for the eos marker. + maxDistance = 1 << 32 + // number of the supported len states + lenStates = 4 + // start for the position models + startPosModel = 4 + // first index with align bits support + endPosModel = 14 + // bits for the position slots + posSlotBits = 6 + // number of align bits + alignBits = 4 +) + +// distCodec provides encoding and decoding of distance values. +type distCodec struct { + posSlotCodecs [lenStates]treeCodec + posModel [endPosModel - startPosModel]treeReverseCodec + alignCodec treeReverseCodec +} + +// deepcopy initializes dc as deep copy of the source. +func (dc *distCodec) deepcopy(src *distCodec) { + if dc == src { + return + } + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i]) + } + for i := range dc.posModel { + dc.posModel[i].deepcopy(&src.posModel[i]) + } + dc.alignCodec.deepcopy(&src.alignCodec) +} + +// newDistCodec creates a new distance codec. 
+func (dc *distCodec) init() { + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits) + } + for i := range dc.posModel { + posSlot := startPosModel + i + bits := (posSlot >> 1) - 1 + dc.posModel[i] = makeTreeReverseCodec(bits) + } + dc.alignCodec = makeTreeReverseCodec(alignBits) +} + +// lenState converts the value l to a supported lenState value. +func lenState(l uint32) uint32 { + if l >= lenStates { + l = lenStates - 1 + } + return l +} + +// Encode encodes the distance using the parameter l. Dist can have values from +// the full range of uint32 values. To get the distance offset the actual match +// distance has to be decreased by 1. A distance offset of 0xffffffff (eos) +// indicates the end of the stream. +func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) { + // Compute the posSlot using nlz32 + var posSlot uint32 + var bits uint32 + if dist < startPosModel { + posSlot = dist + } else { + bits = uint32(30 - nlz32(dist)) + posSlot = startPosModel - 2 + (bits << 1) + posSlot += (dist >> uint(bits)) & 1 + } + + if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil { + return + } + + switch { + case posSlot < startPosModel: + return nil + case posSlot < endPosModel: + tc := &dc.posModel[posSlot-startPosModel] + return tc.Encode(dist, e) + } + dic := directCodec(bits - alignBits) + if err = dic.Encode(e, dist>>alignBits); err != nil { + return + } + return dc.alignCodec.Encode(dist, e) +} + +// Decode decodes the distance offset using the parameter l. The dist value +// 0xffffffff (eos) indicates the end of the stream. Add one to the distance +// offset to get the actual match distance. 
+func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) { + posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d) + if err != nil { + return + } + + // posSlot equals distance + if posSlot < startPosModel { + return posSlot, nil + } + + // posSlot uses the individual models + bits := (posSlot >> 1) - 1 + dist = (2 | (posSlot & 1)) << bits + var u uint32 + if posSlot < endPosModel { + tc := &dc.posModel[posSlot-startPosModel] + if u, err = tc.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil + } + + // posSlots use direct encoding and a single model for the four align + // bits. + dic := directCodec(bits - alignBits) + if u, err = dic.Decode(d); err != nil { + return 0, err + } + dist += u << alignBits + if u, err = dc.alignCodec.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go new file mode 100644 index 00000000..e4093831 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoder.go @@ -0,0 +1,268 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "fmt" + "io" +) + +// opLenMargin provides the upper limit of the number of bytes required +// to encode a single operation. +const opLenMargin = 16 + +// compressFlags control the compression process. +type compressFlags uint32 + +// Values for compressFlags. +const ( + // all data should be compressed, even if compression is not + // optimal. + all compressFlags = 1 << iota +) + +// encoderFlags provide the flags for an encoder. +type encoderFlags uint32 + +// Flags for the encoder. +const ( + // eosMarker requests an EOS marker to be written. + eosMarker encoderFlags = 1 << iota +) + +// Encoder compresses data buffered in the encoder dictionary and writes +// it into a byte writer. 
+type encoder struct { + dict *encoderDict + state *state + re *rangeEncoder + start int64 + // generate eos marker + marker bool + limit bool + margin int +} + +// newEncoder creates a new encoder. If the byte writer must be +// limited use LimitedByteWriter provided by this package. The flags +// argument supports the eosMarker flag, controlling whether a +// terminating end-of-stream marker must be written. +func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict, + flags encoderFlags) (e *encoder, err error) { + + re, err := newRangeEncoder(bw) + if err != nil { + return nil, err + } + e = &encoder{ + dict: dict, + state: state, + re: re, + marker: flags&eosMarker != 0, + start: dict.Pos(), + margin: opLenMargin, + } + if e.marker { + e.margin += 5 + } + return e, nil +} + +// Write writes the bytes from p into the dictionary. If not enough +// space is available the data in the dictionary buffer will be +// compressed to make additional space available. If the limit of the +// underlying writer has been reached ErrLimit will be returned. +func (e *encoder) Write(p []byte) (n int, err error) { + for { + k, err := e.dict.Write(p[n:]) + n += k + if err == ErrNoSpace { + if err = e.compress(0); err != nil { + return n, err + } + continue + } + return n, err + } +} + +// Reopen reopens the encoder with a new byte writer. 
+func (e *encoder) Reopen(bw io.ByteWriter) error { + var err error + if e.re, err = newRangeEncoder(bw); err != nil { + return err + } + e.start = e.dict.Pos() + e.limit = false + return nil +} + +// writeLiteral writes a literal into the LZMA stream +func (e *encoder) writeLiteral(l lit) error { + var err error + state, state2, _ := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil { + return err + } + litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos()) + match := e.dict.ByteAt(int(e.state.rep[0]) + 1) + err = e.state.litCodec.Encode(e.re, l.b, state, match, litState) + if err != nil { + return err + } + e.state.updateStateLiteral() + return nil +} + +// iverson implements the Iverson operator as proposed by Donald Knuth in his +// book Concrete Mathematics. +func iverson(ok bool) uint32 { + if ok { + return 1 + } + return 0 +} + +// writeMatch writes a repetition operation into the operation stream +func (e *encoder) writeMatch(m match) error { + var err error + if !(minDistance <= m.distance && m.distance <= maxDistance) { + panic(fmt.Errorf("match distance %d out of range", m.distance)) + } + dist := uint32(m.distance - minDistance) + if !(minMatchLen <= m.n && m.n <= maxMatchLen) && + !(dist == e.state.rep[0] && m.n == 1) { + panic(fmt.Errorf( + "match length %d out of range; dist %d rep[0] %d", + m.n, dist, e.state.rep[0])) + } + state, state2, posState := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil { + return err + } + g := 0 + for ; g < 4; g++ { + if e.state.rep[g] == dist { + break + } + } + b := iverson(g < 4) + if err = e.state.isRep[state].Encode(e.re, b); err != nil { + return err + } + n := uint32(m.n - minMatchLen) + if b == 0 { + // simple match + e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] = + e.state.rep[2], e.state.rep[1], e.state.rep[0], dist + e.state.updateStateMatch() + if err = e.state.lenCodec.Encode(e.re, n, posState); err 
!= nil { + return err + } + return e.state.distCodec.Encode(e.re, dist, n) + } + b = iverson(g != 0) + if err = e.state.isRepG0[state].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + // g == 0 + b = iverson(m.n != 1) + if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + e.state.updateStateShortRep() + return nil + } + } else { + // g in {1,2,3} + b = iverson(g != 1) + if err = e.state.isRepG1[state].Encode(e.re, b); err != nil { + return err + } + if b == 1 { + // g in {2,3} + b = iverson(g != 2) + err = e.state.isRepG2[state].Encode(e.re, b) + if err != nil { + return err + } + if b == 1 { + e.state.rep[3] = e.state.rep[2] + } + e.state.rep[2] = e.state.rep[1] + } + e.state.rep[1] = e.state.rep[0] + e.state.rep[0] = dist + } + e.state.updateStateRep() + return e.state.repLenCodec.Encode(e.re, n, posState) +} + +// writeOp writes a single operation to the range encoder. The function +// checks whether there is enough space available to close the LZMA +// stream. +func (e *encoder) writeOp(op operation) error { + if e.re.Available() < int64(e.margin) { + return ErrLimit + } + switch x := op.(type) { + case lit: + return e.writeLiteral(x) + case match: + return e.writeMatch(x) + default: + panic("unexpected operation") + } +} + +// compress compressed data from the dictionary buffer. If the flag all +// is set, all data in the dictionary buffer will be compressed. The +// function returns ErrLimit if the underlying writer has reached its +// limit. +func (e *encoder) compress(flags compressFlags) error { + n := 0 + if flags&all == 0 { + n = maxMatchLen - 1 + } + d := e.dict + m := d.m + for d.Buffered() > n { + op := m.NextOp(e.state.rep) + if err := e.writeOp(op); err != nil { + return err + } + d.Discard(op.Len()) + } + return nil +} + +// eosMatch is a pseudo operation that indicates the end of the stream. 
+var eosMatch = match{distance: maxDistance, n: minMatchLen} + +// Close terminates the LZMA stream. If requested the end-of-stream +// marker will be written. If the byte writer limit has been or will be +// reached during compression of the remaining data in the buffer the +// LZMA stream will be closed and data will remain in the buffer. +func (e *encoder) Close() error { + err := e.compress(all) + if err != nil && err != ErrLimit { + return err + } + if e.marker { + if err := e.writeMatch(eosMatch); err != nil { + return err + } + } + err = e.re.Close() + return err +} + +// Compressed returns the number bytes of the input data that been +// compressed. +func (e *encoder) Compressed() int64 { + return e.dict.Pos() - e.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go new file mode 100644 index 00000000..4b3916ea --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go @@ -0,0 +1,149 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// matcher is an interface that supports the identification of the next +// operation. +type matcher interface { + io.Writer + SetDict(d *encoderDict) + NextOp(rep [4]uint32) operation +} + +// encoderDict provides the dictionary of the encoder. It includes an +// additional buffer atop of the actual dictionary. +type encoderDict struct { + buf buffer + m matcher + head int64 + capacity int + // preallocated array + data [maxMatchLen]byte +} + +// newEncoderDict creates the encoder dictionary. The argument bufSize +// defines the size of the additional buffer. 
+func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) { + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New( + "lzma: dictionary capacity out of range") + } + if bufSize < 1 { + return nil, errors.New( + "lzma: buffer size must be larger than zero") + } + d = &encoderDict{ + buf: *newBuffer(dictCap + bufSize), + capacity: dictCap, + m: m, + } + m.SetDict(d) + return d, nil +} + +// Discard discards n bytes. Note that n must not be larger than +// MaxMatchLen. +func (d *encoderDict) Discard(n int) { + p := d.data[:n] + k, _ := d.buf.Read(p) + if k < n { + panic(fmt.Errorf("lzma: can't discard %d bytes", n)) + } + d.head += int64(n) + d.m.Write(p) +} + +// Len returns the data available in the encoder dictionary. +func (d *encoderDict) Len() int { + n := d.buf.Available() + if int64(n) > d.head { + return int(d.head) + } + return n +} + +// DictLen returns the actual length of data in the dictionary. +func (d *encoderDict) DictLen() int { + if d.head < int64(d.capacity) { + return int(d.head) + } + return d.capacity +} + +// Available returns the number of bytes that can be written by a +// following Write call. +func (d *encoderDict) Available() int { + return d.buf.Available() - d.DictLen() +} + +// Write writes data into the dictionary buffer. Note that the position +// of the dictionary head will not be moved. If there is not enough +// space in the buffer ErrNoSpace will be returned. +func (d *encoderDict) Write(p []byte) (n int, err error) { + m := d.Available() + if len(p) > m { + p = p[:m] + err = ErrNoSpace + } + var e error + if n, e = d.buf.Write(p); e != nil { + err = e + } + return n, err +} + +// Pos returns the position of the head. +func (d *encoderDict) Pos() int64 { return d.head } + +// ByteAt returns the byte at the given distance. 
+func (d *encoderDict) ByteAt(distance int) byte { + if !(0 < distance && distance <= d.Len()) { + return 0 + } + i := d.buf.rear - distance + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// CopyN copies the last n bytes from the dictionary into the provided +// writer. This is used for copying uncompressed data into an +// uncompressed segment. +func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) { + if n <= 0 { + return 0, nil + } + m := d.Len() + if n > m { + n = m + err = ErrNoSpace + } + i := d.buf.rear - n + var e error + if i < 0 { + i += len(d.buf.data) + if written, e = w.Write(d.buf.data[i:]); e != nil { + return written, e + } + i = 0 + } + var k int + k, e = w.Write(d.buf.data[i:d.buf.rear]) + written += k + if e != nil { + err = e + } + return written, err +} + +// Buffered returns the number of bytes in the buffer. +func (d *encoderDict) Buffered() int { return d.buf.Buffered() } diff --git a/vendor/github.com/ulikunitz/xz/lzma/fox.lzma b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma new file mode 100644 index 00000000..5edad633 Binary files /dev/null and b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma differ diff --git a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go new file mode 100644 index 00000000..f66e9cdd --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go @@ -0,0 +1,309 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + + "github.com/ulikunitz/xz/internal/hash" +) + +/* For compression we need to find byte sequences that match the byte + * sequence at the dictionary head. A hash table is a simple method to + * provide this capability. + */ + +// maxMatches limits the number of matches requested from the Matches +// function. 
This controls the speed of the overall encoding. +const maxMatches = 16 + +// shortDists defines the number of short distances supported by the +// implementation. +const shortDists = 8 + +// The minimum is somehow arbitrary but the maximum is limited by the +// memory requirements of the hash table. +const ( + minTableExponent = 9 + maxTableExponent = 20 +) + +// newRoller contains the function used to create an instance of the +// hash.Roller. +var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) } + +// hashTable stores the hash table including the rolling hash method. +// +// We implement chained hashing into a circular buffer. Each entry in +// the circular buffer stores the delta distance to the next position with a +// word that has the same hash value. +type hashTable struct { + dict *encoderDict + // actual hash table + t []int64 + // circular list data with the offset to the next word + data []uint32 + front int + // mask for computing the index for the hash table + mask uint64 + // hash offset; initial value is -int64(wordLen) + hoff int64 + // length of the hashed word + wordLen int + // hash roller for computing the hash values for the Write + // method + wr hash.Roller + // hash roller for computing arbitrary hashes + hr hash.Roller + // preallocated slices + p [maxMatches]int64 + distances [maxMatches + shortDists]int +} + +// hashTableExponent derives the hash table exponent from the dictionary +// capacity. 
+func hashTableExponent(n uint32) int { + e := 30 - nlz32(n) + switch { + case e < minTableExponent: + e = minTableExponent + case e > maxTableExponent: + e = maxTableExponent + } + return e +} + +// newHashTable creates a new hash table for words of length wordLen +func newHashTable(capacity int, wordLen int) (t *hashTable, err error) { + if !(0 < capacity) { + return nil, errors.New( + "newHashTable: capacity must not be negative") + } + exp := hashTableExponent(uint32(capacity)) + if !(1 <= wordLen && wordLen <= 4) { + return nil, errors.New("newHashTable: " + + "argument wordLen out of range") + } + n := 1 << uint(exp) + if n <= 0 { + panic("newHashTable: exponent is too large") + } + t = &hashTable{ + t: make([]int64, n), + data: make([]uint32, capacity), + mask: (uint64(1) << uint(exp)) - 1, + hoff: -int64(wordLen), + wordLen: wordLen, + wr: newRoller(wordLen), + hr: newRoller(wordLen), + } + return t, nil +} + +func (t *hashTable) SetDict(d *encoderDict) { t.dict = d } + +// buffered returns the number of bytes that are currently hashed. +func (t *hashTable) buffered() int { + n := t.hoff + 1 + switch { + case n <= 0: + return 0 + case n >= int64(len(t.data)): + return len(t.data) + } + return int(n) +} + +// addIndex adds n to an index ensuring that is stays inside the +// circular buffer for the hash chain. +func (t *hashTable) addIndex(i, n int) int { + i += n - len(t.data) + if i < 0 { + i += len(t.data) + } + return i +} + +// putDelta puts the delta instance at the current front of the circular +// chain buffer. +func (t *hashTable) putDelta(delta uint32) { + t.data[t.front] = delta + t.front = t.addIndex(t.front, 1) +} + +// putEntry puts a new entry into the hash table. If there is already a +// value stored it is moved into the circular chain buffer. 
+func (t *hashTable) putEntry(h uint64, pos int64) { + if pos < 0 { + return + } + i := h & t.mask + old := t.t[i] - 1 + t.t[i] = pos + 1 + var delta int64 + if old >= 0 { + delta = pos - old + if delta > 1<<32-1 || delta > int64(t.buffered()) { + delta = 0 + } + } + t.putDelta(uint32(delta)) +} + +// WriteByte converts a single byte into a hash and puts them into the hash +// table. +func (t *hashTable) WriteByte(b byte) error { + h := t.wr.RollByte(b) + t.hoff++ + t.putEntry(h, t.hoff) + return nil +} + +// Write converts the bytes provided into hash tables and stores the +// abbreviated offsets into the hash table. The method will never return an +// error. +func (t *hashTable) Write(p []byte) (n int, err error) { + for _, b := range p { + // WriteByte doesn't generate an error. + t.WriteByte(b) + } + return len(p), nil +} + +// getMatches the matches for a specific hash. The functions returns the +// number of positions found. +// +// TODO: Make a getDistances because that we are actually interested in. +func (t *hashTable) getMatches(h uint64, positions []int64) (n int) { + if t.hoff < 0 || len(positions) == 0 { + return 0 + } + buffered := t.buffered() + tailPos := t.hoff + 1 - int64(buffered) + rear := t.front - buffered + if rear >= 0 { + rear -= len(t.data) + } + // get the slot for the hash + pos := t.t[h&t.mask] - 1 + delta := pos - tailPos + for { + if delta < 0 { + return n + } + positions[n] = tailPos + delta + n++ + if n >= len(positions) { + return n + } + i := rear + int(delta) + if i < 0 { + i += len(t.data) + } + u := t.data[i] + if u == 0 { + return n + } + delta -= int64(u) + } +} + +// hash computes the rolling hash for the word stored in p. For correct +// results its length must be equal to t.wordLen. +func (t *hashTable) hash(p []byte) uint64 { + var h uint64 + for _, b := range p { + h = t.hr.RollByte(b) + } + return h +} + +// Matches fills the positions slice with potential matches. 
The +// functions returns the number of positions filled into positions. The +// byte slice p must have word length of the hash table. +func (t *hashTable) Matches(p []byte, positions []int64) int { + if len(p) != t.wordLen { + panic(fmt.Errorf( + "byte slice must have length %d", t.wordLen)) + } + h := t.hash(p) + return t.getMatches(h, positions) +} + +// NextOp identifies the next operation using the hash table. +// +// TODO: Use all repetitions to find matches. +func (t *hashTable) NextOp(rep [4]uint32) operation { + // get positions + data := t.dict.data[:maxMatchLen] + n, _ := t.dict.buf.Peek(data) + data = data[:n] + var p []int64 + if n < t.wordLen { + p = t.p[:0] + } else { + p = t.p[:maxMatches] + n = t.Matches(data[:t.wordLen], p) + p = p[:n] + } + + // convert positions in potential distances + head := t.dict.head + dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8) + for _, pos := range p { + dis := int(head - pos) + if dis > shortDists { + dists = append(dists, dis) + } + } + + // check distances + var m match + dictLen := t.dict.DictLen() + for _, dist := range dists { + if dist > dictLen { + continue + } + + // Here comes a trick. We are only interested in matches + // that are longer than the matches we have been found + // before. So before we test the whole byte sequence at + // the given distance, we test the first byte that would + // make the match longer. If it doesn't match the byte + // to match, we don't to care any longer. + i := t.dict.buf.rear - dist + m.n + if i < 0 { + i += len(t.dict.buf.data) + } + if t.dict.buf.data[i] != data[m.n] { + // We can't get a longer match. Jump to the next + // distance. + continue + } + + n := t.dict.buf.matchLen(dist, data) + switch n { + case 0: + continue + case 1: + if uint32(dist-minDistance) != rep[0] { + continue + } + } + if n > m.n { + m = match{int64(dist), n} + if n == len(data) { + // No better match will be found. 
+ break + } + } + } + + if m.n == 0 { + return lit{data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go new file mode 100644 index 00000000..1ae7d80c --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/header.go @@ -0,0 +1,167 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// uint32LE reads an uint32 integer from a byte slice +func uint32LE(b []byte) uint32 { + x := uint32(b[3]) << 24 + x |= uint32(b[2]) << 16 + x |= uint32(b[1]) << 8 + x |= uint32(b[0]) + return x +} + +// uint64LE converts the uint64 value stored as little endian to an uint64 +// value. +func uint64LE(b []byte) uint64 { + x := uint64(b[7]) << 56 + x |= uint64(b[6]) << 48 + x |= uint64(b[5]) << 40 + x |= uint64(b[4]) << 32 + x |= uint64(b[3]) << 24 + x |= uint64(b[2]) << 16 + x |= uint64(b[1]) << 8 + x |= uint64(b[0]) + return x +} + +// putUint32LE puts an uint32 integer into a byte slice that must have at least +// a length of 4 bytes. +func putUint32LE(b []byte, x uint32) { + b[0] = byte(x) + b[1] = byte(x >> 8) + b[2] = byte(x >> 16) + b[3] = byte(x >> 24) +} + +// putUint64LE puts the uint64 value into the byte slice as little endian +// value. The byte slice b must have at least place for 8 bytes. +func putUint64LE(b []byte, x uint64) { + b[0] = byte(x) + b[1] = byte(x >> 8) + b[2] = byte(x >> 16) + b[3] = byte(x >> 24) + b[4] = byte(x >> 32) + b[5] = byte(x >> 40) + b[6] = byte(x >> 48) + b[7] = byte(x >> 56) +} + +// noHeaderSize defines the value of the length field in the LZMA header. +const noHeaderSize uint64 = 1<<64 - 1 + +// HeaderLen provides the length of the LZMA file header. +const HeaderLen = 13 + +// header represents the header of an LZMA file. 
+type header struct { + properties Properties + dictCap int + // uncompressed size; negative value if no size is given + size int64 +} + +// marshalBinary marshals the header. +func (h *header) marshalBinary() (data []byte, err error) { + if err = h.properties.verify(); err != nil { + return nil, err + } + if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) { + return nil, fmt.Errorf("lzma: DictCap %d out of range", + h.dictCap) + } + + data = make([]byte, 13) + + // property byte + data[0] = h.properties.Code() + + // dictionary capacity + putUint32LE(data[1:5], uint32(h.dictCap)) + + // uncompressed size + var s uint64 + if h.size > 0 { + s = uint64(h.size) + } else { + s = noHeaderSize + } + putUint64LE(data[5:], s) + + return data, nil +} + +// unmarshalBinary unmarshals the header. +func (h *header) unmarshalBinary(data []byte) error { + if len(data) != HeaderLen { + return errors.New("lzma.unmarshalBinary: data has wrong length") + } + + // properties + var err error + if h.properties, err = PropertiesForCode(data[0]); err != nil { + return err + } + + // dictionary capacity + h.dictCap = int(uint32LE(data[1:])) + if h.dictCap < 0 { + return errors.New( + "LZMA header: dictionary capacity exceeds maximum " + + "integer") + } + + // uncompressed size + s := uint64LE(data[5:]) + if s == noHeaderSize { + h.size = -1 + } else { + h.size = int64(s) + if h.size < 0 { + return errors.New( + "LZMA header: uncompressed size " + + "out of int64 range") + } + } + + return nil +} + +// validDictCap checks whether the dictionary capacity is correct. This +// is used to weed out wrong file headers. +func validDictCap(dictcap int) bool { + if int64(dictcap) == MaxDictCap { + return true + } + for n := uint(10); n < 32; n++ { + if dictcap == 1<= 10 or 2^32-1. If +// there is an explicit size it must not exceed 256 GiB. The length of +// the data argument must be HeaderLen. 
+func ValidHeader(data []byte) bool { + var h header + if err := h.unmarshalBinary(data); err != nil { + return false + } + if !validDictCap(h.dictCap) { + return false + } + return h.size < 0 || h.size <= 1<<38 +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go new file mode 100644 index 00000000..081fc840 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go @@ -0,0 +1,398 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +const ( + // maximum size of compressed data in a chunk + maxCompressed = 1 << 16 + // maximum size of uncompressed data in a chunk + maxUncompressed = 1 << 21 +) + +// chunkType represents the type of an LZMA2 chunk. Note that this +// value is an internal representation and no actual encoding of a LZMA2 +// chunk header. +type chunkType byte + +// Possible values for the chunk type. +const ( + // end of stream + cEOS chunkType = iota + // uncompressed; reset dictionary + cUD + // uncompressed; no reset of dictionary + cU + // LZMA compressed; no reset + cL + // LZMA compressed; reset state + cLR + // LZMA compressed; reset state; new property value + cLRN + // LZMA compressed; reset state; new property value; reset dictionary + cLRND +) + +// chunkTypeStrings provide a string representation for the chunk types. +var chunkTypeStrings = [...]string{ + cEOS: "EOS", + cU: "U", + cUD: "UD", + cL: "L", + cLR: "LR", + cLRN: "LRN", + cLRND: "LRND", +} + +// String returns a string representation of the chunk type. +func (c chunkType) String() string { + if !(cEOS <= c && c <= cLRND) { + return "unknown" + } + return chunkTypeStrings[c] +} + +// Actual encodings for the chunk types in the value. Note that the high +// uncompressed size bits are stored in the header byte additionally. 
+const ( + hEOS = 0 + hUD = 1 + hU = 2 + hL = 1 << 7 + hLR = 1<<7 | 1<<5 + hLRN = 1<<7 | 1<<6 + hLRND = 1<<7 | 1<<6 | 1<<5 +) + +// errHeaderByte indicates an unsupported value for the chunk header +// byte. These bytes starts the variable-length chunk header. +var errHeaderByte = errors.New("lzma: unsupported chunk header byte") + +// headerChunkType converts the header byte into a chunk type. It +// ignores the uncompressed size bits in the chunk header byte. +func headerChunkType(h byte) (c chunkType, err error) { + if h&hL == 0 { + // no compression + switch h { + case hEOS: + c = cEOS + case hUD: + c = cUD + case hU: + c = cU + default: + return 0, errHeaderByte + } + return + } + switch h & hLRND { + case hL: + c = cL + case hLR: + c = cLR + case hLRN: + c = cLRN + case hLRND: + c = cLRND + default: + return 0, errHeaderByte + } + return +} + +// uncompressedHeaderLen provides the length of an uncompressed header +const uncompressedHeaderLen = 3 + +// headerLen returns the length of the LZMA2 header for a given chunk +// type. +func headerLen(c chunkType) int { + switch c { + case cEOS: + return 1 + case cU, cUD: + return uncompressedHeaderLen + case cL, cLR: + return 5 + case cLRN, cLRND: + return 6 + } + panic(fmt.Errorf("unsupported chunk type %d", c)) +} + +// chunkHeader represents the contents of a chunk header. +type chunkHeader struct { + ctype chunkType + uncompressed uint32 + compressed uint16 + props Properties +} + +// String returns a string representation of the chunk header. +func (h *chunkHeader) String() string { + return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed, + h.compressed, &h.props) +} + +// UnmarshalBinary reads the content of the chunk header from the data +// slice. The slice must have the correct length. 
+func (h *chunkHeader) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return errors.New("no data") + } + c, err := headerChunkType(data[0]) + if err != nil { + return err + } + + n := headerLen(c) + if len(data) < n { + return errors.New("incomplete data") + } + if len(data) > n { + return errors.New("invalid data length") + } + + *h = chunkHeader{ctype: c} + if c == cEOS { + return nil + } + + h.uncompressed = uint32(uint16BE(data[1:3])) + if c <= cU { + return nil + } + h.uncompressed |= uint32(data[0]&^hLRND) << 16 + + h.compressed = uint16BE(data[3:5]) + if c <= cLR { + return nil + } + + h.props, err = PropertiesForCode(data[5]) + return err +} + +// MarshalBinary encodes the chunk header value. The function checks +// whether the content of the chunk header is correct. +func (h *chunkHeader) MarshalBinary() (data []byte, err error) { + if h.ctype > cLRND { + return nil, errors.New("invalid chunk type") + } + if err = h.props.verify(); err != nil { + return nil, err + } + + data = make([]byte, headerLen(h.ctype)) + + switch h.ctype { + case cEOS: + return data, nil + case cUD: + data[0] = hUD + case cU: + data[0] = hU + case cL: + data[0] = hL + case cLR: + data[0] = hLR + case cLRN: + data[0] = hLRN + case cLRND: + data[0] = hLRND + } + + putUint16BE(data[1:3], uint16(h.uncompressed)) + if h.ctype <= cU { + return data, nil + } + data[0] |= byte(h.uncompressed>>16) &^ hLRND + + putUint16BE(data[3:5], h.compressed) + if h.ctype <= cLR { + return data, nil + } + + data[5] = h.props.Code() + return data, nil +} + +// readChunkHeader reads the chunk header from the IO reader. 
+func readChunkHeader(r io.Reader) (h *chunkHeader, err error) { + p := make([]byte, 1, 6) + if _, err = io.ReadFull(r, p); err != nil { + return + } + c, err := headerChunkType(p[0]) + if err != nil { + return + } + p = p[:headerLen(c)] + if _, err = io.ReadFull(r, p[1:]); err != nil { + return + } + h = new(chunkHeader) + if err = h.UnmarshalBinary(p); err != nil { + return nil, err + } + return h, nil +} + +// uint16BE converts a big-endian uint16 representation to an uint16 +// value. +func uint16BE(p []byte) uint16 { + return uint16(p[0])<<8 | uint16(p[1]) +} + +// putUint16BE puts the big-endian uint16 presentation into the given +// slice. +func putUint16BE(p []byte, x uint16) { + p[0] = byte(x >> 8) + p[1] = byte(x) +} + +// chunkState is used to manage the state of the chunks +type chunkState byte + +// start and stop define the initial and terminating state of the chunk +// state +const ( + start chunkState = 'S' + stop chunkState = 'T' +) + +// errors for the chunk state handling +var ( + errChunkType = errors.New("lzma: unexpected chunk type") + errState = errors.New("lzma: wrong chunk state") +) + +// next transitions state based on chunk type input +func (c *chunkState) next(ctype chunkType) error { + switch *c { + // start state + case 'S': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cLRND: + *c = 'L' + default: + return errChunkType + } + // normal LZMA mode + case 'L': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cU: + *c = 'U' + case cL, cLR, cLRN, cLRND: + break + default: + return errChunkType + } + // reset required + case 'R': + switch ctype { + case cEOS: + *c = 'T' + case cUD, cU: + break + case cLRN, cLRND: + *c = 'L' + default: + return errChunkType + } + // uncompressed + case 'U': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cU: + break + case cL, cLR, cLRN, cLRND: + *c = 'L' + default: + return errChunkType + } + // terminal state + case 'T': + return 
errChunkType + default: + return errState + } + return nil +} + +// defaultChunkType returns the default chunk type for each chunk state. +func (c chunkState) defaultChunkType() chunkType { + switch c { + case 'S': + return cLRND + case 'L', 'U': + return cL + case 'R': + return cLRN + default: + // no error + return cEOS + } +} + +// maxDictCap defines the maximum dictionary capacity supported by the +// LZMA2 dictionary capacity encoding. +const maxDictCap = 1<<32 - 1 + +// maxDictCapCode defines the maximum dictionary capacity code. +const maxDictCapCode = 40 + +// The function decodes the dictionary capacity byte, but doesn't change +// for the correct range of the given byte. +func decodeDictCap(c byte) int64 { + return (2 | int64(c)&1) << (11 + (c>>1)&0x1f) +} + +// DecodeDictCap decodes the encoded dictionary capacity. The function +// returns an error if the code is out of range. +func DecodeDictCap(c byte) (n int64, err error) { + if c >= maxDictCapCode { + if c == maxDictCapCode { + return maxDictCap, nil + } + return 0, errors.New("lzma: invalid dictionary size code") + } + return decodeDictCap(c), nil +} + +// EncodeDictCap encodes a dictionary capacity. The function returns the +// code for the capacity that is greater or equal n. If n exceeds the +// maximum support dictionary capacity, the maximum value is returned. +func EncodeDictCap(n int64) byte { + a, b := byte(0), byte(40) + for a < b { + c := a + (b-a)>>1 + m := decodeDictCap(c) + if n <= m { + if n == m { + return c + } + b = c + } else { + a = c + 1 + } + } + return a +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go new file mode 100644 index 00000000..1ea5320a --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go @@ -0,0 +1,115 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package lzma + +import "errors" + +// maxPosBits defines the number of bits of the position value that are used to +// to compute the posState value. The value is used to select the tree codec +// for length encoding and decoding. +const maxPosBits = 4 + +// minMatchLen and maxMatchLen give the minimum and maximum values for +// encoding and decoding length values. minMatchLen is also used as base +// for the encoded length values. +const ( + minMatchLen = 2 + maxMatchLen = minMatchLen + 16 + 256 - 1 +) + +// lengthCodec support the encoding of the length value. +type lengthCodec struct { + choice [2]prob + low [1 << maxPosBits]treeCodec + mid [1 << maxPosBits]treeCodec + high treeCodec +} + +// deepcopy initializes the lc value as deep copy of the source value. +func (lc *lengthCodec) deepcopy(src *lengthCodec) { + if lc == src { + return + } + lc.choice = src.choice + for i := range lc.low { + lc.low[i].deepcopy(&src.low[i]) + } + for i := range lc.mid { + lc.mid[i].deepcopy(&src.mid[i]) + } + lc.high.deepcopy(&src.high) +} + +// init initializes a new length codec. +func (lc *lengthCodec) init() { + for i := range lc.choice { + lc.choice[i] = probInit + } + for i := range lc.low { + lc.low[i] = makeTreeCodec(3) + } + for i := range lc.mid { + lc.mid[i] = makeTreeCodec(3) + } + lc.high = makeTreeCodec(8) +} + +// Encode encodes the length offset. The length offset l can be compute by +// subtracting minMatchLen (2) from the actual length. 
+// +// l = length - minMatchLen +func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32, +) (err error) { + if l > maxMatchLen-minMatchLen { + return errors.New("lengthCodec.Encode: l out of range") + } + if l < 8 { + if err = lc.choice[0].Encode(e, 0); err != nil { + return + } + return lc.low[posState].Encode(e, l) + } + if err = lc.choice[0].Encode(e, 1); err != nil { + return + } + if l < 16 { + if err = lc.choice[1].Encode(e, 0); err != nil { + return + } + return lc.mid[posState].Encode(e, l-8) + } + if err = lc.choice[1].Encode(e, 1); err != nil { + return + } + if err = lc.high.Encode(e, l-16); err != nil { + return + } + return nil +} + +// Decode reads the length offset. Add minMatchLen to compute the actual length +// to the length offset l. +func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32, +) (l uint32, err error) { + var b uint32 + if b, err = lc.choice[0].Decode(d); err != nil { + return + } + if b == 0 { + l, err = lc.low[posState].Decode(d) + return + } + if b, err = lc.choice[1].Decode(d); err != nil { + return + } + if b == 0 { + l, err = lc.mid[posState].Decode(d) + l += 8 + return + } + l, err = lc.high.Decode(d) + l += 16 + return +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go new file mode 100644 index 00000000..e4ef5fc5 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go @@ -0,0 +1,125 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// literalCodec supports the encoding of literal. It provides 768 probability +// values per literal state. The upper 512 probabilities are used with the +// context of a match bit. +type literalCodec struct { + probs []prob +} + +// deepcopy initializes literal codec c as a deep copy of the source. 
+func (c *literalCodec) deepcopy(src *literalCodec) { + if c == src { + return + } + c.probs = make([]prob, len(src.probs)) + copy(c.probs, src.probs) +} + +// init initializes the literal codec. +func (c *literalCodec) init(lc, lp int) { + switch { + case !(minLC <= lc && lc <= maxLC): + panic("lc out of range") + case !(minLP <= lp && lp <= maxLP): + panic("lp out of range") + } + c.probs = make([]prob, 0x300<= 7 { + m := uint32(match) + for { + matchBit := (m >> 7) & 1 + m <<= 1 + bit := (r >> 7) & 1 + r <<= 1 + i := ((1 + matchBit) << 8) | symbol + if err = probs[i].Encode(e, bit); err != nil { + return + } + symbol = (symbol << 1) | bit + if matchBit != bit { + break + } + if symbol >= 0x100 { + break + } + } + } + for symbol < 0x100 { + bit := (r >> 7) & 1 + r <<= 1 + if err = probs[symbol].Encode(e, bit); err != nil { + return + } + symbol = (symbol << 1) | bit + } + return nil +} + +// Decode decodes a literal byte using the range decoder as well as the LZMA +// state, a match byte, and the literal state. +func (c *literalCodec) Decode(d *rangeDecoder, + state uint32, match byte, litState uint32, +) (s byte, err error) { + k := litState * 0x300 + probs := c.probs[k : k+0x300] + symbol := uint32(1) + if state >= 7 { + m := uint32(match) + for { + matchBit := (m >> 7) & 1 + m <<= 1 + i := ((1 + matchBit) << 8) | symbol + bit, err := d.DecodeBit(&probs[i]) + if err != nil { + return 0, err + } + symbol = (symbol << 1) | bit + if matchBit != bit { + break + } + if symbol >= 0x100 { + break + } + } + } + for symbol < 0x100 { + bit, err := d.DecodeBit(&probs[symbol]) + if err != nil { + return 0, err + } + symbol = (symbol << 1) | bit + } + s = byte(symbol - 0x100) + return s, nil +} + +// minLC and maxLC define the range for LC values. +const ( + minLC = 0 + maxLC = 8 +) + +// minLC and maxLC define the range for LP values. 
+const ( + minLP = 0 + maxLP = 4 +) diff --git a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go new file mode 100644 index 00000000..02dfb8bf --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go @@ -0,0 +1,52 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import "errors" + +// MatchAlgorithm identifies an algorithm to find matches in the +// dictionary. +type MatchAlgorithm byte + +// Supported matcher algorithms. +const ( + HashTable4 MatchAlgorithm = iota + BinaryTree +) + +// maStrings are used by the String method. +var maStrings = map[MatchAlgorithm]string{ + HashTable4: "HashTable4", + BinaryTree: "BinaryTree", +} + +// String returns a string representation of the Matcher. +func (a MatchAlgorithm) String() string { + if s, ok := maStrings[a]; ok { + return s + } + return "unknown" +} + +var errUnsupportedMatchAlgorithm = errors.New( + "lzma: unsupported match algorithm value") + +// verify checks whether the matcher value is supported. +func (a MatchAlgorithm) verify() error { + if _, ok := maStrings[a]; !ok { + return errUnsupportedMatchAlgorithm + } + return nil +} + +func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) { + switch a { + case HashTable4: + return newHashTable(dictCap, 4) + case BinaryTree: + return newBinTree(dictCap) + } + return nil, errUnsupportedMatchAlgorithm +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go new file mode 100644 index 00000000..7b7eddc3 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go @@ -0,0 +1,55 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package lzma + +import ( + "fmt" + "unicode" +) + +// operation represents an operation on the dictionary during encoding or +// decoding. +type operation interface { + Len() int +} + +// rep represents a repetition at the given distance and the given length +type match struct { + // supports all possible distance values, including the eos marker + distance int64 + // length + n int +} + +// Len returns the number of bytes matched. +func (m match) Len() int { + return m.n +} + +// String returns a string representation for the repetition. +func (m match) String() string { + return fmt.Sprintf("M{%d,%d}", m.distance, m.n) +} + +// lit represents a single byte literal. +type lit struct { + b byte +} + +// Len returns 1 for the single byte literal. +func (l lit) Len() int { + return 1 +} + +// String returns a string representation for the literal. +func (l lit) String() string { + var c byte + if unicode.IsPrint(rune(l.b)) { + c = l.b + } else { + c = '.' + } + return fmt.Sprintf("L{%c/%02x}", c, l.b) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/prob.go b/vendor/github.com/ulikunitz/xz/lzma/prob.go new file mode 100644 index 00000000..2feccba1 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/prob.go @@ -0,0 +1,53 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// movebits defines the number of bits used for the updates of probability +// values. +const movebits = 5 + +// probbits defines the number of bits of a probability value. +const probbits = 11 + +// probInit defines 0.5 as initial value for prob values. +const probInit prob = 1 << (probbits - 1) + +// Type prob represents probabilities. The type can also be used to encode and +// decode single bits. +type prob uint16 + +// Dec decreases the probability. The decrease is proportional to the +// probability value. 
+func (p *prob) dec() { + *p -= *p >> movebits +} + +// Inc increases the probability. The Increase is proportional to the +// difference of 1 and the probability value. +func (p *prob) inc() { + *p += ((1 << probbits) - *p) >> movebits +} + +// Computes the new bound for a given range using the probability value. +func (p prob) bound(r uint32) uint32 { + return (r >> probbits) * uint32(p) +} + +// Bits returns 1. One is the number of bits that can be encoded or decoded +// with a single prob value. +func (p prob) Bits() int { + return 1 +} + +// Encode encodes the least-significant bit of v. Note that the p value will be +// changed. +func (p *prob) Encode(e *rangeEncoder, v uint32) error { + return e.EncodeBit(v, p) +} + +// Decode decodes a single bit. Note that the p value will change. +func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) { + return d.DecodeBit(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/properties.go b/vendor/github.com/ulikunitz/xz/lzma/properties.go new file mode 100644 index 00000000..15b754cc --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/properties.go @@ -0,0 +1,69 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// maximum and minimum values for the LZMA properties. +const ( + minPB = 0 + maxPB = 4 +) + +// maxPropertyCode is the possible maximum of a properties code byte. +const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1 + +// Properties contains the parameters LC, LP and PB. The parameter LC +// defines the number of literal context bits; parameter LP the number +// of literal position bits and PB the number of position bits. +type Properties struct { + LC int + LP int + PB int +} + +// String returns the properties in a string representation. 
+func (p *Properties) String() string { + return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB) +} + +// PropertiesForCode converts a properties code byte into a Properties value. +func PropertiesForCode(code byte) (p Properties, err error) { + if code > maxPropertyCode { + return p, errors.New("lzma: invalid properties code") + } + p.LC = int(code % 9) + code /= 9 + p.LP = int(code % 5) + code /= 5 + p.PB = int(code % 5) + return p, err +} + +// verify checks the properties for correctness. +func (p *Properties) verify() error { + if p == nil { + return errors.New("lzma: properties are nil") + } + if !(minLC <= p.LC && p.LC <= maxLC) { + return errors.New("lzma: lc out of range") + } + if !(minLP <= p.LP && p.LP <= maxLP) { + return errors.New("lzma: lp out of range") + } + if !(minPB <= p.PB && p.PB <= maxPB) { + return errors.New("lzma: pb out of range") + } + return nil +} + +// Code converts the properties to a byte. The function assumes that +// the properties components are all in range. +func (p Properties) Code() byte { + return byte((p.PB*5+p.LP)*9 + p.LC) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go new file mode 100644 index 00000000..4b0fee3f --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go @@ -0,0 +1,222 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// rangeEncoder implements range encoding of single bits. The low value can +// overflow therefore we need uint64. The cache value is used to handle +// overflows. +type rangeEncoder struct { + lbw *LimitedByteWriter + nrange uint32 + low uint64 + cacheLen int64 + cache byte +} + +// maxInt64 provides the maximal value of the int64 type +const maxInt64 = 1<<63 - 1 + +// newRangeEncoder creates a new range encoder. 
+func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) { + lbw, ok := bw.(*LimitedByteWriter) + if !ok { + lbw = &LimitedByteWriter{BW: bw, N: maxInt64} + } + return &rangeEncoder{ + lbw: lbw, + nrange: 0xffffffff, + cacheLen: 1}, nil +} + +// Available returns the number of bytes that still can be written. The +// method takes the bytes that will be currently written by Close into +// account. +func (e *rangeEncoder) Available() int64 { + return e.lbw.N - (e.cacheLen + 4) +} + +// writeByte writes a single byte to the underlying writer. An error is +// returned if the limit is reached. The written byte will be counted if +// the underlying writer doesn't return an error. +func (e *rangeEncoder) writeByte(c byte) error { + if e.Available() < 1 { + return ErrLimit + } + return e.lbw.WriteByte(c) +} + +// DirectEncodeBit encodes the least-significant bit of b with probability 1/2. +func (e *rangeEncoder) DirectEncodeBit(b uint32) error { + e.nrange >>= 1 + e.low += uint64(e.nrange) & (0 - (uint64(b) & 1)) + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// EncodeBit encodes the least significant bit of b. The p value will be +// updated by the function depending on the bit encoded. +func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error { + bound := p.bound(e.nrange) + if b&1 == 0 { + e.nrange = bound + p.inc() + } else { + e.low += uint64(bound) + e.nrange -= bound + p.dec() + } + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// Close writes a complete copy of the low value. +func (e *rangeEncoder) Close() error { + for i := 0; i < 5; i++ { + if err := e.shiftLow(); err != nil { + return err + } + } + return nil +} + +// shiftLow shifts the low value for 8 bit. The shifted byte is written into +// the byte writer. The cache value is used to handle overflows. 
+func (e *rangeEncoder) shiftLow() error { + if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 { + tmp := e.cache + for { + err := e.writeByte(tmp + byte(e.low>>32)) + if err != nil { + return err + } + tmp = 0xff + e.cacheLen-- + if e.cacheLen <= 0 { + if e.cacheLen < 0 { + panic("negative cacheLen") + } + break + } + } + e.cache = byte(uint32(e.low) >> 24) + } + e.cacheLen++ + e.low = uint64(uint32(e.low) << 8) + return nil +} + +// rangeDecoder decodes single bits of the range encoding stream. +type rangeDecoder struct { + br io.ByteReader + nrange uint32 + code uint32 +} + +// newRangeDecoder initializes a range decoder. It reads five bytes from the +// reader and therefore may return an error. +func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) { + d = &rangeDecoder{br: br, nrange: 0xffffffff} + + b, err := d.br.ReadByte() + if err != nil { + return nil, err + } + if b != 0 { + return nil, errors.New("newRangeDecoder: first byte not zero") + } + + for i := 0; i < 4; i++ { + if err = d.updateCode(); err != nil { + return nil, err + } + } + + if d.code >= d.nrange { + return nil, errors.New("newRangeDecoder: d.code >= d.nrange") + } + + return d, nil +} + +// possiblyAtEnd checks whether the decoder may be at the end of the stream. +func (d *rangeDecoder) possiblyAtEnd() bool { + return d.code == 0 +} + +// DirectDecodeBit decodes a bit with probability 1/2. The return value b will +// contain the bit at the least-significant position. All other bits will be +// zero. +func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) { + d.nrange >>= 1 + d.code -= d.nrange + t := 0 - (d.code >> 31) + d.code += d.nrange & t + b = (t + 1) & 1 + + // d.code will stay less then d.nrange + + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// decodeBit decodes a single bit. 
The bit will be returned at the +// least-significant position. All other bits will be zero. The probability +// value will be updated. +func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) { + bound := p.bound(d.nrange) + if d.code < bound { + d.nrange = bound + p.inc() + b = 0 + } else { + d.code -= bound + d.nrange -= bound + p.dec() + b = 1 + } + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// updateCode reads a new byte into the code. +func (d *rangeDecoder) updateCode() error { + b, err := d.br.ReadByte() + if err != nil { + return err + } + d.code = (d.code << 8) | uint32(b) + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go new file mode 100644 index 00000000..ae911c38 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go @@ -0,0 +1,100 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lzma supports the decoding and encoding of LZMA streams. +// Reader and Writer support the classic LZMA format. Reader2 and +// Writer2 support the decoding and encoding of LZMA2 streams. +// +// The package is written completely in Go and doesn't rely on any external +// library. +package lzma + +import ( + "errors" + "io" +) + +// ReaderConfig stores the parameters for the reader of the classic LZMA +// format. +type ReaderConfig struct { + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *ReaderConfig) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero values will +// be replaced by default values. 
+func (c *ReaderConfig) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader provides a reader for LZMA files or streams. +type Reader struct { + lzma io.Reader + h header + d *decoder +} + +// NewReader creates a new reader for an LZMA stream using the classic +// format. NewReader reads and checks the header of the LZMA stream. +func NewReader(lzma io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(lzma) +} + +// NewReader creates a new reader for an LZMA stream in the classic +// format. The function reads and verifies the the header of the LZMA +// stream. +func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(lzma, data); err != nil { + if err == io.EOF { + return nil, errors.New("lzma: unexpected EOF") + } + return nil, err + } + r = &Reader{lzma: lzma} + if err = r.h.unmarshalBinary(data); err != nil { + return nil, err + } + if r.h.dictCap < MinDictCap { + r.h.dictCap = MinDictCap + } + dictCap := r.h.dictCap + if c.DictCap > dictCap { + dictCap = c.DictCap + } + + state := newState(r.h.properties) + dict, err := newDecoderDict(dictCap) + if err != nil { + return nil, err + } + r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size) + if err != nil { + return nil, err + } + return r, nil +} + +// EOSMarker indicates that an EOS marker has been encountered. +func (r *Reader) EOSMarker() bool { + return r.d.eosMarker +} + +// Read returns uncompressed data. 
+func (r *Reader) Read(p []byte) (n int, err error) { + return r.d.Read(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go new file mode 100644 index 00000000..f36e2650 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go @@ -0,0 +1,231 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" + + "github.com/ulikunitz/xz/internal/xlog" +) + +// Reader2Config stores the parameters for the LZMA2 reader. +// format. +type Reader2Config struct { + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *Reader2Config) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero configuration values +// will be replaced by default values. +func (c *Reader2Config) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader2 supports the reading of LZMA2 chunk sequences. Note that the +// first chunk should have a dictionary reset and the first compressed +// chunk a properties reset. The chunk sequence may not be terminated by +// an end-of-stream chunk. +type Reader2 struct { + r io.Reader + err error + + dict *decoderDict + ur *uncompressedReader + decoder *decoder + chunkReader io.Reader + + cstate chunkState +} + +// NewReader2 creates a reader for an LZMA2 chunk sequence. +func NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + return Reader2Config{}.NewReader2(lzma2) +} + +// NewReader2 creates an LZMA2 reader using the given configuration. 
+func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader2{r: lzma2, cstate: start} + r.dict, err = newDecoderDict(c.DictCap) + if err != nil { + return nil, err + } + if err = r.startChunk(); err != nil { + r.err = err + } + return r, nil +} + +// uncompressed tests whether the chunk type specifies an uncompressed +// chunk. +func uncompressed(ctype chunkType) bool { + return ctype == cU || ctype == cUD +} + +// startChunk parses a new chunk. +func (r *Reader2) startChunk() error { + r.chunkReader = nil + header, err := readChunkHeader(r.r) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + xlog.Debugf("chunk header %v", header) + if err = r.cstate.next(header.ctype); err != nil { + return err + } + if r.cstate == stop { + return io.EOF + } + if header.ctype == cUD || header.ctype == cLRND { + r.dict.Reset() + } + size := int64(header.uncompressed) + 1 + if uncompressed(header.ctype) { + if r.ur != nil { + r.ur.Reopen(r.r, size) + } else { + r.ur = newUncompressedReader(r.r, r.dict, size) + } + r.chunkReader = r.ur + return nil + } + br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1)) + if r.decoder == nil { + state := newState(header.props) + r.decoder, err = newDecoder(br, state, r.dict, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil + } + switch header.ctype { + case cLR: + r.decoder.State.Reset() + case cLRN, cLRND: + r.decoder.State = newState(header.props) + } + err = r.decoder.Reopen(br, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil +} + +// Read reads data from the LZMA2 chunk sequence. 
+func (r *Reader2) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + for n < len(p) { + var k int + k, err = r.chunkReader.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + err = r.startChunk() + if err == nil { + continue + } + } + r.err = err + return n, err + } + if k == 0 { + r.err = errors.New("lzma: Reader2 doesn't get data") + return n, r.err + } + } + return n, nil +} + +// EOS returns whether the LZMA2 stream has been terminated by an +// end-of-stream chunk. +func (r *Reader2) EOS() bool { + return r.cstate == stop +} + +// uncompressedReader is used to read uncompressed chunks. +type uncompressedReader struct { + lr io.LimitedReader + Dict *decoderDict + eof bool + err error +} + +// newUncompressedReader initializes a new uncompressedReader. +func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader { + ur := &uncompressedReader{ + lr: io.LimitedReader{R: r, N: size}, + Dict: dict, + } + return ur +} + +// Reopen reinitializes an uncompressed reader. +func (ur *uncompressedReader) Reopen(r io.Reader, size int64) { + ur.err = nil + ur.eof = false + ur.lr = io.LimitedReader{R: r, N: size} +} + +// fill reads uncompressed data into the dictionary. +func (ur *uncompressedReader) fill() error { + if !ur.eof { + n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available())) + if err != io.EOF { + return err + } + ur.eof = true + if n > 0 { + return nil + } + } + if ur.lr.N != 0 { + return io.ErrUnexpectedEOF + } + return io.EOF +} + +// Read reads uncompressed data from the limited reader. 
+func (ur *uncompressedReader) Read(p []byte) (n int, err error) { + if ur.err != nil { + return 0, ur.err + } + for { + var k int + k, err = ur.Dict.Read(p[n:]) + n += k + if n >= len(p) { + return n, nil + } + if err != nil { + break + } + err = ur.fill() + if err != nil { + break + } + } + ur.err = err + return n, err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go new file mode 100644 index 00000000..34779c51 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/state.go @@ -0,0 +1,145 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// states defines the overall state count +const states = 12 + +// State maintains the full state of the operation encoding or decoding +// process. +type state struct { + rep [4]uint32 + isMatch [states << maxPosBits]prob + isRepG0Long [states << maxPosBits]prob + isRep [states]prob + isRepG0 [states]prob + isRepG1 [states]prob + isRepG2 [states]prob + litCodec literalCodec + lenCodec lengthCodec + repLenCodec lengthCodec + distCodec distCodec + state uint32 + posBitMask uint32 + Properties Properties +} + +// initProbSlice initializes a slice of probabilities. +func initProbSlice(p []prob) { + for i := range p { + p[i] = probInit + } +} + +// Reset sets all state information to the original values. +func (s *state) Reset() { + p := s.Properties + *s = state{ + Properties: p, + // dict: s.dict, + posBitMask: (uint32(1) << uint(p.PB)) - 1, + } + initProbSlice(s.isMatch[:]) + initProbSlice(s.isRep[:]) + initProbSlice(s.isRepG0[:]) + initProbSlice(s.isRepG1[:]) + initProbSlice(s.isRepG2[:]) + initProbSlice(s.isRepG0Long[:]) + s.litCodec.init(p.LC, p.LP) + s.lenCodec.init() + s.repLenCodec.init() + s.distCodec.init() +} + +// newState creates a new state from the give Properties. 
+func newState(p Properties) *state { + s := &state{Properties: p} + s.Reset() + return s +} + +// deepcopy initializes s as a deep copy of the source. +func (s *state) deepcopy(src *state) { + if s == src { + return + } + s.rep = src.rep + s.isMatch = src.isMatch + s.isRepG0Long = src.isRepG0Long + s.isRep = src.isRep + s.isRepG0 = src.isRepG0 + s.isRepG1 = src.isRepG1 + s.isRepG2 = src.isRepG2 + s.litCodec.deepcopy(&src.litCodec) + s.lenCodec.deepcopy(&src.lenCodec) + s.repLenCodec.deepcopy(&src.repLenCodec) + s.distCodec.deepcopy(&src.distCodec) + s.state = src.state + s.posBitMask = src.posBitMask + s.Properties = src.Properties +} + +// cloneState creates a new clone of the give state. +func cloneState(src *state) *state { + s := new(state) + s.deepcopy(src) + return s +} + +// updateStateLiteral updates the state for a literal. +func (s *state) updateStateLiteral() { + switch { + case s.state < 4: + s.state = 0 + return + case s.state < 10: + s.state -= 3 + return + } + s.state -= 6 +} + +// updateStateMatch updates the state for a match. +func (s *state) updateStateMatch() { + if s.state < 7 { + s.state = 7 + } else { + s.state = 10 + } +} + +// updateStateRep updates the state for a repetition. +func (s *state) updateStateRep() { + if s.state < 7 { + s.state = 8 + } else { + s.state = 11 + } +} + +// updateStateShortRep updates the state for a short repetition. +func (s *state) updateStateShortRep() { + if s.state < 7 { + s.state = 9 + } else { + s.state = 11 + } +} + +// states computes the states of the operation codec. +func (s *state) states(dictHead int64) (state1, state2, posState uint32) { + state1 = s.state + posState = uint32(dictHead) & s.posBitMask + state2 = (s.state << maxPosBits) | posState + return +} + +// litState computes the literal state. 
+func (s *state) litState(prev byte, dictHead int64) uint32 { + lp, lc := uint(s.Properties.LP), uint(s.Properties.LC) + litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) | + (uint32(prev) >> (8 - lc)) + return litState +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go new file mode 100644 index 00000000..36b29b59 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go @@ -0,0 +1,133 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// treeCodec encodes or decodes values with a fixed bit size. It is using a +// tree of probability value. The root of the tree is the most-significant bit. +type treeCodec struct { + probTree +} + +// makeTreeCodec makes a tree codec. The bits value must be inside the range +// [1,32]. +func makeTreeCodec(bits int) treeCodec { + return treeCodec{makeProbTree(bits)} +} + +// deepcopy initializes tc as a deep copy of the source. +func (tc *treeCodec) deepcopy(src *treeCodec) { + tc.probTree.deepcopy(&src.probTree) +} + +// Encode uses the range encoder to encode a fixed-bit-size value. +func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) { + m := uint32(1) + for i := int(tc.bits) - 1; i >= 0; i-- { + b := (v >> uint(i)) & 1 + if err := e.EncodeBit(b, &tc.probs[m]); err != nil { + return err + } + m = (m << 1) | b + } + return nil +} + +// Decodes uses the range decoder to decode a fixed-bit-size value. Errors may +// be caused by the range decoder. 
+func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) { + m := uint32(1) + for j := 0; j < int(tc.bits); j++ { + b, err := d.DecodeBit(&tc.probs[m]) + if err != nil { + return 0, err + } + m = (m << 1) | b + } + return m - (1 << uint(tc.bits)), nil +} + +// treeReverseCodec is another tree codec, where the least-significant bit is +// the start of the probability tree. +type treeReverseCodec struct { + probTree +} + +// deepcopy initializes the treeReverseCodec as a deep copy of the +// source. +func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) { + tc.probTree.deepcopy(&src.probTree) +} + +// makeTreeReverseCodec creates treeReverseCodec value. The bits argument must +// be in the range [1,32]. +func makeTreeReverseCodec(bits int) treeReverseCodec { + return treeReverseCodec{makeProbTree(bits)} +} + +// Encode uses range encoder to encode a fixed-bit-size value. The range +// encoder may cause errors. +func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) { + m := uint32(1) + for i := uint(0); i < uint(tc.bits); i++ { + b := (v >> i) & 1 + if err := e.EncodeBit(b, &tc.probs[m]); err != nil { + return err + } + m = (m << 1) | b + } + return nil +} + +// Decodes uses the range decoder to decode a fixed-bit-size value. Errors +// returned by the range decoder will be returned. +func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) { + m := uint32(1) + for j := uint(0); j < uint(tc.bits); j++ { + b, err := d.DecodeBit(&tc.probs[m]) + if err != nil { + return 0, err + } + m = (m << 1) | b + v |= b << j + } + return v, nil +} + +// probTree stores enough probability values to be used by the treeEncode and +// treeDecode methods of the range coder types. +type probTree struct { + probs []prob + bits byte +} + +// deepcopy initializes the probTree value as a deep copy of the source. 
+func (t *probTree) deepcopy(src *probTree) { + if t == src { + return + } + t.probs = make([]prob, len(src.probs)) + copy(t.probs, src.probs) + t.bits = src.bits +} + +// makeProbTree initializes a probTree structure. +func makeProbTree(bits int) probTree { + if !(1 <= bits && bits <= 32) { + panic("bits outside of range [1,32]") + } + t := probTree{ + bits: byte(bits), + probs: make([]prob, 1< 0 { + c.SizeInHeader = true + } + if !c.SizeInHeader { + c.EOSMarker = true + } +} + +// Verify checks WriterConfig for errors. Verify will replace zero +// values with default values. +func (c *WriterConfig) Verify() error { + c.fill() + var err error + if c == nil { + return errors.New("lzma: WriterConfig is nil") + } + if c.Properties == nil { + return errors.New("lzma: WriterConfig has no Properties set") + } + if err = c.Properties.verify(); err != nil { + return err + } + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + if !(maxMatchLen <= c.BufSize) { + return errors.New("lzma: lookahead buffer size too small") + } + if c.SizeInHeader { + if c.Size < 0 { + return errors.New("lzma: negative size not supported") + } + } else if !c.EOSMarker { + return errors.New("lzma: EOS marker is required") + } + if err = c.Matcher.verify(); err != nil { + return err + } + + return nil +} + +// header returns the header structure for this configuration. +func (c *WriterConfig) header() header { + h := header{ + properties: *c.Properties, + dictCap: c.DictCap, + size: -1, + } + if c.SizeInHeader { + h.size = c.Size + } + return h +} + +// Writer writes an LZMA stream in the classic format. +type Writer struct { + h header + bw io.ByteWriter + buf *bufio.Writer + e *encoder +} + +// NewWriter creates a new LZMA writer for the classic format. The +// method will write the header to the underlying stream. 
+func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer{h: c.header()} + + var ok bool + w.bw, ok = lzma.(io.ByteWriter) + if !ok { + w.buf = bufio.NewWriter(lzma) + w.bw = w.buf + } + state := newState(w.h.properties) + m, err := c.Matcher.new(w.h.dictCap) + if err != nil { + return nil, err + } + dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m) + if err != nil { + return nil, err + } + var flags encoderFlags + if c.EOSMarker { + flags = eosMarker + } + if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil { + return nil, err + } + + if err = w.writeHeader(); err != nil { + return nil, err + } + return w, nil +} + +// NewWriter creates a new LZMA writer using the classic format. The +// function writes the header to the underlying stream. +func NewWriter(lzma io.Writer) (w *Writer, err error) { + return WriterConfig{}.NewWriter(lzma) +} + +// writeHeader writes the LZMA header into the stream. +func (w *Writer) writeHeader() error { + data, err := w.h.marshalBinary() + if err != nil { + return err + } + _, err = w.bw.(io.Writer).Write(data) + return err +} + +// Write puts data into the Writer. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.h.size >= 0 { + m := w.h.size + m -= w.e.Compressed() + int64(w.e.dict.Buffered()) + if m < 0 { + m = 0 + } + if m < int64(len(p)) { + p = p[:m] + err = ErrNoSpace + } + } + var werr error + if n, werr = w.e.Write(p); werr != nil { + err = werr + } + return n, err +} + +// Close closes the writer stream. It ensures that all data from the +// buffer will be compressed and the LZMA stream will be finished. 
+func (w *Writer) Close() error { + if w.h.size >= 0 { + n := w.e.Compressed() + int64(w.e.dict.Buffered()) + if n != w.h.size { + return errSize + } + } + err := w.e.Close() + if w.buf != nil { + ferr := w.buf.Flush() + if err == nil { + err = ferr + } + } + return err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/vendor/github.com/ulikunitz/xz/lzma/writer2.go new file mode 100644 index 00000000..97bbafa1 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/writer2.go @@ -0,0 +1,305 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "bytes" + "errors" + "io" +) + +// Writer2Config is used to create a Writer2 using parameters. +type Writer2Config struct { + // The properties for the encoding. If the it is nil the value + // {LC: 3, LP: 0, PB: 2} will be chosen. + Properties *Properties + // The capacity of the dictionary. If DictCap is zero, the value + // 8 MiB will be chosen. + DictCap int + // Size of the lookahead buffer; value 0 indicates default size + // 4096 + BufSize int + // Match algorithm + Matcher MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *Writer2Config) fill() { + if c.Properties == nil { + c.Properties = &Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } +} + +// Verify checks the Writer2Config for correctness. Zero values will be +// replaced by default values. 
+func (c *Writer2Config) Verify() error { + c.fill() + var err error + if c == nil { + return errors.New("lzma: WriterConfig is nil") + } + if c.Properties == nil { + return errors.New("lzma: WriterConfig has no Properties set") + } + if err = c.Properties.verify(); err != nil { + return err + } + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + if !(maxMatchLen <= c.BufSize) { + return errors.New("lzma: lookahead buffer size too small") + } + if c.Properties.LC+c.Properties.LP > 4 { + return errors.New("lzma: sum of lc and lp exceeds 4") + } + if err = c.Matcher.verify(); err != nil { + return err + } + return nil +} + +// Writer2 supports the creation of an LZMA2 stream. But note that +// written data is buffered, so call Flush or Close to write data to the +// underlying writer. The Close method writes the end-of-stream marker +// to the stream. So you may be able to concatenate the output of two +// writers as long the output of the first writer has only been flushed +// but not closed. +// +// Any change to the fields Properties, DictCap must be done before the +// first call to Write, Flush or Close. +type Writer2 struct { + w io.Writer + + start *state + encoder *encoder + + cstate chunkState + ctype chunkType + + buf bytes.Buffer + lbw LimitedByteWriter +} + +// NewWriter2 creates an LZMA2 chunk sequence writer with the default +// parameters and options. +func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + return Writer2Config{}.NewWriter2(lzma2) +} + +// NewWriter2 creates a new LZMA2 writer using the given configuration. 
+func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer2{ + w: lzma2, + start: newState(*c.Properties), + cstate: start, + ctype: start.defaultChunkType(), + } + w.buf.Grow(maxCompressed) + w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed} + m, err := c.Matcher.new(c.DictCap) + if err != nil { + return nil, err + } + d, err := newEncoderDict(c.DictCap, c.BufSize, m) + if err != nil { + return nil, err + } + w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0) + if err != nil { + return nil, err + } + return w, nil +} + +// written returns the number of bytes written to the current chunk +func (w *Writer2) written() int { + if w.encoder == nil { + return 0 + } + return int(w.encoder.Compressed()) + w.encoder.dict.Buffered() +} + +// errClosed indicates that the writer is closed. +var errClosed = errors.New("lzma: writer closed") + +// Writes data to LZMA2 stream. Note that written data will be buffered. +// Use Flush or Close to ensure that data is written to the underlying +// writer. +func (w *Writer2) Write(p []byte) (n int, err error) { + if w.cstate == stop { + return 0, errClosed + } + for n < len(p) { + m := maxUncompressed - w.written() + if m <= 0 { + panic("lzma: maxUncompressed reached") + } + var q []byte + if n+m < len(p) { + q = p[n : n+m] + } else { + q = p[n:] + } + k, err := w.encoder.Write(q) + n += k + if err != nil && err != ErrLimit { + return n, err + } + if err == ErrLimit || k == m { + if err = w.flushChunk(); err != nil { + return n, err + } + } + } + return n, nil +} + +// writeUncompressedChunk writes an uncompressed chunk to the LZMA2 +// stream. 
+func (w *Writer2) writeUncompressedChunk() error { + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("lzma: can't write empty uncompressed chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + switch w.ctype { + case cLRND: + w.ctype = cUD + default: + w.ctype = cU + } + w.encoder.state = w.start + + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = w.encoder.dict.CopyN(w.w, int(u)) + return err +} + +// writeCompressedChunk writes a compressed chunk to the underlying +// writer. +func (w *Writer2) writeCompressedChunk() error { + if w.ctype == cU || w.ctype == cUD { + panic("chunk type uncompressed") + } + + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("writeCompressedChunk: empty chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + c := w.buf.Len() + if c <= 0 { + panic("no compressed data") + } + if c > maxCompressed { + panic("overrun of compressed data limit") + } + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + compressed: uint16(c - 1), + props: w.encoder.state.Properties, + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = io.Copy(w.w, &w.buf) + return err +} + +// writes a single chunk to the underlying writer. +func (w *Writer2) writeChunk() error { + u := int(uncompressedHeaderLen + w.encoder.Compressed()) + c := headerLen(w.ctype) + w.buf.Len() + if u < c { + return w.writeUncompressedChunk() + } + return w.writeCompressedChunk() +} + +// flushChunk terminates the current chunk. The encoder will be reset +// to support the next chunk. 
+func (w *Writer2) flushChunk() error { + if w.written() == 0 { + return nil + } + var err error + if err = w.encoder.Close(); err != nil { + return err + } + if err = w.writeChunk(); err != nil { + return err + } + w.buf.Reset() + w.lbw.N = maxCompressed + if err = w.encoder.Reopen(&w.lbw); err != nil { + return err + } + if err = w.cstate.next(w.ctype); err != nil { + return err + } + w.ctype = w.cstate.defaultChunkType() + w.start = cloneState(w.encoder.state) + return nil +} + +// Flush writes all buffered data out to the underlying stream. This +// could result in multiple chunks to be created. +func (w *Writer2) Flush() error { + if w.cstate == stop { + return errClosed + } + for w.written() > 0 { + if err := w.flushChunk(); err != nil { + return err + } + } + return nil +} + +// Close terminates the LZMA2 stream with an EOS chunk. +func (w *Writer2) Close() error { + if w.cstate == stop { + return errClosed + } + if err := w.Flush(); err != nil { + return nil + } + // write zero byte EOS chunk + _, err := w.w.Write([]byte{0}) + if err != nil { + return err + } + w.cstate = stop + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzmafilter.go b/vendor/github.com/ulikunitz/xz/lzmafilter.go new file mode 100644 index 00000000..bd5f42ee --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzmafilter.go @@ -0,0 +1,117 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "fmt" + "io" + + "github.com/ulikunitz/xz/lzma" +) + +// LZMA filter constants. +const ( + lzmaFilterID = 0x21 + lzmaFilterLen = 3 +) + +// lzmaFilter declares the LZMA2 filter information stored in an xz +// block header. +type lzmaFilter struct { + dictCap int64 +} + +// String returns a representation of the LZMA filter. 
+func (f lzmaFilter) String() string { + return fmt.Sprintf("LZMA dict cap %#x", f.dictCap) +} + +// id returns the ID for the LZMA2 filter. +func (f lzmaFilter) id() uint64 { return lzmaFilterID } + +// MarshalBinary converts the lzmaFilter in its encoded representation. +func (f lzmaFilter) MarshalBinary() (data []byte, err error) { + c := lzma.EncodeDictCap(f.dictCap) + return []byte{lzmaFilterID, 1, c}, nil +} + +// UnmarshalBinary unmarshals the given data representation of the LZMA2 +// filter. +func (f *lzmaFilter) UnmarshalBinary(data []byte) error { + if len(data) != lzmaFilterLen { + return errors.New("xz: data for LZMA2 filter has wrong length") + } + if data[0] != lzmaFilterID { + return errors.New("xz: wrong LZMA2 filter id") + } + if data[1] != 1 { + return errors.New("xz: wrong LZMA2 filter size") + } + dc, err := lzma.DecodeDictCap(data[2]) + if err != nil { + return errors.New("xz: wrong LZMA2 dictionary size property") + } + + f.dictCap = dc + return nil +} + +// reader creates a new reader for the LZMA2 filter. +func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader, + err error) { + + config := new(lzma.Reader2Config) + if c != nil { + config.DictCap = c.DictCap + } + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fr, err = config.NewReader2(r) + if err != nil { + return nil, err + } + return fr, nil +} + +// writeCloser creates a io.WriteCloser for the LZMA2 filter. 
+func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig, +) (fw io.WriteCloser, err error) { + config := new(lzma.Writer2Config) + if c != nil { + *config = lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + } + + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fw, err = config.NewWriter2(w) + if err != nil { + return nil, err + } + return fw, nil +} + +// last returns true, because an LZMA2 filter must be the last filter in +// the filter list. +func (f lzmaFilter) last() bool { return true } diff --git a/vendor/github.com/ulikunitz/xz/make-docs b/vendor/github.com/ulikunitz/xz/make-docs new file mode 100644 index 00000000..a8c612ce --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/make-docs @@ -0,0 +1,5 @@ +#!/bin/sh + +set -x +pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md +pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md diff --git a/vendor/github.com/ulikunitz/xz/none-check.go b/vendor/github.com/ulikunitz/xz/none-check.go new file mode 100644 index 00000000..6a56a261 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/none-check.go @@ -0,0 +1,23 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xz + +import "hash" + +type noneHash struct{} + +func (h noneHash) Write(p []byte) (n int, err error) { return len(p), nil } + +func (h noneHash) Sum(b []byte) []byte { return b } + +func (h noneHash) Reset() {} + +func (h noneHash) Size() int { return 0 } + +func (h noneHash) BlockSize() int { return 0 } + +func newNoneHash() hash.Hash { + return &noneHash{} +} diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go new file mode 100644 index 00000000..bde1412c --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/reader.go @@ -0,0 +1,359 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xz supports the compression and decompression of xz files. It +// supports version 1.0.4 of the specification without the non-LZMA2 +// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt +package xz + +import ( + "bytes" + "errors" + "fmt" + "hash" + "io" + + "github.com/ulikunitz/xz/internal/xlog" + "github.com/ulikunitz/xz/lzma" +) + +// ReaderConfig defines the parameters for the xz reader. The +// SingleStream parameter requests the reader to assume that the +// underlying stream contains only a single stream. +type ReaderConfig struct { + DictCap int + SingleStream bool +} + +// Verify checks the reader parameters for Validity. Zero values will be +// replaced by default values. +func (c *ReaderConfig) Verify() error { + if c == nil { + return errors.New("xz: reader parameters are nil") + } + lc := lzma.Reader2Config{DictCap: c.DictCap} + if err := lc.Verify(); err != nil { + return err + } + return nil +} + +// Reader supports the reading of one or multiple xz streams. 
+type Reader struct { + ReaderConfig + + xz io.Reader + sr *streamReader +} + +// streamReader decodes a single xz stream +type streamReader struct { + ReaderConfig + + xz io.Reader + br *blockReader + newHash func() hash.Hash + h header + index []record +} + +// NewReader creates a new xz reader using the default parameters. +// The function reads and checks the header of the first XZ stream. The +// reader will process multiple streams including padding. +func NewReader(xz io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(xz) +} + +// NewReader creates an xz stream reader. The created reader will be +// able to process multiple streams and padding unless a SingleStream +// has been set in the reader configuration c. +func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader{ + ReaderConfig: c, + xz: xz, + } + if r.sr, err = c.newStreamReader(xz); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + return r, nil +} + +var errUnexpectedData = errors.New("xz: unexpected data after stream") + +// Read reads uncompressed data from the stream. +func (r *Reader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.sr == nil { + if r.SingleStream { + data := make([]byte, 1) + _, err = io.ReadFull(r.xz, data) + if err != io.EOF { + return n, errUnexpectedData + } + return n, io.EOF + } + for { + r.sr, err = r.ReaderConfig.newStreamReader(r.xz) + if err != errPadding { + break + } + } + if err != nil { + return n, err + } + } + k, err := r.sr.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.sr = nil + continue + } + return n, err + } + } + return n, nil +} + +var errPadding = errors.New("xz: padding (4 zero bytes) encountered") + +// newStreamReader creates a new xz stream reader using the given configuration +// parameters. NewReader reads and checks the header of the xz stream. 
+func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(xz, data[:4]); err != nil { + return nil, err + } + if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) { + return nil, errPadding + } + if _, err = io.ReadFull(xz, data[4:]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + r = &streamReader{ + ReaderConfig: c, + xz: xz, + index: make([]record, 0, 4), + } + if err = r.h.UnmarshalBinary(data); err != nil { + return nil, err + } + xlog.Debugf("xz header %s", r.h) + if r.newHash, err = newHashFunc(r.h.flags); err != nil { + return nil, err + } + return r, nil +} + +// readTail reads the index body and the xz footer. +func (r *streamReader) readTail() error { + index, n, err := readIndexBody(r.xz, len(r.index)) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + + for i, rec := range r.index { + if rec != index[i] { + return fmt.Errorf("xz: record %d is %v; want %v", + i, rec, index[i]) + } + } + + p := make([]byte, footerLen) + if _, err = io.ReadFull(r.xz, p); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + var f footer + if err = f.UnmarshalBinary(p); err != nil { + return err + } + xlog.Debugf("xz footer %s", f) + if f.flags != r.h.flags { + return errors.New("xz: footer flags incorrect") + } + if f.indexSize != int64(n)+1 { + return errors.New("xz: index size in footer wrong") + } + return nil +} + +// Read reads actual data from the xz stream. 
+func (r *streamReader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.br == nil { + bh, hlen, err := readBlockHeader(r.xz) + if err != nil { + if err == errIndexIndicator { + if err = r.readTail(); err != nil { + return n, err + } + return n, io.EOF + } + return n, err + } + xlog.Debugf("block %v", *bh) + r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh, + hlen, r.newHash()) + if err != nil { + return n, err + } + } + k, err := r.br.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.index = append(r.index, r.br.record()) + r.br = nil + } else { + return n, err + } + } + } + return n, nil +} + +// countingReader is a reader that counts the bytes read. +type countingReader struct { + r io.Reader + n int64 +} + +// Read reads data from the wrapped reader and adds it to the n field. +func (lr *countingReader) Read(p []byte) (n int, err error) { + n, err = lr.r.Read(p) + lr.n += int64(n) + return n, err +} + +// blockReader supports the reading of a block. +type blockReader struct { + lxz countingReader + header *blockHeader + headerLen int + n int64 + hash hash.Hash + r io.Reader +} + +// newBlockReader creates a new block reader. +func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader, + hlen int, hash hash.Hash) (br *blockReader, err error) { + + br = &blockReader{ + lxz: countingReader{r: xz}, + header: h, + headerLen: hlen, + hash: hash, + } + + fr, err := c.newFilterReader(&br.lxz, h.filters) + if err != nil { + return nil, err + } + if br.hash.Size() != 0 { + br.r = io.TeeReader(fr, br.hash) + } else { + br.r = fr + } + + return br, nil +} + +// uncompressedSize returns the uncompressed size of the block. +func (br *blockReader) uncompressedSize() int64 { + return br.n +} + +// compressedSize returns the compressed size of the block. +func (br *blockReader) compressedSize() int64 { + return br.lxz.n +} + +// unpaddedSize computes the unpadded size for the block. 
+func (br *blockReader) unpaddedSize() int64 { + n := int64(br.headerLen) + n += br.compressedSize() + n += int64(br.hash.Size()) + return n +} + +// record returns the index record for the current block. +func (br *blockReader) record() record { + return record{br.unpaddedSize(), br.uncompressedSize()} +} + +// Read reads data from the block. +func (br *blockReader) Read(p []byte) (n int, err error) { + n, err = br.r.Read(p) + br.n += int64(n) + + u := br.header.uncompressedSize + if u >= 0 && br.uncompressedSize() > u { + return n, errors.New("xz: wrong uncompressed size for block") + } + c := br.header.compressedSize + if c >= 0 && br.compressedSize() > c { + return n, errors.New("xz: wrong compressed size for block") + } + if err != io.EOF { + return n, err + } + if br.uncompressedSize() < u || br.compressedSize() < c { + return n, io.ErrUnexpectedEOF + } + + s := br.hash.Size() + k := padLen(br.lxz.n) + q := make([]byte, k+s, k+2*s) + if _, err = io.ReadFull(br.lxz.r, q); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return n, err + } + if !allZeros(q[:k]) { + return n, errors.New("xz: non-zero block padding") + } + checkSum := q[k:] + computedSum := br.hash.Sum(checkSum[s:]) + if !bytes.Equal(checkSum, computedSum) { + return n, errors.New("xz: checksum error for block") + } + return n, io.EOF +} + +func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader, + err error) { + + if err = verifyFilters(f); err != nil { + return nil, err + } + + fr = r + for i := len(f) - 1; i >= 0; i-- { + fr, err = f[i].reader(fr, c) + if err != nil { + return nil, err + } + } + return fr, nil +} diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go new file mode 100644 index 00000000..f693e0ae --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/writer.go @@ -0,0 +1,399 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "fmt" + "hash" + "io" + + "github.com/ulikunitz/xz/lzma" +) + +// WriterConfig describe the parameters for an xz writer. +type WriterConfig struct { + Properties *lzma.Properties + DictCap int + BufSize int + BlockSize int64 + // checksum method: CRC32, CRC64 or SHA256 (default: CRC64) + CheckSum byte + // Forces NoChecksum (default: false) + NoCheckSum bool + // match algorithm + Matcher lzma.MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *WriterConfig) fill() { + if c.Properties == nil { + c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } + if c.BlockSize == 0 { + c.BlockSize = maxInt64 + } + if c.CheckSum == 0 { + c.CheckSum = CRC64 + } + if c.NoCheckSum { + c.CheckSum = None + } +} + +// Verify checks the configuration for errors. Zero values will be +// replaced by default values. +func (c *WriterConfig) Verify() error { + if c == nil { + return errors.New("xz: writer configuration is nil") + } + c.fill() + lc := lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + if err := lc.Verify(); err != nil { + return err + } + if c.BlockSize <= 0 { + return errors.New("xz: block size out of range") + } + if err := verifyFlags(c.CheckSum); err != nil { + return err + } + return nil +} + +// filters creates the filter list for the given parameters. +func (c *WriterConfig) filters() []filter { + return []filter{&lzmaFilter{int64(c.DictCap)}} +} + +// maxInt64 defines the maximum 64-bit signed integer. +const maxInt64 = 1<<63 - 1 + +// verifyFilters checks the filter list for the length and the right +// sequence of filters. 
+func verifyFilters(f []filter) error { + if len(f) == 0 { + return errors.New("xz: no filters") + } + if len(f) > 4 { + return errors.New("xz: more than four filters") + } + for _, g := range f[:len(f)-1] { + if g.last() { + return errors.New("xz: last filter is not last") + } + } + if !f[len(f)-1].last() { + return errors.New("xz: wrong last filter") + } + return nil +} + +// newFilterWriteCloser converts a filter list into a WriteCloser that +// can be used by a blockWriter. +func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) { + if err = verifyFilters(f); err != nil { + return nil, err + } + fw = nopWriteCloser(w) + for i := len(f) - 1; i >= 0; i-- { + fw, err = f[i].writeCloser(fw, c) + if err != nil { + return nil, err + } + } + return fw, nil +} + +// nopWCloser implements a WriteCloser with a Close method not doing +// anything. +type nopWCloser struct { + io.Writer +} + +// Close returns nil and doesn't do anything else. +func (c nopWCloser) Close() error { + return nil +} + +// nopWriteCloser converts the Writer into a WriteCloser with a Close +// function that does nothing beside returning nil. +func nopWriteCloser(w io.Writer) io.WriteCloser { + return nopWCloser{w} +} + +// Writer compresses data written to it. It is an io.WriteCloser. +type Writer struct { + WriterConfig + + xz io.Writer + bw *blockWriter + newHash func() hash.Hash + h header + index []record + closed bool +} + +// newBlockWriter creates a new block writer writes the header out. +func (w *Writer) newBlockWriter() error { + var err error + w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash()) + if err != nil { + return err + } + if err = w.bw.writeHeader(w.xz); err != nil { + return err + } + return nil +} + +// closeBlockWriter closes a block writer and records the sizes in the +// index. 
+func (w *Writer) closeBlockWriter() error { + var err error + if err = w.bw.Close(); err != nil { + return err + } + w.index = append(w.index, w.bw.record()) + return nil +} + +// NewWriter creates a new xz writer using default parameters. +func NewWriter(xz io.Writer) (w *Writer, err error) { + return WriterConfig{}.NewWriter(xz) +} + +// NewWriter creates a new Writer using the given configuration parameters. +func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer{ + WriterConfig: c, + xz: xz, + h: header{c.CheckSum}, + index: make([]record, 0, 4), + } + if w.newHash, err = newHashFunc(c.CheckSum); err != nil { + return nil, err + } + data, err := w.h.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("w.h.MarshalBinary(): error %w", err) + } + if _, err = xz.Write(data); err != nil { + return nil, err + } + if err = w.newBlockWriter(); err != nil { + return nil, err + } + return w, nil + +} + +// Write compresses the uncompressed data provided. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.closed { + return 0, errClosed + } + for { + k, err := w.bw.Write(p[n:]) + n += k + if err != errNoSpace { + return n, err + } + if err = w.closeBlockWriter(); err != nil { + return n, err + } + if err = w.newBlockWriter(); err != nil { + return n, err + } + } +} + +// Close closes the writer and adds the footer to the Writer. Close +// doesn't close the underlying writer. +func (w *Writer) Close() error { + if w.closed { + return errClosed + } + w.closed = true + var err error + if err = w.closeBlockWriter(); err != nil { + return err + } + + f := footer{flags: w.h.flags} + if f.indexSize, err = writeIndex(w.xz, w.index); err != nil { + return err + } + data, err := f.MarshalBinary() + if err != nil { + return err + } + if _, err = w.xz.Write(data); err != nil { + return err + } + return nil +} + +// countingWriter is a writer that counts all data written to it. 
+type countingWriter struct { + w io.Writer + n int64 +} + +// Write writes data to the countingWriter. +func (cw *countingWriter) Write(p []byte) (n int, err error) { + n, err = cw.w.Write(p) + cw.n += int64(n) + if err == nil && cw.n < 0 { + return n, errors.New("xz: counter overflow") + } + return +} + +// blockWriter is writes a single block. +type blockWriter struct { + cxz countingWriter + // mw combines io.WriteCloser w and the hash. + mw io.Writer + w io.WriteCloser + n int64 + blockSize int64 + closed bool + headerLen int + + filters []filter + hash hash.Hash +} + +// newBlockWriter creates a new block writer. +func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) { + bw = &blockWriter{ + cxz: countingWriter{w: xz}, + blockSize: c.BlockSize, + filters: c.filters(), + hash: hash, + } + bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters) + if err != nil { + return nil, err + } + if bw.hash.Size() != 0 { + bw.mw = io.MultiWriter(bw.w, bw.hash) + } else { + bw.mw = bw.w + } + return bw, nil +} + +// writeHeader writes the header. If the function is called after Close +// the commpressedSize and uncompressedSize fields will be filled. +func (bw *blockWriter) writeHeader(w io.Writer) error { + h := blockHeader{ + compressedSize: -1, + uncompressedSize: -1, + filters: bw.filters, + } + if bw.closed { + h.compressedSize = bw.compressedSize() + h.uncompressedSize = bw.uncompressedSize() + } + data, err := h.MarshalBinary() + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + bw.headerLen = len(data) + return nil +} + +// compressed size returns the amount of data written to the underlying +// stream. 
+func (bw *blockWriter) compressedSize() int64 { + return bw.cxz.n +} + +// uncompressedSize returns the number of data written to the +// blockWriter +func (bw *blockWriter) uncompressedSize() int64 { + return bw.n +} + +// unpaddedSize returns the sum of the header length, the uncompressed +// size of the block and the hash size. +func (bw *blockWriter) unpaddedSize() int64 { + if bw.headerLen <= 0 { + panic("xz: block header not written") + } + n := int64(bw.headerLen) + n += bw.compressedSize() + n += int64(bw.hash.Size()) + return n +} + +// record returns the record for the current stream. Call Close before +// calling this method. +func (bw *blockWriter) record() record { + return record{bw.unpaddedSize(), bw.uncompressedSize()} +} + +var errClosed = errors.New("xz: writer already closed") + +var errNoSpace = errors.New("xz: no space") + +// Write writes uncompressed data to the block writer. +func (bw *blockWriter) Write(p []byte) (n int, err error) { + if bw.closed { + return 0, errClosed + } + + t := bw.blockSize - bw.n + if int64(len(p)) > t { + err = errNoSpace + p = p[:t] + } + + var werr error + n, werr = bw.mw.Write(p) + bw.n += int64(n) + if werr != nil { + return n, werr + } + return n, err +} + +// Close closes the writer. 
+func (bw *blockWriter) Close() error { + if bw.closed { + return errClosed + } + bw.closed = true + if err := bw.w.Close(); err != nil { + return err + } + s := bw.hash.Size() + k := padLen(bw.cxz.n) + p := make([]byte, k+s) + bw.hash.Sum(p[k:k]) + if _, err := bw.cxz.w.Write(p); err != nil { + return err + } + return nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index b159e039..76f0ed6b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -487,6 +487,9 @@ github.com/docker/go-units # github.com/docker/libcontainer v2.2.1+incompatible ## explicit github.com/docker/libcontainer/netlink +# github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 +## explicit +github.com/docker/libtrust # github.com/dustin/go-humanize v1.0.1 ## explicit; go 1.16 github.com/dustin/go-humanize @@ -879,8 +882,8 @@ github.com/kballard/go-shellquote # github.com/kevinburke/ssh_config v1.2.0 ## explicit github.com/kevinburke/ssh_config -# github.com/klauspost/compress v1.17.7 -## explicit; go 1.20 +# github.com/klauspost/compress v1.17.11 +## explicit; go 1.21 github.com/klauspost/compress github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 @@ -1107,6 +1110,45 @@ github.com/quic-go/quic-go/quicvarint # github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 ## explicit github.com/rcrowley/go-metrics +# github.com/regclient/regclient v0.8.0 +## explicit; go 1.21 +github.com/regclient/regclient +github.com/regclient/regclient/config +github.com/regclient/regclient/internal/auth +github.com/regclient/regclient/internal/cache +github.com/regclient/regclient/internal/conffile +github.com/regclient/regclient/internal/httplink +github.com/regclient/regclient/internal/limitread +github.com/regclient/regclient/internal/pqueue +github.com/regclient/regclient/internal/reghttp +github.com/regclient/regclient/internal/reqmeta +github.com/regclient/regclient/internal/sloghandle +github.com/regclient/regclient/internal/strparse 
+github.com/regclient/regclient/internal/timejson +github.com/regclient/regclient/internal/units +github.com/regclient/regclient/internal/version +github.com/regclient/regclient/pkg/archive +github.com/regclient/regclient/scheme +github.com/regclient/regclient/scheme/ocidir +github.com/regclient/regclient/scheme/reg +github.com/regclient/regclient/types +github.com/regclient/regclient/types/blob +github.com/regclient/regclient/types/descriptor +github.com/regclient/regclient/types/docker +github.com/regclient/regclient/types/docker/schema1 +github.com/regclient/regclient/types/docker/schema2 +github.com/regclient/regclient/types/errs +github.com/regclient/regclient/types/manifest +github.com/regclient/regclient/types/mediatype +github.com/regclient/regclient/types/oci +github.com/regclient/regclient/types/oci/v1 +github.com/regclient/regclient/types/ping +github.com/regclient/regclient/types/platform +github.com/regclient/regclient/types/ref +github.com/regclient/regclient/types/referrer +github.com/regclient/regclient/types/repo +github.com/regclient/regclient/types/tag +github.com/regclient/regclient/types/warning # github.com/rivo/uniseg v0.4.7 ## explicit; go 1.18 github.com/rivo/uniseg @@ -1247,6 +1289,12 @@ github.com/tklauser/go-sysconf # github.com/tklauser/numcpus v0.6.1 ## explicit; go 1.13 github.com/tklauser/numcpus +# github.com/ulikunitz/xz v0.5.12 +## explicit; go 1.12 +github.com/ulikunitz/xz +github.com/ulikunitz/xz/internal/hash +github.com/ulikunitz/xz/internal/xlog +github.com/ulikunitz/xz/lzma # github.com/vishvananda/netns v0.0.4 ## explicit; go 1.17 github.com/vishvananda/netns