Move content encoding to the beginning of the middleware chain, update dependencies

Ingo Oppermann
2024-10-09 14:25:42 +02:00
parent 4d6eb122b0
commit f97943b275
348 changed files with 18733 additions and 5367 deletions
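
For orientation, here is a minimal sketch of what the commit subject describes: the compress middleware registered near the top of the echo middleware chain, so that every response written later in the chain goes through the (potentially) compressing writer. The import path and the concrete option values are assumptions for illustration, based on the diffs below, not something this commit ships.

package main

import (
    "net/http"

    "github.com/labstack/echo/v4"

    // Assumed import path; the middleware package in this commit is named "compress".
    mwcompress "github.com/datarhei/core/v16/http/middleware/compress"
)

func main() {
    e := echo.New()

    // Register content encoding first so that everything produced further down
    // the chain (other middlewares, API handlers, static files) can be compressed.
    e.Use(mwcompress.NewWithConfig(mwcompress.Config{
        Level:        mwcompress.BestSpeed,
        MinLength:    1000,                           // don't compress tiny bodies (assumed value)
        Schemes:      []string{"gzip", "br", "zstd"}, // plain strings as of this commit
        ContentTypes: []string{"text/plain", "text/html", "application/json"},
    }))

    e.GET("/", func(c echo.Context) error {
        return c.String(http.StatusOK, "hello")
    })

    e.Logger.Fatal(e.Start(":8080"))
}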

View File

@@ -17,7 +17,6 @@ type FS struct {
DefaultFile string
DefaultContentType string
Gzip bool
Filesystem fs.Filesystem

View File

@@ -51,7 +51,7 @@ func TestConfigSetConflict(t *testing.T) {
router, _ := getDummyConfigRouter(t)
cfg := config.New(nil)
cfg.Storage.MimeTypes = "/path/to/mime.types"
cfg.Storage.MimeTypesFile = "/path/to/mime.types"
var data bytes.Buffer

View File

@@ -15,7 +15,7 @@ func NewBrotli(level Level) Compression {
brotliLevel := brotli.DefaultCompression
if level == BestCompression {
brotliLevel = brotli.BestCompression
} else {
} else if level == BestSpeed {
brotliLevel = brotli.BestSpeed
}

View File

@@ -8,8 +8,9 @@ import (
"net"
"net/http"
"strings"
"sync"
"github.com/datarhei/core/v16/mem"
"github.com/datarhei/core/v16/slices"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
)
@@ -27,8 +28,11 @@ type Config struct {
// is used. Optional. Default value 0
MinLength int
// Schemes is a list of enabled compressiond. Optional. Default [GzipScheme, ZstdScheme]
Schemes []Scheme
// Schemes is a list of enabled compression schemes. Optional. Default [gzip]
Schemes []string
// List of content types to compress. If empty, everything will be compressed
ContentTypes []string
}
type Compression interface {
@@ -46,6 +50,7 @@ type Compressor interface {
type compressResponseWriter struct {
Compressor
http.ResponseWriter
hasHeader bool
wroteHeader bool
wroteBody bool
minLength int
@@ -54,20 +59,10 @@ type compressResponseWriter struct {
code int
headerContentLength string
scheme string
contentTypes []string
passThrough bool
}
type Scheme string
func (s Scheme) String() string {
return string(s)
}
const (
GzipScheme Scheme = "gzip"
BrotliScheme Scheme = "br"
ZstdScheme Scheme = "zstd"
)
type Level int
const (
@@ -78,33 +73,11 @@ const (
// DefaultConfig is the default Gzip middleware config.
var DefaultConfig = Config{
Skipper: middleware.DefaultSkipper,
Level: DefaultCompression,
MinLength: 0,
Schemes: []Scheme{GzipScheme},
}
// ContentTypesSkipper returns a Skipper based on the list of content types
// that should be compressed. If the list is empty, all responses will be
// compressed.
func ContentTypeSkipper(contentTypes []string) middleware.Skipper {
return func(c echo.Context) bool {
// If no allowed content types are given, compress all
if len(contentTypes) == 0 {
return false
}
// Iterate through the allowed content types and don't skip if the content type matches
responseContentType := c.Response().Header().Get(echo.HeaderContentType)
for _, contentType := range contentTypes {
if strings.Contains(responseContentType, contentType) {
return false
}
}
return true
}
Skipper: middleware.DefaultSkipper,
Level: DefaultCompression,
MinLength: 0,
Schemes: []string{"gzip"},
ContentTypes: []string{},
}
// New returns a middleware which compresses HTTP response using a compression
@@ -133,38 +106,40 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
config.Schemes = DefaultConfig.Schemes
}
contentTypes := slices.Copy(config.ContentTypes)
gzipEnable := false
brotliEnable := false
zstdEnable := false
for _, s := range config.Schemes {
switch s {
case GzipScheme:
case "gzip":
gzipEnable = true
case BrotliScheme:
case "br":
brotliEnable = true
case ZstdScheme:
case "zstd":
zstdEnable = true
}
}
var gzipPool Compression
var brotliPool Compression
var zstdPool Compression
var gzipCompressor Compression
var brotliCompressor Compression
var zstdCompressor Compression
if gzipEnable {
gzipPool = NewGzip(config.Level)
gzipCompressor = NewGzip(config.Level)
}
if brotliEnable {
brotliPool = NewBrotli(config.Level)
brotliCompressor = NewBrotli(config.Level)
}
if zstdEnable {
zstdPool = NewZstd(config.Level)
zstdCompressor = NewZstd(config.Level)
}
bpool := bufferPool()
bufferPool := mem.NewBufferPool()
return func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
@@ -173,62 +148,69 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
}
res := c.Response()
res.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding)
encodings := c.Request().Header.Get(echo.HeaderAcceptEncoding)
var pool Compression
var scheme Scheme
var compress Compression
var scheme string
if zstdEnable && strings.Contains(encodings, ZstdScheme.String()) {
pool = zstdPool
scheme = ZstdScheme
} else if brotliEnable && strings.Contains(encodings, BrotliScheme.String()) {
pool = brotliPool
scheme = BrotliScheme
} else if gzipEnable && strings.Contains(encodings, GzipScheme.String()) {
pool = gzipPool
scheme = GzipScheme
if zstdEnable && strings.Contains(encodings, "zstd") {
compress = zstdCompressor
scheme = "zstd"
} else if brotliEnable && strings.Contains(encodings, "br") {
compress = brotliCompressor
scheme = "br"
} else if gzipEnable && strings.Contains(encodings, "gzip") {
compress = gzipCompressor
scheme = "gzip"
}
if pool != nil {
w := pool.Acquire()
if w == nil {
if compress != nil {
compressor := compress.Acquire()
if compressor == nil {
return echo.NewHTTPError(http.StatusInternalServerError, fmt.Errorf("failed to acquire compressor for %s", scheme))
}
rw := res.Writer
w.Reset(rw)
compressor.Reset(rw)
buf := bpool.Get().(*bytes.Buffer)
buf.Reset()
buffer := bufferPool.Get()
grw := &compressResponseWriter{Compressor: w, ResponseWriter: rw, minLength: config.MinLength, buffer: buf, scheme: scheme.String()}
grw := &compressResponseWriter{
Compressor: compressor,
ResponseWriter: rw,
minLength: config.MinLength,
buffer: buffer,
scheme: scheme,
contentTypes: contentTypes,
}
defer func() {
if !grw.wroteBody {
if res.Header().Get(echo.HeaderContentEncoding) == scheme.String() {
res.Header().Del(echo.HeaderContentEncoding)
}
// We have to reset response to it's pristine state when
// nothing is written to body or error is returned.
// See issue #424, #407.
res.Writer = rw
w.Reset(io.Discard)
} else if !grw.minLengthExceeded {
// If the minimum content length hasn't exceeded, write the uncompressed response
res.Writer = rw
if grw.wroteHeader {
// Restore Content-Length header in case it was deleted
if len(grw.headerContentLength) != 0 {
grw.Header().Set(echo.HeaderContentLength, grw.headerContentLength)
if !grw.passThrough {
if !grw.wroteBody {
if res.Header().Get(echo.HeaderContentEncoding) == scheme {
res.Header().Del(echo.HeaderContentEncoding)
}
grw.ResponseWriter.WriteHeader(grw.code)
// We have to reset the response to its pristine state when
// nothing is written to the body or an error is returned.
// See issue #424, #407.
res.Writer = rw
compressor.Reset(io.Discard)
} else if !grw.minLengthExceeded {
// If the minimum content length hasn't been exceeded, write the uncompressed response
res.Writer = rw
if grw.wroteHeader {
// Restore Content-Length header in case it was deleted
if len(grw.headerContentLength) != 0 {
grw.Header().Set(echo.HeaderContentLength, grw.headerContentLength)
}
grw.ResponseWriter.WriteHeader(grw.code)
}
grw.buffer.WriteTo(rw)
compressor.Reset(io.Discard)
}
grw.buffer.WriteTo(rw)
w.Reset(io.Discard)
}
w.Close()
bpool.Put(buf)
pool.Release(w)
compressor.Close()
bufferPool.Put(buffer)
compress.Release(compressor)
}()
res.Writer = grw
@@ -241,17 +223,37 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
func (w *compressResponseWriter) WriteHeader(code int) {
if code == http.StatusNoContent { // Issue #489
w.ResponseWriter.Header().Del(echo.HeaderContentEncoding)
w.Header().Del(echo.HeaderContentEncoding)
}
w.headerContentLength = w.Header().Get(echo.HeaderContentLength)
w.Header().Del(echo.HeaderContentLength) // Issue #444
w.wroteHeader = true
if !w.canCompress(w.Header().Get(echo.HeaderContentType)) {
w.passThrough = true
}
w.hasHeader = true
// Delay writing of the header until we know if we'll actually compress the response
w.code = code
}
func (w *compressResponseWriter) canCompress(responseContentType string) bool {
// If no content types are given, compress all
if len(w.contentTypes) == 0 {
return true
}
// Iterate through the allowed content types and compress if the content type matches
for _, contentType := range w.contentTypes {
if strings.Contains(responseContentType, contentType) {
return true
}
}
return false
}
func (w *compressResponseWriter) Write(b []byte) (int, error) {
if w.Header().Get(echo.HeaderContentType) == "" {
w.Header().Set(echo.HeaderContentType, http.DetectContentType(b))
@@ -259,6 +261,18 @@ func (w *compressResponseWriter) Write(b []byte) (int, error) {
w.wroteBody = true
if !w.hasHeader {
w.WriteHeader(http.StatusOK)
}
if w.passThrough {
if !w.wroteHeader {
w.ResponseWriter.WriteHeader(w.code)
w.wroteHeader = true
}
return w.ResponseWriter.Write(b)
}
if !w.minLengthExceeded {
n, err := w.buffer.Write(b)
@@ -267,8 +281,10 @@ func (w *compressResponseWriter) Write(b []byte) (int, error) {
// The minimum length is exceeded, add Content-Encoding header and write the header
w.Header().Set(echo.HeaderContentEncoding, w.scheme) // Issue #806
if w.wroteHeader {
w.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding)
if w.hasHeader {
w.ResponseWriter.WriteHeader(w.code)
w.wroteHeader = true
}
return w.Compressor.Write(w.buffer.Bytes())
@@ -281,12 +297,31 @@ func (w *compressResponseWriter) Write(b []byte) (int, error) {
}
func (w *compressResponseWriter) Flush() {
if !w.hasHeader {
w.WriteHeader(http.StatusOK)
}
if w.passThrough {
if !w.wroteHeader {
w.ResponseWriter.WriteHeader(w.code)
w.wroteHeader = true
}
if flusher, ok := w.ResponseWriter.(http.Flusher); ok {
flusher.Flush()
}
return
}
if !w.minLengthExceeded {
// Enforce compression
w.minLengthExceeded = true
w.Header().Set(echo.HeaderContentEncoding, w.scheme) // Issue #806
if w.wroteHeader {
w.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding)
if w.hasHeader {
w.ResponseWriter.WriteHeader(w.code)
w.wroteHeader = true
}
w.Compressor.Write(w.buffer.Bytes())
@@ -308,12 +343,3 @@ func (w *compressResponseWriter) Push(target string, opts *http.PushOptions) err
}
return http.ErrNotSupported
}
func bufferPool() sync.Pool {
return sync.Pool{
New: func() interface{} {
b := &bytes.Buffer{}
return b
},
}
}
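
As a reading aid, not part of the diff: a hedged sketch of the negotiation implied by the if/else chain above. When a client advertises several encodings, zstd is preferred over br, which is preferred over gzip, regardless of their order in Accept-Encoding. The example assumes it lives in the compress package's tests.

package compress

import (
    "fmt"
    "net/http"
    "net/http/httptest"

    "github.com/labstack/echo/v4"
)

func ExampleNewWithConfig_schemePreference() {
    e := echo.New()
    e.Use(NewWithConfig(Config{Schemes: []string{"gzip", "br", "zstd"}}))
    e.GET("/", func(c echo.Context) error {
        return c.String(http.StatusOK, "a body long enough to be compressed")
    })

    // The client lists gzip first, but the middleware picks the strongest
    // enabled scheme it finds in the header.
    req := httptest.NewRequest(http.MethodGet, "/", nil)
    req.Header.Set(echo.HeaderAcceptEncoding, "gzip, br, zstd")
    rec := httptest.NewRecorder()
    e.ServeHTTP(rec, req)

    fmt.Println(rec.Header().Get(echo.HeaderContentEncoding))
    // Output: zstd
}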

View File

@@ -58,15 +58,15 @@ func (rcr *nopReadCloseResetter) Reset(r io.Reader) error {
return resetter.Reset(r)
}
func getTestcases() map[Scheme]func(r io.Reader) (ReadCloseResetter, error) {
return map[Scheme]func(r io.Reader) (ReadCloseResetter, error){
GzipScheme: func(r io.Reader) (ReadCloseResetter, error) {
func getTestcases() map[string]func(r io.Reader) (ReadCloseResetter, error) {
return map[string]func(r io.Reader) (ReadCloseResetter, error){
"gzip": func(r io.Reader) (ReadCloseResetter, error) {
return gzip.NewReader(r)
},
BrotliScheme: func(r io.Reader) (ReadCloseResetter, error) {
"br": func(r io.Reader) (ReadCloseResetter, error) {
return &nopReadCloseResetter{brotli.NewReader(r)}, nil
},
ZstdScheme: func(r io.Reader) (ReadCloseResetter, error) {
"zstd": func(r io.Reader) (ReadCloseResetter, error) {
reader, err := zstd.NewReader(r)
return &nopReadCloseResetter{reader}, err
},
@@ -77,18 +77,18 @@ func TestCompress(t *testing.T) {
schemes := getTestcases()
for scheme, reader := range schemes {
t.Run(scheme.String(), func(t *testing.T) {
t.Run(scheme, func(t *testing.T) {
e := echo.New()
req := httptest.NewRequest(http.MethodGet, "/", nil)
rec := httptest.NewRecorder()
c := e.NewContext(req, rec)
ctx := e.NewContext(req, rec)
// Skip if no Accept-Encoding header
h := NewWithConfig(Config{Schemes: []Scheme{scheme}})(func(c echo.Context) error {
handler := NewWithConfig(Config{Schemes: []string{scheme}})(func(c echo.Context) error {
c.Response().Write([]byte("test")) // For Content-Type sniffing
return nil
})
h(c)
handler(ctx)
assert := assert.New(t)
@@ -96,15 +96,15 @@ func TestCompress(t *testing.T) {
// Compression
req = httptest.NewRequest(http.MethodGet, "/", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec = httptest.NewRecorder()
c = e.NewContext(req, rec)
h(c)
assert.Equal(scheme.String(), rec.Header().Get(echo.HeaderContentEncoding))
ctx = e.NewContext(req, rec)
handler(ctx)
assert.Equal(scheme, rec.Header().Get(echo.HeaderContentEncoding))
assert.Contains(rec.Header().Get(echo.HeaderContentType), echo.MIMETextPlain)
r, err := reader(rec.Body)
if assert.NoError(err) {
buf := new(bytes.Buffer)
buf := &bytes.Buffer{}
defer r.Close()
buf.ReadFrom(r)
assert.Equal("test", buf.String())
@@ -112,11 +112,11 @@ func TestCompress(t *testing.T) {
// Gzip chunked
req = httptest.NewRequest(http.MethodGet, "/", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec = httptest.NewRecorder()
c = e.NewContext(req, rec)
NewWithConfig(Config{Schemes: []Scheme{scheme}})(func(c echo.Context) error {
ctx = e.NewContext(req, rec)
NewWithConfig(Config{Schemes: []string{scheme}})(func(c echo.Context) error {
c.Response().Header().Set("Content-Type", "text/event-stream")
c.Response().Header().Set("Transfer-Encoding", "chunked")
@@ -126,7 +126,7 @@ func TestCompress(t *testing.T) {
// Read the first part of the data
assert.True(rec.Flushed)
assert.Equal(scheme.String(), rec.Header().Get(echo.HeaderContentEncoding))
assert.Equal(scheme, rec.Header().Get(echo.HeaderContentEncoding))
// Write and flush the second part of the data
c.Response().Write([]byte("tost\n"))
@@ -135,7 +135,7 @@ func TestCompress(t *testing.T) {
// Write the final part of the data and return
c.Response().Write([]byte("tast"))
return nil
})(c)
})(ctx)
buf := new(bytes.Buffer)
r.Reset(rec.Body)
@@ -146,14 +146,53 @@ func TestCompress(t *testing.T) {
}
}
func TestCompressWithPassthrough(t *testing.T) {
schemes := getTestcases()
for scheme, reader := range schemes {
t.Run(scheme, func(t *testing.T) {
e := echo.New()
e.Use(NewWithConfig(Config{MinLength: 5, Schemes: []string{scheme}, ContentTypes: []string{"text/compress"}}))
e.GET("/plain", func(c echo.Context) error {
c.Response().Header().Set("Content-Type", "text/plain")
c.Response().Write([]byte("testtest"))
return nil
})
e.GET("/compress", func(c echo.Context) error {
c.Response().Header().Set("Content-Type", "text/compress")
c.Response().Write([]byte("testtest"))
return nil
})
req := httptest.NewRequest(http.MethodGet, "/plain", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec := httptest.NewRecorder()
e.ServeHTTP(rec, req)
assert.Equal(t, "", rec.Header().Get(echo.HeaderContentEncoding))
assert.Contains(t, rec.Body.String(), "testtest")
req = httptest.NewRequest(http.MethodGet, "/compress", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec = httptest.NewRecorder()
e.ServeHTTP(rec, req)
assert.Equal(t, scheme, rec.Header().Get(echo.HeaderContentEncoding))
r, err := reader(rec.Body)
if assert.NoError(t, err) {
buf := new(bytes.Buffer)
defer r.Close()
buf.ReadFrom(r)
assert.Equal(t, "testtest", buf.String())
}
})
}
}
func TestCompressWithMinLength(t *testing.T) {
schemes := getTestcases()
for scheme, reader := range schemes {
t.Run(scheme.String(), func(t *testing.T) {
t.Run(scheme, func(t *testing.T) {
e := echo.New()
// Invalid level
e.Use(NewWithConfig(Config{MinLength: 5, Schemes: []Scheme{scheme}}))
e.Use(NewWithConfig(Config{MinLength: 5, Schemes: []string{scheme}}))
e.GET("/", func(c echo.Context) error {
c.Response().Write([]byte("test"))
return nil
@@ -163,17 +202,17 @@ func TestCompressWithMinLength(t *testing.T) {
return nil
})
req := httptest.NewRequest(http.MethodGet, "/", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec := httptest.NewRecorder()
e.ServeHTTP(rec, req)
assert.Equal(t, "", rec.Header().Get(echo.HeaderContentEncoding))
assert.Contains(t, rec.Body.String(), "test")
req = httptest.NewRequest(http.MethodGet, "/foobar", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec = httptest.NewRecorder()
e.ServeHTTP(rec, req)
assert.Equal(t, scheme.String(), rec.Header().Get(echo.HeaderContentEncoding))
assert.Equal(t, scheme, rec.Header().Get(echo.HeaderContentEncoding))
r, err := reader(rec.Body)
if assert.NoError(t, err) {
buf := new(bytes.Buffer)
@@ -185,17 +224,60 @@ func TestCompressWithMinLength(t *testing.T) {
}
}
func TestCompressWithAroundMinLength(t *testing.T) {
schemes := getTestcases()
minLength := 1000
for scheme, reader := range schemes {
for i := minLength - 64; i < minLength+64; i++ {
name := fmt.Sprintf("%s-%d", scheme, i)
t.Run(name, func(t *testing.T) {
data := rand.Bytes(i)
e := echo.New()
e.Use(NewWithConfig(Config{MinLength: minLength, Schemes: []string{scheme}}))
e.GET("/", func(c echo.Context) error {
c.Response().Write(data[:1])
c.Response().Write(data[1:])
return nil
})
req := httptest.NewRequest(http.MethodGet, "/", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec := httptest.NewRecorder()
e.ServeHTTP(rec, req)
if i < minLength {
assert.Equal(t, "", rec.Header().Get(echo.HeaderContentEncoding))
res, err := io.ReadAll(rec.Body)
if assert.NoError(t, err) {
assert.Equal(t, data, res)
}
} else {
assert.Equal(t, scheme, rec.Header().Get(echo.HeaderContentEncoding))
r, err := reader(rec.Body)
if assert.NoError(t, err) {
buf := new(bytes.Buffer)
defer r.Close()
buf.ReadFrom(r)
assert.Equal(t, data, buf.Bytes())
}
}
})
}
}
}
func TestCompressNoContent(t *testing.T) {
schemes := getTestcases()
for scheme := range schemes {
t.Run(scheme.String(), func(t *testing.T) {
t.Run(scheme, func(t *testing.T) {
e := echo.New()
req := httptest.NewRequest(http.MethodGet, "/", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec := httptest.NewRecorder()
c := e.NewContext(req, rec)
h := NewWithConfig(Config{Schemes: []Scheme{scheme}})(func(c echo.Context) error {
h := NewWithConfig(Config{Schemes: []string{scheme}})(func(c echo.Context) error {
return c.NoContent(http.StatusNoContent)
})
if assert.NoError(t, h(c)) {
@@ -211,17 +293,17 @@ func TestCompressEmpty(t *testing.T) {
schemes := getTestcases()
for scheme, reader := range schemes {
t.Run(scheme.String(), func(t *testing.T) {
t.Run(scheme, func(t *testing.T) {
e := echo.New()
req := httptest.NewRequest(http.MethodGet, "/", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec := httptest.NewRecorder()
c := e.NewContext(req, rec)
h := NewWithConfig(Config{Schemes: []Scheme{scheme}})(func(c echo.Context) error {
h := NewWithConfig(Config{Schemes: []string{scheme}})(func(c echo.Context) error {
return c.String(http.StatusOK, "")
})
if assert.NoError(t, h(c)) {
assert.Equal(t, scheme.String(), rec.Header().Get(echo.HeaderContentEncoding))
assert.Equal(t, scheme, rec.Header().Get(echo.HeaderContentEncoding))
assert.Equal(t, "text/plain; charset=UTF-8", rec.Header().Get(echo.HeaderContentType))
r, err := reader(rec.Body)
if assert.NoError(t, err) {
@@ -238,14 +320,14 @@ func TestCompressErrorReturned(t *testing.T) {
schemes := getTestcases()
for scheme := range schemes {
t.Run(scheme.String(), func(t *testing.T) {
t.Run(scheme, func(t *testing.T) {
e := echo.New()
e.Use(NewWithConfig(Config{Schemes: []Scheme{scheme}}))
e.Use(NewWithConfig(Config{Schemes: []string{scheme}}))
e.GET("/", func(c echo.Context) error {
return echo.ErrNotFound
})
req := httptest.NewRequest(http.MethodGet, "/", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec := httptest.NewRecorder()
e.ServeHTTP(rec, req)
assert.Equal(t, http.StatusNotFound, rec.Code)
@@ -259,12 +341,12 @@ func TestCompressWithStatic(t *testing.T) {
schemes := getTestcases()
for scheme, reader := range schemes {
t.Run(scheme.String(), func(t *testing.T) {
t.Run(scheme, func(t *testing.T) {
e := echo.New()
e.Use(NewWithConfig(Config{Schemes: []Scheme{scheme}}))
e.Use(NewWithConfig(Config{Schemes: []string{scheme}}))
e.Static("/test", "./")
req := httptest.NewRequest(http.MethodGet, "/test/compress.go", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec := httptest.NewRecorder()
e.ServeHTTP(rec, req)
assert.Equal(t, http.StatusOK, rec.Code)
@@ -292,17 +374,17 @@ func BenchmarkCompress(b *testing.B) {
for i := 1; i <= 18; i++ {
datalen := 2 << i
data := []byte(rand.String(datalen))
data := rand.Bytes(datalen)
for scheme := range schemes {
name := fmt.Sprintf("%s-%d", scheme.String(), datalen)
name := fmt.Sprintf("%s-%d", scheme, datalen)
b.Run(name, func(b *testing.B) {
e := echo.New()
req := httptest.NewRequest(http.MethodGet, "/", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
h := NewWithConfig(Config{Level: BestSpeed, Schemes: []Scheme{scheme}})(func(c echo.Context) error {
h := NewWithConfig(Config{Level: BestSpeed, Schemes: []string{scheme}})(func(c echo.Context) error {
c.Response().Write(data)
return nil
})
@@ -327,13 +409,13 @@ func BenchmarkCompressJSON(b *testing.B) {
schemes := getTestcases()
for scheme := range schemes {
b.Run(scheme.String(), func(b *testing.B) {
b.Run(scheme, func(b *testing.B) {
e := echo.New()
req := httptest.NewRequest(http.MethodGet, "/", nil)
req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
req.Header.Set(echo.HeaderAcceptEncoding, scheme)
h := NewWithConfig(Config{Level: BestSpeed, Schemes: []Scheme{scheme}})(func(c echo.Context) error {
h := NewWithConfig(Config{Level: BestSpeed, Schemes: []string{scheme}})(func(c echo.Context) error {
c.Response().Write(data)
return nil
})

View File

@@ -0,0 +1,55 @@
package compress
import (
"compress/gzip"
"io"
"sync"
)
type gogzipImpl struct {
pool sync.Pool
}
func NewGoGzip(level Level) Compression {
gzipLevel := gzip.DefaultCompression
if level == BestCompression {
gzipLevel = gzip.BestCompression
} else if level == BestSpeed {
gzipLevel = gzip.BestSpeed
}
g := &gogzipImpl{
pool: sync.Pool{
New: func() interface{} {
w, err := gzip.NewWriterLevel(io.Discard, gzipLevel)
if err != nil {
return nil
}
return w
},
},
}
return g
}
func (g *gogzipImpl) Acquire() Compressor {
c := g.pool.Get()
if c == nil {
return nil
}
x, ok := c.(Compressor)
if !ok {
return nil
}
x.Reset(io.Discard)
return x
}
func (g *gogzipImpl) Release(c Compressor) {
c.Reset(io.Discard)
g.pool.Put(c)
}
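
A hedged usage sketch, not part of this commit, showing the acquire/reset/write/close/release lifecycle the middleware expects from a Compression pool such as the stdlib-based NewGoGzip above (the helper name is made up for illustration).

package compress

import (
    "bytes"
    "fmt"
)

// compressOnce gzips a body into a buffer using the pooled compressor.
func compressOnce(body []byte) (*bytes.Buffer, error) {
    pool := NewGoGzip(BestSpeed)

    c := pool.Acquire()
    if c == nil {
        return nil, fmt.Errorf("failed to acquire compressor")
    }
    // Release resets the writer to io.Discard and puts it back into the pool.
    defer pool.Release(c)

    buf := &bytes.Buffer{}
    c.Reset(buf) // direct the compressed output at our buffer

    if _, err := c.Write(body); err != nil {
        return nil, err
    }
    if err := c.Close(); err != nil { // flush the remaining compressed bytes
        return nil, err
    }

    return buf, nil
}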

View File

@@ -15,7 +15,7 @@ func NewGzip(level Level) Compression {
gzipLevel := gzip.DefaultCompression
if level == BestCompression {
gzipLevel = gzip.BestCompression
} else {
} else if level == BestSpeed {
gzipLevel = gzip.BestSpeed
}

View File

@@ -15,7 +15,7 @@ func NewZstd(level Level) Compression {
zstdLevel := zstd.SpeedDefault
if level == BestCompression {
zstdLevel = zstd.SpeedBestCompression
} else {
} else if level == BestSpeed {
zstdLevel = zstd.SpeedFastest
}

View File

@@ -29,7 +29,7 @@ func (h *handler) handleHLS(c echo.Context, ctxuser string, data map[string]inte
return next(c)
}
func (h *handler) handleHLSIngress(c echo.Context, ctxuser string, data map[string]interface{}, next echo.HandlerFunc) error {
func (h *handler) handleHLSIngress(c echo.Context, _ string, data map[string]interface{}, next echo.HandlerFunc) error {
req := c.Request()
path := req.URL.Path
@@ -97,7 +97,7 @@ func (h *handler) handleHLSIngress(c echo.Context, ctxuser string, data map[stri
return next(c)
}
func (h *handler) handleHLSEgress(c echo.Context, ctxuser string, data map[string]interface{}, next echo.HandlerFunc) error {
func (h *handler) handleHLSEgress(c echo.Context, _ string, data map[string]interface{}, next echo.HandlerFunc) error {
req := c.Request()
res := c.Response()

View File

@@ -102,12 +102,19 @@ type Config struct {
IAM iam.IAM
IAMSkipper func(ip string) bool
Resources resources.Resources
Compress CompressConfig
}
type CorsConfig struct {
Origins []string
}
type CompressConfig struct {
Encoding []string
MimeTypes []string
MinLength int
}
type server struct {
logger log.Logger
@@ -143,8 +150,10 @@ type server struct {
iam echo.MiddlewareFunc
}
gzip struct {
compress struct {
encoding []string
mimetypes []string
minLength int
}
filesystems map[string]*filesystem
@@ -375,15 +384,9 @@ func NewServer(config Config) (serverhandler.Server, error) {
IAM: config.IAM,
}, "/api/graph/query")
s.gzip.mimetypes = []string{
"text/plain",
"text/html",
"text/javascript",
"application/json",
//"application/x-mpegurl",
//"application/vnd.apple.mpegurl",
"image/svg+xml",
}
s.compress.encoding = config.Compress.Encoding
s.compress.mimetypes = config.Compress.MimeTypes
s.compress.minLength = config.Compress.MinLength
s.router = echo.New()
s.router.JSONSerializer = &GoJSONSerializer{}
@@ -409,6 +412,13 @@ func NewServer(config Config) (serverhandler.Server, error) {
s.router.Use(s.middleware.iam)
s.router.Use(mwcompress.NewWithConfig(mwcompress.Config{
Level: mwcompress.BestSpeed,
MinLength: config.Compress.MinLength,
Schemes: config.Compress.Encoding,
ContentTypes: config.Compress.MimeTypes,
}))
s.router.Use(mwsession.NewWithConfig(mwsession.Config{
HLSIngressCollector: config.Sessions.Collector("hlsingress"),
HLSEgressCollector: config.Sessions.Collector("hls"),
@@ -487,13 +497,6 @@ func (s *server) HTTPStatus() map[int]uint64 {
}
func (s *server) setRoutes() {
gzipMiddleware := mwcompress.NewWithConfig(mwcompress.Config{
Skipper: mwcompress.ContentTypeSkipper(nil),
Level: mwcompress.BestSpeed,
MinLength: 1000,
Schemes: []mwcompress.Scheme{mwcompress.GzipScheme},
})
// API router group
api := s.router.Group("/api")
@@ -509,7 +512,6 @@ func (s *server) setRoutes() {
// Swagger API documentation router group
doc := s.router.Group("/api/swagger/*")
doc.Use(gzipMiddleware)
doc.GET("", echoSwagger.WrapHandler)
// Mount filesystems
@@ -528,15 +530,6 @@ func (s *server) setRoutes() {
DefaultContentType: filesystem.DefaultContentType,
}))
if filesystem.Gzip {
fs.Use(mwcompress.NewWithConfig(mwcompress.Config{
Skipper: mwcompress.ContentTypeSkipper(s.gzip.mimetypes),
Level: mwcompress.BestSpeed,
MinLength: 1000,
Schemes: []mwcompress.Scheme{mwcompress.GzipScheme},
}))
}
if filesystem.Cache != nil {
mwcache := mwcache.NewWithConfig(mwcache.Config{
Cache: filesystem.Cache,
@@ -590,7 +583,7 @@ func (s *server) setRoutes() {
// GraphQL
graphql := api.Group("/graph")
graphql.Use(gzipMiddleware)
//graphql.Use(gzipMiddleware)
graphql.GET("", s.handler.graph.Playground)
graphql.POST("/query", s.handler.graph.Query)
@@ -598,7 +591,7 @@ func (s *server) setRoutes() {
// APIv3 router group
v3 := api.Group("/v3")
v3.Use(gzipMiddleware)
//v3.Use(gzipMiddleware)
s.setRoutesV3(v3)
}
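
Illustrative only: how a caller might fill the new CompressConfig when constructing the HTTP server. The field names come from the diff above; the import alias and the concrete values are assumptions, echoing the hard-coded defaults this commit removes.

package main

import (
    httpd "github.com/datarhei/core/v16/http" // assumed import path and alias
)

func newServerConfig() httpd.Config {
    return httpd.Config{
        // ... Logger, IAM, Filesystems, Sessions, and the other fields go here ...
        Compress: httpd.CompressConfig{
            Encoding: []string{"gzip", "br", "zstd"},
            MimeTypes: []string{
                "text/plain",
                "text/html",
                "text/javascript",
                "application/json",
                "image/svg+xml",
            },
            MinLength: 1000, // same threshold the removed per-route middleware used
        },
    }
}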