refactor(backend): storage config for session records

commit 84a11213e4 (parent e20439d132)
Author: pycook
Date: 2025-06-14 21:45:32 +08:00

26 changed files with 3804 additions and 113 deletions

View File

@@ -16,6 +16,7 @@ require (
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.3
github.com/mattn/go-runewidth v0.0.16
github.com/minio/minio-go/v7 v7.0.76
github.com/nicksnyder/go-i18n/v2 v2.4.0
github.com/oklog/run v1.1.0
github.com/pkg/sftp v1.13.6
@@ -46,19 +47,23 @@ require (
github.com/charmbracelet/x/input v0.1.0 // indirect
github.com/charmbracelet/x/term v0.1.1 // indirect
github.com/charmbracelet/x/windows v0.1.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.5.5 // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-localereader v0.0.1 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
github.com/muesli/cancelreader v0.2.2 // indirect
github.com/muesli/termenv v0.15.2 // indirect
github.com/rs/xid v1.6.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
golang.org/x/term v0.23.0 // indirect
)
require (
@@ -83,13 +88,13 @@ require (
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.20.0 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
- github.com/goccy/go-json v0.10.2 // indirect
+ github.com/goccy/go-json v0.10.3 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/cpuid/v2 v2.2.7 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.8 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
@@ -110,7 +115,7 @@ require (
go.uber.org/multierr v1.10.0 // indirect
golang.org/x/arch v0.8.0 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
- golang.org/x/net v0.27.0 // indirect
+ golang.org/x/net v0.28.0 // indirect
golang.org/x/sys v0.24.0 // indirect
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
google.golang.org/protobuf v1.34.1 // indirect
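
Note on the dependency changes above: github.com/minio/minio-go/v7 is the headline addition, an S3-compatible client that backs the new remote storage providers; go-humanize, go-ini, klauspost/compress, minio/md5-simd, and rs/xid arrive alongside it as indirect dependencies. A minimal sketch of constructing such a client (endpoint, credentials, and bucket name are placeholders, not values from this commit):

	package main

	import (
		"context"
		"log"

		"github.com/minio/minio-go/v7"
		"github.com/minio/minio-go/v7/pkg/credentials"
	)

	func main() {
		// Placeholder endpoint and credentials, for illustration only.
		client, err := minio.New("minio.example.com:9000", &minio.Options{
			Creds:  credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
			Secure: true,
		})
		if err != nil {
			log.Fatal(err)
		}
		// Probe a bucket to verify connectivity (bucket name is hypothetical).
		exists, err := client.BucketExists(context.Background(), "oneterm-replays")
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("bucket exists: %v", exists)
	}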

View File

@@ -49,6 +49,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
@@ -69,6 +71,8 @@ github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE=
github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
@@ -91,8 +95,8 @@ github.com/go-resty/resty/v2 v2.14.0 h1:/rhkzsAqGQkozwfKS5aFAbb6TyKd3zyFRWcdRXLP
github.com/go-resty/resty/v2 v2.14.0/go.mod h1:IW6mekUOsElt9C7oWr0XRt9BNSD6D5rr9mhk6NjmNHg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
- github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
- github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+ github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
+ github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -120,9 +124,12 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
- github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+ github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
+ github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
@@ -154,6 +161,10 @@ github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6T
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.3 h1:j7a/xn1U6TKA/PHHxqZuzh64CdtRc7rU9M+AvkOl5bA=
github.com/mattn/go-sqlite3 v1.14.3/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.76 h1:9nxHH2XDai61cT/EFhyIw/wW4vJfpPNvl7lSFpRt+Ng=
github.com/minio/minio-go/v7 v7.0.76/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -186,6 +197,8 @@ github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
@@ -269,8 +282,9 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
- golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
- golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
+ golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
+ golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

View File

@@ -31,6 +31,7 @@ func initDB() {
model.DefaultConfig, model.DefaultFileHistory, model.DefaultGateway, model.DefaultHistory,
model.DefaultNode, model.DefaultPublicKey, model.DefaultSession, model.DefaultSessionCmd,
model.DefaultShare, model.DefaultQuickCommand, model.DefaultUserPreference,
model.DefaultStorageConfig, model.DefaultStorageMetrics,
); err != nil {
logger.L().Fatal("Failed to init database", zap.Error(err))
}
@@ -40,21 +41,38 @@ func initDB() {
}
acl.MigrateNode()
}
func initServices() {
service.InitAuthorizationService()
fileservice.InitFileService()
}
func initStorage() error {
service.InitStorageService()
if service.DefaultStorageService == nil {
// Return an error so the fatal check in RunApi actually fires
return fmt.Errorf("storage service initialization failed")
}
logger.L().Info("Storage system initialization completed successfully")
// Initialize storage cleaner service
service.InitStorageCleanerService()
return nil
}
func RunApi() error {
initDB()
initServices()
// Initialize storage
if err := initStorage(); err != nil {
logger.L().Fatal("Failed to init storage", zap.Error(err))
}
r := gin.New()
router.SetupRouter(r)
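
Initialization order here matters: the database comes up first (the new storage_configs and storage_metrics tables are migrated in initDB), storage second (its configurations live in those tables), and the gin router last, so no handler can observe a half-initialized storage service.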

View File

@@ -861,14 +861,14 @@ func (c *Controller) RDPFileDownload(ctx *gin.Context) {
// @Accept json
// @Produce json
// @Param session_id path string true "Session ID"
- // @Param request body fileservice.RDPMkdirRequest true "Directory creation request"
+ // @Param path query string true "Directory path"
// @Success 200 {object} HttpResponse
// @Router /rdp/sessions/{session_id}/files/mkdir [post]
func (c *Controller) RDPFileMkdir(ctx *gin.Context) {
sessionId := ctx.Param("session_id")
- var req fileservice.RDPMkdirRequest
- if err := ctx.ShouldBindJSON(&req); err != nil {
+ path := ctx.Query("path")
+ if path == "" {
ctx.JSON(http.StatusBadRequest, HttpResponse{
Code: http.StatusBadRequest,
Message: "Invalid request parameters",
@@ -902,7 +902,7 @@ func (c *Controller) RDPFileMkdir(ctx *gin.Context) {
}
// Send mkdir request through Guacamole protocol
- err := fileservice.CreateRDPDirectory(tunnel, req.Path)
+ err := fileservice.CreateRDPDirectory(tunnel, path)
if err != nil {
logger.L().Error("Failed to create directory in RDP session", zap.Error(err))
ctx.JSON(http.StatusInternalServerError, HttpResponse{
@@ -913,7 +913,7 @@ func (c *Controller) RDPFileMkdir(ctx *gin.Context) {
}
// Record file operation history using session-based method
- if err := fileservice.DefaultFileService.RecordFileHistoryBySession(ctx, sessionId, "mkdir", req.Path); err != nil {
+ if err := fileservice.DefaultFileService.RecordFileHistoryBySession(ctx, sessionId, "mkdir", path); err != nil {
logger.L().Error("Failed to record file history", zap.Error(err))
}
@@ -922,7 +922,7 @@ func (c *Controller) RDPFileMkdir(ctx *gin.Context) {
Message: "ok",
Data: gin.H{
"message": "Directory created successfully",
"path": req.Path,
"path": path,
},
})
}
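
The mkdir endpoint changes shape here: the target directory moves from a JSON body (fileservice.RDPMkdirRequest) to a path query parameter, so a call now looks like POST /rdp/sessions/{session_id}/files/mkdir?path=/upload/new-dir (illustrative path; the route is registered under the rdp group in the router further below). The swagger files later in this diff track the same change.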

View File

@@ -1,15 +1,17 @@
package controller
import (
"fmt"
"io"
"net/http"
"path/filepath"
"github.com/gin-gonic/gin"
"go.uber.org/zap"
"github.com/veops/oneterm/internal/model"
"github.com/veops/oneterm/internal/service"
"github.com/veops/oneterm/pkg/config"
"github.com/veops/oneterm/pkg/errors"
"github.com/veops/oneterm/pkg/logger"
)
var (
@@ -146,11 +148,22 @@ func (c *Controller) CreateSessionReplay(ctx *gin.Context) {
func (c *Controller) GetSessionReplay(ctx *gin.Context) {
sessionId := ctx.Param("session_id")
- filename, err := sessionService.GetSessionReplayFilename(ctx, sessionId)
+ // Try to get replay from storage service or local file system
+ replayReader, err := sessionService.GetSessionReplay(ctx, sessionId)
if err != nil {
- ctx.AbortWithError(http.StatusInternalServerError, &errors.ApiError{Code: errors.ErrInternal, Data: map[string]any{"err": err}})
+ ctx.AbortWithError(http.StatusNotFound, &errors.ApiError{Code: errors.ErrInternal, Data: map[string]any{"err": err}})
return
}
+ defer replayReader.Close()
- ctx.FileAttachment(filepath.Join(config.Cfg.Session.ReplayDir, filename), filename)
+ // Stream the file content
+ filename := fmt.Sprintf("%s.cast", sessionId)
+ ctx.Header("Content-Disposition", fmt.Sprintf("attachment; filename=%s", filename))
+ ctx.Header("Content-Type", "application/octet-stream")
+ _, err = io.Copy(ctx.Writer, replayReader)
+ if err != nil {
+ logger.L().Error("Failed to stream replay file", zap.String("session_id", sessionId), zap.Error(err))
+ return
+ }
}
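
GetSessionReplay now streams from whichever backend holds the file instead of attaching a file from the local replay directory, and downloads are named <session_id>.cast. One hedged suggestion: RFC 6266 recommends quoting the Content-Disposition filename (e.g. filename=%q in the Sprintf) in case session IDs ever contain reserved characters.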

View File

@@ -0,0 +1,317 @@
package controller
import (
"errors"
"net/http"
"github.com/gin-gonic/gin"
"github.com/spf13/cast"
"gorm.io/gorm"
"github.com/veops/oneterm/internal/acl"
"github.com/veops/oneterm/internal/model"
"github.com/veops/oneterm/internal/service"
myErrors "github.com/veops/oneterm/pkg/errors"
)
// Caution: captured at package init, before RunApi calls InitStorageService;
// handlers relying on this must not run until storage is initialized.
var storageService = service.DefaultStorageService
// ListStorageConfigs godoc
//
// @Tags storage
// @Summary List all storage configurations
// @Param page_index query int false "page_index"
// @Param page_size query int false "page_size"
// @Param search query string false "search"
// @Param type query string false "storage type filter"
// @Param enabled query string false "enabled filter (true/false)"
// @Param primary query string false "primary filter (true/false)"
// @Success 200 {object} HttpResponse{data=ListData{list=[]model.StorageConfig}}
// @Router /storage/configs [get]
func (c *Controller) ListStorageConfigs(ctx *gin.Context) {
currentUser, _ := acl.GetSessionFromCtx(ctx)
if !acl.IsAdmin(currentUser) {
ctx.AbortWithError(http.StatusForbidden, &myErrors.ApiError{Code: myErrors.ErrNoPerm, Data: map[string]any{"perm": acl.READ}})
return
}
db := storageService.BuildQuery(ctx)
doGet[*model.StorageConfig](ctx, false, db, "")
}
// GetStorageConfig godoc
//
// @Tags storage
// @Summary Get storage configuration by ID
// @Param id path int true "Storage ID"
// @Success 200 {object} HttpResponse{data=model.StorageConfig}
// @Router /storage/configs/{id} [get]
func (c *Controller) GetStorageConfig(ctx *gin.Context) {
currentUser, _ := acl.GetSessionFromCtx(ctx)
if !acl.IsAdmin(currentUser) {
ctx.AbortWithError(http.StatusForbidden, &myErrors.ApiError{Code: myErrors.ErrNoPerm, Data: map[string]any{"perm": acl.READ}})
return
}
baseService := service.NewBaseService()
id, err := cast.ToIntE(ctx.Param("id"))
if err != nil {
ctx.AbortWithError(http.StatusBadRequest, &myErrors.ApiError{Code: myErrors.ErrInvalidArgument, Data: map[string]any{"err": err}})
return
}
config := &model.StorageConfig{}
if err := baseService.GetById(ctx, id, config); err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
ctx.AbortWithError(http.StatusNotFound, &myErrors.ApiError{Code: myErrors.ErrInternal, Data: map[string]any{"err": "storage config not found"}})
return
}
ctx.AbortWithError(http.StatusInternalServerError, &myErrors.ApiError{Code: myErrors.ErrInternal, Data: map[string]any{"err": err}})
return
}
ctx.JSON(http.StatusOK, HttpResponse{Data: config})
}
// CreateStorageConfig godoc
//
// @Tags storage
// @Summary Create a new storage configuration
// @Param config body model.StorageConfig true "Storage configuration"
// @Success 200 {object} HttpResponse{}
// @Router /storage/configs [post]
func (c *Controller) CreateStorageConfig(ctx *gin.Context) {
currentUser, _ := acl.GetSessionFromCtx(ctx)
if !acl.IsAdmin(currentUser) {
ctx.AbortWithError(http.StatusForbidden, &myErrors.ApiError{Code: myErrors.ErrNoPerm, Data: map[string]any{"perm": acl.WRITE}})
return
}
doCreate(ctx, false, &model.StorageConfig{}, "", func(ctx *gin.Context, config *model.StorageConfig) {
// Custom validation for storage config
if err := validateStorageConfig(config); err != nil {
ctx.AbortWithError(http.StatusBadRequest, &myErrors.ApiError{Code: myErrors.ErrInvalidArgument, Data: map[string]any{"err": err}})
return
}
// Initialize storage provider after creation
if provider, err := storageService.CreateProvider(config); err == nil {
// Test connection
if err := provider.HealthCheck(ctx); err != nil {
ctx.AbortWithError(http.StatusBadRequest, &myErrors.ApiError{Code: myErrors.ErrInvalidArgument, Data: map[string]any{"err": err}})
return
}
}
})
}
// UpdateStorageConfig godoc
//
// @Tags storage
// @Summary Update an existing storage configuration
// @Param id path int true "Storage ID"
// @Param config body model.StorageConfig true "Storage configuration"
// @Success 200 {object} HttpResponse{}
// @Router /storage/configs/{id} [put]
func (c *Controller) UpdateStorageConfig(ctx *gin.Context) {
currentUser, _ := acl.GetSessionFromCtx(ctx)
if !acl.IsAdmin(currentUser) {
ctx.AbortWithError(http.StatusForbidden, &myErrors.ApiError{Code: myErrors.ErrNoPerm, Data: map[string]any{"perm": acl.WRITE}})
return
}
doUpdate(ctx, false, &model.StorageConfig{}, "", func(ctx *gin.Context, config *model.StorageConfig) {
// Custom validation for storage config
if err := validateStorageConfig(config); err != nil {
ctx.AbortWithError(http.StatusBadRequest, &myErrors.ApiError{Code: myErrors.ErrInvalidArgument, Data: map[string]any{"err": err}})
return
}
// Test connection after update
if provider, err := storageService.CreateProvider(config); err == nil {
if err := provider.HealthCheck(ctx); err != nil {
ctx.AbortWithError(http.StatusBadRequest, &myErrors.ApiError{Code: myErrors.ErrInvalidArgument, Data: map[string]any{"err": err}})
return
}
}
})
}
// DeleteStorageConfig godoc
//
// @Tags storage
// @Summary Delete a storage configuration
// @Param id path int true "Storage ID"
// @Success 200 {object} HttpResponse{}
// @Router /storage/configs/{id} [delete]
func (c *Controller) DeleteStorageConfig(ctx *gin.Context) {
currentUser, _ := acl.GetSessionFromCtx(ctx)
if !acl.IsAdmin(currentUser) {
ctx.AbortWithError(http.StatusForbidden, &myErrors.ApiError{Code: myErrors.ErrNoPerm, Data: map[string]any{"perm": acl.WRITE}})
return
}
doDelete(ctx, false, &model.StorageConfig{}, "", func(ctx *gin.Context, id int) {
// Custom validation: check if it's the primary storage
config := &model.StorageConfig{}
baseService := service.NewBaseService()
if err := baseService.GetById(ctx, id, config); err == nil && config.IsPrimary {
ctx.AbortWithError(http.StatusBadRequest, &myErrors.ApiError{Code: myErrors.ErrInvalidArgument, Data: map[string]any{"err": "cannot delete primary storage"}})
return
}
})
}
// TestStorageConnection godoc
//
// @Tags storage
// @Summary Test storage connection
// @Param config body model.StorageConfig true "Storage configuration to test"
// @Success 200 {object} HttpResponse{}
// @Router /storage/test-connection [post]
func (c *Controller) TestStorageConnection(ctx *gin.Context) {
currentUser, _ := acl.GetSessionFromCtx(ctx)
if !acl.IsAdmin(currentUser) {
ctx.AbortWithError(http.StatusForbidden, &myErrors.ApiError{Code: myErrors.ErrNoPerm, Data: map[string]any{"perm": acl.WRITE}})
return
}
config := &model.StorageConfig{}
if err := ctx.ShouldBindJSON(config); err != nil {
ctx.AbortWithError(http.StatusBadRequest, &myErrors.ApiError{Code: myErrors.ErrInvalidArgument, Data: map[string]any{"err": err}})
return
}
// Create a temporary provider to test connection
provider, err := storageService.CreateProvider(config)
if err != nil {
ctx.AbortWithError(http.StatusBadRequest, &myErrors.ApiError{Code: myErrors.ErrInvalidArgument, Data: map[string]any{"err": err}})
return
}
// Perform health check
if err := provider.HealthCheck(ctx); err != nil {
ctx.AbortWithError(http.StatusBadRequest, &myErrors.ApiError{Code: myErrors.ErrInvalidArgument, Data: map[string]any{"err": err}})
return
}
ctx.JSON(http.StatusOK, defaultHttpResponse)
}
// GetStorageHealth godoc
//
// @Tags storage
// @Summary Get health status of all storage providers
// @Success 200 {object} HttpResponse{data=map[string]any}
// @Router /storage/health [get]
func (c *Controller) GetStorageHealth(ctx *gin.Context) {
currentUser, _ := acl.GetSessionFromCtx(ctx)
if !acl.IsAdmin(currentUser) {
ctx.AbortWithError(http.StatusForbidden, &myErrors.ApiError{Code: myErrors.ErrNoPerm, Data: map[string]any{"perm": acl.READ}})
return
}
healthResults := storageService.HealthCheck(ctx)
// Convert error map to a more API-friendly format
healthStatus := make(map[string]map[string]interface{})
for name, err := range healthResults {
status := map[string]interface{}{"healthy": err == nil}
if err != nil {
// error values marshal to "{}" in JSON; expose the message instead
status["error"] = err.Error()
}
healthStatus[name] = status
}
ctx.JSON(http.StatusOK, HttpResponse{Data: healthStatus})
}
// SetPrimaryStorage godoc
//
// @Tags storage
// @Summary Set a storage provider as primary
// @Param id path int true "Storage ID"
// @Success 200 {object} HttpResponse{}
// @Router /storage/configs/{id}/set-primary [put]
func (c *Controller) SetPrimaryStorage(ctx *gin.Context) {
currentUser, _ := acl.GetSessionFromCtx(ctx)
if !acl.IsAdmin(currentUser) {
ctx.AbortWithError(http.StatusForbidden, &myErrors.ApiError{Code: myErrors.ErrNoPerm, Data: map[string]any{"perm": acl.WRITE}})
return
}
id, err := cast.ToIntE(ctx.Param("id"))
if err != nil {
ctx.AbortWithError(http.StatusBadRequest, &myErrors.ApiError{Code: myErrors.ErrInvalidArgument, Data: map[string]any{"err": err}})
return
}
// Get current config
baseService := service.NewBaseService()
config := &model.StorageConfig{}
if err := baseService.GetById(ctx, id, config); err != nil {
ctx.AbortWithError(http.StatusInternalServerError, &myErrors.ApiError{Code: myErrors.ErrInternal, Data: map[string]any{"err": err}})
return
}
// Update to set as primary
config.IsPrimary = true
config.UpdaterId = currentUser.GetUid()
if err := storageService.UpdateStorageConfig(ctx, config); err != nil {
ctx.AbortWithError(http.StatusInternalServerError, &myErrors.ApiError{Code: myErrors.ErrInternal, Data: map[string]any{"err": err}})
return
}
ctx.JSON(http.StatusOK, defaultHttpResponse)
}
// ToggleStorageProvider godoc
//
// @Tags storage
// @Summary Enable or disable a storage provider
// @Param id path int true "Storage ID"
// @Success 200 {object} HttpResponse{}
// @Router /storage/configs/{id}/toggle [put]
func (c *Controller) ToggleStorageProvider(ctx *gin.Context) {
currentUser, _ := acl.GetSessionFromCtx(ctx)
if !acl.IsAdmin(currentUser) {
ctx.AbortWithError(http.StatusForbidden, &myErrors.ApiError{Code: myErrors.ErrNoPerm, Data: map[string]any{"perm": acl.WRITE}})
return
}
id, err := cast.ToIntE(ctx.Param("id"))
if err != nil {
ctx.AbortWithError(http.StatusBadRequest, &myErrors.ApiError{Code: myErrors.ErrInvalidArgument, Data: map[string]any{"err": err}})
return
}
// Get current config
baseService := service.NewBaseService()
config := &model.StorageConfig{}
if err := baseService.GetById(ctx, id, config); err != nil {
ctx.AbortWithError(http.StatusInternalServerError, &myErrors.ApiError{Code: myErrors.ErrInternal, Data: map[string]any{"err": err}})
return
}
// Toggle enabled status
config.Enabled = !config.Enabled
config.UpdaterId = currentUser.GetUid()
if err := storageService.UpdateStorageConfig(ctx, config); err != nil {
ctx.AbortWithError(http.StatusInternalServerError, &myErrors.ApiError{Code: myErrors.ErrInternal, Data: map[string]any{"err": err}})
return
}
ctx.JSON(http.StatusOK, HttpResponse{Data: map[string]bool{"enabled": config.Enabled}})
}
// validateStorageConfig validates storage configuration
func validateStorageConfig(config *model.StorageConfig) error {
if config.Name == "" {
return &myErrors.ApiError{Code: myErrors.ErrInvalidArgument, Data: map[string]any{"err": "storage name is required"}}
}
if config.Type == "" {
return &myErrors.ApiError{Code: myErrors.ErrInvalidArgument, Data: map[string]any{"err": "storage type is required"}}
}
return nil
}
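
validateStorageConfig only checks name and type before the controller attempts a provider health check. A stricter per-type check could look like the sketch below; the required key names per provider are assumptions for illustration, not defined by this commit.

	package controller

	import (
		"fmt"

		"github.com/veops/oneterm/internal/model"
	)

	// validateProviderKeys requires the provider-specific connection keys to be
	// present before a health check is attempted (key names are hypothetical).
	func validateProviderKeys(config *model.StorageConfig) error {
		required := map[model.StorageType][]string{
			model.StorageTypeLocal: {"base_path"},
			model.StorageTypeS3:    {"endpoint", "bucket", "access_key", "secret_key"},
			model.StorageTypeMinio: {"endpoint", "bucket", "access_key", "secret_key"},
		}
		for _, key := range required[config.Type] {
			if config.Config[key] == "" {
				return fmt.Errorf("storage type %s requires config key %q", config.Type, key)
			}
		}
		return nil
	}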

View File

@@ -2219,13 +2219,11 @@ const docTemplate = `{
"required": true
},
{
"description": "Directory creation request",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/service.RDPMkdirRequest"
}
"type": "string",
"description": "Directory path",
"name": "path",
"in": "query",
"required": true
}
],
"responses": {
@@ -3096,6 +3094,310 @@ const docTemplate = `{
}
}
}
},
"/storage/configs": {
"get": {
"tags": [
"storage"
],
"summary": "List all storage configurations",
"parameters": [
{
"type": "integer",
"description": "page_index",
"name": "page_index",
"in": "query"
},
{
"type": "integer",
"description": "page_size",
"name": "page_size",
"in": "query"
},
{
"type": "string",
"description": "search",
"name": "search",
"in": "query"
},
{
"type": "string",
"description": "storage type filter",
"name": "type",
"in": "query"
},
{
"type": "string",
"description": "enabled filter (true/false)",
"name": "enabled",
"in": "query"
},
{
"type": "string",
"description": "primary filter (true/false)",
"name": "primary",
"in": "query"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"allOf": [
{
"$ref": "#/definitions/controller.HttpResponse"
},
{
"type": "object",
"properties": {
"data": {
"allOf": [
{
"$ref": "#/definitions/controller.ListData"
},
{
"type": "object",
"properties": {
"list": {
"type": "array",
"items": {
"$ref": "#/definitions/model.StorageConfig"
}
}
}
}
]
}
}
}
]
}
}
}
},
"post": {
"tags": [
"storage"
],
"summary": "Create a new storage configuration",
"parameters": [
{
"description": "Storage configuration",
"name": "config",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/model.StorageConfig"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/controller.HttpResponse"
}
}
}
}
},
"/storage/configs/{id}": {
"get": {
"tags": [
"storage"
],
"summary": "Get storage configuration by ID",
"parameters": [
{
"type": "integer",
"description": "Storage ID",
"name": "id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"allOf": [
{
"$ref": "#/definitions/controller.HttpResponse"
},
{
"type": "object",
"properties": {
"data": {
"$ref": "#/definitions/model.StorageConfig"
}
}
}
]
}
}
}
},
"put": {
"tags": [
"storage"
],
"summary": "Update an existing storage configuration",
"parameters": [
{
"type": "integer",
"description": "Storage ID",
"name": "id",
"in": "path",
"required": true
},
{
"description": "Storage configuration",
"name": "config",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/model.StorageConfig"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/controller.HttpResponse"
}
}
}
},
"delete": {
"tags": [
"storage"
],
"summary": "Delete a storage configuration",
"parameters": [
{
"type": "integer",
"description": "Storage ID",
"name": "id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/controller.HttpResponse"
}
}
}
}
},
"/storage/configs/{id}/set-primary": {
"put": {
"tags": [
"storage"
],
"summary": "Set a storage provider as primary",
"parameters": [
{
"type": "integer",
"description": "Storage ID",
"name": "id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/controller.HttpResponse"
}
}
}
}
},
"/storage/configs/{id}/toggle": {
"put": {
"tags": [
"storage"
],
"summary": "Enable or disable a storage provider",
"parameters": [
{
"type": "integer",
"description": "Storage ID",
"name": "id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/controller.HttpResponse"
}
}
}
}
},
"/storage/health": {
"get": {
"tags": [
"storage"
],
"summary": "Get health status of all storage providers",
"responses": {
"200": {
"description": "OK",
"schema": {
"allOf": [
{
"$ref": "#/definitions/controller.HttpResponse"
},
{
"type": "object",
"properties": {
"data": {
"type": "object",
"additionalProperties": true
}
}
}
]
}
}
}
}
},
"/storage/test-connection": {
"post": {
"tags": [
"storage"
],
"summary": "Test storage connection",
"parameters": [
{
"description": "Storage configuration to test",
"name": "config",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/model.StorageConfig"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/controller.HttpResponse"
}
}
}
}
}
},
"definitions": {
@@ -3906,6 +4208,71 @@ const docTemplate = `{
}
}
},
"model.StorageConfig": {
"type": "object",
"properties": {
"config": {
"$ref": "#/definitions/model.StorageConfigMap"
},
"created_at": {
"type": "string"
},
"creator_id": {
"description": "Standard fields",
"type": "integer"
},
"description": {
"type": "string"
},
"enabled": {
"type": "boolean"
},
"id": {
"type": "integer"
},
"is_primary": {
"type": "boolean"
},
"name": {
"type": "string"
},
"priority": {
"type": "integer"
},
"type": {
"$ref": "#/definitions/model.StorageType"
},
"updated_at": {
"type": "string"
},
"updater_id": {
"type": "integer"
}
}
},
"model.StorageConfigMap": {
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"model.StorageType": {
"type": "string",
"enum": [
"local",
"s3",
"minio",
"oss",
"cos"
],
"x-enum-varnames": [
"StorageTypeLocal",
"StorageTypeS3",
"StorageTypeMinio",
"StorageTypeOSS",
"StorageTypeCOS"
]
},
"model.UserPreference": {
"type": "object",
"properties": {
@@ -3966,17 +4333,6 @@ const docTemplate = `{
"type": "boolean"
}
}
},
"service.RDPMkdirRequest": {
"type": "object",
"required": [
"path"
],
"properties": {
"path": {
"type": "string"
}
}
}
}
}`
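
docs.go, swagger.json, and swagger.yaml are generated by swag from the // @ annotations on the controller methods, which is why the same /storage paths and model.StorageConfig definitions appear in triplicate in this diff; regenerate them with swag init rather than editing by hand.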

View File

@@ -2208,13 +2208,11 @@
"required": true
},
{
"description": "Directory creation request",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/service.RDPMkdirRequest"
}
"type": "string",
"description": "Directory path",
"name": "path",
"in": "query",
"required": true
}
],
"responses": {
@@ -3085,6 +3083,310 @@
}
}
}
},
"/storage/configs": {
"get": {
"tags": [
"storage"
],
"summary": "List all storage configurations",
"parameters": [
{
"type": "integer",
"description": "page_index",
"name": "page_index",
"in": "query"
},
{
"type": "integer",
"description": "page_size",
"name": "page_size",
"in": "query"
},
{
"type": "string",
"description": "search",
"name": "search",
"in": "query"
},
{
"type": "string",
"description": "storage type filter",
"name": "type",
"in": "query"
},
{
"type": "string",
"description": "enabled filter (true/false)",
"name": "enabled",
"in": "query"
},
{
"type": "string",
"description": "primary filter (true/false)",
"name": "primary",
"in": "query"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"allOf": [
{
"$ref": "#/definitions/controller.HttpResponse"
},
{
"type": "object",
"properties": {
"data": {
"allOf": [
{
"$ref": "#/definitions/controller.ListData"
},
{
"type": "object",
"properties": {
"list": {
"type": "array",
"items": {
"$ref": "#/definitions/model.StorageConfig"
}
}
}
}
]
}
}
}
]
}
}
}
},
"post": {
"tags": [
"storage"
],
"summary": "Create a new storage configuration",
"parameters": [
{
"description": "Storage configuration",
"name": "config",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/model.StorageConfig"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/controller.HttpResponse"
}
}
}
}
},
"/storage/configs/{id}": {
"get": {
"tags": [
"storage"
],
"summary": "Get storage configuration by ID",
"parameters": [
{
"type": "integer",
"description": "Storage ID",
"name": "id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"allOf": [
{
"$ref": "#/definitions/controller.HttpResponse"
},
{
"type": "object",
"properties": {
"data": {
"$ref": "#/definitions/model.StorageConfig"
}
}
}
]
}
}
}
},
"put": {
"tags": [
"storage"
],
"summary": "Update an existing storage configuration",
"parameters": [
{
"type": "integer",
"description": "Storage ID",
"name": "id",
"in": "path",
"required": true
},
{
"description": "Storage configuration",
"name": "config",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/model.StorageConfig"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/controller.HttpResponse"
}
}
}
},
"delete": {
"tags": [
"storage"
],
"summary": "Delete a storage configuration",
"parameters": [
{
"type": "integer",
"description": "Storage ID",
"name": "id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/controller.HttpResponse"
}
}
}
}
},
"/storage/configs/{id}/set-primary": {
"put": {
"tags": [
"storage"
],
"summary": "Set a storage provider as primary",
"parameters": [
{
"type": "integer",
"description": "Storage ID",
"name": "id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/controller.HttpResponse"
}
}
}
}
},
"/storage/configs/{id}/toggle": {
"put": {
"tags": [
"storage"
],
"summary": "Enable or disable a storage provider",
"parameters": [
{
"type": "integer",
"description": "Storage ID",
"name": "id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/controller.HttpResponse"
}
}
}
}
},
"/storage/health": {
"get": {
"tags": [
"storage"
],
"summary": "Get health status of all storage providers",
"responses": {
"200": {
"description": "OK",
"schema": {
"allOf": [
{
"$ref": "#/definitions/controller.HttpResponse"
},
{
"type": "object",
"properties": {
"data": {
"type": "object",
"additionalProperties": true
}
}
}
]
}
}
}
}
},
"/storage/test-connection": {
"post": {
"tags": [
"storage"
],
"summary": "Test storage connection",
"parameters": [
{
"description": "Storage configuration to test",
"name": "config",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/model.StorageConfig"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/controller.HttpResponse"
}
}
}
}
}
},
"definitions": {
@@ -3895,6 +4197,71 @@
}
}
},
"model.StorageConfig": {
"type": "object",
"properties": {
"config": {
"$ref": "#/definitions/model.StorageConfigMap"
},
"created_at": {
"type": "string"
},
"creator_id": {
"description": "Standard fields",
"type": "integer"
},
"description": {
"type": "string"
},
"enabled": {
"type": "boolean"
},
"id": {
"type": "integer"
},
"is_primary": {
"type": "boolean"
},
"name": {
"type": "string"
},
"priority": {
"type": "integer"
},
"type": {
"$ref": "#/definitions/model.StorageType"
},
"updated_at": {
"type": "string"
},
"updater_id": {
"type": "integer"
}
}
},
"model.StorageConfigMap": {
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"model.StorageType": {
"type": "string",
"enum": [
"local",
"s3",
"minio",
"oss",
"cos"
],
"x-enum-varnames": [
"StorageTypeLocal",
"StorageTypeS3",
"StorageTypeMinio",
"StorageTypeOSS",
"StorageTypeCOS"
]
},
"model.UserPreference": {
"type": "object",
"properties": {
@@ -3955,17 +4322,6 @@
"type": "boolean"
}
}
},
"service.RDPMkdirRequest": {
"type": "object",
"required": [
"path"
],
"properties": {
"path": {
"type": "string"
}
}
}
}
}

View File

@@ -532,6 +532,52 @@ definitions:
uid:
type: integer
type: object
model.StorageConfig:
properties:
config:
$ref: '#/definitions/model.StorageConfigMap'
created_at:
type: string
creator_id:
description: Standard fields
type: integer
description:
type: string
enabled:
type: boolean
id:
type: integer
is_primary:
type: boolean
name:
type: string
priority:
type: integer
type:
$ref: '#/definitions/model.StorageType'
updated_at:
type: string
updater_id:
type: integer
type: object
model.StorageConfigMap:
additionalProperties:
type: string
type: object
model.StorageType:
enum:
- local
- s3
- minio
- oss
- cos
type: string
x-enum-varnames:
- StorageTypeLocal
- StorageTypeS3
- StorageTypeMinio
- StorageTypeOSS
- StorageTypeCOS
model.UserPreference:
properties:
created_at:
@@ -573,13 +619,6 @@ definitions:
paste:
type: boolean
type: object
- service.RDPMkdirRequest:
-   properties:
-     path:
-       type: string
-   required:
-   - path
-   type: object
info:
contact: {}
paths:
@@ -1945,12 +1984,11 @@ paths:
name: session_id
required: true
type: string
-   - description: Directory creation request
-     in: body
-     name: request
+   - description: Directory path
+     in: query
+     name: path
      required: true
-     schema:
-       $ref: '#/definitions/service.RDPMkdirRequest'
+     type: string
produces:
- application/json
responses:
@@ -2456,4 +2494,189 @@ paths:
type: object
tags:
- stat
/storage/configs:
get:
parameters:
- description: page_index
in: query
name: page_index
type: integer
- description: page_size
in: query
name: page_size
type: integer
- description: search
in: query
name: search
type: string
- description: storage type filter
in: query
name: type
type: string
- description: enabled filter (true/false)
in: query
name: enabled
type: string
- description: primary filter (true/false)
in: query
name: primary
type: string
responses:
"200":
description: OK
schema:
allOf:
- $ref: '#/definitions/controller.HttpResponse'
- properties:
data:
allOf:
- $ref: '#/definitions/controller.ListData'
- properties:
list:
items:
$ref: '#/definitions/model.StorageConfig'
type: array
type: object
type: object
summary: List all storage configurations
tags:
- storage
post:
parameters:
- description: Storage configuration
in: body
name: config
required: true
schema:
$ref: '#/definitions/model.StorageConfig'
responses:
"200":
description: OK
schema:
$ref: '#/definitions/controller.HttpResponse'
summary: Create a new storage configuration
tags:
- storage
/storage/configs/{id}:
delete:
parameters:
- description: Storage ID
in: path
name: id
required: true
type: integer
responses:
"200":
description: OK
schema:
$ref: '#/definitions/controller.HttpResponse'
summary: Delete a storage configuration
tags:
- storage
get:
parameters:
- description: Storage ID
in: path
name: id
required: true
type: integer
responses:
"200":
description: OK
schema:
allOf:
- $ref: '#/definitions/controller.HttpResponse'
- properties:
data:
$ref: '#/definitions/model.StorageConfig'
type: object
summary: Get storage configuration by ID
tags:
- storage
put:
parameters:
- description: Storage ID
in: path
name: id
required: true
type: integer
- description: Storage configuration
in: body
name: config
required: true
schema:
$ref: '#/definitions/model.StorageConfig'
responses:
"200":
description: OK
schema:
$ref: '#/definitions/controller.HttpResponse'
summary: Update an existing storage configuration
tags:
- storage
/storage/configs/{id}/set-primary:
put:
parameters:
- description: Storage ID
in: path
name: id
required: true
type: integer
responses:
"200":
description: OK
schema:
$ref: '#/definitions/controller.HttpResponse'
summary: Set a storage provider as primary
tags:
- storage
/storage/configs/{id}/toggle:
put:
parameters:
- description: Storage ID
in: path
name: id
required: true
type: integer
responses:
"200":
description: OK
schema:
$ref: '#/definitions/controller.HttpResponse'
summary: Enable or disable a storage provider
tags:
- storage
/storage/health:
get:
responses:
"200":
description: OK
schema:
allOf:
- $ref: '#/definitions/controller.HttpResponse'
- properties:
data:
additionalProperties: true
type: object
type: object
summary: Get health status of all storage providers
tags:
- storage
/storage/test-connection:
post:
parameters:
- description: Storage configuration to test
in: body
name: config
required: true
schema:
$ref: '#/definitions/model.StorageConfig'
responses:
"200":
description: OK
schema:
$ref: '#/definitions/controller.HttpResponse'
summary: Test storage connection
tags:
- storage
swagger: "2.0"

View File

@@ -173,5 +173,19 @@ func SetupRouter(r *gin.Engine) {
rdpGroup.GET("/sessions/:session_id/files/download", c.RDPFileDownload)
rdpGroup.POST("/sessions/:session_id/files/mkdir", c.RDPFileMkdir)
}
// Storage management routes
storage := v1.Group("/storage")
{
storage.GET("/configs", c.ListStorageConfigs)
storage.GET("/configs/:id", c.GetStorageConfig)
storage.POST("/configs", c.CreateStorageConfig)
storage.PUT("/configs/:id", c.UpdateStorageConfig)
storage.DELETE("/configs/:id", c.DeleteStorageConfig)
storage.POST("/test-connection", c.TestStorageConnection)
storage.GET("/health", c.GetStorageHealth)
storage.PUT("/configs/:id/set-primary", c.SetPrimaryStorage)
storage.PUT("/configs/:id/toggle", c.ToggleStorageProvider)
}
}
}
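
Every storage handler above re-implements the same admin gate. A possible consolidation (a sketch, not part of this commit; package name and import paths assumed from the surrounding code):

	package router

	import (
		"net/http"

		"github.com/gin-gonic/gin"

		"github.com/veops/oneterm/internal/acl"
	)

	// adminOnly hoists the per-handler admin checks from the storage controller
	// into a group middleware; assumes acl.GetSessionFromCtx and acl.IsAdmin
	// behave as they do in those handlers.
	func adminOnly() gin.HandlerFunc {
		return func(ctx *gin.Context) {
			currentUser, _ := acl.GetSessionFromCtx(ctx)
			if !acl.IsAdmin(currentUser) {
				ctx.AbortWithStatus(http.StatusForbidden)
				return
			}
			ctx.Next()
		}
	}

Registering the group as v1.Group("/storage", adminOnly()) would then let each handler drop its own check.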

View File

@@ -303,6 +303,13 @@ func HandleTerm(sess *gsession.Session, ctx *gin.Context) (err error) {
sess.ClearSSHClient()
}
// Close SSH recorder to save recording file
if sess.SshRecoder != nil {
if closeErr := sess.SshRecoder.Close(); closeErr != nil {
logger.L().Error("Failed to close SSH recorder", zap.String("sessionId", sess.SessionId), zap.Error(closeErr))
}
}
sess.SshParser.Close(sess.Prompt)
sess.Status = model.SESSIONSTATUS_OFFLINE
sess.ClosedAt = lo.ToPtr(time.Now())

View File

@@ -16,4 +16,6 @@ var (
DefaultShare = &Share{}
DefaultQuickCommand = &QuickCommand{}
DefaultUserPreference = &UserPreference{}
DefaultStorageConfig = &StorageConfig{}
DefaultStorageMetrics = &StorageMetrics{}
)

View File

@@ -0,0 +1,167 @@
package model
import (
"database/sql/driver"
"encoding/json"
"fmt"
"time"
"gorm.io/plugin/soft_delete"
)
// StorageType represents the type of storage backend
type StorageType string
const (
StorageTypeLocal StorageType = "local"
StorageTypeS3 StorageType = "s3"
StorageTypeMinio StorageType = "minio"
StorageTypeOSS StorageType = "oss"
StorageTypeCOS StorageType = "cos"
)
// StorageConfigMap represents the configuration parameters for a storage backend
type StorageConfigMap map[string]string
func (m *StorageConfigMap) Scan(value any) error {
if value == nil {
*m = make(map[string]string)
return nil
}
// Database drivers may return TEXT columns as []byte or string;
// a bare value.([]byte) assertion would panic on the latter
switch v := value.(type) {
case []byte:
return json.Unmarshal(v, m)
case string:
return json.Unmarshal([]byte(v), m)
default:
return fmt.Errorf("StorageConfigMap: cannot scan type %T", value)
}
}
func (m StorageConfigMap) Value() (driver.Value, error) {
if m == nil {
return "{}", nil
}
return json.Marshal(m)
}
// StorageConfig represents a storage configuration entry in the database
type StorageConfig struct {
Id int `json:"id" gorm:"column:id;primarykey;autoIncrement"`
Name string `json:"name" gorm:"column:name;uniqueIndex;size:64;not null"`
Type StorageType `json:"type" gorm:"column:type;size:32;not null"`
Enabled bool `json:"enabled" gorm:"column:enabled;default:true"`
Priority int `json:"priority" gorm:"column:priority;default:10"`
IsPrimary bool `json:"is_primary" gorm:"column:is_primary;default:false"`
Config StorageConfigMap `json:"config" gorm:"column:config;type:text"`
Description string `json:"description" gorm:"column:description;size:255"`
// Standard fields
CreatorId int `json:"creator_id" gorm:"column:creator_id"`
UpdaterId int `json:"updater_id" gorm:"column:updater_id"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt soft_delete.DeletedAt `json:"-" gorm:"column:deleted_at;uniqueIndex:deleted_at"`
}
func (m *StorageConfig) TableName() string {
return "storage_configs"
}
func (m *StorageConfig) SetId(id int) {
m.Id = id
}
func (m *StorageConfig) SetCreatorId(id int) {
m.CreatorId = id
}
func (m *StorageConfig) SetUpdaterId(id int) {
m.UpdaterId = id
}
func (m *StorageConfig) SetResourceId(id int) {
m.Id = id
}
func (m *StorageConfig) GetResourceId() int {
return m.Id
}
func (m *StorageConfig) GetId() int {
return m.Id
}
func (m *StorageConfig) GetName() string {
return m.Name
}
func (m *StorageConfig) SetPerms(perms []string) {
// Storage configs don't have permissions
}
// StorageMetrics represents storage usage metrics
type StorageMetrics struct {
Id int `json:"id" gorm:"column:id;primarykey;autoIncrement"`
StorageName string `json:"storage_name" gorm:"column:storage_name;size:64;not null"`
FileCount int64 `json:"file_count" gorm:"column:file_count;default:0"`
TotalSize int64 `json:"total_size" gorm:"column:total_size;default:0"`
ReplayCount int64 `json:"replay_count" gorm:"column:replay_count;default:0"`
ReplaySize int64 `json:"replay_size" gorm:"column:replay_size;default:0"`
RdpFileCount int64 `json:"rdp_file_count" gorm:"column:rdp_file_count;default:0"`
RdpFileSize int64 `json:"rdp_file_size" gorm:"column:rdp_file_size;default:0"`
LastUpdated time.Time `json:"last_updated" gorm:"column:last_updated"`
IsHealthy bool `json:"is_healthy" gorm:"column:is_healthy;default:true"`
ErrorMessage string `json:"error_message" gorm:"column:error_message;size:255"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt soft_delete.DeletedAt `json:"-" gorm:"column:deleted_at;uniqueIndex:deleted_at"`
}
func (m *StorageMetrics) TableName() string {
return "storage_metrics"
}
// FileMetadata represents metadata for files stored in storage backends
type FileMetadata struct {
Id int `json:"id" gorm:"column:id;primarykey;autoIncrement"`
StorageKey string `json:"storage_key" gorm:"column:storage_key;uniqueIndex;size:255;not null"`
FileName string `json:"file_name" gorm:"column:file_name;size:255;not null"`
FileSize int64 `json:"file_size" gorm:"column:file_size;default:0"`
MimeType string `json:"mime_type" gorm:"column:mime_type;size:100"`
Checksum string `json:"checksum" gorm:"column:checksum;size:64"`
StorageType StorageType `json:"storage_type" gorm:"column:storage_type;size:32;not null"`
StorageName string `json:"storage_name" gorm:"column:storage_name;size:64;not null"`
Category string `json:"category" gorm:"column:category;size:32"` // replay, rdp_file, etc.
SessionId string `json:"session_id" gorm:"column:session_id;size:64"`
AssetId int `json:"asset_id" gorm:"column:asset_id"`
UserId int `json:"user_id" gorm:"column:user_id"`
// Standard fields
CreatorId int `json:"creator_id" gorm:"column:creator_id"`
UpdaterId int `json:"updater_id" gorm:"column:updater_id"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt soft_delete.DeletedAt `json:"-" gorm:"column:deleted_at;uniqueIndex:deleted_at"`
}
func (m *FileMetadata) TableName() string {
return "file_metadata"
}
func (m *FileMetadata) SetId(id int) {
m.Id = id
}
func (m *FileMetadata) SetCreatorId(id int) {
m.CreatorId = id
}
func (m *FileMetadata) SetUpdaterId(id int) {
m.UpdaterId = id
}
func (m *FileMetadata) GetResourceId() int {
return m.Id
}
func (m *FileMetadata) GetId() int {
return m.Id
}
func (m *FileMetadata) GetName() string {
return m.FileName
}
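
For reference, a StorageConfig populated the way the create/update endpoints would accept it. A sketch with illustrative values; the endpoint/bucket keys are assumptions about the ConfigMap contents:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/veops/oneterm/internal/model"
	)

	func main() {
		cfg := model.StorageConfig{
			Name:      "minio-primary", // illustrative name
			Type:      model.StorageTypeMinio,
			Enabled:   true,
			IsPrimary: true,
			Priority:  1,
			Config: model.StorageConfigMap{
				"endpoint": "minio.internal:9000", // hypothetical keys
				"bucket":   "oneterm",
			},
		}
		out, _ := json.MarshalIndent(cfg, "", "  ")
		fmt.Println(string(out))
	}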

View File

@@ -3,6 +3,7 @@ package repository
import (
"context"
"github.com/gin-gonic/gin"
"gorm.io/gorm"
"github.com/veops/oneterm/internal/model"
@@ -11,6 +12,7 @@ import (
// IFileRepository file history repository interface
type IFileRepository interface {
AddFileHistory(ctx context.Context, history *model.FileHistory) error
BuildFileHistoryQuery(ctx *gin.Context) *gorm.DB
}
// FileRepository file history repository implementation
@@ -29,3 +31,8 @@ func NewFileRepository(db *gorm.DB) IFileRepository {
func (r *FileRepository) AddFileHistory(ctx context.Context, history *model.FileHistory) error {
return r.db.Create(history).Error
}
// BuildFileHistoryQuery builds a query for file history records
func (r *FileRepository) BuildFileHistoryQuery(ctx *gin.Context) *gorm.DB {
return r.db.Model(&model.FileHistory{})
}

View File

@@ -0,0 +1,165 @@
package repository
import (
"context"
"github.com/gin-gonic/gin"
"github.com/veops/oneterm/internal/model"
dbpkg "github.com/veops/oneterm/pkg/db"
"gorm.io/gorm"
)
// StorageRepository defines interface for storage-related database operations
type StorageRepository interface {
BaseRepository
// BuildQuery builds query for storage configurations with filters
BuildQuery(ctx *gin.Context) *gorm.DB
// GetStorageConfigs retrieves all storage configurations
GetStorageConfigs(ctx context.Context) ([]*model.StorageConfig, error)
// GetStorageConfigByName retrieves storage config by name
GetStorageConfigByName(ctx context.Context, name string) (*model.StorageConfig, error)
// CreateStorageConfig creates a new storage configuration
CreateStorageConfig(ctx context.Context, config *model.StorageConfig) error
// UpdateStorageConfig updates storage configuration
UpdateStorageConfig(ctx context.Context, config *model.StorageConfig) error
// DeleteStorageConfig deletes storage configuration
DeleteStorageConfig(ctx context.Context, name string) error
// GetFileMetadata retrieves file metadata
GetFileMetadata(ctx context.Context, key string) (*model.FileMetadata, error)
// CreateFileMetadata creates file metadata record
CreateFileMetadata(ctx context.Context, metadata *model.FileMetadata) error
// UpdateFileMetadata updates file metadata
UpdateFileMetadata(ctx context.Context, metadata *model.FileMetadata) error
// DeleteFileMetadata deletes file metadata
DeleteFileMetadata(ctx context.Context, key string) error
// ListFileMetadata lists file metadata with pagination
ListFileMetadata(ctx context.Context, prefix string, limit, offset int) ([]*model.FileMetadata, int64, error)
}
// storageRepository implements StorageRepository
type storageRepository struct {
*baseRepository
}
// NewStorageRepository creates a new storage repository
func NewStorageRepository() StorageRepository {
return &storageRepository{
baseRepository: &baseRepository{},
}
}
// BuildQuery builds query for storage configurations with filters
func (r *storageRepository) BuildQuery(ctx *gin.Context) *gorm.DB {
db := dbpkg.DB.Model(&model.StorageConfig{})
// Add search functionality
if search := ctx.Query("search"); search != "" {
db = db.Where("name LIKE ? OR description LIKE ?", "%"+search+"%", "%"+search+"%")
}
// Add type filter
if storageType := ctx.Query("type"); storageType != "" {
db = db.Where("type = ?", storageType)
}
// Add enabled filter
if enabled := ctx.Query("enabled"); enabled != "" {
db = db.Where("enabled = ?", enabled == "true")
}
// Add primary filter
if primary := ctx.Query("primary"); primary != "" {
db = db.Where("is_primary = ?", primary == "true")
}
return db
}
// GetStorageConfigs retrieves all storage configurations
func (r *storageRepository) GetStorageConfigs(ctx context.Context) ([]*model.StorageConfig, error) {
var configs []*model.StorageConfig
err := dbpkg.DB.Find(&configs).Error
return configs, err
}
// GetStorageConfigByName retrieves storage config by name
func (r *storageRepository) GetStorageConfigByName(ctx context.Context, name string) (*model.StorageConfig, error) {
var config model.StorageConfig
err := dbpkg.DB.Where("name = ?", name).First(&config).Error
if err != nil {
return nil, err
}
return &config, nil
}
// CreateStorageConfig creates a new storage configuration
func (r *storageRepository) CreateStorageConfig(ctx context.Context, config *model.StorageConfig) error {
return dbpkg.DB.Create(config).Error
}
// UpdateStorageConfig updates storage configuration
func (r *storageRepository) UpdateStorageConfig(ctx context.Context, config *model.StorageConfig) error {
return dbpkg.DB.Save(config).Error
}
// DeleteStorageConfig deletes storage configuration
func (r *storageRepository) DeleteStorageConfig(ctx context.Context, name string) error {
return dbpkg.DB.Where("name = ?", name).Delete(&model.StorageConfig{}).Error
}
// GetFileMetadata retrieves file metadata
func (r *storageRepository) GetFileMetadata(ctx context.Context, key string) (*model.FileMetadata, error) {
var metadata model.FileMetadata
err := dbpkg.DB.Where("storage_key = ?", key).First(&metadata).Error
if err != nil {
return nil, err
}
return &metadata, nil
}
// CreateFileMetadata creates file metadata record
func (r *storageRepository) CreateFileMetadata(ctx context.Context, metadata *model.FileMetadata) error {
return dbpkg.DB.Create(metadata).Error
}
// UpdateFileMetadata updates file metadata
func (r *storageRepository) UpdateFileMetadata(ctx context.Context, metadata *model.FileMetadata) error {
return dbpkg.DB.Save(metadata).Error
}
// DeleteFileMetadata deletes file metadata
func (r *storageRepository) DeleteFileMetadata(ctx context.Context, key string) error {
return dbpkg.DB.Where("storage_key = ?", key).Delete(&model.FileMetadata{}).Error
}
// ListFileMetadata lists file metadata with pagination
func (r *storageRepository) ListFileMetadata(ctx context.Context, prefix string, limit, offset int) ([]*model.FileMetadata, int64, error) {
var metadata []*model.FileMetadata
var total int64
query := dbpkg.DB.Model(&model.FileMetadata{})
if prefix != "" {
query = query.Where("storage_key LIKE ?", prefix+"%")
}
// Get total count
err := query.Count(&total).Error
if err != nil {
return nil, 0, err
}
// Get records with pagination
err = query.Limit(limit).Offset(offset).Find(&metadata).Error
return metadata, total, err
}
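
ListFileMetadata counts before applying Limit/Offset, so totals stay consistent with the prefix filter. A usage sketch (the "replays/" key prefix is an assumption about key layout):

	package main

	import (
		"context"
		"fmt"

		"github.com/veops/oneterm/internal/repository"
	)

	func listReplayPage(ctx context.Context) error {
		repo := repository.NewStorageRepository()
		// First page of 20 replay files.
		files, total, err := repo.ListFileMetadata(ctx, "replays/", 20, 0)
		if err != nil {
			return err
		}
		fmt.Printf("showing %d of %d files\n", len(files), total)
		return nil
	}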

View File

@@ -20,6 +20,7 @@ import (
"github.com/veops/oneterm/internal/acl"
"github.com/veops/oneterm/internal/model"
"github.com/veops/oneterm/internal/repository"
gsession "github.com/veops/oneterm/internal/session"
dbpkg "github.com/veops/oneterm/pkg/db"
"github.com/veops/oneterm/pkg/logger"
@@ -30,9 +31,8 @@ var DefaultFileService IFileService
// InitFileService initializes the global file service
func InitFileService() {
DefaultFileService = NewFileService(&FileRepository{
db: dbpkg.DB,
})
repo := repository.NewFileRepository(dbpkg.DB)
DefaultFileService = NewFileService(repo)
}
func init() {
@@ -68,7 +68,7 @@ func init() {
}
// NewFileService creates a new file service instance
- func NewFileService(repo IFileRepository) IFileService {
+ func NewFileService(repo repository.IFileRepository) IFileService {
return &FileService{
repo: repo,
}
@@ -651,16 +651,3 @@ func (s *FileService) GetRDPDrivePath(assetId int) (string, error) {
return fullDrivePath, nil
}
- // Simple FileRepository implementation
- type FileRepository struct {
- db *gorm.DB
- }
- func (r *FileRepository) AddFileHistory(ctx context.Context, history *model.FileHistory) error {
- return r.db.Create(history).Error
- }
- func (r *FileRepository) BuildFileHistoryQuery(ctx *gin.Context) *gorm.DB {
- return r.db.Model(&model.FileHistory{})
- }

View File

@@ -351,7 +351,7 @@ type IFileService interface {
// FileService implements IFileService
type FileService struct {
- repo IFileRepository
+ repo repository.IFileRepository
}
// RDP File related structures
@@ -555,12 +555,6 @@ func (pw *FileProgressWriter) Write(p []byte) (int, error) {
return n, err
}
- // Repository interface
- type IFileRepository interface {
- AddFileHistory(ctx context.Context, history *model.FileHistory) error
- BuildFileHistoryQuery(ctx *gin.Context) *gorm.DB
- }
// SessionFileTransfer methods
func (t *SessionFileTransfer) UpdateProgress(offset int64) {
t.mutex.Lock()

View File

@@ -143,3 +143,20 @@ func (s *SessionService) GetSessionReplayFilename(ctx context.Context, sessionId
return filename, nil
}
// GetSessionReplay gets session replay file reader
func (s *SessionService) GetSessionReplay(ctx context.Context, sessionId string) (io.ReadCloser, error) {
// First try to get from storage service
if DefaultStorageService != nil {
reader, err := DefaultStorageService.GetSessionReplay(ctx, sessionId)
if err == nil {
return reader, nil
}
logger.L().Warn("Failed to get replay from storage service, falling back to local file",
zap.String("session_id", sessionId),
zap.Error(err))
}
// Fallback to direct file access with date hierarchy search
return gsession.GetReplay(sessionId)
}
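
The lookup order is deliberate: the storage service (which may point at a remote backend) is tried first, and the pre-existing date-hierarchy search on local disk stays as a fallback, so replays recorded before this change remain downloadable.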

View File

@@ -0,0 +1,650 @@
package service
import (
"context"
"fmt"
"io"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/gin-gonic/gin"
"github.com/veops/oneterm/internal/model"
"github.com/veops/oneterm/internal/repository"
"github.com/veops/oneterm/pkg/config"
"github.com/veops/oneterm/pkg/logger"
"github.com/veops/oneterm/pkg/storage"
"github.com/veops/oneterm/pkg/storage/providers"
"go.uber.org/zap"
"gorm.io/gorm"
)
// StorageService defines the interface for storage operations
type StorageService interface {
BaseService
// Configuration Management using database
GetStorageConfigs(ctx context.Context) ([]*model.StorageConfig, error)
GetStorageConfig(ctx context.Context, name string) (*model.StorageConfig, error)
CreateStorageConfig(ctx context.Context, config *model.StorageConfig) error
UpdateStorageConfig(ctx context.Context, config *model.StorageConfig) error
DeleteStorageConfig(ctx context.Context, name string) error
// File Operations combining storage backend and database metadata
UploadFile(ctx context.Context, key string, reader io.Reader, size int64, metadata *model.FileMetadata) error
DownloadFile(ctx context.Context, key string) (io.ReadCloser, *model.FileMetadata, error)
DeleteFile(ctx context.Context, key string) error
FileExists(ctx context.Context, key string) (bool, error)
ListFiles(ctx context.Context, prefix string, limit, offset int) ([]*model.FileMetadata, int64, error)
// Business-specific operations for external interface
SaveSessionReplay(ctx context.Context, sessionId string, reader io.Reader, size int64) error
GetSessionReplay(ctx context.Context, sessionId string) (io.ReadCloser, error)
DeleteSessionReplay(ctx context.Context, sessionId string) error
SaveRDPFile(ctx context.Context, assetId int, remotePath string, reader io.Reader, size int64) error
GetRDPFile(ctx context.Context, assetId int, remotePath string) (io.ReadCloser, error)
DeleteRDPFile(ctx context.Context, assetId int, remotePath string) error
// Provider management
GetPrimaryProvider() (storage.Provider, error)
HealthCheck(ctx context.Context) map[string]error
CreateProvider(config *model.StorageConfig) (storage.Provider, error)
// BuildQuery builds a request-scoped list query for storage configs
BuildQuery(ctx *gin.Context) *gorm.DB
// GetAvailableProvider returns an available storage provider with fallback logic
// Priority: Primary storage first, then by priority (lower number = higher priority)
GetAvailableProvider(ctx context.Context) (storage.Provider, error)
}
// storageService implements StorageService
type storageService struct {
BaseService
storageRepo repository.StorageRepository
providers map[string]storage.Provider
primary string
}
// NewStorageService creates a new storage service
func NewStorageService() StorageService {
return &storageService{
BaseService: NewBaseService(),
storageRepo: repository.NewStorageRepository(),
providers: make(map[string]storage.Provider),
}
}
// Configuration management via the repository layer
func (s *storageService) BuildQuery(ctx *gin.Context) *gorm.DB {
return s.storageRepo.BuildQuery(ctx)
}
func (s *storageService) GetStorageConfigs(ctx context.Context) ([]*model.StorageConfig, error) {
return s.storageRepo.GetStorageConfigs(ctx)
}
func (s *storageService) GetStorageConfig(ctx context.Context, name string) (*model.StorageConfig, error) {
return s.storageRepo.GetStorageConfigByName(ctx, name)
}
func (s *storageService) CreateStorageConfig(ctx context.Context, config *model.StorageConfig) error {
// Validate configuration
if err := s.validateConfig(config); err != nil {
return err
}
// Create in database via repository
if err := s.storageRepo.CreateStorageConfig(ctx, config); err != nil {
return fmt.Errorf("failed to create storage config: %w", err)
}
// Initialize storage provider
provider, err := s.CreateProvider(config)
if err != nil {
logger.L().Warn("Failed to initialize storage provider",
zap.String("name", config.Name),
zap.Error(err))
} else {
s.providers[config.Name] = provider
if config.IsPrimary {
s.primary = config.Name
}
}
return nil
}
func (s *storageService) UpdateStorageConfig(ctx context.Context, config *model.StorageConfig) error {
if err := s.validateConfig(config); err != nil {
return err
}
return s.storageRepo.UpdateStorageConfig(ctx, config)
}
func (s *storageService) DeleteStorageConfig(ctx context.Context, name string) error {
// Remove from memory
delete(s.providers, name)
if s.primary == name {
s.primary = ""
}
// Delete from database
return s.storageRepo.DeleteStorageConfig(ctx, name)
}
// File Operations combining storage provider and database metadata
func (s *storageService) UploadFile(ctx context.Context, key string, reader io.Reader, size int64, metadata *model.FileMetadata) error {
provider, err := s.GetAvailableProvider(ctx)
if err != nil {
return fmt.Errorf("no available storage provider: %w", err)
}
// Upload to storage backend
if err := provider.Upload(ctx, key, reader, size); err != nil {
return fmt.Errorf("failed to upload file: %w", err)
}
// Save metadata to database if provided
if metadata != nil {
metadata.StorageKey = key
metadata.FileSize = size
metadata.StorageType = model.StorageType(provider.Type())
if err := s.storageRepo.CreateFileMetadata(ctx, metadata); err != nil {
logger.L().Warn("Failed to save file metadata",
zap.String("key", key),
zap.Error(err))
}
}
return nil
}
func (s *storageService) DownloadFile(ctx context.Context, key string) (io.ReadCloser, *model.FileMetadata, error) {
provider, err := s.GetAvailableProvider(ctx)
if err != nil {
return nil, nil, fmt.Errorf("no available storage provider: %w", err)
}
// Download from storage backend
reader, err := provider.Download(ctx, key)
if err != nil {
return nil, nil, fmt.Errorf("failed to download file: %w", err)
}
// Get metadata from database
metadata, err := s.storageRepo.GetFileMetadata(ctx, key)
if err != nil {
logger.L().Warn("Failed to get file metadata",
zap.String("key", key),
zap.Error(err))
}
return reader, metadata, nil
}
func (s *storageService) DeleteFile(ctx context.Context, key string) error {
provider, err := s.GetAvailableProvider(ctx)
if err != nil {
return fmt.Errorf("no available storage provider: %w", err)
}
// Delete from storage backend
if err := provider.Delete(ctx, key); err != nil {
return fmt.Errorf("failed to delete file: %w", err)
}
// Delete metadata from database
if err := s.storageRepo.DeleteFileMetadata(ctx, key); err != nil {
logger.L().Warn("Failed to delete file metadata",
zap.String("key", key),
zap.Error(err))
}
return nil
}
func (s *storageService) FileExists(ctx context.Context, key string) (bool, error) {
provider, err := s.GetPrimaryProvider()
if err != nil {
return false, fmt.Errorf("no primary storage provider: %w", err)
}
return provider.Exists(ctx, key)
}
func (s *storageService) ListFiles(ctx context.Context, prefix string, limit, offset int) ([]*model.FileMetadata, int64, error) {
return s.storageRepo.ListFileMetadata(ctx, prefix, limit, offset)
}
// Business-specific operations
func (s *storageService) SaveSessionReplay(ctx context.Context, sessionId string, reader io.Reader, size int64) error {
key := fmt.Sprintf("%s.cast", sessionId)
metadata := &model.FileMetadata{
FileName: fmt.Sprintf("%s.cast", sessionId),
Category: "replay",
SessionId: sessionId,
MimeType: "application/octet-stream",
}
logger.L().Info("SaveReplay called", zap.String("session_id", sessionId))
return s.UploadFile(ctx, key, reader, size, metadata)
}
func (s *storageService) GetSessionReplay(ctx context.Context, sessionId string) (io.ReadCloser, error) {
key := fmt.Sprintf("%s.cast", sessionId)
reader, _, err := s.DownloadFile(ctx, key)
return reader, err
}
func (s *storageService) DeleteSessionReplay(ctx context.Context, sessionId string) error {
key := fmt.Sprintf("%s.cast", sessionId)
return s.DeleteFile(ctx, key)
}
func (s *storageService) SaveRDPFile(ctx context.Context, assetId int, remotePath string, reader io.Reader, size int64) error {
// Normalize path format
normalizedPath := filepath.ToSlash(strings.TrimPrefix(remotePath, "/"))
key := fmt.Sprintf("rdp/asset_%d/%s", assetId, normalizedPath)
metadata := &model.FileMetadata{
FileName: filepath.Base(remotePath),
Category: "rdp_file",
AssetId: assetId,
}
return s.UploadFile(ctx, key, reader, size, metadata)
}
func (s *storageService) GetRDPFile(ctx context.Context, assetId int, remotePath string) (io.ReadCloser, error) {
normalizedPath := filepath.ToSlash(strings.TrimPrefix(remotePath, "/"))
key := fmt.Sprintf("rdp/asset_%d/%s", assetId, normalizedPath)
reader, _, err := s.DownloadFile(ctx, key)
return reader, err
}
func (s *storageService) DeleteRDPFile(ctx context.Context, assetId int, remotePath string) error {
normalizedPath := filepath.ToSlash(strings.TrimPrefix(remotePath, "/"))
key := fmt.Sprintf("rdp/asset_%d/%s", assetId, normalizedPath)
return s.DeleteFile(ctx, key)
}
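// Key-layout sketch (hypothetical values): for assetId 42 and remotePath
// "/tmp/report.pdf", the RDP helpers above produce the storage key
// "rdp/asset_42/tmp/report.pdf"; session replays are keyed "<sessionId>.cast".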
// Provider management
func (s *storageService) GetPrimaryProvider() (storage.Provider, error) {
if s.primary == "" {
return nil, fmt.Errorf("no primary storage provider configured")
}
provider, exists := s.providers[s.primary]
if !exists {
return nil, fmt.Errorf("primary storage provider not found: %s", s.primary)
}
return provider, nil
}
func (s *storageService) HealthCheck(ctx context.Context) map[string]error {
results := make(map[string]error)
for name, provider := range s.providers {
err := provider.HealthCheck(ctx)
results[name] = err
}
return results
}
// Helper methods
func (s *storageService) validateConfig(config *model.StorageConfig) error {
if config.Name == "" {
return fmt.Errorf("storage name is required")
}
if config.Type == "" {
return fmt.Errorf("storage type is required")
}
return nil
}
func (s *storageService) CreateProvider(config *model.StorageConfig) (storage.Provider, error) {
switch config.Type {
case model.StorageTypeLocal:
localConfig := providers.LocalConfig{
BasePath: config.Config["base_path"],
}
// Parse path strategy from config
if strategyStr, exists := config.Config["path_strategy"]; exists {
localConfig.PathStrategy = storage.PathStrategy(strategyStr)
} else {
localConfig.PathStrategy = storage.DateHierarchyStrategy // Default to date hierarchy
}
// Parse retention configuration
retentionConfig := storage.DefaultRetentionConfig()
if retentionDaysStr, exists := config.Config["retention_days"]; exists {
if days, err := strconv.Atoi(retentionDaysStr); err == nil {
retentionConfig.RetentionDays = days
}
}
if archiveDaysStr, exists := config.Config["archive_days"]; exists {
if days, err := strconv.Atoi(archiveDaysStr); err == nil {
retentionConfig.ArchiveDays = days
}
}
if cleanupStr, exists := config.Config["cleanup_enabled"]; exists {
retentionConfig.CleanupEnabled = cleanupStr == "true"
}
if archiveStr, exists := config.Config["archive_enabled"]; exists {
retentionConfig.ArchiveEnabled = archiveStr == "true"
}
localConfig.RetentionConfig = retentionConfig
return providers.NewLocal(localConfig)
case model.StorageTypeMinio:
minioConfig, err := providers.ParseMinioConfigFromMap(config.Config)
if err != nil {
return nil, fmt.Errorf("failed to parse Minio config: %w", err)
}
return providers.NewMinio(minioConfig)
case model.StorageTypeS3:
// TODO: implement S3 provider with path strategy support
return nil, fmt.Errorf("S3 provider not implemented yet")
default:
return nil, fmt.Errorf("unsupported storage type: %s", config.Type)
}
}
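// Configuration sketch (hypothetical endpoint and credentials): a MinIO entry
// in the shape CreateProvider expects; the map keys mirror
// ParseMinioConfigFromMap.
func exampleMinioStorageConfig() *model.StorageConfig {
return &model.StorageConfig{
Name:        "minio-main",
Type:        model.StorageTypeMinio,
Enabled:     true,
Priority:    20,
IsPrimary:   false,
Description: "Example MinIO backend (placeholder values)",
Config: model.StorageConfigMap{
"endpoint":          "minio.internal:9000",
"access_key_id":     "REPLACE_ME",
"secret_access_key": "REPLACE_ME",
"bucket_name":       "oneterm-replays",
"use_ssl":           "false",
"path_strategy":     "date_hierarchy",
"retention_days":    "30",
"archive_days":      "7",
},
}
}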
// GetAvailableProvider returns an available storage provider with fallback logic
// Priority: Primary storage first, then by priority (lower number = higher priority)
func (s *storageService) GetAvailableProvider(ctx context.Context) (storage.Provider, error) {
// 1. Try primary storage first
if s.primary != "" {
if provider, exists := s.providers[s.primary]; exists {
if healthErr := provider.HealthCheck(ctx); healthErr == nil {
return provider, nil
} else {
logger.L().Warn("Primary storage provider health check failed, trying fallback",
zap.String("primary", s.primary),
zap.Error(healthErr))
}
}
}
// 2. Get all enabled storage configs sorted by priority
configs, err := s.GetStorageConfigs(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get storage configs: %w", err)
}
// Filter enabled configs and sort by priority (lower number = higher priority)
var enabledConfigs []*model.StorageConfig
for _, config := range configs {
if config.Enabled {
enabledConfigs = append(enabledConfigs, config)
}
}
// Sort by priority (ascending order - lower number = higher priority)
sort.Slice(enabledConfigs, func(i, j int) bool {
return enabledConfigs[i].Priority < enabledConfigs[j].Priority
})
// 3. Try each provider by priority order
for _, config := range enabledConfigs {
if provider, exists := s.providers[config.Name]; exists {
if healthErr := provider.HealthCheck(ctx); healthErr == nil {
logger.L().Info("Using fallback storage provider",
zap.String("name", config.Name),
zap.Int("priority", config.Priority))
return provider, nil
} else {
logger.L().Warn("Storage provider health check failed",
zap.String("name", config.Name),
zap.Error(healthErr))
}
}
}
return nil, fmt.Errorf("no available storage provider found")
}
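// Fallback sketch (hypothetical setup): if the primary "local-a" fails its
// health check while the enabled configs are {minio-b: priority 5, local-c:
// priority 10}, the loop above probes minio-b first and returns it if healthy.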
// Global storage service instance
var DefaultStorageService StorageService
// InitStorageService initializes the global storage service with database configurations
func InitStorageService() {
if DefaultStorageService == nil {
DefaultStorageService = NewStorageService()
}
ctx := context.Background()
storageImpl := DefaultStorageService.(*storageService)
// 1. Load or create storage configurations
configs, err := loadOrCreateStorageConfigs(ctx, storageImpl)
if err != nil {
logger.L().Error("Failed to initialize storage configurations", zap.Error(err))
return
}
// 2. Validate configuration status
validateStorageConfigs(configs)
// 3. Initialize storage providers
successCount := initializeStorageProviders(ctx, storageImpl, configs)
// 4. Verify primary provider
if err := verifyPrimaryProvider(ctx, storageImpl); err != nil {
logger.L().Error("Primary storage provider verification failed", zap.Error(err))
return
}
// 5. Initialize session replay adapter
provider, err := storageImpl.GetPrimaryProvider()
if err != nil {
logger.L().Error("Failed to get primary provider for session replay adapter", zap.Error(err))
return
}
storage.InitializeAdapter(provider)
logger.L().Info("Storage service initialization completed",
zap.Int("total_configs", len(configs)),
zap.Int("successful_providers", successCount),
zap.String("primary_provider", storageImpl.primary))
}
// loadOrCreateStorageConfigs loads existing configurations or creates default configuration
func loadOrCreateStorageConfigs(ctx context.Context, s *storageService) ([]*model.StorageConfig, error) {
configs, err := s.GetStorageConfigs(ctx)
if err != nil {
return nil, fmt.Errorf("failed to load storage configurations: %w", err)
}
if len(configs) == 0 {
logger.L().Info("No storage configurations found, creating default local storage")
defaultConfig := &model.StorageConfig{
Name: "default-local",
Type: model.StorageTypeLocal,
Enabled: true,
Priority: 10,
IsPrimary: true,
Description: "Default local storage for file operations with date hierarchy",
Config: model.StorageConfigMap{
"base_path": config.Cfg.Session.ReplayDir,
"path_strategy": "date_hierarchy",
"retention_days": "30",
"archive_days": "7",
"cleanup_enabled": "true",
"archive_enabled": "true",
},
}
if err := s.CreateStorageConfig(ctx, defaultConfig); err != nil {
return nil, fmt.Errorf("failed to create default storage configuration: %w", err)
}
configs = []*model.StorageConfig{defaultConfig}
logger.L().Info("Created default local storage configuration successfully")
}
return configs, nil
}
// validateStorageConfigs validates the status of storage configurations
func validateStorageConfigs(configs []*model.StorageConfig) {
var enabledCount, primaryCount int
for _, config := range configs {
if !config.Enabled {
continue
}
enabledCount++
logger.L().Info("Found enabled storage provider",
zap.String("name", config.Name),
zap.String("type", string(config.Type)),
zap.Bool("is_primary", config.IsPrimary))
if config.IsPrimary {
primaryCount++
}
}
if enabledCount == 0 {
logger.L().Warn("No enabled storage providers found")
}
if primaryCount == 0 {
logger.L().Warn("No primary storage provider configured")
} else if primaryCount > 1 {
logger.L().Warn("Multiple primary storage providers found", zap.Int("count", primaryCount))
}
}
// initializeStorageProviders initializes all enabled storage providers
func initializeStorageProviders(ctx context.Context, s *storageService, configs []*model.StorageConfig) int {
var successCount int
for _, config := range configs {
if !config.Enabled {
logger.L().Debug("Skipping disabled storage provider", zap.String("name", config.Name))
continue
}
provider, err := s.CreateProvider(config)
if err != nil {
logger.L().Warn("Failed to initialize storage provider",
zap.String("name", config.Name),
zap.String("type", string(config.Type)),
zap.Error(err))
continue
}
if err := provider.HealthCheck(ctx); err != nil {
logger.L().Warn("Storage provider failed health check",
zap.String("name", config.Name),
zap.String("type", string(config.Type)),
zap.Error(err))
continue
}
s.providers[config.Name] = provider
successCount++
if config.IsPrimary {
s.primary = config.Name
logger.L().Info("Set primary storage provider",
zap.String("name", config.Name),
zap.String("type", string(config.Type)))
}
}
return successCount
}
// verifyPrimaryProvider verifies the availability of the primary storage provider
func verifyPrimaryProvider(ctx context.Context, s *storageService) error {
provider, err := s.GetPrimaryProvider()
if err != nil {
return fmt.Errorf("no primary storage provider available: %w", err)
}
if err := provider.HealthCheck(ctx); err != nil {
logger.L().Warn("Primary storage provider health check failed",
zap.String("type", provider.Type()),
zap.Error(err))
return err
}
logger.L().Info("Primary storage provider health check passed",
zap.String("type", provider.Type()))
return nil
}
func init() {
DefaultStorageService = NewStorageService()
// Start background storage health monitoring
go func() {
ticker := time.NewTicker(5 * time.Minute)
defer ticker.Stop()
for {
<-ticker.C
if DefaultStorageService != nil {
performHealthMonitoring()
}
}
}()
}
// performHealthMonitoring performs periodic health checks on all storage providers
func performHealthMonitoring() {
ctx := context.Background()
storageImpl, ok := DefaultStorageService.(*storageService)
if !ok {
return
}
healthResults := make(map[string]error)
for name, provider := range storageImpl.providers {
if err := provider.HealthCheck(ctx); err != nil {
healthResults[name] = err
logger.L().Warn("Storage provider health check failed",
zap.String("name", name),
zap.String("type", provider.Type()),
zap.Error(err))
} else {
healthResults[name] = nil
logger.L().Debug("Storage provider health check passed",
zap.String("name", name),
zap.String("type", provider.Type()))
}
}
// Log summary of health check results
healthyCount := 0
totalCount := len(healthResults)
for _, err := range healthResults {
if err == nil {
healthyCount++
}
}
if totalCount > 0 {
logger.L().Info("Storage health monitoring completed",
zap.Int("healthy_providers", healthyCount),
zap.Int("total_providers", totalCount))
}
}

View File

@@ -0,0 +1,254 @@
package service
import (
"context"
"fmt"
"os"
"path/filepath"
"time"
"github.com/veops/oneterm/internal/model"
"github.com/veops/oneterm/pkg/logger"
"github.com/veops/oneterm/pkg/storage"
"go.uber.org/zap"
)
// StorageCleanerService handles storage cleanup and archival tasks
type StorageCleanerService struct {
storageService StorageService
ticker *time.Ticker
stopChan chan struct{}
}
// NewStorageCleanerService creates a new storage cleaner service
func NewStorageCleanerService(storageService StorageService) *StorageCleanerService {
return &StorageCleanerService{
storageService: storageService,
stopChan: make(chan struct{}),
}
}
// Start starts the storage cleaner with daily checks at 2 AM
func (s *StorageCleanerService) Start() {
// Calculate time until next 2 AM
now := time.Now()
next2AM := time.Date(now.Year(), now.Month(), now.Day(), 2, 0, 0, 0, now.Location())
if now.After(next2AM) {
next2AM = next2AM.Add(24 * time.Hour)
}
// Wait until 2 AM, then run every 24 hours
timer := time.NewTimer(time.Until(next2AM))
go func() {
<-timer.C
s.runCleanup()
// Now run every 24 hours
s.ticker = time.NewTicker(24 * time.Hour)
for {
select {
case <-s.ticker.C:
s.runCleanup()
case <-s.stopChan:
return
}
}
}()
logger.L().Info("Storage cleaner service started",
zap.Time("next_run", next2AM))
}
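// Timing sketch (worked example): if Start runs at 21:00 local time, today's
// 02:00 has already passed, so next2AM advances to 02:00 tomorrow and the
// timer fires after five hours; cleanup then repeats every 24 hours.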
// Stop stops the storage cleaner service
func (s *StorageCleanerService) Stop() {
if s.ticker != nil {
s.ticker.Stop()
}
close(s.stopChan)
logger.L().Info("Storage cleaner service stopped")
}
// runCleanup performs the actual cleanup and archival
func (s *StorageCleanerService) runCleanup() {
logger.L().Info("Starting storage cleanup and archival")
ctx := context.Background()
configs, err := s.storageService.GetStorageConfigs(ctx)
if err != nil {
logger.L().Error("Failed to get storage configs for cleanup", zap.Error(err))
return
}
for _, config := range configs {
if !config.Enabled {
continue
}
provider, err := s.storageService.CreateProvider(config)
if err != nil {
logger.L().Error("Failed to create provider for cleanup",
zap.String("storage", config.Name), zap.Error(err))
continue
}
s.cleanupStorage(config, provider)
}
logger.L().Info("Storage cleanup and archival completed")
}
// cleanupStorage performs cleanup for a specific storage provider
func (s *StorageCleanerService) cleanupStorage(config *model.StorageConfig, provider storage.Provider) {
// Only handle local storage for now
if config.Type != model.StorageTypeLocal {
return
}
basePath := config.Config["base_path"]
if basePath == "" {
return
}
// Parse retention config
retentionDays := 30 // default
archiveDays := 7 // default
cleanupEnabled := true
archiveEnabled := true
if val, exists := config.Config["retention_days"]; exists {
if days, err := parseIntConfig(val); err == nil {
retentionDays = days
}
}
if val, exists := config.Config["archive_days"]; exists {
if days, err := parseIntConfig(val); err == nil {
archiveDays = days
}
}
if val, exists := config.Config["cleanup_enabled"]; exists {
cleanupEnabled = val == "true"
}
if val, exists := config.Config["archive_enabled"]; exists {
archiveEnabled = val == "true"
}
logger.L().Info("Processing storage cleanup",
zap.String("storage", config.Name),
zap.String("base_path", basePath),
zap.Int("retention_days", retentionDays),
zap.Int("archive_days", archiveDays),
zap.Bool("cleanup_enabled", cleanupEnabled),
zap.Bool("archive_enabled", archiveEnabled))
// Get all date directories
entries, err := os.ReadDir(basePath)
if err != nil {
logger.L().Error("Failed to read base directory",
zap.String("path", basePath), zap.Error(err))
return
}
now := time.Now()
retentionCutoff := now.AddDate(0, 0, -retentionDays)
archiveCutoff := now.AddDate(0, 0, -archiveDays)
for _, entry := range entries {
if !entry.IsDir() {
continue
}
// Check if directory name looks like a date (YYYY-MM-DD)
dirName := entry.Name()
if len(dirName) != 10 || dirName[4] != '-' || dirName[7] != '-' {
continue
}
dirDate, err := time.Parse("2006-01-02", dirName)
if err != nil {
continue
}
dirPath := filepath.Join(basePath, dirName)
// Delete directories older than retention period
if cleanupEnabled && dirDate.Before(retentionCutoff) {
logger.L().Info("Deleting expired directory",
zap.String("path", dirPath),
zap.Time("date", dirDate))
if err := os.RemoveAll(dirPath); err != nil {
logger.L().Error("Failed to delete directory",
zap.String("path", dirPath), zap.Error(err))
} else {
logger.L().Info("Successfully deleted expired directory",
zap.String("path", dirPath))
}
continue
}
// Archive directories older than archive period
if archiveEnabled && dirDate.Before(archiveCutoff) {
s.archiveDirectory(basePath, dirPath, dirDate)
}
}
}
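// Retention sketch (worked example): with retention_days=30 and archive_days=7
// on 2025-06-14, the date directory "2025-05-01" falls before the retention
// cutoff (2025-05-15) and is deleted, while "2025-06-05" falls only before the
// archive cutoff (2025-06-07) and is moved to the archived folder instead.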
// archiveDirectory archives a directory to archived folder
func (s *StorageCleanerService) archiveDirectory(basePath, dirPath string, dirDate time.Time) {
archiveDir := filepath.Join(basePath, "archived")
if err := os.MkdirAll(archiveDir, 0755); err != nil {
logger.L().Error("Failed to create archive directory",
zap.String("path", archiveDir), zap.Error(err))
return
}
archivedDirName := fmt.Sprintf("%s_archived", filepath.Base(dirPath))
archivedDirPath := filepath.Join(archiveDir, archivedDirName)
// Check if archive already exists
if _, err := os.Stat(archivedDirPath); err == nil {
logger.L().Info("Archive already exists, skipping",
zap.String("archive", archivedDirPath))
return
}
logger.L().Info("Archiving directory",
zap.String("source", dirPath),
zap.String("archive", archivedDirPath))
// Move directory to archived folder
if err := os.Rename(dirPath, archivedDirPath); err != nil {
logger.L().Error("Failed to archive directory",
zap.String("source", dirPath),
zap.String("dest", archivedDirPath),
zap.Error(err))
} else {
logger.L().Info("Successfully archived directory",
zap.String("source", dirPath),
zap.String("dest", archivedDirPath))
}
}
// parseIntConfig parses an integer config value, rejecting malformed input
func parseIntConfig(val string) (int, error) {
return strconv.Atoi(val)
}
// Global storage cleaner instance
var DefaultStorageCleanerService *StorageCleanerService
// InitStorageCleanerService initializes the global storage cleaner service
func InitStorageCleanerService() {
if DefaultStorageService == nil {
logger.L().Warn("Storage service not initialized, skipping cleaner initialization")
return
}
DefaultStorageCleanerService = NewStorageCleanerService(DefaultStorageService)
DefaultStorageCleanerService.Start()
logger.L().Info("Storage cleaner service initialized")
}

View File

@@ -1,37 +1,39 @@
package session
import (
"bytes"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"go.uber.org/zap"
"github.com/veops/oneterm/pkg/config"
"github.com/veops/oneterm/pkg/logger"
"github.com/veops/oneterm/pkg/storage"
)
type Asciinema struct {
sessionID string
buffer *bytes.Buffer
ts time.Time
useStorage bool
}
func NewAsciinema(id string, w, h int) (ret *Asciinema, err error) {
ret = &Asciinema{
sessionID: id,
buffer: bytes.NewBuffer(nil),
ts: time.Now(),
useStorage: storage.DefaultSessionReplayAdapter != nil,
}
// Write Asciinema header information
header := map[string]any{
"version": 2,
"width": w,
"height": h,
@@ -41,9 +43,17 @@ func NewAsciinema(id string, w, h int) (ret *Asciinema, err error) {
"SHELL": "/bin/bash",
"TERM": "xterm-256color",
},
}
bs, err := json.Marshal(header)
if err != nil {
logger.L().Error("failed to marshal asciinema header", zap.String("id", id), zap.Error(err))
return nil, err
}
ret.buffer.Write(append(bs, '\r', '\n'))
return ret, nil
}
func (a *Asciinema) Write(p []byte) {
@@ -52,7 +62,7 @@ func (a *Asciinema) Write(p []byte) {
o[1] = "o"
o[2] = string(p)
bs, _ := json.Marshal(o)
a.buffer.Write(append(bs, '\r', '\n'))
}
func (a *Asciinema) Resize(w, h int) {
@@ -61,5 +71,242 @@ func (a *Asciinema) Resize(w, h int) {
r[1] = "r"
r[2] = fmt.Sprintf("%dx%d", w, h)
bs, _ := json.Marshal(r)
a.buffer.Write(append(bs, '\r', '\n'))
}
// Close closes the recording and saves to storage
func (a *Asciinema) Close() error {
if a.useStorage && storage.DefaultSessionReplayAdapter != nil {
reader := bytes.NewReader(a.buffer.Bytes())
size := int64(a.buffer.Len())
err := storage.DefaultSessionReplayAdapter.SaveReplay(a.sessionID, reader, size)
if err != nil {
logger.L().Error("Failed to save replay to storage", zap.String("session_id", a.sessionID), zap.Error(err))
return a.saveToLocalFile()
}
return nil
}
return a.saveToLocalFile()
}
// saveToLocalFile saves to local filesystem (fallback solution)
func (a *Asciinema) saveToLocalFile() error {
logger.L().Info("saveToLocalFile called", zap.String("session_id", a.sessionID))
// Use date hierarchy strategy for local files - directly under base_path
dateDir := a.ts.Format("2006-01-02")
replayDir := filepath.Join(config.Cfg.Session.ReplayDir, dateDir)
if err := os.MkdirAll(replayDir, 0755); err != nil {
logger.L().Error("create replay directory failed", zap.String("dir", replayDir), zap.Error(err))
return err
}
filePath := filepath.Join(replayDir, fmt.Sprintf("%s.cast", a.sessionID))
file, err := os.Create(filePath)
if err != nil {
logger.L().Error("create replay file failed", zap.String("path", filePath), zap.Error(err))
return err
}
defer file.Close()
_, err = io.Copy(file, bytes.NewReader(a.buffer.Bytes()))
if err != nil {
logger.L().Error("write replay file failed", zap.String("path", filePath), zap.Error(err))
return err
}
logger.L().Info("Replay saved to local file",
zap.String("session_id", a.sessionID),
zap.String("path", filePath))
return nil
}
// GetReplay gets replay file
func GetReplay(sessionID string) (io.ReadCloser, error) {
// Try to get from storage first
if storage.DefaultSessionReplayAdapter != nil {
reader, err := storage.DefaultSessionReplayAdapter.GetReplay(sessionID)
if err == nil {
return reader, nil
}
logger.L().Warn("Failed to get replay from storage, trying local file",
zap.String("session_id", sessionID),
zap.Error(err))
}
// Fallback to local file with date hierarchy search
replayDir := config.Cfg.Session.ReplayDir
// First try exact path for backward compatibility
oldFilePath := filepath.Join(replayDir, fmt.Sprintf("%s.cast", sessionID))
if file, err := os.Open(oldFilePath); err == nil {
return file, nil
}
// Search in date hierarchy directories (directly under base_path)
entries, err := os.ReadDir(replayDir)
if err != nil {
return nil, fmt.Errorf("replay not found for session %s: %w", sessionID, err)
}
// Search in date directories (newest first)
for i := len(entries) - 1; i >= 0; i-- {
entry := entries[i]
if entry.IsDir() {
// Check if directory name looks like a date (YYYY-MM-DD)
if len(entry.Name()) == 10 && entry.Name()[4] == '-' && entry.Name()[7] == '-' {
filePath := filepath.Join(replayDir, entry.Name(), fmt.Sprintf("%s.cast", sessionID))
if file, err := os.Open(filePath); err == nil {
return file, nil
}
}
}
}
return nil, fmt.Errorf("replay not found for session %s", sessionID)
}
// ReplayExists checks if replay exists
func ReplayExists(sessionID string) bool {
// Check storage
if storage.DefaultSessionReplayAdapter != nil {
exists, err := storage.DefaultSessionReplayAdapter.ReplayExists(sessionID)
if err == nil && exists {
return true
}
}
// Check local file
replayDir := config.Cfg.Session.ReplayDir
filePath := filepath.Join(replayDir, fmt.Sprintf("%s.cast", sessionID))
_, err := os.Stat(filePath)
return err == nil
}
// DeleteReplay deletes replay
func DeleteReplay(sessionID string) error {
var lastErr error
// Delete from storage
if storage.DefaultSessionReplayAdapter != nil {
err := storage.DefaultSessionReplayAdapter.DeleteReplay(sessionID)
if err != nil {
logger.L().Warn("Failed to delete replay from storage",
zap.String("session_id", sessionID),
zap.Error(err))
lastErr = err
}
}
// Delete from local
replayDir := config.Cfg.Session.ReplayDir
filePath := filepath.Join(replayDir, fmt.Sprintf("%s.cast", sessionID))
err := os.Remove(filePath)
if err != nil && !os.IsNotExist(err) {
logger.L().Warn("Failed to delete local replay file",
zap.String("session_id", sessionID),
zap.String("path", filePath),
zap.Error(err))
if lastErr == nil {
lastErr = err
}
}
return lastErr
}
// MigrateLocalReplaysToStorage migrates local replays to new storage
func MigrateLocalReplaysToStorage() error {
if storage.DefaultSessionReplayAdapter == nil {
return fmt.Errorf("storage adapter not initialized")
}
replayDir := config.Cfg.Session.ReplayDir
if _, err := os.Stat(replayDir); os.IsNotExist(err) {
logger.L().Info("No local replay directory found, skipping migration")
return nil
}
files, err := os.ReadDir(replayDir)
if err != nil {
return fmt.Errorf("failed to read replay directory: %w", err)
}
var migratedCount, failedCount int
for _, file := range files {
if file.IsDir() || !strings.HasSuffix(file.Name(), ".cast") {
continue
}
sessionID := strings.TrimSuffix(file.Name(), ".cast")
filePath := filepath.Join(replayDir, file.Name())
// Check if already exists in storage
exists, err := storage.DefaultSessionReplayAdapter.ReplayExists(sessionID)
if err != nil {
logger.L().Error("Failed to check replay existence",
zap.String("session_id", sessionID),
zap.Error(err))
continue
}
if exists {
logger.L().Info("Replay already exists in storage, skipping",
zap.String("session_id", sessionID))
continue
}
// Migrate file
localFile, err := os.Open(filePath)
if err != nil {
logger.L().Error("Failed to open local replay file",
zap.String("path", filePath),
zap.Error(err))
failedCount++
continue
}
info, err := localFile.Stat()
if err != nil {
localFile.Close()
logger.L().Error("Failed to get file info",
zap.String("path", filePath),
zap.Error(err))
failedCount++
continue
}
err = storage.DefaultSessionReplayAdapter.SaveReplay(sessionID, localFile, info.Size())
localFile.Close()
if err != nil {
logger.L().Error("Failed to migrate replay to storage",
zap.String("session_id", sessionID),
zap.Error(err))
failedCount++
continue
}
migratedCount++
logger.L().Info("Replay migrated successfully",
zap.String("session_id", sessionID))
// Optional: Delete local file after successful migration
// os.Remove(filePath)
}
logger.L().Info("Replay migration completed",
zap.Int("migrated", migratedCount),
zap.Int("failed", failedCount))
if failedCount > 0 {
return fmt.Errorf("migration completed with %d failures", failedCount)
}
return nil
}

View File

@@ -0,0 +1,144 @@
package storage
import (
"context"
"io"
"time"
"github.com/veops/oneterm/pkg/logger"
"go.uber.org/zap"
)
// AdvancedProvider extends Provider with path strategy support
type AdvancedProvider interface {
Provider
GetPathStrategy() PathStrategy
}
// SessionReplayAdapter provides session replay storage operations
type SessionReplayAdapter struct {
provider Provider
}
// NewSessionReplayAdapter creates a new session replay adapter
func NewSessionReplayAdapter(provider Provider) *SessionReplayAdapter {
return &SessionReplayAdapter{
provider: provider,
}
}
// SaveReplay saves a session replay with timestamp-based path generation
func (a *SessionReplayAdapter) SaveReplay(sessionID string, reader io.Reader, size int64) error {
if a.provider == nil {
logger.L().Warn("SessionReplayAdapter provider is nil", zap.String("session_id", sessionID))
return fmt.Errorf("no storage provider available") // lets callers fall back to local disk
}
ctx := context.Background()
// Generate key with current timestamp for date-based organization
key := a.generateReplayKey(sessionID, time.Now())
return a.provider.Upload(ctx, key, reader, size)
}
// GetReplay retrieves a session replay
func (a *SessionReplayAdapter) GetReplay(sessionID string) (io.ReadCloser, error) {
if a.provider == nil {
return nil, fmt.Errorf("no storage provider available") // nil reader with nil error would mask the miss
}
ctx := context.Background()
// Try with current date first (most recent)
key := a.generateReplayKey(sessionID, time.Now())
reader, err := a.provider.Download(ctx, key)
if err == nil {
return reader, nil
}
// Fallback to old format for backward compatibility
oldKey := sessionID + ".cast"
return a.provider.Download(ctx, oldKey)
}
// DeleteReplay deletes a session replay
func (a *SessionReplayAdapter) DeleteReplay(sessionID string) error {
if a.provider == nil {
return nil // No storage provider available
}
ctx := context.Background()
// Try to delete with current date first
key := a.generateReplayKey(sessionID, time.Now())
err := a.provider.Delete(ctx, key)
if err == nil {
return nil
}
// Fallback to old format for backward compatibility
oldKey := sessionID + ".cast"
return a.provider.Delete(ctx, oldKey)
}
// ReplayExists checks if a replay exists
func (a *SessionReplayAdapter) ReplayExists(sessionID string) (bool, error) {
if a.provider == nil {
return false, nil // No storage provider available
}
ctx := context.Background()
// Try with current date first
key := a.generateReplayKey(sessionID, time.Now())
exists, err := a.provider.Exists(ctx, key)
if err == nil && exists {
return true, nil
}
// Fallback to old format for backward compatibility
oldKey := sessionID + ".cast"
return a.provider.Exists(ctx, oldKey)
}
// generateReplayKey generates storage key for replay files
// For date hierarchy strategy: YYYY-MM-DD/sessionID.cast
// For flat strategy: sessionID.cast
func (a *SessionReplayAdapter) generateReplayKey(sessionID string, timestamp time.Time) string {
// Check if provider supports advanced path generation
if advProvider, ok := a.provider.(AdvancedProvider); ok {
strategy := advProvider.GetPathStrategy()
if strategy == DateHierarchyStrategy {
// Use date-based path: YYYY-MM-DD/sessionID.cast
dateDir := timestamp.Format("2006-01-02")
return dateDir + "/" + sessionID + ".cast"
}
}
// Fallback to flat structure
return sessionID + ".cast"
}
// SaveReplayWithTimestamp saves a session replay with explicit timestamp
func (a *SessionReplayAdapter) SaveReplayWithTimestamp(sessionID string, reader io.Reader, size int64, timestamp time.Time) error {
if a.provider == nil {
logger.L().Warn("SessionReplayAdapter provider is nil", zap.String("session_id", sessionID))
return fmt.Errorf("no storage provider available")
}
ctx := context.Background()
key := a.generateReplayKey(sessionID, timestamp)
return a.provider.Upload(ctx, key, reader, size)
}
// Global adapter instance
var DefaultSessionReplayAdapter *SessionReplayAdapter
// InitializeAdapter initializes the global session replay adapter
func InitializeAdapter(provider Provider) {
DefaultSessionReplayAdapter = NewSessionReplayAdapter(provider)
logger.L().Info("Session replay adapter initialized",
zap.String("provider_type", provider.Type()))
}
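// Wiring sketch (hypothetical call site): initialize the global adapter from
// a concrete provider once at startup, then stream a replay through it.
func exampleSaveReplay(p Provider, sessionID string, r io.Reader, size int64) error {
InitializeAdapter(p)
return DefaultSessionReplayAdapter.SaveReplay(sessionID, r, size)
}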

View File

@@ -0,0 +1,147 @@
package storage
import (
"fmt"
"path/filepath"
"strings"
"time"
)
// PathStrategy defines different file path generation strategies
type PathStrategy string
const (
// FlatStrategy stores all files in a single directory
FlatStrategy PathStrategy = "flat"
// DateHierarchyStrategy organizes files by date hierarchy (YYYY-MM-DD)
DateHierarchyStrategy PathStrategy = "date_hierarchy"
)
// StorageRetentionConfig holds retention and archival configuration
type StorageRetentionConfig struct {
RetentionDays int `json:"retention_days" mapstructure:"retention_days"` // Keep files for N days
ArchiveDays int `json:"archive_days" mapstructure:"archive_days"` // Archive files after N days
CleanupEnabled bool `json:"cleanup_enabled" mapstructure:"cleanup_enabled"` // Enable automatic cleanup
ArchiveEnabled bool `json:"archive_enabled" mapstructure:"archive_enabled"` // Enable automatic archival
}
// DefaultRetentionConfig returns default retention configuration
func DefaultRetentionConfig() StorageRetentionConfig {
return StorageRetentionConfig{
RetentionDays: 30,
ArchiveDays: 7,
CleanupEnabled: true,
ArchiveEnabled: true,
}
}
// PathGenerator generates storage paths based on strategy
type PathGenerator struct {
Strategy PathStrategy
BaseDir string
}
// NewPathGenerator creates a new path generator
func NewPathGenerator(strategy PathStrategy, baseDir string) *PathGenerator {
return &PathGenerator{
Strategy: strategy,
BaseDir: baseDir,
}
}
// GenerateReplayPath generates path for session replay files
func (pg *PathGenerator) GenerateReplayPath(sessionID string, timestamp time.Time) string {
return pg.generatePath("replays", sessionID+".cast", timestamp)
}
// GenerateRDPFilePath generates path for RDP files
func (pg *PathGenerator) GenerateRDPFilePath(assetID int, remotePath string, timestamp time.Time) string {
// Sanitize remote path for safe storage
safeName := strings.ReplaceAll(remotePath, "/", "_")
safeName = strings.ReplaceAll(safeName, "\\", "_")
filename := fmt.Sprintf("asset_%d_%s", assetID, safeName)
return pg.generatePath("rdp_files", filename, timestamp)
}
// GenerateArchivePath generates path for archived files
func (pg *PathGenerator) GenerateArchivePath(category string, archiveDate time.Time) string {
archiveName := fmt.Sprintf("%s_%s.tar.gz", category, archiveDate.Format("2006-01"))
return filepath.Join(pg.BaseDir, "archived", archiveName)
}
// generatePath generates path based on strategy
func (pg *PathGenerator) generatePath(category, filename string, timestamp time.Time) string {
switch pg.Strategy {
case DateHierarchyStrategy:
dateDir := timestamp.Format("2006-01-02")
return filepath.Join(pg.BaseDir, category, dateDir, filename)
case FlatStrategy:
fallthrough
default:
return filepath.Join(pg.BaseDir, category, filename)
}
}
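// Path sketch (hypothetical inputs): with BaseDir "/data" and the date
// hierarchy strategy, GenerateReplayPath("abc123", ts) on 2025-06-14 yields
// "/data/replays/2025-06-14/abc123.cast"; the flat strategy yields
// "/data/replays/abc123.cast".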
// ParseDateFromPath extracts date from hierarchical path
func (pg *PathGenerator) ParseDateFromPath(filePath string) (time.Time, error) {
if pg.Strategy != DateHierarchyStrategy {
return time.Time{}, fmt.Errorf("path strategy does not support date parsing")
}
rel, err := filepath.Rel(pg.BaseDir, filePath)
if err != nil {
return time.Time{}, err
}
parts := strings.Split(rel, string(filepath.Separator))
if len(parts) < 2 {
return time.Time{}, fmt.Errorf("invalid path format for date extraction")
}
// Expected format: category/YYYY-MM-DD/filename
dateStr := parts[1]
return time.Parse("2006-01-02", dateStr)
}
// ListDatedDirectories returns all date directories for a category
func (pg *PathGenerator) ListDatedDirectories(category string) ([]string, error) {
if pg.Strategy != DateHierarchyStrategy {
return nil, fmt.Errorf("path strategy does not support date directories")
}
categoryPath := filepath.Join(pg.BaseDir, category)
return listDirectoriesByPattern(categoryPath, "2006-01-02")
}
// GetPathsOlderThan returns file paths older than specified days
func (pg *PathGenerator) GetPathsOlderThan(category string, days int) ([]string, error) {
cutoffDate := time.Now().AddDate(0, 0, -days)
var oldPaths []string
if pg.Strategy == DateHierarchyStrategy {
dirs, err := pg.ListDatedDirectories(category)
if err != nil {
return nil, err
}
for _, dir := range dirs {
dirDate, err := time.Parse("2006-01-02", filepath.Base(dir))
if err != nil {
continue
}
if dirDate.Before(cutoffDate) {
oldPaths = append(oldPaths, dir)
}
}
}
return oldPaths, nil
}
// listDirectoriesByPattern lists directories under basePath whose names parse
// with the given date layout (e.g. "2006-01-02"), in name (chronological) order
func listDirectoriesByPattern(basePath, pattern string) ([]string, error) {
entries, err := os.ReadDir(basePath)
if err != nil {
return nil, err
}
var dirs []string
for _, e := range entries {
if _, perr := time.Parse(pattern, e.Name()); e.IsDir() && perr == nil {
dirs = append(dirs, filepath.Join(basePath, e.Name()))
}
}
return dirs, nil
}

View File

@@ -0,0 +1,46 @@
package storage
import (
"context"
"io"
)
// Provider defines the interface for storage operations
type Provider interface {
// Upload uploads a file to storage
Upload(ctx context.Context, key string, reader io.Reader, size int64) error
// Download downloads a file from storage
Download(ctx context.Context, key string) (io.ReadCloser, error)
// Delete deletes a file from storage
Delete(ctx context.Context, key string) error
// Exists checks if a file exists
Exists(ctx context.Context, key string) (bool, error)
// GetSize gets the size of a file
GetSize(ctx context.Context, key string) (int64, error)
// Type returns the storage provider type
Type() string
// HealthCheck performs a health check
HealthCheck(ctx context.Context) error
}
// Config represents storage configuration
type Config struct {
Type string `json:"type"`
Name string `json:"name"`
Parameters map[string]string `json:"parameters"`
}
// ProviderType represents the type of storage provider
type ProviderType string
const (
TypeLocal ProviderType = "local"
TypeS3 ProviderType = "s3"
TypeMinio ProviderType = "minio"
)

View File

@@ -0,0 +1,230 @@
package providers
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"time"
"github.com/veops/oneterm/pkg/storage"
)
// LocalConfig holds configuration for local storage
type LocalConfig struct {
BasePath string `json:"base_path" mapstructure:"base_path"`
PathStrategy storage.PathStrategy `json:"path_strategy" mapstructure:"path_strategy"`
RetentionConfig storage.StorageRetentionConfig `json:"retention" mapstructure:"retention"`
}
// Local implements the storage.Provider interface for local filesystem
type Local struct {
config LocalConfig
pathGenerator *storage.PathGenerator
}
// NewLocal creates a new local storage provider
func NewLocal(config LocalConfig) (storage.Provider, error) {
// Set default path strategy if not specified
if config.PathStrategy == "" {
config.PathStrategy = storage.DateHierarchyStrategy
}
// Set default retention config if not specified
if config.RetentionConfig.RetentionDays == 0 {
config.RetentionConfig = storage.DefaultRetentionConfig()
}
// Ensure base directory exists
if err := os.MkdirAll(config.BasePath, 0755); err != nil {
return nil, fmt.Errorf("failed to create base directory: %w", err)
}
pathGenerator := storage.NewPathGenerator(config.PathStrategy, config.BasePath)
return &Local{
config: config,
pathGenerator: pathGenerator,
}, nil
}
// Upload uploads a file to local storage
func (p *Local) Upload(ctx context.Context, key string, reader io.Reader, size int64) error {
// For backward compatibility, if key doesn't include timestamp info,
// we'll use current time for path generation
filePath := p.getFilePath(key)
// Ensure directory exists
dir := filepath.Dir(filePath)
if err := os.MkdirAll(dir, 0755); err != nil {
return fmt.Errorf("failed to create directory: %w", err)
}
file, err := os.Create(filePath)
if err != nil {
return fmt.Errorf("failed to create file: %w", err)
}
defer file.Close()
_, err = io.Copy(file, reader)
if err != nil {
return fmt.Errorf("failed to write file: %w", err)
}
return nil
}
// Download downloads a file from local storage
func (p *Local) Download(ctx context.Context, key string) (io.ReadCloser, error) {
filePath := p.getFilePath(key)
// Try exact path first
if file, err := os.Open(filePath); err == nil {
return file, nil
}
// For date hierarchy strategy, search in date directories if direct path fails
if p.config.PathStrategy == storage.DateHierarchyStrategy {
return p.searchInDateDirectories(key)
}
return nil, fmt.Errorf("file not found: %s", key)
}
// Delete deletes a file from local storage
func (p *Local) Delete(ctx context.Context, key string) error {
filePath := p.getFilePath(key)
return os.Remove(filePath)
}
// Exists checks if a file exists in local storage
func (p *Local) Exists(ctx context.Context, key string) (bool, error) {
filePath := p.getFilePath(key)
// Try exact path first
if _, err := os.Stat(filePath); err == nil {
return true, nil
}
// For date hierarchy strategy, search in date directories
if p.config.PathStrategy == storage.DateHierarchyStrategy {
file, err := p.searchInDateDirectories(key)
if err != nil {
return false, nil
}
file.Close() // close the handle opened during the search
return true, nil
}
return false, nil
}
// GetSize gets the size of a file
func (p *Local) GetSize(ctx context.Context, key string) (int64, error) {
filePath := p.getFilePath(key)
stat, err := os.Stat(filePath)
if err != nil {
// Search in date directories for date hierarchy strategy
if p.config.PathStrategy == storage.DateHierarchyStrategy {
file, err := p.searchInDateDirectories(key)
if err != nil {
return 0, err
}
defer file.Close()
// Get file info from opened file
if f, ok := file.(*os.File); ok {
stat, err = f.Stat()
if err != nil {
return 0, err
}
return stat.Size(), nil
}
}
return 0, err
}
return stat.Size(), nil
}
// Type returns the storage type
func (p *Local) Type() string {
return "local"
}
// HealthCheck performs a health check on the local storage
func (p *Local) HealthCheck(ctx context.Context) error {
// Check if base directory is writable
testFile := filepath.Join(p.config.BasePath, ".health_check")
file, err := os.Create(testFile)
if err != nil {
return fmt.Errorf("local storage health check failed: %w", err)
}
file.Close()
os.Remove(testFile)
return nil
}
// getFilePath resolves the file path for a given key
func (p *Local) getFilePath(key string) string {
// For new path generation with timestamps, we need additional context
// For now, maintain backward compatibility with direct key-to-path mapping
return filepath.Join(p.config.BasePath, key)
}
// GetFilePathWithTimestamp generates file path with timestamp for new uploads
func (p *Local) GetFilePathWithTimestamp(key string, timestamp time.Time) string {
// Extract category and filename from key
dir := filepath.Dir(key)
filename := filepath.Base(key)
switch dir {
case "replays":
// Extract session ID from filename (remove .cast extension)
sessionID := filename[:len(filename)-len(filepath.Ext(filename))]
return p.pathGenerator.GenerateReplayPath(sessionID, timestamp)
default:
// Fallback to basic path generation
return p.pathGenerator.GenerateReplayPath(filename, timestamp)
}
}
// searchInDateDirectories searches for a file in date directories
func (p *Local) searchInDateDirectories(key string) (io.ReadCloser, error) {
dir := filepath.Dir(key)
filename := filepath.Base(key)
categoryPath := filepath.Join(p.config.BasePath, dir)
// List all date directories
entries, err := os.ReadDir(categoryPath)
if err != nil {
return nil, fmt.Errorf("failed to read category directory: %w", err)
}
// Search in date directories (newest first)
for i := len(entries) - 1; i >= 0; i-- {
entry := entries[i]
if entry.IsDir() {
// Check if directory name looks like a date (YYYY-MM-DD)
if len(entry.Name()) == 10 && entry.Name()[4] == '-' && entry.Name()[7] == '-' {
possiblePath := filepath.Join(categoryPath, entry.Name(), filename)
if file, err := os.Open(possiblePath); err == nil {
return file, nil
}
}
}
}
return nil, fmt.Errorf("file not found in any date directory: %s", key)
}
// GetPathStrategy returns the path strategy
func (p *Local) GetPathStrategy() storage.PathStrategy {
return p.config.PathStrategy
}
// GetRetentionConfig returns the retention configuration
func (p *Local) GetRetentionConfig() storage.StorageRetentionConfig {
return p.config.RetentionConfig
}
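// Usage sketch (hypothetical path and payload): build a local provider and
// round-trip one object; payload is any caller-supplied stream of size bytes.
func exampleLocalRoundTrip(ctx context.Context, payload io.Reader, size int64) error {
p, err := NewLocal(LocalConfig{BasePath: "/tmp/oneterm-demo"})
if err != nil {
return err
}
if err := p.Upload(ctx, "replays/demo.cast", payload, size); err != nil {
return err
}
rc, err := p.Download(ctx, "replays/demo.cast")
if err != nil {
return err
}
return rc.Close()
}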

View File

@@ -0,0 +1,311 @@
package providers
import (
"context"
"fmt"
"io"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/veops/oneterm/pkg/storage"
)
// MinioConfig holds configuration for Minio storage
type MinioConfig struct {
Endpoint string `json:"endpoint" mapstructure:"endpoint"`
AccessKeyID string `json:"access_key_id" mapstructure:"access_key_id"`
SecretAccessKey string `json:"secret_access_key" mapstructure:"secret_access_key"`
UseSSL bool `json:"use_ssl" mapstructure:"use_ssl"`
BucketName string `json:"bucket_name" mapstructure:"bucket_name"`
PathStrategy storage.PathStrategy `json:"path_strategy" mapstructure:"path_strategy"`
RetentionConfig storage.StorageRetentionConfig `json:"retention" mapstructure:"retention"`
}
// Minio implements the storage.Provider interface for Minio object storage
type Minio struct {
client *minio.Client
config MinioConfig
pathGenerator *storage.PathGenerator
}
// NewMinio creates a new Minio storage provider
func NewMinio(config MinioConfig) (storage.Provider, error) {
// Set default path strategy if not specified
if config.PathStrategy == "" {
config.PathStrategy = storage.DateHierarchyStrategy
}
// Set default retention config if not specified
if config.RetentionConfig.RetentionDays == 0 {
config.RetentionConfig = storage.DefaultRetentionConfig()
}
// Initialize Minio client
client, err := minio.New(config.Endpoint, &minio.Options{
Creds: credentials.NewStaticV4(config.AccessKeyID, config.SecretAccessKey, ""),
Secure: config.UseSSL,
})
if err != nil {
return nil, fmt.Errorf("failed to create Minio client: %w", err)
}
// Create bucket if it doesn't exist
ctx := context.Background()
exists, err := client.BucketExists(ctx, config.BucketName)
if err != nil {
return nil, fmt.Errorf("failed to check if bucket exists: %w", err)
}
if !exists {
err = client.MakeBucket(ctx, config.BucketName, minio.MakeBucketOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create bucket: %w", err)
}
}
// Use bucket name as virtual base path for path generator
pathGenerator := storage.NewPathGenerator(config.PathStrategy, config.BucketName)
return &Minio{
client: client,
config: config,
pathGenerator: pathGenerator,
}, nil
}
// Upload uploads a file to Minio storage
func (m *Minio) Upload(ctx context.Context, key string, reader io.Reader, size int64) error {
objectKey := m.getObjectKey(key)
_, err := m.client.PutObject(ctx, m.config.BucketName, objectKey, reader, size, minio.PutObjectOptions{
ContentType: "application/octet-stream",
})
if err != nil {
return fmt.Errorf("failed to upload object: %w", err)
}
return nil
}
// Download downloads a file from Minio storage
func (m *Minio) Download(ctx context.Context, key string) (io.ReadCloser, error) {
objectKey := m.getObjectKey(key)
// Try exact key first
object, err := m.client.GetObject(ctx, m.config.BucketName, objectKey, minio.GetObjectOptions{})
if err == nil {
// Verify object exists by reading stat
_, err = object.Stat()
if err == nil {
return object, nil
}
object.Close()
}
// For date hierarchy strategy, search in date prefixes
if m.config.PathStrategy == storage.DateHierarchyStrategy {
return m.searchInDatePrefixes(ctx, key)
}
return nil, fmt.Errorf("object not found: %s", key)
}
// Delete deletes a file from Minio storage
func (m *Minio) Delete(ctx context.Context, key string) error {
objectKey := m.getObjectKey(key)
return m.client.RemoveObject(ctx, m.config.BucketName, objectKey, minio.RemoveObjectOptions{})
}
// Exists checks if a file exists in Minio storage
func (m *Minio) Exists(ctx context.Context, key string) (bool, error) {
objectKey := m.getObjectKey(key)
// Try exact key first
_, err := m.client.StatObject(ctx, m.config.BucketName, objectKey, minio.StatObjectOptions{})
if err == nil {
return true, nil
}
// For date hierarchy strategy, search in date prefixes
if m.config.PathStrategy == storage.DateHierarchyStrategy {
obj, err := m.searchInDatePrefixes(ctx, key)
if err != nil {
return false, nil
}
obj.Close() // release the object handle opened during the search
return true, nil
}
return false, nil
}
// GetSize gets the size of a file in Minio storage
func (m *Minio) GetSize(ctx context.Context, key string) (int64, error) {
objectKey := m.getObjectKey(key)
stat, err := m.client.StatObject(ctx, m.config.BucketName, objectKey, minio.StatObjectOptions{})
if err != nil {
// Objects under date prefixes cannot be stat'ed without listing the bucket,
// so report not-found instead of scanning
if m.config.PathStrategy == storage.DateHierarchyStrategy {
return 0, fmt.Errorf("object not found: %s", key)
}
return 0, err
}
return stat.Size, nil
}
// Type returns the storage type
func (m *Minio) Type() string {
return "minio"
}
// HealthCheck performs a health check on Minio storage
func (m *Minio) HealthCheck(ctx context.Context) error {
// Check if bucket is accessible
_, err := m.client.BucketExists(ctx, m.config.BucketName)
if err != nil {
return fmt.Errorf("Minio health check failed: %w", err)
}
return nil
}
// GetPathStrategy returns the path strategy
func (m *Minio) GetPathStrategy() storage.PathStrategy {
return m.config.PathStrategy
}
// GetRetentionConfig returns the retention configuration
func (m *Minio) GetRetentionConfig() storage.StorageRetentionConfig {
return m.config.RetentionConfig
}
// getObjectKey resolves the object key for a given storage key
func (m *Minio) getObjectKey(key string) string {
// Remove bucket prefix if present (path generator includes it)
if strings.HasPrefix(key, m.config.BucketName+"/") {
return strings.TrimPrefix(key, m.config.BucketName+"/")
}
return key
}
// GetObjectKeyWithTimestamp generates object key with timestamp for new uploads
func (m *Minio) GetObjectKeyWithTimestamp(key string, timestamp time.Time) string {
// Extract category and filename from key
dir := filepath.Dir(key)
filename := filepath.Base(key)
switch dir {
case "replays":
// Generate date-based path for replays
if m.config.PathStrategy == storage.DateHierarchyStrategy {
dateDir := timestamp.Format("2006-01-02")
return "replays/" + dateDir + "/" + filename
}
}
// Fallback to direct key
return key
}
// searchInDatePrefixes searches for an object in date-based prefixes
func (m *Minio) searchInDatePrefixes(ctx context.Context, key string) (io.ReadCloser, error) {
dir := filepath.Dir(key)
filename := filepath.Base(key)
// List objects with category prefix to find date directories
objectCh := m.client.ListObjects(ctx, m.config.BucketName, minio.ListObjectsOptions{
Prefix: dir + "/",
Recursive: false,
})
// Collect date prefixes
var datePrefixes []string
for object := range objectCh {
if object.Err != nil {
continue
}
// Extract date directory from object key
parts := strings.Split(strings.TrimPrefix(object.Key, dir+"/"), "/")
if len(parts) >= 1 {
dateStr := parts[0]
// Check if it looks like a date (YYYY-MM-DD)
if len(dateStr) == 10 && dateStr[4] == '-' && dateStr[7] == '-' {
prefix := dir + "/" + dateStr + "/"
// Avoid duplicates
found := false
for _, p := range datePrefixes {
if p == prefix {
found = true
break
}
}
if !found {
datePrefixes = append(datePrefixes, prefix)
}
}
}
}
// Search in date prefixes (newest first by sorting in reverse)
for i := len(datePrefixes) - 1; i >= 0; i-- {
possibleKey := datePrefixes[i] + filename
object, err := m.client.GetObject(ctx, m.config.BucketName, possibleKey, minio.GetObjectOptions{})
if err == nil {
// Verify object exists
_, err = object.Stat()
if err == nil {
return object, nil
}
object.Close()
}
}
return nil, fmt.Errorf("object not found in any date prefix: %s", key)
}
// ParseMinioConfigFromMap creates MinioConfig from string map (for database storage)
func ParseMinioConfigFromMap(configMap map[string]string) (MinioConfig, error) {
config := MinioConfig{}
config.Endpoint = configMap["endpoint"]
config.AccessKeyID = configMap["access_key_id"]
config.SecretAccessKey = configMap["secret_access_key"]
config.BucketName = configMap["bucket_name"]
// Parse boolean fields
if useSSLStr, exists := configMap["use_ssl"]; exists {
config.UseSSL = useSSLStr == "true"
}
// Parse path strategy
if strategyStr, exists := configMap["path_strategy"]; exists {
config.PathStrategy = storage.PathStrategy(strategyStr)
} else {
config.PathStrategy = storage.DateHierarchyStrategy
}
// Parse retention configuration
retentionConfig := storage.DefaultRetentionConfig()
if retentionDaysStr, exists := configMap["retention_days"]; exists {
if days, err := strconv.Atoi(retentionDaysStr); err == nil {
retentionConfig.RetentionDays = days
}
}
if archiveDaysStr, exists := configMap["archive_days"]; exists {
if days, err := strconv.Atoi(archiveDaysStr); err == nil {
retentionConfig.ArchiveDays = days
}
}
if cleanupStr, exists := configMap["cleanup_enabled"]; exists {
retentionConfig.CleanupEnabled = cleanupStr == "true"
}
if archiveStr, exists := configMap["archive_enabled"]; exists {
retentionConfig.ArchiveEnabled = archiveStr == "true"
}
config.RetentionConfig = retentionConfig
return config, nil
}