refactor project structure

This commit is contained in:
hdt3213
2021-05-02 14:54:42 +08:00
parent bb9c140653
commit f29298cc68
78 changed files with 140 additions and 140 deletions

85
redis/server/client.go Normal file
View File

@@ -0,0 +1,85 @@
package server
import (
"github.com/hdt3213/godis/lib/sync/wait"
"net"
"sync"
"time"
)
// Client is the server-side abstraction of one active client connection.
type Client struct {
// the underlying network connection
conn net.Conn
// waiting until all pending replies have been sent (consulted on Close)
waitingReply wait.Wait
// lock while server sending response; serializes writes to conn
mu sync.Mutex
// set of subscribed channel names (lazily initialized; nil means none)
subs map[string]bool
}
// Close tears down the connection, first giving in-flight replies a
// bounded grace period to finish sending. It always reports success.
func (c *Client) Close() error {
	const gracePeriod = 10 * time.Second
	// let any reply currently being written complete before closing
	c.waitingReply.WaitWithTimeout(gracePeriod)
	_ = c.conn.Close()
	return nil
}
// MakeClient wraps an accepted network connection in a Client.
// All other fields start at their useful zero values.
func MakeClient(conn net.Conn) *Client {
	c := &Client{}
	c.conn = conn
	return c
}
// Write sends b over the client connection. Concurrent calls are
// serialized by the client's mutex so replies are never interleaved.
// An empty payload is silently ignored and reports no error.
func (c *Client) Write(b []byte) error {
	// len of a nil slice is 0, so the separate nil check was redundant
	if len(b) == 0 {
		return nil
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	_, err := c.conn.Write(b)
	return err
}
// SubsChannel records that this client has subscribed to the given channel.
func (c *Client) SubsChannel(channel string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	// lazily create the subscription set on first use
	if c.subs == nil {
		c.subs = map[string]bool{}
	}
	c.subs[channel] = true
}
// UnSubsChannel removes the given channel from this client's
// subscription set; unsubscribing from an unknown channel is a no-op.
func (c *Client) UnSubsChannel(channel string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.subs != nil {
		delete(c.subs, channel)
	}
}
// SubsCount returns how many channels this client is subscribed to.
func (c *Client) SubsCount() int {
	// take the mutex: reading c.subs while SubsChannel/UnSubsChannel
	// mutate it concurrently is a data race (caught by go test -race)
	c.mu.Lock()
	defer c.mu.Unlock()
	// len of a nil map is 0, so no separate nil check is needed
	return len(c.subs)
}
// GetChannels returns a snapshot of the channels this client is
// subscribed to, in no particular order. The result is always non-nil.
func (c *Client) GetChannels() []string {
	// take the mutex: iterating c.subs while it is mutated concurrently
	// is a data race and can panic ("concurrent map iteration and map write")
	c.mu.Lock()
	defer c.mu.Unlock()
	channels := make([]string, 0, len(c.subs))
	for channel := range c.subs {
		channels = append(channels, channel)
	}
	return channels
}

112
redis/server/handler.go Normal file
View File

@@ -0,0 +1,112 @@
package server
/*
* A tcp.Handler implements redis protocol
*/
import (
"context"
"github.com/hdt3213/godis/cluster"
"github.com/hdt3213/godis/config"
DBImpl "github.com/hdt3213/godis/db"
"github.com/hdt3213/godis/interface/db"
"github.com/hdt3213/godis/lib/logger"
"github.com/hdt3213/godis/lib/sync/atomic"
"github.com/hdt3213/godis/redis/parser"
"github.com/hdt3213/godis/redis/reply"
"io"
"net"
"strings"
"sync"
)
var (
// UnknownErrReplyBytes is the fallback RESP error reply, sent when
// command execution yields no result.
UnknownErrReplyBytes = []byte("-ERR unknown\r\n")
)
// Handler implements tcp.Handler and speaks the redis protocol.
type Handler struct {
activeConn sync.Map // set of live *Client connections (value is a placeholder)
db db.DB // storage backend: standalone db or cluster, chosen in MakeHandler
closing atomic.AtomicBool // refusing new client and new request once set
}
// MakeHandler builds a Handler whose backend is picked from the config:
// a cluster facade when this node has an address and peers, otherwise a
// standalone in-memory database.
func MakeHandler() *Handler {
	var storage db.DB
	clusterMode := config.Properties.Self != "" && len(config.Properties.Peers) > 0
	if clusterMode {
		storage = cluster.MakeCluster()
	} else {
		storage = DBImpl.MakeDB()
	}
	return &Handler{db: storage}
}
// closeClient tears down one client: closes its connection, lets the
// database release per-client state, then drops it from the active set.
func (h *Handler) closeClient(client *Client) {
_ = client.Close()
h.db.AfterClientClose(client)
h.activeConn.Delete(client)
}
// Handle serves one client connection: it parses commands from conn,
// executes them against the database and writes replies back, until the
// connection is closed or the parser reports a fatal error.
func (h *Handler) Handle(ctx context.Context, conn net.Conn) {
	if h.closing.Get() {
		// closing handler refuses new connections.
		// BUG FIX: the original fell through after conn.Close() and kept
		// serving the connection it had just closed; return immediately.
		_ = conn.Close()
		return
	}

	client := MakeClient(conn)
	h.activeConn.Store(client, 1)

	ch := parser.Parse(conn)
	for payload := range ch {
		if payload.Err != nil {
			if payload.Err == io.EOF ||
				payload.Err == io.ErrUnexpectedEOF ||
				strings.Contains(payload.Err.Error(), "use of closed network connection") {
				// connection closed by the peer (or by us): clean up and stop
				h.closeClient(client)
				logger.Info("connection closed: " + client.conn.RemoteAddr().String())
				return
			}
			// protocol err: report it to the client and keep serving
			errReply := reply.MakeErrReply(payload.Err.Error())
			if err := client.Write(errReply.ToBytes()); err != nil {
				h.closeClient(client)
				logger.Info("connection closed: " + client.conn.RemoteAddr().String())
				return
			}
			continue
		}
		if payload.Data == nil {
			logger.Error("empty payload")
			continue
		}
		r, ok := payload.Data.(*reply.MultiBulkReply)
		if !ok {
			logger.Error("require multi bulk reply")
			continue
		}
		// execute the command and send the result (or a generic error reply)
		result := h.db.Exec(client, r.Args)
		if result != nil {
			_ = client.Write(result.ToBytes())
		} else {
			_ = client.Write(UnknownErrReplyBytes)
		}
	}
}
// Close shuts the handler down: it stops accepting new connections and
// requests, closes every active client, then closes the database.
func (h *Handler) Close() error {
	logger.Info("handler shutting down...") // fixed typo: "shuting" -> "shutting"
	h.closing.Set(true)
	// TODO: concurrent wait
	h.activeConn.Range(func(key interface{}, val interface{}) bool {
		client := key.(*Client)
		_ = client.Close()
		return true // keep iterating over every active connection
	})
	h.db.Close()
	return nil
}