first commit

This commit is contained in:
wencaiwulue
2021-07-24 19:10:03 +08:00
commit 7f846f6c0b
20 changed files with 3100 additions and 0 deletions

3
TODO.MD Normal file
View File

@@ -0,0 +1,3 @@
# TODO
## 域名解析功能 (DNS resolution support)

36
dns/dns.go Normal file
View File

@@ -0,0 +1,36 @@
package dns
import (
"fmt"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes"
"os/exec"
)
// todo set dns ip
// todo needs to test if set dns server do works or not
// Windows adds the cluster DNS service IP as a secondary DNS server on the
// "Ethernet" interface via netsh.
// NOTE(review): assumes the adapter is literally named "Ethernet" — confirm.
func Windows(clientset *kubernetes.Clientset) error {
	ip, err := GetDNSIp(clientset)
	if err != nil {
		return err
	}
	// Each netsh token must be its own argv entry. The previous code passed
	// the entire command line as one argument, which netsh cannot parse.
	args := []string{
		"interface", "ipv4", "add", "dnsserver",
		"name=Ethernet", fmt.Sprintf("address=%s", ip), "index=2",
	}
	output, err := exec.Command("netsh", args...).CombinedOutput()
	if err != nil {
		return err
	}
	log.Info(string(output)) // log as text, not a numeric byte-slice dump
	return nil
}
// UpdateMetric raises the "Ethernet0" interface metric to 290 via netsh so
// the tun device's default gateway takes routing priority.
// todo get tun name dynamically instead of hard-coding "Ethernet0"
func UpdateMetric() error {
	// netsh needs each token as a separate argv entry; the previous code
	// passed the whole command line as a single argument, which netsh rejects.
	args := []string{"interface", "ip", "set", "interface", "interface=Ethernet0", "metric=290"}
	output, err := exec.Command("netsh", args...).CombinedOutput()
	if err != nil {
		return err
	}
	log.Info(string(output)) // log as text, not a numeric byte-slice dump
	return nil
}

36
dns/dns_unix.go Normal file
View File

@@ -0,0 +1,36 @@
package dns
import (
"context"
"github.com/pkg/errors"
"io/fs"
"io/ioutil"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/kubernetes"
"path/filepath"
)
// Dns writes an /etc/resolver entry named "local" pointing at the cluster
// DNS service IP, so matching names resolve through the cluster resolver.
func Dns(clientset *kubernetes.Clientset) error {
	dnsIP, err := GetDNSIp(clientset)
	if err != nil {
		return err
	}
	// The resolver directory lives at the filesystem root; the previous
	// relative path "etc/resolver/local" wrote under the working directory.
	filename := filepath.Join("/etc", "resolver", "local")
	fileContent := "nameserver " + dnsIP
	// 0644: the resolver file only needs to be world-readable, not 0777.
	return ioutil.WriteFile(filename, []byte(fileContent), fs.FileMode(0644))
}
// GetDNSIp returns the ClusterIP of the kube-dns service in the kube-system
// namespace, located via its "k8s-app=kube-dns" label.
func GetDNSIp(clientset *kubernetes.Clientset) (string, error) {
	serviceList, err := clientset.CoreV1().Services(v1.NamespaceSystem).List(context.Background(), v1.ListOptions{
		LabelSelector: fields.OneTermEqualSelector("k8s-app", "kube-dns").String(),
	})
	if err != nil {
		return "", err
	}
	if len(serviceList.Items) == 0 {
		// Error strings start lower-case per Go convention; the old message
		// also read awkwardly ("Not found kube-dns").
		return "", errors.New("kube-dns service not found in namespace kube-system")
	}
	return serviceList.Items[0].Spec.ClusterIP, nil
}

37
exe/driver.go Executable file
View File

@@ -0,0 +1,37 @@
package exe
import (
"fmt"
log "github.com/sirupsen/logrus"
"os"
"os/exec"
"path/filepath"
)
// InstallTunTapDriver runs the embedded TAP-Windows installer and aborts the
// process when installation fails.
func InstallTunTapDriver() {
	err := Install()
	if err != nil {
		log.Fatal(err)
	}
}
// UninstallTunTapDriver runs the TAP-Windows uninstaller silently (/S) from
// the first drive that can be opened. Failures are printed, not returned.
func UninstallTunTapDriver() {
	// The previous filepath.VolumeName("C") call discarded its result and
	// had no effect, so it has been removed.
	path := filepath.Join(getDriver()+":\\", "Program Files", "TAP-Windows", "Uninstall.exe")
	cmd := exec.Command(path, "/S")
	b, e := cmd.CombinedOutput()
	if e != nil {
		fmt.Println(e)
	}
	fmt.Println(string(b))
}
// getDriver probes drive letters A-Z and returns the first whose root
// directory can be opened, or "" when none is accessible.
func getDriver() string {
	const letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
	for i := 0; i < len(letters); i++ {
		letter := string(letters[i])
		f, err := os.Open(letter + ":\\")
		if err != nil {
			continue
		}
		_ = f.Close()
		return letter
	}
	return ""
}

31
exe/driver_test.go Normal file
View File

@@ -0,0 +1,31 @@
package exe
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"testing"
)
// TestInstall is a manual smoke test: it installs the embedded TAP driver on
// the current machine (Windows only; has real side effects).
func TestInstall(t *testing.T) {
	InstallTunTapDriver()
}
// TestUninstall is a manual smoke test: it removes the TAP driver installed
// by TestInstall (Windows only; has real side effects).
func TestUninstall(t *testing.T) {
	UninstallTunTapDriver()
}
// TestAA shells out to a locally installed "vpn" binary and prints its
// version output (manual smoke test; errors are printed, not asserted).
func TestAA(t *testing.T) {
	out, err := exec.Command("vpn", "version").CombinedOutput()
	if err != nil {
		fmt.Println(err)
	}
	fmt.Println(string(out))
}
// TestName creates a throwaway "*.exe" temp file, prints its generated path,
// and deletes it again (manual check of the temp-file naming pattern).
func TestName(t *testing.T) {
	f, _ := ioutil.TempFile("", "*.exe")
	fmt.Println(f.Name())
	_ = os.Remove(f.Name())
}

30
exe/embed.go Executable file
View File

@@ -0,0 +1,30 @@
package exe
import (
"embed"
"io/ioutil"
"os"
"os/exec"
)
// The TAP-Windows installer binary is compiled into the program so the
// client can install the driver without downloading anything.
// NOTE(review): the variable name shadows the io/fs package name within
// this package.
//go:embed tap-windows-9.21.2.exe
var fs embed.FS
// Install extracts the embedded TAP-Windows installer to a temporary file
// and runs it silently (/S). The temp file is removed afterwards.
func Install() error {
	data, err := fs.ReadFile("tap-windows-9.21.2.exe")
	if err != nil {
		return err
	}
	tempFile, err := ioutil.TempFile("", "*.exe")
	if err != nil {
		// Check the error before deferring: the old code deferred
		// os.Remove(tempFile.Name()) first, which panics on a nil tempFile.
		return err
	}
	defer func() { _ = os.Remove(tempFile.Name()) }()
	if _, err = tempFile.Write(data); err != nil {
		return err
	}
	_ = tempFile.Sync()
	// Close before executing; Windows refuses to run a file that is still
	// open for writing.
	_ = tempFile.Close()
	_ = os.Chmod(tempFile.Name(), 0700)
	cmd := exec.Command(tempFile.Name(), "/S")
	return cmd.Run()
}

BIN
exe/tap-windows-9.21.2.exe Executable file

Binary file not shown.

23
go.mod Normal file
View File

@@ -0,0 +1,23 @@
module kubevpn
go 1.16
require (
github.com/coreos/go-iptables v0.6.0 // indirect
github.com/ginuerzh/gost v0.0.0-20210324070400-a4695ece2d1f
github.com/go-log/log v0.2.0
github.com/google/uuid v1.1.2
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
github.com/onsi/ginkgo v1.14.0 // indirect
github.com/shadowsocks/go-shadowsocks2 v0.1.5 // indirect
github.com/sirupsen/logrus v1.8.1
github.com/xtaci/lossyconn v0.0.0-20200209145036-adba10fffc37 // indirect
golang.org/x/net v0.0.0-20210504132125-bbd867fde50d // indirect
golang.zx2c4.com/wireguard v0.0.0-20210604143328-f9b48a961cd2 // indirect
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20210506160403-92e472f520a5 // indirect
k8s.io/api v0.21.2
k8s.io/apimachinery v0.21.2
k8s.io/cli-runtime v0.21.2
k8s.io/client-go v0.21.2
k8s.io/kubectl v0.21.2
)

1109
go.sum Normal file

File diff suppressed because it is too large Load Diff

314
pkg/cfg.go Normal file
View File

@@ -0,0 +1,314 @@
package main
import (
"bufio"
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"github.com/ginuerzh/gost"
"io/ioutil"
"net"
"net/url"
"os"
"strings"
)
// baseConfig is the top-level gost configuration: one primary route
// (embedded) plus any number of additional routes.
type baseConfig struct {
	route          // primary route
	Routes []route // extra routes, each producing its own routers
	Debug  bool    // enables verbose/debug behavior — consumer not visible in this file
}
// Fallback certificate/key paths used by tlsConfig when the caller supplies
// no explicit cert or key file.
var (
	defaultCertFile = "cert.pem"
	defaultKeyFile  = "key.pem"
)
// tlsConfig builds a server-side *tls.Config from certFile/keyFile, falling
// back to the default cert/key pair when either path is empty. When caFile
// loads into a pool, client certificates are required and verified against it.
func tlsConfig(certFile, keyFile, caFile string) (*tls.Config, error) {
	if certFile == "" || keyFile == "" {
		certFile = defaultCertFile
		keyFile = defaultKeyFile
	}
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	cfg := &tls.Config{Certificates: []tls.Certificate{cert}}
	pool, _ := loadCA(caFile)
	if pool != nil {
		cfg.ClientCAs = pool
		cfg.ClientAuth = tls.RequireAndVerifyClientCert
	}
	return cfg, nil
}
// loadCA reads a PEM bundle from caFile into a fresh certificate pool.
// An empty path yields a nil pool and no error.
func loadCA(caFile string) (*x509.CertPool, error) {
	if caFile == "" {
		return nil, nil
	}
	data, err := ioutil.ReadFile(caFile)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if ok := pool.AppendCertsFromPEM(data); !ok {
		return nil, errors.New("AppendCertsFromPEM failed")
	}
	return pool, nil
}
// parseKCPConfig decodes a gost KCP configuration from the JSON file at
// configFile. An empty path yields nil, nil.
func parseKCPConfig(configFile string) (*gost.KCPConfig, error) {
	if configFile == "" {
		return nil, nil
	}
	f, err := os.Open(configFile)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	cfg := new(gost.KCPConfig)
	if err := json.NewDecoder(f).Decode(cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
// parseUsers reads credentials from authFile, one per line: either "user"
// or "user password" (whitespace separated). Blank lines and lines starting
// with '#' are skipped. An empty path yields no users and no error.
func parseUsers(authFile string) (users []*url.Userinfo, err error) {
	if authFile == "" {
		return
	}
	file, err := os.Open(authFile)
	if err != nil {
		return
	}
	// The previous code never closed the file, leaking a descriptor per call.
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		s := strings.SplitN(line, " ", 2)
		if len(s) == 1 {
			users = append(users, url.User(strings.TrimSpace(s[0])))
		} else if len(s) == 2 {
			users = append(users, url.UserPassword(strings.TrimSpace(s[0]), strings.TrimSpace(s[1])))
		}
	}
	err = scanner.Err()
	return
}
// parseAuthenticator loads user credentials from file s into a local gost
// authenticator that is periodically reloaded. Empty s yields nil, nil.
func parseAuthenticator(s string) (gost.Authenticator, error) {
	if s == "" {
		return nil, nil
	}
	file, err := os.Open(s)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	auth := gost.NewLocalAuthenticator(nil)
	auth.Reload(file) // initial-load error intentionally ignored, as before
	go gost.PeriodReload(auth, s)
	return auth, nil
}
// parseIP expands s into "host:port" strings. s is either a path to a file
// with one address per line ('#' starts a comment) or an inline
// comma-separated list. Entries without an explicit port get the given port,
// defaulting to 8080 when port is empty. Empty s yields nil.
// NOTE(review): the ":" check means IPv6 literals are not handled (see TODO).
func parseIP(s string, port string) (ips []string) {
	if s == "" {
		return
	}
	if port == "" {
		port = "8080" // default port
	}
	file, err := os.Open(s)
	if err != nil {
		// Not a readable file: treat s as an inline comma-separated list.
		ss := strings.Split(s, ",")
		for _, s := range ss {
			s = strings.TrimSpace(s)
			if s != "" {
				// TODO: support IPv6
				if !strings.Contains(s, ":") {
					s = s + ":" + port
				}
				ips = append(ips, s)
			}
		}
		return
	}
	// The previous code never closed the file, leaking a descriptor per call.
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		if !strings.Contains(line, ":") {
			line = line + ":" + port
		}
		ips = append(ips, line)
	}
	// Scanner read errors are silently ignored; the signature offers no way
	// to report them.
	return
}
// parseBypass builds a gost bypass list from s: a leading "~" reverses the
// match, and s is either a pattern file (periodically reloaded) or an inline
// comma-separated pattern list. Empty s yields nil.
func parseBypass(s string) *gost.Bypass {
	if s == "" {
		return nil
	}
	reversed := false
	if strings.HasPrefix(s, "~") {
		reversed = true
		s = strings.TrimLeft(s, "~")
	}
	f, err := os.Open(s)
	if err != nil {
		// Inline list of match patterns.
		var matchers []gost.Matcher
		for _, pattern := range strings.Split(s, ",") {
			pattern = strings.TrimSpace(pattern)
			if pattern == "" {
				continue
			}
			matchers = append(matchers, gost.NewMatcher(pattern))
		}
		return gost.NewBypass(reversed, matchers...)
	}
	defer f.Close()
	// Pattern file: load now and keep it fresh with periodic reloads.
	bp := gost.NewBypass(reversed)
	bp.Reload(f)
	go gost.PeriodReload(bp, s)
	return bp
}
// parseResolver builds a gost DNS resolver from cfg, which is either a path
// to a resolver config file (periodically reloaded) or an inline
// comma-separated list of name servers. Inline entries may be "addr",
// "addr/protocol", or an https / https-chain URL. Empty cfg yields nil.
func parseResolver(cfg string) gost.Resolver {
	if cfg == "" {
		return nil
	}
	var nss []gost.NameServer
	f, err := os.Open(cfg)
	if err != nil {
		// Not a readable file: treat cfg as an inline list.
		for _, s := range strings.Split(cfg, ",") {
			s = strings.TrimSpace(s)
			if s == "" {
				continue
			}
			if strings.HasPrefix(s, "https") {
				// DNS-over-HTTPS entry; the whole URL is kept as the address.
				p := "https"
				u, _ := url.Parse(s)
				if u == nil || u.Scheme == "" {
					continue
				}
				if u.Scheme == "https-chain" {
					p = u.Scheme
				}
				ns := gost.NameServer{
					Addr:     s,
					Protocol: p,
				}
				nss = append(nss, ns)
				continue
			}
			// Plain entry: "addr" or "addr/protocol".
			ss := strings.Split(s, "/")
			if len(ss) == 1 {
				ns := gost.NameServer{
					Addr: ss[0],
				}
				nss = append(nss, ns)
			}
			if len(ss) == 2 {
				ns := gost.NameServer{
					Addr:     ss[0],
					Protocol: ss[1],
				}
				nss = append(nss, ns)
			}
		}
		return gost.NewResolver(0, nss...)
	}
	defer f.Close()
	// cfg is a file: let gost parse it and reload it periodically.
	resolver := gost.NewResolver(0)
	resolver.Reload(f)
	go gost.PeriodReload(resolver, cfg)
	return resolver
}
// parseHosts loads a hosts-style mapping file and keeps it fresh with
// periodic reloads; it returns nil when the file cannot be opened.
func parseHosts(s string) *gost.Hosts {
	file, err := os.Open(s)
	if err != nil {
		return nil
	}
	defer file.Close()
	h := gost.NewHosts()
	h.Reload(file)
	go gost.PeriodReload(h, s)
	return h
}
// parseIPRoutes parses tun routes from s, which is either an inline
// comma-separated list of CIDRs (no gateways) or a path to a file of
// "CIDR [gateway]" lines ('#' starts a comment). Unparsable entries are
// silently skipped. Empty s yields nil.
func parseIPRoutes(s string) (routes []gost.IPRoute) {
	if s == "" {
		return
	}
	file, err := os.Open(s)
	if err != nil {
		// Not a readable file: inline CIDR list.
		ss := strings.Split(s, ",")
		for _, s := range ss {
			if _, inet, _ := net.ParseCIDR(strings.TrimSpace(s)); inet != nil {
				routes = append(routes, gost.IPRoute{Dest: inet})
			}
		}
		return
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		line := strings.Replace(scanner.Text(), "\t", " ", -1)
		line = strings.TrimSpace(line)
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		var route gost.IPRoute
		var ss []string
		// Split on spaces, dropping empty fields.
		for _, s := range strings.Split(line, " ") {
			if s = strings.TrimSpace(s); s != "" {
				ss = append(ss, s)
			}
		}
		if len(ss) > 0 && ss[0] != "" {
			// First field must be a valid CIDR, otherwise skip the line.
			_, route.Dest, _ = net.ParseCIDR(strings.TrimSpace(ss[0]))
			if route.Dest == nil {
				continue
			}
		}
		if len(ss) > 1 && ss[1] != "" {
			route.Gateway = net.ParseIP(ss[1])
		}
		routes = append(routes, route)
	}
	// NOTE(review): scanner.Err() is ignored; a read error ends parsing silently.
	return routes
}

154
pkg/main.go Normal file
View File

@@ -0,0 +1,154 @@
package main
import (
"context"
"errors"
"fmt"
log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/homedir"
"kubevpn/exe"
"kubevpn/remote"
"net"
"path/filepath"
"runtime"
"strings"
)
// Package-level state shared between init(), main() and start().
var (
	baseCfg   = &baseConfig{}       // gost route configuration assembled in init
	namespace string                // namespace from the loaded kubeconfig context
	clientset *kubernetes.Clientset // cluster API client
	config    *restclient.Config    // REST config reused for the port forward
	name      string                // name of the remote server pod created in init
)
// init loads the default kubeconfig (~/.kube/config), connects to the
// cluster, deploys the remote VPN server pod, seeds the in-cluster DHCP
// pool, allocates this client's tunnel IP, and prepares the gost route
// (SSH chain to the forwarded pod + local tun listener). Any failure is
// fatal.
// NOTE(review): this much work in init() makes the binary hard to test;
// consider moving it into main().
func init() {
	var err error
	clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{
			ExplicitPath: filepath.Join(homedir.HomeDir(), clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName),
		},
		nil,
	)
	config, err = clientConfig.ClientConfig()
	if err != nil {
		log.Fatal(err)
	}
	clientset, err = kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}
	namespace, _, _ = clientConfig.Namespace()
	k8sCIDR, err := getCIDR(clientset, namespace)
	if err != nil {
		log.Fatal(err)
	}
	list := []string{k8sCIDR.String()}
	name = remote.CreateServer(clientset, namespace, "192.168.254.100/24")
	fmt.Println(name)
	// The DHCP pool must match the subnet the server was created with
	// (192.168.254.0/24); the previous code passed 196.168.254.100 here.
	err = remote.InitDHCP(clientset, namespace, &net.IPNet{IP: net.IPv4(192, 168, 254, 100), Mask: net.IPv4Mask(255, 255, 255, 0)})
	if err != nil {
		log.Fatal(err)
	}
	dhcp, err := remote.GetIpFromDHCP(clientset, namespace)
	if err != nil {
		log.Fatal(err)
	}
	list = append(list, dhcp.String())
	// Chain through the locally forwarded SSH port; serve a tun device that
	// routes the cluster CIDR plus our own tunnel subnet.
	baseCfg.route.ChainNodes = []string{"ssh://127.0.0.1:2222"}
	baseCfg.route.ServeNodes = []string{
		fmt.Sprintf("tun://:8421/127.0.0.1:8421?net=%s&route=%s", dhcp.String(), strings.Join(list, ",")),
	}
	fmt.Println("your ip is " + dhcp.String())
	baseCfg.Debug = true
	if runtime.GOOS == "windows" {
		exe.InstallTunTapDriver()
	}
}
// main port-forwards the remote server pod's SSH port (2222) in the
// background, waits until the forward reports ready, starts the gost
// routers built in init, then blocks forever.
func main() {
	readyChan := make(chan struct{})
	stop := make(chan struct{})
	go func() {
		// Blocks for the lifetime of the forward; errors are only logged.
		err := PortForwardPod(config,
			clientset,
			name,
			namespace,
			"2222:2222",
			readyChan,
			stop,
		)
		if err != nil {
			log.Error(err)
		}
	}()
	// Wait for the forwarder to start listening before dialing through it.
	<-readyChan
	log.Info("port forward ready")
	if err := start(); err != nil {
		log.Fatal(err)
	}
	// Routers serve in their own goroutines; park this one forever.
	select {}
}
// start builds routers from the primary route and every extra route, then
// launches each router's serve loop in its own goroutine. It fails when the
// configuration yields no routers at all.
func start() error {
	var all []router
	routes := append([]route{baseCfg.route}, baseCfg.Routes...)
	for i := range routes {
		generated, err := routes[i].GenRouters()
		if err != nil {
			return err
		}
		all = append(all, generated...)
	}
	if len(all) == 0 {
		return errors.New("invalid config")
	}
	for i := range all {
		go all[i].Serve()
	}
	return nil
}
// getCIDR makes a best-effort guess at the cluster network, widened to a
// /16 mask. It tries, in order: node-declared pod CIDRs, service ClusterIPs,
// and pod IPs, returning the first usable address. List errors at each step
// are deliberately ignored so the next source can be tried.
func getCIDR(clientset *kubernetes.Clientset, ns string) (*net.IPNet, error) {
	// 1) Prefer the node-declared pod CIDR.
	if nodeList, err := clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}); err == nil {
		for _, node := range nodeList.Items {
			if _, ip, err := net.ParseCIDR(node.Spec.PodCIDR); err == nil && ip != nil {
				ip.Mask = net.IPv4Mask(255, 255, 0, 0)
				return ip, nil
			}
		}
	}
	// 2) Fall back to any service cluster IP.
	if services, err := clientset.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}); err == nil {
		for _, service := range services.Items {
			if ip := net.ParseIP(service.Spec.ClusterIP); ip != nil {
				return &net.IPNet{IP: ip, Mask: net.IPv4Mask(255, 255, 0, 0)}, nil
			}
		}
	}
	// 3) Last resort: any pod IP.
	if podList, err := clientset.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}); err == nil {
		for _, pod := range podList.Items {
			if ip := net.ParseIP(pod.Status.PodIP); ip != nil {
				return &net.IPNet{IP: ip, Mask: net.IPv4Mask(255, 255, 0, 0)}, nil
			}
		}
	}
	// The old message read "can not found cidr"; make it grammatical and
	// say what was searched.
	return nil, fmt.Errorf("can not find cluster CIDR from nodes, services or pods")
}

41
pkg/main_test.go Normal file
View File

@@ -0,0 +1,41 @@
package main
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/homedir"
"os/exec"
"path/filepath"
"testing"
)
// Package-level test fixtures built from the default kubeconfig
// (~/.kube/config). Errors are deliberately discarded; tests using these
// values will fail later if the cluster is unreachable.
var (
	clientConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{
			ExplicitPath: filepath.Join(homedir.HomeDir(), clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName),
		},
		nil,
	)
	clientconfig, _  = clientConfig.ClientConfig()
	clientsets, _    = kubernetes.NewForConfig(clientconfig)
	namespaces, _, _ = clientConfig.Namespace()
)
// TestCidr prints the guessed cluster CIDR (manual check against a live
// cluster; lookup failures are silently skipped).
func TestCidr(t *testing.T) {
	if cidr, err := getCIDR(clientsets, namespaces); err == nil {
		fmt.Println(cidr.String())
	}
}
// TestPing pings every service ClusterIP in the current namespace (manual
// connectivity check; results are deliberately ignored).
func TestPing(t *testing.T) {
	services, _ := clientsets.CoreV1().Services(namespaces).List(context.Background(), metav1.ListOptions{})
	for _, svc := range services.Items {
		for _, ip := range svc.Spec.ClusterIPs {
			_ = exec.Command("ping", ip, "-c", "4").Run()
		}
	}
}

164
pkg/peer.go Normal file
View File

@@ -0,0 +1,164 @@
package main
import (
"bufio"
"bytes"
"encoding/json"
"github.com/ginuerzh/gost"
"io"
"io/ioutil"
"strconv"
"strings"
"time"
)
// peerConfig is the live-reloadable peer configuration for one node group.
// Strategy, MaxFails and Nodes decode from JSON; the plain-text line format
// is handled by parse. group/baseNodes/stopped are runtime state, not config.
type peerConfig struct {
	Strategy    string `json:"strategy"`
	MaxFails    int    `json:"max_fails"`
	FailTimeout time.Duration
	period      time.Duration // the period for live reloading
	Nodes       []string      `json:"nodes"`
	group       *gost.NodeGroup // node group being managed
	baseNodes   []gost.Node     // static nodes always kept in the group
	stopped     chan struct{}   // closed by Stop to end reloading
}
// newPeerConfig returns a peerConfig whose stop channel is initialized so
// Stop/Stopped are usable before the first Reload.
func newPeerConfig() *peerConfig {
	cfg := new(peerConfig)
	cfg.stopped = make(chan struct{})
	return cfg
}
// Validate is currently a no-op placeholder; peer settings are accepted
// as parsed.
func (cfg *peerConfig) Validate() {
}
// Reload re-parses the peer configuration from r and swaps the node group's
// selector and node set accordingly. It is invoked periodically by
// gost.PeriodReload and becomes a no-op once Stop has been called.
func (cfg *peerConfig) Reload(r io.Reader) error {
	if cfg.Stopped() {
		return nil
	}
	if err := cfg.parse(r); err != nil {
		return err
	}
	cfg.Validate()
	group := cfg.group
	// Rebuild the selector with the freshly parsed fail/strategy settings.
	group.SetSelector(
		nil,
		gost.WithFilter(
			&gost.FailFilter{
				MaxFails:    cfg.MaxFails,
				FailTimeout: cfg.FailTimeout,
			},
			&gost.InvalidFilter{},
		),
		gost.WithStrategy(gost.NewStrategy(cfg.Strategy)),
	)
	// New node list starts with the static base nodes; peer nodes get IDs
	// continuing after them.
	gNodes := cfg.baseNodes
	nid := len(gNodes) + 1
	for _, s := range cfg.Nodes {
		nodes, err := parseChainNode(s)
		if err != nil {
			return err
		}
		for i := range nodes {
			nodes[i].ID = nid
			nid++
		}
		gNodes = append(gNodes, nodes...)
	}
	// SetNodes returns the replaced nodes; stop bypass reloaders on the old
	// peer nodes (everything after the base nodes).
	nodes := group.SetNodes(gNodes...)
	for _, node := range nodes[len(cfg.baseNodes):] {
		if node.Bypass != nil {
			node.Bypass.Stop() // clear the old nodes
		}
	}
	return nil
}
// parse decodes the peer config from r. JSON is tried first; on failure the
// data is read as plain text, one "key value" directive per line with '#'
// starting a comment. Recognized keys: strategy, max_fails, fail_timeout,
// reload, peer (repeatable).
func (cfg *peerConfig) parse(r io.Reader) error {
	data, err := ioutil.ReadAll(r)
	if err != nil {
		return err
	}
	// compatible with JSON format
	if err := json.NewDecoder(bytes.NewReader(data)).Decode(cfg); err == nil {
		return nil
	}
	// split tokenizes one line: strip the comment tail, normalize tabs to
	// spaces, and drop empty fields.
	split := func(line string) []string {
		if line == "" {
			return nil
		}
		if n := strings.IndexByte(line, '#'); n >= 0 {
			line = line[:n]
		}
		line = strings.Replace(line, "\t", " ", -1)
		line = strings.TrimSpace(line)
		var ss []string
		for _, s := range strings.Split(line, " ") {
			if s = strings.TrimSpace(s); s != "" {
				ss = append(ss, s)
			}
		}
		return ss
	}
	// Discard any nodes a partially successful JSON decode may have set.
	cfg.Nodes = nil
	scanner := bufio.NewScanner(bytes.NewReader(data))
	for scanner.Scan() {
		line := scanner.Text()
		ss := split(line)
		if len(ss) < 2 {
			continue
		}
		switch ss[0] {
		case "strategy":
			cfg.Strategy = ss[1]
		case "max_fails":
			cfg.MaxFails, _ = strconv.Atoi(ss[1]) // parse errors leave zero value
		case "fail_timeout":
			cfg.FailTimeout, _ = time.ParseDuration(ss[1])
		case "reload":
			cfg.period, _ = time.ParseDuration(ss[1])
		case "peer":
			cfg.Nodes = append(cfg.Nodes, ss[1])
		}
	}
	return scanner.Err()
}
// Period reports the live-reload interval; a negative value signals the
// reloader to stop polling once the config has been stopped.
func (cfg *peerConfig) Period() time.Duration {
	period := cfg.period
	if cfg.Stopped() {
		period = -1
	}
	return period
}
// Stop stops reloading.
// The non-blocking receive guards close() so repeated calls from the same
// goroutine do not panic on a double close.
// NOTE(review): two concurrent callers could both reach close(); confirm
// Stop is only invoked from one goroutine.
func (cfg *peerConfig) Stop() {
	select {
	case <-cfg.stopped:
	default:
		close(cfg.stopped)
	}
}
// Stopped checks whether the reloader is stopped.
// Receiving from the closed channel succeeds immediately; otherwise the
// default branch reports "still running".
func (cfg *peerConfig) Stopped() bool {
	select {
	case <-cfg.stopped:
		return true
	default:
		return false
	}
}

464
pkg/route.go Normal file
View File

@@ -0,0 +1,464 @@
package main
import (
"crypto/tls"
"crypto/x509"
"encoding/base64"
"fmt"
"github.com/ginuerzh/gost"
"net"
"net/url"
"os"
"strings"
"time"
"github.com/go-log/log"
)
// stringList is a repeatable flag-style value accumulating strings.
type stringList []string

// String renders the accumulated values for display.
func (l *stringList) String() string {
	return fmt.Sprintf("%s", *l)
}

// Set appends one value; it never fails.
func (l *stringList) Set(value string) error {
	*l = append(*l, value)
	return nil
}
// route describes one gost forwarding route: the local listeners
// (ServeNodes), the upstream proxy chain (ChainNodes), and the chain's
// retry count.
type route struct {
	ServeNodes stringList
	ChainNodes stringList
	Retries    int
}
// parseChain turns r.ChainNodes into a *gost.Chain: each entry becomes one
// node group. Selector settings (fail filter, strategy) are taken from the
// group's first node, and a "peer" option on that node enables a
// live-reloaded peer configuration for the group.
func (r *route) parseChain() (*gost.Chain, error) {
	chain := gost.NewChain()
	chain.Retries = r.Retries
	gid := 1 // group ID
	for _, ns := range r.ChainNodes {
		ngroup := gost.NewNodeGroup()
		ngroup.ID = gid
		gid++
		// parse the base nodes
		nodes, err := parseChainNode(ns)
		if err != nil {
			return nil, err
		}
		nid := 1 // node ID
		for i := range nodes {
			nodes[i].ID = nid
			nid++
		}
		ngroup.AddNode(nodes...)
		// nodes[0] exists here: parseChainNode returns at least one node on
		// success.
		ngroup.SetSelector(nil,
			gost.WithFilter(
				&gost.FailFilter{
					MaxFails:    nodes[0].GetInt("max_fails"),
					FailTimeout: nodes[0].GetDuration("fail_timeout"),
				},
				&gost.InvalidFilter{},
			),
			gost.WithStrategy(gost.NewStrategy(nodes[0].Get("strategy"))),
		)
		if cfg := nodes[0].Get("peer"); cfg != "" {
			f, err := os.Open(cfg)
			if err != nil {
				return nil, err
			}
			peerCfg := newPeerConfig()
			peerCfg.group = ngroup
			peerCfg.baseNodes = nodes
			peerCfg.Reload(f) // NOTE(review): initial Reload error is ignored
			f.Close()
			go gost.PeriodReload(peerCfg, cfg)
		}
		chain.AddNodeGroup(ngroup)
	}
	return chain, nil
}
// parseChainNode expands one chain-node URL into gost nodes: it resolves
// credentials (base64 "auth" option or a secrets file), builds the TLS
// config, chooses transporter and connector from the node's transport and
// protocol, and — when the "ip" option lists addresses — clones the node
// once per IP.
func parseChainNode(ns string) (nodes []gost.Node, err error) {
	node, err := gost.ParseNode(ns)
	if err != nil {
		return
	}
	// The "auth" option carries base64-encoded "user[:pass]".
	if auth := node.Get("auth"); auth != "" && node.User == nil {
		c, err := base64.StdEncoding.DecodeString(auth)
		if err != nil {
			return nil, err
		}
		cs := string(c)
		s := strings.IndexByte(cs, ':')
		if s < 0 {
			node.User = url.User(cs)
		} else {
			node.User = url.UserPassword(cs[:s], cs[s+1:])
		}
	}
	// Otherwise fall back to the first user of the secrets file, if any.
	if node.User == nil {
		users, err := parseUsers(node.Get("secrets"))
		if err != nil {
			return nil, err
		}
		if len(users) > 0 {
			node.User = users[0]
		}
	}
	serverName, sport, _ := net.SplitHostPort(node.Addr)
	if serverName == "" {
		serverName = "localhost" // default server name
	}
	rootCAs, err := loadCA(node.Get("ca"))
	if err != nil {
		return
	}
	tlsCfg := &tls.Config{
		ServerName:         serverName,
		InsecureSkipVerify: !node.GetBool("secure"),
		RootCAs:            rootCAs,
	}
	// If the argument `ca` is given, but not open `secure`, we verify the
	// certificate manually.
	if rootCAs != nil && !node.GetBool("secure") {
		tlsCfg.VerifyConnection = func(state tls.ConnectionState) error {
			opts := x509.VerifyOptions{
				Roots:         rootCAs,
				CurrentTime:   time.Now(),
				DNSName:       "",
				Intermediates: x509.NewCertPool(),
			}
			certs := state.PeerCertificates
			// All certificates after the leaf are treated as intermediates.
			for i, cert := range certs {
				if i == 0 {
					continue
				}
				opts.Intermediates.AddCert(cert)
			}
			// NOTE(review): this assigns the captured outer `err` instead of
			// a local; shadowing would be safer — confirm before changing.
			_, err = certs[0].Verify(opts)
			return err
		}
	}
	if cert, err := tls.LoadX509KeyPair(node.Get("cert"), node.Get("key")); err == nil {
		tlsCfg.Certificates = []tls.Certificate{cert}
	}
	timeout := node.GetDuration("timeout")
	// Transporter by transport; only the ssh variants differ from plain TCP.
	var tr gost.Transporter
	switch node.Transport {
	case "ssh":
		if node.Protocol == "direct" || node.Protocol == "remote" {
			tr = gost.SSHForwardTransporter()
		} else {
			tr = gost.SSHTunnelTransporter()
		}
	default:
		tr = gost.TCPTransporter()
	}
	// Connector by protocol.
	var connector gost.Connector
	switch node.Protocol {
	case "ssu":
		connector = gost.ShadowUDPConnector(node.User)
	case "direct":
		connector = gost.SSHDirectForwardConnector()
	case "remote":
		connector = gost.SSHRemoteForwardConnector()
	default:
		connector = gost.AutoConnector(node.User)
	}
	host := node.Get("host")
	if host == "" {
		host = node.Host
	}
	node.DialOptions = append(node.DialOptions,
		gost.TimeoutDialOption(timeout),
		gost.HostDialOption(host),
	)
	node.ConnectOptions = []gost.ConnectOption{
		gost.UserAgentConnectOption(node.Get("agent")),
		gost.NoTLSConnectOption(node.GetBool("notls")),
		gost.NoDelayConnectOption(node.GetBool("nodelay")),
	}
	sshConfig := &gost.SSHConfig{}
	if s := node.Get("ssh_key"); s != "" {
		key, err := gost.ParseSSHKeyFile(s)
		if err != nil {
			return nil, err
		}
		sshConfig.Key = key
	}
	handshakeOptions := []gost.HandshakeOption{
		gost.AddrHandshakeOption(node.Addr),
		gost.HostHandshakeOption(host),
		gost.UserHandshakeOption(node.User),
		gost.TLSConfigHandshakeOption(tlsCfg),
		gost.IntervalHandshakeOption(node.GetDuration("ping")),
		gost.TimeoutHandshakeOption(timeout),
		gost.RetryHandshakeOption(node.GetInt("retry")),
		gost.SSHConfigHandshakeOption(sshConfig),
	}
	node.Client = &gost.Client{
		Connector:   connector,
		Transporter: tr,
	}
	node.Bypass = parseBypass(node.Get("bypass"))
	// Fan out: one clone of the node per address in the "ip" option, each
	// dialing that address instead of node.Addr.
	ips := parseIP(node.Get("ip"), sport)
	for _, ip := range ips {
		nd := node.Clone()
		nd.Addr = ip
		// override the default node address
		nd.HandshakeOptions = append(handshakeOptions, gost.AddrHandshakeOption(ip))
		// One node per IP
		nodes = append(nodes, nd)
	}
	if len(ips) == 0 {
		node.HandshakeOptions = handshakeOptions
		nodes = []gost.Node{node}
	}
	return
}
// GenRouters builds one router per entry of r.ServeNodes: it parses the
// shared upstream chain once, then for each serve node resolves
// credentials, TLS, the listener (by transport) and the handler (by
// protocol), wiring everything into a router value ready to Serve.
func (r *route) GenRouters() ([]router, error) {
	chain, err := r.parseChain()
	if err != nil {
		return nil, err
	}
	var rts []router
	for _, ns := range r.ServeNodes {
		node, err := gost.ParseNode(ns)
		if err != nil {
			return nil, err
		}
		// The "auth" option carries base64-encoded "user[:pass]".
		if auth := node.Get("auth"); auth != "" && node.User == nil {
			c, err := base64.StdEncoding.DecodeString(auth)
			if err != nil {
				return nil, err
			}
			cs := string(c)
			s := strings.IndexByte(cs, ':')
			if s < 0 {
				node.User = url.User(cs)
			} else {
				node.User = url.UserPassword(cs[:s], cs[s+1:])
			}
		}
		// A secrets file yields a multi-user authenticator; a single inline
		// user becomes a one-entry local authenticator.
		authenticator, err := parseAuthenticator(node.Get("secrets"))
		if err != nil {
			return nil, err
		}
		if authenticator == nil && node.User != nil {
			kvs := make(map[string]string)
			kvs[node.User.Username()], _ = node.User.Password()
			authenticator = gost.NewLocalAuthenticator(kvs)
		}
		if node.User == nil {
			if users, _ := parseUsers(node.Get("secrets")); len(users) > 0 {
				node.User = users[0]
			}
		}
		certFile, keyFile := node.Get("cert"), node.Get("key")
		// A TLS failure is only fatal when the user explicitly supplied a
		// cert/key pair; the default-cert fallback may fail silently.
		tlsCfg, err := tlsConfig(certFile, keyFile, node.Get("ca"))
		if err != nil && certFile != "" && keyFile != "" {
			return nil, err
		}
		ttl := node.GetDuration("ttl")
		timeout := node.GetDuration("timeout")
		// Routes for tun listeners; entries without their own gateway get
		// the node's "gw" option as default.
		tunRoutes := parseIPRoutes(node.Get("route"))
		gw := net.ParseIP(node.Get("gw")) // default gateway
		for i := range tunRoutes {
			if tunRoutes[i].Gateway == nil {
				tunRoutes[i].Gateway = gw
			}
		}
		// Listener selection by transport.
		var ln gost.Listener
		switch node.Transport {
		case "ssh":
			config := &gost.SSHConfig{
				Authenticator: authenticator,
				TLSConfig:     tlsCfg,
			}
			if s := node.Get("ssh_key"); s != "" {
				key, err := gost.ParseSSHKeyFile(s)
				if err != nil {
					return nil, err
				}
				config.Key = key
			}
			if s := node.Get("ssh_authorized_keys"); s != "" {
				keys, err := gost.ParseSSHAuthorizedKeysFile(s)
				if err != nil {
					return nil, err
				}
				config.AuthorizedKeys = keys
			}
			if node.Protocol == "forward" {
				ln, err = gost.TCPListener(node.Addr)
			} else {
				ln, err = gost.SSHTunnelListener(node.Addr, config)
			}
		case "tcp":
			// Directly use SSH port forwarding if the last chain node is forward+ssh
			if chain.LastNode().Protocol == "forward" && chain.LastNode().Transport == "ssh" {
				chain.Nodes()[len(chain.Nodes())-1].Client.Connector = gost.SSHDirectForwardConnector()
				chain.Nodes()[len(chain.Nodes())-1].Client.Transporter = gost.SSHForwardTransporter()
			}
			ln, err = gost.TCPListener(node.Addr)
		case "udp":
			ln, err = gost.UDPListener(node.Addr, &gost.UDPListenConfig{
				TTL:       ttl,
				Backlog:   node.GetInt("backlog"),
				QueueSize: node.GetInt("queue"),
			})
		case "tun":
			cfg := gost.TunConfig{
				Name:    node.Get("name"),
				Addr:    node.Get("net"),
				Peer:    node.Get("peer"),
				MTU:     node.GetInt("mtu"),
				Routes:  tunRoutes,
				Gateway: node.Get("gw"),
			}
			ln, err = gost.TunListener(cfg)
		case "tap":
			cfg := gost.TapConfig{
				Name:    node.Get("name"),
				Addr:    node.Get("net"),
				MTU:     node.GetInt("mtu"),
				Routes:  strings.Split(node.Get("route"), ","),
				Gateway: node.Get("gw"),
			}
			ln, err = gost.TapListener(cfg)
		case "dns":
			ln, err = gost.DNSListener(
				node.Addr,
				&gost.DNSOptions{
					Mode:      node.Get("mode"),
					TLSConfig: tlsCfg,
				},
			)
		default:
			ln, err = gost.TCPListener(node.Addr)
		}
		if err != nil {
			return nil, err
		}
		// Handler selection by protocol.
		var handler gost.Handler
		switch node.Protocol {
		case "tcp":
			handler = gost.TCPDirectForwardHandler(node.Remote)
		case "udp":
			handler = gost.UDPDirectForwardHandler(node.Remote)
		case "tun":
			handler = gost.TunHandler()
		case "tap":
			handler = gost.TapHandler()
		case "dns":
			handler = gost.DNSHandler(node.Remote)
		default:
			// start from 2.5, if remote is not empty, then we assume that it is a forward tunnel.
			if node.Remote != "" {
				handler = gost.TCPDirectForwardHandler(node.Remote)
			} else {
				handler = gost.AutoHandler()
			}
		}
		node.Bypass = parseBypass(node.Get("bypass"))
		hosts := parseHosts(node.Get("hosts"))
		ips := parseIP(node.Get("ip"), "")
		resolver := parseResolver(node.Get("dns"))
		if resolver != nil {
			resolver.Init(
				gost.ChainResolverOption(chain),
				gost.TimeoutResolverOption(timeout),
				gost.TTLResolverOption(ttl),
				gost.PreferResolverOption(node.Get("prefer")),
				gost.SrcIPResolverOption(net.ParseIP(node.Get("ip"))),
			)
		}
		handler.Init(
			gost.AddrHandlerOption(ln.Addr().String()),
			gost.ChainHandlerOption(chain),
			gost.UsersHandlerOption(node.User),
			gost.AuthenticatorHandlerOption(authenticator),
			gost.BypassHandlerOption(node.Bypass),
			gost.ResolverHandlerOption(resolver),
			gost.HostsHandlerOption(hosts),
			gost.RetryHandlerOption(node.GetInt("retry")), // override the global retry option.
			gost.TimeoutHandlerOption(timeout),
			gost.ProbeResistHandlerOption(node.Get("probe_resist")),
			gost.KnockingHandlerOption(node.Get("knock")),
			gost.NodeHandlerOption(node),
			gost.IPsHandlerOption(ips),
			gost.TCPModeHandlerOption(node.GetBool("tcp")),
			gost.IPRoutesHandlerOption(tunRoutes...),
		)
		rt := router{
			node:     node,
			server:   &gost.Server{Listener: ln},
			handler:  handler,
			chain:    chain,
			resolver: resolver,
			hosts:    hosts,
		}
		rts = append(rts, rt)
	}
	return rts, nil
}
// router couples one listener/handler pair with the upstream chain,
// resolver and hosts it serves with.
type router struct {
	node     gost.Node
	server   *gost.Server
	handler  gost.Handler
	chain    *gost.Chain
	resolver gost.Resolver
	hosts    *gost.Hosts
}
// Serve logs the node and listen address, then runs the server accept loop;
// it blocks until the server stops or fails.
func (r *router) Serve() error {
	log.Logf("%s on %s", r.node.String(), r.server.Addr())
	return r.server.Serve(r.handler)
}
// Close shuts down the underlying server. Calling it on a nil router, or a
// router whose server was never set, is a safe no-op.
func (r *router) Close() error {
	if r == nil {
		return nil
	}
	if r.server == nil {
		return nil
	}
	return r.server.Close()
}

246
pkg/util.go Normal file
View File

@@ -0,0 +1,246 @@
package main
import (
"context"
"fmt"
"github.com/moby/term"
log "github.com/sirupsen/logrus"
v12 "k8s.io/api/autoscaling/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/portforward"
"k8s.io/client-go/tools/remotecommand"
clientgowatch "k8s.io/client-go/tools/watch"
"k8s.io/client-go/transport/spdy"
"k8s.io/client-go/util/retry"
"k8s.io/kubectl/pkg/cmd/util"
term2 "k8s.io/kubectl/pkg/util/term"
"net"
"net/http"
"os"
"time"
)
// WaitResource watches the resources of apiVersion/kind in namespace that
// match list, blocking until checker returns true for every cached object
// and for the next watch event's object, or until a 10-minute timeout fires.
func WaitResource(client *kubernetes.Clientset, getter cache.Getter, namespace, apiVersion, kind string, list metav1.ListOptions, checker func(interface{}) bool) error {
	// NOTE(review): the discovery error is discarded; a failed discovery
	// surfaces only as a less helpful RESTMapping error below.
	groupResources, _ := restmapper.GetAPIGroupResources(client)
	mapper := restmapper.NewDiscoveryRESTMapper(groupResources)
	groupVersionKind := schema.FromAPIVersionAndKind(apiVersion, kind)
	mapping, err := mapper.RESTMapping(groupVersionKind.GroupKind(), groupVersionKind.Version)
	if err != nil {
		log.Error(err)
		return err
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()
	watchlist := cache.NewFilteredListWatchFromClient(
		getter,
		mapping.Resource.Resource,
		namespace,
		func(options *metav1.ListOptions) {
			options.LabelSelector = list.LabelSelector
			options.FieldSelector = list.FieldSelector
			options.Watch = list.Watch
		},
	)
	// Precondition: at least one object is cached and all pass checker.
	preConditionFunc := func(store cache.Store) (bool, error) {
		if len(store.List()) == 0 {
			return false, nil
		}
		for _, p := range store.List() {
			if !checker(p) {
				return false, nil
			}
		}
		return true, nil
	}
	conditionFunc := func(e watch.Event) (bool, error) { return checker(e.Object), nil }
	object, err := scheme.Scheme.New(mapping.GroupVersionKind)
	if err != nil {
		return err
	}
	event, err := clientgowatch.UntilWithSync(ctx, watchlist, object, preConditionFunc, conditionFunc)
	if err != nil {
		log.Infof("wait to ready failed, error: %v, event: %v", err, event)
		return err
	}
	return nil
}
// GetAvailablePortOrDie asks the kernel for a free TCP port on 0.0.0.0 by
// binding port 0, returning the assigned port. Any failure exits the
// process. Note: the port is released before return, so it could in theory
// be taken by another process before it is reused.
func GetAvailablePortOrDie() int {
	addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("%s:0", "0.0.0.0"))
	if err != nil {
		log.Fatal(err)
	}
	l, err := net.ListenTCP("tcp", addr)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
	tcpAddr := l.Addr().(*net.TCPAddr)
	return tcpAddr.Port
}
// WaitPod waits until every pod matched by list in namespace satisfies
// checker, delegating to WaitResource with the core/v1 Pod kind.
func WaitPod(clientset *kubernetes.Clientset, namespace string, list metav1.ListOptions, checker func(*v1.Pod) bool) error {
	adapter := func(obj interface{}) bool {
		return checker(obj.(*v1.Pod))
	}
	return WaitResource(clientset, clientset.CoreV1().RESTClient(), namespace, "v1", "Pod", list, adapter)
}
// PortForwardPod forwards the "local:remote" port pair to podName in
// namespace over SPDY. readyChan is closed by the forwarder once it is
// listening; closing stopChan tears the forward down. The call blocks until
// the forwarder exits.
// NOTE(review): errors are both logged and returned — callers log them again.
func PortForwardPod(config *rest.Config, clientset *kubernetes.Clientset, podName, namespace, portPair string, readyChan, stopChan chan struct{}) error {
	url := clientset.CoreV1().
		RESTClient().
		Post().
		Resource("pods").
		Namespace(namespace).
		Name(podName).
		SubResource("portforward").
		URL()
	transport, upgrader, err := spdy.RoundTripperFor(config)
	if err != nil {
		log.Error(err)
		return err
	}
	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", url)
	p := []string{portPair}
	forwarder, err := portforward.New(dialer, p, stopChan, readyChan, os.Stdout, os.Stderr)
	if err != nil {
		log.Error(err)
		return err
	}
	// Blocks until stopChan closes or the forward fails.
	if err = forwarder.ForwardPorts(); err != nil {
		log.Error(err)
		return err
	}
	return nil
}
// ScaleDeploymentReplicasTo sets the replica count of deployment name in
// namespace via the scale subresource, retrying any error with the default
// backoff. Failures are logged rather than returned.
func ScaleDeploymentReplicasTo(options *kubernetes.Clientset, name, namespace string, replicas int32) {
	err := retry.OnError(
		retry.DefaultRetry,
		func(err error) bool { return err != nil }, // retry on every error
		func() error {
			_, err := options.AppsV1().Deployments(namespace).
				UpdateScale(context.TODO(), name, &v12.Scale{
					ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
					Spec:       v12.ScaleSpec{Replicas: replicas},
				}, metav1.UpdateOptions{})
			return err
		})
	if err != nil {
		log.Errorf("update deployment: %s's replicas to %d failed, error: %v", name, replicas, err)
	}
}
// shellOptions abstracts the settings Shell needs to exec into a
// deployment's pod (namespace, deployment name, sync directories and the
// kubeconfig path).
type shellOptions interface {
	GetNamespace() string
	GetDeployment() string
	GetLocalDir() string
	GetRemoteDir() string
	GetKubeconfig() string
}
// Shell opens an interactive shell ("bash", falling back to "sh") inside the
// first running pod of the deployment described by options.
//
// It resolves the deployment's pods via the deployment's label selector,
// picks the first pod in Running phase, and attaches an interactive TTY
// through the Kubernetes exec subresource.
//
// Fixes over the original: errors from the deployment Get and pod List were
// only logged, not returned, which led to a nil-pointer dereference on
// deployment.Spec (or a panic indexing an empty pod list) further down.
func Shell(client *kubernetes.Clientset, options shellOptions) error {
	deployment, err := client.AppsV1().Deployments(options.GetNamespace()).
		Get(context.TODO(), options.GetDeployment(), metav1.GetOptions{})
	if err != nil {
		log.Error(err)
		return err
	}
	labelMap, err := metav1.LabelSelectorAsMap(deployment.Spec.Selector)
	if err != nil {
		return err
	}
	pods, err := client.CoreV1().Pods(options.GetNamespace()).
		List(context.TODO(), metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labelMap).String()})
	if err != nil {
		log.Errorf("get kubedev pod error: %v", err)
		return err
	}
	if len(pods.Items) == 0 {
		return fmt.Errorf("no pod found for deployment: %s", options.GetDeployment())
	}

	// Pick the first pod that is actually running; exec into a pending or
	// completed pod would fail.
	index := -1
	for i := range pods.Items {
		if pods.Items[i].Status.Phase == v1.PodRunning {
			index = i
			break
		}
	}
	if index < 0 {
		return fmt.Errorf("cannot exec into a container in a completed pod; current phase is %s", pods.Items[0].Status.Phase)
	}

	stdin, stdout, stderr := term.StdStreams()
	tty := term2.TTY{
		Out: stdout,
		In:  stdin,
		Raw: true,
	}
	if !tty.IsTerminalIn() {
		log.Error("Unable to use a TTY - input is not a terminal or the right kind of file")
	}
	var terminalSizeQueue remotecommand.TerminalSizeQueue
	if tty.Raw {
		// Keep the remote terminal size in sync with the local window.
		terminalSizeQueue = tty.MonitorSize(tty.GetSize())
	}

	f := func() error {
		configFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
		kubeconfig := options.GetKubeconfig()
		configFlags.KubeConfig = &kubeconfig
		namespace := options.GetNamespace()
		configFlags.Namespace = &namespace
		factory := util.NewFactory(util.NewMatchVersionFlags(configFlags))
		config, err := factory.ToRESTConfig()
		if err != nil {
			return err
		}
		restClient, err := rest.RESTClientFor(config)
		if err != nil {
			return err
		}
		req := restClient.Post().
			Resource("pods").
			Name(pods.Items[index].Name).
			Namespace(options.GetNamespace()).
			SubResource("exec").
			VersionedParams(
				&v1.PodExecOptions{
					Container: pods.Items[index].Spec.Containers[0].Name,
					// Prefer bash, fall back to sh when bash is absent.
					Command: []string{"sh", "-c", "(bash||sh)"},
					Stdin:   true,
					Stdout:  true,
					Stderr:  true,
					TTY:     true,
				},
				scheme.ParameterCodec,
			)
		executor, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
		if err != nil {
			return err
		}
		return executor.Stream(remotecommand.StreamOptions{
			Stdin:             tty.In,
			Stdout:            tty.Out,
			Stderr:            stderr,
			Tty:               true,
			TerminalSizeQueue: terminalSizeQueue,
		})
	}
	// tty.Safe restores the local terminal state even when f fails.
	return tty.Safe(f)
}

7
remote/Dockerfile Normal file
View File

@@ -0,0 +1,7 @@
# Image for the in-cluster helper container: Ubuntu plus network debugging
# tools and the gost tunnel binary.
FROM ubuntu:latest
WORKDIR /app
# Switch APT to the Aliyun mirror to speed up package downloads.
RUN sed -i s@/archive.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list
# Debug/network tooling; iptables is needed for NAT rules applied at runtime.
RUN apt-get clean && apt-get update && apt-get install -y wget dnsutils vim curl net-tools iptables
# Fetch gost v2.11.1 (linux/amd64), unpack it, and install it on PATH.
RUN curl -fL https://github.com/ginuerzh/gost/releases/download/v2.11.1/gost-linux-amd64-2.11.1.gz -o gost.gz
RUN gzip gost.gz -d && chmod +x gost && mv gost /usr/local/bin/gost

195
remote/dhcp.go Normal file
View File

@@ -0,0 +1,195 @@
package remote
import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"os"
	"os/signal"
	"strconv"
	"strings"
	"syscall"

	log "github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)
var stopChan = make(chan os.Signal)
// addCleanUpResourceHandler installs a signal handler that, on termination,
// decrements the traffic-manager ref count (cleaning up shared resources when
// it reaches zero), returns the leased tunnel IP to the DHCP config map, and
// exits the process.
func addCleanUpResourceHandler(client *kubernetes.Clientset, namespace string, ip *net.IPNet) {
	// SIGKILL (and SIGSTOP) can never be caught or ignored, so registering
	// them with signal.Notify is a no-op; listen only for catchable signals.
	signal.Notify(stopChan, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	go func() {
		<-stopChan
		log.Info("prepare to exit, cleaning up")
		cleanUpTrafficManagerIfRefCountIsZero(client, namespace)
		if err := ReleaseIpToDHCP(client, namespace, ip); err != nil {
			log.Errorf("failed to release ip to dhcp, err: %v", err)
		}
		log.Info("clean up successful")
		os.Exit(0)
	}()
}
// deletePod deletes the named pod. When wait is true it blocks until the API
// server reports the pod as gone (a Deleted watch event); when wait is false
// the deletion is fire-and-forget and even the delete error is ignored.
func deletePod(client *kubernetes.Clientset, podName, namespace string, wait bool) {
	err := client.CoreV1().Pods(namespace).Delete(context.TODO(), podName, metav1.DeleteOptions{})
	if !wait {
		// Best-effort deletion, nothing to report.
		return
	}
	if errors.IsNotFound(err) {
		log.Info("not found shadow pod, no need to delete it")
		return
	}
	if err != nil {
		// Real deletion failure (not NotFound): the original code fell
		// through and silently returned; surface the error instead.
		log.Error(err)
		return
	}
	log.Infof("waiting for pod: %s to be deleted...", podName)
	w, err := client.CoreV1().Pods(namespace).
		Watch(context.TODO(), metav1.ListOptions{
			FieldSelector: fields.OneTermEqualSelector("metadata.name", podName).String(),
			Watch:         true,
		})
	if err != nil {
		log.Error(err)
		return
	}
	// Always release the watch connection (the original leaked it).
	defer w.Stop()
	for event := range w.ResultChan() {
		if event.Type == watch.Deleted {
			log.Infof("delete pod: %s successfully", podName)
			return
		}
	}
}
// vendor/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go:99
// updateRefCount adjusts the "ref-count" annotation on the traffic-manager
// config map by increment, retrying the whole read-modify-patch cycle with
// the default backoff on any error. The outcome is logged, not returned.
func updateRefCount(client *kubernetes.Clientset, namespace string, increment int) {
	do := func() error {
		configMap, err := client.CoreV1().ConfigMaps(namespace).Get(context.TODO(), TrafficManager, metav1.GetOptions{})
		if err != nil {
			log.Errorf("update ref-count failed, increment: %d, error: %v", increment, err)
			return err
		}
		// A missing or malformed annotation counts as zero.
		curCount, convErr := strconv.Atoi(configMap.GetAnnotations()["ref-count"])
		if convErr != nil {
			curCount = 0
		}
		patch, _ := json.Marshal([]interface{}{
			map[string]interface{}{
				"op":    "replace",
				"path":  "/metadata/annotations/ref-count",
				"value": strconv.Itoa(curCount + increment),
			},
		})
		_, err = client.CoreV1().ConfigMaps(namespace).
			Patch(context.TODO(), TrafficManager, types.JSONPatchType, patch, metav1.PatchOptions{})
		return err
	}
	// Treat every error as retryable.
	retryable := func(err error) bool { return err != nil }
	if err := retry.OnError(retry.DefaultRetry, retryable, do); err != nil {
		log.Errorf("update ref count error, error: %v", err)
		return
	}
	log.Info("update ref count successfully")
}
// cleanUpTrafficManagerIfRefCountIsZero decrements the shared ref count and,
// once no client references the traffic manager anymore, deletes its config
// map and pod. All failures are logged and swallowed (best-effort cleanup).
func cleanUpTrafficManagerIfRefCountIsZero(client *kubernetes.Clientset, namespace string) {
	updateRefCount(client, namespace, -1)
	configMap, err := client.CoreV1().ConfigMaps(namespace).Get(context.TODO(), TrafficManager, metav1.GetOptions{})
	if err != nil {
		log.Error(err)
		return
	}
	refCount, err := strconv.Atoi(configMap.GetAnnotations()["ref-count"])
	if err != nil {
		log.Error(err)
		return
	}
	if refCount > 0 {
		// Someone is still using the shared pod; leave everything in place.
		return
	}
	// Nobody uses the DNS pod anymore, so tear down the shared resources.
	log.Info("refCount is zero, prepare to clean up resource")
	_ = client.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), TrafficManager, metav1.DeleteOptions{})
	_ = client.CoreV1().Pods(namespace).Delete(context.TODO(), TrafficManager, metav1.DeleteOptions{})
}
// InitDHCP creates the traffic-manager config map that acts as a tiny DHCP
// pool of host numbers for tunnel IPs in 192.168.254.0/24. It is a no-op when
// the config map already exists.
func InitDHCP(client *kubernetes.Clientset, namespace string, addr *net.IPNet) error {
	get, err := client.CoreV1().ConfigMaps(namespace).Get(context.Background(), TrafficManager, metav1.GetOptions{})
	if err == nil && get != nil {
		// Pool already initialised by an earlier client.
		return nil
	}
	if addr == nil {
		// Default server address. The original value 196.168.254.100 was a
		// typo: GetIpFromDHCP hands out 192.168.254.X addresses, so the
		// server must live in the same 192.168.254.0/24 subnet.
		// NOTE(review): addr is currently not used below — presumably meant
		// to be recorded in the config map; confirm with callers.
		addr = &net.IPNet{IP: net.IPv4(192, 168, 254, 100), Mask: net.IPv4Mask(255, 255, 255, 0)}
	}
	// Candidate host numbers 2..253; 100 is reserved for the server itself.
	var ips []string
	for i := 2; i < 254; i++ {
		if i != 100 {
			ips = append(ips, strconv.Itoa(i))
		}
	}
	result := &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      TrafficManager,
			Namespace: namespace,
			Labels:    map[string]string{},
		},
		Data: map[string]string{"DHCP": strings.Join(ips, ",")},
	}
	_, err = client.CoreV1().ConfigMaps(namespace).Create(context.Background(), result, metav1.CreateOptions{})
	if err != nil {
		log.Errorf("create dhcp error, err: %v", err)
		return err
	}
	return nil
}
// GetIpFromDHCP leases the first free host number from the traffic-manager
// config map and returns it as a 192.168.254.X/24 address, removing it from
// the stored pool.
func GetIpFromDHCP(client *kubernetes.Clientset, namespace string) (*net.IPNet, error) {
	get, err := client.CoreV1().ConfigMaps(namespace).Get(context.Background(), TrafficManager, metav1.GetOptions{})
	if err != nil {
		log.Errorf("failed to get ip from dhcp, err: %v", err)
		return nil, err
	}
	split := strings.Split(get.Data["DHCP"], ",")
	// Guard against an exhausted pool: strings.Split("") still yields one
	// empty element, which the original code silently turned into host
	// number 0 (Atoi error was discarded).
	if len(split) == 0 || split[0] == "" {
		return nil, fmt.Errorf("dhcp pool in config map %s is exhausted", TrafficManager)
	}
	ip := split[0]
	get.Data["DHCP"] = strings.Join(split[1:], ",")
	_, err = client.CoreV1().ConfigMaps(namespace).Update(context.Background(), get, metav1.UpdateOptions{})
	if err != nil {
		log.Errorf("update dhcp error after get ip, need to put ip back, err: %v", err)
		return nil, err
	}
	host, err := strconv.Atoi(ip)
	if err != nil {
		return nil, fmt.Errorf("invalid dhcp pool entry %q: %v", ip, err)
	}
	return &net.IPNet{
		IP:   net.IPv4(192, 168, 254, byte(host)),
		Mask: net.IPv4Mask(255, 255, 255, 0),
	}, nil
}
// ReleaseIpToDHCP returns the host number of ip to the DHCP pool stored in
// the traffic-manager config map so a later client can lease it again.
func ReleaseIpToDHCP(client *kubernetes.Clientset, namespace string, ip *net.IPNet) error {
	cm, err := client.CoreV1().ConfigMaps(namespace).Get(context.Background(), TrafficManager, metav1.GetOptions{})
	if err != nil {
		log.Errorf("failed to get dhcp, err: %v", err)
		return err
	}
	// The pool stores only the last octet of each free address.
	lastOctet := strings.Split(ip.IP.To4().String(), ".")[3]
	pool := append(strings.Split(cm.Data["DHCP"], ","), lastOctet)
	cm.Data["DHCP"] = strings.Join(pool, ",")
	if _, err = client.CoreV1().ConfigMaps(namespace).Update(context.Background(), cm, metav1.UpdateOptions{}); err != nil {
		log.Errorf("update dhcp error after release ip, need to try again, err: %v", err)
		return err
	}
	return nil
}

24
remote/dnspod.yaml Normal file
View File

@@ -0,0 +1,24 @@
apiVersion: v1
kind: Pod
metadata:
  name: dnspod
  labels:
    app: dnspod
spec:
  containers:
    - name: dnspod
      image: naison/gost:latest
      imagePullPolicy: IfNotPresent
      # Each argv element must be its own list item: the original single item
      # "gost -D" would be exec'd as a literal binary named "gost -D" and the
      # container would fail to start.
      command:
        - gost
        - "-D"
      resources:
        requests:
          memory: "256Mi"
          cpu: "128m"
        limits:
          memory: "512Mi"
          cpu: "256m"
      ports:
        - containerPort: 22
  restartPolicy: Always
  priorityClassName: system-cluster-critical

107
remote/remote.go Normal file
View File

@@ -0,0 +1,107 @@
package remote
import (
"context"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/kubernetes"
"k8s.io/kubectl/pkg/polymorphichelpers"
"k8s.io/kubectl/pkg/util/podutils"
"log"
"sort"
"time"
)
const TrafficManager = "kubevpn.traffic.manager"
// CreateServer ensures the traffic-manager pod exists in the namespace and
// returns its name. An existing active pod is reused; otherwise a privileged
// gost/tun pod is created and waited on until it reaches Running phase or a
// two-minute timeout elapses (the timeout is logged and the name returned).
func CreateServer(clientset *kubernetes.Clientset, namespace, ip string) string {
	firstPod, i, err := polymorphichelpers.GetFirstPod(clientset.CoreV1(),
		namespace,
		fields.OneTermEqualSelector("app", TrafficManager).String(),
		time.Second*5,
		func(pods []*v1.Pod) sort.Interface {
			// Prefer the most active pod.
			return sort.Reverse(podutils.ActivePods(pods))
		},
	)
	if err == nil && i != 0 && firstPod != nil {
		return firstPod.GetName()
	}

	t := true
	zero := int64(0)
	name := TrafficManager
	pod := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels:    map[string]string{"app": TrafficManager},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    "vpn",
					Image:   "naison/kubevpn:latest",
					Command: []string{"/bin/sh", "-c"},
					// Enable forwarding + NAT, then run gost as SSH proxy and
					// tun server on the leased subnet.
					Args: []string{
						"sysctl net.ipv4.ip_forward=1;" +
							"iptables -F;" +
							"iptables -P INPUT ACCEPT;" +
							"iptables -P FORWARD ACCEPT;" +
							"iptables -t nat -A POSTROUTING -s 192.168.254.0/24 -o eth0 -j MASQUERADE;" +
							"iptables -t nat -A POSTROUTING -s 172.20.0.0/16 -o eth0 -j MASQUERADE;" +
							"gost -L ssh://:2222 -L tun://:8421?net=" + ip + " -D",
					},
					// Needs root + NET_ADMIN to manage iptables and the tun device.
					SecurityContext: &v1.SecurityContext{
						Capabilities: &v1.Capabilities{
							Add: []v1.Capability{
								"NET_ADMIN",
								"SYS_MODULE",
							},
						},
						RunAsUser:  &zero,
						Privileged: &t,
					},
					Resources: v1.ResourceRequirements{
						Requests: map[v1.ResourceName]resource.Quantity{
							v1.ResourceCPU:    resource.MustParse("128m"),
							v1.ResourceMemory: resource.MustParse("256Mi"),
						},
						Limits: map[v1.ResourceName]resource.Quantity{
							v1.ResourceCPU:    resource.MustParse("256m"),
							v1.ResourceMemory: resource.MustParse("512Mi"),
						},
					},
					ImagePullPolicy: v1.PullIfNotPresent,
				},
			},
			PriorityClassName: "system-cluster-critical",
		},
	}
	if _, err := clientset.CoreV1().Pods(namespace).Create(context.TODO(), &pod, metav1.CreateOptions{}); err != nil {
		log.Fatal(err)
	}
	w, err := clientset.CoreV1().Pods(namespace).Watch(context.TODO(), metav1.SingleObject(metav1.ObjectMeta{Name: name}))
	if err != nil {
		log.Fatal(err)
	}
	defer w.Stop()
	// The original used time.Tick (leaks the ticker) and, worse, the timeout
	// case only logged and kept looping forever; time.After fires once and we
	// return on it.
	timeout := time.After(time.Minute * 2)
	for {
		select {
		case e, ok := <-w.ResultChan():
			if !ok {
				logrus.Error("watch channel closed before pod became ready")
				return name
			}
			if p, isPod := e.Object.(*v1.Pod); isPod && p.Status.Phase == v1.PodRunning {
				return name
			}
		case <-timeout:
			logrus.Error("timeout")
			return name
		}
	}
}

79
remote/remote_test.go Normal file
View File

@@ -0,0 +1,79 @@
package remote
import (
"fmt"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/homedir"
"net"
"path/filepath"
"testing"
"time"
)
func TestCreateServer(t *testing.T) {
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{
ExplicitPath: filepath.Join(homedir.HomeDir(), clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName),
},
nil,
)
config, err := clientConfig.ClientConfig()
if err != nil {
log.Fatal(err)
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
log.Fatal(err)
}
server := CreateServer(clientset, "test", "192.168.254.100/24")
fmt.Println(server)
}
// TestGetIp prints the string form of the hard-coded server address.
func TestGetIp(t *testing.T) {
	addr := net.IPNet{
		IP:   net.IPv4(192, 168, 254, 100),
		Mask: net.IPv4Mask(255, 255, 255, 0),
	}
	fmt.Println(addr.String())
}
// TestGetIPFromDHCP repeatedly leases an IP from the DHCP config map and
// releases it again. Requires cluster access.
func TestGetIPFromDHCP(t *testing.T) {
	loadingRules := &clientcmd.ClientConfigLoadingRules{
		ExplicitPath: filepath.Join(homedir.HomeDir(), clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName),
	}
	config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, nil).ClientConfig()
	if err != nil {
		log.Fatal(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}
	if err = InitDHCP(clientset, "test", nil); err != nil {
		fmt.Println(err)
	}
	for i := 0; i < 10; i++ {
		ipNet, err := GetIpFromDHCP(clientset, "test")
		if err != nil {
			fmt.Println(err)
			continue
		}
		fmt.Println(ipNet.String())
		time.Sleep(time.Millisecond * 10)
		if err = ReleaseIpToDHCP(clientset, "test", ipNet); err != nil {
			fmt.Println(err)
		}
		time.Sleep(time.Millisecond * 10)
	}
}