hotfix: add cmd once for generate tls cert after helm installed (#657)

* hotfix: add cmd once for generate tls cert after helm installed

* hotfix: update scale

* hotfix: update scale

* hotfix: fix bugs

* hotfix: print

* feat: add role for get cidr

* feat: add --image options for cmd once

* feat: add role watch pod

* feat: filter api-server
This commit is contained in:
naison
2025-06-26 11:08:42 +08:00
committed by GitHub
parent 4021480ad5
commit f14d074417
11 changed files with 235 additions and 97 deletions

View File

@@ -11,19 +11,7 @@ RUN make kubevpn
FROM debian:bookworm-slim
ARG BASE=github.com/wencaiwulue/kubevpn
RUN apt-get update && apt-get install -y openssl iptables curl dnsutils \
&& if [ $(uname -m) = "x86_64" ]; then \
echo "The architecture is AMD64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"; \
elif [ $(uname -m) = "aarch64" ]; then \
echo "The architecture is ARM64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl"; \
else \
echo "Unsupported architecture."; \
exit 1; \
fi \
&& chmod +x kubectl && mv kubectl /usr/local/bin \
&& apt-get remove -y curl \
RUN apt-get update && apt-get install -y iptables dnsutils \
&& apt-get autoremove -y \
&& apt-get clean -y \
&& rm -rf /var/lib/apt/lists/*

View File

@@ -11,16 +11,6 @@ RUN apt-get clean && apt-get update && apt-get install -y wget dnsutils vim curl
net-tools iptables iputils-ping lsof iproute2 tcpdump binutils traceroute conntrack socat iperf3 \
apt-transport-https ca-certificates curl
RUN if [ $(uname -m) = "x86_64" ]; then \
echo "The architecture is AMD64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && chmod +x kubectl && mv kubectl /usr/local/bin; \
elif [ $(uname -m) = "aarch64" ]; then \
echo "The architecture is ARM64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" && chmod +x kubectl && mv kubectl /usr/local/bin; \
else \
echo "Unsupported architecture."; \
fi
ENV TZ=Asia/Shanghai \
DEBIAN_FRONTEND=noninteractive
RUN apt update \

View File

@@ -1,4 +1,4 @@
FROM envoyproxy/envoy:v1.25.0 AS envoy
FROM envoyproxy/envoy:v1.34.1 AS envoy
FROM golang:1.23 AS builder
ARG BASE=github.com/wencaiwulue/kubevpn
@@ -11,19 +11,7 @@ RUN make kubevpn
FROM debian:bookworm-slim
ARG BASE=github.com/wencaiwulue/kubevpn
RUN apt-get update && apt-get install -y openssl iptables curl dnsutils \
&& if [ $(uname -m) = "x86_64" ]; then \
echo "The architecture is AMD64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"; \
elif [ $(uname -m) = "aarch64" ]; then \
echo "The architecture is ARM64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl"; \
else \
echo "Unsupported architecture."; \
exit 1; \
fi \
&& chmod +x kubectl && mv kubectl /usr/local/bin \
&& apt-get remove -y curl \
RUN apt-get update && apt-get install -y iptables dnsutils \
&& apt-get autoremove -y \
&& apt-get clean -y \
&& rm -rf /var/lib/apt/lists/*

View File

@@ -32,42 +32,7 @@ spec:
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- /bin/bash
- -c
args:
- |2-
echo "Label namespace {{ include "kubevpn.namespace" . }}"
kubectl label ns {{ include "kubevpn.namespace" . }} ns={{ include "kubevpn.namespace" . }}
echo "Generating https certificate"
openssl req -x509 -nodes -days 36500 -newkey rsa:2048 -subj "/CN={{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}" -addext "subjectAltName=DNS:{{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}.svc.cluster.local,DNS:{{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}.svc,DNS:{{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }},DNS:localhost,IP:127.0.0.1" -keyout server.key -out server.crt
export TLS_CRT=$(cat server.crt | base64 | tr -d '\n')
echo "Patch mutatingwebhookconfigurations {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}"
kubectl patch mutatingwebhookconfigurations {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }} -p "{\"webhooks\":[{\"name\":\"{{ include "kubevpn.fullname" . }}.naison.io\",\"sideEffects\":\"None\",\"admissionReviewVersions\":[\"v1\", \"v1beta1\"],\"clientConfig\":{\"service\":{\"namespace\":\"{{ include "kubevpn.namespace" . }}\",\"name\":\"{{ include "kubevpn.fullname" . }}\"},\"caBundle\":\"$TLS_CRT\"}}]}"
export TLS_KEY=$(cat server.key | base64 | tr -d '\n')
echo "Patch secret {{ include "kubevpn.fullname" . }}"
kubectl patch secret {{ include "kubevpn.fullname" . }} -n {{ include "kubevpn.namespace" . }} -p "{\"data\":{\"tls_key\":\"$TLS_KEY\",\"tls_crt\":\"$TLS_CRT\"}}"
echo "Restart the pods..."
kubectl scale -n {{ include "kubevpn.namespace" . }} --replicas=0 deployment/{{ include "kubevpn.fullname" . }}
kubectl scale -n {{ include "kubevpn.namespace" . }} --replicas=1 deployment/{{ include "kubevpn.fullname" . }}
export POOLS=$(kubectl get cm {{ include "kubevpn.fullname" . }} -n {{ include "kubevpn.namespace" . }} -o jsonpath='{.data.IPv4_POOLS}')
if [[ -z "${POOLS// }" ]];then
echo "Cidr is empty"
echo "Get pod cidr..."
export POD_CIDR=$(kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}' | tr -s '\n' ' ')
echo "Get service cidr..."
export SVC_CIDR=$(echo '{"apiVersion":"v1","kind":"Service","metadata":{"name":"kubevpn-get-svc-cidr-{{ include "kubevpn.namespace" . }}", "namespace": "{{ include "kubevpn.namespace" . }}"},"spec":{"clusterIP":"1.1.1.1","ports":[{"port":443}]}}' | kubectl apply -f - 2>&1 | sed 's/.*valid IPs is //')
echo "Pod cidr: $POD_CIDR, service cidr: $SVC_CIDR"
echo "Patch configmap {{ include "kubevpn.fullname" . }}"
kubectl patch configmap {{ include "kubevpn.fullname" . }} -n {{ include "kubevpn.namespace" . }} -p "{\"data\":{\"IPv4_POOLS\":\"$POD_CIDR $SVC_CIDR\"}}"
else
echo "Cidr is NOT empty"
fi
echo "Done~"
exit 0
- kubevpn
- once
- --image
- "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"

View File

@@ -21,10 +21,10 @@ rules:
- delete
- apiGroups: [ "" ]
resources: [ "namespaces" ]
resourceNames: ["{{ include "kubevpn.namespace" . }}"]
resourceNames: [{{ include "kubevpn.namespace" . }}]
verbs:
- get
- patch
- update
- apiGroups: [ "apps" ]
resources: [ "deployments/scale", "deployments" ]
resourceNames:
@@ -43,6 +43,25 @@ rules:
- get
- update
- patch
- list
# for get network cidr
- apiGroups:
- ""
resources:
- pods/exec
verbs:
- create
- apiGroups:
- ""
resources:
- pods
- pods/log
verbs:
- list
- get
- create
- delete
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
@@ -59,7 +78,7 @@ rules:
verbs:
- get
- list
- patch
- update
- apiGroups:
- ""
resources:
@@ -67,4 +86,11 @@ rules:
verbs:
- get
- list
- watch
- watch
# for get network cidr
- apiGroups:
- ""
resources:
- pods
verbs:
- list

26
cmd/kubevpn/cmds/once.go Normal file
View File

@@ -0,0 +1,26 @@
package cmds
import (
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
)
// CmdOnce builds the hidden `kubevpn once` command, run a single time after a
// helm installation: it regenerates the webhook TLS material and restarts the
// deployment (see handler.Once).
func CmdOnce(factory cmdutil.Factory) *cobra.Command {
	once := &cobra.Command{
		Use:   "once",
		Short: i18n.T("Once generate TLS and restart deployment"),
		Long:  templates.LongDesc(i18n.T(`Once generate TLS and restart deployment for helm installation.`)),
		RunE: func(cmd *cobra.Command, args []string) error {
			return handler.Once(cmd.Context(), factory)
		},
		Hidden:                true,
		DisableFlagsInUseLine: true,
	}
	// --image selects the container image used when starting pods.
	once.Flags().StringVar(&config.Image, "image", config.Image, "use this image to startup container")
	return once
}

View File

@@ -65,6 +65,7 @@ func NewKubeVPNCommand() *cobra.Command {
CmdDaemon(factory),
CmdWebhook(factory),
CmdSyncthing(factory),
CmdOnce(factory),
},
},
{

View File

@@ -11,6 +11,7 @@ import (
const (
HOME = ".kubevpn"
Daemon = "daemon"
Log = "log"
SockPath = "user_daemon.sock"
SudoSockPath = "root_daemon.sock"
@@ -23,14 +24,15 @@ const (
ConfigFile = "config.yaml"
TmpDir = "tmp"
TempDir = "temp"
DBFile = "db"
)
var (
daemonPath string
homePath string
daemonPath string
logPath string
//go:embed config.yaml
config []byte
@@ -43,8 +45,9 @@ func init() {
}
homePath = filepath.Join(dir, HOME)
daemonPath = filepath.Join(dir, HOME, Daemon)
logPath = filepath.Join(dir, HOME, Log)
var paths = []string{homePath, daemonPath, GetPProfPath(), GetSyncthingPath(), GetTempPath()}
var paths = []string{homePath, daemonPath, logPath, GetPProfPath(), GetSyncthingPath(), GetTempPath()}
for _, path := range paths {
_, err = os.Stat(path)
if errors.Is(err, os.ErrNotExist) {
@@ -96,14 +99,14 @@ func GetConfigFile() string {
}
func GetTempPath() string {
return filepath.Join(homePath, TmpDir)
return filepath.Join(homePath, TempDir)
}
func GetDaemonLogPath(isSudo bool) string {
if isSudo {
return filepath.Join(daemonPath, SudoLogFile)
return filepath.Join(logPath, SudoLogFile)
}
return filepath.Join(daemonPath, UserLogFile)
return filepath.Join(logPath, UserLogFile)
}
func GetPProfPath() string {

View File

@@ -51,10 +51,12 @@ func (s *server) ServeDNS(w miekgdns.ResponseWriter, m *miekgdns.Msg) {
var wg = &sync.WaitGroup{}
var isSuccess = &atomic.Bool{}
searchList := fix(originName, s.forwardDNS.Search)
var searchList []string
if v, ok := s.dnsCache.Get(originName); ok {
searchList = []string{v.(string)}
plog.G(ctx).Infof("Use cache name: %s --> %s", originName, v.(string))
} else {
searchList = fix(originName, s.forwardDNS.Search)
}
for _, name := range searchList {
@@ -67,7 +69,6 @@ func (s *server) ServeDNS(w miekgdns.ResponseWriter, m *miekgdns.Msg) {
msg.Question[i].Name = name
}
var answer *miekgdns.Msg
answer, err := miekgdns.ExchangeContext(ctx, msg, net.JoinHostPort(dnsAddr, s.forwardDNS.Port))
if err != nil {
plog.G(ctx).Errorf("Failed to found DNS name: %s: %v", name, err)
@@ -76,13 +77,11 @@ func (s *server) ServeDNS(w miekgdns.ResponseWriter, m *miekgdns.Msg) {
if len(answer.Answer) == 0 {
return
}
if isSuccess.Load() {
if !isSuccess.CompareAndSwap(false, true) {
return
}
isSuccess.Store(true)
s.dnsCache.Add(originName, name, time.Minute*30)
plog.G(ctx).Infof("Resolve domain %s with full name: %s --> %s", originName, name, answer.Answer[0].String())
for i := 0; i < len(answer.Answer); i++ {
answer.Answer[i].Header().Name = originName
@@ -90,6 +89,7 @@ func (s *server) ServeDNS(w miekgdns.ResponseWriter, m *miekgdns.Msg) {
for i := 0; i < len(answer.Question); i++ {
answer.Question[i].Name = originName
}
plog.G(ctx).Infof("Resolve domain %s with full name: %s --> %s", originName, name, answer.String())
err = w.WriteMsg(answer)
if err != nil {

View File

@@ -225,7 +225,7 @@ func (c *ConnectOptions) DoConnect(ctx context.Context, isLite bool) (err error)
return
}
go c.setupSignalHandler()
if err = c.getCIDR(c.ctx); err != nil {
if err = c.getCIDR(c.ctx, true); err != nil {
plog.G(ctx).Errorf("Failed to get network CIDR: %v", err)
return
}
@@ -822,11 +822,13 @@ func (c *ConnectOptions) GetRunningPodList(ctx context.Context) ([]v1.Pod, error
// https://stackoverflow.com/questions/45903123/kubernetes-set-service-cidr-and-pod-cidr-the-same
// https://stackoverflow.com/questions/44190607/how-do-you-find-the-cluster-service-cidr-of-a-kubernetes-cluster/54183373#54183373
// https://stackoverflow.com/questions/44190607/how-do-you-find-the-cluster-service-cidr-of-a-kubernetes-cluster
func (c *ConnectOptions) getCIDR(ctx context.Context) error {
func (c *ConnectOptions) getCIDR(ctx context.Context, filterAPIServer bool) error {
var err error
c.apiServerIPs, err = util.GetAPIServerIP(c.config.Host)
if err != nil {
return err
if filterAPIServer {
c.apiServerIPs, err = util.GetAPIServerIP(c.config.Host)
if err != nil {
return err
}
}
// (1) get CIDR from cache
@@ -835,7 +837,7 @@ func (c *ConnectOptions) getCIDR(ctx context.Context) error {
if err != nil {
return err
}
if ipPoolStr != "" {
if strings.TrimSpace(ipPoolStr) != "" {
for _, s := range strings.Split(ipPoolStr, " ") {
_, cidr, _ := net.ParseCIDR(s)
if cidr != nil {

149
pkg/handler/once.go Normal file
View File

@@ -0,0 +1,149 @@
package handler
import (
"context"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
// Once runs the post-install steps in order: label the namespace, regenerate
// the webhook TLS material, restart the traffic-manager deployment, and
// detect the cluster CIDR ranges. It stops at the first failing step.
func Once(ctx context.Context, factory cmdutil.Factory) error {
	clientset, err := factory.KubernetesClientSet()
	if err != nil {
		return err
	}
	namespace, _, err := factory.ToRawKubeConfigLoader().Namespace()
	if err != nil {
		return err
	}
	steps := []func() error{
		func() error { return labelNs(ctx, namespace, clientset) },
		func() error { return genTLS(ctx, namespace, clientset) },
		func() error { return restartDeployment(ctx, namespace, clientset) },
		func() error { return getCIDR(ctx, factory) },
	}
	for _, step := range steps {
		if err := step(); err != nil {
			return err
		}
	}
	return nil
}
// labelNs ensures the namespace carries the label ns=<namespace>, which the
// helm chart previously applied via kubectl. It is idempotent: if the label
// is already present with the expected value, nothing is updated.
func labelNs(ctx context.Context, namespace string, clientset *kubernetes.Clientset) error {
	plog.G(ctx).Infof("Labeling Namespace %s", namespace)
	ns, err := clientset.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
	if err != nil {
		plog.G(ctx).Errorf("Failed to get namespace: %v", err)
		return err
	}
	if ns.Labels == nil {
		ns.Labels = map[string]string{}
	}
	if ns.Labels["ns"] == namespace {
		plog.G(ctx).Infof("Namespace %s already labeled", namespace)
		return nil
	}
	ns.Labels["ns"] = namespace
	_, err = clientset.CoreV1().Namespaces().Update(ctx, ns, metav1.UpdateOptions{})
	if err != nil {
		// Fix: was Infof with the typo "labele"; failures are logged with
		// Errorf everywhere else in this file.
		plog.G(ctx).Errorf("Failed to label namespace: %v", err)
		return err
	}
	return nil
}
// genTLS generates a fresh self-signed certificate for the webhook service in
// the given namespace, stores it in the kubevpn secret, and patches the
// caBundle of the MutatingWebhookConfiguration to match.
func genTLS(ctx context.Context, namespace string, clientset *kubernetes.Clientset) error {
	plog.G(ctx).Infof("Generating TLS for Namespace %s", namespace)
	crt, key, host, err := util.GenTLSCert(ctx, namespace)
	if err != nil {
		return err
	}
	// Deliberately NOT v1.SecretTypeTls: that type requires the keys tls.crt
	// and tls.key, and a dotted name cannot be used as an env variable:
	//   ➜ ~ export tls.key=a
	//   export: not valid in this context: tls.key
	secret := genSecret(namespace, crt, key, host)
	existingSecret, err := clientset.CoreV1().Secrets(namespace).Get(ctx, secret.Name, metav1.GetOptions{})
	if err != nil {
		plog.G(ctx).Errorf("Failed to get secret: %v", err)
		return err
	}
	// Carry over the resourceVersion so the Update replaces the live object.
	secret.ResourceVersion = existingSecret.ResourceVersion
	if _, err = clientset.CoreV1().Secrets(namespace).Update(ctx, secret, metav1.UpdateOptions{}); err != nil {
		plog.G(ctx).Errorf("Failed to update secret: %v", err)
		return err
	}
	webhookCfg := genMutatingWebhookConfiguration(namespace, crt)
	existingCfg, err := clientset.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(ctx, webhookCfg.Name, metav1.GetOptions{})
	if err != nil {
		plog.G(ctx).Errorf("Failed to get mutatingWebhookConfiguration: %v", err)
		return err
	}
	webhookCfg.ResourceVersion = existingCfg.ResourceVersion
	if _, err = clientset.AdmissionregistrationV1().MutatingWebhookConfigurations().Update(ctx, webhookCfg, metav1.UpdateOptions{}); err != nil {
		plog.G(ctx).Errorf("Failed to update mutatingWebhookConfiguration: %v", err)
		return err
	}
	return nil
}
// restartDeployment bounces the traffic-manager deployment by scaling it to
// zero replicas and then back to one.
func restartDeployment(ctx context.Context, namespace string, clientset *kubernetes.Clientset) error {
	name := config.ConfigMapPodTrafficManager
	plog.G(ctx).Infof("Restarting Deployment %s", name)
	deployments := clientset.AppsV1().Deployments(namespace)
	scale, err := deployments.GetScale(ctx, name, metav1.GetOptions{})
	if err != nil {
		plog.G(ctx).Errorf("Failed to get scale: %v", err)
		return err
	}
	// First scale down to zero...
	scale.Spec.Replicas = 0
	scale, err = deployments.UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
	if err != nil {
		plog.G(ctx).Errorf("Failed to update scale: %v", err)
		return err
	}
	// ...then back up to one replica, reusing the scale object returned by
	// the previous update (it carries the current resourceVersion).
	scale.Spec.Replicas = 1
	if _, err = deployments.UpdateScale(ctx, name, scale, metav1.UpdateOptions{}); err != nil {
		plog.G(ctx).Errorf("Failed to update scale: %v", err)
		return err
	}
	return nil
}
// getCIDR detects the cluster pod/service CIDR ranges via ConnectOptions and
// logs the deduplicated result. The api-server filter is disabled because
// this runs inside a pod (filterAPIServer=false).
func getCIDR(ctx context.Context, factory cmdutil.Factory) error {
	plog.G(ctx).Infof("Getting CIDR")
	options := &ConnectOptions{
		Image: config.Image,
	}
	if err := options.InitClient(factory); err != nil {
		return err
	}
	// run inside pod
	if err := options.getCIDR(ctx, false); err != nil {
		plog.G(ctx).Errorf("Failed to get CIDR: %v", err)
		return err
	}
	unique := sets.New[string]()
	for _, cidr := range options.cidrs {
		unique.Insert(cidr.String())
	}
	plog.G(ctx).Infof("Get CIDR: %v", strings.Join(unique.UnsortedList(), " "))
	return nil
}