mirror of https://github.com/kubenetworks/kubevpn.git (synced 2025-12-24 11:51:13 +08:00)
hotfix: cmd once retry restart deploy if conflict
@@ -7,6 +7,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/util/retry"
 	cmdutil "k8s.io/kubectl/pkg/cmd/util"
 
 	"github.com/wencaiwulue/kubevpn/v2/pkg/config"
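The only change to the import block is the new k8s.io/client-go/util/retry dependency. That package exports RetryOnConflict(backoff wait.Backoff, fn func() error) error, which re-runs fn while it keeps returning a Conflict (HTTP 409) error from the API server, and DefaultRetry, a small default backoff driving those attempts. The second hunk below uses both to make the restart resilient to stale resourceVersions.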
@@ -105,24 +106,34 @@ func genTLS(ctx context.Context, namespace string, clientset *kubernetes.Clients
 func restartDeployment(ctx context.Context, namespace string, clientset *kubernetes.Clientset) error {
 	deployName := config.ConfigMapPodTrafficManager
 	plog.G(ctx).Infof("Restarting Deployment %s", deployName)
+	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+		return scaleDeploy(ctx, namespace, clientset, deployName, 0)
+	})
+	if err != nil {
+		return err
+	}
+	err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
+		return scaleDeploy(ctx, namespace, clientset, deployName, 1)
+	})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func scaleDeploy(ctx context.Context, namespace string, clientset *kubernetes.Clientset, deployName string, replicas int32) error {
 	scale, err := clientset.AppsV1().Deployments(namespace).GetScale(ctx, deployName, metav1.GetOptions{})
 	if err != nil {
 		plog.G(ctx).Errorf("Failed to get scale: %v", err)
 		return err
 	}
-	scale.Spec.Replicas = 0
+	scale.Spec.Replicas = replicas
 	scale, err = clientset.AppsV1().Deployments(namespace).UpdateScale(ctx, deployName, scale, metav1.UpdateOptions{})
 	if err != nil {
 		plog.G(ctx).Errorf("Failed to update scale: %v", err)
 		return err
 	}
-	scale.Spec.Replicas = 1
-	_, err = clientset.AppsV1().Deployments(namespace).UpdateScale(ctx, deployName, scale, metav1.UpdateOptions{})
-	if err != nil {
-		plog.G(ctx).Errorf("Failed to update scale: %v", err)
-		return err
-	}
-	return nil
+	return err
 }
 
 func getCIDR(ctx context.Context, factory cmdutil.Factory) error {
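Taken together, the hunk factors the inline scale-down/scale-up sequence into a scaleDeploy helper and wraps each call in retry.RetryOnConflict, so a 409 Conflict on the Deployment's scale subresource (a stale resourceVersion) triggers a fresh GetScale/UpdateScale round trip instead of failing the restart. The two scale changes are retried independently, so a conflict while scaling back to 1 does not repeat the scale-to-0 step. Below is a minimal, self-contained sketch of the same pattern; restartByScaling, setScale, the kubeconfig loading in main, and the "kubevpn"/"kubevpn-traffic-manager" values are illustrative placeholders, not identifiers from the kubevpn codebase.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/retry"
)

// setScale does one get-modify-update round trip on the Deployment's scale
// subresource. UpdateScale fails with a 409 Conflict when the resourceVersion
// read by GetScale has already been superseded.
func setScale(ctx context.Context, clientset *kubernetes.Clientset, namespace, name string, replicas int32) error {
	scale, err := clientset.AppsV1().Deployments(namespace).GetScale(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = clientset.AppsV1().Deployments(namespace).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
	return err
}

// restartByScaling scales the Deployment to zero and back to one, retrying
// each step on conflict so the whole restart survives concurrent writers.
func restartByScaling(ctx context.Context, clientset *kubernetes.Clientset, namespace, name string) error {
	if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		return setScale(ctx, clientset, namespace, name, 0)
	}); err != nil {
		return err
	}
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		return setScale(ctx, clientset, namespace, name, 1)
	})
}

func main() {
	// Placeholder wiring: load the local kubeconfig and restart a Deployment.
	// kubevpn itself resolves the name from config.ConfigMapPodTrafficManager.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(cfg)
	if err := restartByScaling(context.Background(), clientset, "kubevpn", "kubevpn-traffic-manager"); err != nil {
		fmt.Println("restart failed:", err)
	}
}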