mirror of https://github.com/kubenetworks/kubevpn.git
synced 2025-10-29 18:02:00 +08:00
feat: patch once and restore probe by last user exit
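In outline: instead of patching the workload twice (once to inject the sidecar spec, once more to strip the probes) and restoring probes unconditionally, the spec replacement, a probe-backup annotation, and the probe-removal operations are now sent as one JSON patch, and the probes are only restored when the last connected user exits. A minimal sketch of the combined patch, using the `P` entry type this commit introduces (the helper name `buildPatch` is illustrative, not from the repo):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// P mirrors the JSON-patch entry type introduced by this commit.
type P struct {
	Op    string      `json:"op,omitempty"`
	Path  string      `json:"path,omitempty"`
	Value interface{} `json:"value,omitempty"`
}

// buildPatch (hypothetical helper) combines the spec replacement, the probe
// backup annotation and the probe-removal entries into one patch document,
// so the API server applies them in a single request.
func buildPatch(spec interface{}, restore []byte, removeProbes []P) ([]byte, error) {
	ps := []P{
		{Op: "replace", Path: "/spec/template/spec", Value: spec},            // spec with sidecars injected
		{Op: "replace", Path: "/metadata/annotations/probe", Value: restore}, // backup used to restore probes later
	}
	return json.Marshal(append(ps, removeProbes...))
}

func main() {
	remove := []P{{Op: "replace", Path: "/spec/template/spec/containers/0/readinessProbe"}}
	restore, _ := json.Marshal([]P{{
		Op:    "replace",
		Path:  "/spec/template/spec/containers/0/readinessProbe",
		Value: map[string]interface{}{"httpGet": map[string]string{"path": "/health"}},
	}})
	b, _ := buildPatch(map[string]string{"example": "spec"}, restore, remove)
	fmt.Println(string(b))
}
```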
@@ -24,7 +24,6 @@ func init() {
 	connectCmd.Flags().StringVar(&connect.KubeconfigPath, "kubeconfig", clientcmd.RecommendedHomeFile, "kubeconfig")
 	connectCmd.Flags().StringVarP(&connect.Namespace, "namespace", "n", "", "namespace")
 	connectCmd.PersistentFlags().StringArrayVar(&connect.Workloads, "workloads", []string{}, "workloads, like: pods/tomcat, deployment/nginx, replicaset/tomcat...")
-	connectCmd.Flags().StringVar((*string)(&connect.Mode), "mode", string(handler.Reverse), "mode(reverse/mesh), reverse: proxy all traffic into local, mesh: proxy traffic with special headers into local")
 	connectCmd.Flags().StringToStringVarP(&connect.Headers, "headers", "H", map[string]string{}, "headers, format is k=v, like: k1=v1,k2=v2")
 	connectCmd.Flags().BoolVar(&config.Debug, "debug", false, "true/false")
 	RootCmd.AddCommand(connectCmd)
@@ -30,7 +30,8 @@ var ServerCmd = &cobra.Command{
 		go func() { log.Info(http.ListenAndServe("localhost:6060", nil)) }()
 	},
 	Run: func(cmd *cobra.Command, args []string) {
-		if err := handler.Start(context.TODO(), route); err != nil {
+		err := handler.Start(context.TODO(), route)
+		if err != nil {
 			log.Fatal(err)
 		}
 		select {}
@@ -2,6 +2,7 @@ package controlplane
 
 import (
 	"log"
+	"time"
 
 	"github.com/fsnotify/fsnotify"
 )
@@ -25,9 +26,11 @@ func Watch(directory string, notifyCh chan<- NotifyMessage) {
 		log.Fatal(err)
 	}
 	defer watcher.Close()
+	err = watcher.Add(directory)
+	if err != nil {
+		log.Fatal(err)
+	}
 
-	done := make(chan bool)
-	go func() {
 	for {
 		select {
 		case event, ok := <-watcher.Events:
@@ -56,13 +59,12 @@ func Watch(directory string, notifyCh chan<- NotifyMessage) {
 				return
 			}
 			log.Println("error:", err)
+		case <-time.Tick(time.Second * 3):
+			notifyCh <- NotifyMessage{
+				Operation: Remove,
+				FilePath:  directory,
+			}
 		}
 	}
-	}()
-
-	err = watcher.Add(directory)
-	if err != nil {
-		log.Fatal(err)
-	}
-	<-done
 }
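A side note on the new `case <-time.Tick(time.Second * 3)`: because the case expression is re-evaluated on every pass through the loop, each iteration allocates a fresh ticker, so the branch behaves like a 3-second idle timeout rather than a steady heartbeat, and on Go versions before 1.23 the abandoned tickers are never collected. A leak-free sketch with a single ticker hoisted out of the loop (NotifyMessage and Remove are simplified stand-ins for the controlplane types):

```go
package main

import (
	"log"
	"time"

	"github.com/fsnotify/fsnotify"
)

// Simplified stand-ins for the controlplane types.
type Operation int

const Remove Operation = iota

type NotifyMessage struct {
	Operation Operation
	FilePath  string
}

func watch(directory string, notifyCh chan<- NotifyMessage) {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()
	if err = watcher.Add(directory); err != nil {
		log.Fatal(err)
	}

	ticker := time.NewTicker(time.Second * 3) // one ticker for the whole loop
	defer ticker.Stop()
	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			log.Println("event:", event)
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		case <-ticker.C: // fires every 3s regardless of event traffic
			notifyCh <- NotifyMessage{Operation: Remove, FilePath: directory}
		}
	}
}

func main() {
	ch := make(chan NotifyMessage, 1)
	go watch("/tmp", ch)
	log.Println(<-ch)
}
```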
@@ -17,6 +17,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/cli-runtime/pkg/genericclioptions"
 	"k8s.io/client-go/kubernetes"
@@ -31,17 +32,9 @@ import (
 	"github.com/wencaiwulue/kubevpn/pkg/util"
 )
 
-type Mode string
-
-const (
-	Mesh    Mode = "mesh"
-	Reverse Mode = "reverse"
-)
-
 type ConnectOptions struct {
 	KubeconfigPath string
 	Namespace      string
-	Mode           Mode
 	Headers        map[string]string
 	Workloads      []string
 	clientset      *kubernetes.Clientset
@@ -62,53 +55,42 @@ func (c *ConnectOptions) createRemoteInboundPod() (err error) {
 		return
 	}
 
 	tempIps := []*net.IPNet{c.localTunIP}
-	//wg := &sync.WaitGroup{}
-	//lock := &sync.Mutex{}
 	for _, workload := range c.Workloads {
 		if len(workload) > 0 {
-			//wg.Add(1)
-			/*go*/ func(finalWorkload string) {
-				//defer wg.Done()
-				//lock.Lock()
 			virtualShadowIp, _ := c.dhcp.RentIPRandom()
 			tempIps = append(tempIps, virtualShadowIp)
-				//lock.Unlock()
-			c.usedIPs = append(c.usedIPs, virtualShadowIp)
 			configInfo := util.PodRouteConfig{
 				LocalTunIP:           c.localTunIP.IP.String(),
 				InboundPodTunIP:      virtualShadowIp.String(),
 				TrafficManagerRealIP: c.routerIP.String(),
 				Route:                config.CIDR.String(),
 			}
-			// TODO OPTIMIZE CODE
-			if c.Mode == Mesh {
-				err = InjectVPNAndEnvoySidecar(c.factory, c.clientset.CoreV1().ConfigMaps(c.Namespace), c.Namespace, finalWorkload, configInfo, c.Headers)
+			// means mesh mode
+			if len(c.Headers) != 0 {
+				err = InjectVPNAndEnvoySidecar(c.factory, c.clientset.CoreV1().ConfigMaps(c.Namespace), c.Namespace, workload, configInfo, c.Headers)
 			} else {
-				err = InjectVPNSidecar(c.factory, c.Namespace, finalWorkload, configInfo)
+				err = InjectVPNSidecar(c.factory, c.Namespace, workload, configInfo)
 			}
 			if err != nil {
-				log.Error(err)
+				return err
 			}
-			}(workload)
 		}
 	}
-	//wg.Wait()
 	c.usedIPs = tempIps
 	return
 }
 
 func (c *ConnectOptions) DoConnect() (err error) {
 	c.addCleanUpResourceHandler(c.clientset, c.Namespace)
-	c.cidrs, err = util.GetCidrFromCNI(c.clientset, c.restclient, c.config, c.Namespace)
-	if err != nil {
-		return
-	}
 	trafficMangerNet := net.IPNet{IP: config.RouterIP, Mask: config.CIDR.Mask}
 	c.dhcp = NewDHCPManager(c.clientset.CoreV1().ConfigMaps(c.Namespace), c.Namespace, &trafficMangerNet)
 	if err = c.dhcp.InitDHCP(); err != nil {
 		return
 	}
+	err = c.GetCIDR()
+	if err != nil {
+		return
+	}
 	c.routerIP, err = CreateOutboundPod(c.clientset, c.Namespace, trafficMangerNet.String(), c.cidrs)
 	if err != nil {
 		return
@@ -420,3 +402,35 @@ func (c *ConnectOptions) GetRunningPodList() ([]v1.Pod, error) {
 	}
 	return list.Items, nil
 }
+
+func (c *ConnectOptions) GetCIDR() (err error) {
+	// (1) get cidr from cache
+	var value string
+	value, err = c.dhcp.Get(config.KeyClusterIPv4POOLS)
+	if err == nil && len(value) != 0 {
+		for _, s := range strings.Split(value, " ") {
+			_, cidr, _ := net.ParseCIDR(s)
+			if cidr != nil {
+				c.cidrs = append(c.cidrs, cidr)
+			}
+		}
+	}
+	if len(c.cidrs) != 0 {
+		return
+	}
+
+	// (2) get cache from cni
+	c.cidrs, err = util.GetCidrFromCNI(c.clientset, c.restclient, c.config, c.Namespace)
+	if err == nil {
+		s := sets.NewString()
+		for _, cidr := range c.cidrs {
+			s.Insert(cidr.String())
+		}
+		_ = c.dhcp.Set(config.KeyClusterIPv4POOLS, strings.Join(s.List(), " "))
+		return
+	}
+
+	// (3) fallback to get cidr from node/pod/service
+	c.cidrs, err = util.GetCIDRFromResource(c.clientset, c.Namespace)
+	return
+}
@@ -34,7 +34,7 @@ var (
 )
 
 func TestGetCIDR(t *testing.T) {
-	cidr, err := util.GetCIDR(clientsets, namespaces)
+	cidr, err := util.GetCIDRFromResource(clientsets, namespaces)
 	if err == nil {
 		for _, ipNet := range cidr {
 			fmt.Println(ipNet)
@@ -139,3 +139,34 @@ func (d *DHCPManager) updateDHCPConfigMap(f func(*ipallocator.Range) error) error {
 	}
 	return nil
 }
+
+func (d *DHCPManager) Set(key, value string) error {
+	cm, err := d.client.Get(context.Background(), config.ConfigMapPodTrafficManager, metav1.GetOptions{})
+	if err != nil {
+		log.Errorf("failed to get data, err: %v", err)
+		return err
+	}
+	if cm.Data == nil {
+		cm.Data = make(map[string]string)
+	}
+	cm.Data[key] = value
+	_, err = d.client.Update(context.Background(), cm, metav1.UpdateOptions{})
+	if err != nil {
+		log.Errorf("update data failed, err: %v", err)
+		return err
+	}
+	return nil
+}
+
+func (d *DHCPManager) Get(key string) (string, error) {
+	cm, err := d.client.Get(context.Background(), config.ConfigMapPodTrafficManager, metav1.GetOptions{})
+	if err != nil {
+		return "", err
+	}
+	if cm != nil && cm.Data != nil {
+		if v, ok := cm.Data[key]; ok {
+			return v, nil
+		}
+	}
+	return "", fmt.Errorf("can not get data")
+}
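Get and Set give GetCIDR a small ConfigMap-backed cache: the CIDR list is flattened to a single space-separated string under config.KeyClusterIPv4POOLS and parsed back on the next connect. A sketch of the round trip (the helper names are illustrative, not from the repo):

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// encodeCIDRs flattens the list the same way GetCIDR stores it:
// one space-separated string of CIDR notations.
func encodeCIDRs(cidrs []*net.IPNet) string {
	s := make([]string, 0, len(cidrs))
	for _, c := range cidrs {
		s = append(s, c.String())
	}
	return strings.Join(s, " ")
}

// decodeCIDRs is the inverse, skipping anything that fails to parse,
// mirroring the tolerant loop in GetCIDR step (1).
func decodeCIDRs(value string) []*net.IPNet {
	var out []*net.IPNet
	for _, s := range strings.Split(value, " ") {
		if _, cidr, _ := net.ParseCIDR(s); cidr != nil {
			out = append(out, cidr)
		}
	}
	return out
}

func main() {
	_, a, _ := net.ParseCIDR("10.244.0.0/24")
	_, b, _ := net.ParseCIDR("10.96.0.0/16")
	v := encodeCIDRs([]*net.IPNet{a, b})
	fmt.Println(v)                    // "10.244.0.0/24 10.96.0.0/16"
	fmt.Println(len(decodeCIDRs(v))) // 2
}
```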
@@ -65,48 +65,40 @@ func InjectVPNAndEnvoySidecar(factory cmdutil.Factory, clientset v12.ConfigMapInterface, namespace, workloads string, c util.PodRouteConfig, headers map[string]string) error {
 	if containerNames.HasAll(config.ContainerSidecarVPN, config.ContainerSidecarEnvoyProxy) {
 		// add rollback func to remove envoy config
 		RollbackFuncList = append(RollbackFuncList, func() {
-			err = removeEnvoyConfig(clientset, nodeID, headers)
+			err := UnPatchContainer(factory, clientset, namespace, workloads, headers)
 			if err != nil {
-				log.Warnln(err)
+				log.Error(err)
 			}
 		})
 		return nil
 	}
 
 	// (1) add mesh container
+	removePatch, restorePatch := patch(origin, path)
+	b, _ := json.Marshal(restorePatch)
 	mesh.AddMeshContainer(templateSpec, nodeID, c)
 	helper := pkgresource.NewHelper(object.Client, object.Mapping)
-	bytes, err := json.Marshal([]struct {
-		Op    string      `json:"op"`
-		Path  string      `json:"path"`
-		Value interface{} `json:"value"`
-	}{{
+	ps := []P{{
 		Op:    "replace",
 		Path:  "/" + strings.Join(append(path, "spec"), "/"),
 		Value: templateSpec.Spec,
-	}})
+	}, {
+		Op:    "replace",
+		Path:  "/metadata/annotations/probe",
+		Value: b,
+	}}
+	bytes, err := json.Marshal(append(ps, removePatch...))
 	if err != nil {
 		return err
 	}
-	//t := true
-	_, err = helper.Patch(object.Namespace, object.Name, types.JSONPatchType, bytes, &metav1.PatchOptions{
-		//Force: &t,
-	})
-
-	removePatch, restorePatch := patch(origin, path)
-	_, err = helper.Patch(object.Namespace, object.Name, types.JSONPatchType, removePatch, &metav1.PatchOptions{})
+	_, err = helper.Patch(object.Namespace, object.Name, types.JSONPatchType, bytes, &metav1.PatchOptions{})
 	if err != nil {
-		log.Warnf("error while remove probe of resource: %s %s, ignore, err: %v",
-			object.Mapping.GroupVersionKind.GroupKind().String(), object.Name, err)
+		log.Warnf("error while remove probe of resource: %s %s, ignore, err: %v", object.Mapping.GroupVersionKind.GroupKind().String(), object.Name, err)
 	}
 
 	RollbackFuncList = append(RollbackFuncList, func() {
 		if err = UnPatchContainer(factory, clientset, namespace, workloads, headers); err != nil {
 			log.Error(err)
 		}
 		if _, err = helper.Patch(object.Namespace, object.Name, types.JSONPatchType, restorePatch, &metav1.PatchOptions{}); err != nil {
 			log.Warnf("error while restore probe of resource: %s %s, ignore, err: %v",
 				object.Mapping.GroupVersionKind.GroupKind().String(), object.Name, err)
 		}
 	})
 	_ = util.RolloutStatus(factory, namespace, workloads, time.Minute*5)
 	return err
@@ -128,12 +120,14 @@ func UnPatchContainer(factory cmdutil.Factory, mapInterface v12.ConfigMapInterface, namespace, workloads string, headers map[string]string) error {
 
 	nodeID := fmt.Sprintf("%s.%s", object.Mapping.Resource.GroupResource().String(), object.Name)
 
-	err = removeEnvoyConfig(mapInterface, nodeID, headers)
+	var empty bool
+	empty, err = removeEnvoyConfig(mapInterface, nodeID, headers)
 	if err != nil {
 		log.Warnln(err)
 		return err
 	}
 
+	if empty {
 	mesh.RemoveContainers(templateSpec)
 	helper := pkgresource.NewHelper(object.Client, object.Mapping)
 	bytes, err := json.Marshal([]struct {
@@ -144,14 +138,16 @@ func UnPatchContainer(factory cmdutil.Factory, mapInterface v12.ConfigMapInterface, namespace, workloads string, headers map[string]string) error {
 		Op:    "replace",
 		Path:  "/" + strings.Join(append(depth, "spec"), "/"),
 		Value: templateSpec.Spec,
+	}, {
+		Op:    "replace",
+		Path:  "/metadata/annotations/probe",
+		Value: "",
 	}})
 	if err != nil {
 		return err
 	}
-	//t := true
-	_, err = helper.Patch(object.Namespace, object.Name, types.JSONPatchType, bytes, &metav1.PatchOptions{
-		//Force: &t,
-	})
+	_, err = helper.Patch(object.Namespace, object.Name, types.JSONPatchType, bytes, &metav1.PatchOptions{})
+	}
 	return err
 }
@@ -198,21 +194,21 @@ func addEnvoyConfig(mapInterface v12.ConfigMapInterface, nodeID string, localTUNIP util.PodRouteConfig, headers map[string]string, port []int32) error {
 	return err
 }
 
-func removeEnvoyConfig(mapInterface v12.ConfigMapInterface, nodeID string, headers map[string]string) error {
+func removeEnvoyConfig(mapInterface v12.ConfigMapInterface, nodeID string, headers map[string]string) (bool, error) {
 	configMap, err := mapInterface.Get(context.TODO(), config.ConfigMapPodTrafficManager, metav1.GetOptions{})
 	if k8serrors.IsNotFound(err) {
-		return nil
+		return true, nil
 	}
 	if err != nil {
-		return err
+		return false, err
 	}
 	str, ok := configMap.Data[config.KeyEnvoy]
 	if !ok {
-		return errors.New("can not found value for key: envoy-config.yaml")
+		return false, errors.New("can not found value for key: envoy-config.yaml")
 	}
 	var v []*controlplane.Virtual
 	if err = yaml.Unmarshal([]byte(str), &v); err != nil {
-		return err
+		return false, err
 	}
 	for _, virtual := range v {
 		if nodeID == virtual.Uid {
@@ -224,20 +220,22 @@ func removeEnvoyConfig(mapInterface v12.ConfigMapInterface, nodeID string, headers map[string]string) (bool, error) {
 			}
 		}
 	}
+	var empty bool
 	// remove default
 	for i := 0; i < len(v); i++ {
 		if nodeID == v[i].Uid && len(v[i].Rules) == 0 {
 			v = append(v[:i], v[i+1:]...)
 			i--
+			empty = true
 		}
 	}
 	marshal, err := yaml.Marshal(v)
 	if err != nil {
-		return err
+		return false, err
 	}
 	configMap.Data[config.KeyEnvoy] = string(marshal)
 	_, err = mapInterface.Update(context.Background(), configMap, metav1.UpdateOptions{})
-	return err
+	return empty, err
 }
 
 func contains(a map[string]string, sub map[string]string) bool {
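The bool returned here is what lets UnPatchContainer decide whether the sidecar containers should actually be stripped: it only flips to true when the caller's rules were the last ones registered under the workload's node ID. A compressed sketch of that contract, with a simplified stand-in for the controlplane type:

```go
package main

import "fmt"

// Virtual is a simplified stand-in for controlplane.Virtual.
type Virtual struct {
	Uid   string
	Rules []map[string]string
}

// pruneNode drops the node's entry once it has no rules left and reports
// whether that happened, mirroring removeEnvoyConfig's "empty" result.
func pruneNode(v []*Virtual, nodeID string) ([]*Virtual, bool) {
	var empty bool
	for i := 0; i < len(v); i++ {
		if v[i].Uid == nodeID && len(v[i].Rules) == 0 {
			v = append(v[:i], v[i+1:]...)
			i--
			empty = true
		}
	}
	return v, empty
}

func main() {
	v := []*Virtual{{Uid: "deployments.apps.nginx"}}
	v, empty := pruneNode(v, "deployments.apps.nginx")
	fmt.Println(len(v), empty) // 0 true: last user left, safe to remove sidecars
}
```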
@@ -82,6 +82,17 @@ func CreateOutboundPod(clientset *kubernetes.Clientset, namespace string, trafficManagerIP string, nodeCIDR []*net.IPNet) (net.IP, error) {
 		s = append(s, ipNet.String())
 	}
 
+	var Resources = v1.ResourceRequirements{
+		Requests: map[v1.ResourceName]resource.Quantity{
+			v1.ResourceCPU:    resource.MustParse("128m"),
+			v1.ResourceMemory: resource.MustParse("256Mi"),
+		},
+		Limits: map[v1.ResourceName]resource.Quantity{
+			v1.ResourceCPU:    resource.MustParse("256m"),
+			v1.ResourceMemory: resource.MustParse("512Mi"),
+		},
+	}
+
 	deployment := &appsv1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: config.ConfigMapPodTrafficManager,
@@ -147,16 +158,7 @@ kubevpn serve -L tcp://:10800 -L tun://:8422?net=${TrafficManagerIP} --debug=true
 					ContainerPort: 10800,
 					Protocol:      v1.ProtocolTCP,
 				}},
-				Resources: v1.ResourceRequirements{
-					Requests: map[v1.ResourceName]resource.Quantity{
-						v1.ResourceCPU:    resource.MustParse("128m"),
-						v1.ResourceMemory: resource.MustParse("256Mi"),
-					},
-					Limits: map[v1.ResourceName]resource.Quantity{
-						v1.ResourceCPU:    resource.MustParse("256m"),
-						v1.ResourceMemory: resource.MustParse("512Mi"),
-					},
-				},
+				Resources:       Resources,
 				ImagePullPolicy: v1.PullIfNotPresent,
 				SecurityContext: &v1.SecurityContext{
 					Capabilities: &v1.Capabilities{
@@ -187,6 +189,7 @@ kubevpn serve -L tcp://:10800 -L tun://:8422?net=${TrafficManagerIP} --debug=true
 					},
 				},
 				ImagePullPolicy: v1.PullIfNotPresent,
+				Resources:       Resources,
 			},
 		},
 		RestartPolicy: v1.RestartPolicyAlways,
@@ -269,31 +272,26 @@ func InjectVPNSidecar(factory cmdutil.Factory, namespace, workloads string, config util.PodRouteConfig) error {
 	} else
 	// controllers
 	{
-		bytes, _ := json.Marshal([]struct {
-			Op    string      `json:"op"`
-			Path  string      `json:"path"`
-			Value interface{} `json:"value"`
-		}{{
+		// remove probe
+		removePatch, restorePatch := patch(origin, path)
+		p := []P{{
 			Op:    "replace",
 			Path:  "/" + strings.Join(append(path, "spec"), "/"),
 			Value: podTempSpec.Spec,
-		}})
+		}}
+		bytes, _ := json.Marshal(append(p, removePatch...))
 		_, err = helper.Patch(object.Namespace, object.Name, types.JSONPatchType, bytes, &metav1.PatchOptions{})
 		if err != nil {
 			log.Errorf("error while inject proxy container, err: %v, exiting...", err)
 			return err
 		}
-		removePatch, restorePatch := patch(origin, path)
-		_, err = helper.Patch(object.Namespace, object.Name, types.JSONPatchType, removePatch, &metav1.PatchOptions{})
-		if err != nil {
-			log.Warnf("error while remove probe of resource: %s %s, ignore, err: %v",
-				object.Mapping.GroupVersionKind.GroupKind().String(), object.Name, err)
-		}
 
 		RollbackFuncList = append(RollbackFuncList, func() {
 			if err = removeInboundContainer(factory, namespace, workloads); err != nil {
 				log.Error(err)
 			}
-			if _, err = helper.Patch(object.Namespace, object.Name, types.JSONPatchType, restorePatch, &metav1.PatchOptions{}); err != nil {
+			b, _ := json.Marshal(restorePatch)
+			if _, err = helper.Patch(object.Namespace, object.Name, types.JSONPatchType, b, &metav1.PatchOptions{}); err != nil {
 				log.Warnf("error while restore probe of resource: %s %s, ignore, err: %v",
 					object.Mapping.GroupVersionKind.GroupKind().String(), object.Name, err)
 			}
@@ -397,13 +395,13 @@ func CleanupUselessInfo(pod *v1.Pod) {
 	pod.SetOwnerReferences(nil)
 }
 
-func patch(spec v1.PodTemplateSpec, path []string) (removePatch []byte, restorePatch []byte) {
-	type P struct {
-		Op    string      `json:"op,omitempty"`
-		Path  string      `json:"path,omitempty"`
-		Value interface{} `json:"value,omitempty"`
-	}
-	var remove, restore []P
+type P struct {
+	Op    string      `json:"op,omitempty"`
+	Path  string      `json:"path,omitempty"`
+	Value interface{} `json:"value,omitempty"`
+}
+
+func patch(spec v1.PodTemplateSpec, path []string) (remove []P, restore []P) {
 	for i := range spec.Spec.Containers {
 		index := strconv.Itoa(i)
 		readinessPath := "/" + strings.Join(append(path, "spec", "containers", index, "readinessProbe"), "/")
@@ -436,7 +434,5 @@ func patch(spec v1.PodTemplateSpec, path []string) (remove []P, restore []P) {
 			Value: spec.Spec.Containers[i].StartupProbe,
 		})
 	}
-	removePatch, _ = json.Marshal(remove)
-	restorePatch, _ = json.Marshal(restore)
 	return
 }
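For reference, the pair that patch produces is plain RFC 6902: for every container it emits one entry per probe path, with the remove list (presumably) nulling the probes out and the restore list carrying the saved probe objects. A sketch of building the remove side for a single-container deployment (paths assume the usual /spec/template prefix; the helper name is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

type P struct {
	Op    string      `json:"op,omitempty"`
	Path  string      `json:"path,omitempty"`
	Value interface{} `json:"value,omitempty"`
}

// removeProbePatch builds the probe-removal entries for n containers,
// following the path layout used by patch() above.
func removeProbePatch(path []string, n int) []P {
	var remove []P
	for i := 0; i < n; i++ {
		index := strconv.Itoa(i)
		for _, probe := range []string{"readinessProbe", "livenessProbe", "startupProbe"} {
			remove = append(remove, P{
				Op:   "replace",
				Path: "/" + strings.Join(append(path, "spec", "containers", index, probe), "/"),
				// Value left nil: the probe is replaced with nothing.
			})
		}
	}
	return remove
}

func main() {
	b, _ := json.Marshal(removeProbePatch([]string{"spec", "template"}, 1))
	fmt.Println(string(b))
	// [{"op":"replace","path":"/spec/template/spec/containers/0/readinessProbe"}, ...]
}
```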
@@ -125,7 +125,6 @@ func TestPreCheck(t *testing.T) {
 	options := ConnectOptions{
 		KubeconfigPath: filepath.Join(homedir.HomeDir(), ".kube", "mesh"),
 		Namespace:      "naison-test",
-		Mode:           "reverse",
 		Workloads:      []string{"services/authors"},
 	}
 	options.InitClient()
@@ -20,27 +20,20 @@ import (
 	"strings"
 )
 
-func GetCIDR(clientset *kubernetes.Clientset, namespace string) ([]*net.IPNet, error) {
-	var CIDRList []*net.IPNet
-	// get pod CIDR from node spec
+func GetCIDRFromResource(clientset *kubernetes.Clientset, namespace string) ([]*net.IPNet, error) {
+	var list []*net.IPNet
+	// (1) get pod CIDR from node spec
 	nodeList, err := clientset.CoreV1().Nodes().List(context.TODO(), v1.ListOptions{})
 	if err == nil {
-		var podCIDRs = sets.NewString()
 		for _, node := range nodeList.Items {
-			if node.Spec.PodCIDRs != nil {
-				podCIDRs.Insert(node.Spec.PodCIDRs...)
-			}
-			if len(node.Spec.PodCIDR) != 0 {
-				podCIDRs.Insert(node.Spec.PodCIDR)
-			}
-		}
-		for _, podCIDR := range podCIDRs.List() {
-			if _, CIDR, err := net.ParseCIDR(podCIDR); err == nil {
-				CIDRList = append(CIDRList, CIDR)
+			for _, c := range sets.NewString(node.Spec.PodCIDRs...).Insert(node.Spec.PodCIDR).List() {
+				_, cidr, _ := net.ParseCIDR(c)
+				if cidr != nil {
+					list = append(list, cidr)
+				}
 			}
 		}
 	}
-	// get pod CIDR from pod ip, why doing this: notice that node's pod cidr is not correct in minikube
+
+	// (2) get pod CIDR from pod ip, why doing this: notice that node's pod cidr is not correct in minikube
 	// ➜  ~ kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}'
 	//10.244.0.0/24%
 	// ➜  ~ kubectl get pods -o=custom-columns=podIP:.status.podIP
@@ -55,29 +48,27 @@ func GetCIDRFromResource(clientset *kubernetes.Clientset, namespace string) ([]*net.IPNet, error) {
 	//172.17.0.3
 	//172.17.0.7
 	//172.17.0.2
-	podList, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), v1.ListOptions{})
-	if err == nil {
-		for _, pod := range podList.Items {
-			if pod.Spec.HostNetwork {
-				continue
-			}
-			if ip := net.ParseIP(pod.Status.PodIP); ip != nil {
-				var contain bool
-				for _, CIDR := range CIDRList {
-					if CIDR.Contains(ip) {
-						contain = true
-						break
-					}
-				}
-				if !contain {
-					mask := net.CIDRMask(24, 32)
-					CIDRList = append(CIDRList, &net.IPNet{IP: ip.Mask(mask), Mask: mask})
-				}
+	podList, _ := clientset.CoreV1().Pods(namespace).List(context.TODO(), v1.ListOptions{})
+	for _, pod := range podList.Items {
+		if pod.Spec.HostNetwork {
+			continue
+		}
+		if ip := net.ParseIP(pod.Status.PodIP); ip != nil {
+			var contain bool
+			for _, cidr := range list {
+				if cidr.Contains(ip) {
+					contain = true
+					break
+				}
+			}
+			if !contain {
+				mask := net.CIDRMask(24, 32)
+				list = append(list, &net.IPNet{IP: ip.Mask(mask), Mask: mask})
 			}
 		}
 	}
 
-	// get service CIDR
+	// (3) get service CIDR
 	defaultCIDRIndex := "The range of valid IPs is"
 	_, err = clientset.CoreV1().Services(namespace).Create(context.TODO(), &v12.Service{
 		ObjectMeta: v1.ObjectMeta{GenerateName: "foo-svc-"},
@@ -86,36 +77,34 @@ func GetCIDRFromResource(clientset *kubernetes.Clientset, namespace string) ([]*net.IPNet, error) {
 	if err != nil {
 		idx := strings.LastIndex(err.Error(), defaultCIDRIndex)
 		if idx != -1 {
-			_, cidr, err := net.ParseCIDR(strings.TrimSpace(err.Error()[idx+len(defaultCIDRIndex):]))
-			if err == nil {
-				CIDRList = append(CIDRList, cidr)
+			_, cidr, _ := net.ParseCIDR(strings.TrimSpace(err.Error()[idx+len(defaultCIDRIndex):]))
+			if cidr != nil {
+				list = append(list, cidr)
 			}
 		}
 	} else {
-		serviceList, err := clientset.CoreV1().Services(namespace).List(context.TODO(), v1.ListOptions{})
-		if err == nil {
-			for _, service := range serviceList.Items {
-				if ip := net.ParseIP(service.Spec.ClusterIP); ip != nil {
-					var contain bool
-					for _, CIDR := range CIDRList {
-						if CIDR.Contains(ip) {
-							contain = true
-							break
-						}
-					}
-					if !contain {
-						mask := net.CIDRMask(16, 32)
-						CIDRList = append(CIDRList, &net.IPNet{IP: ip.Mask(mask), Mask: mask})
-					}
+		serviceList, _ := clientset.CoreV1().Services(namespace).List(context.TODO(), v1.ListOptions{})
+		for _, service := range serviceList.Items {
+			if ip := net.ParseIP(service.Spec.ClusterIP); ip != nil {
+				var contain bool
+				for _, CIDR := range list {
+					if CIDR.Contains(ip) {
+						contain = true
+						break
+					}
+				}
+				if !contain {
+					mask := net.CIDRMask(24, 32)
+					list = append(list, &net.IPNet{IP: ip.Mask(mask), Mask: mask})
 				}
 			}
 		}
 	}
 
-	// remove duplicate CIDR
+	// (4) remove duplicate CIDR
 	result := make([]*net.IPNet, 0)
 	set := sets.NewString()
-	for _, cidr := range CIDRList {
+	for _, cidr := range list {
 		if !set.Has(cidr.String()) {
 			set.Insert(cidr.String())
 			result = append(result, cidr)
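The /24 fallback above is a heuristic worth a second look: when a pod or service IP is not covered by any known CIDR, the code masks the IP down to a 24-bit network and records that synthetic range. A tiny sketch of the masking step:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// A pod IP that no discovered CIDR contains (the minikube case from the comments above).
	ip := net.ParseIP("172.17.0.7")
	mask := net.CIDRMask(24, 32) // assume a /24 around the observed IP
	cidr := &net.IPNet{IP: ip.Mask(mask), Mask: mask}
	fmt.Println(cidr) // 172.17.0.0/24
}
```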
@@ -127,18 +116,18 @@ func GetCidrFromCNI(clientset *kubernetes.Clientset, restclient *rest.RESTClient, restconfig *rest.Config, namespace string) ([]*net.IPNet, error) {
 	return result, nil
 }
 
-// todo use patch to update this pod
 func GetCidrFromCNI(clientset *kubernetes.Clientset, restclient *rest.RESTClient, restconfig *rest.Config, namespace string) ([]*net.IPNet, error) {
+	var name = "cni-net-dir-kubevpn"
 	hostPathType := v12.HostPathDirectoryOrCreate
 	pod := &v12.Pod{
 		ObjectMeta: v1.ObjectMeta{
-			Name:      config.ConfigMapPodTrafficManager,
+			Name:      name,
 			Namespace: namespace,
 		},
 		Spec: v12.PodSpec{
 			Volumes: []v12.Volume{
 				{
-					Name: "cni-net-dir",
+					Name: name,
 					VolumeSource: v12.VolumeSource{
 						HostPath: &v12.HostPathVolumeSource{
 							Path: config.DefaultNetDir,
@@ -149,7 +138,7 @@ func GetCidrFromCNI(clientset *kubernetes.Clientset, restclient *rest.RESTClient, restconfig *rest.Config, namespace string) ([]*net.IPNet, error) {
 			},
 			Containers: []v12.Container{
 				{
-					Name:    config.ContainerSidecarVPN,
+					Name:    name,
 					Image:   "guosen-dev.cargo.io/epscplibrary/kubevpn:latest",
 					Command: []string{"tail", "-f", "/dev/null"},
 					Resources: v12.ResourceRequirements{
@@ -164,7 +153,7 @@ func GetCidrFromCNI(clientset *kubernetes.Clientset, restclient *rest.RESTClient, restconfig *rest.Config, namespace string) ([]*net.IPNet, error) {
 					},
 					VolumeMounts: []v12.VolumeMount{
 						{
-							Name:      "cni-net-dir",
+							Name:      name,
 							ReadOnly:  true,
 							MountPath: config.DefaultNetDir,
 						},
@@ -176,15 +165,18 @@ func GetCidrFromCNI(clientset *kubernetes.Clientset, restclient *rest.RESTClient, restconfig *rest.Config, namespace string) ([]*net.IPNet, error) {
 	}
-	get, err := clientset.CoreV1().Pods(pod.Namespace).Get(context.Background(), pod.Name, v1.GetOptions{})
-	if k8serrors.IsNotFound(err) || get.Status.Phase != v12.PodRunning {
-		create, err := clientset.CoreV1().Pods(namespace).Create(context.Background(), pod, v1.CreateOptions{})
-		if err != nil {
-			return nil, err
-		}
-		defer clientset.CoreV1().Pods(namespace).Delete(context.TODO(), create.Name, v1.DeleteOptions{
-			GracePeriodSeconds: pointer.Int64(0),
-		})
+	var deleteFunc = func() {
+		_ = clientset.CoreV1().Pods(namespace).Delete(context.Background(), pod.Name, v1.DeleteOptions{GracePeriodSeconds: pointer.Int64(0)})
+	}
+	// delete pod anyway
+	deleteFunc()
+	pod, err = clientset.CoreV1().Pods(namespace).Create(context.Background(), pod, v1.CreateOptions{})
+	if err != nil {
+		return nil, err
+	}
+	defer deleteFunc()
 	err = WaitPod(clientset.CoreV1().Pods(namespace), v1.ListOptions{
-		FieldSelector: fields.OneTermEqualSelector("metadata.name", create.Name).String(),
+		FieldSelector: fields.OneTermEqualSelector("metadata.name", pod.Name).String(),
 	}, func(pod *v12.Pod) bool {
 		return pod.Status.Phase == v12.PodRunning
 	})
@@ -223,5 +215,9 @@ func GetCidrFromCNI(clientset *kubernetes.Clientset, restclient *rest.RESTClient, restconfig *rest.Config, namespace string) ([]*net.IPNet, error) {
 		}
 	}
 
+	if len(result) == 0 {
+		return nil, fmt.Errorf("can not found any cidr")
+	}
+
 	return result, nil
 }
@@ -198,7 +198,6 @@ func Shell(clientset *kubernetes.Clientset, restclient *rest.RESTClient, config *rest.Config, namespace, podName string) (string, error) {
 		Stderr: StreamOptions.ErrOut != nil,
 		TTY:    tt.Raw,
 	}, scheme.ParameterCodec)
-	fmt.Println(req.URL())
 	return Executor.Execute("POST", req.URL(), config, StreamOptions.In, StreamOptions.Out, StreamOptions.ErrOut, tt.Raw, sizeQueue)
 }
@@ -45,6 +45,14 @@ spec:
         - containerPort: 9080
         securityContext:
           runAsUser: 1000
+        resources:
+          requests:
+            memory: "64Mi"
+            cpu: "250m"
+          limits:
+            memory: "128Mi"
+            cpu: "500m"
+
 ---
 # Ratings service
 apiVersion: v1
@@ -93,6 +101,13 @@ spec:
         - containerPort: 9080
         securityContext:
           runAsUser: 1000
+        resources:
+          requests:
+            memory: "64Mi"
+            cpu: "250m"
+          limits:
+            memory: "128Mi"
+            cpu: "500m"
 ---
 # Reviews service
 apiVersion: v1
@@ -149,6 +164,13 @@ spec:
           mountPath: /opt/ibm/wlp/output
         securityContext:
           runAsUser: 1000
+        resources:
+          requests:
+            memory: "64Mi"
+            cpu: "250m"
+          limits:
+            memory: "128Mi"
+            cpu: "500m"
       volumes:
       - name: wlp-output
         emptyDir: { }
@@ -205,6 +227,13 @@ spec:
           mountPath: /tmp
         securityContext:
           runAsUser: 1000
+        resources:
+          requests:
+            memory: "64Mi"
+            cpu: "250m"
+          limits:
+            memory: "128Mi"
+            cpu: "500m"
       volumes:
       - name: tmp
         emptyDir: { }