Compare commits

...

207 Commits

Author SHA1 Message Date
naison
fcbe2d64f7 chore: add ut for center install 2025-06-01 18:42:32 +08:00
fengcaiwen
a0c0860051 hotfix: fix center install cause mutate webhook not works 2025-06-01 18:39:02 +08:00
wencaiwulue
e374d6b51d feat: update krew index version to refs/tags/v2.7.12 2025-05-23 11:46:30 +08:00
kubenetworks
9703a12bc2 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-05-23 11:46:18 +08:00
naison
f9f52d1001 Merge pull request #612
ut: fix ut
2025-05-23 10:53:37 +08:00
naison
51c16989fe ut: fix ut 2025-05-23 02:52:37 +00:00
naison
75c609211b refactor: use informer to list&watch pod&service ip for adding to route table (#610) 2025-05-23 10:09:06 +08:00
naison
6d545dc5c9 hotfix: remove cidr if contains api-server ip 2025-05-20 22:12:19 +08:00
naison
b17da3cbcb feat: update krew index version to refs/tags/v2.7.11 (#607) 2025-05-18 17:32:49 +08:00
naison
d1108ebd86 Update charts/index.yaml (#608) 2025-05-18 17:32:35 +08:00
naison
792839a2d4 feat: support dump service into hosts in center cluster mode (#605) 2025-05-18 16:20:34 +08:00
fengcaiwen
f493931b41 hotfix: remove job before install 2025-05-18 16:20:13 +08:00
wencaiwulue
7df065ef93 feat: update krew index version to refs/tags/v2.7.10 2025-05-14 21:23:16 +08:00
kubenetworks
c265b3581c Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-05-14 21:23:03 +08:00
naison
f802e03d01 hotfix: add heartbeat to manager in the pod 2025-05-14 20:22:57 +08:00
wencaiwulue
c08cb461dd feat: update krew index version to refs/tags/v2.7.9 2025-05-12 17:18:47 +08:00
kubenetworks
1a2649a02a Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-05-12 17:18:34 +08:00
naison
facd6bdb3d hotfix: fix create temp kubeconfig but name container path separator (#599) 2025-05-12 16:28:15 +08:00
naison
a1117dee62 hotfix: handle not found route packet with gVisor instead of drop it 2025-05-12 15:49:40 +08:00
naison
b28eaef6a7 chore(mod): upgrade purego version to v0.8.3 2025-05-12 15:47:48 +08:00
naison
46aebef01f refactor: remove temp kubeconfig before daemon quit 2025-05-12 15:46:21 +08:00
naison
3791f48737 hotfix: fix create temp kubeconfig 2025-05-12 15:32:02 +08:00
wencaiwulue
fc76b70713 feat: update krew index version to refs/tags/v2.7.8 2025-05-11 00:17:22 +08:00
kubenetworks
e990dc1d0f Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-05-11 00:16:45 +08:00
naison
d636449073 feat: set read/write timeout to 60s for remote tcp conn (#590) 2025-05-10 23:02:31 +08:00
fengcaiwen
e85e1a6c40 refactor: show port-forward log 2025-05-10 18:05:44 +08:00
wencaiwulue
40d09716c4 feat: update krew index version to refs/tags/v2.7.7 2025-05-09 14:44:31 +08:00
kubenetworks
63792172bd Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-05-09 14:44:17 +08:00
naison
ca6a2be70f refactor: optimize get pod/service network cidr logic (#585) 2025-05-09 13:06:32 +08:00
naison
e21fc8cda9 hotfix: duplicated definition of symbol dlopen on go1.23.9 2025-05-09 13:04:45 +08:00
wencaiwulue
1f4698c6f8 feat: update krew index version to refs/tags/v2.7.6 2025-05-08 10:09:00 +08:00
kubenetworks
efea780edf Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-05-08 10:08:49 +08:00
wencaiwulue
bdb21f8964 feat: update krew index version to refs/tags/v2.7.5 2025-05-07 17:00:00 +08:00
naison
e33d2f1928 hotfix: fix init dir 2025-05-07 16:08:56 +08:00
wencaiwulue
e6df115933 feat: update krew index version to refs/tags/v2.7.5 2025-05-07 10:36:28 +08:00
kubenetworks
549e56cd05 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-05-07 10:36:17 +08:00
fengcaiwen
54ed2b711f hotfix: fix init dir permission deny 2025-05-07 09:12:17 +08:00
wencaiwulue
56b81574ac feat: update krew index version to refs/tags/v2.7.4 2025-05-07 08:47:19 +08:00
kubenetworks
ce2b7a010e Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-05-07 08:47:05 +08:00
fengcaiwen
5df0c3ffdc hotfix: fix init dir permission deny 2025-05-07 00:17:35 +08:00
naison
8b0e87592a hotfix: fix init dir permission deny (#573) 2025-05-07 00:01:44 +08:00
naison
31a42c1fa7 feat: update krew index version to refs/tags/v2.7.3 (#571) 2025-05-06 23:55:53 +08:00
naison
ee0957a5c9 Update charts/index.yaml (#572) 2025-05-06 23:55:40 +08:00
naison
206d74c331 feat: use dns query as port-forward health check (#570) 2025-05-06 22:20:15 +08:00
naison
53ed72dee3 Merge pull request #567 from kubenetworks/refactor/refactor-code
refactor: refactor code
2025-04-29 22:16:59 +08:00
fengcaiwen
323235f268 refactor: optimize code 2025-04-29 21:53:34 +08:00
fengcaiwen
6af6622bd3 refactor: change server log level to info 2025-04-29 21:50:08 +08:00
fengcaiwen
18ef72fc20 refactor: forward only one port 2025-04-29 21:48:14 +08:00
fengcaiwen
fe08448249 refactor: split user and root daemon log 2025-04-29 21:40:46 +08:00
fengcaiwen
ebaa4098f1 refactor: change temp kubeconfig to ~/.kubevpn/tmp 2025-04-29 21:39:45 +08:00
fengcaiwen
9ba873494f feat: add heartbeats ping pod ip 2025-04-29 21:34:58 +08:00
naison
da40f3315b Merge pull request #566 from kubenetworks/hotfix/fix-bugs
hotfix: fix bugs
2025-04-27 23:19:40 +08:00
fengcaiwen
c4540b1930 refactor: use tcp conn instead of packet conn 2025-04-27 23:03:45 +08:00
fengcaiwen
a6ec321e46 hotfix: cmp running pod image tag and client version 2025-04-27 23:02:34 +08:00
fengcaiwen
79f8aca7df hotfix: close ssh session 2025-04-27 23:02:06 +08:00
fengcaiwen
6edfc3127d hotfix: quit sudo daemon before user daemon 2025-04-27 23:01:27 +08:00
naison
bed0a9168c Update charts/index.yaml (#564)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-04-25 23:40:24 +08:00
naison
d5ee35bfa8 feat: update krew index version to refs/tags/v2.7.2 (#563)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-04-25 23:39:56 +08:00
naison
9661a122bd refactor: optimize code (#561) 2025-04-25 19:37:03 +08:00
naison
28657e3832 refactor: remove deprecated options of config flags (#560) 2025-04-24 22:44:20 +08:00
naison
6a8a197f48 hotfix: close ssh client if ctx done (#559) 2025-04-24 22:41:24 +08:00
naison
31186fc1d9 refactor: only ssh jump in user daemon (#558) 2025-04-24 22:39:03 +08:00
naison
fca3baf47e refactor: optimize code (#557) 2025-04-23 15:00:00 +08:00
naison
1cae5d270b refactor: optimize ssh logic (#555) 2025-04-21 22:19:31 +08:00
naison
a3556a263d refactor: add additional [2]byte for packet length (#554) 2025-04-21 21:51:01 +08:00
naison
dd80717d8d refactor: return error if get nil daemon client (#553) 2025-04-20 15:49:03 +08:00
naison
537b2940fe perf: route packet by each tcp conn (#548) 2025-04-19 19:14:39 +08:00
naison
9aae88d54b hotfix: set recv/send buffer size 1024k for adding ip to route table on macos (#552) 2025-04-19 15:35:43 +08:00
naison
100a8df723 refactor: revert pr #517 (#551) 2025-04-19 12:35:09 +08:00
naison
48e30b4344 refactor: use go workspace for syncthing gui (#549) 2025-04-19 12:09:06 +08:00
naison
c9f1ce6522 chore: upgrade coredns version (#550) 2025-04-19 10:06:56 +08:00
naison
c42e3475f9 Update charts/index.yaml (#547)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-04-15 23:36:27 +08:00
naison
4fb338b5fc feat: update krew index version to refs/tags/v2.7.1 (#546)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-04-15 23:36:09 +08:00
naison
15243b3935 hotfix: remove closed conn from route map (#545) 2025-04-15 21:33:41 +08:00
naison
f0f9459976 hotfix: set mtu on windows (#544) 2025-04-15 21:32:47 +08:00
naison
ee7d5fa6f9 Update charts/index.yaml (#538) 2025-04-12 14:04:52 +08:00
naison
e393f8371e feat: update krew index version to refs/tags/v2.7.0 (#537)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-04-12 14:04:23 +08:00
naison
ca333fcdaf feat: encrypt with tls 1.3 (#522) 2025-04-12 12:30:05 +08:00
naison
7e4e9e1e0d refactor: add more log detect connect namespace (#536) 2025-04-12 10:53:27 +08:00
naison
58ee2df1a3 docs: add readme.md for helm charts (#534) 2025-04-11 22:06:00 +08:00
naison
15200f1caf refactor: add more log (#533)
* feat: add more log
2025-04-11 21:12:19 +08:00
naison
23baab449c refactor: optimize code (#531) 2025-04-11 19:13:06 +08:00
naison
0ddcaa8acc hotfix: fix bug (#530) 2025-04-11 19:12:15 +08:00
naison
0c122473ce hotfix: fix parse dig command output (#529) 2025-04-11 18:50:14 +08:00
naison
d08f74a57e hotfix: optimize code (#528)
* hotfix: optimize code
2025-04-10 22:53:28 +08:00
naison
cd66bb7907 feat: add log if drop packet (#527)
* feat: add log if drop packet
2025-04-09 22:19:37 +08:00
naison
f303616554 hotfix: fix []byte leak (#525) 2025-04-09 21:08:33 +08:00
naison
3973b85d25 hotfix: remove label (#524) 2025-04-08 22:02:07 +08:00
naison
4fd1f014bd refactor: adjust log level (#523) 2025-04-08 22:01:06 +08:00
naison
fe62cf6c4d hotfix: install missing command dig (#521) 2025-04-07 13:00:13 +08:00
naison
c5900d070c Update charts/index.yaml (#520)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-04-06 21:17:57 +08:00
naison
d84ca66cfb feat: update krew index version to refs/tags/v2.6.0 (#519)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-04-06 21:17:41 +08:00
naison
60c3030e65 hotfix: use echo instead of sysctl to set ipv4 ip_forward feature (#518) 2025-04-06 18:34:34 +08:00
naison
ea574a756b feat: support gcp auth (#517)
* feat: support gcp auth
2025-04-06 17:36:10 +08:00
naison
e8735a68be refactor: optimize logic (#515)
* refactor: optimize logic
2025-04-05 21:48:18 +08:00
naison
d55d290677 feat: update krew index version to refs/tags/v2.5.1 (#513)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-04-04 00:20:33 +08:00
naison
45435bcc48 Update charts/index.yaml (#514)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-04-04 00:04:07 +08:00
naison
dbe9f91ee0 hotfix: ut (#512)
* hotfix: ut
2025-04-03 23:00:03 +08:00
naison
b3d2e1e838 refactor: optimize code (#511) 2025-04-03 21:41:35 +08:00
yuyicai
fa0b343401 feat: change dockerfile (#491)
* feat: change dockerfile

Signed-off-by: yuyicai <yuyicai@hotmail.com>

* chore: change makefile for container test

Signed-off-by: yuyicai <yuyicai@hotmail.com>

* ut

* feat: install openssl

Signed-off-by: yuyicai <yuyicai@hotmail.com>

---------

Signed-off-by: yuyicai <yuyicai@hotmail.com>
Co-authored-by: naison <895703375@qq.com>
2025-04-03 21:10:31 +08:00
naison
a1bb338cdb refactor: optimize code (#510)
* refactor: rename
2025-04-03 20:51:55 +08:00
naison
dbc9df070b feat: add options netstack to helm charts (#509) 2025-04-03 20:45:12 +08:00
naison
804708aabe feat: add options --connect-namespace to proxy and dev mode (#508) 2025-04-03 20:44:59 +08:00
naison
21087fc708 hotfix: fix upgrade bug (#507)
* hotfix: fix upgrade bug
2025-04-03 20:44:43 +08:00
naison
94db7846d8 hotfix: fix detect helm ns but still use -n namespace (#506)
* hotfix: fix detect helm ns but still use -n namespace
2025-04-02 19:20:31 +08:00
naison
e205b77e41 Update charts/index.yaml (#505)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-03-31 14:02:55 +08:00
naison
2927261390 feat: update krew index version to refs/tags/v2.5.0 (#504)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-03-31 14:02:40 +08:00
naison
8f37488207 hotfix: fix upgrade logic (#503) 2025-03-31 12:50:10 +08:00
naison
d05a53a77f Update charts/index.yaml (#502)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-03-30 22:17:12 +08:00
naison
a2df9f7b59 feat: update krew index version to refs/tags/v2.4.3 (#501)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-03-30 22:14:33 +08:00
naison
cd68b1fb00 hotfix: gen envoy rule id by ns and resource uid (#500)
* hotfix: gen envoy rule id by ns and uid
2025-03-30 20:57:11 +08:00
naison
208f607f03 hotfix: fix dns slow (#499) 2025-03-30 11:56:57 +08:00
naison
116a1f1983 feat: detect namespace kubevpn installed by helm (#498) 2025-03-30 11:54:40 +08:00
naison
d191c927f4 feat: add helm to go mod (#497) 2025-03-30 11:52:21 +08:00
naison
a030dc582b feat: support connect one namespace but proxy workload in another namespace (#496) 2025-03-30 11:50:11 +08:00
naison
08bcbe1611 refactor: split connect and proxy mode (#495) 2025-03-30 11:46:37 +08:00
naison
fb428403a2 hotfix: set get running pod timeout 10s to 5s (#494) 2025-03-30 11:43:37 +08:00
naison
4f4bbd79f2 chore: optimize ut (#493)
* chore: optimize ut
2025-03-25 22:33:07 +08:00
naison
1ec3ca4637 hotfix: fix clone mode bug (#492) 2025-03-24 21:53:02 +08:00
yuyicai
484a5cafe4 Merge pull request #490 from kubenetworks/chart-releaser-qqesqc5oa54qow4n
Update index.yaml
2025-03-23 21:25:11 +08:00
yuyicai
b62a6b0185 Merge pull request #489 from kubenetworks/feat/update-krew-index-version
feat: update krew index version to refs/tags/v2.4.2
2025-03-23 21:24:34 +08:00
kubenetworks
90898c8047 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-03-23 12:53:36 +00:00
yuyicai
c06daf68e8 feat: update krew index version to refs/tags/v2.4.2 2025-03-23 12:52:53 +00:00
naison
d65da7ba66 chore(ut): add more ut (#475)
* chore: add more ut
2025-03-23 19:32:45 +08:00
naison
2ac187eb64 hotfix: delete old pod (#488) 2025-03-23 17:13:11 +08:00
naison
b46f7a9877 refactor: divide log to session and backend (#487)
* refactor: divide log to session and backend
2025-03-23 13:59:10 +08:00
naison
a5622b9439 chore: update sample bookinfo resource (#486) 2025-03-23 12:40:14 +08:00
yuyicai
0e8f655673 Merge pull request #483 from kubenetworks/docs-install-from-script
docs: install-from-script
2025-03-18 21:37:25 +08:00
yuyicai
f7250649af docs: install-from-script
Signed-off-by: yuyicai <yuyicai@hotmail.com>
2025-03-18 20:54:47 +08:00
yuyicai
cbaff5e623 Merge pull request #481 from kubenetworks/install-kubevpn-by-shell-script
chore: install kubevpn by shell script
2025-03-17 22:30:02 +08:00
yuyicai
6aee9f0882 chore: install kubevpn by shell script
Signed-off-by: yuyicai <yuyicai@hotmail.com>
2025-03-17 22:09:17 +08:00
yuyicai
1f63a15e01 Merge pull request #480 from kubenetworks/update-bookinfo-to-v1-20-2
chore: upgrade bookinfo to v1.20.2, add arm64 image
2025-03-17 22:05:44 +08:00
yuyicai
a65c26e446 chore: upgrade bookinfo to v1.20.2, add arm64 image
Signed-off-by: yuyicai <yuyicai@hotmail.com>
2025-03-17 21:04:03 +08:00
naison
f5566f6ec2 feat: update krew index version to refs/tags/v2.4.1 (#478)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-03-16 17:55:36 +08:00
naison
543e2d716d Update charts/index.yaml (#479)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-03-16 17:55:15 +08:00
naison
f267443c61 hotfix: delete pod force if check newer spec but have old env (#477) 2025-03-16 17:05:30 +08:00
naison
b6f90812f7 hotfix: restore service target port while leave resource in gvisor mode (#476) 2025-03-16 17:04:55 +08:00
naison
b5ea7b2016 chore: update github action (#474) 2025-03-14 22:24:32 +08:00
naison
30f904d7bb feat: update krew index version to refs/tags/v2.4.0 (#473)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-03-14 22:17:20 +08:00
kubenetworks
fde001009e Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-03-14 14:16:56 +00:00
naison
6908991461 refactor: optimize code (#472) 2025-03-14 21:12:11 +08:00
naison
031c2134d8 hotfix: also cleanup in user daemon if error occurs (#471) 2025-03-14 21:02:19 +08:00
naison
77570575ca refactor: upgrade deploy image if client version is incompatibility with image tag (#470) 2025-03-14 20:52:22 +08:00
Spongebob
a70ce62762 feat: add snap store release support (#469) 2025-03-14 19:23:41 +08:00
naison
5edd70452c feat: batch write route msg to unix socket: 'route ip+net: sysctl: no buffer space available' (#466) 2025-03-13 11:07:18 +08:00
naison
24d16b2791 hotfix: fix bugs (#468) 2025-03-13 11:04:49 +08:00
naison
6820dbb30d hotfix: do reverse operation if operate cancelled (#465) 2025-03-12 18:36:12 +08:00
naison
ee26880bf5 refactor: use unix library to add route table and setup ip addr instead of use command route or ifconfig (#463) 2025-03-12 13:08:59 +08:00
naison
05b76094f0 feat: support service type externalName (#464) 2025-03-12 00:26:00 +08:00
yuyicai
2e79a331b4 Merge pull request #461 from kubenetworks/proxy-tun-arch-image
docs: add proxy tun arch image
2025-03-11 11:28:13 +08:00
yuyicai
ec5efc8253 docs: add proxy tun arch image
Signed-off-by: yuyicai <yuyicai@hotmail.com>
2025-03-11 10:11:57 +08:00
naison
4547e84de9 Merge pull request #460 from kubenetworks/chore/update-bookinfo-resource
chore: update bookinfo resource
2025-03-10 22:00:30 +08:00
naison
f0694efeda Merge pull request #459 from kubenetworks/feat/extra-domain-support-ingress-record
feat: options extra-domain support ingress record
2025-03-10 19:39:18 +08:00
naison
8df6da1871 feat: options extra-domain support ingress record 2025-03-10 11:10:04 +00:00
yuyicai
ec7d939f8d Merge pull request #458 from kubenetworks/check-if-need-upgrade-server-image
feat: check if need to upgrade image
2025-03-09 16:28:03 +08:00
yuyicai
a682dfbc2c feat: check if need to upgrade image
Signed-off-by: yuyicai <yuyicai@hotmail.com>
2025-03-09 16:01:56 +08:00
naison
a16c1ef007 Merge pull request #457 from kubenetworks/feat/setup-testcase-on-windows
feat: use kind instead of minikube on macos
2025-03-09 11:11:45 +08:00
naison
ec88fd82f0 feat: use kind on macos 2025-03-09 02:44:25 +00:00
yuyicai
3457a79328 Merge pull request #456 from kubenetworks/change-default-container-image
feat: change default container image
2025-03-09 10:29:58 +08:00
yuyicai
2780f67dd6 feat: change default container image
Signed-off-by: yuyicai <yuyicai@hotmail.com>
2025-03-08 23:13:45 +08:00
naison
24b2195036 Merge pull request #455 from kubenetworks/chore/update-comment
chore: update comment
2025-03-07 17:06:47 +08:00
naison
d61d08694a chore: update comment 2025-03-07 09:04:10 +00:00
naison
f81c7ec3ce Merge pull request #454 from kubenetworks/feat/cmd-alias-support-env-KUBEVPNCONFIG
feat: cmd alias support env KUBEVPNCONFIG
2025-03-07 15:58:04 +08:00
naison
168db06979 feat: cmd alias support env KUBEVPNCONFIG 2025-03-07 07:57:06 +00:00
naison
8ad7463fc7 Merge pull request #453 from kubenetworks/hotfix/fix-cmd-ssh-npe
hotfix: fix cmd ssh resize terminal size npe
2025-03-07 15:38:18 +08:00
naison
8926577885 hotfix: fix cmd ssh resize terminal size npe 2025-03-07 07:37:39 +00:00
naison
0f94f58310 Merge pull request #452 from kubenetworks/refactor/optimize-code
refactor: optimize code
2025-03-07 10:54:40 +08:00
naison
210767d908 refactor: optimize code 2025-03-07 02:53:52 +00:00
yuyicai
ae9c23550f Merge pull request #451 from kubenetworks/build-ghcr-latest-container-image
chore: add ghcr.io latest container image
2025-03-07 09:29:36 +08:00
yuyicai
2f9a025f5b chore: add ghcr.io latest container image
Signed-off-by: yuyicai <yuyicai@hotmail.com>
2025-03-06 22:18:01 +08:00
yuyicai
4d5c4fa426 Merge pull request #449 from kubenetworks/tun-ip-cidr
feat: change tun ip cidr
2025-03-06 21:31:28 +08:00
fengcaiwen
3a4bfa9241 feat: panic if parse network cidr error 2025-03-06 09:14:13 +08:00
yuyicai
db09cbbb6e feat: update tun cidr for kubevpn-traffic-manager
Signed-off-by: yuyicai <yuyicai@hotmail.com>
2025-03-05 23:14:57 +08:00
yuyicai
a87cbf1e9a feat: change tun ip cidr
Signed-off-by: yuyicai <yuyicai@hotmail.com>
2025-03-05 22:27:43 +08:00
naison
547501fc41 Merge pull request #443 from kubenetworks/hotfix/fix-auto-upgrade-deploy-image
hotfix: fix auto upgrade deploy image
2025-02-25 22:25:00 +08:00
naison
7051f24313 hotfix: fix upgrade deploy image 2025-02-25 14:22:55 +00:00
naison
153fe3e5e7 Merge pull request #442 from kubenetworks/feat/update-krew-index-version
feat: update krew index version to refs/tags/v2.3.13
2025-02-23 22:32:32 +08:00
kubenetworks
78914e8765 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-02-23 14:30:35 +00:00
wencaiwulue
2fbfb080e0 feat: update krew index version to refs/tags/v2.3.13 2025-02-23 14:29:43 +00:00
naison
86585214d4 Merge pull request #438 from kubenetworks/hotfix/default-use-spdy-not-websocket-to-portforward
hotfix: default use spdy not websocket protocol to portforward
2025-02-23 21:42:21 +08:00
naison
867aefbc3a Merge pull request #441 from kubenetworks/hotfix/fix-daemon-process-unexpected-exit-on-linux
fix: fix daemon process unexpected exit on linux
2025-02-23 21:39:59 +08:00
fengcaiwen
2037d3b05f fix: fix daemon process unexpected exit on linux 2025-02-23 21:37:52 +08:00
naison
794fd861ba Merge pull request #440 from kubenetworks/hotfix/fix-podlabel-find-service-in-fargate-mode
use match not equal to find svc by pod label in fargate mode
2025-02-22 20:18:54 +08:00
naison
d10a4e3aef use match not equal to find svc by pod label in fargate mode 2025-02-22 12:00:56 +00:00
naison
5b39275f5b hotfix: default use spdy not websocket to portforward 2025-02-21 14:39:21 +00:00
naison
de38a35189 Revert "chore: upload charts to repo charts"
This reverts commit 2793ab20e6.
2025-02-13 10:40:33 +00:00
naison
04c0b33516 Merge pull request #436 from kubenetworks/feat/update-krew-index-version
feat: update krew index version to refs/tags/v2.3.12
2025-02-13 15:50:56 +08:00
kubenetworks
ffdefce23c Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-02-13 07:46:06 +00:00
wencaiwulue
2a3b4d89f7 feat: update krew index version to refs/tags/v2.3.12 2025-02-13 07:20:30 +00:00
naison
b1abafd7f4 Merge pull request #435 from kubenetworks/feat/add-cmd-image-copy
feat: add cmd image copy
2025-02-13 12:50:16 +08:00
naison
12f29f2528 Merge pull request #434 from kubenetworks/hotfix/fix-cmd-ssh-terminal-bug
hotfix: fix ssh terminal bug
2025-02-13 12:50:06 +08:00
naison
7f3f0305e4 feat: add cmd image copy 2025-02-13 04:48:39 +00:00
naison
c947472d47 hotfix: fix ssh terminal bug 2025-02-13 04:46:59 +00:00
naison
4013846cab Merge pull request #433 from kubenetworks/hotfix/use-default-krb5-config
hotfix: use default krb5 config and not cancel context after handle new local connection of PortmapUtil, otherwise ssh.client stop channel also closed
2025-02-13 11:48:14 +08:00
fengcaiwen
399bc4efe0 hotfix: not cancel context after handle new local connection of PortmapUtil, otherwise ssh.client stop channel also closed 2025-02-12 23:27:45 +08:00
fengcaiwen
24367b1b82 hotfix: use default krb5 config 2025-02-12 22:20:51 +08:00
naison
1a32d7a58e Merge pull request #432 from kubenetworks/chore/add-upload-charts-to-repo-charts
chore: upload charts to repo charts
2025-02-09 16:32:35 +08:00
naison
2793ab20e6 chore: upload charts to repo charts 2025-02-09 08:31:00 +00:00
naison
528ac55325 Merge pull request #431 from kubenetworks/chore/upgrade-go-mod-library
chore: upgrade go mod library
2025-02-09 11:19:10 +08:00
fengcaiwen
3896fd1642 chore: upgrade go mod library 2025-02-09 11:07:10 +08:00
naison
819b20bbdb Merge pull request #430 from kubenetworks/chore/upgrade-go-mod-library
chore: upgrade go mod library
2025-02-08 21:38:17 +08:00
fengcaiwen
2fc0bb3f0c chore: upgrade go mod library 2025-02-08 20:45:20 +08:00
naison
a6730613e7 Merge pull request #429 from kubenetworks/hotfix/add-platform-for-cmd-ssh
hotfix: add platform for cmd ssh
2025-02-08 20:12:06 +08:00
naison
3ad0b5d1a3 hotfix: add platform for cmd ssh 2025-02-08 12:04:25 +00:00
naison
3c2b7943b5 Merge pull request #427 from kubenetworks/feat/update-krew-index-version
feat: update krew index version to refs/tags/v2.3.11
2025-02-03 17:25:46 +08:00
kubenetworks
b2f5fc6ac1 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-02-03 09:24:54 +00:00
wencaiwulue
768e8b1931 feat: update krew index version to refs/tags/v2.3.11 2025-02-03 09:24:09 +00:00
6089 changed files with 610993 additions and 188402 deletions

View File

@@ -24,6 +24,8 @@ jobs:
uses: medyagh/setup-minikube@latest
with:
cache: true
cpus: 'max'
memory: 'max'
- name: Kubernetes info
run: |
@@ -33,13 +35,12 @@ jobs:
- name: Install demo bookinfo
run: |
minikube image load --remote istio/examples-bookinfo-details-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-ratings-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-reviews-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-productpage-v1:1.16.2
minikube image load --remote naison/authors:latest
minikube image load --remote nginx:latest
minikube image load --remote naison/kubevpn:test
minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-details-v1:1.20.2
minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-ratings-v1:1.20.2
minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-reviews-v1:1.20.2
minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-productpage-v1:1.20.2
minikube image load --remote ghcr.io/kubenetworks/authors:latest
minikube image load --remote ghcr.io/kubenetworks/nginx:latest
minikube image ls
eval $(minikube docker-env)
kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
@@ -57,8 +58,7 @@ jobs:
- name: Wait for pods reviews to be ready
run: |
kubectl wait pods -l app=reviews --for=condition=Ready --timeout=3600s
kubectl wait pods -l app=productpage --for=condition=Ready --timeout=3600s
kubectl wait --for=condition=Ready pods --all --timeout=3600s
kubectl get svc -A -o wide
kubectl get pod -A -o wide
kubectl get all -o wide

View File

@@ -220,4 +220,44 @@ jobs:
--charts-repo https://github.com/$owner/$repo \
--pages-branch master \
--pages-index-path charts/index.yaml \
--push
--pr
snapcraft:
runs-on: ubuntu-24.04
env:
SNAPCRAFT_STORE_CREDENTIALS: ${{ secrets.SNAPCRAFT_TOKEN }}
steps:
- name: Check out Git repository
uses: actions/checkout@v3
- name: Install Snapcraft
uses: samuelmeuli/action-snapcraft@v3
- name: Setup LXD
uses: canonical/setup-lxd@main
- name: Use Snapcraft
run: |
RELEASE_VERSION=${GITHUB_REF#refs/*/}
sed -i s#CRAFT_ARCH_BUILD_VERSION#$RELEASE_VERSION#g snap/snapcraft.yaml
snapcraft
snapcraft upload --release=stable kubevpn_${RELEASE_VERSION}_amd64.snap
snapcraft-arm:
runs-on: ubuntu-24.04-arm
env:
SNAPCRAFT_STORE_CREDENTIALS: ${{ secrets.SNAPCRAFT_TOKEN }}
steps:
- name: Check out Git repository
uses: actions/checkout@v3
- name: Install Snapcraft
uses: samuelmeuli/action-snapcraft@v3
- name: Setup LXD
uses: canonical/setup-lxd@main
- name: Use Snapcraft
run: |
RELEASE_VERSION=${GITHUB_REF#refs/*/}
sed -i s#CRAFT_ARCH_BUILD_VERSION#$RELEASE_VERSION#g snap/snapcraft.yaml
snapcraft
snapcraft upload --release=stable kubevpn_${RELEASE_VERSION}_arm64.snap

View File

@@ -18,9 +18,7 @@ jobs:
check-latest: true
- name: Push image to docker hub
run: |
echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USER }} --password-stdin
echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
docker buildx create --use
export VERSION=${{github.event.pull_request.head.sha}}
if [[ -z "$VERSION" ]]; then
export VERSION=${{ github.sha }}
@@ -43,6 +41,8 @@ jobs:
uses: medyagh/setup-minikube@latest
with:
cache: true
cpus: 'max'
memory: 'max'
- name: Kubernetes info
run: |
@@ -51,13 +51,12 @@ jobs:
kubectl get pods -n kube-system -o wide
- name: Install demo bookinfo
run: |
minikube image load --remote istio/examples-bookinfo-details-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-ratings-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-reviews-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-productpage-v1:1.16.2
minikube image load --remote naison/authors:latest
minikube image load --remote nginx:latest
minikube image load --remote naison/kubevpn:test
minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-details-v1:1.20.2
minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-ratings-v1:1.20.2
minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-reviews-v1:1.20.2
minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-productpage-v1:1.20.2
minikube image load --remote ghcr.io/kubenetworks/authors:latest
minikube image load --remote ghcr.io/kubenetworks/nginx:latest
minikube image ls
eval $(minikube docker-env)
kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
@@ -75,8 +74,7 @@ jobs:
- name: Wait for pods reviews to be ready
run: |
kubectl wait pods -l app=reviews --for=condition=Ready --timeout=3600s
kubectl wait pods -l app=productpage --for=condition=Ready --timeout=3600s
kubectl wait --for=condition=Ready pods --all --timeout=3600s
kubectl get svc -A -o wide
kubectl get pod -A -o wide
kubectl get all -o wide
@@ -105,16 +103,25 @@ jobs:
uses: docker/actions-toolkit/.github/actions/macos-setup-qemu@19ca9ade20f5da695f76a10988d6532058575f82
- name: Set up Docker
uses: crazy-max/ghaction-setup-docker@v3
uses: docker/setup-docker-action@v4
with:
daemon-config: |
{
"debug": true,
"features": {
"containerd-snapshotter": true
}
}
- uses: azure/setup-kubectl@v4
- name: Install minikube
run: |
set -x
docker version
brew install minikube
minikube start --driver=docker
minikube start --driver=docker --memory=max --cpus=max --wait=all --wait-timeout=60m
kubectl cluster-info
kubectl config view --flatten --raw
kubectl get pod -A -o wide
minikube kubectl -- get pod -A -o wide
- name: Kubernetes info
run: |
@@ -124,15 +131,6 @@ jobs:
- name: Install demo bookinfo
run: |
minikube image load --remote istio/examples-bookinfo-details-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-ratings-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-reviews-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-productpage-v1:1.16.2
minikube image load --remote naison/authors:latest
minikube image load --remote nginx:latest
minikube image load --remote naison/kubevpn:test
minikube image ls
eval $(minikube docker-env)
kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
- name: Build
@@ -148,8 +146,7 @@ jobs:
- name: Wait for pods reviews to be ready
run: |
kubectl wait pods -l app=reviews --for=condition=Ready --timeout=3600s
kubectl wait pods -l app=productpage --for=condition=Ready --timeout=3600s
kubectl wait --for=condition=Ready pods --all --timeout=3600s
kubectl get svc -A -o wide || true
kubectl get pod -A -o wide || true
kubectl get all -o wide || true
@@ -162,6 +159,8 @@ jobs:
windows:
runs-on: windows-latest
env:
VERSION: ${{ github.event.pull_request.head.sha || github.sha }}
needs: [ "image" ]
steps:
- uses: actions/checkout@v4
@@ -172,13 +171,29 @@ jobs:
go-version: '1.23'
- name: Set up Docker
uses: crazy-max/ghaction-setup-docker@v3
uses: docker/setup-docker-action@v4
with:
daemon-config: |
{
"debug": true,
"features": {
"containerd-snapshotter": true
}
}
- run: |
docker info --format '{{.OSType}}'
choco install kind
kind create cluster
kubectl cluster-info
kubectl config view --flatten --raw
- run: |
choco install minikube
minikube start --driver=docker
choco install make
- name: Build
run: make kubevpn-windows-amd64
run: |
make kubevpn-windows-amd64
./bin/kubevpn.exe version
./bin/kubevpn.exe status

View File

@@ -18,11 +18,11 @@ REPOSITORY ?= kubevpn
IMAGE ?= $(REGISTRY)/$(NAMESPACE)/$(REPOSITORY):$(VERSION)
IMAGE_LATEST ?= docker.io/naison/kubevpn:latest
IMAGE_GH ?= ghcr.io/kubenetworks/kubevpn:$(VERSION)
IMAGE_GH_LATEST ?= ghcr.io/kubenetworks/kubevpn:latest
# Setup the -ldflags option for go build here, interpolate the variable values
# add '-tag noassets' for syncthing gui
LDFLAGS=-tags noassets --ldflags "-s -w\
-X ${BASE}/pkg/config.Image=${IMAGE} \
LDFLAGS=--ldflags "-s -w\
-X ${BASE}/pkg/config.Image=${IMAGE_GH} \
-X ${BASE}/pkg/config.Version=${VERSION} \
-X ${BASE}/pkg/config.GitCommit=${GIT_COMMIT} \
-X ${BASE}/pkg/config.GitHubOAuthToken=${GitHubOAuthToken} \
@@ -86,16 +86,16 @@ kubevpn-linux-386:
.PHONY: container
container:
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE} -t ${IMAGE_LATEST} -t ${IMAGE_GH} -f $(BUILD_DIR)/Dockerfile --push .
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE} -t ${IMAGE_LATEST} -t ${IMAGE_GH} -t ${IMAGE_GH_LATEST} -f $(BUILD_DIR)/Dockerfile --push .
############################ build local
.PHONY: container-local
container-local: kubevpn-linux-amd64
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE_LATEST} -f $(BUILD_DIR)/local.Dockerfile --push .
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE_LATEST} -t ${IMAGE_GH_LATEST} -f $(BUILD_DIR)/local.Dockerfile --push .
.PHONY: container-test
container-test: kubevpn-linux-amd64
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE} -t ${IMAGE_GH} -f $(BUILD_DIR)/test.Dockerfile --push .
docker build -t ${IMAGE_GH} -f $(BUILD_DIR)/test.Dockerfile --push .
.PHONY: version
version:
@@ -107,7 +107,7 @@ gen:
.PHONY: ut
ut:
go test -tags=noassets -coverprofile=coverage.txt -coverpkg=./... -v ./... -timeout=60m
go test -p=1 -v -timeout=60m -coverprofile=coverage.txt -coverpkg=./... ./...
.PHONY: cover
cover: ut

View File

@@ -9,6 +9,7 @@
[![Releases][7]](https://github.com/kubenetworks/kubevpn/releases)
[![GoDoc](https://godoc.org/github.com/kubenetworks/kubevpn?status.png)](https://pkg.go.dev/github.com/wencaiwulue/kubevpn/v2)
[![codecov](https://codecov.io/gh/wencaiwulue/kubevpn/graph/badge.svg?token=KMDSINSDEP)](https://codecov.io/gh/wencaiwulue/kubevpn)
[![Snapcraft](https://snapcraft.io/kubevpn/badge.svg)](https://snapcraft.io/kubevpn)
[1]: https://img.shields.io/github/actions/workflow/status/kubenetworks/kubevpn/release.yml?logo=github
@@ -38,6 +39,8 @@ For instance, you have the flexibility to run your Kubernetes pod within a local
environment, volume, and network setup.
With KubeVPN, empower yourself to develop applications entirely on your local PC!
![arch](docs/en/images/kubevpn-proxy-tun-arch.svg)
## Content
1. [QuickStart](./README.md#quickstart)
@@ -47,12 +50,25 @@ With KubeVPN, empower yourself to develop applications entirely on your local PC
## QuickStart
### Install from script ( macOS / Linux)
```shell
curl -fsSL https://kubevpn.dev/install.sh | sh
```
### Install from [brew](https://brew.sh/) (macOS / Linux)
```shell
brew install kubevpn
```
### Install from [snap](https://snapcraft.io/kubevpn) (Linux)
```shell
sudo snap install kubevpn
```
### Install from [scoop](https://scoop.sh/) (Windows)
```shell
@@ -561,13 +577,13 @@ need to special parameter `--network` (inner docker) for sharing network and pid
Example:
```shell
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 naison/kubevpn:latest
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 ghcr.io/kubenetworks/kubevpn:latest
```
```shell
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 naison/kubevpn:latest
Unable to find image 'naison/kubevpn:latest' locally
latest: Pulling from naison/kubevpn
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 ghcr.io/kubenetworks/kubevpn:latest
Unable to find image 'ghcr.io/kubenetworks/kubevpn:latest' locally
latest: Pulling from ghcr.io/kubenetworks/kubevpn
9c704ecd0c69: Already exists
4987d0a976b5: Pull complete
8aa94c4fc048: Pull complete
@@ -578,8 +594,8 @@ ca82aef6a9eb: Pull complete
1fd9534c7596: Pull complete
588bd802eb9c: Pull complete
Digest: sha256:368db2e0d98f6866dcefd60512960ce1310e85c24a398fea2a347905ced9507d
Status: Downloaded newer image for naison/kubevpn:latest
WARNING: image with reference naison/kubevpn was found but does not match the specified platform: wanted linux/amd64, actual: linux/arm64
Status: Downloaded newer image for ghcr.io/kubenetworks/kubevpn:latest
WARNING: image with reference ghcr.io/kubenetworks/kubevpn was found but does not match the specified platform: wanted linux/amd64, actual: linux/arm64
root@5732124e6447:/app# kubevpn dev deployment/authors --headers user=naison --entrypoint sh
hostname is 5732124e6447
Starting connect
@@ -656,7 +672,7 @@ OK: 8 MiB in 19 packages
Hello world!/opt/microservices #
/opt/microservices # curl authors:9080/health -H "foo: bar"
>>Received request: GET /health from 223.254.0.109:57930
>>Received request: GET /health from 198.19.0.109:57930
Hello world!/opt/microservices #
/opt/microservices # curl localhost:9080/health
{"status":"Authors is healthy"}/opt/microservices # exit
@@ -677,10 +693,10 @@ during test, check what container is running
```text
➜ ~ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
1cd576b51b66 naison/authors:latest "sh" 4 minutes ago Up 4 minutes authors_default_kubevpn_6df5f
56a6793df82d nginx:latest "/docker-entrypoint.…" 4 minutes ago Up 4 minutes nginx_default_kubevpn_6df63
d0b3dab8912a naison/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
1cd576b51b66 naison/authors:latest "sh" 4 minutes ago Up 4 minutes authors_default_kubevpn_6df5f
56a6793df82d nginx:latest "/docker-entrypoint.…" 4 minutes ago Up 4 minutes nginx_default_kubevpn_6df63
d0b3dab8912a ghcr.io/kubenetworks/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
➜ ~
```
@@ -711,9 +727,7 @@ support OSI model layers 3 and above, protocols like `ICMP`, `TCP`, and `UDP`...
## Architecture
![arch.svg](docs/en/images/proxy-arch.svg)
Architecture can be found [here](/docs/en/Architecture.md)
and [website](https://www.kubevpn.cn/docs/architecture/connect).
[architecture](https://kubevpn.dev/docs/architecture/connect).
## Contributions

View File

@@ -9,6 +9,7 @@
[![Releases][7]](https://github.com/kubenetworks/kubevpn/releases)
[![GoDoc](https://godoc.org/github.com/kubenetworks/kubevpn?status.png)](https://pkg.go.dev/github.com/wencaiwulue/kubevpn/v2)
[![codecov](https://codecov.io/gh/wencaiwulue/kubevpn/graph/badge.svg?token=KMDSINSDEP)](https://codecov.io/gh/wencaiwulue/kubevpn)
[![Snapcraft](https://snapcraft.io/kubevpn/badge.svg)](https://snapcraft.io/kubevpn)
[1]: https://img.shields.io/github/actions/workflow/status/kubenetworks/kubevpn/release.yml?logo=github
@@ -33,6 +34,8 @@ KubeVPN 提供一个云原生开发环境。通过连接云端 kubernetes 网络
Docker
模拟 k8s pod runtime 将容器运行在本地 (具有相同的环境变量,磁盘和网络)。
![架构](docs/en/images/kubevpn-proxy-tun-arch.svg)
## 内容
1. [快速开始](./README_ZH.md#快速开始)
@@ -42,12 +45,24 @@ Docker
## 快速开始
### 使用脚本安装 ( macOS / Linux)
```shell
curl -fsSL https://kubevpn.dev/install.sh | sh
```
### 使用 [brew](https://brew.sh/) 安装 (macOS / Linux)
```shell
brew install kubevpn
```
### 使用 [snap](https://snapcraft.io/kubevpn) 安装 (Linux)
```shell
sudo snap install kubevpn
```
### 使用 [scoop](https://scoop.sh/) (Windows)
```shell
@@ -485,13 +500,13 @@ Created main container: authors_default_kubevpn_ff34b
例如:
```shell
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 naison/kubevpn:latest
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 ghcr.io/kubenetworks/kubevpn:latest
```
```shell
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 naison/kubevpn:latest
Unable to find image 'naison/kubevpn:latest' locally
latest: Pulling from naison/kubevpn
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 ghcr.io/kubenetworks/kubevpn:latest
Unable to find image 'ghcr.io/kubenetworks/kubevpn:latest' locally
latest: Pulling from ghcr.io/kubenetworks/kubevpn
9c704ecd0c69: Already exists
4987d0a976b5: Pull complete
8aa94c4fc048: Pull complete
@@ -502,8 +517,8 @@ ca82aef6a9eb: Pull complete
1fd9534c7596: Pull complete
588bd802eb9c: Pull complete
Digest: sha256:368db2e0d98f6866dcefd60512960ce1310e85c24a398fea2a347905ced9507d
Status: Downloaded newer image for naison/kubevpn:latest
WARNING: image with reference naison/kubevpn was found but does not match the specified platform: wanted linux/amd64, actual: linux/arm64
Status: Downloaded newer image for ghcr.io/kubenetworks/kubevpn:latest
WARNING: image with reference ghcr.io/kubenetworks/kubevpn was found but does not match the specified platform: wanted linux/amd64, actual: linux/arm64
root@5732124e6447:/app# kubevpn dev deployment/authors --headers user=naison --entrypoint sh
hostname is 5732124e6447
Starting connect
@@ -580,7 +595,7 @@ OK: 8 MiB in 19 packages
Hello world!/opt/microservices #
/opt/microservices # curl authors:9080/health -H "foo: bar"
>>Received request: GET /health from 223.254.0.109:57930
>>Received request: GET /health from 198.19.0.109:57930
Hello world!/opt/microservices #
/opt/microservices # curl localhost:9080/health
{"status":"Authors is healthy"}/opt/microservices # exit
@@ -601,10 +616,10 @@ exit
```text
➜ ~ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
1cd576b51b66 naison/authors:latest "sh" 4 minutes ago Up 4 minutes authors_default_kubevpn_6df5f
56a6793df82d nginx:latest "/docker-entrypoint.…" 4 minutes ago Up 4 minutes nginx_default_kubevpn_6df63
d0b3dab8912a naison/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
1cd576b51b66 naison/authors:latest "sh" 4 minutes ago Up 4 minutes authors_default_kubevpn_6df5f
56a6793df82d nginx:latest "/docker-entrypoint.…" 4 minutes ago Up 4 minutes nginx_default_kubevpn_6df63
d0b3dab8912a ghcr.io/kubenetworks/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
➜ ~
```
@@ -629,8 +644,7 @@ d0b3dab8912a naison/kubevpn:v2.0.0 "/bin/bash" 5 minute
## 架构
![arch.svg](docs/en/images/proxy-arch.svg)
架构信息可以从[这里](/docs/en/Architecture.md) 和 [网站](https://www.kubevpn.cn/docs/architecture/connect) 找到.
[架构](https://kubevpn.dev/docs/architecture/connect)
## 贡献代码

View File

@@ -6,40 +6,29 @@ COPY . /go/src/$BASE
WORKDIR /go/src/$BASE
RUN go env -w GO111MODULE=on && go env -w GOPROXY=https://goproxy.cn,direct
RUN make kubevpn
RUN go install github.com/go-delve/delve/cmd/dlv@latest
FROM ubuntu:latest
FROM debian:bookworm-slim
ARG BASE=github.com/wencaiwulue/kubevpn
RUN sed -i s@/security.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list \
&& sed -i s@/archive.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list
RUN apt-get clean && apt-get update && apt-get install -y wget dnsutils vim curl \
net-tools iptables iputils-ping lsof iproute2 tcpdump binutils traceroute conntrack socat iperf3 \
apt-transport-https ca-certificates curl
RUN if [ $(uname -m) = "x86_64" ]; then \
echo "The architecture is AMD64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && chmod +x kubectl && mv kubectl /usr/local/bin; \
elif [ $(uname -m) = "aarch64" ]; then \
echo "The architecture is ARM64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" && chmod +x kubectl && mv kubectl /usr/local/bin; \
else \
echo "Unsupported architecture."; \
fi
ENV TZ=Asia/Shanghai \
DEBIAN_FRONTEND=noninteractive
RUN apt update \
&& apt install -y tzdata \
&& ln -fs /usr/share/zoneinfo/${TZ} /etc/localtime \
&& echo ${TZ} > /etc/timezone \
&& dpkg-reconfigure --frontend noninteractive tzdata \
RUN apt-get update && apt-get install -y openssl iptables curl dnsutils \
&& if [ $(uname -m) = "x86_64" ]; then \
echo "The architecture is AMD64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"; \
elif [ $(uname -m) = "aarch64" ]; then \
echo "The architecture is ARM64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl"; \
else \
echo "Unsupported architecture."; \
exit 1; \
fi \
&& chmod +x kubectl && mv kubectl /usr/local/bin \
&& apt-get remove -y curl \
&& apt-get autoremove -y \
&& apt-get clean -y \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY --from=builder /go/src/$BASE/bin/kubevpn /usr/local/bin/kubevpn
COPY --from=builder /go/bin/dlv /usr/local/bin/dlv
COPY --from=envoy /usr/local/bin/envoy /usr/local/bin/envoy

View File

@@ -1,5 +1,34 @@
FROM naison/kubevpn:latest
FROM envoyproxy/envoy:v1.25.0 AS envoy
FROM golang:1.23 AS builder
ARG BASE=github.com/wencaiwulue/kubevpn
COPY . /go/src/$BASE
WORKDIR /go/src/$BASE
RUN make kubevpn
FROM debian:bookworm-slim
ARG BASE=github.com/wencaiwulue/kubevpn
RUN apt-get update && apt-get install -y openssl iptables curl dnsutils \
&& if [ $(uname -m) = "x86_64" ]; then \
echo "The architecture is AMD64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"; \
elif [ $(uname -m) = "aarch64" ]; then \
echo "The architecture is ARM64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl"; \
else \
echo "Unsupported architecture."; \
exit 1; \
fi \
&& chmod +x kubectl && mv kubectl /usr/local/bin \
&& apt-get remove -y curl \
&& apt-get autoremove -y \
&& apt-get clean -y \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY bin/kubevpn /usr/local/bin/kubevpn
COPY --from=builder /go/src/$BASE/bin/kubevpn /usr/local/bin/kubevpn
COPY --from=envoy /usr/local/bin/envoy /usr/local/bin/envoy

View File

@@ -1,6 +1,270 @@
apiVersion: v1
entries:
kubevpn:
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.12
created: "2025-05-23T03:38:02.484001975Z"
description: A Helm chart for KubeVPN
digest: b9e28ceda8bb07b42ec37eb2d6b283496d83645479b2f1f4e921d9c462eeb54e
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.12/kubevpn-2.7.12.tgz
version: 2.7.12
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.11
created: "2025-05-18T09:03:21.60777933Z"
description: A Helm chart for KubeVPN
digest: ee30c2533dff51fa389767e56931583cdfff8c5fca7d6c9698f521c6fc508d42
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.11/kubevpn-2.7.11.tgz
version: 2.7.11
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.10
created: "2025-05-14T13:08:51.09371872Z"
description: A Helm chart for KubeVPN
digest: fd23dd5bf0c3a9343d73276c4997a34027a93c1a88667265d92297630579d165
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.10/kubevpn-2.7.10.tgz
version: 2.7.10
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.9
created: "2025-05-12T09:14:52.66116293Z"
description: A Helm chart for KubeVPN
digest: 56e022017177603290575849553c2e9c19f6a1691288dbd67c32a2fdcbde0834
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.9/kubevpn-2.7.9.tgz
version: 2.7.9
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.8
created: "2025-05-10T15:46:13.342045201Z"
description: A Helm chart for KubeVPN
digest: bfab5a7e4e1e795071a7ce3fd7713b517aa447d967ec58500e5a551564869109
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.8/kubevpn-2.7.8.tgz
version: 2.7.8
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.7
created: "2025-05-09T06:43:01.403047355Z"
description: A Helm chart for KubeVPN
digest: 14b3e7873aa71fa7a380631c83be8df1dfb8d0ccb49eb6746aa4f83e3df934f6
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.7/kubevpn-2.7.7.tgz
version: 2.7.7
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.6
created: "2025-05-07T11:46:09.644201893Z"
description: A Helm chart for KubeVPN
digest: 2146d5245440dff7d551ccc745aa1d9476d4f42053ff8a80f33f835d8da57712
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.6/kubevpn-2.7.6.tgz
version: 2.7.6
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.5
created: "2025-05-07T01:56:15.201307242Z"
description: A Helm chart for KubeVPN
digest: 34799e9605b3048aac75484bb32fb6c70f9e7eb7470e9b77c51be075a548c25e
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.5/kubevpn-2.7.5.tgz
version: 2.7.5
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.4
created: "2025-05-06T17:01:13.789138284Z"
description: A Helm chart for KubeVPN
digest: 5c6f2d1a178e917ac83ec72d0a46de9a0ff68f80a3aeb813d15dfb92c8ad36be
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.4/kubevpn-2.7.4.tgz
version: 2.7.4
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.3
created: "2025-05-06T15:40:24.505449375Z"
description: A Helm chart for KubeVPN
digest: 86ef4b1de6ea15f6738824f7c389a891f53500b9163b1288847172eb7dc6817e
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.3/kubevpn-2.7.3.tgz
version: 2.7.3
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.2
created: "2025-04-25T15:40:08.296727519Z"
description: A Helm chart for KubeVPN
digest: 8711dae30f4ff9bc9cea018fa16ae70087a17af42262f7f31c43950a34fffa08
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.2/kubevpn-2.7.2.tgz
version: 2.7.2
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.1
created: "2025-04-15T15:18:20.818055207Z"
description: A Helm chart for KubeVPN
digest: 79c40c942fd2cfcca63dd82921e04871680838f01717c6fcb3ee06bfb7f59535
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.1/kubevpn-2.7.1.tgz
version: 2.7.1
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.0
created: "2025-04-12T05:37:01.063235951Z"
description: A Helm chart for KubeVPN
digest: a4b4de15f474fba43367fc7239c31e2020a6a1e0e3b29e02eb653cb9922b02e8
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.0/kubevpn-2.7.0.tgz
version: 2.7.0
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.6.0
created: "2025-04-06T12:54:49.852649414Z"
description: A Helm chart for KubeVPN
digest: 58d930de19ac808e9f0ee501fe6f74b6f38376692708fc94fe7200496d9c5ca2
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.6.0/kubevpn-2.6.0.tgz
version: 2.6.0
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.5.1
created: "2025-04-03T15:46:28.062220333Z"
description: A Helm chart for KubeVPN
digest: 6daf003256c42bb0db414eb17eb06294e46d33bc6c63f01419012a37318d0d2f
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.5.1/kubevpn-2.5.1.tgz
version: 2.5.1
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.5.0
created: "2025-03-31T05:36:16.050204161Z"
description: A Helm chart for KubeVPN
digest: 301137b1599c232efd61ce9360e0a60da89e0a5c2eb076750bf461b38d26cfaf
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.5.0/kubevpn-2.5.0.tgz
version: 2.5.0
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.4.3
created: "2025-03-30T13:48:42.333380676Z"
description: A Helm chart for KubeVPN
digest: 8ef28a43cb3d04f071445cf7d1199aba7392d78e1941707bab82853c5541c93c
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.4.3/kubevpn-2.4.3.tgz
version: 2.4.3
- apiVersion: v2
appVersion: v2.4.2
created: "2025-03-23T12:53:35.793492243Z"
description: A Helm chart for KubeVPN
digest: c627f69ac904ddb41c396909873425d85264fb3393d550fa1b0e8d2abfc402e9
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.4.2/kubevpn-2.4.2.tgz
version: 2.4.2
- apiVersion: v2
appVersion: v2.4.1
created: "2025-03-16T09:48:30.691242519Z"
description: A Helm chart for KubeVPN
digest: 1766431ce46b43758353928188cc993832e41cd0e352c9bc7991390bbbf41b04
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.4.1/kubevpn-2.4.1.tgz
version: 2.4.1
- apiVersion: v2
appVersion: v2.4.0
created: "2025-03-14T14:16:56.392516206Z"
description: A Helm chart for KubeVPN
digest: ffece68d3234ba629e02456fd3b0d31b5d2d1330c4c7f5d82ac2e0e1e97d82f3
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.4.0/kubevpn-2.4.0.tgz
version: 2.4.0
- apiVersion: v2
appVersion: v2.3.13
created: "2025-02-23T14:30:35.221348419Z"
description: A Helm chart for KubeVPN
digest: e79cdd07eae2ba3f36997debf898b091e1e68412fde7a34e823bad902e803105
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.13/kubevpn-2.3.13.tgz
version: 2.3.13
- apiVersion: v2
appVersion: v2.3.12
created: "2025-02-13T07:46:06.029130129Z"
description: A Helm chart for KubeVPN
digest: 0b7d9f8b4cd306377e4452a9d86530387afcae379e11665909b90e15f2d82a04
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.12/kubevpn-2.3.12.tgz
version: 2.3.12
- apiVersion: v2
appVersion: v2.3.11
created: "2025-02-03T09:24:54.033585049Z"
description: A Helm chart for KubeVPN
digest: a54a2ed19e6f4aa5c274186d6b188c0230244582055905155c4620ebe8864838
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.11/kubevpn-2.3.11.tgz
version: 2.3.11
- apiVersion: v2
appVersion: v2.3.10
created: "2025-01-24T13:36:34.489289734Z"
@@ -311,4 +575,4 @@ entries:
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.2/kubevpn-2.2.2.tgz
version: 2.2.2
generated: "2025-01-24T13:36:34.489504385Z"
generated: "2025-05-23T03:38:02.484184006Z"

View File

@@ -4,3 +4,5 @@ description: A Helm chart for KubeVPN
type: application
version: 0.1.0
appVersion: "1.16.0"
annotations:
app: kubevpn

24
charts/kubevpn/README.md Normal file
View File

@@ -0,0 +1,24 @@
# Helm charts for KubeVPN server
Installing the KubeVPN server with Helm means using cluster mode: all users will share this instance.
- Please make sure users have permission to access the `kubevpn` namespace.
- Otherwise, KubeVPN will fall back to creating a `kubevpn` deployment in the user's own namespace.
## Install with default mode
```shell
helm install kubevpn kubevpn/kubevpn -n kubevpn --create-namespace
```
In China, you can use the Tencent image registry:
```shell
helm install kubevpn kubevpn/kubevpn --set image.repository=ccr.ccs.tencentyun.com/kubevpn/kubevpn -n kubevpn --create-namespace
```
## AWS Fargate cluster
```shell
helm install kubevpn kubevpn/kubevpn --set netstack=gvisor -n kubevpn --create-namespace
```

View File

@@ -1,4 +1,4 @@
1. Connect to cluster network by running these commands:
kubevpn connect --namespace {{ .Release.Namespace }}
export POD_IP=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kubevpn.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].status.podIP}")
kubevpn connect --namespace {{ include "kubevpn.namespace" . }}
export POD_IP=$(kubectl get pods --namespace {{ include "kubevpn.namespace" . }} -l "app.kubernetes.io/name={{ include "kubevpn.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].status.podIP}")
ping $POD_IP

View File

@@ -61,3 +61,22 @@ Create the name of the service account to use
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Namespace resolution:
1. use the release namespace when one is specified via -n/--namespace (unless it is "default")
2. otherwise fall back to the default namespace from .Values.namespace (kubevpn)
*/}}
{{- define "kubevpn.namespace" -}}
{{- if .Release.Namespace }}
{{- if eq .Release.Namespace "default" }}
{{- .Values.namespace }}
{{- else }}
{{- .Release.Namespace }}
{{- end }}
{{- else if .Values.namespace }}
{{- .Values.namespace }}
{{- else }}
{{- .Values.namespace }}
{{- end }}
{{- end }}

View File

@@ -2,6 +2,7 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ include "kubevpn.namespace" . }}
data:
DHCP: ""
DHCP6: ""

View File

@@ -2,6 +2,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ include "kubevpn.namespace" . }}
labels:
{{- include "kubevpn.labels" . | nindent 4 }}
spec:
@@ -32,33 +33,34 @@ spec:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- args:
- |2-
sysctl -w net.ipv4.ip_forward=1
sysctl -w net.ipv6.conf.all.disable_ipv6=0
sysctl -w net.ipv6.conf.all.forwarding=1
{{- if eq .Values.netstack "system" }}
- |
echo 1 > /proc/sys/net/ipv4/ip_forward
echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6
echo 1 > /proc/sys/net/ipv6/conf/all/forwarding
update-alternatives --set iptables /usr/sbin/iptables-legacy
iptables -F
ip6tables -F
iptables -P INPUT ACCEPT
ip6tables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
ip6tables -P FORWARD ACCEPT
iptables -t nat -A POSTROUTING -s ${CIDR4} -o eth0 -j MASQUERADE
ip6tables -t nat -A POSTROUTING -s ${CIDR6} -o eth0 -j MASQUERADE
kubevpn serve -L "tcp://:10800" -L "tun://:8422?net=${TunIPv4}" -L "gtcp://:10801" -L "gudp://:10802" --debug=true
kubevpn server -l "tcp://:10800" -l "tun://:8422?net=${TunIPv4}&net6=${TunIPv6}" -l "gtcp://:10801" -l "gudp://:10802"
{{- else }}
- kubevpn server -l "tcp://:10800" -l "gtcp://:10801" -l "gudp://:10802"
{{- end }}
command:
- /bin/sh
- -c
env:
- name: CIDR4
value: 223.254.0.0/16
value: 198.19.0.0/16
- name: CIDR6
value: efff:ffff:ffff:ffff::/64
value: 2001:2::/64
- name: TunIPv4
value: 223.254.0.100/16
value: 198.19.0.100/16
- name: TunIPv6
value: efff:ffff:ffff:ffff:ffff:ffff:ffff:9999/64
value: 2001:2::9999/64
envFrom:
- secretRef:
name: {{ include "kubevpn.fullname" . }}
@@ -74,12 +76,10 @@ spec:
protocol: TCP
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- if eq .Values.netstack "system" }}
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
runAsUser: 0
{{- toYaml .Values.securityContext | nindent 12 }}
{{- end }}
- args:
- control-plane
- --watchDirectoryFilename
@@ -106,6 +106,11 @@ spec:
envFrom:
- secretRef:
name: {{ include "kubevpn.fullname" . }}
env:
- name: "POD_NAMESPACE"
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: webhook

View File

@@ -3,6 +3,7 @@ apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ include "kubevpn.namespace" . }}
labels:
{{- include "kubevpn.labels" . | nindent 4 }}
spec:

View File

@@ -2,6 +2,7 @@ apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ include "kubevpn.namespace" . }}
labels:
{{- include "kubevpn.labels" . | nindent 4 }}
annotations:
@@ -36,34 +37,34 @@ spec:
args:
- |2-
echo "Label namespace {{ .Release.Namespace }}"
kubectl label ns {{ .Release.Namespace }} ns={{ .Release.Namespace }}
echo "Label namespace {{ include "kubevpn.namespace" . }}"
kubectl label ns {{ include "kubevpn.namespace" . }} ns={{ include "kubevpn.namespace" . }}
echo "Generating https certificate"
openssl req -x509 -nodes -days 36500 -newkey rsa:2048 -subj "/CN={{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}.svc" -addext "subjectAltName=DNS:{{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local,DNS:{{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}.svc" -keyout server.key -out server.crt
openssl req -x509 -nodes -days 36500 -newkey rsa:2048 -subj "/CN={{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}" -addext "subjectAltName=DNS:{{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}.svc.cluster.local,DNS:{{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}.svc,DNS:{{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }},DNS:localhost,IP:127.0.0.1" -keyout server.key -out server.crt
export TLS_CRT=$(cat server.crt | base64 | tr -d '\n')
echo "Patch mutatingwebhookconfigurations {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}"
kubectl patch mutatingwebhookconfigurations {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }} -p "{\"webhooks\":[{\"name\":\"{{ include "kubevpn.fullname" . }}.naison.io\",\"sideEffects\":\"None\",\"admissionReviewVersions\":[\"v1\", \"v1beta1\"],\"clientConfig\":{\"service\":{\"namespace\":\"{{ .Release.Namespace }}\",\"name\":\"{{ include "kubevpn.fullname" . }}\"},\"caBundle\":\"$TLS_CRT\"}}]}"
echo "Patch mutatingwebhookconfigurations {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}"
# NOTE(review): this section appears to be a rendered diff view — several commands
# occur twice, once targeting {{ .Release.Namespace }} and once targeting
# {{ include "kubevpn.namespace" . }}. Presumably only the kubevpn.namespace
# variant belongs in the final chart; confirm against the committed template.
# Inject the freshly generated CA bundle ($TLS_CRT) into the mutating webhook
# so the API server trusts the webhook service's serving certificate.
kubectl patch mutatingwebhookconfigurations {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }} -p "{\"webhooks\":[{\"name\":\"{{ include "kubevpn.fullname" . }}.naison.io\",\"sideEffects\":\"None\",\"admissionReviewVersions\":[\"v1\", \"v1beta1\"],\"clientConfig\":{\"service\":{\"namespace\":\"{{ include "kubevpn.namespace" . }}\",\"name\":\"{{ include "kubevpn.fullname" . }}\"},\"caBundle\":\"$TLS_CRT\"}}]}"
# Base64-encode the private key (strip newlines so it is valid JSON patch data).
export TLS_KEY=$(cat server.key | base64 | tr -d '\n')
echo "Patch secret {{ include "kubevpn.fullname" . }}"
# Store the new key/cert pair in the chart's TLS secret (duplicate old/new diff lines).
kubectl patch secret {{ include "kubevpn.fullname" . }} -n {{ .Release.Namespace }} -p "{\"data\":{\"tls_key\":\"$TLS_KEY\",\"tls_crt\":\"$TLS_CRT\"}}"
kubectl patch secret {{ include "kubevpn.fullname" . }} -n {{ include "kubevpn.namespace" . }} -p "{\"data\":{\"tls_key\":\"$TLS_KEY\",\"tls_crt\":\"$TLS_CRT\"}}"
echo "Restart the pods..."
# Bounce the deployment (scale 0 then 1) so pods pick up the patched secret.
kubectl scale -n {{ .Release.Namespace }} --replicas=0 deployment/{{ include "kubevpn.fullname" . }}
kubectl scale -n {{ .Release.Namespace }} --replicas=1 deployment/{{ include "kubevpn.fullname" . }}
kubectl scale -n {{ include "kubevpn.namespace" . }} --replicas=0 deployment/{{ include "kubevpn.fullname" . }}
kubectl scale -n {{ include "kubevpn.namespace" . }} --replicas=1 deployment/{{ include "kubevpn.fullname" . }}
# Read the currently configured IPv4 pools from the configmap.
export POOLS=$(kubectl get cm {{ include "kubevpn.fullname" . }} -n {{ .Release.Namespace }} -o jsonpath='{.data.IPv4_POOLS}')
export POOLS=$(kubectl get cm {{ include "kubevpn.fullname" . }} -n {{ include "kubevpn.namespace" . }} -o jsonpath='{.data.IPv4_POOLS}')
# Only auto-detect CIDRs when the pool is unset (whitespace-only counts as empty).
if [[ -z "${POOLS// }" ]];then
echo "Cidr is empty"
echo "Get pod cidr..."
export POD_CIDR=$(kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}' | tr -s '\n' ' ')
echo "Get service cidr..."
# Trick: applying a Service with an invalid clusterIP makes the API server
# reject it with an error message containing the valid service CIDR range,
# which is then scraped from stderr via sed.
export SVC_CIDR=$(echo '{"apiVersion":"v1","kind":"Service","metadata":{"name":"kubevpn-get-svc-cidr-{{ .Release.Namespace }}", "namespace": "{{ .Release.Namespace }}"},"spec":{"clusterIP":"1.1.1.1","ports":[{"port":443}]}}' | kubectl apply -f - 2>&1 | sed 's/.*valid IPs is //')
export SVC_CIDR=$(echo '{"apiVersion":"v1","kind":"Service","metadata":{"name":"kubevpn-get-svc-cidr-{{ include "kubevpn.namespace" . }}", "namespace": "{{ include "kubevpn.namespace" . }}"},"spec":{"clusterIP":"1.1.1.1","ports":[{"port":443}]}}' | kubectl apply -f - 2>&1 | sed 's/.*valid IPs is //')
echo "Pod cidr: $POD_CIDR, service cidr: $SVC_CIDR"
echo "Patch configmap {{ include "kubevpn.fullname" . }}"
# Persist the discovered pod + service CIDRs for the manager to use.
kubectl patch configmap {{ include "kubevpn.fullname" . }} -n {{ .Release.Namespace }} -p "{\"data\":{\"IPv4_POOLS\":\"$POD_CIDR $SVC_CIDR\"}}"
kubectl patch configmap {{ include "kubevpn.fullname" . }} -n {{ include "kubevpn.namespace" . }} -p "{\"data\":{\"IPv4_POOLS\":\"$POD_CIDR $SVC_CIDR\"}}"
else
echo "Cidr is NOT empty"
fi

View File

@@ -1,7 +1,8 @@
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
name: {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}
namespace: {{ include "kubevpn.namespace" . }}
webhooks:
- admissionReviewVersions:
- v1
@@ -10,15 +11,13 @@ webhooks:
caBundle: {{ .Values.tls.crt }}
service:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "kubevpn.namespace" . }}
path: /pods
port: 80
failurePolicy: Ignore
matchPolicy: Equivalent
name: {{ include "kubevpn.fullname" . }}.naison.io
namespaceSelector:
matchLabels:
ns: {{ .Release.Namespace }}
namespaceSelector: { }
objectSelector: { }
reinvocationPolicy: Never
rules:

View File

@@ -2,6 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ include "kubevpn.namespace" . }}
rules:
- apiGroups:
- ""
@@ -20,7 +21,7 @@ rules:
- delete
- apiGroups: [ "" ]
resources: [ "namespaces" ]
resourceNames: [{{ .Release.Namespace }}]
resourceNames: ["{{ include "kubevpn.namespace" . }}"]
verbs:
- get
- patch
@@ -47,14 +48,14 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
name: {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
resourceNames:
- {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
- {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}
verbs:
- get
- list

View File

@@ -2,6 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ include "kubevpn.namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -9,18 +10,18 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "kubevpn.fullname" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "kubevpn.namespace" . }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
name: {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}
subjects:
- kind: ServiceAccount
name: {{ include "kubevpn.fullname" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "kubevpn.namespace" . }}
roleRef:
kind: ClusterRole
name: {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
name: {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,8 +1,10 @@
apiVersion: v1
data:
tls_crt: {{ .Values.tls.crt }}
tls_key: {{ .Values.tls.key }}
kind: Secret
metadata:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ include "kubevpn.namespace" . }}
type: Opaque
stringData:
tls_crt: {{ .Values.tls.crt }}
tls_key: {{ .Values.tls.key }}
tls_server_name: {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}

View File

@@ -2,6 +2,7 @@ apiVersion: v1
kind: Service
metadata:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ include "kubevpn.namespace" . }}
labels:
{{- include "kubevpn.labels" . | nindent 4 }}
spec:

View File

@@ -3,6 +3,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "kubevpn.serviceAccountName" . }}
namespace: {{ include "kubevpn.namespace" . }}
labels:
{{- include "kubevpn.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}

View File

@@ -2,10 +2,17 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# default namespace
namespace: kubevpn
# default is system mode, available ["system", "gvisor"]
# system: needs privilege permission and cap NET_ADMIN (Best experience)
# gvisor: no needs any additional permission (Best compatibility)
netstack: system
replicaCount: 1
image:
repository: naison/kubevpn
repository: ghcr.io/kubenetworks/kubevpn
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
@@ -21,8 +28,9 @@ cidr:
service: ""
tls:
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURXVENDQWtHZ0F3SUJBZ0lJU0NmUDdHeHVhUkl3RFFZSktvWklodmNOQVFFTEJRQXdNREV1TUN3R0ExVUUKQXd3bGEzVmlaWFp3YmkxMGNtRm1abWxqTFcxaGJtRm5aWEl0WTJGQU1UY3dOamsyTnpjd01EQWVGdzB5TkRBeQpNRE14TWpReE5EQmFGdzB5TlRBeU1ESXhNalF4TkRCYU1DMHhLekFwQmdOVkJBTU1JbXQxWW1WMmNHNHRkSEpoClptWnBZeTF0WVc1aFoyVnlRREUzTURZNU5qYzNNREF3Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXcKZ2dFS0FvSUJBUURzVnNleEVpVG00dmlleUhEeU5SbldKbXNiaFBWV24yTkgvNi9wUGVBT3ZUbXgwSDdHUnZJLwpzMzVoZW9EWExhdFVmaDlXT1hXdzRqaGZsdUdWQWlzZGs2Y2ZkS1hVVzJheXpRbFpZd1ZMTzdUUHFoeWF0UHVpCmpRYVB2bUErRGNYMHJRc2Y3SFJwVWhjVTJ1QTJ4WGhZNy9QWWFUdzhkU0NTTHFTK2ZLM3poc0lONTFrYnIzdG4KU2FKcWFybDNhSU82N1JvdmNZbmxERG9XTzFwS1ZSUmROVkM1anVtREJOSWdOam5TSTY5QTFydzR0REkwdjcxWQpPRmhjYnUwNnFVdkNNU1JzR3F5ZkhOeUlXakVvcnk4Wk0xVExlcnZhTk12WlFTRndRNk5SRExHYXNlbTBlNTRXCmVublA0OVpIR1FhTjllYnJQSkJuL2pQQ3p0NlFDMkg5QWdNQkFBR2plakI0TUE0R0ExVWREd0VCL3dRRUF3SUYKb0RBVEJnTlZIU1VFRERBS0JnZ3JCZ0VGQlFjREFUQU1CZ05WSFJNQkFmOEVBakFBTUI4R0ExVWRJd1FZTUJhQQpGQVA3WmhvcGsvbEc3MVNCMk42QkpKdDI2eXhuTUNJR0ExVWRFUVFiTUJtQ0YydDFZbVYyY0c0dGRISmhabVpwCll5MXRZVzVoWjJWeU1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQVhYWk1WazhhQWwwZTlqUWRQTDc3ZVZOL3kKY1ZZZzRBVDlhdkh0UXV2UkZnOU80Z3JMaFVDQnoyN25wdlZZcHNMbmdEMTFRTXpYdHlsRDNMNDJNQ3V0Wnk5VQorL1BCL291ajQzWkZUckJDbk9DZDl6elE2MXZSL1RmbUFrTUhObTNZYjE1OGt2V0ZhNVlBdytRVi9vRDNUcGlXClREVTZXNkxvRFg5N0lNSFk0L3VLNTNzbXVLMjh5VzduSVVrbnpqN3h5UzVOWTFZaVNUN0w2ZFZ0VVppR1FUK00KRk16ODVRcTJOTWVXU1lKTmhhQVk5WEpwMXkrcEhoeWpPVFdjSEFNYmlPR29mODM5N1R6YmUyWHdNQ3BGMWc5NwpMaHZERnNsNzcyOWs1NFJVb1d2ZjFIVFFxL2R6cVBQTTNhWGpTbXFWUEV2Zk5qeGNhZnFnNHBaRmdzYzEKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJREpEQ0NBZ3lnQXdJQkFnSUlJMmROaFBKY0Uxc3dEUVlKS29aSWh2Y05BUUVMQlFBd01ERXVNQ3dHQTFVRQpBd3dsYTNWaVpYWndiaTEwY21GbVptbGpMVzFoYm1GblpYSXRZMkZBTVRjd05qazJOemN3TURBZUZ3MHlOREF5Ck1ETXhNalF4TkRCYUZ3MHlOVEF5TURJeE1qUXhOREJhTURBeExqQXNCZ05WQkFNTUpXdDFZbVYyY0c0dGRISmgKWm1acFl5MXRZVzVoWjJWeUxXTmhRREUzTURZNU5qYzNNREF3Z2dFaU1BMEdDU3FHU0liM0R
RRUJBUVVBQTRJQgpEd0F3Z2dFS0FvSUJBUURBQVpBdEZaTzJEZG9BVTUxWnRiVjI0QkVGN3RkakMzTzBPdEE2UURYTlVwNWlZZGdjCjdORVlGZE55YXltTWZMUVFGTWZqZFcxNWpDQ0N4KzFFMm1KQTVZa0ZFcXJTeDA3Z1pkKy9hcU13ZkhDT0ZTM0UKSUROdzBKYlBGVHZuSGsyZHVXby8zT1BnVmpONWw2UTBWaE10WkJEc2haVHVvSUhWaTJZcldDdnNkMU9mWFVyMwo0Y0ZJUkJ2OW5mNDIzdWthajYxdisrRDd6K3Y0bEN4R0JtUDhpYXFaNFVlckxIdWF2N1hQUnZ4QmQzNDBGY2diCm5TZVUxTXZmcTgvOUg4VTRzeWRGaUpZVUs1RFhkWU15NEw0RlMvbXZRaWR1TU5lWUw1Y2xHSXZTNGFzQjl2QlMKM0ZIY1IrQk1xVzFQWUdDc2YyL0RvdVNRVVNhcnB5VU5aczZKQWdNQkFBR2pRakJBTUE0R0ExVWREd0VCL3dRRQpBd0lDcERBUEJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJRRCsyWWFLWlA1UnU5VWdkamVnU1NiCmR1c3NaekFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBVGFNR0NLK2YxSmdKaXplVjlla3ZhckhDZHpmZzJNZkQKV2pCeFUzMXNabE1vZU9GS0hPdndjMVliTzVNTStHTGM0bGhMS2VHV1pwQmVRV0lFamo4V01wa3k2M2VtUUl5eQpOT2hjdVBUTFhCQ0JkS1lhUU1LcU9mN3c4MEw2cVRKclFER0t0a0MxVzEwbFJzbUd0TEtBbDVjU0w4VFRSZVhXCjhiNXRGOFd5Yms1Vm12VWtxdEpkSVNJTjdVOG5nV21WRUVOZFcvckNqclI5TllaSXZBZk9mS1Zrc1JuZEJaQ0kKOXdxVUI2K2JITEJBWjNpV293ZFhpRGhLMSt5Z2ZwNnpUcW9LRmxOWi8rRTNkS0tpbStyZFFGSmIvNTNvU2xaaApwMkVkT1ZNYU1mRjh1ZFhDdE44WjZnVHpPWkJxN1pmWjVpMlU1eFQ2aFNxRjFjT1ZuQS9idmc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBN0ZiSHNSSWs1dUw0bnNodzhqVVoxaVpyRzRUMVZwOWpSLyt2NlQzZ0RyMDVzZEIrCnhrYnlQN04rWVhxQTF5MnJWSDRmVmpsMXNPSTRYNWJobFFJckhaT25IM1NsMUZ0bXNzMEpXV01GU3p1MHo2b2MKbXJUN29vMEdqNzVnUGczRjlLMExIK3gwYVZJWEZOcmdOc1Y0V08vejJHazhQSFVna2k2a3ZueXQ4NGJDRGVkWgpHNjk3WjBtaWFtcTVkMmlEdXUwYUwzR0o1UXc2Rmp0YVNsVVVYVFZRdVk3cGd3VFNJRFk1MGlPdlFOYThPTFF5Ck5MKzlXRGhZWEc3dE9xbEx3akVrYkJxc254emNpRm94S0s4dkdUTlV5M3E3MmpUTDJVRWhjRU9qVVF5eG1ySHAKdEh1ZUZucDV6K1BXUnhrR2pmWG02enlRWi80endzN2VrQXRoL1FJREFRQUJBb0lCQVFEWkRaWVdsS0JaZ0Nodgp3NHlmbFk4bDgyQzVCSEpCM041VWVJbjVmejh3cWk2N2xNMXBraXpYdmlTYXArUitPczQ0S2lEamtwLzVGTHBMCmFBbkRUUnVGN1Y0MmNHNEFTdlZWenlMLytnWVpvenNhNFpPbHJnUFF0UTVLbzhCR0hXWXBvV2N2S1gxOFlNMGIKOVN5b2dORlhkUUNSUjR6dnhXNWxjdnNRaXZkRFNFTUJhbW00bFpEM0ZtUm5HVGlpaUVNSis2SFdlR1lBS1RMSgoxN0NnejZaWjg1bGtUZ0dxeEUrWkQwNDJGYWdJZlJORVI0QmZOMlp6NU5CU3RnMTJFdUpXWmRGcWpxSHlwbnNjCjNjbEd0U1Z5VStvWUFUWnV5Y2VMNVIwZUdzdTB6ZHhLT3ZzSm9yVWZ0dlMrUGovclJxWHVjOVdXSkFLU1FDVm0Ka1I1Y2M4ak5Bb0dCQU8wYkVrNTdtZWYwcXNKT0U3TFlFV1hRRFZiTmhnZ1E2eTlBZlNnVjZDMFFDdC93YkVGaQo0Rm41bTdhSHdqZUJ5OFJnMGhGbTdVNThCb3FyNnBQNFp6MEhwY1ZSTmVLeTF6R0wreFRJRXFnTXQxei9TYVE0CkIwWEZ4Ulg3d2pjeit2OC9GOVdsOElLbHhBWjhxNXd6aHNFUVVYcVIxTzF1T2FjRktSdXg3OU1UQW9HQkFQOHMKRVJBa1R3WEV3UU9ya2dQOW5tTHZLYXMwM0J6UXUrOFBtQWlsOGFmbVR5ZEFWdzJKaHBwOFlUQzl6NDM3VXU4Ngpta2lOVHpRL3MvQ1lCNEdJVVFCMEdlTDJtc2VjdWNaUHhTSW10dElSOWY4bjk2NEpuL3RtVUd4VXRFaWhWdER4ClZCdFBiWmNzc2E5VVVCRFVqRnZJSUdPTGlqSVdxbW8zM3htT0tJaXZBb0dCQU5HV2k0RWFtdnBCK1N1V3JxejUKZDYrQzBEZTVwcys4Zk5nZzdrRWYxRUw1R2xQSGh6bnBPQjN3bWFjb3JCSTZ4cTlKVW9lVmJ4RmdhcnZycVlpeApIRGtEYUpKWjdnTDlTV0YvdGlzeGkrUkdrVk5BU28xQ0JaTzBkVG13ZUlZcGlhWlUxREhENUN6b2NMVzNRRTdyCjhTTDUxTHcrNm5RU2FoM3NYdUVmVWJwSEFvR0JBTk1FNlROMUkxaDg1cldYVEJBNnk2RzdjTFVoNktsM3dRTW8KM1N6QnRyK0h5WXVIUExaNE5iVktDTUhiSm1xZkhXMnpBK1hkM2xNeUh5ZG5Ra1hQcWxUNnJuR3dTRDJ0RVVDNwp0U1hSNkR4L0YvVWpZME1zdUgyWmxnYVFZZXJ5YWE0dTlNUUZBbmNUUWZuaGVya0FYUGFGNEtzUnVYNUVtamR1Cjd2UGVTUTBIQW9HQUM0ZlJmZnFFM3RRdWxSeUJVeHhKNHl
PaWJiVlpCV1hxWHRzMU0wczdsZ1YxaGVrYis1VmMKVTZ3MFh2T0pTaEZPaGF6UVdseVZUejhmSVdSa1BXa2MzSzE1dWx6cmh6NWZVa0dYOEw0OGMrTHlaSzZ1M2ZRVgpyL1pRV3JsYlZSWlhRVGhuaGhOM1Jodm96SlZZV0lpckVyMGp3VmRaQWRUYW1XZEpTQ3J4WE1NPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
# will auto generate in job
crt: ''''''
key: ''''''
serviceAccount:
# Specifies whether a service account should be created
@@ -40,14 +48,13 @@ podLabels:
podSecurityContext: { }
# fsGroup: 2000
securityContext: { }
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
runAsUser: 0
runAsGroup: 0
service:
type: ClusterIP

View File

@@ -46,7 +46,13 @@ func CmdAlias(f cmdutil.Factory) *cobra.Command {
Short: i18n.T("Config file alias to execute command simply"),
Long: templates.LongDesc(i18n.T(`
Config file alias to execute command simply, just like ssh alias config
Please point to an existing, complete config file:
1. Via the command-line flag --kubevpnconfig
2. Via the KUBEVPNCONFIG environment variable
3. In your home directory as ~/.kubevpn/config.yaml
It will read ~/.kubevpn/config.yaml file as config, also support special file path
by flag -f. It also supports depends relationship, like one cluster api server needs to
access via another cluster, you can use syntax needs. it will do action to needs cluster first
@@ -100,11 +106,11 @@ func CmdAlias(f cmdutil.Factory) *cobra.Command {
c.Stdout = os.Stdout
c.Stdin = os.Stdin
c.Stderr = os.Stderr
fmt.Printf("Name: %s\n", conf.Name)
fmt.Println(fmt.Sprintf("Name: %s", conf.Name))
if conf.Description != "" {
fmt.Printf("Description: %s\n", conf.Description)
fmt.Println(fmt.Sprintf("Description: %s", conf.Description))
}
fmt.Printf("Command: %v\n", c.Args)
fmt.Println(fmt.Sprintf("Command: %v", c.Args))
err = c.Run()
if err != nil {
return err
@@ -113,7 +119,7 @@ func CmdAlias(f cmdutil.Factory) *cobra.Command {
return nil
},
}
cmd.Flags().StringVarP(&localFile, "file", "f", config.GetConfigFilePath(), "Config file location")
cmd.Flags().StringVarP(&localFile, "kubevpnconfig", "f", util.If(os.Getenv("KUBEVPNCONFIG") != "", os.Getenv("KUBEVPNCONFIG"), config.GetConfigFile()), "Path to the kubevpnconfig file to use for CLI requests.")
cmd.Flags().StringVarP(&remoteAddr, "remote", "r", "", "Remote config file, eg: https://raw.githubusercontent.com/kubenetworks/kubevpn/master/pkg/config/config.yaml")
return cmd
}
@@ -129,7 +135,7 @@ func ParseAndGet(localFile, remoteAddr string, aliasName string) ([]Config, erro
path = remoteAddr
content, err = util.DownloadFileStream(path)
} else {
path = config.GetConfigFilePath()
path = config.GetConfigFile()
content, err = os.ReadFile(path)
}
if err != nil {

View File

@@ -1,9 +1,11 @@
package cmds
import (
"log"
"context"
"reflect"
"testing"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
func TestAlias(t *testing.T) {
@@ -22,7 +24,7 @@ Flags:
- --extra-hosts=xxx.com`
_, err := ParseConfig([]byte(str))
if err != nil {
log.Fatal(err)
plog.G(context.Background()).Fatal(err)
}
}
@@ -42,7 +44,7 @@ Flags:
- --extra-hosts=xxx.com`
_, err := ParseConfig([]byte(str))
if err != nil {
log.Fatal(err)
plog.G(context.Background()).Fatal(err)
}
}
@@ -206,11 +208,11 @@ Flags:
for _, datum := range data {
configs, err := ParseConfig([]byte(datum.Config))
if err != nil {
log.Fatal(err)
plog.G(context.Background()).Fatal(err)
}
getConfigs, err := GetConfigs(configs, datum.Run)
if err != nil && !datum.ExpectError {
log.Fatal(err)
plog.G(context.Background()).Fatal(err)
} else if err != nil {
}
if datum.ExpectError {
@@ -221,7 +223,7 @@ Flags:
c = append(c, config.Name)
}
if !reflect.DeepEqual(c, datum.ExpectOrder) {
log.Fatalf("Not match, expect: %v, real: %v", datum.ExpectOrder, c)
plog.G(context.Background()).Fatalf("Not match, expect: %v, real: %v", datum.ExpectOrder, c)
}
}
}

View File

@@ -1,6 +1,7 @@
package cmds
import (
"context"
"fmt"
"os"
@@ -18,6 +19,7 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
@@ -75,7 +77,7 @@ func CmdClone(f cmdutil.Factory) *cobra.Command {
kubevpn clone service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
util.InitLoggerForClient(false)
plog.InitLoggerForClient()
// startup daemon process and sudo process
err = daemon.StartupDaemon(cmd.Context())
if err != nil {
@@ -119,10 +121,6 @@ func CmdClone(f cmdutil.Factory) *cobra.Command {
extraRoute.ExtraCIDR = append(extraRoute.ExtraCIDR, ip.String())
}
}
logLevel := log.InfoLevel
if config.Debug {
logLevel = log.DebugLevel
}
req := &rpc.CloneRequest{
KubeconfigBytes: string(bytes),
Namespace: ns,
@@ -141,11 +139,14 @@ func CmdClone(f cmdutil.Factory) *cobra.Command {
TransferImage: transferImage,
Image: config.Image,
ImagePullSecretName: imagePullSecretName,
Level: int32(logLevel),
Level: int32(util.If(config.Debug, log.DebugLevel, log.InfoLevel)),
LocalDir: options.LocalDir,
RemoteDir: options.RemoteDir,
}
cli := daemon.GetClient(false)
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
resp, err := cli.Clone(cmd.Context(), req)
if err != nil {
return err
@@ -153,7 +154,8 @@ func CmdClone(f cmdutil.Factory) *cobra.Command {
err = util.PrintGRPCStream[rpc.CloneResponse](resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
err = remove(cli, args)
return err
}
return err
}
@@ -164,8 +166,8 @@ func CmdClone(f cmdutil.Factory) *cobra.Command {
cmd.Flags().StringToStringVarP(&options.Headers, "headers", "H", map[string]string{}, "Traffic with special headers (use `and` to match all headers) with reverse it to target cluster cloned workloads, If not special, redirect all traffic to target cluster cloned workloads. eg: --headers foo=bar --headers env=dev")
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName, &options.Engine)
cmdutil.AddContainerVarFlags(cmd, &options.TargetContainer, options.TargetContainer)
cmd.Flags().StringVar(&options.TargetImage, "target-image", "", "Clone container use this image to startup container, if not special, use origin image")
cmd.Flags().StringVar(&options.TargetContainer, "target-container", "", "Clone container use special image to startup this container, if not special, use origin image")
cmd.Flags().StringVar(&options.TargetNamespace, "target-namespace", "", "Clone workloads in this namespace, if not special, use origin namespace")
cmd.Flags().StringVar(&options.TargetKubeconfig, "target-kubeconfig", "", "Clone workloads will create in this cluster, if not special, use origin cluster")
cmd.Flags().StringVar(&options.TargetRegistry, "target-registry", "", "Clone workloads will create this registry domain to replace origin registry, if not special, use origin registry")
@@ -176,3 +178,20 @@ func CmdClone(f cmdutil.Factory) *cobra.Command {
cmd.ValidArgsFunction = utilcomp.ResourceTypeAndNameCompletionFunc(f)
return cmd
}
// remove asks the daemon to delete the cloned workloads listed in args,
// printing the streamed progress. A Canceled status from the stream is
// treated as a normal shutdown rather than an error.
func remove(cli rpc.DaemonClient, args []string) error {
	stream, err := cli.Remove(context.Background(), &rpc.RemoveRequest{
		Workloads: args,
	})
	if err != nil {
		return err
	}
	// NOTE(review): the stream is decoded as rpc.DisconnectResponse even
	// though this is a Remove call — confirm the RPC really reuses that
	// message type.
	if err = util.PrintGRPCStream[rpc.DisconnectResponse](stream); err != nil {
		if status.Code(err) == codes.Canceled {
			return nil
		}
		return err
	}
	return nil
}

View File

@@ -55,7 +55,10 @@ func cmdConfigAdd(f cmdutil.Factory) *cobra.Command {
Namespace: ns,
SshJump: sshConf.ToRPC(),
}
cli := daemon.GetClient(false)
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
resp, err := cli.ConfigAdd(cmd.Context(), req)
if err != nil {
return err
@@ -88,8 +91,11 @@ func cmdConfigRemove(f cmdutil.Factory) *cobra.Command {
req := &rpc.ConfigRemoveRequest{
ClusterID: args[0],
}
cli := daemon.GetClient(false)
_, err := cli.ConfigRemove(cmd.Context(), req)
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
_, err = cli.ConfigRemove(cmd.Context(), req)
if err != nil {
return err
}

View File

@@ -19,6 +19,7 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
@@ -30,6 +31,7 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
var sshConf = &pkgssh.SshConfig{}
var transferImage, foreground, lite bool
var imagePullSecretName string
var managerNamespace string
cmd := &cobra.Command{
Use: "connect",
Short: i18n.T("Connect to kubernetes cluster network"),
@@ -64,7 +66,7 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
kubevpn connect --ssh-jump "--ssh-addr jump.naison.org --ssh-username naison --gssapi-password xxx" --ssh-username root --ssh-addr 127.0.0.1:22 --ssh-keyfile ~/.ssh/dst.pem
`)),
PreRunE: func(cmd *cobra.Command, args []string) error {
util.InitLoggerForClient(false)
plog.InitLoggerForClient()
// startup daemon process and sudo process
err := daemon.StartupDaemon(cmd.Context())
if err != nil {
@@ -85,10 +87,6 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
extraRoute.ExtraCIDR = append(extraRoute.ExtraCIDR, ip.String())
}
}
logLevel := log.InfoLevel
if config.Debug {
logLevel = log.DebugLevel
}
req := &rpc.ConnectRequest{
KubeconfigBytes: string(bytes),
Namespace: ns,
@@ -100,10 +98,14 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
TransferImage: transferImage,
Image: config.Image,
ImagePullSecretName: imagePullSecretName,
Level: int32(logLevel),
Level: int32(util.If(config.Debug, log.DebugLevel, log.InfoLevel)),
ManagerNamespace: managerNamespace,
}
// if is foreground, send to sudo daemon server
cli := daemon.GetClient(false)
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
var resp grpc.ClientStream
if lite {
resp, err = cli.ConnectFork(cmd.Context(), req)
@@ -116,7 +118,8 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
err = util.PrintGRPCStream[rpc.ConnectResponse](resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
err = disconnect(cli, bytes, ns, sshConf)
return err
}
return err
}
@@ -124,20 +127,8 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
util.Print(os.Stdout, config.Slogan)
} else {
<-cmd.Context().Done()
disconnect, err := cli.Disconnect(context.Background(), &rpc.DisconnectRequest{
KubeconfigBytes: ptr.To(string(bytes)),
Namespace: ptr.To(ns),
SshJump: sshConf.ToRPC(),
})
err = disconnect(cli, bytes, ns, sshConf)
if err != nil {
log.Errorf("Disconnect error: %v", err)
return err
}
err = util.PrintGRPCStream[rpc.DisconnectResponse](disconnect)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
}
return err
}
_, _ = fmt.Fprint(os.Stdout, "Disconnect completed")
@@ -148,8 +139,29 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName, &connect.Engine)
cmd.Flags().BoolVar(&foreground, "foreground", false, "Hang up")
cmd.Flags().BoolVar(&lite, "lite", false, "connect to multiple cluster in lite mode. mode \"lite\": design for only connecting to multiple cluster network. mode \"full\": not only connect to cluster network, it also supports proxy workloads inbound traffic to local PC.")
cmd.Flags().StringVar(&managerNamespace, "manager-namespace", "", "The namespace where the traffic manager is to be found. Only works in cluster mode (install kubevpn server by helm)")
handler.AddExtraRoute(cmd.Flags(), extraRoute)
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
return cmd
}
// disconnect tells the daemon to tear down the connection identified by the
// given kubeconfig bytes, namespace and ssh jump configuration, then prints
// the streamed progress. A Canceled stream status is treated as success.
func disconnect(cli rpc.DaemonClient, bytes []byte, ns string, sshConf *pkgssh.SshConfig) error {
	stream, err := cli.Disconnect(context.Background(), &rpc.DisconnectRequest{
		KubeconfigBytes: ptr.To(string(bytes)),
		Namespace:       ptr.To(ns),
		SshJump:         sshConf.ToRPC(),
	})
	if err != nil {
		plog.G(context.Background()).Errorf("Disconnect error: %v", err)
		return err
	}
	if err = util.PrintGRPCStream[rpc.DisconnectResponse](stream); err != nil {
		if status.Code(err) == codes.Canceled {
			return nil
		}
		return err
	}
	return nil
}

View File

@@ -1,9 +1,10 @@
package cmds
import (
"context"
"github.com/docker/docker/libnetwork/resolvconf"
miekgdns "github.com/miekg/dns"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
@@ -12,10 +13,11 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/controlplane"
"github.com/wencaiwulue/kubevpn/v2/pkg/dns"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdControlPlane(_ cmdutil.Factory) *cobra.Command {
func CmdControlPlane(cmdutil.Factory) *cobra.Command {
var (
watchDirectoryFilename string
port uint = 9002
@@ -28,16 +30,15 @@ func CmdControlPlane(_ cmdutil.Factory) *cobra.Command {
Control-plane is a envoy xds server, distribute envoy route configuration
`)),
RunE: func(cmd *cobra.Command, args []string) error {
util.InitLoggerForServer(config.Debug)
go util.StartupPProfForServer(0)
go func() {
conf, err := miekgdns.ClientConfigFromFile(resolvconf.Path())
if err != nil {
log.Fatal(err)
plog.G(context.Background()).Fatal(err)
}
log.Fatal(dns.ListenAndServe("udp", ":53", conf))
plog.G(context.Background()).Fatal(dns.ListenAndServe("udp", ":53", conf))
}()
err := controlplane.Main(cmd.Context(), watchDirectoryFilename, port, log.StandardLogger())
err := controlplane.Main(cmd.Context(), watchDirectoryFilename, port, plog.G(context.Background()))
return err
},
}

View File

@@ -16,12 +16,11 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/action"
"github.com/wencaiwulue/kubevpn/v2/pkg/dns"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdDaemon(_ cmdutil.Factory) *cobra.Command {
func CmdDaemon(cmdutil.Factory) *cobra.Command {
var opt = &daemon.SvrOption{}
cmd := &cobra.Command{
Use: "daemon",
@@ -42,7 +41,7 @@ func CmdDaemon(_ cmdutil.Factory) *cobra.Command {
} else {
go util.StartupPProf(config.PProfPort)
}
return initLogfile(action.GetDaemonLogPath())
return initLogfile(config.GetDaemonLogPath(opt.IsSudo))
},
RunE: func(cmd *cobra.Command, args []string) (err error) {
defer opt.Stop()
@@ -53,7 +52,7 @@ func CmdDaemon(_ cmdutil.Factory) *cobra.Command {
if opt.IsSudo {
for _, profile := range pprof.Profiles() {
func() {
file, e := os.Create(filepath.Join(config.PprofPath, profile.Name()))
file, e := os.Create(filepath.Join(config.GetPProfPath(), profile.Name()))
if e != nil {
return
}

View File

@@ -1,11 +1,11 @@
package cmds
import (
"context"
"fmt"
"os"
"github.com/docker/cli/cli/command"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/completion"
@@ -16,8 +16,8 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/dev"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
)
@@ -29,6 +29,7 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
var sshConf = &pkgssh.SshConfig{}
var transferImage bool
var imagePullSecretName string
var managerNamespace string
cmd := &cobra.Command{
Use: "dev TYPE/NAME [-c CONTAINER] [flags] -- [args...]",
Short: i18n.T("Startup your kubernetes workloads in local Docker container"),
@@ -89,7 +90,7 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
if err != nil {
return err
}
util.InitLoggerForClient(config.Debug)
plog.InitLoggerForClient()
err = daemon.StartupDaemon(cmd.Context())
if err != nil {
return err
@@ -115,7 +116,7 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
for _, function := range options.GetRollbackFuncList() {
if function != nil {
if err := function(); err != nil {
log.Errorf("Rollback failed, error: %s", err.Error())
plog.G(context.Background()).Errorf("Rollback failed, error: %s", err.Error())
}
}
}
@@ -130,7 +131,7 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
return err
}
return options.Main(cmd.Context(), sshConf, conf, hostConfig, imagePullSecretName)
return options.Main(cmd.Context(), sshConf, conf, hostConfig, imagePullSecretName, managerNamespace)
},
}
cmd.Flags().SortFlags = false
@@ -140,6 +141,7 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc("container", completion.ContainerCompletionFunc(f)))
cmd.Flags().StringVar((*string)(&options.ConnectMode), "connect-mode", string(dev.ConnectModeHost), "Connect to kubernetes network in container or in host, eg: ["+string(dev.ConnectModeContainer)+"|"+string(dev.ConnectModeHost)+"]")
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName, &options.Engine)
cmd.Flags().StringVar(&managerNamespace, "manager-namespace", "", "The namespace where the traffic manager is to be found. Only works in cluster mode (install kubevpn server by helm)")
// diy docker options
cmd.Flags().StringVar(&options.DevImage, "dev-image", "", "Use to startup docker container, Default is pod image")

View File

@@ -15,6 +15,7 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
@@ -37,7 +38,7 @@ func CmdDisconnect(f cmdutil.Factory) *cobra.Command {
kubevpn disconnect
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
util.InitLoggerForClient(false)
plog.InitLoggerForClient()
err = daemon.StartupDaemon(cmd.Context())
return err
},
@@ -60,7 +61,11 @@ func CmdDisconnect(f cmdutil.Factory) *cobra.Command {
}
ids = pointer.Int32(int32(integer))
}
client, err := daemon.GetClient(false).Disconnect(
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
client, err := cli.Disconnect(
cmd.Context(),
&rpc.DisconnectRequest{
ID: ids,

View File

@@ -49,7 +49,11 @@ func CmdGet(f cmdutil.Factory) *cobra.Command {
if err != nil {
return err
}
client, err := daemon.GetClient(true).Get(
cli, err := daemon.GetClient(true)
if err != nil {
return err
}
client, err := cli.Get(
cmd.Context(),
&rpc.GetRequest{
Namespace: ns,

View File

@@ -0,0 +1,43 @@
package cmds
import (
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
)
// CmdImageCopy builds the `kubevpn image` command group. It currently holds a
// single subcommand, `copy` (alias `cp`), which copies or re-tags a container
// image between registries via regctl.
func CmdImageCopy(cmdutil.Factory) *cobra.Command {
	root := &cobra.Command{
		Use:   "image <cmd>",
		Short: "copy images",
	}
	cp := &cobra.Command{
		Use:     "copy <src_image_ref> <dst_image_ref>",
		Aliases: []string{"cp"},
		Short:   "copy or re-tag image",
		Long: `Copy or re-tag an image. This works between registries and only pulls layers
that do not exist at the target. In the same registry it attempts to mount
the layers between repositories. And within the same repository it only
sends the manifest with the new tag.`,
		Example: `
# copy an image
kubevpn image copy ghcr.io/kubenetworks/kubevpn:latest registry.example.org/kubevpn/kubevpn:latest

# re-tag an image
kubevpn image copy ghcr.io/kubenetworks/kubevpn:latest ghcr.io/kubenetworks/kubevpn:v2.3.4`,
		// Exactly two positional arguments: source ref and destination ref.
		Args: cobra.MatchAll(cobra.ExactArgs(2), cobra.OnlyValidArgs),
		PreRunE: func(cmd *cobra.Command, args []string) error {
			plog.InitLoggerForClient()
			return nil
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			return regctl.TransferImageWithRegctl(cmd.Context(), args[0], args[1])
		},
	}
	root.AddCommand(cp)
	return root
}

View File

@@ -34,13 +34,22 @@ func CmdLeave(f cmdutil.Factory) *cobra.Command {
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
leave, err := daemon.GetClient(false).Leave(cmd.Context(), &rpc.LeaveRequest{
_, ns, err := util.ConvertToKubeConfigBytes(f)
if err != nil {
return err
}
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
resp, err := cli.Leave(cmd.Context(), &rpc.LeaveRequest{
Namespace: ns,
Workloads: args,
})
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.LeaveResponse](leave)
err = util.PrintGRPCStream[rpc.LeaveResponse](resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil

View File

@@ -25,7 +25,11 @@ func CmdList(f cmdutil.Factory) *cobra.Command {
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
client, err := daemon.GetClient(true).List(
cli, err := daemon.GetClient(true)
if err != nil {
return err
}
client, err := cli.List(
cmd.Context(),
&rpc.ListRequest{},
)

View File

@@ -10,6 +10,7 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
@@ -28,12 +29,16 @@ func CmdLogs(f cmdutil.Factory) *cobra.Command {
kubevpn logs -f
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
util.InitLoggerForClient(false)
plog.InitLoggerForClient()
// startup daemon process and sudo process
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
client, err := daemon.GetClient(true).Logs(cmd.Context(), req)
cli, err := daemon.GetClient(true)
if err != nil {
return err
}
client, err := cli.Logs(cmd.Context(), req)
if err != nil {
return err
}

View File

@@ -18,17 +18,21 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
)
func CmdProxy(f cmdutil.Factory) *cobra.Command {
var headers = make(map[string]string)
var portmap []string
var connect = handler.ConnectOptions{}
var extraRoute = &handler.ExtraRouteInfo{}
var sshConf = &pkgssh.SshConfig{}
var transferImage, foreground bool
var imagePullSecretName string
var managerNamespace string
cmd := &cobra.Command{
Use: "proxy",
Short: i18n.T("Proxy kubernetes workloads inbound traffic into local PC"),
@@ -88,7 +92,7 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
kubevpn proxy deployment/productpage
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
util.InitLoggerForClient(false)
plog.InitLoggerForClient()
if err = daemon.StartupDaemon(cmd.Context()); err != nil {
return err
}
@@ -118,18 +122,17 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
}
}
// todo 将 doConnect 方法封装?内部使用 client 发送到daemon
cli := daemon.GetClient(false)
logLevel := log.InfoLevel
if config.Debug {
logLevel = log.DebugLevel
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
client, err := cli.Proxy(
resp, err := cli.Proxy(
cmd.Context(),
&rpc.ConnectRequest{
&rpc.ProxyRequest{
KubeconfigBytes: string(bytes),
Namespace: ns,
Headers: connect.Headers,
PortMap: connect.PortMap,
Headers: headers,
PortMap: portmap,
Workloads: args,
ExtraRoute: extraRoute.ToRPC(),
Engine: string(connect.Engine),
@@ -137,17 +140,19 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
TransferImage: transferImage,
Image: config.Image,
ImagePullSecretName: imagePullSecretName,
Level: int32(logLevel),
Level: int32(util.If(config.Debug, log.DebugLevel, log.InfoLevel)),
OriginKubeconfigPath: util.GetKubeConfigPath(f),
ManagerNamespace: managerNamespace,
},
)
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.ConnectResponse](client)
err = util.PrintGRPCStream[rpc.ConnectResponse](resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
err = leave(cli, ns, args)
return err
}
return err
}
@@ -157,30 +162,40 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
// leave from cluster resources
<-cmd.Context().Done()
stream, err := cli.Leave(context.Background(), &rpc.LeaveRequest{
Workloads: args,
})
err = leave(cli, ns, args)
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.LeaveResponse](stream)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
}
return err
}
}
return nil
},
}
cmd.Flags().StringToStringVarP(&connect.Headers, "headers", "H", map[string]string{}, "Traffic with special headers (use `and` to match all headers) with reverse it to local PC, If not special, redirect all traffic to local PC. format: <KEY>=<VALUE> eg: --headers foo=bar --headers env=dev")
cmd.Flags().StringArrayVar(&connect.PortMap, "portmap", []string{}, "Port map, map container port to local port, format: [tcp/udp]/containerPort:localPort, If not special, localPort will use containerPort. eg: tcp/80:8080 or udp/5000:5001 or 80 or 80:8080")
cmd.Flags().StringToStringVarP(&headers, "headers", "H", map[string]string{}, "Traffic with special headers (use `and` to match all headers) with reverse it to local PC, If not special, redirect all traffic to local PC. format: <KEY>=<VALUE> eg: --headers foo=bar --headers env=dev")
cmd.Flags().StringArrayVar(&portmap, "portmap", []string{}, "Port map, map container port to local port, format: [tcp/udp]/containerPort:localPort, If not special, localPort will use containerPort. eg: tcp/80:8080 or udp/5000:5001 or 80 or 80:8080")
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName, &connect.Engine)
cmd.Flags().BoolVar(&foreground, "foreground", false, "foreground hang up")
cmd.Flags().StringVar(&managerNamespace, "manager-namespace", "", "The namespace where the traffic manager is to be found. Only works in cluster mode (install kubevpn server by helm)")
handler.AddExtraRoute(cmd.Flags(), extraRoute)
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
cmd.ValidArgsFunction = utilcomp.ResourceTypeAndNameCompletionFunc(f)
return cmd
}
// leave asks the daemon to detach the given workloads in namespace ns and
// streams the daemon's progress output to the terminal. A gRPC Canceled
// status surfaced by the stream is treated as a normal, successful exit.
func leave(cli rpc.DaemonClient, ns string, args []string) error {
	req := &rpc.LeaveRequest{
		Namespace: ns,
		Workloads: args,
	}
	resp, err := cli.Leave(context.Background(), req)
	if err != nil {
		return err
	}
	// Forward the server's streamed log lines; ignore client-side cancellation.
	if err = util.PrintGRPCStream[rpc.LeaveResponse](resp); err != nil && status.Code(err) != codes.Canceled {
		return err
	}
	return nil
}

View File

@@ -29,8 +29,8 @@ func CmdQuit(f cmdutil.Factory) *cobra.Command {
kubevpn quit
`)),
RunE: func(cmd *cobra.Command, args []string) error {
_ = quit(cmd.Context(), false)
_ = quit(cmd.Context(), true)
_ = quit(cmd.Context(), false)
util.CleanExtensionLib()
_, _ = fmt.Fprint(os.Stdout, "Exited")
return nil
@@ -40,9 +40,9 @@ func CmdQuit(f cmdutil.Factory) *cobra.Command {
}
func quit(ctx context.Context, isSudo bool) error {
cli := daemon.GetClient(isSudo)
if cli == nil {
return nil
cli, err := daemon.GetClient(isSudo)
if err != nil {
return err
}
client, err := cli.Quit(ctx, &rpc.QuitRequest{})
if err != nil {

View File

@@ -32,13 +32,17 @@ func CmdRemove(f cmdutil.Factory) *cobra.Command {
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
leave, err := daemon.GetClient(false).Remove(cmd.Context(), &rpc.RemoveRequest{
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
resp, err := cli.Remove(cmd.Context(), &rpc.RemoveRequest{
Workloads: args,
})
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.RemoveResponse](leave)
err = util.PrintGRPCStream[rpc.RemoveResponse](resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil

View File

@@ -10,6 +10,7 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
@@ -46,7 +47,7 @@ func CmdReset(f cmdutil.Factory) *cobra.Command {
kubevpn reset deployment/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`)),
PreRunE: func(cmd *cobra.Command, args []string) error {
util.InitLoggerForClient(false)
plog.InitLoggerForClient()
return daemon.StartupDaemon(cmd.Context())
},
Args: cobra.MatchAll(cobra.ExactArgs(1)),
@@ -55,7 +56,10 @@ func CmdReset(f cmdutil.Factory) *cobra.Command {
if err != nil {
return err
}
cli := daemon.GetClient(false)
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
req := &rpc.ResetRequest{
KubeconfigBytes: string(bytes),
Namespace: ns,

View File

@@ -31,7 +31,7 @@ func NewKubeVPNCommand() *cobra.Command {
}
flags := cmd.PersistentFlags()
configFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
configFlags := genericclioptions.NewConfigFlags(true)
configFlags.WrapConfigFn = func(c *rest.Config) *rest.Config {
if path, ok := os.LookupEnv(config.EnvSSHJump); ok {
kubeconfigBytes, err := os.ReadFile(path)
@@ -61,7 +61,7 @@ func NewKubeVPNCommand() *cobra.Command {
CmdDev(factory),
// Hidden, Server Commands (DO NOT USE IT !!!)
CmdControlPlane(factory),
CmdServe(factory),
CmdServer(factory),
CmdDaemon(factory),
CmdWebhook(factory),
CmdSyncthing(factory),
@@ -77,6 +77,7 @@ func NewKubeVPNCommand() *cobra.Command {
CmdConfig(factory),
CmdSSH(factory),
CmdSSHDaemon(factory),
CmdImageCopy(factory),
CmdLogs(factory),
CmdCp(factory),
CmdReset(factory),

View File

@@ -1,62 +0,0 @@
package cmds
import (
"math/rand"
"os"
"runtime"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"go.uber.org/automaxprocs/maxprocs"
glog "gvisor.dev/gvisor/pkg/log"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/core"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
// CmdServe builds the hidden "serve" command: the in-cluster/server-side
// entry point that starts the traffic manager and forwards inbound and
// outbound traffic. The cmdutil.Factory argument is unused.
func CmdServe(_ cmdutil.Factory) *cobra.Command {
// route accumulates the listener nodes (-L/--node) and the optional
// forward chain (-F/--chain) parsed from the flags registered below.
var route = &core.Route{}
cmd := &cobra.Command{
Use: "serve",
Hidden: true,
Short: "Server side, startup traffic manager, forward inbound and outbound traffic",
Long: templates.LongDesc(i18n.T(`
Server side, startup traffic manager, forward inbound and outbound traffic.
`)),
Example: templates.Examples(i18n.T(`
# serve node
kubevpn serve -L "tcp://:10800" -L "tun://127.0.0.1:8422?net=223.254.0.123/32"
`)),
PreRun: func(*cobra.Command, []string) {
// Server-side logger honours the --debug flag bound below.
util.InitLoggerForServer(config.Debug)
// NOTE(review): GOMAXPROCS(0) only queries the current value and does not
// change it — presumably a leftover; maxprocs.Set in RunE does the tuning.
runtime.GOMAXPROCS(0)
go util.StartupPProfForServer(config.PProfPort)
// Route gVisor's internal logging into this process's stderr emitter.
glog.SetTarget(util.ServerEmitter{Writer: &glog.Writer{Next: os.Stderr}})
},
RunE: func(cmd *cobra.Command, args []string) error {
// NOTE(review): rand.Seed is deprecated since Go 1.20; harmless here but
// could be dropped once the module's Go version allows.
rand.Seed(time.Now().UnixNano())
// Align GOMAXPROCS with the container CPU quota (no-op logger).
_, _ = maxprocs.Set(maxprocs.Logger(nil))
ctx := cmd.Context()
// Fill in any derived route fields before parsing listeners.
err := handler.Complete(ctx, route)
if err != nil {
return err
}
servers, err := handler.Parse(*route)
if err != nil {
log.Errorf("Parse server failed: %v", err)
return err
}
// Blocks until the servers stop or ctx is cancelled.
return handler.Run(ctx, servers)
},
}
cmd.Flags().StringArrayVarP(&route.ServeNodes, "node", "L", []string{}, "Startup node server. eg: tcp://localhost:1080")
cmd.Flags().StringVarP(&route.ChainNode, "chain", "F", "", "Forward chain. eg: tcp://192.168.1.100:2345")
cmd.Flags().BoolVar(&config.Debug, "debug", false, "Enable debug log or not")
return cmd
}

View File

@@ -0,0 +1,60 @@
package cmds
import (
"math/rand"
"os"
"runtime"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"go.uber.org/automaxprocs/maxprocs"
glog "gvisor.dev/gvisor/pkg/log"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/core"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
// CmdServer builds the hidden "server" command: the server-side entry point
// that starts the traffic manager and forwards inbound and outbound traffic.
// The cmdutil.Factory parameter is accepted for signature symmetry and unused.
func CmdServer(cmdutil.Factory) *cobra.Command {
// route accumulates the listeners (-l/--listener) and the optional
// forwarder (-f/--forwarder) parsed from the flags registered below.
var route = &core.Route{}
cmd := &cobra.Command{
Use: "server",
Hidden: true,
Short: "Server side, startup traffic manager, forward inbound and outbound traffic",
Long: templates.LongDesc(i18n.T(`
Server side, startup traffic manager, forward inbound and outbound traffic.
`)),
Example: templates.Examples(i18n.T(`
# server listener
kubevpn server -l "tcp://:10800" -l "tun://127.0.0.1:8422?net=198.19.0.123/32"
`)),
PreRun: func(*cobra.Command, []string) {
// NOTE(review): GOMAXPROCS(0) only queries the current value and does not
// change it; maxprocs.Set in RunE performs the actual tuning.
runtime.GOMAXPROCS(0)
go util.StartupPProfForServer(config.PProfPort)
// Route gVisor's internal logging into this process's stderr emitter.
glog.SetTarget(plog.ServerEmitter{Writer: &glog.Writer{Next: os.Stderr}})
},
RunE: func(cmd *cobra.Command, args []string) error {
// NOTE(review): rand.Seed is deprecated since Go 1.20; harmless but
// removable once the module's Go version allows.
rand.Seed(time.Now().UnixNano())
// Align GOMAXPROCS with the container CPU quota (no-op logger).
_, _ = maxprocs.Set(maxprocs.Logger(nil))
ctx := cmd.Context()
// Dedicated server logger; level follows the --debug flag bound below.
logger := plog.InitLoggerForServer()
logger.SetLevel(util.If(config.Debug, log.DebugLevel, log.InfoLevel))
servers, err := handler.Parse(*route)
if err != nil {
plog.G(ctx).Errorf("Parse server failed: %v", err)
return err
}
// Attach the logger to the context and block until the servers stop.
return handler.Run(plog.WithLogger(ctx, logger), servers)
},
}
cmd.Flags().StringArrayVarP(&route.Listeners, "listener", "l", []string{}, "Startup listener server. eg: tcp://localhost:1080")
cmd.Flags().StringVarP(&route.Forwarder, "forwarder", "f", "", "Special forwarder. eg: tcp://192.168.1.100:2345")
cmd.Flags().BoolVar(&config.Debug, "debug", false, "Enable debug log or not")
return cmd
}

View File

@@ -6,9 +6,10 @@ import (
"fmt"
"io"
"os"
"strings"
"github.com/containerd/containerd/platforms"
"github.com/google/uuid"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/net/websocket"
@@ -19,15 +20,18 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
// CmdSSH
// Remember to use network mask 32, because ssh using unique network CIDR 223.255.0.0/16
func CmdSSH(_ cmdutil.Factory) *cobra.Command {
// Remember to use network mask 32, because ssh using unique network CIDR 198.18.0.0/16
func CmdSSH(cmdutil.Factory) *cobra.Command {
var sshConf = &pkgssh.SshConfig{}
var extraCIDR []string
var platform string
var lite bool
cmd := &cobra.Command{
Use: "ssh",
Short: "Ssh to jump server",
@@ -50,10 +54,14 @@ func CmdSSH(_ cmdutil.Factory) *cobra.Command {
kubevpn ssh --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`)),
PreRunE: func(cmd *cobra.Command, args []string) error {
util.InitLoggerForClient(false)
plog.InitLoggerForClient()
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
plat, err := platforms.Parse(platform)
if err != nil {
return err
}
config, err := websocket.NewConfig("ws://test/ws", "http://test")
if err != nil {
return err
@@ -62,11 +70,6 @@ func CmdSSH(_ cmdutil.Factory) *cobra.Command {
if !terminal.IsTerminal(fd) {
return fmt.Errorf("stdin is not a terminal")
}
state, err := terminal.MakeRaw(fd)
if err != nil {
return fmt.Errorf("terminal make raw: %s", err)
}
defer terminal.Restore(fd, state)
width, height, err := terminal.GetSize(fd)
if err != nil {
return fmt.Errorf("terminal get size: %s", err)
@@ -77,13 +80,15 @@ func CmdSSH(_ cmdutil.Factory) *cobra.Command {
ExtraCIDR: extraCIDR,
Width: width,
Height: height,
Platform: platforms.Format(platforms.Normalize(plat)),
SessionID: sessionID,
Lite: lite,
}
bytes, err := json.Marshal(ssh)
marshal, err := json.Marshal(ssh)
if err != nil {
return err
}
config.Header.Set("ssh", string(bytes))
config.Header.Set("ssh", string(marshal))
client := daemon.GetTCPClient(true)
if client == nil {
return fmt.Errorf("client is nil")
@@ -98,17 +103,44 @@ func CmdSSH(_ cmdutil.Factory) *cobra.Command {
go func() {
errChan <- monitorSize(cmd.Context(), sessionID)
}()
readyCtx, cancelFunc := context.WithCancel(cmd.Context())
checker := func(log string) bool {
isReady := strings.Contains(log, fmt.Sprintf(handler.SshTerminalReadyFormat, sessionID))
if isReady {
cancelFunc()
}
return isReady
}
var state *terminal.State
go func() {
select {
case <-cmd.Context().Done():
return
case <-readyCtx.Done():
}
if state, err = terminal.MakeRaw(fd); err != nil {
plog.G(context.Background()).Errorf("terminal make raw: %s", err)
}
}()
go func() {
_, err := io.Copy(conn, os.Stdin)
errChan <- err
}()
go func() {
_, err := io.Copy(os.Stdout, conn)
_, err := io.Copy(io.MultiWriter(os.Stdout, util.NewWriter(checker)), conn)
errChan <- err
}()
defer func() {
if state != nil {
terminal.Restore(fd, state)
}
}()
select {
case err = <-errChan:
case err := <-errChan:
return err
case <-cmd.Context().Done():
return cmd.Context().Err()
@@ -117,6 +149,8 @@ func CmdSSH(_ cmdutil.Factory) *cobra.Command {
}
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
cmd.Flags().StringArrayVar(&extraCIDR, "extra-cidr", []string{}, "Extra network CIDR string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
cmd.Flags().StringVar(&platform, "platform", util.If(os.Getenv("KUBEVPN_DEFAULT_PLATFORM") != "", os.Getenv("KUBEVPN_DEFAULT_PLATFORM"), "linux/amd64"), "Set ssh server platform if needs to install command kubevpn")
cmd.Flags().BoolVar(&lite, "lite", false, "connect to ssh server in lite mode. mode \"lite\": design for only connect to ssh server. mode \"full\": not only connect to ssh server, it also create a two-way tunnel communicate with inner ip")
return cmd
}
@@ -153,7 +187,7 @@ func monitorSize(ctx context.Context, sessionID string) error {
return nil
}
if err = encoder.Encode(&size); err != nil {
log.Errorf("Encode resize: %s", err)
plog.G(ctx).Errorf("Encode resize: %s", err)
return err
}
}

View File

@@ -14,8 +14,8 @@ import (
)
// CmdSSHDaemon
// set local tun ip 223.254.0.1/32, remember to use mask 32
func CmdSSHDaemon(_ cmdutil.Factory) *cobra.Command {
// set local tun ip 198.19.0.1/32, remember to use mask 32
func CmdSSHDaemon(cmdutil.Factory) *cobra.Command {
var clientIP string
cmd := &cobra.Command{
Use: "ssh-daemon",
@@ -24,14 +24,18 @@ func CmdSSHDaemon(_ cmdutil.Factory) *cobra.Command {
Long: templates.LongDesc(i18n.T(`Ssh daemon server`)),
Example: templates.Examples(i18n.T(`
# SSH daemon server
kubevpn ssh-daemon --client-ip 223.254.0.123/32
kubevpn ssh-daemon --client-ip 198.19.0.123/32
`)),
PreRunE: func(cmd *cobra.Command, args []string) error {
err := daemon.StartupDaemon(cmd.Context())
return err
},
RunE: func(cmd *cobra.Command, args []string) error {
client, err := daemon.GetClient(true).SshStart(
cli, err := daemon.GetClient(true)
if err != nil {
return err
}
resp, err := cli.SshStart(
cmd.Context(),
&rpc.SshStartRequest{
ClientIP: clientIP,
@@ -40,7 +44,7 @@ func CmdSSHDaemon(_ cmdutil.Factory) *cobra.Command {
if err != nil {
return err
}
_, err = fmt.Fprint(os.Stdout, client.ServerIP)
_, err = fmt.Fprint(os.Stdout, resp.ServerIP)
return err
},
}

View File

@@ -21,6 +21,7 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
@@ -60,7 +61,7 @@ func CmdStatus(f cmdutil.Factory) *cobra.Command {
kubevpn status -o yaml
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
util.InitLoggerForClient(false)
plog.InitLoggerForClient()
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
@@ -70,8 +71,8 @@ func CmdStatus(f cmdutil.Factory) *cobra.Command {
if err != nil {
return err
}
for _, config := range configs {
clusterID, err := GetClusterIDByConfig(cmd, config)
for _, conf := range configs {
clusterID, err := GetClusterIDByConfig(cmd, conf)
if err != nil {
return err
}
@@ -79,7 +80,11 @@ func CmdStatus(f cmdutil.Factory) *cobra.Command {
}
}
resp, err := daemon.GetClient(false).Status(
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
resp, err := cli.Status(
cmd.Context(),
&rpc.StatusRequest{
ClusterIDs: clusterIDs,
@@ -97,7 +102,7 @@ func CmdStatus(f cmdutil.Factory) *cobra.Command {
},
}
cmd.Flags().StringVar(&aliasName, "alias", "", "Alias name, query connect status by alias config name")
cmd.Flags().StringVarP(&localFile, "file", "f", config.GetConfigFilePath(), "Config file location")
cmd.Flags().StringVarP(&localFile, "kubevpnconfig", "f", util.If(os.Getenv("KUBEVPNCONFIG") != "", os.Getenv("KUBEVPNCONFIG"), config.GetConfigFile()), "Path to the kubevpnconfig file to use for CLI requests.")
cmd.Flags().StringVarP(&remoteAddr, "remote", "r", "", "Remote config file, eg: https://raw.githubusercontent.com/kubenetworks/kubevpn/master/pkg/config/config.yaml")
cmd.Flags().StringVarP(&format, "output", "o", FormatTable, fmt.Sprintf("Output format. One of: (%s, %s, %s)", FormatJson, FormatYaml, FormatTable))
return cmd
@@ -156,7 +161,7 @@ func genProxyMsg(w *tabwriter.Writer, list []*rpc.Status) {
_, _ = fmt.Fprintf(w, "\n")
w.SetRememberedWidths(nil)
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", "ID", "Name", "Headers", "IP", "PortMap", "CurrentPC")
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", "ID", "Namespace", "Name", "Headers", "IP", "PortMap", "CurrentPC")
for _, c := range list {
for _, proxy := range c.ProxyList {
for _, rule := range proxy.RuleList {
@@ -171,8 +176,9 @@ func genProxyMsg(w *tabwriter.Writer, list []*rpc.Status) {
for k, v := range rule.PortMap {
portmap = append(portmap, fmt.Sprintf("%d->%d", k, v))
}
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%v\n",
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%s\t%v\n",
c.ID,
proxy.Namespace,
proxy.Workload,
strings.Join(headers, ","),
rule.LocalTunIPv4,
@@ -198,7 +204,7 @@ func genCloneMsg(w *tabwriter.Writer, list []*rpc.Status) {
_, _ = fmt.Fprintf(w, "\n")
w.SetRememberedWidths(nil)
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", "ID", "Name", "Headers", "ToName", "ToKubeconfig", "ToNamespace", "SyncthingGUI")
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n", "ID", "Namespace", "Name", "Headers", "ToName", "ToKubeconfig", "ToNamespace", "SyncthingGUI")
for _, c := range list {
for _, clone := range c.CloneList {
//_, _ = fmt.Fprintf(w, "%s\n", clone.Workload)
@@ -210,8 +216,9 @@ func genCloneMsg(w *tabwriter.Writer, list []*rpc.Status) {
if len(headers) == 0 {
headers = []string{"*"}
}
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%s\t%s\n",
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
c.ID,
clone.Namespace,
clone.Workload,
strings.Join(headers, ","),
rule.DstWorkload,
@@ -229,7 +236,7 @@ func GetClusterIDByConfig(cmd *cobra.Command, config Config) (string, error) {
var sshConf = &pkgssh.SshConfig{}
pkgssh.AddSshFlags(flags, sshConf)
handler.AddExtraRoute(flags, &handler.ExtraRouteInfo{})
configFlags := genericclioptions.NewConfigFlags(false).WithDeprecatedPasswordFlag()
configFlags := genericclioptions.NewConfigFlags(true)
configFlags.AddFlags(flags)
matchVersionFlags := cmdutil.NewMatchVersionFlags(&warp{ConfigFlags: configFlags})
matchVersionFlags.AddFlags(flags)

View File

@@ -29,8 +29,8 @@ func TestPrintProxyAndClone(t *testing.T) {
RuleList: []*rpc.ProxyRule{
{
Headers: map[string]string{"user": "naison"},
LocalTunIPv4: "223.254.0.103",
LocalTunIPv6: "efff:ffff:ffff:ffff:ffff:ffff:ffff:999d",
LocalTunIPv4: "198.19.0.103",
LocalTunIPv6: "2001:2::999d",
CurrentDevice: false,
PortMap: map[int32]int32{8910: 8910},
},
@@ -98,8 +98,8 @@ func TestPrintProxy(t *testing.T) {
RuleList: []*rpc.ProxyRule{
{
Headers: map[string]string{"user": "naison"},
LocalTunIPv4: "223.254.0.103",
LocalTunIPv6: "efff:ffff:ffff:ffff:ffff:ffff:ffff:999d",
LocalTunIPv4: "198.19.0.103",
LocalTunIPv6: "2001:2::999d",
CurrentDevice: false,
PortMap: map[int32]int32{8910: 8910},
},

View File

@@ -10,7 +10,7 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdSyncthing(_ cmdutil.Factory) *cobra.Command {
func CmdSyncthing(cmdutil.Factory) *cobra.Command {
var detach bool
var dir string
cmd := &cobra.Command{

View File

@@ -1,7 +1,6 @@
package cmds
import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -12,6 +11,7 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
@@ -50,7 +50,7 @@ func CmdUninstall(f cmdutil.Factory) *cobra.Command {
kubevpn uninstall --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`)),
PreRunE: func(cmd *cobra.Command, args []string) error {
util.InitLoggerForClient(false)
plog.InitLoggerForClient()
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
@@ -58,16 +58,19 @@ func CmdUninstall(f cmdutil.Factory) *cobra.Command {
if err != nil {
return err
}
cli := daemon.GetClient(false)
disconnect, err := cli.Disconnect(cmd.Context(), &rpc.DisconnectRequest{
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
disconnectResp, err := cli.Disconnect(cmd.Context(), &rpc.DisconnectRequest{
KubeconfigBytes: ptr.To(string(bytes)),
Namespace: ptr.To(ns),
SshJump: sshConf.ToRPC(),
})
if err != nil {
log.Warnf("Failed to disconnect from cluter: %v", err)
plog.G(cmd.Context()).Warnf("Failed to disconnect from cluter: %v", err)
} else {
_ = util.PrintGRPCStream[rpc.DisconnectResponse](disconnect)
_ = util.PrintGRPCStream[rpc.DisconnectResponse](disconnectResp)
}
req := &rpc.UninstallRequest{

View File

@@ -12,11 +12,11 @@ import (
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/upgrade"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdUpgrade(_ cmdutil.Factory) *cobra.Command {
func CmdUpgrade(cmdutil.Factory) *cobra.Command {
cmd := &cobra.Command{
Use: "upgrade",
Short: i18n.T("Upgrade kubevpn client to latest version"),
@@ -29,7 +29,7 @@ func CmdUpgrade(_ cmdutil.Factory) *cobra.Command {
const (
envLatestUrl = "KUBEVPN_LATEST_VERSION_URL"
)
util.InitLoggerForClient(false)
plog.InitLoggerForClient()
var client = http.DefaultClient
if config.GitHubOAuthToken != "" {
client = oauth2.NewClient(cmd.Context(), oauth2.StaticTokenSource(&oauth2.Token{AccessToken: config.GitHubOAuthToken, TokenType: "Bearer"}))
@@ -44,13 +44,13 @@ func CmdUpgrade(_ cmdutil.Factory) *cobra.Command {
return err
}
if !needsUpgrade {
_, _ = fmt.Fprintf(os.Stdout, "Already up to date, don't needs to upgrade, version: %s", latestVersion)
_, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("Already up to date, don't needs to upgrade, version: %s", latestVersion))
return nil
}
_, _ = fmt.Fprintf(os.Stdout, "Current version is: %s less than latest version: %s, needs to upgrade", config.Version, latestVersion)
_, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("Current version is: %s less than latest version: %s, needs to upgrade", config.Version, latestVersion))
_ = os.Setenv(envLatestUrl, url)
_ = quit(cmd.Context(), false)
_ = quit(cmd.Context(), true)
_ = quit(cmd.Context(), false)
}
return upgrade.Main(cmd.Context(), client, url)
},

View File

@@ -64,12 +64,13 @@ func init() {
}
func getDaemonVersion() string {
cli := daemon.GetClient(false)
if cli != nil {
version, err := cli.Version(context.Background(), &rpc.VersionRequest{})
if err == nil {
return version.Version
}
cli, err := daemon.GetClient(false)
if err != nil {
return "unknown"
}
return "unknown"
version, err := cli.Version(context.Background(), &rpc.VersionRequest{})
if err != nil {
return "unknown"
}
return version.Version
}

View File

@@ -22,7 +22,6 @@ func CmdWebhook(f cmdutil.Factory) *cobra.Command {
`)),
Args: cobra.MaximumNArgs(0),
PreRun: func(cmd *cobra.Command, args []string) {
util.InitLoggerForServer(true)
go util.StartupPProfForServer(0)
},
RunE: func(cmd *cobra.Command, args []string) error {

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 372 KiB

318
go.mod
View File

@@ -4,22 +4,23 @@ go 1.23.2
require (
github.com/cilium/ipam v0.0.0-20230509084518-fd66eae7909b
github.com/containerd/containerd v1.7.14
github.com/containerd/containerd v1.7.27
github.com/containernetworking/cni v1.1.2
github.com/coredns/caddy v1.1.1
github.com/coredns/coredns v1.11.2
github.com/coredns/caddy v1.1.2-0.20241029205200-8de985351a98
github.com/coredns/coredns v1.12.1
github.com/distribution/reference v0.6.0
github.com/docker/cli v26.0.0+incompatible
github.com/docker/docker v26.1.4+incompatible
github.com/docker/cli v27.5.1+incompatible
github.com/docker/docker v27.5.1+incompatible
github.com/docker/go-connections v0.5.0
github.com/docker/go-units v0.5.0
github.com/docker/libcontainer v2.2.1+incompatible
github.com/envoyproxy/go-control-plane v0.12.0
github.com/fsnotify/fsnotify v1.7.0
github.com/envoyproxy/go-control-plane v0.13.4
github.com/envoyproxy/go-control-plane/envoy v1.32.4
github.com/fsnotify/fsnotify v1.8.0
github.com/gliderlabs/ssh v0.3.8
github.com/google/gopacket v1.1.19
github.com/google/uuid v1.6.0
github.com/hashicorp/go-version v1.6.0
github.com/hashicorp/go-version v1.7.0
github.com/hpcloud/tail v1.0.0
github.com/jcmturner/gofork v1.7.6
github.com/jcmturner/gokrb5/v8 v8.4.4
@@ -27,176 +28,226 @@ require (
github.com/libp2p/go-netroute v0.2.1
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de
github.com/mattbaird/jsonpatch v0.0.0-20240118010651-0ba75a80ca38
github.com/miekg/dns v1.1.58
github.com/moby/sys/signal v0.7.0
github.com/moby/term v0.5.0
github.com/opencontainers/image-spec v1.1.0
github.com/miekg/dns v1.1.64
github.com/moby/term v0.5.2
github.com/opencontainers/image-spec v1.1.1
github.com/pkg/errors v0.9.1
github.com/prometheus-community/pro-bing v0.4.0
github.com/regclient/regclient v0.8.0
github.com/schollz/progressbar/v3 v3.14.2
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
github.com/syncthing/syncthing v1.27.12
github.com/thejerf/suture/v4 v4.0.5
go.uber.org/automaxprocs v1.5.3
golang.org/x/crypto v0.32.0
golang.org/x/net v0.30.0
golang.org/x/oauth2 v0.21.0
golang.org/x/sync v0.10.0
golang.org/x/sys v0.29.0
golang.org/x/term v0.28.0
golang.org/x/text v0.21.0
golang.org/x/time v0.6.0
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.6
github.com/syncthing/syncthing v1.29.2
github.com/thejerf/suture/v4 v4.0.6
go.uber.org/automaxprocs v1.6.0
golang.org/x/crypto v0.37.0
golang.org/x/net v0.39.0
golang.org/x/oauth2 v0.28.0
golang.org/x/sys v0.32.0
golang.org/x/term v0.31.0
golang.org/x/text v0.24.0
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
golang.zx2c4.com/wireguard v0.0.0-20220920152132-bb719d3a6e2c
golang.zx2c4.com/wireguard/windows v0.5.3
google.golang.org/grpc v1.62.1
google.golang.org/protobuf v1.34.2
google.golang.org/grpc v1.71.1
google.golang.org/protobuf v1.36.6
gopkg.in/natefinch/lumberjack.v2 v2.2.1
gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987
k8s.io/api v0.31.0-alpha.0
k8s.io/apimachinery v0.31.0-alpha.0
k8s.io/cli-runtime v0.29.3
k8s.io/client-go v0.31.0-alpha.0
helm.sh/helm/v4 v4.0.0-20250324191910-0199b748aaea
k8s.io/api v0.32.3
k8s.io/apimachinery v0.32.3
k8s.io/cli-runtime v0.32.3
k8s.io/client-go v0.32.3
k8s.io/klog/v2 v2.130.1
k8s.io/kubectl v0.29.3
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
sigs.k8s.io/controller-runtime v0.18.4
sigs.k8s.io/kustomize/api v0.16.0
k8s.io/kubectl v0.32.3
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e
sigs.k8s.io/controller-runtime v0.20.4
sigs.k8s.io/kustomize/api v0.19.0
sigs.k8s.io/yaml v1.4.0
tailscale.com v1.74.1
)
require (
cel.dev/expr v0.15.0 // indirect
cloud.google.com/go/compute/metadata v0.3.0 // indirect
dario.cat/mergo v1.0.0 // indirect
cel.dev/expr v0.19.1 // indirect
cloud.google.com/go/auth v0.15.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect
cloud.google.com/go/compute/metadata v0.6.0 // indirect
dario.cat/mergo v1.0.1 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.29 // indirect
github.com/Azure/go-autorest/autorest v0.11.30 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect
github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/DataDog/appsec-internal-go v1.5.0 // indirect
github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.0 // indirect
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.0 // indirect
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
github.com/DataDog/appsec-internal-go v1.9.0 // indirect
github.com/DataDog/datadog-agent/pkg/obfuscate v0.58.0 // indirect
github.com/DataDog/datadog-agent/pkg/proto v0.58.0 // indirect
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.58.0 // indirect
github.com/DataDog/datadog-agent/pkg/trace v0.58.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/log v0.58.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/scrubber v0.58.0 // indirect
github.com/DataDog/datadog-go/v5 v5.5.0 // indirect
github.com/DataDog/go-libddwaf/v2 v2.4.2 // indirect
github.com/DataDog/go-sqllexer v0.0.11 // indirect
github.com/DataDog/go-libddwaf/v3 v3.5.1 // indirect
github.com/DataDog/go-runtime-metrics-internal v0.0.4-0.20241206090539-a14610dc22b6 // indirect
github.com/DataDog/go-sqllexer v0.0.14 // indirect
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/DataDog/sketches-go v1.4.4 // indirect
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 // indirect
github.com/DataDog/sketches-go v1.4.5 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Microsoft/hcsshim v0.12.2 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.3.0 // indirect
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
github.com/Masterminds/squirrel v1.5.4 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Microsoft/hcsshim v0.12.9 // indirect
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect
github.com/antonmedv/expr v1.15.5 // indirect
github.com/apparentlymart/go-cidr v1.1.0 // indirect
github.com/aws/aws-sdk-go v1.51.12 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aws/aws-sdk-go v1.55.6 // indirect
github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect
github.com/aws/aws-sdk-go-v2/config v1.29.9 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.62 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.35.2 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 // indirect
github.com/aws/smithy-go v1.22.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/calmh/incontainer v1.0.0 // indirect
github.com/calmh/xdr v1.2.0 // indirect
github.com/ccding/go-stun v0.1.5 // indirect
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect
github.com/cncf/xds/go v0.0.0-20240329184929-0c46c01016dc // indirect
github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
github.com/cilium/ebpf v0.16.0 // indirect
github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/dnstap/golang-dnstap v0.4.0 // indirect
github.com/docker/cli-docs-tool v0.9.0 // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.1 // indirect
github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/ebitengine/purego v0.7.0 // indirect
github.com/emicklei/go-restful/v3 v3.12.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect
github.com/evanphx/json-patch v5.9.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 // indirect
github.com/ebitengine/purego v0.8.3 // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/expr-lang/expr v1.17.2 // indirect
github.com/farsightsec/golang-framestream v0.3.0 // indirect
github.com/fatih/camelcase v1.0.0 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
github.com/fvbommel/sortorder v1.1.0 // indirect
github.com/fxamacker/cbor/v2 v2.6.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gaissmai/bart v0.11.1 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
github.com/go-errors/errors v1.5.1 // indirect
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 // indirect
github.com/go-ldap/ldap/v3 v3.4.8 // indirect
github.com/go-ldap/ldap/v3 v3.4.10 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonpointer v0.21.1 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-openapi/swag v0.23.1 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 // indirect
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/google/pprof v0.0.0-20250202011525-fc3143867406 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.3 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
github.com/googleapis/gax-go/v2 v2.14.1 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/gosuri/uitable v0.0.4 // indirect
github.com/greatroar/blobloom v0.8.0 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hdevalence/ed25519consensus v0.2.0 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
github.com/illarion/gonotify/v2 v2.0.3 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/infobloxopen/go-trees v0.0.0-20221216143356-66ceba885ebc // indirect
github.com/jackpal/gateway v1.0.15 // indirect
github.com/jackpal/gateway v1.0.16 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jmoiron/sqlx v1.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 // indirect
github.com/jsimonetti/rtnetlink v1.4.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect
github.com/mailru/easyjson v0.9.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mdlayher/netlink v1.7.2 // indirect
@@ -204,55 +255,64 @@ require (
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/spdystream v0.5.0 // indirect
github.com/moby/sys/sequential v0.6.0 // indirect
github.com/moby/sys/symlink v0.2.0 // indirect
github.com/moby/sys/user v0.1.0 // indirect
github.com/moby/sys/user v0.4.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/onsi/ginkgo/v2 v2.20.0 // indirect
github.com/onsi/ginkgo/v2 v2.22.2 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 // indirect
github.com/openzipkin/zipkin-go v0.4.2 // indirect
github.com/openzipkin/zipkin-go v0.4.3 // indirect
github.com/oschwald/geoip2-golang v1.11.0 // indirect
github.com/oschwald/maxminddb-golang v1.13.1 // indirect
github.com/outcaste-io/ristretto v0.2.3 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/philhofer/fwd v1.1.2 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/prometheus/client_golang v1.19.1 // indirect
github.com/prometheus/client_golang v1.21.1 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/quic-go/quic-go v0.46.0 // indirect
github.com/prometheus/common v0.63.0 // indirect
github.com/prometheus/procfs v0.16.0 // indirect
github.com/quic-go/quic-go v0.50.1 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rubenv/sql-migrate v1.7.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
github.com/shirou/gopsutil/v4 v4.24.7 // indirect
github.com/shirou/gopsutil/v3 v3.24.4 // indirect
github.com/shirou/gopsutil/v4 v4.25.1 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/spf13/cast v1.7.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/stretchr/testify v1.9.0 // indirect
github.com/stretchr/testify v1.10.0 // indirect
github.com/syncthing/notify v0.0.0-20210616190510-c6b7342338d2 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 // indirect
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/tinylib/msgp v1.1.9 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/tinylib/msgp v1.2.1 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.9.0 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0 // indirect
@@ -262,42 +322,56 @@ require (
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.etcd.io/etcd/api/v3 v3.5.13 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.13 // indirect
go.etcd.io/etcd/client/v3 v3.5.13 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
go.opentelemetry.io/otel v1.24.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect
go.opentelemetry.io/otel/metric v1.24.0 // indirect
go.opentelemetry.io/otel/sdk v1.22.0 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.starlark.net v0.0.0-20240329153429-e6e8e7ce1b7a // indirect
go.etcd.io/etcd/api/v3 v3.5.20 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.20 // indirect
go.etcd.io/etcd/client/v3 v3.5.20 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/collector/component v0.104.0 // indirect
go.opentelemetry.io/collector/config/configtelemetry v0.104.0 // indirect
go.opentelemetry.io/collector/pdata v1.11.0 // indirect
go.opentelemetry.io/collector/pdata/pprofile v0.104.0 // indirect
go.opentelemetry.io/collector/semconv v0.104.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
go.opentelemetry.io/otel v1.35.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect
go.opentelemetry.io/otel/metric v1.35.0 // indirect
go.opentelemetry.io/otel/sdk v1.35.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect
go.opentelemetry.io/otel/trace v1.35.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/mock v0.4.0 // indirect
go.uber.org/mock v0.5.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect
golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/tools v0.26.0 // indirect
golang.org/x/exp v0.0.0-20250207012021-f9890c6ad9f3 // indirect
golang.org/x/mod v0.23.0 // indirect
golang.org/x/sync v0.13.0 // indirect
golang.org/x/time v0.11.0 // indirect
golang.org/x/tools v0.30.0 // indirect
golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/api v0.172.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
gopkg.in/DataDog/dd-trace-go.v1 v1.62.0 // indirect
gopkg.in/evanphx/json-patch.v5 v5.9.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
google.golang.org/api v0.227.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250409194420-de1ac958c67a // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e // indirect
gopkg.in/DataDog/dd-trace-go.v1 v1.72.2 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.31.0-alpha.0 // indirect
k8s.io/component-base v0.31.0-alpha.0 // indirect
k8s.io/kube-openapi v0.0.0-20240322212309-b815d8309940 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/kustomize/kyaml v0.16.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
k8s.io/apiextensions-apiserver v0.32.3 // indirect
k8s.io/apiserver v0.32.3 // indirect
k8s.io/component-base v0.32.3 // indirect
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
oras.land/oras-go/v2 v2.5.0 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
)

847
go.sum

File diff suppressed because it is too large Load Diff

6
go.work Normal file
View File

@@ -0,0 +1,6 @@
go 1.23.2
use (
.
./pkg/syncthing/auto
)

667
go.work.sum Normal file
View File

@@ -0,0 +1,667 @@
4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs=
4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU=
cloud.google.com/go v0.112.2 h1:ZaGT6LiG7dBzi6zNOvVZwacaXlmf3lRqnC4DQzqyRQw=
cloud.google.com/go v0.112.2/go.mod h1:iEqjp//KquGIJV/m+Pk3xecgKNhV+ry+vVTsy4TbDms=
cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA=
cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q=
cloud.google.com/go/accessapproval v1.8.5/go.mod h1:aO61iJuMRAaugpD0rWgpwj9aXvWimCWTEbA/kYAFddE=
cloud.google.com/go/accesscontextmanager v1.9.5/go.mod h1:i6WSokkuePCT3jWwRzhge/pZicoErUBbDWjAUd8AoQU=
cloud.google.com/go/aiplatform v1.81.0/go.mod h1:uwLaCFXLvVnKzxl3OXQRw1Hry3KJOIgpofYorq0ZMPk=
cloud.google.com/go/analytics v0.27.1/go.mod h1:2itQDvSWyGiBvs80ocjFjfu/ZUIo25fC93hsEX4fnoU=
cloud.google.com/go/apigateway v1.7.5/go.mod h1:iJ9zoE4KMNF1CHBFV4pZDCJRZzonqKj4BECymhvAwWk=
cloud.google.com/go/apigeeconnect v1.7.5/go.mod h1:XAGnQGiFakRMV3H6bawRb5JAIXIbFSfzGKLDqL1dYgQ=
cloud.google.com/go/apigeeregistry v0.9.5/go.mod h1:e6oNKW1utj+A1fpTw+YUpPkFusNT8gfFbqx/8upsgCY=
cloud.google.com/go/appengine v1.9.5/go.mod h1:x4zKNF1qRX++Joni0nQFJoNobodzWX1bieiGRMWx+4U=
cloud.google.com/go/area120 v0.9.5/go.mod h1:1rAIWfyOiCXk/kuTqFU//pfrHiA8GM8LziM79Lm0zxk=
cloud.google.com/go/artifactregistry v1.16.3/go.mod h1:eiLO70Qh5Z9Jbwctl0KdW5VzJ5HncWgNaYN0NdF8lmM=
cloud.google.com/go/asset v1.20.5/go.mod h1:0pbY+F3Pr3teQLK1ZXpUjGPNBPfUiL1tpxRxRmLCV/c=
cloud.google.com/go/assuredworkloads v1.12.5/go.mod h1:OHjBWxs611PdU/VkGDoNQ/SFZHIYQTPtZlfDAUWN8K0=
cloud.google.com/go/automl v1.14.6/go.mod h1:mEn1QHZmPTnmrq6zj33gyKX1K7L32izry14I6LQCO5M=
cloud.google.com/go/baremetalsolution v1.3.5/go.mod h1:FfLWTwf9g7MVh0jhomxs1ErK9J/E9GBALdsunmFo50Q=
cloud.google.com/go/batch v1.12.1/go.mod h1:hB6jwKyX2zoFoIXw6/pT2CPIbvo0ya7mpQXFJ9QbnAY=
cloud.google.com/go/beyondcorp v1.1.5/go.mod h1:C77HvHG9ntYvI3+/WXht0tqx/fNxfD4MahSutTOkJYg=
cloud.google.com/go/bigquery v1.67.0/go.mod h1:HQeP1AHFuAz0Y55heDSb0cjZIhnEkuwFRBGo6EEKHug=
cloud.google.com/go/bigtable v1.36.0/go.mod h1:u98oqNAXiAufepkRGAd95lq2ap4kHGr3wLeFojvJwew=
cloud.google.com/go/billing v1.20.3/go.mod h1:DJt75ird7g3zrTODh2Eo8ZT2d3jtoEI5L6qNXIHwOY0=
cloud.google.com/go/binaryauthorization v1.9.4/go.mod h1:LimAql4UPC7B/F+RW9rQpsUpzDFNO+VKwVRyHG9txKU=
cloud.google.com/go/certificatemanager v1.9.4/go.mod h1:KneWp8OAhBVD4fqMUB6daOA90MHh9xVB8E3ZFN8w2dc=
cloud.google.com/go/channel v1.19.4/go.mod h1:W82e3qLLe9wvZShy3aAg/6frvMYOdHKSaIwTLJT2Yxs=
cloud.google.com/go/cloudbuild v1.22.1/go.mod h1:/3syBgG56xUK1UD8dXAOSnPWF4Cs0ZZ/eXhoTIBipwg=
cloud.google.com/go/clouddms v1.8.6/go.mod h1:++xrkEPp1mAKZKFk3MMD63UkK7KpnSBt9kRLRSOYliE=
cloud.google.com/go/cloudtasks v1.13.5/go.mod h1:AReQFk11yF7sHEOKHXP3/SufAeiHn4yXWpqQGds9Of0=
cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU=
cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls=
cloud.google.com/go/compute v1.36.0 h1:QzLrJRxytIGE8OJWzmMweIdiu2pIlRVq9kSi7+xnDkU=
cloud.google.com/go/compute v1.36.0/go.mod h1:+GZuz5prSWFLquViP55zcjRrOm7vRpIllx2MaYpzuiI=
cloud.google.com/go/contactcenterinsights v1.17.2/go.mod h1:9yuX5Y7KFqsQgNydM7WeuGcYWWs/0dBCElXaOF6ltmo=
cloud.google.com/go/container v1.42.3/go.mod h1:8ZT9RBJXjWXqRMM/sEW8dxolZUofxKJUaO9mMXSkDz0=
cloud.google.com/go/containeranalysis v0.14.0/go.mod h1:vct7OEtK07Azaiyo6aCyae4teFL28t7JZQkr1DlTC5s=
cloud.google.com/go/datacatalog v1.25.0/go.mod h1:Bodb/U9ZV549+0sQPoX6WtYnbFwqayuYldw5p6PmbH4=
cloud.google.com/go/dataflow v0.10.5/go.mod h1:rLRbgv1ZK34XW72xrmJysN7z0PCwgsh0wtjWx5Yavoc=
cloud.google.com/go/dataform v0.11.1/go.mod h1:2TYH+Dmqnx9ewr/YG8HbMpcNQBX5gdCyP8W/8GwprWk=
cloud.google.com/go/datafusion v1.8.5/go.mod h1:xMoW16ciCOQpS8rNUDU1tWgHkhbQ3KKaV9o7UTggEtQ=
cloud.google.com/go/datalabeling v0.9.5/go.mod h1:xJzHTfjCvPeF87QreDSFTl98mRS/vp47EWwDBHvQiMU=
cloud.google.com/go/dataplex v1.24.0/go.mod h1:rNqsuS0Yag0NDGybhNpCaeqU/Jq8z4gFqqF0MUajHwE=
cloud.google.com/go/dataproc/v2 v2.11.1/go.mod h1:KDbkJUYjcz+t8nfapg0upz665P0SrsDW7I9RC9GZf4o=
cloud.google.com/go/dataqna v0.9.5/go.mod h1:UFRToVzSTCgwDkeSa4J0WE6bmbemdOZhUCUfs+DlJFc=
cloud.google.com/go/datastore v1.20.0/go.mod h1:uFo3e+aEpRfHgtp5pp0+6M0o147KoPaYNaPAKpfh8Ew=
cloud.google.com/go/datastream v1.14.0/go.mod h1:H0luYVOhiyUrzE2efbv1OHFRjzgZfHO9snDuBXmnQXE=
cloud.google.com/go/deploy v1.26.4/go.mod h1:MaPXP4rU984LmRF+DmJ1qNEZrTI7Rez+hfku0oRudTk=
cloud.google.com/go/dialogflow v1.68.1/go.mod h1:CpfTOpLjhM9ZXu+VzJ56xrX9GMBJt1aIjPMChiLUGso=
cloud.google.com/go/dlp v1.22.0/go.mod h1:2cMTKdeReZI64BDsYzsBZFtXdDqb3nhDKHRsRUl7J9Y=
cloud.google.com/go/documentai v1.36.0/go.mod h1:LsX1RO08WDd8mFBviYB03jgCytz2oIcwIZ9lBw5bKiM=
cloud.google.com/go/domains v0.10.5/go.mod h1:VP7djhZJy47uxUoJGfDilXpUnAaIExcHL86vv3yfaQs=
cloud.google.com/go/edgecontainer v1.4.2/go.mod h1:MhrgxorZIp/4myFe2a/Y0OHSx8PCxeyHBRZATvcTTZs=
cloud.google.com/go/errorreporting v0.3.2/go.mod h1:s5kjs5r3l6A8UUyIsgvAhGq6tkqyBCUss0FRpsoVTww=
cloud.google.com/go/essentialcontacts v1.7.5/go.mod h1:AzwvwPKMUnf8bwfLP0R/+BjzC7bi3OTaLABtUF/q428=
cloud.google.com/go/eventarc v1.15.4/go.mod h1:E5vNWMxaZOwfMfQlQOsoE5TY07tKtOiMLF9s99/btyo=
cloud.google.com/go/filestore v1.10.1/go.mod h1:uZfxcuSzAK8NZGflw9bvB0YOT2O8vhyfEVaFAG+vTkg=
cloud.google.com/go/firestore v1.18.0/go.mod h1:5ye0v48PhseZBdcl0qbl3uttu7FIEwEYVaWm0UIEOEU=
cloud.google.com/go/functions v1.19.4/go.mod h1:qmx3Yrm8ZdwQrWplvnpoL4tHW7s8ULNKwP2SjfX9zSM=
cloud.google.com/go/gkebackup v1.6.4/go.mod h1:ZYY7CdiOKobk3gzEKBbRymaEo22bkR1EPkwZ7Tvts/U=
cloud.google.com/go/gkeconnect v0.12.3/go.mod h1:Ra5w3QcA+ybM2hopIz4ZsQQsDqzoYws3Zn21CLGzfrw=
cloud.google.com/go/gkehub v0.15.5/go.mod h1:hIIoZAGNuiKWp6y4fW9JCEPg9xM7OX9sZwgiJrozrWQ=
cloud.google.com/go/gkemulticloud v1.5.2/go.mod h1:THwE0upZyYmgjEZtgbvGkf0VRkEdPkML9dF/J3lSahg=
cloud.google.com/go/gsuiteaddons v1.7.6/go.mod h1:TPlgcxjwv+L3fx9S6El4dDWItBxJpIyYTs4YPk6Zc48=
cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA=
cloud.google.com/go/iam v1.5.0/go.mod h1:U+DOtKQltF/LxPEtcDLoobcsZMilSRwR7mgNL7knOpo=
cloud.google.com/go/iap v1.10.5/go.mod h1:Sal3oNlcIiv9YWkXWLD9fYzbSCbnrqOD4Pm8JyaiZZY=
cloud.google.com/go/ids v1.5.5/go.mod h1:XHNjg7KratNBxruoiG2Mfx2lFMnRQZWCr/p7T7AV724=
cloud.google.com/go/iot v1.8.5/go.mod h1:BlwypQBsnaiVRCy2+49Zz4ClJLDidldn05+Fp1uGFOs=
cloud.google.com/go/kms v1.21.1/go.mod h1:s0wCyByc9LjTdCjG88toVs70U9W+cc6RKFc8zAqX7nE=
cloud.google.com/go/language v1.14.4/go.mod h1:EqwoMieV6UsNeqHV2tRxuhmfDyC3YqEu1er53CrRkeA=
cloud.google.com/go/lifesciences v0.10.5/go.mod h1:p+vxvHLx0/4QeVp3DU5Gcnyoi+kKNFWRqfgn2d8HuNc=
cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA=
cloud.google.com/go/longrunning v0.5.6/go.mod h1:vUaDrWYOMKRuhiv6JBnn49YxCPz2Ayn9GqyjaBT8/mA=
cloud.google.com/go/longrunning v0.6.6/go.mod h1:hyeGJUrPHcx0u2Uu1UFSoYZLn4lkMrccJig0t4FI7yw=
cloud.google.com/go/managedidentities v1.7.5/go.mod h1:cD8aai2c7nWdOzBMP48wJUM9zsdIu1VbdojGSlLGqjM=
cloud.google.com/go/maps v1.20.1/go.mod h1:aMmv5a4nJBF3WpbPoGathd05Wbl4uuHEw2/bXX+2gZ4=
cloud.google.com/go/mediatranslation v0.9.5/go.mod h1:JGsL9cldTUtRi3u6Q+BMXzY1zZFOWdbmZLf1C69G2Zs=
cloud.google.com/go/memcache v1.11.5/go.mod h1:SYrG9bR51Q82rGpj04gA5YwL0aZGdDcqPvxfQiaxio4=
cloud.google.com/go/metastore v1.14.5/go.mod h1:mWHoEHrIFMv4yjKxczc1S6LIwhDQ7rTcAIix2BEIad8=
cloud.google.com/go/monitoring v1.24.1/go.mod h1:Z05d1/vn9NaujqY2voG6pVQXoJGbp+r3laV+LySt9K0=
cloud.google.com/go/networkconnectivity v1.17.0/go.mod h1:RiX351sXmQ/iScNWUBLN+4L9HJeP3etBCIsXCt366Mc=
cloud.google.com/go/networkmanagement v1.18.2/go.mod h1:QOOTm+LgXEPeA9u9bAeDETBYkibzMVTYH4mIi9GJATc=
cloud.google.com/go/networksecurity v0.10.5/go.mod h1:CqJMtLG67gxHEAjGjccwEm5a7Tb6h0kPtHK5SEHnwMc=
cloud.google.com/go/notebooks v1.12.5/go.mod h1:265WkAl2d3YKqxB+nFFkI+xwnc9CWDdvHs+Pl3TUhLM=
cloud.google.com/go/optimization v1.7.5/go.mod h1:/nM8SUgl5C43X8Bb/AzEZdCL9CrUv9JtOVx6Ql4Ohg8=
cloud.google.com/go/orchestration v1.11.7/go.mod h1:0u82lPJh6P5DpeaLtoeyrYafLEBAQ6m7gZwdhVSM1Ag=
cloud.google.com/go/orgpolicy v1.14.3/go.mod h1:bc5nFdnE+4vwCLvv3uNFWUtsywFf6Szv+eW8SmAbQlQ=
cloud.google.com/go/osconfig v1.14.4/go.mod h1:WQ5UV8yf1yhqrFrMD//dsqF/dqpepo9nzSF34aQ4vC8=
cloud.google.com/go/oslogin v1.14.5/go.mod h1:H/wQ2JrheJ/NqGomDgRGj7YwRUKPl/EqQYUse5z+eCU=
cloud.google.com/go/phishingprotection v0.9.5/go.mod h1:9eflfOQ/ZBWXzjX7Y5GCEDgK3KzpQafnFuGzdwt/AFM=
cloud.google.com/go/policytroubleshooter v1.11.5/go.mod h1:/AnSQG4qCijhusdepnPROvb34cqvwZozTpnPmLt09Uk=
cloud.google.com/go/privatecatalog v0.10.6/go.mod h1:rXuTtOfEicEN2bZRBkz/KTdDJndzvc4zb1b2Jaxkc8w=
cloud.google.com/go/pubsub v1.37.0/go.mod h1:YQOQr1uiUM092EXwKs56OPT650nwnawc+8/IjoUeGzQ=
cloud.google.com/go/pubsub v1.49.0/go.mod h1:K1FswTWP+C1tI/nfi3HQecoVeFvL4HUOB1tdaNXKhUY=
cloud.google.com/go/pubsublite v1.8.2/go.mod h1:4r8GSa9NznExjuLPEJlF1VjOPOpgf3IT6k8x/YgaOPI=
cloud.google.com/go/recaptchaenterprise/v2 v2.20.2/go.mod h1:BuZevlArTGydeIvlO3Mp4nQwLWPsnzUDUF/84+1bmfc=
cloud.google.com/go/recommendationengine v0.9.5/go.mod h1:7Ngg07UK3Ix45dwj/DXgWJa0661YyKfE84XKXnM6qo0=
cloud.google.com/go/recommender v1.13.4/go.mod h1:2xpcTYCOy2JlePWcMcVqS+dNiiMNCNGT/PtsjGP1BTQ=
cloud.google.com/go/redis v1.18.1/go.mod h1:lZQIhkqbhlmqGlFws6yzxSt2qNrAsPDHozWYGvXywqM=
cloud.google.com/go/resourcemanager v1.10.5/go.mod h1:3h1p8//AxBksoqJR/sD5AeGKVuuhZi805WC9nGogRGE=
cloud.google.com/go/resourcesettings v1.8.3/go.mod h1:BzgfXFHIWOOmHe6ZV9+r3OWfpHJgnqXy8jqwx4zTMLw=
cloud.google.com/go/retail v1.19.3/go.mod h1:o34bfr78e/gDLbHeDp0jiXKkXK7onYCJc86qrTM4Pac=
cloud.google.com/go/run v1.9.2/go.mod h1:QD5H5hNuz900FYLQGtbMlA0dqZogy/Wj0xpLwTzK2+Q=
cloud.google.com/go/scheduler v1.11.6/go.mod h1:gb8qfU07hAyXXtwrKXs7nbc9ar/R8vNsaRHswZpgPyM=
cloud.google.com/go/secretmanager v1.14.6/go.mod h1:0OWeM3qpJ2n71MGgNfKsgjC/9LfVTcUqXFUlGxo5PzY=
cloud.google.com/go/security v1.18.4/go.mod h1:+oNVB34sloqG2K3IpoT2KUDgNAbAJ9A2uENjAUvgzRQ=
cloud.google.com/go/securitycenter v1.36.1/go.mod h1:SxE1r7Y5V9AVPa+DU0d+4QAOIJzcKglO3Vc4zvcQtPo=
cloud.google.com/go/servicedirectory v1.12.5/go.mod h1:v/sr/Z4lbZzJBSn5H7bObu8FKoS6NZZ0ysQ3gi0vMMM=
cloud.google.com/go/shell v1.8.5/go.mod h1:vuRxgLhy5pR9TZVqWvR/7lfSiMCLv6ucuoYDtQKKuJ8=
cloud.google.com/go/spanner v1.79.0/go.mod h1:224ub0ngSaiy7SJI7QZ1pu9zoVPt6CgfwDGBNhUUuzU=
cloud.google.com/go/speech v1.26.1/go.mod h1:YTt2qy3GFlzxNJmWj7aDEZjTqESvP2pWpExdOqtCQ6k=
cloud.google.com/go/storagetransfer v1.12.3/go.mod h1:JzyP1ymNdy+F0VjyVCKzuk1WjLJ1yZGhtXcBlzBkPjk=
cloud.google.com/go/talent v1.8.2/go.mod h1:SAIKGqmpKBCOf1LZLtL/7yzNqY2YTYHk0CgMlEWBXMY=
cloud.google.com/go/texttospeech v1.12.0/go.mod h1:BdrVnsA7LnGe9v+zY3nfNJ2veaqLFbpkpBz3U+jsY34=
cloud.google.com/go/tpu v1.8.2/go.mod h1:W/fW8HHjrzx1Ae5ahXiWnc/O0FNAQCbXdGdE7Hac3dc=
cloud.google.com/go/trace v1.11.5/go.mod h1:TwblCcqNInriu5/qzaeYEIH7wzUcchSdeY2l5wL3Eec=
cloud.google.com/go/translate v1.10.3/go.mod h1:GW0vC1qvPtd3pgtypCv4k4U8B7EdgK9/QEF2aJEUovs=
cloud.google.com/go/translate v1.12.4/go.mod h1:u3NmYPWGXeNVz94QYzdd8kI7Rvi3wyp2jsjN3qAciCY=
cloud.google.com/go/video v1.23.4/go.mod h1:G95szckwF/7LatG9fGfNXceMzLf7W0UhKTZi6zXKHPs=
cloud.google.com/go/videointelligence v1.12.5/go.mod h1:OFaZL0H53vQl/uyz/8gqXMJ5nr69RIC3ffPGJwKCNww=
cloud.google.com/go/vision/v2 v2.9.4/go.mod h1:VotOrCFm0DbWKU7KvtyuAm72okClHDoERxrgeeQNPN4=
cloud.google.com/go/vmmigration v1.8.5/go.mod h1:6/VVofjrSGi14/0ZcaoSoZcy9VHDhJ6fNFxnYAPxuLg=
cloud.google.com/go/vmwareengine v1.3.4/go.mod h1:2W2NdtnfEe/0rEKoDfGOpBPtbAAf9ZN/SecH1WwLX6w=
cloud.google.com/go/vpcaccess v1.8.5/go.mod h1:R/oMa0mkPbi5GuIascldW5g/IHXq9YX0TBxJyOzyy28=
cloud.google.com/go/webrisk v1.10.5/go.mod h1:Cd8ce1mCt1fbiufmVkHeZZlPGfe4LQVHw006MtBIxvk=
cloud.google.com/go/websecurityscanner v1.7.5/go.mod h1:QGRxdN0ihdyjwDPaLf96O+ks4u+SBG7/bPNs+fc+LR0=
cloud.google.com/go/workflows v1.14.0/go.mod h1:kjar2tf4qQu7VoCTFX+L3yy+2dIFTWr6R4i52DN6ySk=
filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA=
fyne.io/systray v1.11.0/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs=
github.com/99designs/gqlgen v0.17.36/go.mod h1:6RdyY8puhCoWAQVr2qzF2OMVfudQzc8ACxzpzluoQm4=
github.com/Abirdcfly/dupword v0.0.11/go.mod h1:wH8mVGuf3CP5fsBTkfWwwwKTjDnVVCxtU8d8rgeVYXA=
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU=
github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0=
github.com/Antonboom/errname v0.1.9/go.mod h1:nLTcJzevREuAsgTbG85UsuiWpMpAqbKD1HNZ29OzE58=
github.com/Antonboom/nilnil v0.1.4/go.mod h1:iOov/7gRcXkeEU+EMGpBu2ORih3iyVEiWjeste1SJm8=
github.com/AudriusButkevicius/recli v0.0.7-0.20220911121932-d000ce8fbf0f/go.mod h1:Nhfib1j/VFnLrXL9cHgA+/n2O6P5THuWelOnbfPNd78=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI=
github.com/DataDog/datadog-agent/comp/trace/compression/def v0.58.0/go.mod h1:samFXdP0HVSwD223LPLzcPKUjRQ6/uwr/1wMPo2HhRg=
github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip v0.58.0/go.mod h1:FTweq0EZjbOgeWgV7+3R1Zx9l2b9de7LwceYSNNdZvM=
github.com/DataDog/datadog-agent/comp/trace/compression/impl-zstd v0.58.0/go.mod h1:wIlhI+gwxKQzDQFr4PjvXXuKHUsx3QWY2TbwDv1yaDs=
github.com/DataDog/datadog-agent/pkg/util/cgroups v0.58.0/go.mod h1:XjTdv3Kb7EqpPnMlmmQK1MV6EFOArwoa6wSVB+P7TdU=
github.com/DataDog/datadog-agent/pkg/util/pointer v0.58.0/go.mod h1:t1DlnUEMltkvwPLc7zCtP1u5cBDu+30daR2VhQO5bvA=
github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/Djarvur/go-err113 v0.1.0/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0/go.mod h1:b3g59n2Y+T5xmcxJL+UEG2f8cQploZm1mR/v6BW0mU0=
github.com/GoogleCloudPlatform/gke-networking-api v0.1.2-0.20240904205008-bc15495fd43f/go.mod h1:YnoYXo/cwpqFmIXKblHOV5jFEpsSL3PZeo0zaR3oGTI=
github.com/GoogleCloudPlatform/k8s-cloud-provider v1.25.0/go.mod h1:UTfhBnADaj2rybPT049NScSh7Eall3u2ib43wmz3deg=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM=
github.com/IBM/sarama v1.43.1/go.mod h1:GG5q1RURtDNPz8xxJs3mgX6Ytak8Z9eLhAkJPObe2xE=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/Masterminds/vcs v1.13.3/go.mod h1:TiE7xuEjl1N4j016moRd6vezp6e6Lz23gypeXfzXeW8=
github.com/Microsoft/cosesign1go v1.2.0/go.mod h1:1La/HcGw19rRLhPW0S6u55K6LKfti+GQSgGCtrfhVe8=
github.com/Microsoft/didx509go v0.0.3/go.mod h1:wWt+iQsLzn3011+VfESzznLIp/Owhuj7rLF7yLglYbk=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/OpenPeeDeeP/depguard v1.1.1/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc=
github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/Shopify/sarama v1.38.1/go.mod h1:iwv9a67Ha8VNa+TifujYoWGxWnu2kNVAQdSdZ4X2o5g=
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
github.com/akavel/rsrc v0.10.2/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw=
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
github.com/alecthomas/kong v1.6.0/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU=
github.com/alecthomas/kong v1.10.0/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I=
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/ashanbrown/forbidigo v1.5.1/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU=
github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI=
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13/go.mod h1:gpAbvyDGQFozTEmlTFO8XcQKHzubdq0LzRyJpG6MiXM=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.64/go.mod h1:4Q7R9MFpXRdjO3YnAfUTdnuENs32WzBkASt6VxSYDYQ=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.3/go.mod h1:jYLMm3Dh0wbeV3lxth5ryks/O2M/omVXWyYm3YcEVqQ=
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.21.4/go.mod h1:aryF4jxgjhbqpdhj8QybUZI3xYrX8MQIKm4WbOv8Whg=
github.com/aws/aws-sdk-go-v2/service/ec2 v1.93.2/go.mod h1:VX22JN3HQXDtQ3uS4h4TtM+K11vydq58tpHTlsm8TL8=
github.com/aws/aws-sdk-go-v2/service/eventbridge v1.20.4/go.mod h1:XlbY5AGZhlipCdhRorT18/HEThKAxo51hMmhixreJoM=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.35/go.mod h1:YVHrksq36j0sbXCT6rSuQafpfYkMYqy0QTk7JTCTBIU=
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.7.34/go.mod h1:CDPcT6pljRaqz1yLsOgPUvOPOczFvXuJxOKzDzAbF0c=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.3/go.mod h1:TXBww3ANB+QRj+/dUoYDvI8d/u4F4WzTxD4mxtDoxrg=
github.com/aws/aws-sdk-go-v2/service/kinesis v1.18.4/go.mod h1:HnjgmL8TNmYtGcrA3N6EeCnDvlX6CteCdUbZ1wV8QWQ=
github.com/aws/aws-sdk-go-v2/service/s3 v1.33.0/go.mod h1:J9kLNzEiHSeGMyN7238EjJmBpCniVzFda75Gxl/NqB8=
github.com/aws/aws-sdk-go-v2/service/sfn v1.19.4/go.mod h1:uWCH4ATwNrkRO40j8Dmy7u/Y1/BVWgCM+YjBNYZeOro=
github.com/aws/aws-sdk-go-v2/service/sns v1.21.4/go.mod h1:bbB779DXXOnPXvB7F3dP7AjuV1Eyr7fNyrA058ExuzY=
github.com/aws/aws-sdk-go-v2/service/sqs v1.24.4/go.mod h1:c1AF/ac4k4xz32FprEk6AqqGFH/Fkub9VUPSrASlllA=
github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM=
github.com/bazelbuild/rules_go v0.44.2/go.mod h1:Dhcz716Kqg1RHNWos+N6MlXNkjNP2EwZQ0LukRKJfMs=
github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI=
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k=
github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo=
github.com/bradfitz/gomemcache v0.0.0-20230611145640-acc696258285/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
github.com/bramvdbogaerde/go-scp v1.4.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ=
github.com/breml/bidichk v0.2.4/go.mod h1:7Zk0kRFt1LIZxtQdl9W9JwGAcLTTkOs+tN7wuEYGJ3s=
github.com/breml/errchkjson v0.3.1/go.mod h1:XroxrzKjdiutFyW3nWhw34VGg7kiMsDQox73yWCGI2U=
github.com/butuzov/ireturn v0.2.0/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
github.com/bytedance/sonic v1.12.0/go.mod h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKzMzT9r/rk=
github.com/bytedance/sonic/loader v0.2.0/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/cavaliergopher/cpio v1.0.1/go.mod h1:pBdaqQjnvXxdS/6CvNDwIANIFSP0xRKI16PX4xejRQc=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ=
github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8/go.mod h1:gakxgyXaaPkxvLw1XQxNGK4I37ys9iBRzNUx/B7pUCo=
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
github.com/confluentinc/confluent-kafka-go v1.9.2/go.mod h1:ptXNqsuDfYbAE/LBW6pnwWZElUoWxHoV8E43DCrliyo=
github.com/confluentinc/confluent-kafka-go/v2 v2.4.0/go.mod h1:E1dEQy50ZLfqs7T9luxz0rLxaeFZJZE92XvApJOr/Rk=
github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
github.com/containerd/btrfs/v2 v2.0.0/go.mod h1:swkD/7j9HApWpzl8OHfrHNxppPd9l44DFZdF94BUj9k=
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc=
github.com/containerd/continuity v0.4.4/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM=
github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
github.com/containerd/imgcrypt v1.1.8/go.mod h1:x6QvFIkMyO2qGIY2zXc88ivEzcbgvLdWjoZyGqDap5U=
github.com/containerd/nri v0.8.0/go.mod h1:uSkgBrCdEtAiEz4vnrq8gmAC4EnVAM5Klt0OuK5rZYQ=
github.com/containerd/protobuild v0.3.0/go.mod h1:5mNMFKKAwCIAkFBPiOdtRx2KiQlyEJeMXnL5R1DsWu8=
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
github.com/containerd/zfs v1.1.0/go.mod h1:oZF9wBnrnQjpWLaPKEinrx3TQ9a+W/RJO7Zb41d8YLE=
github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4=
github.com/containers/ocicrypt v1.1.10/go.mod h1:YfzSSr06PTHQwSTUKqDSjish9BeW1E4HUmreluQcMd8=
github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc=
github.com/daixiang0/gci v0.10.1/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI=
github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps=
github.com/dave/astrid v0.0.0-20170323122508-8c2895878b14/go.mod h1:Sth2QfxfATb/nW4EsrSi2KyJmbcniZ8TgTaji17D6ms=
github.com/dave/brenda v1.1.0/go.mod h1:4wCUr6gSlu5/1Tk7akE5X7UorwiQ8Rij0SKH3/BGMOM=
github.com/dave/courtney v0.4.0/go.mod h1:3WSU3yaloZXYAxRuWt8oRyVb9SaRiMBt5Kz/2J227tM=
github.com/dave/patsy v0.0.0-20210517141501-957256f50cba/go.mod h1:qfR88CgEGLoiqDaE+xxDCi5QA5v4vUoW0UCX2Nd5Tlc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c=
github.com/denisenkom/go-mssqldb v0.11.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A=
github.com/dimfeld/httptreemux/v5 v5.5.0/go.mod h1:QeEylH57C0v3VO0tkKraVz9oD3Uu93CKPnTLbsidvSw=
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq40=
github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho=
github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/elastic/crd-ref-docs v0.0.12/go.mod h1:X83mMBdJt05heJUYiS3T0yJ/JkCuliuhSUNav5Gjo/U=
github.com/elastic/elastic-transport-go/v8 v8.1.0/go.mod h1:87Tcz8IVNe6rVSLdBux1o/PEItLtyabHU3naC7IoqKI=
github.com/elastic/go-elasticsearch/v6 v6.8.5/go.mod h1:UwaDJsD3rWLM5rKNFzv9hgox93HoX8utj1kxD9aFUcI=
github.com/elastic/go-elasticsearch/v7 v7.17.1/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4=
github.com/elastic/go-elasticsearch/v8 v8.4.0/go.mod h1:yY52i2Vj0unLz+N3Nwx1gM5LXwoj3h2dgptNGBYkMLA=
github.com/emicklei/go-restful v2.16.0+incompatible h1:rgqiKNjTnFQA6kkhFe16D8epTksy9HQ1MyrbDXSdYhM=
github.com/emicklei/go-restful v2.16.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0=
github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
github.com/evanw/esbuild v0.19.11/go.mod h1:D2vIQZqV/vIf/VRHtViaUtViZmG7o+kKmlBfVQuRi48=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI=
github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI=
github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/garyburd/redigo v1.6.4/go.mod h1:rTb6epsqigu3kYKBnaF028A7Tf/Aw5s0cqA47doKKqw=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/go-chi/chi v1.5.4/go.mod h1:uaf8YgoFazUOkPBG7fxPftUylNumIev9awIWOENIuEg=
github.com/go-chi/chi/v5 v5.0.10/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/go-critic/go-critic v0.8.0/go.mod h1:5TjdkPI9cu/yKbYS96BTsslihjKd6zg6vd8O9RZXj2s=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-pg/pg/v10 v10.11.1/go.mod h1:ExJWndhDNNftBdw1Ow83xqpSf4WMSJK8urmXD5VXS1I=
github.com/go-pg/zerochecker v0.2.0/go.mod h1:NJZ4wKL0NmTtz0GKCoJ8kym6Xn/EQzXRl2OnAe7MmDo=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.15.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-redis/redis/v7 v7.4.1/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU=
github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw=
github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ=
github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4=
github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA=
github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ=
github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig=
github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-yaml v1.12.0/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
github.com/gocql/gocql v1.6.0/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8=
github.com/godror/godror v0.40.4/go.mod h1:i8YtVTHUJKfFT3wTat4A9UoqScUtZXiYB9Rf3SVARgc=
github.com/godror/knownpb v0.1.1/go.mod h1:4nRFbQo1dDuwKnblRXDxrfCFYeT4hjg3GjMqef58eRE=
github.com/gofiber/fiber/v2 v2.52.5/go.mod h1:KEOE+cXMhXG0zHc9d8+E38hoX+ZN7bhOtgeF2oT6jrQ=
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/mock v1.7.0-rc.1/go.mod h1:s42URUywIqd+OcERslBJvOjepvNymP31m3q8d/GkuRs=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ=
github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs=
github.com/golangci/golangci-lint v1.52.2/go.mod h1:S5fhC5sHM5kE22/HcATKd1XLWQxX+y7mHj8B5H91Q/0=
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
github.com/golangci/misspell v0.4.0/go.mod h1:W6O/bwV6lGDxUCChm2ykw9NQdd5bYd1Xkjo88UcWyJc=
github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs=
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE=
github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
github.com/google/go-containerregistry v0.20.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
github.com/google/go-github/v56 v56.0.0/go.mod h1:D8cdcX98YWJvi7TLo7zM4/h8ZTx6u6fwGEkCdisopo0=
github.com/google/go-pkcs11 v0.3.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY=
github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2/go.mod h1:nOFQdrUlIlx6M6ODdSpBj1NVA+VgLC6kmw60mkw34H4=
github.com/google/rpmpack v0.5.0/go.mod h1:uqVAUVQLq8UY2hCDfmJ/+rtO3aw7qyhc90rCVEabEfI=
github.com/google/subcommands v1.0.2-0.20190508160503-636abe8753b8/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
github.com/goreleaser/chglog v0.5.0/go.mod h1:Ri46M3lrMuv76FHszs3vtABR8J8k1w9JHYAzxeeOl28=
github.com/goreleaser/fileglob v1.3.0/go.mod h1:Jx6BoXv3mbYkEzwm9THo7xbr5egkAraxkGorbJb4RxU=
github.com/goreleaser/nfpm/v2 v2.33.1/go.mod h1:8wwWWvJWmn84xo/Sqiv0aMvEGTHlHZTXTEuVSgQpkIM=
github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk=
github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc=
github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM=
github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak=
github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A=
github.com/graph-gophers/graphql-go v1.5.0/go.mod h1:YtmJZDLbF1YYNrlNAuiO5zAStUWc3XZT07iGsVqe1Os=
github.com/graphql-go/graphql v0.8.1/go.mod h1:nKiHzRM0qopJEwCITUuIsxk9PlVlwIiiI8pnJEhordQ=
github.com/graphql-go/handler v0.2.3/go.mod h1:leLF6RpV5uZMN1CdImAxuiayrYYhOk33bZciaUGaXeU=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hanwen/go-fuse/v2 v2.3.0/go.mod h1:xKwi1cF7nXAOBCXujD5ie0ZKsxc8GGSA1rlMJc+8IJs=
github.com/hashicorp/consul/api v1.24.0/go.mod h1:NZJGRFYruc/80wYowkPFCp1LbGmJC9L8izrwfyVx/Wg=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8=
github.com/hashicorp/vault/sdk v0.9.2/go.mod h1:gG0lA7P++KefplzvcD3vrfCmgxVAM7Z/SqX5NeOL/98=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inetaf/tcpproxy v0.0.0-20240214030015-3ce58045626c/go.mod h1:Di7LXRyUcnvAcLicFhtM9/MlZl/TNgRSDHORM2c6CMI=
github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI=
github.com/intel/goresctrl v0.5.0/go.mod h1:mIe63ggylWYr0cU/l8n11FAkesqfvuP3oktIsxvu0T0=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4=
github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc=
github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/josephspurrier/goversioninfo v1.4.0/go.mod h1:JWzv5rKQr+MmW+LvM412ToT/IkYDZjaclF2pKDss8IY=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE=
github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
github.com/junk1tm/musttag v0.5.0/go.mod h1:PcR7BA+oREQYvHwgjIDmw3exJeds5JzRcvEJTfjrA0M=
github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/kisielk/errcheck v1.6.3/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw=
github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU=
github.com/knadh/koanf/v2 v2.0.2/go.mod h1:HN9uZ+qFAejH1e4G41gnoffIanINWQuONLXiV7kir6k=
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I=
github.com/kunwardeep/paralleltest v1.0.6/go.mod h1:Y0Y0XISdZM5IKm3TREQMZ6iteqn1YuwCsJO/0kL9Zes=
github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA=
github.com/labstack/echo v3.3.10+incompatible/go.mod h1:0INS7j/VjnFxD4E2wkz67b8cVwCLbBmJyDaka6Cmk1s=
github.com/labstack/echo/v4 v4.11.1/go.mod h1:YuYRTSM3CHs2ybfrL8Px48bO6BAnYIN4l8wSTMP6BDQ=
github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0=
github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY=
github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU=
github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4=
github.com/lestrrat-go/jwx v1.2.29/go.mod h1:hU8k2l6WF0ncx20uQdOmik/Gjg6E3/wIRtXSNFeZuB8=
github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM=
github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJooIbHWCufXvOcT6E7Stq81gU+CSQ=
github.com/lxn/win v0.0.0-20210218163916-a377121e959e/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk=
github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE=
github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc=
github.com/maruel/panicparse/v2 v2.4.0/go.mod h1:nOY2OKe8csO3F3SA5+hsxot05JLgukrF54B9x88fVp4=
github.com/maruel/panicparse/v2 v2.5.0/go.mod h1:DA2fDiBk63bKfBf4CVZP9gb4fuvzdPbLDsSI873hweQ=
github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/maxbrunsfeld/counterfeiter/v6 v6.8.1/go.mod h1:eyp4DdUJAKkr9tvxR3jWhw2mDK7CWABMG5r9uyaKC7I=
github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2/go.mod h1:VzB2VoMh1Y32/QqDfg9ZJYHj99oM4LiGtqPZydTiQSQ=
github.com/maxmind/geoipupdate/v6 v6.1.0/go.mod h1:cZYCDzfMzTY4v6dKRdV7KTB6SStxtn3yFkiJ1btTGGc=
github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o=
github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE=
github.com/mgechev/revive v1.3.1/go.mod h1:YlD6TTWl2B8A103R9KWJSPVI9DrEf+oqr15q21Ld+5I=
github.com/microsoft/go-mssqldb v0.21.0/go.mod h1:+4wZTUnz/SV6nffv+RRRB/ss8jPng5Sho2SmM1l2ts4=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4=
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
github.com/mohae/deepcopy v0.0.0-20170308212314-bb9b5e7adda9/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE=
github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM=
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
github.com/nelsam/hel/v2 v2.3.3/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w=
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
github.com/nishanths/exhaustive v0.10.0/go.mod h1:IbwrGdVMizvDcIxPYGVdQn5BqWJaOwpCvg4RGb8r/TA=
github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c=
github.com/nunnatsa/ginkgolinter v0.11.2/go.mod h1:dJIGXYXbkBswqa/pIzG0QlVTTDSBMxDoCFwhsl4Uras=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/open-policy-agent/opa v0.68.0/go.mod h1:5E5SvaPwTpwt2WM177I9Z3eT7qUpmOGjk1ZdHs+TZ4w=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.104.0/go.mod h1:4bLfc6BnVKRp3yY+ueEUEeyNWjW/InCGbFs9ZA7o/ko=
github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.104.0/go.mod h1:I2so4Vn+ROaCECo0bdQXNxyUjY9tbq1JvcyuWPETLcM=
github.com/opencontainers/runc v1.1.14/go.mod h1:E4C2z+7BxR7GHXp0hAY53mek+x49X1LjPNeMTfRGvOA=
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI=
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ=
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
github.com/polyfloyd/go-errorlint v1.4.1/go.mod h1:k6fU/+fQe38ednoZS51T7gSIGQW1y94d6TkSr35OzH8=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff/go.mod h1:FvE8dtQ1Ww63IlyKBn1V4s+zMwF9kHkVNkQBR1pM4CU=
github.com/puzpuzpuz/xsync/v3 v3.4.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/quasilyte/go-ruleguard v0.3.19/go.mod h1:lHSn69Scl48I7Gt9cX3VrbsZYvYiBYszZOZW4A+oTEw=
github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng=
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/riywo/loginshell v0.0.0-20200815045211-7d26008be1ab/go.mod h1:/PfPXh0EntGc3QAAyUaviy4S9tzy4Zp0e2ilq4voC6E=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww=
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50=
github.com/ryanrolds/sqlclosecheck v0.4.0/go.mod h1:TBRRjzL31JONc9i4XMinicuo+s+E8yKZ5FN8X3G6CKQ=
github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs=
github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ=
github.com/sashamelentyev/usestdlibvars v1.23.0/go.mod h1:YPwr/Y1LATzHI93CqoPUN/2BzGQ/6N/cl/KwgR0B/aU=
github.com/securego/gosec/v2 v2.15.0/go.mod h1:VOjTrZOkUtSDt2QLSJmQBMWnvwiQPEjg0l+5juIqGk8=
github.com/segmentio/kafka-go v0.4.42/go.mod h1:d0g15xPMqoUookug0OU75DhGZxXwCFxSLeJ4uphwJzg=
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
github.com/shirou/gopsutil/v4 v4.25.3/go.mod h1:xbuxyoZj+UsgnZrENu3lQivsngRR5BdjbJwf2fv4szA=
github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4=
github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY=
github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg=
github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo=
github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I=
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M=
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/studio-b12/gowebdav v0.9.0/go.mod h1:bHA7t77X/QFExdeAnDzK6vKM34kEZAcE1OX4MfiwjkE=
github.com/syncthing/notify v0.0.0-20250207082249-f0fa8f99c2bc/go.mod h1:J0q59IWjLtpRIJulohwqEZvjzwOfTEPp8SVhDJl+y0Y=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk=
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e/go.mod h1:XrBNfAFN+pwoWuksbFS9Ccxnopa15zJGgXRFN90l3K4=
github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8=
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg=
github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41/go.mod h1:/roCdA6gg6lQyw/Oz6gIIGu3ggJKYhF+WC/AQReE5XQ=
github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ=
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8=
github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8=
github.com/tailscale/mkctr v0.0.0-20240628074852-17ca944da6ba/go.mod h1:DxnqIXBplij66U2ZkL688xy07q97qQ83P+TVueLiHq4=
github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU=
github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y=
github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4=
github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg=
github.com/tc-hib/winres v0.2.1/go.mod h1:C/JaNhH3KBvhNKVbvdlDWkbMDO9H4fKKDaN7/07SSuk=
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/tcnksm/go-httpstat v0.2.0/go.mod h1:s3JVJFtQxtBEBC9dwcdTTXS9xFnM3SXAZwPG41aurT8=
github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg=
github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
github.com/tidwall/buntdb v1.3.0/go.mod h1:lZZrZUWzlyDJKlLQ6DKAy53LnG7m5kHyrEHvvcDmBpU=
github.com/tidwall/gjson v1.16.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/grect v0.1.4/go.mod h1:9FBsaYRaR0Tcy4UwefBX/UDcDcDy9V5jUcxHzv2jd5Q=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tidwall/rtred v0.1.2/go.mod h1:hd69WNXQ5RP9vHd7dqekAz+RIdtfBogmglkZSRxCHFQ=
github.com/tidwall/tinyqueue v0.1.1/go.mod h1:O/QNHwrnjqr6IHItYrzoHAKYhBkLI67Q096fQP5zMYw=
github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ=
github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs=
github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE=
github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM=
github.com/twitchtv/twirp v8.1.3+incompatible/go.mod h1:RRJoFSAmTEh2weEqWtpPE3vFK5YBhA6bqp2l1kfCC5A=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI=
github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
github.com/uptrace/bun v1.1.17/go.mod h1:hATAzivtTIRsSJR4B8AXR+uABqnQxr3myKDKEf5iQ9U=
github.com/uptrace/bun/dialect/sqlitedialect v1.1.17/go.mod h1:YF0FO4VVnY9GHNH6rM4r3STlVEBxkOc6L88Bm5X5mzA=
github.com/urfave/cli v1.22.16/go.mod h1:EeJR6BKodywf4zciqrdw6hpCPk68JO9z5LazXZMn5Po=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY=
github.com/valkey-io/valkey-go v1.0.52/go.mod h1:BXlVAPIL9rFQinSFM+N32JfWzfCaUAqBpZkc4vPY6fM=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.51.0/go.mod h1:oI2XroL+lI7vdXyYoQk03bXBThfFl2cVdIA3Xl7cH8g=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
github.com/vektah/gqlparser/v2 v2.5.16/go.mod h1:1lz1OeCqgQbQepsGxPVywrjdBHW2T08PUS3pJqepRww=
github.com/veraison/go-cose v1.1.0/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4=
github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
github.com/vmihailenco/bufpool v0.1.11/go.mod h1:AFf/MOy3l2CFTKbxwt0mp2MwnqjNEs5H/UxrkA5jxTQ=
github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/willabides/kongplete v0.4.0/go.mod h1:0P0jtWD9aTsqPSUAl4de35DLghrr57XcayPyvqSi2X8=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk=
github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
github.com/zenazn/goji v1.0.1/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
gitlab.com/bosi/decorder v0.2.3/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE=
gitlab.com/digitalxero/go-conventional-commit v1.0.7/go.mod h1:05Xc2BFsSyC5tKhK0y+P3bs0AwUtNuTp+mTpbCU/DZ0=
go.einride.tech/aip v0.66.0/go.mod h1:qAhMsfT7plxBX+Oy7Huol6YUvZ0ZzdUz26yZsQwfl1M=
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY=
go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
go.mongodb.org/mongo-driver v1.12.1/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector v0.104.0 h1:R3zjM4O3K3+ttzsjPV75P80xalxRbwYTURlK0ys7uyo=
go.opentelemetry.io/collector v0.104.0/go.mod h1:Tm6F3na9ajnOm6I5goU9dURKxq1fSBK1yA94nvUix3k=
go.opentelemetry.io/collector/confmap v0.94.1/go.mod h1:pCT5UtcHaHVJ5BIILv1Z2VQyjZzmT9uTdBmC9+Z0AgA=
go.opentelemetry.io/collector/consumer v0.104.0/go.mod h1:60zcIb0W9GW0z9uJCv6NmjpSbCfBOeRUyrtEwqK6Hzo=
go.opentelemetry.io/collector/pdata/testdata v0.104.0/go.mod h1:3SnYKu8gLfxURJMWS/cFEUFs+jEKS6jvfqKXnOZsdkQ=
go.opentelemetry.io/collector/processor v0.104.0/go.mod h1:qU2/xCCYdvVORkN6aq0H/WUWkvo505VGYg2eOwPvaTg=
go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4=
golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/exp/typeparams v0.0.0-20240119083558-1b970713d09a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E=
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0=
golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
google.golang.org/genproto v0.0.0-20240325203815-454cdb8f5daa h1:ePqxpG3LVx+feAUOx8YmR5T7rc0rdzK8DyxM8cQ9zq0=
google.golang.org/genproto v0.0.0-20240325203815-454cdb8f5daa/go.mod h1:CnZenrTdRJb7jc+jOm0Rkywq+9wh0QC4U8tyiRbEPPM=
google.golang.org/genproto v0.0.0-20240924160255-9d4c2d233b61 h1:KipVMxePgXPFBzXOvpKbny3RVdVmJOD64R/Ob7GPWEs=
google.golang.org/genproto v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:HiAZQz/G7n0EywFjmncAwsfnmFm2bjm7qPjwl8hyzjM=
google.golang.org/genproto/googleapis/bytestream v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:WkJpQl6Ujj3ElX4qZaNm5t6cT95ffI4K+HKQ0+1NyMw=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1/go.mod h1:5KF+wpkbTSbGcR9zteSqZV6fqFOWBl4Yde8En8MryZA=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/jinzhu/gorm.v1 v1.9.2/go.mod h1:56JJPUzbikvTVnoyP1nppSkbJ2L8sunqTBDY2fDrmFg=
gopkg.in/olivere/elastic.v3 v3.0.75/go.mod h1:yDEuSnrM51Pc8dM5ov7U8aI/ToR3PG0llA8aRv2qmw0=
gopkg.in/olivere/elastic.v5 v5.0.84/go.mod h1:LXF6q9XNBxpMqrcgax95C6xyARXWbbCXUrtTxrNrxJI=
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gorm.io/driver/mysql v1.0.1/go.mod h1:KtqSthtg55lFp3S5kUXqlGaelnWpKitn4k1xZTnoiPw=
gorm.io/driver/postgres v1.4.6/go.mod h1:UJChCNLFKeBqQRE+HrkFUbKbq9idPXmTOk2u4Wok8S4=
gorm.io/driver/sqlserver v1.4.2/go.mod h1:XHwBuB4Tlh7DqO0x7Ema8dmyWsQW7wi38VQOAFkrbXY=
gorm.io/gorm v1.25.3/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs=
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
k8s.io/cloud-provider v0.32.2/go.mod h1:2s8TeAXhVezp5VISaTxM6vW3yDonOZXoN4Aryz1p1PQ=
k8s.io/code-generator v0.32.3/go.mod h1:+mbiYID5NLsBuqxjQTygKM/DAdKpAjvBzrJd64NU1G8=
k8s.io/component-helpers v0.32.3/go.mod h1:utTBXk8lhkJewBKNuNf32Xl3KT/0VV19DmiXU/SV4Ao=
k8s.io/controller-manager v0.32.2/go.mod h1:o5uo2tLCQhuoMt0RfKcQd0eqaNmSKOKiT+0YELCqXOk=
k8s.io/cri-api v0.27.1/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0=
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/kms v0.32.3/go.mod h1:Bk2evz/Yvk0oVrvm4MvZbgq8BD34Ksxs2SRHn4/UiOM=
k8s.io/kube-controller-manager v0.32.2/go.mod h1:x7998ZLC+2lYnoizUwvVtHVPuoLeb7BhQEneeiNyVOg=
k8s.io/kubelet v0.32.2/go.mod h1:cC1ms5RS+lu0ckVr6AviCQXHLSPKEBC3D5oaCBdTGkI=
k8s.io/kubernetes v1.32.2/go.mod h1:tiIKO63GcdPRBHW2WiUFm3C0eoLczl3f7qi56Dm1W8I=
k8s.io/metrics v0.32.3/go.mod h1:9R1Wk5cb+qJpCQon9h52mgkVCcFeYxcY+YkumfwHVCU=
mellium.im/sasl v0.3.1/go.mod h1:xm59PUYpZHhgQ9ZqoJ5QaCqzWMi8IeS49dhp6plPCzw=
mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js=
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8/go.mod h1:Oh/d7dEtzsNHGOq1Cdv8aMm3KdKhVvPbRQcM8WFpBR8=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.1/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/controller-tools v0.15.1-0.20240618033008-7824932b0cab/go.mod h1:egedX5jq2KrZ3A2zaOz3e2DSsh5BhFyyjvNcBRIQel8=
sigs.k8s.io/controller-tools v0.17.2/go.mod h1:4q5tZG2JniS5M5bkiXY2/potOiXyhoZVw/U48vLkXk0=
sigs.k8s.io/kustomize/kustomize/v5 v5.5.0/go.mod h1:AeFCmgCrXzmvjWWaeZCyBp6XzG1Y0w1svYus8GhJEOE=
software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
tags.cncf.io/container-device-interface v0.8.1/go.mod h1:Apb7N4VdILW0EVdEMRYXIDVRZfNJZ+kmEUss2kRRQ6Y=
tags.cncf.io/container-device-interface/specs-go v0.8.0/go.mod h1:BhJIkjjPh4qpys+qm4DAYtUyryaTDg9zris+AczXyws=

235
install.sh Executable file
View File

@@ -0,0 +1,235 @@
#!/bin/sh
# KubeVPN installation script
# This script installs KubeVPN CLI to your system
# Created for https://github.com/kubenetworks/kubevpn
# curl -fsSL https://kubevpn.dev/install.sh | sh
set -e
# Colors and formatting (ANSI escape sequences used by the log helpers below)
YELLOW='\033[0;33m'
GREEN='\033[0;32m'
RED='\033[0;31m'
BLUE='\033[0;34m'
BOLD='\033[1m'
RESET='\033[0m'
# Installation configuration
# INSTALL_DIR may be overridden from the caller's environment.
INSTALL_DIR=${INSTALL_DIR:-"/usr/local/bin"}
GITHUB_REPO="kubenetworks/kubevpn"
GITHUB_URL="https://github.com/${GITHUB_REPO}"
# stable.txt contains the latest release tag (e.g. "v2.7.12")
VERSION_URL="https://raw.githubusercontent.com/kubenetworks/kubevpn/refs/heads/master/plugins/stable.txt"
# Name of the downloaded release archive (created in the current directory)
ZIP_FILE="kubevpn.zip"
# Logging helpers. printf '%b' is used instead of echo so the ANSI escape
# sequences stored in the color variables are interpreted consistently:
# under #!/bin/sh the shell may be dash, bash, etc., and echo's handling of
# backslash escapes is not portable (bash's builtin echo prints them
# literally by default).
# log prints an informational message with a blue "==>" prefix.
log() {
    printf '%b\n' "${BLUE}${BOLD}==> ${RESET}$1"
}
# success prints a completed-step message in green.
success() {
    printf '%b\n' "${GREEN}${BOLD}==> $1${RESET}"
}
# warn prints a non-fatal warning in yellow.
warn() {
    printf '%b\n' "${YELLOW}${BOLD}==> $1${RESET}"
}
# error prints an error message in red; callers decide whether to exit.
error() {
    printf '%b\n' "${RED}${BOLD}==> $1${RESET}"
}
# get_system_info detects the operating system and CPU architecture and
# normalizes them into $OS and $ARCH, matching the identifiers used in the
# release asset file names. Exits with an error on unsupported platforms.
get_system_info() {
    OS=$(uname | tr '[:upper:]' '[:lower:]')
    log "Detected OS: ${OS}"
    case $OS in
    linux | darwin) ;;
    # uname on Windows shells reports strings like "MSYS_NT-10.0-19043",
    # "MINGW64_NT-10.0" or "CYGWIN_NT-…"; match by prefix so these are
    # actually detected (exact patterns such as "msys_nt" never match).
    msys_nt* | msys* | mingw* | cygwin*)
        error "Windows is not supported, please install KubeVPN manually using scoop. More info: ${GITHUB_URL}"
        exit 1
        ;;
    *)
        error "Unsupported operating system: ${OS}"
        exit 1
        ;;
    esac
    ARCH=$(uname -m)
    case $ARCH in
    x86_64)
        ARCH="amd64"
        ;;
    aarch64 | arm64)
        ARCH="arm64"
        ;;
    i386 | i686)
        ARCH="386"
        ;;
    *)
        error "Unsupported architecture: ${ARCH}"
        exit 1
        ;;
    esac
    log "Detected architecture: ${ARCH}"
}
# check_requirements verifies everything the install needs up front: a
# download tool (curl preferred, wget as fallback, recorded in $DOWNLOADER),
# unzip, and an installation directory that exists (or can be created) and
# is writable — directly or via sudo/su escalation later on.
check_requirements() {
    # has: small predicate for command availability
    has() { command -v "$1" >/dev/null 2>&1; }
    if has curl; then
        DOWNLOADER="curl"
    elif has wget; then
        DOWNLOADER="wget"
    else
        error "Either curl or wget is required for installation"
        exit 1
    fi
    if ! has unzip; then
        error "unzip is required but not installed"
        exit 1
    fi
    if [ ! -d "$INSTALL_DIR" ]; then
        log "Installation directory $INSTALL_DIR does not exist, attempting to create it"
        if ! mkdir -p "$INSTALL_DIR" 2>/dev/null; then
            # Creation failed; escalation will be needed, so require a tool for it
            if ! has sudo && ! has su; then
                error "Cannot create $INSTALL_DIR and neither sudo nor su is available"
                exit 1
            fi
        fi
    fi
    if [ ! -w "$INSTALL_DIR" ] && ! has sudo && ! has su; then
        error "No write permission to $INSTALL_DIR and neither sudo nor su is available"
        exit 1
    fi
}
# get_latest_version fetches the latest release tag from $VERSION_URL into
# $VERSION, normalized without the leading "v" and without newlines.
get_latest_version() {
    log "Fetching the latest release version..."
    if [ "$DOWNLOADER" = "curl" ]; then
        # -f: fail on HTTP errors so a 404/error page is never accepted as a
        # version string. "|| true" keeps set -e from aborting before the
        # friendly error message below.
        VERSION=$(curl -fsSL "$VERSION_URL" || true)
    else
        VERSION=$(wget -qO- "$VERSION_URL" || true)
    fi
    if [ -z "$VERSION" ]; then
        error "Could not determine the latest version"
        exit 1
    fi
    # Strip newlines and only a *leading* "v": the previous `tr -d 'v'`
    # deleted every 'v' anywhere in the string, not just the tag prefix.
    VERSION=$(echo "$VERSION" | tr -d '\n' | sed 's/^v//')
    success "Latest version: ${VERSION}"
}
# download_binary downloads the release archive for the detected OS/ARCH
# into $ZIP_FILE in the current directory.
download_binary() {
    DOWNLOAD_URL="$GITHUB_URL/releases/download/v${VERSION}/kubevpn_v${VERSION}_${OS}_${ARCH}.zip"
    log "Downloading KubeVPN binary from $DOWNLOAD_URL"
    if [ "$DOWNLOADER" = "curl" ]; then
        # -f: without it curl exits 0 on HTTP errors and saves the 404 page
        # as the zip, deferring the failure to unzip time with a confusing
        # message. wget already fails on HTTP errors by default.
        curl -fL -o "$ZIP_FILE" "$DOWNLOAD_URL" || {
            error "Failed to download KubeVPN"
            exit 1
        }
    else
        wget -O "$ZIP_FILE" "$DOWNLOAD_URL" || {
            error "Failed to download KubeVPN"
            exit 1
        }
    fi
}
# install_binary unpacks the archive into a temp dir, verifies the binary's
# SHA-256 checksum when the archive ships a checksums.txt, and moves the
# binary into $INSTALL_DIR, escalating with sudo/su when needed.
install_binary() {
    log "Installing KubeVPN..."
    TMP_DIR=$(mktemp -d)
    BINARY="$TMP_DIR/bin/kubevpn"
    unzip -o -q "$ZIP_FILE" -d "$TMP_DIR"
    if [ -f "$TMP_DIR/checksums.txt" ]; then
        # Pick the checksum line that names the kubevpn binary; fall back to
        # the first line for single-entry files. The previous
        # `cat … | awk '{print $1}'` emitted the first field of EVERY line,
        # so a multi-entry checksums file could never verify successfully.
        EXPECTED_CHECKSUM=$(awk '/kubevpn/ {print $1; exit}' "$TMP_DIR/checksums.txt")
        if [ -z "$EXPECTED_CHECKSUM" ]; then
            EXPECTED_CHECKSUM=$(awk 'NR==1 {print $1}' "$TMP_DIR/checksums.txt")
        fi
        if command -v shasum >/dev/null 2>&1; then
            ACTUAL_CHECKSUM=$(shasum -a 256 "$BINARY" | awk '{print $1}')
        elif command -v sha256sum >/dev/null 2>&1; then
            ACTUAL_CHECKSUM=$(sha256sum "$BINARY" | awk '{print $1}')
        else
            # Best-effort: no hashing tool, so skip verification entirely
            warn "No checksum tool available, skipping verification"
            ACTUAL_CHECKSUM=$EXPECTED_CHECKSUM
        fi
        [ "$ACTUAL_CHECKSUM" = "$EXPECTED_CHECKSUM" ] || {
            error "Checksum verification failed (Expected: $EXPECTED_CHECKSUM, Got: $ACTUAL_CHECKSUM)"
            # Clean up before aborting
            rm -rf "$TMP_DIR"
            rm -f "$ZIP_FILE"
            exit 1
        }
    fi
    # Escalate only when the target directory is not directly writable
    if [ -w "$INSTALL_DIR" ]; then
        mv "$BINARY" "$INSTALL_DIR/kubevpn"
        chmod +x "$INSTALL_DIR/kubevpn"
    else
        warn "Elevated permissions required to install to $INSTALL_DIR"
        if command -v sudo >/dev/null 2>&1; then
            sudo mv "$BINARY" "$INSTALL_DIR/kubevpn"
            sudo chmod +x "$INSTALL_DIR/kubevpn"
        else
            su -c "mv \"$BINARY\" \"$INSTALL_DIR/kubevpn\" && chmod +x \"$INSTALL_DIR/kubevpn\""
        fi
    fi
    # Clean up
    rm -f "$ZIP_FILE"
    rm -rf "$TMP_DIR"
}
# verify_installation confirms the binary is executable in $INSTALL_DIR,
# reports its version, and warns about PATH shadowing or a missing PATH
# entry before printing usage hints.
verify_installation() {
    if [ -x "$INSTALL_DIR/kubevpn" ]; then
        VERSION_OUTPUT=$("$INSTALL_DIR/kubevpn" version 2>&1 || echo "unknown")
        success "KubeVPN installed successfully"
        log "$VERSION_OUTPUT"
        log "KubeVPN has been installed to: $INSTALL_DIR/kubevpn"
        # Initialize explicitly: FOUND_PATH is consulted again further down
        # and must be defined even when kubevpn is not on PATH (previously it
        # was referenced while unset — fragile, and fatal under `set -u`).
        FOUND_PATH=""
        if command -v kubevpn >/dev/null 2>&1; then
            FOUND_PATH=$(command -v kubevpn)
            if [ "$FOUND_PATH" != "$INSTALL_DIR/kubevpn" ]; then
                warn "Another kubevpn binary was found in your PATH at: $FOUND_PATH"
                warn "Make sure $INSTALL_DIR is in your PATH to use the newly installed version"
            fi
        else
            warn "Make sure $INSTALL_DIR is in your PATH to use kubevpn"
        fi
        echo ""
        log "To connect to a Kubernetes cluster:"
        # Show the full path whenever plain "kubevpn" would not resolve to
        # the binary we just installed.
        if [ "$FOUND_PATH" != "$INSTALL_DIR/kubevpn" ]; then
            echo "  $INSTALL_DIR/kubevpn connect"
        else
            echo "  kubevpn connect"
        fi
        echo ""
        log "For more information, visit:"
        echo "  $GITHUB_URL"
        success "Done! enjoy KubeVPN 🚀"
    else
        error "KubeVPN installation failed"
        exit 1
    fi
}
# main runs the installation pipeline end to end. Each step exits the
# script on unrecoverable errors (set -e plus explicit exits), so reaching
# verify_installation implies all prior steps succeeded.
main() {
    log "Starting KubeVPN installation..."
    get_system_info
    check_requirements
    get_latest_version
    download_binary
    install_binary
    verify_installation
}
main

View File

@@ -2,8 +2,6 @@ package config
import (
"net"
"os"
"path/filepath"
"sync"
"time"
@@ -14,6 +12,12 @@ const (
// configmap name
ConfigMapPodTrafficManager = "kubevpn-traffic-manager"
// helm app name kubevpn
HelmAppNameKubevpn = "kubevpn"
// default installed namespace
DefaultNamespaceKubevpn = "kubevpn"
// config map keys
KeyDHCP = "DHCP"
KeyDHCP6 = "DHCP6"
@@ -25,36 +29,43 @@ const (
TLSCertKey = "tls_crt"
// TLSPrivateKeyKey is the key for the private key field in a TLS secret.
TLSPrivateKeyKey = "tls_key"
// TLSServerName for tls config server name
TLSServerName = "tls_server_name"
// container name
ContainerSidecarEnvoyProxy = "envoy-proxy"
ContainerSidecarControlPlane = "control-plane"
ContainerSidecarWebhook = "webhook"
ContainerSidecarVPN = "vpn"
ContainerSidecarSyncthing = "syncthing"
VolumeEnvoyConfig = "envoy-config"
VolumeSyncthing = "syncthing"
innerIPv4Pool = "223.254.0.100/16"
// 原因在docker环境中设置docker的 gateway 和 subnet不能 inner 的冲突,也不能和 docker的 172.17 冲突
// 不然的话,请求会不通的
// 解决的问题:在 k8s 中的 名叫 kubernetes 的 service ip 为
// ➜ ~ kubectl get service kubernetes
//NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
//kubernetes ClusterIP 172.17.0.1 <none> 443/TCP 190d
//
// ➜ ~ docker network inspect bridge | jq '.[0].IPAM.Config'
//[
// {
// "Subnet": "172.17.0.0/16",
// "Gateway": "172.17.0.1"
// }
//]
// 如果不创建 network那么是无法请求到 这个 kubernetes 的 service 的
dockerInnerIPv4Pool = "223.255.0.100/16"
// innerIPv4Pool is used as tun ip
// 198.19.0.0/16 network is part of the 198.18.0.0/15 (reserved for benchmarking).
// https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
// so we split it into 2 parts: 198.18.0.0/15 --> [198.18.0.0/16, 198.19.0.0/16]
innerIPv4Pool = "198.19.0.100/16"
/*
reason: docker's default bridge uses the 172.17.0.0/16 network, which can conflict with the k8s service named "kubernetes"
➜ ~ kubectl get service kubernetes
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 172.17.0.1 <none> 443/TCP 190d
//The IPv6 address prefixes FE80::/10 and FF02::/16 are not routable
innerIPv6Pool = "efff:ffff:ffff:ffff:ffff:ffff:ffff:9999/64"
➜ ~ docker network inspect bridge | jq '.[0].IPAM.Config'
[
{
"Subnet": "172.17.0.0/16",
"Gateway": "172.17.0.1"
}
]
*/
dockerInnerIPv4Pool = "198.18.0.100/16"
// 2001:2::/64 network is part of the 2001:2::/48 (reserved for benchmarking)
// https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml
innerIPv6Pool = "2001:2::9999/64"
DefaultNetDir = "/etc/cni/net.d"
@@ -90,18 +101,14 @@ const (
var (
// Image inject --ldflags -X
Image = "docker.io/naison/kubevpn:latest"
Image = "ghcr.io/kubenetworks/kubevpn:latest"
Version = "latest"
GitCommit = ""
// GitHubOAuthToken --ldflags -X
GitHubOAuthToken = ""
OriginImage = "docker.io/naison/kubevpn:" + Version
DaemonPath string
HomePath string
PprofPath string
OriginImage = "ghcr.io/kubenetworks/kubevpn:" + Version
)
var (
@@ -116,13 +123,19 @@ var (
)
func init() {
RouterIP, CIDR, _ = net.ParseCIDR(innerIPv4Pool)
RouterIP6, CIDR6, _ = net.ParseCIDR(innerIPv6Pool)
DockerRouterIP, DockerCIDR, _ = net.ParseCIDR(dockerInnerIPv4Pool)
dir, _ := os.UserHomeDir()
DaemonPath = filepath.Join(dir, HOME, Daemon)
HomePath = filepath.Join(dir, HOME)
PprofPath = filepath.Join(dir, HOME, Daemon, PProfDir)
var err error
RouterIP, CIDR, err = net.ParseCIDR(innerIPv4Pool)
if err != nil {
panic(err)
}
RouterIP6, CIDR6, err = net.ParseCIDR(innerIPv6Pool)
if err != nil {
panic(err)
}
DockerRouterIP, DockerCIDR, err = net.ParseCIDR(dockerInnerIPv4Pool)
if err != nil {
panic(err)
}
}
var Debug bool
@@ -134,7 +147,7 @@ var (
)
var (
KeepAliveTime = 180 * time.Second
KeepAliveTime = 60 * time.Second
DialTimeout = 15 * time.Second
HandshakeTimeout = 5 * time.Second
ConnectTimeout = 5 * time.Second
@@ -143,10 +156,47 @@ var (
)
var (
// network layer ip needs 20 bytes
// transport layer UDP header needs 8 bytes
// UDP over TCP header needs 22 bytes
DefaultMTU = 1500 - 20 - 8 - 21
// DefaultMTU
/**
+--------------------------------------------------------------------+
| Original IP Packet from TUN |
+-------------------+------------------------------------------------+
| IP Header (20B) | Payload (MTU size) |
+-------------------+------------------------------------------------+
After adding custom 2-byte header:
+----+-------------------+-------------------------------------------+
| LH | IP Header (20B) | Payload |
+----+-------------------+-------------------------------------------+
| 2B | 20B | 1453 - 20 = 1433B |
+----+-------------------+-------------------------------------------+
TLS 1.3 Record Structure Breakdown:
+---------------------+--------------------------+-------------------+
| TLS Header (5B) | Encrypted Data (N) | Auth Tag (16B) |
+---------------------+--------------------------+-------------------+
| Content Type (1) | ↑ | AEAD Authentication
| Version (2) | Encrypted Payload | (e.g. AES-GCM) |
| Length (2) | (Original Data + LH2) | |
+---------------------+--------------------------+-------------------+
|←------- 5B --------→|←---- Length Field ------→|←----- 16B -------→|
Final Ethernet Frame:
+--------+----------------+----------------+-----------------------+--------+
| EthHdr | IP Header | TCP Header | TLS Components |
| (14B) | (20B) | (20B) +---------+-------------+--------+
| | | | Hdr(5B) | Data+LH2 | Tag(16)|
+--------+----------------+----------------+---------+-------------+--------+
|←------------------- Total 1500B Ethernet Frame --------------------------→|
ipv4: 20
ipv6: 40
mtu: 1417
*/
DefaultMTU = 1500 - max(20, 40) - 20 - 5 - 2 - 16
)
var (

View File

@@ -12,39 +12,54 @@ const (
HOME = ".kubevpn"
Daemon = "daemon"
SockPath = "daemon.sock"
SudoSockPath = "sudo_daemon.sock"
SockPath = "user_daemon.sock"
SudoSockPath = "root_daemon.sock"
PidPath = "daemon.pid"
SudoPidPath = "sudo_daemon.pid"
PidPath = "user_daemon.pid"
SudoPidPath = "root_daemon.pid"
LogFile = "daemon.log"
UserLogFile = "user_daemon.log"
SudoLogFile = "root_daemon.log"
ConfigFile = "config.yaml"
TmpDir = "tmp"
)
//go:embed config.yaml
var config []byte
var (
daemonPath string
homePath string
//go:embed config.yaml
config []byte
)
func init() {
err := os.MkdirAll(DaemonPath, 0755)
dir, err := os.UserHomeDir()
if err != nil {
panic(err)
}
err = os.Chmod(DaemonPath, 0755)
if err != nil {
panic(err)
}
err = os.MkdirAll(PprofPath, 0755)
if err != nil {
panic(err)
}
err = os.Chmod(PprofPath, 0755)
if err != nil {
panic(err)
homePath = filepath.Join(dir, HOME)
daemonPath = filepath.Join(dir, HOME, Daemon)
var paths = []string{homePath, daemonPath, GetPProfPath(), GetSyncthingPath(), GetTempPath()}
for _, path := range paths {
_, err = os.Stat(path)
if errors.Is(err, os.ErrNotExist) {
err = os.MkdirAll(path, 0755)
if err != nil {
panic(err)
}
err = os.Chmod(path, 0755)
if err != nil {
panic(err)
}
} else if err != nil {
panic(err)
}
}
path := filepath.Join(HomePath, ConfigFile)
path := filepath.Join(homePath, ConfigFile)
_, err = os.Stat(path)
if errors.Is(err, os.ErrNotExist) {
err = os.WriteFile(path, config, 0644)
@@ -59,7 +74,7 @@ func GetSockPath(isSudo bool) string {
if isSudo {
name = SudoSockPath
}
return filepath.Join(DaemonPath, name)
return filepath.Join(daemonPath, name)
}
func GetPidPath(isSudo bool) string {
@@ -67,17 +82,28 @@ func GetPidPath(isSudo bool) string {
if isSudo {
name = SudoPidPath
}
return filepath.Join(DaemonPath, name)
return filepath.Join(daemonPath, name)
}
func GetSyncthingPath() string {
return filepath.Join(DaemonPath, SyncthingDir)
return filepath.Join(daemonPath, SyncthingDir)
}
func GetSyncthingGUIPath() string {
return filepath.Join(DaemonPath, SyncthingDir, SyncthingGUIDir)
func GetConfigFile() string {
return filepath.Join(homePath, ConfigFile)
}
func GetConfigFilePath() string {
return filepath.Join(HomePath, ConfigFile)
func GetTempPath() string {
return filepath.Join(homePath, TmpDir)
}
func GetDaemonLogPath(isSudo bool) string {
if isSudo {
return filepath.Join(daemonPath, SudoLogFile)
}
return filepath.Join(daemonPath, UserLogFile)
}
func GetPProfPath() string {
return filepath.Join(daemonPath, PProfDir)
}

View File

@@ -9,13 +9,8 @@ import (
const (
SyncthingDir = "syncthing"
SyncthingGUIDir = "gui"
DefaultRemoteDir = "/kubevpn-data"
// EnvDisableSyncthingLog disable syncthing log, because it can not set output writer, only write os.Stdout or io.Discard
EnvDisableSyncthingLog = "LOGGER_DISCARD"
SyncthingAPIKey = "kubevpn"
)

View File

@@ -26,7 +26,7 @@ import (
"github.com/envoyproxy/go-control-plane/pkg/cache/types"
"github.com/envoyproxy/go-control-plane/pkg/resource/v3"
"github.com/envoyproxy/go-control-plane/pkg/wellknown"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/durationpb"
@@ -38,9 +38,10 @@ import (
)
type Virtual struct {
Uid string // group.resource.name
Ports []ContainerPort
Rules []*Rule
Namespace string
Uid string // group.resource.name
Ports []ContainerPort
Rules []*Rule
}
type ContainerPort struct {
@@ -90,7 +91,7 @@ type Rule struct {
PortMap map[int32]string
}
func (a *Virtual) To(enableIPv6 bool) (
func (a *Virtual) To(enableIPv6 bool, logger *log.Logger) (
listeners []types.Resource,
clusters []types.Resource,
routes []types.Resource,
@@ -100,7 +101,7 @@ func (a *Virtual) To(enableIPv6 bool) (
for _, port := range a.Ports {
isFargateMode := port.EnvoyListenerPort != 0
listenerName := fmt.Sprintf("%s_%v_%s", a.Uid, util.If(isFargateMode, port.EnvoyListenerPort, port.ContainerPort), port.Protocol)
listenerName := fmt.Sprintf("%s_%s_%v_%s", a.Namespace, a.Uid, util.If(isFargateMode, port.EnvoyListenerPort, port.ContainerPort), port.Protocol)
routeName := listenerName
listeners = append(listeners, ToListener(listenerName, routeName, util.If(isFargateMode, port.EnvoyListenerPort, port.ContainerPort), port.Protocol, isFargateMode))
@@ -117,7 +118,7 @@ func (a *Virtual) To(enableIPv6 bool) (
if strings.Index(ports, ":") > 0 {
ports = strings.Split(ports, ":")[0]
} else {
logrus.Errorf("fargate mode port should have two pair")
logger.Errorf("fargate mode port should have two pair: %s", ports)
}
}
envoyRulePort, _ := strconv.Atoi(ports)

View File

@@ -8,6 +8,8 @@ import (
serverv3 "github.com/envoyproxy/go-control-plane/pkg/server/v3"
"github.com/fsnotify/fsnotify"
log "github.com/sirupsen/logrus"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
func Main(ctx context.Context, filename string, port uint, logger *log.Logger) error {
@@ -46,7 +48,7 @@ func Main(ctx context.Context, filename string, port uint, logger *log.Logger) e
case msg := <-notifyCh:
err = proc.ProcessFile(msg)
if err != nil {
log.Errorf("Failed to process file: %v", err)
plog.G(ctx).Errorf("Failed to process file: %v", err)
return err
}
case err = <-errChan:

View File

@@ -49,7 +49,7 @@ func (p *Processor) newVersion() string {
func (p *Processor) ProcessFile(file NotifyMessage) error {
configList, err := ParseYaml(file.FilePath)
if err != nil {
p.logger.Errorf("error parsing yaml file: %+v", err)
p.logger.Errorf("error parsing yaml file: %v", err)
return err
}
enableIPv6, _ := util.DetectSupportIPv6()
@@ -57,15 +57,16 @@ func (p *Processor) ProcessFile(file NotifyMessage) error {
if len(config.Uid) == 0 {
continue
}
lastConfig, ok := p.expireCache.Get(config.Uid)
uid := util.GenEnvoyUID(config.Namespace, config.Uid)
lastConfig, ok := p.expireCache.Get(uid)
if ok && reflect.DeepEqual(lastConfig.(*Virtual), config) {
marshal, _ := json.Marshal(config)
p.logger.Debugf("config are same, not needs to update, config: %s", string(marshal))
p.logger.Infof("config are same, not needs to update, config: %s", string(marshal))
continue
}
p.logger.Debugf("update config, version %d, config %v", p.version, config)
p.logger.Infof("update config, version %d, config %v", p.version, config)
listeners, clusters, routes, endpoints := config.To(enableIPv6)
listeners, clusters, routes, endpoints := config.To(enableIPv6, p.logger)
resources := map[resource.Type][]types.Resource{
resource.ListenerType: listeners, // listeners
resource.RouteType: routes, // routes
@@ -86,13 +87,13 @@ func (p *Processor) ProcessFile(file NotifyMessage) error {
p.logger.Errorf("snapshot inconsistency: %v, err: %v", snapshot, err)
return err
}
p.logger.Debugf("will serve snapshot %+v, nodeID: %s", snapshot, config.Uid)
if err = p.cache.SetSnapshot(context.Background(), config.Uid, snapshot); err != nil {
p.logger.Infof("will serve snapshot %+v, nodeID: %s", snapshot, uid)
if err = p.cache.SetSnapshot(context.Background(), uid, snapshot); err != nil {
p.logger.Errorf("snapshot error %q for %v", err, snapshot)
return err
}
p.expireCache.Set(config.Uid, config, time.Minute*5)
p.expireCache.Set(uid, config, time.Minute*5)
}
return nil
}

View File

@@ -13,8 +13,9 @@ import (
runtimeservice "github.com/envoyproxy/go-control-plane/envoy/service/runtime/v3"
secretservice "github.com/envoyproxy/go-control-plane/envoy/service/secret/v3"
serverv3 "github.com/envoyproxy/go-control-plane/pkg/server/v3"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
const (
@@ -38,6 +39,6 @@ func RunServer(ctx context.Context, server serverv3.Server, port uint) error {
secretservice.RegisterSecretDiscoveryServiceServer(grpcServer, server)
runtimeservice.RegisterRuntimeDiscoveryServiceServer(grpcServer, server)
log.Infof("Management server listening on %d", port)
plog.G(ctx).Infof("Management server listening on %d", port)
return grpcServer.Serve(listener)
}

52
pkg/core/bufferedtcp.go Normal file
View File

@@ -0,0 +1,52 @@
package core
import (
	"context"
	"errors"
	"net"
	"sync/atomic"

	"github.com/wencaiwulue/kubevpn/v2/pkg/config"
	plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
// bufferedTCP decouples producers from a possibly slow net.Conn: Write only
// enqueues a copy of the payload onto a buffered channel, and one background
// goroutine (Run) drains the channel and performs the real writes.
type bufferedTCP struct {
	net.Conn
	Chan chan *DatagramPacket
	// closed is set by Run after a write failure and read by Write from
	// caller goroutines; atomic.Bool because the plain bool was a data race.
	closed atomic.Bool
}

// NewBufferedTCP wraps conn with an asynchronous write buffer and starts the
// drainer goroutine.
func NewBufferedTCP(conn net.Conn) net.Conn {
	c := &bufferedTCP{
		Conn: conn,
		Chan: make(chan *DatagramPacket, MaxSize),
	}
	go c.Run()
	return c
}

// Write enqueues a copy of b for asynchronous delivery and returns the
// number of bytes queued (not yet written to the wire).
// NOTE(review): if len(b) exceeds the pooled buffer size the copy silently
// truncates and n < len(b) is returned with a nil error, which violates the
// io.Writer contract — confirm callers never exceed the pool buffer size.
func (c *bufferedTCP) Write(b []byte) (n int, err error) {
	if c.closed.Load() {
		return 0, errors.New("tcp channel is closed")
	}
	if len(b) == 0 {
		return 0, nil
	}
	// Copy into a pooled buffer; ownership passes to Run, which returns the
	// buffer to the pool after the write completes.
	buf := config.LPool.Get().([]byte)[:]
	n = copy(buf, b)
	c.Chan <- newDatagramPacket(buf, n)
	return n, nil
}

// Run drains the queue onto the underlying connection. On the first write
// error it closes the connection, marks the wrapper closed, and exits.
// NOTE(review): packets still queued when Run exits are never drained (their
// pooled buffers leak), and the goroutine lives until a write fails or the
// process ends — confirm the connection lifecycle always terminates writes.
func (c *bufferedTCP) Run() {
	for buf := range c.Chan {
		_, err := c.Conn.Write(buf.Data[:buf.DataLength])
		config.LPool.Put(buf.Data[:])
		if err != nil {
			plog.G(context.Background()).Errorf("[TCP] Write packet failed: %v", err)
			_ = c.Conn.Close()
			c.closed.Store(true)
			return
		}
	}
}

View File

@@ -1,97 +0,0 @@
package core
import (
"context"
"errors"
"math"
"net"
)
var (
// ErrorEmptyChain is an error that implies the chain is empty.
ErrorEmptyChain = errors.New("empty chain")
)
type Chain struct {
retries int
node *Node
}
func NewChain(retry int, node *Node) *Chain {
return &Chain{retries: retry, node: node}
}
func (c *Chain) Node() *Node {
return c.node
}
func (c *Chain) IsEmpty() bool {
return c == nil || c.node == nil
}
func (c *Chain) DialContext(ctx context.Context) (conn net.Conn, err error) {
for i := 0; i < int(math.Max(float64(1), float64(c.retries))); i++ {
conn, err = c.dial(ctx)
if err == nil {
break
}
}
return
}
func (c *Chain) dial(ctx context.Context) (net.Conn, error) {
if c.IsEmpty() {
return nil, ErrorEmptyChain
}
conn, err := c.getConn(ctx)
if err != nil {
return nil, err
}
var cc net.Conn
cc, err = c.Node().Client.ConnectContext(ctx, conn)
if err != nil {
_ = conn.Close()
return nil, err
}
return cc, nil
}
func (*Chain) resolve(addr string) string {
if host, port, err := net.SplitHostPort(addr); err == nil {
if ips, err := net.LookupIP(host); err == nil && len(ips) > 0 {
return net.JoinHostPort(ips[0].String(), port)
}
}
return addr
}
func (c *Chain) getConn(ctx context.Context) (net.Conn, error) {
if c.IsEmpty() {
return nil, ErrorEmptyChain
}
return c.Node().Client.Dial(ctx, c.resolve(c.Node().Addr))
}
type Handler interface {
Handle(ctx context.Context, conn net.Conn)
}
type Client struct {
Connector
Transporter
}
type Connector interface {
ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error)
}
type Transporter interface {
Dial(ctx context.Context, addr string) (net.Conn, error)
}
type Server struct {
Listener net.Listener
Handler Handler
}

96
pkg/core/forwarder.go Normal file
View File

@@ -0,0 +1,96 @@
package core
import (
"context"
"errors"
"net"
)
var (
	// ErrorEmptyForwarder is returned when dialing through a Forwarder that
	// has no node configured (or through a nil *Forwarder).
	ErrorEmptyForwarder = errors.New("empty forwarder")
)
// Forwarder dials an upstream node, retrying a bounded number of times.
type Forwarder struct {
	retries int   // maximum dial attempts; values < 1 are treated as 1 by DialContext
	node    *Node // upstream node; nil makes the forwarder unusable (see IsEmpty)
}

// NewForwarder returns a Forwarder that dials node with up to retry attempts.
func NewForwarder(retry int, node *Node) *Forwarder {
	return &Forwarder{retries: retry, node: node}
}

// Node returns the configured upstream node (may be nil).
func (c *Forwarder) Node() *Node {
	return c.node
}

// IsEmpty reports whether the forwarder is nil or has no node; such a
// forwarder cannot dial and its dial path yields ErrorEmptyForwarder.
func (c *Forwarder) IsEmpty() bool {
	return c == nil || c.node == nil
}
// DialContext establishes a connection through the forwarder's node, making
// up to c.retries attempts (at least one). It stops early on success or when
// ctx is cancelled; previously the retry loop ignored cancellation and kept
// dialing a dead context.
func (c *Forwarder) DialContext(ctx context.Context) (conn net.Conn, err error) {
	for i := 0; i < max(1, c.retries); i++ {
		// Respect cancellation between attempts instead of retrying blindly.
		if ctxErr := ctx.Err(); ctxErr != nil {
			return nil, ctxErr
		}
		conn, err = c.dial(ctx)
		if err == nil {
			break
		}
	}
	return
}
// dial performs a single connection attempt: obtain a transport-level
// connection to the node via getConn, then run the Connector over it. The
// raw connection is closed if the connector step fails, so no connection
// leaks on a failed attempt.
func (c *Forwarder) dial(ctx context.Context) (net.Conn, error) {
	if c.IsEmpty() {
		return nil, ErrorEmptyForwarder
	}
	conn, err := c.getConn(ctx)
	if err != nil {
		return nil, err
	}
	var cc net.Conn
	cc, err = c.Node().Client.ConnectContext(ctx, conn)
	if err != nil {
		_ = conn.Close()
		return nil, err
	}
	return cc, nil
}
// resolve replaces the host part of addr with its first resolved IP address,
// returning addr unchanged when it lacks a port or does not resolve.
// NOTE(review): unlike the former Chain implementation, getConn no longer
// calls resolve, which makes this method dead code — confirm whether
// dropping DNS pre-resolution was intentional or whether this should be
// re-wired into getConn (or removed).
func (*Forwarder) resolve(addr string) string {
	if host, port, err := net.SplitHostPort(addr); err == nil {
		if ips, err := net.LookupIP(host); err == nil && len(ips) > 0 {
			return net.JoinHostPort(ips[0].String(), port)
		}
	}
	return addr
}
// getConn dials the raw transport to the node's address via its Transporter.
// The address is passed through as-is (no DNS pre-resolution; see the note
// on resolve above).
func (c *Forwarder) getConn(ctx context.Context) (net.Conn, error) {
	if c.IsEmpty() {
		return nil, ErrorEmptyForwarder
	}
	return c.Node().Client.Dial(ctx, c.Node().Addr)
}
// Handler processes a single accepted connection.
type Handler interface {
	Handle(ctx context.Context, conn net.Conn)
}

// Client pairs a Transporter (establishing the raw connection) with a
// Connector (setting up the session over that connection).
type Client struct {
	Connector
	Transporter
}

// Connector builds the usable session on top of an already-established
// transport connection and returns the resulting connection.
type Connector interface {
	ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error)
}

// Transporter dials the raw transport to addr.
type Transporter interface {
	Dial(ctx context.Context, addr string) (net.Conn, error)
}

// Server couples a listener with the Handler invoked for each accepted
// connection.
type Server struct {
	Listener net.Listener
	Handler  Handler
}

View File

@@ -3,22 +3,22 @@ package core
import (
"context"
log "github.com/sirupsen/logrus"
"gvisor.dev/gvisor/pkg/tcpip/stack"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func ICMPForwarder(s *stack.Stack, ctx context.Context) func(stack.TransportEndpointID, *stack.PacketBuffer) bool {
func ICMPForwarder(ctx context.Context, s *stack.Stack) func(stack.TransportEndpointID, *stack.PacketBuffer) bool {
return func(id stack.TransportEndpointID, buffer *stack.PacketBuffer) bool {
log.Debugf("[TUN-ICMP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
plog.G(ctx).Infof("[TUN-ICMP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
)
ctx1, cancelFunc := context.WithCancel(ctx)
defer cancelFunc()
ok, err := util.PingOnce(ctx1, id.RemoteAddress.String(), id.LocalAddress.String())
if err != nil {
log.Debugf("[TUN-ICMP] Failed to ping dst %s from src %s",
plog.G(ctx).Errorf("[TUN-ICMP] Failed to ping dst %s from src %s",
id.LocalAddress.String(), id.RemoteAddress.String(),
)
}

View File

@@ -3,7 +3,6 @@ package core
import (
"context"
log "github.com/sirupsen/logrus"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/link/packetsocket"
@@ -13,6 +12,8 @@ import (
"gvisor.dev/gvisor/pkg/tcpip/transport/raw"
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
@@ -34,10 +35,10 @@ func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
RawFactory: raw.EndpointFactory{},
})
// set handler for TCP UDP ICMP
s.SetTransportProtocolHandler(tcp.ProtocolNumber, TCPForwarder(s, ctx))
s.SetTransportProtocolHandler(udp.ProtocolNumber, UDPForwarder(s, ctx))
s.SetTransportProtocolHandler(header.ICMPv4ProtocolNumber, ICMPForwarder(s, ctx))
s.SetTransportProtocolHandler(header.ICMPv6ProtocolNumber, ICMPForwarder(s, ctx))
s.SetTransportProtocolHandler(tcp.ProtocolNumber, TCPForwarder(ctx, s))
s.SetTransportProtocolHandler(udp.ProtocolNumber, UDPForwarder(ctx, s))
s.SetTransportProtocolHandler(header.ICMPv4ProtocolNumber, ICMPForwarder(ctx, s))
s.SetTransportProtocolHandler(header.ICMPv6ProtocolNumber, ICMPForwarder(ctx, s))
s.SetRouteTable([]tcpip.Route{
{
@@ -61,7 +62,7 @@ func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
{
opt := tcpip.TCPSACKEnabled(true)
if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
log.Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %v", tcp.ProtocolNumber, opt, opt, err)
plog.G(ctx).Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %v", tcp.ProtocolNumber, opt, opt, err)
}
}
@@ -69,10 +70,10 @@ func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
{
opt := tcpip.DefaultTTLOption(64)
if err := s.SetNetworkProtocolOption(ipv4.ProtocolNumber, &opt); err != nil {
log.Fatalf("SetNetworkProtocolOption(%d, &%T(%d)): %v", ipv4.ProtocolNumber, opt, opt, err)
plog.G(ctx).Fatalf("SetNetworkProtocolOption(%d, &%T(%d)): %v", ipv4.ProtocolNumber, opt, opt, err)
}
if err := s.SetNetworkProtocolOption(ipv6.ProtocolNumber, &opt); err != nil {
log.Fatalf("SetNetworkProtocolOption(%d, &%T(%d)): %v", ipv6.ProtocolNumber, opt, opt, err)
plog.G(ctx).Fatalf("SetNetworkProtocolOption(%d, &%T(%d)): %v", ipv6.ProtocolNumber, opt, opt, err)
}
}
@@ -80,23 +81,23 @@ func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
{
opt := tcpip.TCPModerateReceiveBufferOption(true)
if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
log.Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %v", tcp.ProtocolNumber, opt, opt, err)
plog.G(ctx).Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %v", tcp.ProtocolNumber, opt, opt, err)
}
}
{
if err := s.SetForwardingDefaultAndAllNICs(ipv4.ProtocolNumber, true); err != nil {
log.Fatalf("Set IPv4 forwarding: %v", err)
plog.G(ctx).Fatalf("Set IPv4 forwarding: %v", err)
}
if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, true); err != nil {
log.Fatalf("Set IPv6 forwarding: %v", err)
plog.G(ctx).Fatalf("Set IPv6 forwarding: %v", err)
}
}
{
option := tcpip.TCPModerateReceiveBufferOption(true)
if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &option); err != nil {
log.Fatalf("Set TCP moderate receive buffer: %v", err)
plog.G(ctx).Fatalf("Set TCP moderate receive buffer: %v", err)
}
}
return s

View File

@@ -1,30 +1,27 @@
package core
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"io"
"net"
"time"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"gvisor.dev/gvisor/pkg/waiter"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
func TCPForwarder(s *stack.Stack, ctx context.Context) func(stack.TransportEndpointID, *stack.PacketBuffer) bool {
func TCPForwarder(ctx context.Context, s *stack.Stack) func(stack.TransportEndpointID, *stack.PacketBuffer) bool {
return tcp.NewForwarder(s, 0, 100000, func(request *tcp.ForwarderRequest) {
defer request.Complete(false)
id := request.ID()
log.Debugf("[TUN-TCP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
plog.G(ctx).Infof("[TUN-TCP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
)
@@ -35,14 +32,14 @@ func TCPForwarder(s *stack.Stack, ctx context.Context) func(stack.TransportEndpo
var d = net.Dialer{Timeout: time.Second * 5}
remote, err := d.DialContext(ctx, "tcp", net.JoinHostPort(host, port))
if err != nil {
log.Errorf("[TUN-TCP] Failed to connect addr %s: %v", net.JoinHostPort(host, port), err)
plog.G(ctx).Errorf("[TUN-TCP] Failed to connect addr %s: %v", net.JoinHostPort(host, port), err)
return
}
w := &waiter.Queue{}
endpoint, tErr := request.CreateEndpoint(w)
if tErr != nil {
log.Debugf("[TUN-TCP] Failed to create endpoint: %v", tErr)
plog.G(ctx).Errorf("[TUN-TCP] Failed to create endpoint: %v", tErr)
return
}
conn := gonet.NewTCPConn(w, endpoint)
@@ -54,74 +51,19 @@ func TCPForwarder(s *stack.Stack, ctx context.Context) func(stack.TransportEndpo
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
written, err2 := io.CopyBuffer(remote, conn, buf)
log.Debugf("[TUN-TCP] Write length %d data to remote", written)
plog.G(ctx).Infof("[TUN-TCP] Write length %d data to remote", written)
errChan <- err2
}()
go func() {
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
written, err2 := io.CopyBuffer(conn, remote, buf)
log.Debugf("[TUN-TCP] Read length %d data from remote", written)
plog.G(ctx).Infof("[TUN-TCP] Read length %d data from remote", written)
errChan <- err2
}()
err = <-errChan
if err != nil && !errors.Is(err, io.EOF) {
log.Debugf("[TUN-TCP] Disconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err)
plog.G(ctx).Errorf("[TUN-TCP] Disconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err)
}
}).HandlePacket
}
func WriteProxyInfo(conn net.Conn, id stack.TransportEndpointID) error {
var b bytes.Buffer
i := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(i[:])
binary.BigEndian.PutUint16(i, id.LocalPort)
b.Write(i)
binary.BigEndian.PutUint16(i, id.RemotePort)
b.Write(i)
b.WriteByte(byte(id.LocalAddress.Len()))
b.Write(id.LocalAddress.AsSlice())
b.WriteByte(byte(id.RemoteAddress.Len()))
b.Write(id.RemoteAddress.AsSlice())
_, err := b.WriteTo(conn)
return err
}
// ParseProxyInfo decodes the proxy header written by WriteProxyInfo from
// conn: local port (2 bytes, big-endian), remote port (2 bytes,
// big-endian), then two length-prefixed addresses (1-byte length plus the
// raw address bytes each).
func ParseProxyInfo(conn net.Conn) (id stack.TransportEndpointID, err error) {
	// readPort consumes exactly two bytes and decodes them big-endian.
	readPort := func() (uint16, error) {
		var p [2]byte
		if _, e := io.ReadFull(conn, p[:]); e != nil {
			return 0, e
		}
		return binary.BigEndian.Uint16(p[:]), nil
	}
	// readAddr consumes a 1-byte length prefix followed by that many
	// raw address bytes.
	readAddr := func() (tcpip.Address, error) {
		var l [1]byte
		if _, e := io.ReadFull(conn, l[:]); e != nil {
			return tcpip.Address{}, e
		}
		raw := make([]byte, l[0])
		if _, e := io.ReadFull(conn, raw); e != nil {
			return tcpip.Address{}, e
		}
		return tcpip.AddrFromSlice(raw), nil
	}
	if id.LocalPort, err = readPort(); err != nil {
		return
	}
	if id.RemotePort, err = readPort(); err != nil {
		return
	}
	if id.LocalAddress, err = readAddr(); err != nil {
		return
	}
	id.RemoteAddress, err = readAddr()
	return
}

View File

@@ -2,23 +2,25 @@ package core
import (
"context"
"crypto/tls"
"errors"
"net"
"sync"
log "github.com/sirupsen/logrus"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
"gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
type gvisorTCPHandler struct {
// map[srcIP]net.Conn
routeMapTCP *sync.Map
packetChan chan *datagramPacket
packetChan chan *Packet
}
func GvisorTCPHandler() Handler {
@@ -32,7 +34,7 @@ func (h *gvisorTCPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
defer tcpConn.Close()
cancel, cancelFunc := context.WithCancel(ctx)
defer cancelFunc()
log.Debugf("[TCP] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
plog.G(ctx).Infof("[TUN-GVISOR] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
h.handle(cancel, tcpConn)
}
@@ -41,7 +43,7 @@ func (h *gvisorTCPHandler) handle(ctx context.Context, tcpConn net.Conn) {
errChan := make(chan error, 2)
go func() {
defer util.HandleCrash()
h.readFromTCPConnWriteToEndpoint(ctx, tcpConn, endpoint)
h.readFromTCPConnWriteToEndpoint(ctx, NewBufferedTCP(tcpConn), endpoint)
util.SafeClose(errChan)
}()
go func() {
@@ -60,14 +62,25 @@ func (h *gvisorTCPHandler) handle(ctx context.Context, tcpConn net.Conn) {
}
func GvisorTCPListener(addr string) (net.Listener, error) {
log.Debugf("Gvisor TCP listening addr: %s", addr)
plog.G(context.Background()).Infof("Gvisor TCP listening addr: %s", addr)
laddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
return nil, err
}
ln, err := net.ListenTCP("tcp", laddr)
listener, err := net.ListenTCP("tcp", laddr)
if err != nil {
return nil, err
}
return &tcpKeepAliveListener{TCPListener: ln}, nil
serverConfig, err := util.GetTlsServerConfig(nil)
if err != nil {
if errors.Is(err, util.ErrNoTLSConfig) {
plog.G(context.Background()).Warn("tls config not found in config, use raw tcp mode")
return &tcpKeepAliveListener{TCPListener: listener}, nil
}
plog.G(context.Background()).Errorf("failed to get tls server config: %v", err)
_ = listener.Close()
return nil, err
}
plog.G(context.Background()).Debugf("Use tls mode")
return tls.NewListener(&tcpKeepAliveListener{TCPListener: listener}, serverConfig), nil
}

View File

@@ -5,7 +5,6 @@ import (
"net"
"github.com/google/gopacket/layers"
log "github.com/sirupsen/logrus"
"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
"gvisor.dev/gvisor/pkg/buffer"
@@ -16,25 +15,20 @@ import (
"gvisor.dev/gvisor/pkg/tcpip/stack"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func (h *gvisorTCPHandler) readFromEndpointWriteToTCPConn(ctx context.Context, conn net.Conn, endpoint *channel.Endpoint) {
tcpConn, _ := newGvisorFakeUDPTunnelConnOverTCP(ctx, conn)
for {
select {
case <-ctx.Done():
return
default:
}
tcpConn, _ := newGvisorUDPConnOverTCP(ctx, conn)
for ctx.Err() == nil {
pktBuffer := endpoint.ReadContext(ctx)
if pktBuffer != nil {
sniffer.LogPacket("[gVISOR] ", sniffer.DirectionSend, pktBuffer.NetworkProtocolNumber, pktBuffer)
buf := pktBuffer.ToView().AsSlice()
_, err := tcpConn.Write(buf)
if err != nil {
log.Errorf("[TUN] Failed to write data to tun device: %v", err)
plog.G(ctx).Errorf("[TUN-GVISOR] Failed to write data to tun device: %v", err)
}
}
}
@@ -42,23 +36,19 @@ func (h *gvisorTCPHandler) readFromEndpointWriteToTCPConn(ctx context.Context, c
// tun --> dispatcher
func (h *gvisorTCPHandler) readFromTCPConnWriteToEndpoint(ctx context.Context, conn net.Conn, endpoint *channel.Endpoint) {
tcpConn, _ := newGvisorFakeUDPTunnelConnOverTCP(ctx, conn)
for {
select {
case <-ctx.Done():
return
default:
}
tcpConn, _ := newGvisorUDPConnOverTCP(ctx, conn)
defer h.removeFromRouteMapTCP(ctx, conn)
for ctx.Err() == nil {
buf := config.LPool.Get().([]byte)[:]
read, err := tcpConn.Read(buf[:])
if err != nil {
log.Errorf("[TUN] Failed to read from tcp conn: %v", err)
plog.G(ctx).Errorf("[TCP-GVISOR] Failed to read from tcp conn: %v", err)
config.LPool.Put(buf[:])
return
}
if read == 0 {
log.Warnf("[TUN] Read from tcp conn length is %d", read)
plog.G(ctx).Warnf("[TCP-GVISOR] Read from tcp conn length is %d", read)
config.LPool.Put(buf[:])
continue
}
@@ -72,7 +62,7 @@ func (h *gvisorTCPHandler) readFromTCPConnWriteToEndpoint(ctx context.Context, c
protocol = header.IPv4ProtocolNumber
ipHeader, err := ipv4.ParseHeader(buf[:read])
if err != nil {
log.Errorf("Failed to parse IPv4 header: %v", err)
plog.G(ctx).Errorf("Failed to parse IPv4 header: %v", err)
config.LPool.Put(buf[:])
continue
}
@@ -83,7 +73,7 @@ func (h *gvisorTCPHandler) readFromTCPConnWriteToEndpoint(ctx context.Context, c
protocol = header.IPv6ProtocolNumber
ipHeader, err := ipv6.ParseHeader(buf[:read])
if err != nil {
log.Errorf("Failed to parse IPv6 header: %s", err.Error())
plog.G(ctx).Errorf("[TCP-GVISOR] Failed to parse IPv6 header: %s", err.Error())
config.LPool.Put(buf[:])
continue
}
@@ -91,19 +81,22 @@ func (h *gvisorTCPHandler) readFromTCPConnWriteToEndpoint(ctx context.Context, c
src = ipHeader.Src
dst = ipHeader.Dst
} else {
log.Debugf("[TUN-GVISOR] Unknown packet")
plog.G(ctx).Errorf("[TCP-GVISOR] Unknown packet")
config.LPool.Put(buf[:])
continue
}
h.addRoute(src, conn)
// inner ip like 223.254.0.100/102/103 connect each other
if config.CIDR.Contains(dst) || config.CIDR6.Contains(dst) {
log.Tracef("[TUN-RAW] Forward to TUN device, SRC: %s, DST: %s, Length: %d", src.String(), dst.String(), read)
util.SafeWrite(h.packetChan, &datagramPacket{
DataLength: uint16(read),
Data: buf[:],
})
h.addToRouteMapTCP(ctx, src, conn)
// inner ip like 198.19.0.100/102/103 connect each other
// for issue 594, sometimes k8s service network CIDR also use CIDR 198.19.151.170
// if we can find dst in route map, just trade packet as inner communicate
// if not find dst in route map, just trade packet as k8s service/pod ip
_, found := h.routeMapTCP.Load(dst.String())
if found && (config.CIDR.Contains(dst) || config.CIDR6.Contains(dst)) {
err = h.handlePacket(ctx, buf, read, src, dst, layers.IPProtocol(ipProtocol).String())
if err != nil {
plog.G(ctx).Errorf("[TCP-GVISOR] Failed to handle packet: %v", err)
}
continue
}
@@ -115,18 +108,51 @@ func (h *gvisorTCPHandler) readFromTCPConnWriteToEndpoint(ctx context.Context, c
sniffer.LogPacket("[gVISOR] ", sniffer.DirectionRecv, protocol, pkt)
endpoint.InjectInbound(protocol, pkt)
pkt.DecRef()
log.Tracef("[TUN-%s] Write to Gvisor IP-Protocol: %s, SRC: %s, DST: %s, Length: %d", layers.IPProtocol(ipProtocol).String(), layers.IPProtocol(ipProtocol).String(), src.String(), dst, read)
plog.G(ctx).Debugf("[TCP-GVISOR] Write to Gvisor. SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, layers.IPProtocol(ipProtocol).String(), read)
}
}
func (h *gvisorTCPHandler) addRoute(src net.IP, tcpConn net.Conn) {
func (h *gvisorTCPHandler) handlePacket(ctx context.Context, buf []byte, length int, src, dst net.IP, protocol string) error {
if conn, ok := h.routeMapTCP.Load(dst.String()); ok {
plog.G(ctx).Debugf("[TCP-GVISOR] Find TCP route SRC: %s to DST: %s -> %s", src, dst, conn.(net.Conn).RemoteAddr())
dgram := newDatagramPacket(buf, length)
err := dgram.Write(conn.(net.Conn))
config.LPool.Put(buf[:])
if err != nil {
plog.G(ctx).Errorf("[TCP-GVISOR] Failed to write to %s <- %s : %s", conn.(net.Conn).RemoteAddr(), conn.(net.Conn).LocalAddr(), err)
return err
}
} else if config.RouterIP.Equal(dst) || config.RouterIP6.Equal(dst) {
plog.G(ctx).Debugf("[TCP-GVISOR] Forward to TUN device, SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, protocol, length)
util.SafeWrite(h.packetChan, NewPacket(buf[:], length, src, dst), func(v *Packet) {
config.LPool.Put(v.data[:])
plog.G(context.Background()).Errorf("[TCP-GVISOR] Drop packet, SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, protocol, v.length)
})
} else {
plog.G(ctx).Warnf("[TCP-GVISOR] No route for src: %s -> dst: %s, drop it", src, dst)
config.LPool.Put(buf[:])
}
return nil
}
func (h *gvisorTCPHandler) addToRouteMapTCP(ctx context.Context, src net.IP, tcpConn net.Conn) {
value, loaded := h.routeMapTCP.LoadOrStore(src.String(), tcpConn)
if loaded {
if tcpConn != value.(net.Conn) {
if value.(net.Conn) != tcpConn {
h.routeMapTCP.Store(src.String(), tcpConn)
log.Debugf("[TCP] Replace route map TCP: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
plog.G(ctx).Infof("[TUN-GVISOR] Replace route map TCP: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
}
} else {
log.Debugf("[TCP] Add new route map TCP: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
plog.G(ctx).Infof("[TUN-GVISOR] Add new route map TCP: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
}
}
// removeFromRouteMapTCP deletes every route-table entry whose value is
// tcpConn. It runs when the connection is torn down so stale entries do
// not keep routing packets into a dead connection.
func (h *gvisorTCPHandler) removeFromRouteMapTCP(ctx context.Context, tcpConn net.Conn) {
	h.routeMapTCP.Range(func(key, value any) bool {
		if value.(net.Conn) == tcpConn {
			h.routeMapTCP.Delete(key)
			plog.G(ctx).Infof("[TCP-GVISOR] Delete DST %s of conn %s from global route map TCP", key, tcpConn.LocalAddr())
		}
		return true
	})
}

View File

@@ -7,20 +7,20 @@ import (
"time"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
"gvisor.dev/gvisor/pkg/waiter"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func UDPForwarder(s *stack.Stack, ctx context.Context) func(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool {
func UDPForwarder(ctx context.Context, s *stack.Stack) func(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool {
return udp.NewForwarder(s, func(request *udp.ForwarderRequest) {
id := request.ID()
log.Debugf("[TUN-UDP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
plog.G(ctx).Infof("[TUN-UDP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
)
src := &net.UDPAddr{
@@ -35,14 +35,14 @@ func UDPForwarder(s *stack.Stack, ctx context.Context) func(id stack.TransportEn
w := &waiter.Queue{}
endpoint, tErr := request.CreateEndpoint(w)
if tErr != nil {
log.Debugf("[TUN-UDP] Failed to create endpoint to dst: %s: %v", dst.String(), tErr)
plog.G(ctx).Errorf("[TUN-UDP] Failed to create endpoint to dst: %s: %v", dst.String(), tErr)
return
}
// dial dst
remote, err1 := net.DialUDP("udp", nil, dst)
if err1 != nil {
log.Errorf("[TUN-UDP] Failed to connect dst: %s: %v", dst.String(), err1)
plog.G(ctx).Errorf("[TUN-UDP] Failed to connect dst: %s: %v", dst.String(), err1)
return
}
@@ -78,7 +78,7 @@ func UDPForwarder(s *stack.Stack, ctx context.Context) func(id stack.TransportEn
break
}
}
log.Debugf("[TUN-UDP] Write length %d data from src: %s -> dst: %s", written, src.String(), dst.String())
plog.G(ctx).Infof("[TUN-UDP] Write length %d data from src: %s -> dst: %s", written, src, dst)
errChan <- err
}()
go func() {
@@ -108,12 +108,12 @@ func UDPForwarder(s *stack.Stack, ctx context.Context) func(id stack.TransportEn
break
}
}
log.Debugf("[TUN-UDP] Read length %d data from dst: %s -> src: %s", written, dst.String(), src.String())
plog.G(ctx).Infof("[TUN-UDP] Read length %d data from dst: %s -> src: %s", written, dst, src)
errChan <- err
}()
err1 = <-errChan
if err1 != nil && !errors.Is(err1, io.EOF) {
log.Debugf("[TUN-UDP] Disconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err1)
plog.G(ctx).Errorf("[TUN-UDP] Disconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err1)
}
}()
}).HandlePacket

View File

@@ -3,11 +3,14 @@ package core
import (
"context"
"fmt"
"io"
"net"
"time"
log "github.com/sirupsen/logrus"
"github.com/pkg/errors"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
@@ -19,14 +22,14 @@ func GvisorUDPHandler() Handler {
func (h *gvisorUDPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
defer tcpConn.Close()
log.Debugf("[TUN-UDP] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
plog.G(ctx).Debugf("[TUN-UDP] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
// 1, get proxy info
endpointID, err := ParseProxyInfo(tcpConn)
endpointID, err := util.ParseProxyInfo(tcpConn)
if err != nil {
log.Errorf("[TUN-UDP] Failed to parse proxy info: %v", err)
plog.G(ctx).Errorf("[TUN-UDP] Failed to parse proxy info: %v", err)
return
}
log.Debugf("[TUN-UDP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
plog.G(ctx).Infof("[TUN-UDP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress: %s",
endpointID.LocalPort, endpointID.LocalAddress.String(), endpointID.RemotePort, endpointID.RemoteAddress.String(),
)
// 2, dial proxy
@@ -37,45 +40,49 @@ func (h *gvisorUDPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
var remote *net.UDPConn
remote, err = net.DialUDP("udp", nil, addr)
if err != nil {
log.Errorf("[TUN-UDP] Failed to connect addr %s: %v", addr.String(), err)
plog.G(ctx).Errorf("[TUN-UDP] Failed to connect addr %s: %v", addr.String(), err)
return
}
handle(ctx, tcpConn, remote)
}
// fake udp connect over tcp
type gvisorFakeUDPTunnelConn struct {
type gvisorUDPConnOverTCP struct {
// tcp connection
net.Conn
ctx context.Context
}
func newGvisorFakeUDPTunnelConnOverTCP(ctx context.Context, conn net.Conn) (net.Conn, error) {
return &gvisorFakeUDPTunnelConn{ctx: ctx, Conn: conn}, nil
func newGvisorUDPConnOverTCP(ctx context.Context, conn net.Conn) (net.Conn, error) {
return &gvisorUDPConnOverTCP{ctx: ctx, Conn: conn}, nil
}
func (c *gvisorFakeUDPTunnelConn) Read(b []byte) (int, error) {
func (c *gvisorUDPConnOverTCP) Read(b []byte) (int, error) {
select {
case <-c.ctx.Done():
return 0, c.ctx.Err()
default:
dgram, err := readDatagramPacket(c.Conn, b)
datagram, err := readDatagramPacket(c.Conn, b)
if err != nil {
return 0, err
}
return int(dgram.DataLength), nil
return int(datagram.DataLength), nil
}
}
func (c *gvisorFakeUDPTunnelConn) Write(b []byte) (int, error) {
dgram := newDatagramPacket(b)
if err := dgram.Write(c.Conn); err != nil {
func (c *gvisorUDPConnOverTCP) Write(b []byte) (int, error) {
buf := config.LPool.Get().([]byte)[:]
n := copy(buf, b)
defer config.LPool.Put(buf)
packet := newDatagramPacket(buf, n)
if err := packet.Write(c.Conn); err != nil {
return 0, err
}
return len(b), nil
}
func (c *gvisorFakeUDPTunnelConn) Close() error {
func (c *gvisorUDPConnOverTCP) Close() error {
if cc, ok := c.Conn.(interface{ CloseRead() error }); ok {
_ = cc.CloseRead()
}
@@ -86,7 +93,7 @@ func (c *gvisorFakeUDPTunnelConn) Close() error {
}
func GvisorUDPListener(addr string) (net.Listener, error) {
log.Debugf("Gvisor UDP over TCP listening addr: %s", addr)
plog.G(context.Background()).Infof("Gvisor UDP over TCP listening addr: %s", addr)
laddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
return nil, err
@@ -100,50 +107,39 @@ func GvisorUDPListener(addr string) (net.Listener, error) {
func handle(ctx context.Context, tcpConn net.Conn, udpConn *net.UDPConn) {
defer udpConn.Close()
log.Debugf("[TUN-UDP] %s <-> %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
plog.G(ctx).Debugf("[TUN-UDP] %s <-> %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
errChan := make(chan error, 2)
go func() {
defer util.HandleCrash()
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
for {
select {
case <-ctx.Done():
return
default:
}
for ctx.Err() == nil {
err := tcpConn.SetReadDeadline(time.Now().Add(time.Second * 30))
if err != nil {
log.Errorf("[TUN-UDP] Failed to set read deadline: %v", err)
errChan <- err
errChan <- errors.WithMessage(err, "set read deadline failed")
return
}
dgram, err := readDatagramPacket(tcpConn, buf[:])
datagram, err := readDatagramPacket(tcpConn, buf)
if err != nil {
log.Errorf("[TUN-UDP] %s -> %s: %v", tcpConn.RemoteAddr(), udpConn.LocalAddr(), err)
errChan <- err
errChan <- errors.WithMessage(err, "read datagram packet failed")
return
}
if dgram.DataLength == 0 {
log.Errorf("[TUN-UDP] Length is zero")
if datagram.DataLength == 0 {
errChan <- fmt.Errorf("length of read packet is zero")
return
}
err = udpConn.SetWriteDeadline(time.Now().Add(time.Second * 30))
if err != nil {
log.Errorf("[TUN-UDP] Failed to set write deadline: %v", err)
errChan <- err
errChan <- errors.WithMessage(err, "set write deadline failed")
return
}
if _, err = udpConn.Write(dgram.Data); err != nil {
log.Errorf("[TUN-UDP] %s -> %s : %s", tcpConn.RemoteAddr(), "localhost:8422", err)
errChan <- err
if _, err = udpConn.Write(datagram.Data[:datagram.DataLength]); err != nil {
errChan <- errors.WithMessage(err, "write datagram packet failed")
return
}
log.Debugf("[TUN-UDP] %s >>> %s length: %d", tcpConn.RemoteAddr(), "localhost:8422", dgram.DataLength)
plog.G(ctx).Debugf("[TUN-UDP] %s >>> %s length: %d", tcpConn.RemoteAddr(), udpConn.RemoteAddr(), datagram.DataLength)
}
}()
@@ -152,27 +148,18 @@ func handle(ctx context.Context, tcpConn net.Conn, udpConn *net.UDPConn) {
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
for {
select {
case <-ctx.Done():
return
default:
}
for ctx.Err() == nil {
err := udpConn.SetReadDeadline(time.Now().Add(time.Second * 30))
if err != nil {
log.Errorf("[TUN-UDP] Failed to set read deadline failed: %v", err)
errChan <- err
errChan <- errors.WithMessage(err, "set read deadline failed")
return
}
n, _, err := udpConn.ReadFrom(buf[:])
if err != nil {
log.Errorf("[TUN-UDP] %s : %s", tcpConn.RemoteAddr(), err)
errChan <- err
errChan <- errors.WithMessage(err, "read datagram packet failed")
return
}
if n == 0 {
log.Errorf("[TUN-UDP] Length is zero")
errChan <- fmt.Errorf("length of read packet is zero")
return
}
@@ -180,23 +167,21 @@ func handle(ctx context.Context, tcpConn net.Conn, udpConn *net.UDPConn) {
// pipe from peer to tunnel
err = tcpConn.SetWriteDeadline(time.Now().Add(time.Second * 30))
if err != nil {
log.Errorf("[TUN-UDP] Error: set write deadline failed: %v", err)
errChan <- errors.WithMessage(err, "set write deadline failed")
return
}
packet := newDatagramPacket(buf, n)
if err = packet.Write(tcpConn); err != nil {
errChan <- err
return
}
dgram := newDatagramPacket(buf[:n])
if err = dgram.Write(tcpConn); err != nil {
log.Errorf("[TUN-UDP] Error: %s <- %s : %s", tcpConn.RemoteAddr(), dgram.Addr(), err)
errChan <- err
return
}
log.Debugf("[TUN-UDP] %s <<< %s length: %d", tcpConn.RemoteAddr(), dgram.Addr(), len(dgram.Data))
plog.G(ctx).Debugf("[TUN-UDP] %s <<< %s length: %d", tcpConn.RemoteAddr(), tcpConn.LocalAddr(), packet.DataLength)
}
}()
err := <-errChan
if err != nil {
log.Errorf("[TUN-UDP] %v", err)
if err != nil && !errors.Is(err, io.EOF) {
plog.G(ctx).Errorf("[TUN-UDP] %v", err)
}
log.Debugf("[TUN-UDP] %s >-< %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
plog.G(ctx).Debugf("[TUN-UDP] %s >-< %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
return
}

69
pkg/core/packetconn.go Normal file
View File

@@ -0,0 +1,69 @@
package core
import (
	"context"
	"fmt"
	"net"

	"github.com/wencaiwulue/kubevpn/v2/pkg/config"
)
// Compile-time check that PacketConnOverTCP implements net.PacketConn.
var _ net.PacketConn = (*PacketConnOverTCP)(nil)

// PacketConnOverTCP presents a stream-oriented TCP connection as a
// packet connection by framing every payload as a length-prefixed
// datagram on the wire. Only reads observe ctx cancellation; writes go
// straight to the underlying connection.
type PacketConnOverTCP struct {
	// underlying tcp connection; addresses and deadlines come from it
	net.Conn
	// ctx makes pending/subsequent ReadFrom calls fail once cancelled
	ctx context.Context
}
// NewPacketConnOverTCP wraps conn in a PacketConnOverTCP bound to ctx.
// The error return is always nil; it is kept for interface symmetry
// with the other connector constructors in this package.
func NewPacketConnOverTCP(ctx context.Context, conn net.Conn) (net.Conn, error) {
	return &PacketConnOverTCP{ctx: ctx, Conn: conn}, nil
}
// ReadFrom reads one datagram-framed packet from the underlying TCP
// connection into b and returns its payload length. The net.Addr result
// is always nil: the peer is fixed by the TCP connection itself. Once
// the context is cancelled, the context error is returned instead.
func (c *PacketConnOverTCP) ReadFrom(b []byte) (int, net.Addr, error) {
	if err := c.ctx.Err(); err != nil {
		return 0, nil, err
	}
	packet, err := readDatagramPacket(c.Conn, b)
	if err != nil {
		return 0, nil, err
	}
	return int(packet.DataLength), nil, nil
}
// Read implements net.Conn by delegating to ReadFrom and discarding the
// (always-nil) source address.
func (c *PacketConnOverTCP) Read(b []byte) (int, error) {
	n, _, err := c.ReadFrom(b)
	return n, err
}
// WriteTo frames b as a single datagram packet and writes it to the
// underlying TCP connection, returning len(b) on success. The addr
// argument is ignored because the destination is fixed by the TCP
// connection. Payloads larger than the pooled staging buffer are
// rejected with an error rather than silently truncated: a short copy
// followed by a framed write would corrupt the peer's view of the
// stream while still reporting full success to the caller.
func (c *PacketConnOverTCP) WriteTo(b []byte, _ net.Addr) (int, error) {
	if len(b) == 0 {
		return 0, nil
	}
	buf := config.LPool.Get().([]byte)[:]
	defer config.LPool.Put(buf)
	n := copy(buf, b)
	if n < len(b) {
		return 0, fmt.Errorf("packet of %d bytes exceeds staging buffer of %d bytes", len(b), len(buf))
	}
	packet := newDatagramPacket(buf, n)
	if err := packet.Write(c.Conn); err != nil {
		return 0, err
	}
	return len(b), nil
}
// Write implements net.Conn by delegating to WriteTo with a nil address
// (the destination is fixed by the underlying TCP connection).
func (c *PacketConnOverTCP) Write(b []byte) (int, error) {
	n, err := c.WriteTo(b, nil)
	return n, err
}
// Close shuts down the wrapped connection. When the underlying
// connection supports half-close (as *net.TCPConn does), the read and
// write sides are closed first so the peer observes EOF promptly; any
// half-close errors are ignored and the final Close result is returned.
func (c *PacketConnOverTCP) Close() error {
	type readCloser interface{ CloseRead() error }
	type writeCloser interface{ CloseWrite() error }
	if rc, ok := c.Conn.(readCloser); ok {
		_ = rc.CloseRead()
	}
	if wc, ok := c.Conn.(writeCloser); ok {
		_ = wc.CloseWrite()
	}
	return c.Conn.Close()
}

View File

@@ -1,17 +1,16 @@
package core
import (
"context"
"fmt"
"net"
"os"
"strings"
"sync"
"github.com/containernetworking/cni/pkg/types"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/tun"
)
@@ -19,124 +18,113 @@ var (
// RouteMapTCP map[srcIP]net.Conn Globe route table for inner ip
RouteMapTCP = &sync.Map{}
// TCPPacketChan tcp connects
TCPPacketChan = make(chan *datagramPacket, MaxSize)
TCPPacketChan = make(chan *Packet, MaxSize)
)
type TCPUDPacket struct {
data *datagramPacket
data *DatagramPacket
}
// Route example:
// -L "tcp://:10800" -L "tun://:8422?net=223.254.0.100/16"
// -L "tun:/10.233.24.133:8422?net=223.254.0.102/16&route=223.254.0.0/16"
// -L "tun:/127.0.0.1:8422?net=223.254.0.102/16&route=223.254.0.0/16,10.233.0.0/16" -F "tcp://127.0.0.1:10800"
// -l "tcp://:10800" -l "tun://:8422?net=198.19.0.100/16"
// -l "tun:/10.233.24.133:8422?net=198.19.0.102/16&route=198.19.0.0/16"
// -l "tun:/127.0.0.1:8422?net=198.19.0.102/16&route=198.19.0.0/16,10.233.0.0/16" -f "tcp://127.0.0.1:10800"
type Route struct {
ServeNodes []string // -L tun
ChainNode string // -F tcp
Retries int
Listeners []string // -l tun
Forwarder string // -f tcp
Retries int
}
func (r *Route) parseChain() (*Chain, error) {
node, err := parseChainNode(r.ChainNode)
func (r *Route) ParseForwarder() (*Forwarder, error) {
forwarder, err := ParseNode(r.Forwarder)
if err != nil {
return nil, err
}
return NewChain(r.Retries, node), nil
}
func parseChainNode(ns string) (*Node, error) {
node, err := ParseNode(ns)
if err != nil {
return nil, err
forwarder.Client = &Client{
Connector: NewUDPOverTCPConnector(),
Transporter: TCPTransporter(nil),
}
node.Client = &Client{
Connector: UDPOverTCPTunnelConnector(),
Transporter: TCPTransporter(),
}
return node, nil
return NewForwarder(r.Retries, forwarder), nil
}
func (r *Route) GenerateServers() ([]Server, error) {
chain, err := r.parseChain()
forwarder, err := r.ParseForwarder()
if err != nil && !errors.Is(err, ErrorInvalidNode) {
log.Errorf("Failed to parse chain: %v", err)
plog.G(context.Background()).Errorf("Failed to parse forwarder: %v", err)
return nil, err
}
servers := make([]Server, 0, len(r.ServeNodes))
for _, serveNode := range r.ServeNodes {
servers := make([]Server, 0, len(r.Listeners))
for _, l := range r.Listeners {
var node *Node
node, err = ParseNode(serveNode)
node, err = ParseNode(l)
if err != nil {
log.Errorf("Failed to parse node %s: %v", serveNode, err)
plog.G(context.Background()).Errorf("Failed to parse node %s: %v", l, err)
return nil, err
}
var ln net.Listener
var listener net.Listener
var handler Handler
switch node.Protocol {
case "tun":
handler = TunHandler(chain, node)
ln, err = tun.Listener(tun.Config{
handler = TunHandler(forwarder, node)
listener, err = tun.Listener(tun.Config{
Name: node.Get("name"),
Addr: node.Get("net"),
Addr6: os.Getenv(config.EnvInboundPodTunIPv6),
Addr6: node.Get("net6"),
MTU: node.GetInt("mtu"),
Routes: parseIPRoutes(node.Get("route")),
Routes: parseRoutes(node.Get("route")),
Gateway: node.Get("gw"),
})
if err != nil {
log.Errorf("Failed to create tun listener: %v", err)
plog.G(context.Background()).Errorf("Failed to create tun listener: %v", err)
return nil, err
}
case "tcp":
handler = TCPHandler()
ln, err = TCPListener(node.Addr)
listener, err = TCPListener(node.Addr)
if err != nil {
log.Errorf("Failed to create tcp listener: %v", err)
plog.G(context.Background()).Errorf("Failed to create tcp listener: %v", err)
return nil, err
}
case "gtcp":
handler = GvisorTCPHandler()
ln, err = GvisorTCPListener(node.Addr)
listener, err = GvisorTCPListener(node.Addr)
if err != nil {
log.Errorf("Failed to create gvisor tcp listener: %v", err)
plog.G(context.Background()).Errorf("Failed to create gvisor tcp listener: %v", err)
return nil, err
}
case "gudp":
handler = GvisorUDPHandler()
ln, err = GvisorUDPListener(node.Addr)
listener, err = GvisorUDPListener(node.Addr)
if err != nil {
log.Errorf("Failed to create gvisor udp listener: %v", err)
plog.G(context.Background()).Errorf("Failed to create gvisor udp listener: %v", err)
return nil, err
}
case "ssh":
handler = SSHHandler()
ln, err = SSHListener(node.Addr)
listener, err = SSHListener(node.Addr)
if err != nil {
log.Errorf("Failed to create ssh listener: %v", err)
plog.G(context.Background()).Errorf("Failed to create ssh listener: %v", err)
return nil, err
}
default:
log.Errorf("Not support protocol %s", node.Protocol)
plog.G(context.Background()).Errorf("Not support protocol %s", node.Protocol)
return nil, fmt.Errorf("not support protocol %s", node.Protocol)
}
servers = append(servers, Server{Listener: ln, Handler: handler})
servers = append(servers, Server{Listener: listener, Handler: handler})
}
return servers, nil
}
func parseIPRoutes(routeStringList string) (routes []types.Route) {
if len(routeStringList) == 0 {
return
}
routeList := strings.Split(routeStringList, ",")
for _, route := range routeList {
func parseRoutes(str string) []types.Route {
var routes []types.Route
list := strings.Split(str, ",")
for _, route := range list {
if _, ipNet, _ := net.ParseCIDR(strings.TrimSpace(route)); ipNet != nil {
routes = append(routes, types.Route{Dst: *ipNet})
}
}
return
return routes
}

View File

@@ -8,8 +8,9 @@ import (
"net"
"github.com/gliderlabs/ssh"
log "github.com/sirupsen/logrus"
gossh "golang.org/x/crypto/ssh"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
func SSHListener(addr string) (net.Listener, error) {
@@ -17,7 +18,7 @@ func SSHListener(addr string) (net.Listener, error) {
if err != nil {
return nil, err
}
log.Debugf("starting ssh server on port %s...", addr)
plog.G(context.Background()).Infof("starting ssh server on port %s...", addr)
return ln, err
}
@@ -32,15 +33,15 @@ func (s *sshHandler) Handle(ctx context.Context, conn net.Conn) {
forwardHandler := &ssh.ForwardedTCPHandler{}
server := ssh.Server{
LocalPortForwardingCallback: ssh.LocalPortForwardingCallback(func(ctx ssh.Context, dhost string, dport uint32) bool {
log.Println("Accepted forward", dhost, dport)
plog.G(ctx).Infoln("Accepted forward", dhost, dport)
return true
}),
Handler: ssh.Handler(func(s ssh.Session) {
io.WriteString(s, "Remote forwarding available...\n")
select {}
<-s.Context().Done()
}),
ReversePortForwardingCallback: ssh.ReversePortForwardingCallback(func(ctx ssh.Context, host string, port uint32) bool {
log.Println("attempt to bind", host, port, "granted")
plog.G(ctx).Infoln("attempt to bind", host, port, "granted")
return true
}),
RequestHandlers: map[string]ssh.RequestHandler{

View File

@@ -2,20 +2,44 @@ package core
import (
"context"
"crypto/tls"
"errors"
"net"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
type tcpTransporter struct{}
type tcpTransporter struct {
tlsConfig *tls.Config
}
func TCPTransporter() Transporter {
return &tcpTransporter{}
func TCPTransporter(tlsInfo map[string][]byte) Transporter {
tlsConfig, err := util.GetTlsClientConfig(tlsInfo)
if err != nil {
if errors.Is(err, util.ErrNoTLSConfig) {
plog.G(context.Background()).Warn("tls config not found in config, use raw tcp mode")
return &tcpTransporter{}
}
plog.G(context.Background()).Errorf("failed to get tls client config: %v", err)
return &tcpTransporter{}
}
return &tcpTransporter{tlsConfig: tlsConfig}
}
func (tr *tcpTransporter) Dial(ctx context.Context, addr string) (net.Conn, error) {
dialer := &net.Dialer{Timeout: config.DialTimeout}
return dialer.DialContext(ctx, "tcp", addr)
dialer := &net.Dialer{Timeout: config.DialTimeout, KeepAlive: config.KeepAliveTime}
conn, err := dialer.DialContext(ctx, "tcp", addr)
if err != nil {
return nil, err
}
if tr.tlsConfig == nil {
plog.G(ctx).Debugf("tls config not found in config, use raw tcp mode")
return conn, nil
}
plog.G(ctx).Debugf("Use tls mode")
return tls.Client(conn, tr.tlsConfig), nil
}
func TCPListener(addr string) (net.Listener, error) {
@@ -23,11 +47,20 @@ func TCPListener(addr string) (net.Listener, error) {
if err != nil {
return nil, err
}
ln, err := net.ListenTCP("tcp", laddr)
listener, err := net.ListenTCP("tcp", laddr)
if err != nil {
return nil, err
}
return &tcpKeepAliveListener{TCPListener: ln}, nil
serverConfig, err := util.GetTlsServerConfig(nil)
if err != nil {
if errors.Is(err, util.ErrNoTLSConfig) {
plog.G(context.Background()).Warn("tls config not found in config, use raw tcp mode")
return &tcpKeepAliveListener{TCPListener: listener}, nil
}
plog.G(context.Background()).Errorf("failed to get tls server config: %v", err)
return nil, err
}
return tls.NewListener(&tcpKeepAliveListener{TCPListener: listener}, serverConfig), nil
}
type tcpKeepAliveListener struct {

View File

@@ -3,24 +3,23 @@ package core
import (
"context"
"net"
"strings"
"sync"
"time"
log "github.com/sirupsen/logrus"
"github.com/google/gopacket/layers"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
type fakeUDPTunnelConnector struct {
type UDPOverTCPConnector struct {
}
func UDPOverTCPTunnelConnector() Connector {
return &fakeUDPTunnelConnector{}
func NewUDPOverTCPConnector() Connector {
return &UDPOverTCPConnector{}
}
func (c *fakeUDPTunnelConnector) ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error) {
func (c *UDPOverTCPConnector) ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error) {
//defer conn.SetDeadline(time.Time{})
switch con := conn.(type) {
case *net.TCPConn:
@@ -32,113 +31,142 @@ func (c *fakeUDPTunnelConnector) ConnectContext(ctx context.Context, conn net.Co
if err != nil {
return nil, err
}
err = con.SetKeepAlivePeriod(15 * time.Second)
err = con.SetKeepAlivePeriod(config.KeepAliveTime)
if err != nil {
return nil, err
}
}
return newFakeUDPTunnelConnOverTCP(ctx, conn)
return newUDPConnOverTCP(ctx, conn)
}
type fakeUdpHandler struct {
type UDPOverTCPHandler struct {
// map[srcIP]net.Conn
routeMapTCP *sync.Map
packetChan chan *datagramPacket
packetChan chan *Packet
}
func TCPHandler() Handler {
return &fakeUdpHandler{
return &UDPOverTCPHandler{
routeMapTCP: RouteMapTCP,
packetChan: TCPPacketChan,
}
}
func (h *fakeUdpHandler) Handle(ctx context.Context, tcpConn net.Conn) {
func (h *UDPOverTCPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
tcpConn = NewBufferedTCP(tcpConn)
defer tcpConn.Close()
log.Debugf("[TCP] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
plog.G(ctx).Infof("[TCP] Handle connection %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
defer func(addr net.Addr) {
var keys []string
h.routeMapTCP.Range(func(key, value any) bool {
if value.(net.Conn) == tcpConn {
keys = append(keys, key.(string))
}
return true
})
for _, key := range keys {
h.routeMapTCP.Delete(key)
}
log.Debugf("[TCP] To %s by conn %s from globle route map TCP", strings.Join(keys, " "), addr)
}(tcpConn.LocalAddr())
for {
select {
case <-ctx.Done():
return
default:
}
defer h.removeFromRouteMapTCP(ctx, tcpConn)
for ctx.Err() == nil {
buf := config.LPool.Get().([]byte)[:]
dgram, err := readDatagramPacketServer(tcpConn, buf[:])
datagram, err := readDatagramPacket(tcpConn, buf)
if err != nil {
log.Errorf("[TCP] %s -> %s : %v", tcpConn.RemoteAddr(), tcpConn.LocalAddr(), err)
plog.G(ctx).Errorf("[TCP] Failed to read from %s -> %s: %v", tcpConn.RemoteAddr(), tcpConn.LocalAddr(), err)
config.LPool.Put(buf[:])
return
}
var src net.IP
src, _, err = util.ParseIP(dgram.Data[:dgram.DataLength])
err = h.handlePacket(ctx, tcpConn, datagram)
if err != nil {
log.Errorf("[TCP] Unknown packet")
config.LPool.Put(buf[:])
continue
return
}
value, loaded := h.routeMapTCP.LoadOrStore(src.String(), tcpConn)
if loaded {
if tcpConn != value.(net.Conn) {
h.routeMapTCP.Store(src.String(), tcpConn)
log.Debugf("[TCP] Replace route map TCP: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
}
} else {
log.Debugf("[TCP] Add new route map TCP: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
}
util.SafeWrite(h.packetChan, dgram)
}
}
// fake udp connect over tcp
type fakeUDPTunnelConn struct {
func (h *UDPOverTCPHandler) handlePacket(ctx context.Context, tcpConn net.Conn, datagram *DatagramPacket) error {
src, dst, protocol, err := util.ParseIP(datagram.Data[:datagram.DataLength])
if err != nil {
plog.G(ctx).Errorf("[TCP] Unknown packet")
config.LPool.Put(datagram.Data[:])
return err
}
h.addToRouteMapTCP(ctx, src, tcpConn)
if conn, ok := h.routeMapTCP.Load(dst.String()); ok {
plog.G(ctx).Debugf("[TCP] Find TCP route SRC: %s to DST: %s -> %s", src, dst, conn.(net.Conn).RemoteAddr())
err = datagram.Write(conn.(net.Conn))
config.LPool.Put(datagram.Data[:])
if err != nil {
plog.G(ctx).Errorf("[TCP] Failed to write to %s <- %s : %s", conn.(net.Conn).RemoteAddr(), conn.(net.Conn).LocalAddr(), err)
return err
}
} else if (config.CIDR.Contains(dst) || config.CIDR6.Contains(dst)) && (!config.RouterIP.Equal(dst) && !config.RouterIP6.Equal(dst)) {
plog.G(ctx).Warnf("[TCP] No route for src: %s -> dst: %s, drop it", src, dst)
config.LPool.Put(datagram.Data[:])
} else {
plog.G(ctx).Debugf("[TCP] Forward to TUN device, SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, layers.IPProtocol(protocol).String(), datagram.DataLength)
util.SafeWrite(h.packetChan, NewPacket(datagram.Data, int(datagram.DataLength), src, dst), func(v *Packet) {
plog.G(context.Background()).Errorf("Stuck packet, SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, layers.IPProtocol(protocol).String(), v.length)
h.packetChan <- v
})
}
return nil
}
func (h *UDPOverTCPHandler) addToRouteMapTCP(ctx context.Context, src net.IP, tcpConn net.Conn) {
value, loaded := h.routeMapTCP.LoadOrStore(src.String(), tcpConn)
if loaded {
if value.(net.Conn) != tcpConn {
h.routeMapTCP.Store(src.String(), tcpConn)
plog.G(ctx).Infof("[TCP] Replace route map TCP to DST %s by connation %s -> %s", src, tcpConn.RemoteAddr(), tcpConn.LocalAddr())
}
} else {
plog.G(ctx).Infof("[TCP] Add new route map TCP to DST %s by connation %s -> %s", src, tcpConn.RemoteAddr(), tcpConn.LocalAddr())
}
}
func (h *UDPOverTCPHandler) removeFromRouteMapTCP(ctx context.Context, tcpConn net.Conn) {
h.routeMapTCP.Range(func(key, value any) bool {
if value.(net.Conn) == tcpConn {
h.routeMapTCP.Delete(key)
plog.G(ctx).Infof("[TCP] Delete to DST: %s by conn %s -> %s from globle route map TCP", key, tcpConn.RemoteAddr(), tcpConn.LocalAddr())
}
return true
})
}
var _ net.Conn = (*UDPConnOverTCP)(nil)
// UDPConnOverTCP fake udp connection over tcp connection
type UDPConnOverTCP struct {
// tcp connection
net.Conn
ctx context.Context
}
func newFakeUDPTunnelConnOverTCP(ctx context.Context, conn net.Conn) (net.Conn, error) {
return &fakeUDPTunnelConn{ctx: ctx, Conn: conn}, nil
func newUDPConnOverTCP(ctx context.Context, conn net.Conn) (net.Conn, error) {
return &UDPConnOverTCP{ctx: ctx, Conn: conn}, nil
}
func (c *fakeUDPTunnelConn) ReadFrom(b []byte) (int, net.Addr, error) {
func (c *UDPConnOverTCP) Read(b []byte) (int, error) {
select {
case <-c.ctx.Done():
return 0, nil, c.ctx.Err()
return 0, c.ctx.Err()
default:
dgram, err := readDatagramPacket(c.Conn, b)
datagram, err := readDatagramPacket(c.Conn, b)
if err != nil {
return 0, nil, err
return 0, err
}
return int(dgram.DataLength), dgram.Addr(), nil
return int(datagram.DataLength), nil
}
}
func (c *fakeUDPTunnelConn) WriteTo(b []byte, _ net.Addr) (int, error) {
dgram := newDatagramPacket(b)
if err := dgram.Write(c.Conn); err != nil {
func (c *UDPConnOverTCP) Write(b []byte) (int, error) {
buf := config.LPool.Get().([]byte)[:]
n := copy(buf, b)
defer config.LPool.Put(buf)
packet := newDatagramPacket(buf, n)
if err := packet.Write(c.Conn); err != nil {
return 0, err
}
return len(b), nil
}
func (c *fakeUDPTunnelConn) Close() error {
func (c *UDPConnOverTCP) Close() error {
if cc, ok := c.Conn.(interface{ CloseRead() error }); ok {
_ = cc.CloseRead()
}

View File

@@ -4,11 +4,11 @@ import (
"context"
"net"
"sync"
"time"
log "github.com/sirupsen/logrus"
"github.com/google/gopacket/layers"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
@@ -17,124 +17,98 @@ const (
)
type tunHandler struct {
chain *Chain
forward *Forwarder
node *Node
routeMapUDP *RouteMap
// map[srcIP]net.Conn
routeMapUDP *sync.Map
routeMapTCP *sync.Map
chExit chan error
}
type RouteMap struct {
lock *sync.RWMutex
routes map[string]net.Addr
}
func NewRouteMap() *RouteMap {
return &RouteMap{
lock: &sync.RWMutex{},
routes: map[string]net.Addr{},
}
}
func (n *RouteMap) LoadOrStore(to net.IP, addr net.Addr) (net.Addr, bool) {
n.lock.RLock()
route, load := n.routes[to.String()]
n.lock.RUnlock()
if load {
return route, true
}
n.lock.Lock()
defer n.lock.Unlock()
n.routes[to.String()] = addr
return addr, false
}
func (n *RouteMap) Store(to net.IP, addr net.Addr) {
n.lock.Lock()
defer n.lock.Unlock()
n.routes[to.String()] = addr
}
func (n *RouteMap) RouteTo(ip net.IP) net.Addr {
n.lock.RLock()
defer n.lock.RUnlock()
return n.routes[ip.String()]
errChan chan error
}
// TunHandler creates a handler for tun tunnel.
func TunHandler(chain *Chain, node *Node) Handler {
func TunHandler(forward *Forwarder, node *Node) Handler {
return &tunHandler{
chain: chain,
forward: forward,
node: node,
routeMapUDP: NewRouteMap(),
routeMapUDP: &sync.Map{},
routeMapTCP: RouteMapTCP,
chExit: make(chan error, 1),
errChan: make(chan error, 1),
}
}
func (h *tunHandler) Handle(ctx context.Context, tun net.Conn) {
if h.node.Remote != "" {
if remote := h.node.Remote; remote != "" {
h.HandleClient(ctx, tun)
} else {
h.HandleServer(ctx, tun)
}
}
func (h *tunHandler) HandleServer(ctx context.Context, tun net.Conn) {
device := &Device{
tun: tun,
tunInbound: make(chan *Packet, MaxSize),
tunOutbound: make(chan *Packet, MaxSize),
errChan: h.errChan,
}
defer device.Close()
go device.readFromTUN(ctx)
go device.writeToTUN(ctx)
go device.handlePacket(ctx, h.node.Addr, h.routeMapUDP, h.routeMapTCP)
select {
case err := <-device.errChan:
plog.G(ctx).Errorf("Device exit: %v", err)
return
case <-ctx.Done():
return
}
}
type Device struct {
tun net.Conn
tunInbound chan *DataElem
tunOutbound chan *DataElem
tunInbound chan *Packet
tunOutbound chan *Packet
// your main logic
tunInboundHandler func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem)
chExit chan error
errChan chan error
}
func (d *Device) readFromTun() {
func (d *Device) readFromTUN(ctx context.Context) {
defer util.HandleCrash()
for {
buf := config.LPool.Get().([]byte)[:]
n, err := d.tun.Read(buf[:])
if err != nil {
config.LPool.Put(buf[:])
log.Errorf("[TUN] Failed to read from tun: %v", err)
util.SafeWrite(d.chExit, err)
plog.G(ctx).Errorf("[TUN] Failed to read from tun device: %v", err)
util.SafeWrite(d.errChan, err)
return
}
if n == 0 {
log.Errorf("[TUN] Read packet length 0")
config.LPool.Put(buf[:])
continue
}
src, dst, err := util.ParseIP(buf[:n])
src, dst, protocol, err := util.ParseIP(buf[:n])
if err != nil {
log.Errorf("[TUN] Unknown packet")
plog.G(ctx).Errorf("[TUN] Unknown packet")
config.LPool.Put(buf[:])
continue
}
log.Debugf("[TUN] SRC: %s --> DST: %s, length: %d", src, dst, n)
util.SafeWrite(d.tunInbound, &DataElem{
data: buf[:],
length: n,
src: src,
dst: dst,
plog.G(ctx).Debugf("[TUN] SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, layers.IPProtocol(protocol).String(), n)
util.SafeWrite(d.tunInbound, NewPacket(buf[:], n, src, dst), func(v *Packet) {
config.LPool.Put(v.data[:])
plog.G(context.Background()).Errorf("Drop packet, SRC: %s, DST: %s, Protocol: %s, Length: %d", v.src, v.dst, layers.IPProtocol(protocol).String(), v.length)
})
}
}
func (d *Device) writeToTun() {
func (d *Device) writeToTUN(ctx context.Context) {
defer util.HandleCrash()
for e := range d.tunOutbound {
_, err := d.tun.Write(e.data[:e.length])
config.LPool.Put(e.data[:])
for packet := range d.tunOutbound {
_, err := d.tun.Write(packet.data[:packet.length])
config.LPool.Put(packet.data[:])
if err != nil {
util.SafeWrite(d.chExit, err)
plog.G(ctx).Errorf("[TUN] Failed to write to tun device: %v", err)
util.SafeWrite(d.errChan, err)
return
}
}
@@ -147,91 +121,46 @@ func (d *Device) Close() {
util.SafeClose(TCPPacketChan)
}
func heartbeats(ctx context.Context, tun net.Conn) {
tunIfi, err := util.GetTunDeviceByConn(tun)
if err != nil {
log.Errorf("Failed to get tun device: %s", err.Error())
return
}
srcIPv4, srcIPv6, dockerSrcIPv4, err := util.GetTunDeviceIP(tunIfi.Name)
func (d *Device) handlePacket(ctx context.Context, addr string, routeMapUDP *sync.Map, routeMapTCP *sync.Map) {
packetConn, err := (&net.ListenConfig{}).ListenPacket(ctx, "udp", addr)
if err != nil {
util.SafeWrite(d.errChan, err)
plog.G(ctx).Errorf("[TUN] Failed to listen %s: %v", addr, err)
return
}
ticker := time.NewTicker(time.Second * 5)
defer ticker.Stop()
for ; true; <-ticker.C {
select {
case <-ctx.Done():
return
default:
}
if srcIPv4 != nil {
go util.Ping(ctx, srcIPv4.String(), config.RouterIP.String())
}
if srcIPv6 != nil {
go util.Ping(ctx, srcIPv6.String(), config.RouterIP6.String())
}
if dockerSrcIPv4 != nil {
go util.Ping(ctx, dockerSrcIPv4.String(), config.DockerRouterIP.String())
}
p := &Peer{
conn: packetConn,
tunInbound: d.tunInbound,
tunOutbound: d.tunOutbound,
routeMapUDP: routeMapUDP,
routeMapTCP: routeMapTCP,
errChan: make(chan error, 1),
}
}
func (d *Device) Start(ctx context.Context) {
go d.readFromTun()
go d.tunInboundHandler(d.tunInbound, d.tunOutbound)
go d.writeToTun()
go p.readFromConn(ctx)
go p.routeTUN(ctx)
go p.routeTCPToTun(ctx)
select {
case err := <-d.chExit:
log.Errorf("Device exit: %v", err)
case err = <-p.errChan:
plog.G(ctx).Errorf("[TUN] %s: %v", d.tun.LocalAddr(), err)
util.SafeWrite(d.errChan, err)
return
case <-ctx.Done():
return
}
}
func (d *Device) SetTunInboundHandler(handler func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem)) {
d.tunInboundHandler = handler
}
func (h *tunHandler) HandleServer(ctx context.Context, tun net.Conn) {
device := &Device{
tun: tun,
tunInbound: make(chan *DataElem, MaxSize),
tunOutbound: make(chan *DataElem, MaxSize),
chExit: h.chExit,
}
device.SetTunInboundHandler(func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem) {
for ctx.Err() == nil {
packetConn, err := (&net.ListenConfig{}).ListenPacket(ctx, "udp", h.node.Addr)
if err != nil {
log.Errorf("[UDP] Failed to listen %s: %v", h.node.Addr, err)
return
}
err = transportTunServer(ctx, tunInbound, tunOutbound, packetConn, h.routeMapUDP, h.routeMapTCP)
if err != nil {
log.Errorf("[TUN] %s: %v", tun.LocalAddr(), err)
}
}
})
defer device.Close()
device.Start(ctx)
}
type DataElem struct {
type Packet struct {
data []byte
length int
src net.IP
dst net.IP
}
func NewDataElem(data []byte, length int, src net.IP, dst net.IP) *DataElem {
return &DataElem{
func NewPacket(data []byte, length int, src net.IP, dst net.IP) *Packet {
return &Packet{
data: data,
length: length,
src: src,
@@ -239,32 +168,22 @@ func NewDataElem(data []byte, length int, src net.IP, dst net.IP) *DataElem {
}
}
func (d *DataElem) Data() []byte {
func (d *Packet) Data() []byte {
return d.data
}
func (d *DataElem) Length() int {
func (d *Packet) Length() int {
return d.length
}
type udpElem struct {
from net.Addr
data []byte
length int
src net.IP
dst net.IP
}
type Peer struct {
conn net.PacketConn
connInbound chan *udpElem
tunInbound <-chan *DataElem
tunOutbound chan<- *DataElem
tunInbound chan *Packet
tunOutbound chan<- *Packet
// map[srcIP.String()]net.Addr for udp
routeMapUDP *RouteMap
routeMapUDP *sync.Map
// map[srcIP.String()]net.Conn for tcp
routeMapTCP *sync.Map
@@ -278,9 +197,9 @@ func (p *Peer) sendErr(err error) {
}
}
func (p *Peer) readFromConn() {
func (p *Peer) readFromConn(ctx context.Context) {
defer util.HandleCrash()
for {
for ctx.Err() == nil {
buf := config.LPool.Get().([]byte)[:]
n, from, err := p.conn.ReadFrom(buf[:])
if err != nil {
@@ -289,143 +208,62 @@ func (p *Peer) readFromConn() {
return
}
src, dst, err := util.ParseIP(buf[:n])
src, dst, protocol, err := util.ParseIP(buf[:n])
if err != nil {
config.LPool.Put(buf[:])
log.Errorf("[TUN] Unknown packet: %v", err)
continue
}
if addr, loaded := p.routeMapUDP.LoadOrStore(src, from); loaded {
if addr.String() != from.String() {
p.routeMapUDP.Store(src, from)
log.Debugf("[TUN] Replace route map UDP: %s -> %s", src, from)
}
} else {
log.Debugf("[TUN] Add new route map UDP: %s -> %s", src, from)
}
p.connInbound <- &udpElem{
from: from,
data: buf[:],
length: n,
src: src,
dst: dst,
plog.G(ctx).Errorf("[TUN] Unknown packet: %v", err)
p.sendErr(err)
return
}
p.addToRouteMapUDP(ctx, src, from)
plog.G(context.Background()).Errorf("[TUN] SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, layers.IPProtocol(protocol).String(), n)
p.tunInbound <- NewPacket(buf[:], n, src, dst)
}
}
func (p *Peer) readFromTCPConn() {
func (p *Peer) addToRouteMapUDP(ctx context.Context, src net.IP, from net.Addr) {
if addr, loaded := p.routeMapUDP.LoadOrStore(src.String(), from); loaded {
if addr.(net.Addr).String() != from.String() {
p.routeMapUDP.Store(src.String(), from)
plog.G(ctx).Infof("[TUN] Replace route map UDP: %s -> %s", src, from)
}
} else {
plog.G(ctx).Infof("[TUN] Add new route map UDP: %s -> %s", src, from)
}
}
func (p *Peer) routeTCPToTun(ctx context.Context) {
defer util.HandleCrash()
for packet := range TCPPacketChan {
src, dst, err := util.ParseIP(packet.Data)
if err != nil {
log.Errorf("[TUN] Unknown packet")
config.LPool.Put(packet.Data[:])
continue
}
u := &udpElem{
data: packet.Data[:],
length: int(packet.DataLength),
src: src,
dst: dst,
}
log.Debugf("[TCP] udp-tun %s >>> %s length: %d", u.src, u.dst, u.length)
p.connInbound <- u
p.tunOutbound <- packet
}
}
func (p *Peer) routePeer() {
func (p *Peer) routeTUN(ctx context.Context) {
defer util.HandleCrash()
for e := range p.connInbound {
if routeToAddr := p.routeMapUDP.RouteTo(e.dst); routeToAddr != nil {
log.Debugf("[UDP] Find UDP route to dst: %s -> %s", e.dst, routeToAddr)
_, err := p.conn.WriteTo(e.data[:e.length], routeToAddr)
config.LPool.Put(e.data[:])
for packet := range p.tunInbound {
if addr, ok := p.routeMapUDP.Load(packet.dst.String()); ok {
plog.G(ctx).Debugf("[TUN] Find UDP route to DST: %s -> %s, SRC: %s, DST: %s", packet.dst, addr, packet.src, packet.dst)
_, err := p.conn.WriteTo(packet.data[:packet.length], addr.(net.Addr))
config.LPool.Put(packet.data[:])
if err != nil {
plog.G(ctx).Errorf("[TUN] Failed wirte to route dst: %s -> %s", packet.dst, addr)
p.sendErr(err)
return
}
} else if conn, ok := p.routeMapTCP.Load(e.dst.String()); ok {
log.Debugf("[TCP] Find TCP route to dst: %s -> %s", e.dst.String(), conn.(net.Conn).RemoteAddr())
dgram := newDatagramPacket(e.data[:e.length])
} else if conn, ok := p.routeMapTCP.Load(packet.dst.String()); ok {
plog.G(ctx).Debugf("[TUN] Find TCP route to dst: %s -> %s", packet.dst.String(), conn.(net.Conn).RemoteAddr())
dgram := newDatagramPacket(packet.data, packet.length)
err := dgram.Write(conn.(net.Conn))
config.LPool.Put(e.data[:])
config.LPool.Put(packet.data[:])
if err != nil {
log.Errorf("[TCP] udp-tun %s <- %s : %s", conn.(net.Conn).RemoteAddr(), dgram.Addr(), err)
plog.G(ctx).Errorf("[TUN] Failed to write TCP %s <- %s : %s", conn.(net.Conn).RemoteAddr(), conn.(net.Conn).LocalAddr(), err)
p.sendErr(err)
return
}
} else {
log.Debugf("[TUN] Not found route to dst: %s, write to TUN device", e.dst.String())
p.tunOutbound <- &DataElem{
data: e.data,
length: e.length,
src: e.src,
dst: e.dst,
}
plog.G(ctx).Warnf("[TUN] No route for src: %s -> dst: %s, drop it", packet.src, packet.dst)
config.LPool.Put(packet.data[:])
}
}
}
func (p *Peer) routeTUN() {
defer util.HandleCrash()
for e := range p.tunInbound {
if addr := p.routeMapUDP.RouteTo(e.dst); addr != nil {
log.Debugf("[TUN] Find UDP route to dst: %s -> %s", e.dst, addr)
_, err := p.conn.WriteTo(e.data[:e.length], addr)
config.LPool.Put(e.data[:])
if err != nil {
log.Debugf("[TUN] Failed wirte to route dst: %s -> %s", e.dst, addr)
p.sendErr(err)
return
}
} else if conn, ok := p.routeMapTCP.Load(e.dst.String()); ok {
log.Debugf("[TUN] Find TCP route to dst: %s -> %s", e.dst.String(), conn.(net.Conn).RemoteAddr())
dgram := newDatagramPacket(e.data[:e.length])
err := dgram.Write(conn.(net.Conn))
config.LPool.Put(e.data[:])
if err != nil {
log.Errorf("[TUN] Failed to write TCP %s <- %s : %s", conn.(net.Conn).RemoteAddr(), dgram.Addr(), err)
p.sendErr(err)
return
}
} else {
log.Errorf("[TUN] No route for src: %s -> dst: %s, drop it", e.src, e.dst)
config.LPool.Put(e.data[:])
}
}
}
func (p *Peer) Start() {
go p.readFromConn()
go p.readFromTCPConn()
go p.routePeer()
go p.routeTUN()
}
func (p *Peer) Close() {
p.conn.Close()
}
func transportTunServer(ctx context.Context, tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem, packetConn net.PacketConn, routeMapUDP *RouteMap, routeMapTCP *sync.Map) error {
p := &Peer{
conn: packetConn,
connInbound: make(chan *udpElem, MaxSize),
tunInbound: tunInbound,
tunOutbound: tunOutbound,
routeMapUDP: routeMapUDP,
routeMapTCP: routeMapTCP,
errChan: make(chan error, 2),
}
defer p.Close()
p.Start()
select {
case err := <-p.errChan:
log.Errorf(err.Error())
return err
case <-ctx.Done():
return nil
}
}

View File

@@ -6,91 +6,84 @@ import (
"net"
"time"
"github.com/google/gopacket/layers"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func (h *tunHandler) HandleClient(ctx context.Context, tun net.Conn) {
defer tun.Close()
remoteAddr, err := net.ResolveUDPAddr("udp", h.node.Remote)
if err != nil {
log.Errorf("[TUN-CLIENT] Failed to resolve udp addr %s: %v", h.node.Remote, err)
return
}
in := make(chan *DataElem, MaxSize)
out := make(chan *DataElem, MaxSize)
defer util.SafeClose(in)
defer util.SafeClose(out)
d := &ClientDevice{
device := &ClientDevice{
tun: tun,
tunInbound: in,
tunOutbound: out,
chExit: h.chExit,
tunInbound: make(chan *Packet, MaxSize),
tunOutbound: make(chan *Packet, MaxSize),
errChan: h.errChan,
}
d.SetTunInboundHandler(func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem) {
for ctx.Err() == nil {
packetConn, err := getRemotePacketConn(ctx, h.chain)
if err != nil {
log.Debugf("[TUN-CLIENT] Failed to get remote conn from %s -> %s: %s", tun.LocalAddr(), remoteAddr, err)
time.Sleep(time.Millisecond * 200)
continue
}
err = transportTunClient(ctx, tunInbound, tunOutbound, packetConn, remoteAddr)
if err != nil {
log.Debugf("[TUN-CLIENT] %s: %v", tun.LocalAddr(), err)
}
}
})
d.Start(ctx)
defer device.Close()
go device.handlePacket(ctx, h.forward)
go device.readFromTun(ctx)
go device.writeToTun(ctx)
go device.heartbeats(ctx)
select {
case <-device.errChan:
case <-ctx.Done():
}
}
func getRemotePacketConn(ctx context.Context, chain *Chain) (packetConn net.PacketConn, err error) {
defer func() {
if err != nil && packetConn != nil {
_ = packetConn.Close()
}
}()
if !chain.IsEmpty() {
var cc net.Conn
cc, err = chain.DialContext(ctx)
if err != nil {
return
}
var ok bool
if packetConn, ok = cc.(net.PacketConn); !ok {
err = errors.New("not a packet connection")
return
}
} else {
var lc net.ListenConfig
packetConn, err = lc.ListenPacket(ctx, "udp", "")
if err != nil {
return
}
}
return
type ClientDevice struct {
tun net.Conn
tunInbound chan *Packet
tunOutbound chan *Packet
errChan chan error
}
func transportTunClient(ctx context.Context, tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem, packetConn net.PacketConn, remoteAddr net.Addr) error {
func (d *ClientDevice) handlePacket(ctx context.Context, forward *Forwarder) {
for ctx.Err() == nil {
func() {
defer time.Sleep(time.Second * 2)
conn, err := forwardConn(ctx, forward)
if err != nil {
plog.G(ctx).Errorf("Failed to get remote conn from %s -> %s: %s", d.tun.LocalAddr(), forward.node.Remote, err)
return
}
defer conn.Close()
err = handlePacketClient(ctx, d.tunInbound, d.tunOutbound, conn)
if err != nil {
plog.G(ctx).Errorf("Failed to transport data to remote %s: %v", conn.RemoteAddr(), err)
return
}
}()
}
}
func forwardConn(ctx context.Context, forwarder *Forwarder) (net.Conn, error) {
conn, err := forwarder.DialContext(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to dial forwarder")
}
return conn, nil
}
func handlePacketClient(ctx context.Context, tunInbound <-chan *Packet, tunOutbound chan<- *Packet, conn net.Conn) error {
errChan := make(chan error, 2)
defer packetConn.Close()
go func() {
defer util.HandleCrash()
for e := range tunInbound {
if e.src.Equal(e.dst) {
util.SafeWrite(tunOutbound, e)
continue
}
_, err := packetConn.WriteTo(e.data[:e.length], remoteAddr)
config.LPool.Put(e.data[:])
for packet := range tunInbound {
err := conn.SetWriteDeadline(time.Now().Add(config.KeepAliveTime))
if err != nil {
util.SafeWrite(errChan, errors.Wrap(err, fmt.Sprintf("failed to write packet to remote %s", remoteAddr)))
plog.G(ctx).Errorf("Failed to set write deadline: %v", err)
util.SafeWrite(errChan, errors.Wrap(err, "failed to set write deadline"))
return
}
_, err = conn.Write(packet.data[:packet.length])
config.LPool.Put(packet.data[:])
if err != nil {
plog.G(ctx).Errorf("Failed to write packet to remote: %v", err)
util.SafeWrite(errChan, errors.Wrap(err, "failed to write packet to remote"))
return
}
}
@@ -100,13 +93,28 @@ func transportTunClient(ctx context.Context, tunInbound <-chan *DataElem, tunOut
defer util.HandleCrash()
for {
buf := config.LPool.Get().([]byte)[:]
n, _, err := packetConn.ReadFrom(buf[:])
err := conn.SetReadDeadline(time.Now().Add(config.KeepAliveTime))
if err != nil {
config.LPool.Put(buf[:])
util.SafeWrite(errChan, errors.Wrap(err, fmt.Sprintf("failed to read packet from remote %s", remoteAddr)))
plog.G(ctx).Errorf("Failed to set read deadline: %v", err)
util.SafeWrite(errChan, errors.Wrap(err, "failed to set read deadline"))
return
}
util.SafeWrite(tunOutbound, &DataElem{data: buf[:], length: n})
n, err := conn.Read(buf[:])
if err != nil {
config.LPool.Put(buf[:])
plog.G(ctx).Errorf("Failed to read packet from remote: %v", err)
util.SafeWrite(errChan, errors.Wrap(err, fmt.Sprintf("failed to read packet from remote %s", conn.RemoteAddr())))
return
}
if n == 0 {
plog.G(ctx).Warnf("Packet length 0")
config.LPool.Put(buf[:])
continue
}
util.SafeWrite(tunOutbound, NewPacket(buf[:], n, nil, nil), func(v *Packet) {
config.LPool.Put(v.data[:])
plog.G(context.Background()).Errorf("Drop packet, LocalAddr: %s, Remote: %s, Length: %d", conn.LocalAddr(), conn.RemoteAddr(), v.length)
})
}
}()
@@ -118,70 +126,84 @@ func transportTunClient(ctx context.Context, tunInbound <-chan *DataElem, tunOut
}
}
type ClientDevice struct {
tun net.Conn
tunInbound chan *DataElem
tunOutbound chan *DataElem
// your main logic
tunInboundHandler func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem)
chExit chan error
}
func (d *ClientDevice) Start(ctx context.Context) {
go d.tunInboundHandler(d.tunInbound, d.tunOutbound)
go heartbeats(ctx, d.tun)
go d.readFromTun()
go d.writeToTun()
select {
case err := <-d.chExit:
log.Errorf("[TUN-CLIENT]: %v", err)
return
case <-ctx.Done():
return
}
}
func (d *ClientDevice) SetTunInboundHandler(handler func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem)) {
d.tunInboundHandler = handler
}
func (d *ClientDevice) readFromTun() {
func (d *ClientDevice) readFromTun(ctx context.Context) {
defer util.HandleCrash()
for {
buf := config.LPool.Get().([]byte)[:]
n, err := d.tun.Read(buf[:])
if err != nil {
util.SafeWrite(d.chExit, err)
plog.G(ctx).Errorf("Failed to read packet from tun device: %s", err)
util.SafeWrite(d.errChan, err)
config.LPool.Put(buf[:])
return
}
if n == 0 {
config.LPool.Put(buf[:])
continue
}
// Try to determine network protocol number, default zero.
var src, dst net.IP
src, dst, err = util.ParseIP(buf[:n])
var protocol int
src, dst, protocol, err = util.ParseIP(buf[:n])
if err != nil {
log.Debugf("[TUN-GVISOR] Unknown packet: %v", err)
plog.G(ctx).Errorf("Unknown packet: %v", err)
config.LPool.Put(buf[:])
continue
}
log.Tracef("[TUN-RAW] SRC: %s, DST: %s, Length: %d", src.String(), dst, n)
util.SafeWrite(d.tunInbound, NewDataElem(buf[:], n, src, dst))
plog.G(context.Background()).Debugf("SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, layers.IPProtocol(protocol).String(), n)
packet := NewPacket(buf[:], n, src, dst)
f := func(v *Packet) {
config.LPool.Put(v.data[:])
plog.G(context.Background()).Errorf("Drop packet, SRC: %s, DST: %s, Protocol: %s, Length: %d", v.src, v.dst, layers.IPProtocol(protocol).String(), v.length)
}
if packet.src.Equal(packet.dst) {
util.SafeWrite(d.tunOutbound, packet, f)
continue
}
util.SafeWrite(d.tunInbound, packet, f)
}
}
func (d *ClientDevice) writeToTun() {
func (d *ClientDevice) writeToTun(ctx context.Context) {
defer util.HandleCrash()
for e := range d.tunOutbound {
_, err := d.tun.Write(e.data[:e.length])
config.LPool.Put(e.data[:])
for packet := range d.tunOutbound {
_, err := d.tun.Write(packet.data[:packet.length])
config.LPool.Put(packet.data[:])
if err != nil {
util.SafeWrite(d.chExit, err)
plog.G(ctx).Errorf("Failed to write packet to tun device: %v", err)
util.SafeWrite(d.errChan, err)
return
}
}
}
func (d *ClientDevice) Close() {
d.tun.Close()
util.SafeClose(d.tunInbound)
util.SafeClose(d.tunOutbound)
}
func (d *ClientDevice) heartbeats(ctx context.Context) {
tunIfi, err := util.GetTunDeviceByConn(d.tun)
if err != nil {
plog.G(ctx).Errorf("Failed to get tun device: %v", err)
return
}
srcIPv4, srcIPv6, dockerSrcIPv4, err := util.GetTunDeviceIP(tunIfi.Name)
if err != nil {
plog.G(ctx).Errorf("Failed to get tun device %s IP: %v", tunIfi.Name, err)
return
}
ticker := time.NewTicker(config.KeepAliveTime)
defer ticker.Stop()
for ; ctx.Err() == nil; <-ticker.C {
if srcIPv4 != nil {
util.Ping(ctx, srcIPv4.String(), config.RouterIP.String())
}
if srcIPv6 != nil {
util.Ping(ctx, srcIPv6.String(), config.RouterIP6.String())
}
if dockerSrcIPv4 != nil {
util.Ping(ctx, dockerSrcIPv4.String(), config.DockerRouterIP.String())
}
}
}

View File

@@ -2,69 +2,38 @@ package core
import (
"encoding/binary"
"fmt"
"io"
"net"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
)
type datagramPacket struct {
type DatagramPacket struct {
DataLength uint16 // [2]byte
Data []byte // []byte
}
func (addr *datagramPacket) String() string {
if addr == nil {
return ""
}
return fmt.Sprintf("DataLength: %d, Data: %v\n", addr.DataLength, addr.Data)
}
func newDatagramPacket(data []byte) (r *datagramPacket) {
return &datagramPacket{
DataLength: uint16(len(data)),
func newDatagramPacket(data []byte, length int) (r *DatagramPacket) {
return &DatagramPacket{
DataLength: uint16(length),
Data: data,
}
}
func (addr *datagramPacket) Addr() net.Addr {
var server8422, _ = net.ResolveUDPAddr("udp", "127.0.0.1:8422")
return server8422
}
func readDatagramPacket(r io.Reader, b []byte) (*datagramPacket, error) {
// this method will return all byte array in the way: b[:], len(DatagramPacket.Data)==64k
func readDatagramPacket(r io.Reader, b []byte) (*DatagramPacket, error) {
_, err := io.ReadFull(r, b[:2])
if err != nil {
return nil, err
}
dataLength := binary.BigEndian.Uint16(b[:2])
_, err = io.ReadFull(r, b[:dataLength])
if err != nil /*&& (err != io.ErrUnexpectedEOF || err != io.EOF)*/ {
return nil, err
}
return &datagramPacket{DataLength: dataLength, Data: b[:dataLength]}, nil
}
// this method will return all byte array in the way: b[:]
func readDatagramPacketServer(r io.Reader, b []byte) (*datagramPacket, error) {
_, err := io.ReadFull(r, b[:2])
if err != nil {
return nil, err
}
dataLength := binary.BigEndian.Uint16(b[:2])
_, err = io.ReadFull(r, b[:dataLength])
if err != nil /*&& (err != io.ErrUnexpectedEOF || err != io.EOF)*/ {
return nil, err
}
return &datagramPacket{DataLength: dataLength, Data: b[:]}, nil
return &DatagramPacket{DataLength: dataLength, Data: b[:]}, nil
}
func (addr *datagramPacket) Write(w io.Writer) error {
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
binary.BigEndian.PutUint16(buf[:2], uint16(len(addr.Data)))
n := copy(buf[2:], addr.Data)
_, err := w.Write(buf[:n+2])
func (d *DatagramPacket) Write(w io.Writer) error {
n := copy(d.Data[2:], d.Data[:d.DataLength])
binary.BigEndian.PutUint16(d.Data[:2], d.DataLength)
_, err := w.Write(d.Data[:n+2])
return err
}

View File

@@ -3,14 +3,15 @@ package cp
import (
"archive/tar"
"bytes"
"context"
"errors"
"fmt"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"io"
"os"
"runtime"
"strings"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericiooptions"
"k8s.io/client-go/kubernetes"
@@ -183,7 +184,7 @@ func (o *CopyOptions) copyToPod(src, dest fileSpec, options *exec.ExecOptions) e
go func(src localPath, dest remotePath, writer io.WriteCloser) {
defer writer.Close()
if err := makeTar(src, dest, writer); err != nil {
log.Errorf("Error making tar: %v", err)
plog.G(context.Background()).Errorf("Error making tar: %v", err)
}
}(srcFile, destFile, writer)
var cmdArr []string
@@ -266,7 +267,7 @@ func (t *TarPipe) initReadFrom(n uint64) {
go func() {
defer t.outStream.Close()
if err := t.o.execute(options); err != nil {
log.Errorf("Error executing command: %v", err)
plog.G(context.Background()).Errorf("Error executing command: %v", err)
}
}()
}

View File

@@ -4,26 +4,19 @@ import (
"context"
"io"
log "github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func (svr *Server) Clone(req *rpc.CloneRequest, resp rpc.Daemon_CloneServer) (err error) {
defer func() {
util.InitLoggerForServer(true)
log.SetOutput(svr.LogFile)
config.Debug = false
}()
config.Debug = req.Level == int32(log.DebugLevel)
out := io.MultiWriter(newCloneWarp(resp), svr.LogFile)
util.InitLoggerForClient(config.Debug)
log.SetOutput(out)
logger := plog.GetLoggerForClient(req.Level, io.MultiWriter(newCloneWarp(resp), svr.LogFile))
var sshConf = ssh.ParseSshFromRPC(req.SshJump)
connReq := &rpc.ConnectRequest{
KubeconfigBytes: req.KubeconfigBytes,
@@ -37,17 +30,18 @@ func (svr *Server) Clone(req *rpc.CloneRequest, resp rpc.Daemon_CloneServer) (er
Level: req.Level,
OriginKubeconfigPath: req.OriginKubeconfigPath,
}
cli := svr.GetClient(false)
cli, err := svr.GetClient(false)
if err != nil {
return err
}
connResp, err := cli.Connect(resp.Context(), connReq)
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.ConnectResponse](connResp, out)
err = util.PrintGRPCStream[rpc.ConnectResponse](connResp, io.MultiWriter(newCloneWarp(resp), svr.LogFile))
if err != nil {
return err
}
util.InitLoggerForClient(config.Debug)
log.SetOutput(out)
options := &handler.CloneOptions{
Namespace: req.Namespace,
@@ -79,7 +73,7 @@ func (svr *Server) Clone(req *rpc.CloneRequest, resp rpc.Daemon_CloneServer) (er
sshCtx, sshFunc := context.WithCancel(context.Background())
defer func() {
if err != nil {
_ = options.Cleanup()
_ = options.Cleanup(sshCtx)
sshFunc()
}
}()
@@ -95,15 +89,15 @@ func (svr *Server) Clone(req *rpc.CloneRequest, resp rpc.Daemon_CloneServer) (er
f := util.InitFactoryByPath(path, req.Namespace)
err = options.InitClient(f)
if err != nil {
log.Errorf("Failed to init client: %v", err)
plog.G(context.Background()).Errorf("Failed to init client: %v", err)
return err
}
config.Image = req.Image
log.Infof("Clone workloads...")
logger.Infof("Clone workloads...")
options.SetContext(sshCtx)
err = options.DoClone(resp.Context(), []byte(req.KubeconfigBytes))
err = options.DoClone(plog.WithLogger(resp.Context(), logger), []byte(req.KubeconfigBytes))
if err != nil {
log.Errorf("Clone workloads failed: %v", err)
plog.G(context.Background()).Errorf("Clone workloads failed: %v", err)
return err
}
svr.clone = options

View File

@@ -2,86 +2,69 @@ package action
import (
"context"
"fmt"
"io"
defaultlog "log"
"os"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func (svr *Server) ConnectFork(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectForkServer) (err error) {
defer func() {
util.InitLoggerForServer(true)
log.SetOutput(svr.LogFile)
config.Debug = false
}()
config.Debug = req.Level == int32(log.DebugLevel)
out := io.MultiWriter(newConnectForkWarp(resp), svr.LogFile)
util.InitLoggerForClient(config.Debug)
log.SetOutput(out)
logger := plog.GetLoggerForClient(req.Level, io.MultiWriter(newConnectForkWarp(resp), svr.LogFile))
if !svr.IsSudo {
return svr.redirectConnectForkToSudoDaemon(req, resp)
return svr.redirectConnectForkToSudoDaemon(req, resp, logger)
}
ctx := resp.Context()
connect := &handler.ConnectOptions{
Namespace: req.Namespace,
Headers: req.Headers,
Workloads: req.Workloads,
Namespace: req.ManagerNamespace,
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
Engine: config.Engine(req.Engine),
OriginKubeconfigPath: req.OriginKubeconfigPath,
OriginNamespace: req.Namespace,
Lock: &svr.Lock,
ImagePullSecretName: req.ImagePullSecretName,
}
defaultlog.Default().SetOutput(io.Discard)
file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
if err != nil {
return err
}
flags := pflag.NewFlagSet("", pflag.ContinueOnError)
flags.AddFlag(&pflag.Flag{
Name: "kubeconfig",
DefValue: file,
})
sshCtx, sshCancel := context.WithCancel(context.Background())
connect.AddRolloutFunc(func() error {
sshCancel()
os.Remove(file)
return nil
})
sshCtx = plog.WithLogger(sshCtx, logger)
defer plog.WithoutLogger(sshCtx)
defer func() {
if err != nil {
connect.Cleanup()
connect.Cleanup(plog.WithLogger(context.Background(), logger))
sshCancel()
}
}()
var path string
path, err = ssh.SshJump(sshCtx, ssh.ParseSshFromRPC(req.SshJump), flags, false)
err = connect.InitClient(util.InitFactoryByPath(file, req.ManagerNamespace))
if err != nil {
return err
}
err = connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
if err != nil {
return err
}
err = connect.GetIPFromContext(ctx)
err = connect.GetIPFromContext(ctx, logger)
if err != nil {
return err
}
config.Image = req.Image
err = connect.DoConnect(sshCtx, true)
err = connect.DoConnect(sshCtx, true, ctx.Done())
if err != nil {
log.Errorf("Do connect error: %v", err)
logger.Errorf("Failed to connect...")
return err
}
@@ -92,18 +75,10 @@ func (svr *Server) ConnectFork(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectF
return nil
}
func (svr *Server) redirectConnectForkToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServer) (err error) {
cli := svr.GetClient(true)
if cli == nil {
return fmt.Errorf("sudo daemon not start")
}
connect := &handler.ConnectOptions{
Namespace: req.Namespace,
Headers: req.Headers,
Workloads: req.Workloads,
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
Engine: config.Engine(req.Engine),
OriginKubeconfigPath: req.OriginKubeconfigPath,
func (svr *Server) redirectConnectForkToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServer, logger *log.Logger) (err error) {
cli, err := svr.GetClient(true)
if err != nil {
return errors.Wrap(err, "sudo daemon not start")
}
var sshConf = ssh.ParseSshFromRPC(req.SshJump)
file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
@@ -116,12 +91,23 @@ func (svr *Server) redirectConnectForkToSudoDaemon(req *rpc.ConnectRequest, resp
DefValue: file,
})
sshCtx, sshCancel := context.WithCancel(context.Background())
sshCtx = plog.WithLogger(sshCtx, logger)
defer plog.WithoutLogger(sshCtx)
connect := &handler.ConnectOptions{
Namespace: req.Namespace,
OriginNamespace: req.Namespace,
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
Engine: config.Engine(req.Engine),
OriginKubeconfigPath: req.OriginKubeconfigPath,
}
connect.AddRolloutFunc(func() error {
sshCancel()
os.Remove(file)
return nil
})
defer func() {
if err != nil {
connect.Cleanup(plog.WithLogger(context.Background(), logger))
sshCancel()
}
}()
@@ -130,20 +116,41 @@ func (svr *Server) redirectConnectForkToSudoDaemon(req *rpc.ConnectRequest, resp
if err != nil {
return err
}
connect.AddRolloutFunc(func() error {
os.Remove(path)
return nil
})
err = connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
if err != nil {
return err
}
if req.ManagerNamespace == "" {
req.ManagerNamespace, err = util.DetectManagerNamespace(plog.WithLogger(sshCtx, logger), connect.GetFactory(), req.Namespace)
if err != nil {
return err
}
}
if req.ManagerNamespace != "" {
logger.Infof("Use manager namespace %s", req.ManagerNamespace)
connect.Namespace = req.ManagerNamespace
} else {
logger.Infof("Use special namespace %s", req.Namespace)
req.ManagerNamespace = req.Namespace
}
for _, options := range svr.secondaryConnect {
isSameCluster, _ := util.IsSameCluster(
sshCtx,
options.GetClientset().CoreV1().ConfigMaps(options.Namespace), options.Namespace,
connect.GetClientset().CoreV1().ConfigMaps(connect.Namespace), connect.Namespace,
options.GetClientset().CoreV1(), options.Namespace,
connect.GetClientset().CoreV1(), connect.Namespace,
)
if isSameCluster {
sshCancel()
os.Remove(file)
os.Remove(path)
// same cluster, do nothing
log.Infof("Connected with cluster")
logger.Infof("Connected with cluster")
return nil
}
}
@@ -153,6 +160,13 @@ func (svr *Server) redirectConnectForkToSudoDaemon(req *rpc.ConnectRequest, resp
return err
}
// only ssh jump in user daemon
content, err := os.ReadFile(path)
if err != nil {
return err
}
req.KubeconfigBytes = string(content)
req.SshJump = ssh.SshConfig{}.ToRPC()
connResp, err := cli.ConnectFork(ctx, req)
if err != nil {
return err

View File

@@ -2,11 +2,11 @@ package action
import (
"context"
"fmt"
"io"
golog "log"
"os"
"time"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"google.golang.org/grpc/codes"
@@ -15,35 +15,26 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func (svr *Server) Connect(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServer) (e error) {
defer func() {
util.InitLoggerForServer(true)
log.SetOutput(svr.LogFile)
config.Debug = false
}()
config.Debug = req.Level == int32(log.DebugLevel)
out := io.MultiWriter(newWarp(resp), svr.LogFile)
util.InitLoggerForClient(config.Debug)
log.SetOutput(out)
logger := plog.GetLoggerForClient(req.Level, io.MultiWriter(newWarp(resp), svr.LogFile))
if !svr.IsSudo {
return svr.redirectToSudoDaemon(req, resp)
return svr.redirectToSudoDaemon(req, resp, logger)
}
ctx := resp.Context()
if !svr.t.IsZero() {
s := "Already connected to cluster in full mode, you can use options `--lite` to connect to another cluster"
log.Debugf(s)
// todo define already connect error?
s := "Only support one cluster connect with full mode, you can use options `--lite` to connect to another cluster"
return status.Error(codes.AlreadyExists, s)
}
defer func() {
if e != nil || ctx.Err() != nil {
if svr.connect != nil {
svr.connect.Cleanup()
svr.connect.Cleanup(plog.WithLogger(context.Background(), logger))
svr.connect = nil
}
svr.t = time.Time{}
@@ -51,76 +42,57 @@ func (svr *Server) Connect(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServe
}()
svr.t = time.Now()
svr.connect = &handler.ConnectOptions{
Namespace: req.Namespace,
Headers: req.Headers,
PortMap: req.PortMap,
Workloads: req.Workloads,
Namespace: req.ManagerNamespace,
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
Engine: config.Engine(req.Engine),
OriginKubeconfigPath: req.OriginKubeconfigPath,
OriginNamespace: req.Namespace,
Lock: &svr.Lock,
ImagePullSecretName: req.ImagePullSecretName,
}
golog.Default().SetOutput(io.Discard)
file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
if err != nil {
return err
}
flags := pflag.NewFlagSet("", pflag.ContinueOnError)
flags.AddFlag(&pflag.Flag{
Name: "kubeconfig",
DefValue: file,
})
sshCtx, sshCancel := context.WithCancel(context.Background())
svr.connect.AddRolloutFunc(func() error {
sshCancel()
os.Remove(file)
return nil
})
sshCtx = plog.WithLogger(sshCtx, logger)
defer plog.WithoutLogger(sshCtx)
defer func() {
if e != nil {
svr.connect.Cleanup(sshCtx)
svr.connect = nil
svr.t = time.Time{}
os.Remove(file)
sshCancel()
}
}()
var path string
path, err = ssh.SshJump(sshCtx, ssh.ParseSshFromRPC(req.SshJump), flags, false)
err = svr.connect.InitClient(util.InitFactoryByPath(file, req.ManagerNamespace))
if err != nil {
return err
}
err = svr.connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
if err != nil {
return err
}
err = svr.connect.GetIPFromContext(ctx)
err = svr.connect.GetIPFromContext(ctx, nil)
if err != nil {
return err
}
config.Image = req.Image
err = svr.connect.DoConnect(sshCtx, false)
err = svr.connect.DoConnect(sshCtx, false, ctx.Done())
if err != nil {
log.Errorf("Failed to connect: %v", err)
svr.connect.Cleanup()
svr.connect = nil
svr.t = time.Time{}
logger.Errorf("Failed to connect...")
return err
}
return nil
}
func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServer) (e error) {
cli := svr.GetClient(true)
if cli == nil {
return fmt.Errorf("sudo daemon not start")
}
connect := &handler.ConnectOptions{
Namespace: req.Namespace,
Headers: req.Headers,
PortMap: req.PortMap,
Workloads: req.Workloads,
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
Engine: config.Engine(req.Engine),
OriginKubeconfigPath: req.OriginKubeconfigPath,
func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServer, logger *log.Logger) (e error) {
cli, err := svr.GetClient(true)
if err != nil {
return errors.Wrap(err, "sudo daemon not start")
}
var sshConf = ssh.ParseSshFromRPC(req.SshJump)
file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
@@ -133,13 +105,25 @@ func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon
DefValue: file,
})
sshCtx, sshCancel := context.WithCancel(context.Background())
sshCtx = plog.WithLogger(sshCtx, logger)
defer plog.WithoutLogger(sshCtx)
connect := &handler.ConnectOptions{
Namespace: req.Namespace,
OriginNamespace: req.Namespace,
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
Engine: config.Engine(req.Engine),
OriginKubeconfigPath: req.OriginKubeconfigPath,
}
connect.AddRolloutFunc(func() error {
sshCancel()
os.Remove(file)
return nil
})
defer func() {
if e != nil {
connect.Cleanup(plog.WithLogger(context.Background(), logger))
sshCancel()
os.Remove(file)
}
}()
var path string
@@ -147,21 +131,45 @@ func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon
if err != nil {
return err
}
connect.AddRolloutFunc(func() error {
os.Remove(path)
return nil
})
err = connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
if err != nil {
return err
}
if req.ManagerNamespace == "" {
req.ManagerNamespace, err = util.DetectManagerNamespace(plog.WithLogger(sshCtx, logger), connect.GetFactory(), req.Namespace)
if err != nil {
return err
}
}
if req.ManagerNamespace != "" {
logger.Infof("Use manager namespace %s", req.ManagerNamespace)
connect.Namespace = req.ManagerNamespace
} else {
logger.Infof("Use special namespace %s", req.Namespace)
req.ManagerNamespace = req.Namespace
}
if svr.connect != nil {
isSameCluster, _ := util.IsSameCluster(
sshCtx,
svr.connect.GetClientset().CoreV1().ConfigMaps(svr.connect.Namespace), svr.connect.Namespace,
connect.GetClientset().CoreV1().ConfigMaps(connect.Namespace), connect.Namespace,
svr.connect.GetClientset().CoreV1(), svr.connect.Namespace,
connect.GetClientset().CoreV1(), connect.Namespace,
)
if isSameCluster {
sshCancel()
os.Remove(path)
os.Remove(file)
// same cluster, do nothing
log.Infof("Connected to cluster")
logger.Infof("Connected to cluster")
return nil
} else {
s := "Only support one cluster connect with full mode, you can use options `--lite` to connect to another cluster"
return status.Error(codes.AlreadyExists, s)
}
}
@@ -170,6 +178,13 @@ func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon
return err
}
// only ssh jump in user daemon
content, err := os.ReadFile(path)
if err != nil {
return err
}
req.KubeconfigBytes = string(content)
req.SshJump = ssh.SshConfig{}.ToRPC()
connResp, err := cli.Connect(ctx, req)
if err != nil {
return err

View File

@@ -2,42 +2,55 @@ package action
import (
"context"
"fmt"
"io"
"time"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/dns"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func (svr *Server) Disconnect(req *rpc.DisconnectRequest, resp rpc.Daemon_DisconnectServer) error {
defer func() {
util.InitLoggerForServer(true)
log.SetOutput(svr.LogFile)
config.Debug = false
}()
out := io.MultiWriter(newDisconnectWarp(resp), svr.LogFile)
util.InitLoggerForClient(config.Debug)
log.SetOutput(out)
logger := plog.GetLoggerForClient(int32(log.InfoLevel), io.MultiWriter(newDisconnectWarp(resp), svr.LogFile))
ctx := plog.WithLogger(resp.Context(), logger)
// disconnect sudo daemon first
// then disconnect from user daemon
// because only ssh jump in user daemon
if !svr.IsSudo {
cli, err := svr.GetClient(true)
if err != nil {
return errors.Wrap(err, "sudo daemon not start")
}
connResp, err := cli.Disconnect(resp.Context(), req)
if err != nil {
return err
}
err = util.CopyGRPCStream[rpc.DisconnectResponse](connResp, resp)
if err != nil {
return err
}
}
switch {
case req.GetAll():
if svr.clone != nil {
_ = svr.clone.Cleanup()
_ = svr.clone.Cleanup(ctx)
}
svr.clone = nil
connects := handler.Connects(svr.secondaryConnect).Append(svr.connect)
for _, connect := range connects.Sort() {
if connect != nil {
connect.Cleanup()
connect.Cleanup(ctx)
}
}
svr.secondaryConnect = nil
@@ -45,22 +58,22 @@ func (svr *Server) Disconnect(req *rpc.DisconnectRequest, resp rpc.Daemon_Discon
svr.t = time.Time{}
case req.ID != nil && req.GetID() == 0:
if svr.connect != nil {
svr.connect.Cleanup()
svr.connect.Cleanup(ctx)
}
svr.connect = nil
svr.t = time.Time{}
if svr.clone != nil {
_ = svr.clone.Cleanup()
_ = svr.clone.Cleanup(ctx)
}
svr.clone = nil
case req.ID != nil:
index := req.GetID() - 1
if index < int32(len(svr.secondaryConnect)) {
svr.secondaryConnect[index].Cleanup()
svr.secondaryConnect[index].Cleanup(ctx)
svr.secondaryConnect = append(svr.secondaryConnect[:index], svr.secondaryConnect[index+1:]...)
} else {
log.Errorf("Index %d out of range", req.GetID())
plog.G(ctx).Errorf("Index %d out of range", req.GetID())
}
case req.KubeconfigBytes != nil && req.Namespace != nil:
err := disconnectByKubeConfig(
@@ -90,14 +103,14 @@ func (svr *Server) Disconnect(req *rpc.DisconnectRequest, resp rpc.Daemon_Discon
}
for _, connect := range connects.Sort() {
if connect != nil {
connect.Cleanup()
connect.Cleanup(ctx)
}
}
if foundModeFull {
svr.connect = nil
svr.t = time.Time{}
if svr.clone != nil {
_ = svr.clone.Cleanup()
_ = svr.clone.Cleanup(ctx)
}
svr.clone = nil
}
@@ -108,22 +121,6 @@ func (svr *Server) Disconnect(req *rpc.DisconnectRequest, resp rpc.Daemon_Discon
_ = dns.CleanupHosts()
}
}
if !svr.IsSudo {
cli := svr.GetClient(true)
if cli == nil {
return fmt.Errorf("sudo daemon not start")
}
connResp, err := cli.Disconnect(resp.Context(), req)
if err != nil {
return err
}
err = util.CopyGRPCStream[rpc.DisconnectResponse](connResp, resp)
if err != nil {
return err
}
}
return nil
}
@@ -155,19 +152,19 @@ func disconnectByKubeConfig(ctx context.Context, svr *Server, kubeconfigBytes st
}
func disconnect(ctx context.Context, svr *Server, connect *handler.ConnectOptions) {
client := svr.GetClient(false)
if client == nil {
_, err := svr.GetClient(false)
if err != nil {
return
}
if svr.connect != nil {
isSameCluster, _ := util.IsSameCluster(
ctx,
svr.connect.GetClientset().CoreV1().ConfigMaps(svr.connect.Namespace), svr.connect.Namespace,
connect.GetClientset().CoreV1().ConfigMaps(connect.Namespace), connect.Namespace,
svr.connect.GetClientset().CoreV1(), svr.connect.OriginNamespace,
connect.GetClientset().CoreV1(), connect.Namespace,
)
if isSameCluster {
log.Infof("Disconnecting from the cluster...")
svr.connect.Cleanup()
plog.G(ctx).Infof("Disconnecting from the cluster...")
svr.connect.Cleanup(ctx)
svr.connect = nil
svr.t = time.Time{}
}
@@ -176,12 +173,12 @@ func disconnect(ctx context.Context, svr *Server, connect *handler.ConnectOption
options := svr.secondaryConnect[i]
isSameCluster, _ := util.IsSameCluster(
ctx,
options.GetClientset().CoreV1().ConfigMaps(options.Namespace), options.Namespace,
connect.GetClientset().CoreV1().ConfigMaps(connect.Namespace), connect.Namespace,
options.GetClientset().CoreV1(), options.OriginNamespace,
connect.GetClientset().CoreV1(), connect.Namespace,
)
if isSameCluster {
log.Infof("Disconnecting from the cluster...")
options.Cleanup()
plog.G(ctx).Infof("Disconnecting from the cluster...")
options.Cleanup(ctx)
svr.secondaryConnect = append(svr.secondaryConnect[:i], svr.secondaryConnect[i+1:]...)
i--
}

View File

@@ -6,51 +6,56 @@ import (
"time"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/controlplane"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/inject"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func (svr *Server) Leave(req *rpc.LeaveRequest, resp rpc.Daemon_LeaveServer) error {
defer func() {
util.InitLoggerForServer(true)
log.SetOutput(svr.LogFile)
config.Debug = false
}()
out := io.MultiWriter(newLeaveWarp(resp), svr.LogFile)
util.InitLoggerForClient(config.Debug)
log.SetOutput(out)
logger := plog.GetLoggerForClient(int32(log.InfoLevel), io.MultiWriter(newLeaveWarp(resp), svr.LogFile))
if svr.connect == nil {
log.Infof("Not proxy any resource in cluster")
logger.Infof("Not proxy any resource in cluster")
return fmt.Errorf("not proxy any resource in cluster")
}
ctx := plog.WithLogger(resp.Context(), logger)
factory := svr.connect.GetFactory()
namespace := svr.connect.Namespace
maps := svr.connect.GetClientset().CoreV1().ConfigMaps(namespace)
mapInterface := svr.connect.GetClientset().CoreV1().ConfigMaps(namespace)
v4, _ := svr.connect.GetLocalTunIP()
for _, workload := range req.GetWorkloads() {
object, err := util.GetUnstructuredObject(factory, namespace, workload)
object, err := util.GetUnstructuredObject(factory, req.Namespace, workload)
if err != nil {
log.Errorf("Failed to get unstructured object: %v", err)
logger.Errorf("Failed to get unstructured object: %v", err)
return err
}
u := object.Object.(*unstructured.Unstructured)
templateSpec, _, err := util.GetPodTemplateSpecPath(u)
if err != nil {
logger.Errorf("Failed to get template spec path: %v", err)
return err
}
// add rollback func to remove envoy config
err = inject.UnPatchContainer(factory, maps, object, func(isFargateMode bool, rule *controlplane.Rule) bool {
var empty bool
empty, err = inject.UnPatchContainer(ctx, factory, mapInterface, object, func(isFargateMode bool, rule *controlplane.Rule) bool {
if isFargateMode {
return svr.connect.IsMe(util.ConvertWorkloadToUid(workload), rule.Headers)
return svr.connect.IsMe(req.Namespace, util.ConvertWorkloadToUid(workload), rule.Headers)
}
return rule.LocalTunIPv4 == v4
})
if err != nil {
log.Errorf("Leaving workload %s failed: %v", workload, err)
plog.G(ctx).Errorf("Leaving workload %s failed: %v", workload, err)
continue
}
svr.connect.LeavePortMap(workload)
err = util.RolloutStatus(resp.Context(), factory, namespace, workload, time.Minute*60)
if empty {
err = inject.ModifyServiceTargetPort(ctx, svr.connect.GetClientset(), req.Namespace, templateSpec.Labels, map[int32]int32{})
}
svr.connect.LeavePortMap(req.Namespace, workload)
err = util.RolloutStatus(ctx, factory, req.Namespace, workload, time.Minute*60)
}
return nil
}

View File

@@ -1,46 +1,84 @@
package action
import (
"bufio"
"io"
"log"
"os"
"github.com/hpcloud/tail"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
)
func (svr *Server) Logs(req *rpc.LogRequest, resp rpc.Daemon_LogsServer) error {
path := GetDaemonLogPath()
lines, err2 := countLines(path)
if err2 != nil {
return err2
}
// only show latest N lines
if req.Lines < 0 {
lines = -req.Lines
} else {
lines -= req.Lines
}
config := tail.Config{Follow: req.Follow, ReOpen: false, MustExist: true, Logger: log.New(io.Discard, "", log.LstdFlags)}
if !req.Follow {
// FATAL -- cannot set ReOpen without Follow.
config.ReOpen = false
}
file, err := tail.TailFile(path, config)
line := int64(max(req.Lines, -req.Lines))
sudoLine, sudoSize, err := seekToLastLine(config.GetDaemonLogPath(true), line)
if err != nil {
return err
}
defer file.Stop()
userLine, userSize, err := seekToLastLine(config.GetDaemonLogPath(false), line)
if err != nil {
return err
}
err = recent(resp, sudoLine, userLine)
if err != nil {
return err
}
if req.Follow {
err = tee(resp, sudoSize, userSize)
if err != nil {
return err
}
}
return nil
}
func tee(resp rpc.Daemon_LogsServer, sudoLine int64, userLine int64) error {
// FATAL -- cannot set ReOpen without Follow.
sudoConfig := tail.Config{
Follow: true,
ReOpen: true,
MustExist: true,
Logger: log.New(io.Discard, "", log.LstdFlags),
Location: &tail.SeekInfo{Offset: sudoLine, Whence: io.SeekStart},
}
userConfig := tail.Config{
Follow: true,
ReOpen: true,
MustExist: true,
Logger: log.New(io.Discard, "", log.LstdFlags),
Location: &tail.SeekInfo{Offset: userLine, Whence: io.SeekStart},
}
sudoFile, err := tail.TailFile(config.GetDaemonLogPath(true), sudoConfig)
if err != nil {
return err
}
defer sudoFile.Stop()
userFile, err := tail.TailFile(config.GetDaemonLogPath(false), userConfig)
if err != nil {
return err
}
defer userFile.Stop()
for {
select {
case <-resp.Context().Done():
return nil
case line, ok := <-file.Lines:
case line, ok := <-userFile.Lines:
if !ok {
return nil
}
if line.Err != nil {
return line.Err
}
err = resp.Send(&rpc.LogResponse{Message: "[USER] " + line.Text + "\n"})
if err != nil {
return err
}
case line, ok := <-sudoFile.Lines:
if !ok {
return nil
}
@@ -48,11 +86,7 @@ func (svr *Server) Logs(req *rpc.LogRequest, resp rpc.Daemon_LogsServer) error {
return err
}
if lines--; lines >= 0 {
continue
}
err = resp.Send(&rpc.LogResponse{Message: line.Text + "\n"})
err = resp.Send(&rpc.LogResponse{Message: "[ROOT] " + line.Text + "\n"})
if err != nil {
return err
}
@@ -60,23 +94,115 @@ func (svr *Server) Logs(req *rpc.LogRequest, resp rpc.Daemon_LogsServer) error {
}
}
func countLines(filename string) (int32, error) {
func recent(resp rpc.Daemon_LogsServer, sudoLine int64, userLine int64) error {
sudoConfig := tail.Config{
Follow: false,
ReOpen: false,
MustExist: true,
Logger: log.New(io.Discard, "", log.LstdFlags),
Location: &tail.SeekInfo{Offset: sudoLine, Whence: io.SeekStart},
}
userConfig := tail.Config{
Follow: false,
ReOpen: false,
MustExist: true,
Logger: log.New(io.Discard, "", log.LstdFlags),
Location: &tail.SeekInfo{Offset: userLine, Whence: io.SeekStart},
}
sudoFile, err := tail.TailFile(config.GetDaemonLogPath(true), sudoConfig)
if err != nil {
return err
}
defer sudoFile.Stop()
userFile, err := tail.TailFile(config.GetDaemonLogPath(false), userConfig)
if err != nil {
return err
}
defer userFile.Stop()
userOut:
for {
select {
case <-resp.Context().Done():
return nil
case line, ok := <-userFile.Lines:
if !ok {
break userOut
}
if line.Err != nil {
return line.Err
}
err = resp.Send(&rpc.LogResponse{Message: "[USER] " + line.Text + "\n"})
if err != nil {
return err
}
}
}
sudoOut:
for {
select {
case <-resp.Context().Done():
return nil
case line, ok := <-sudoFile.Lines:
if !ok {
break sudoOut
}
if line.Err != nil {
return line.Err
}
err = resp.Send(&rpc.LogResponse{Message: "[ROOT] " + line.Text + "\n"})
if err != nil {
return err
}
}
}
return nil
}
func seekToLastLine(filename string, lines int64) (int64, int64, error) {
file, err := os.Open(filename)
if err != nil {
return 0, err
return 0, 0, err
}
defer file.Close()
scanner := bufio.NewScanner(file)
lineCount := int32(0)
for scanner.Scan() {
lineCount++
stat, err := file.Stat()
if err != nil {
return 0, 0, err
}
size := stat.Size()
bufSize := int64(4096)
lineCount := int64(0)
remaining := size
if err = scanner.Err(); err != nil {
return 0, err
for remaining > 0 {
chunkSize := bufSize
if remaining < bufSize {
chunkSize = remaining
}
pos := remaining - chunkSize
_, err = file.Seek(pos, io.SeekStart)
if err != nil {
return 0, 0, err
}
buf := make([]byte, chunkSize)
_, err = file.Read(buf)
if err != nil {
return 0, 0, err
}
for i := len(buf) - 1; i >= 0; i-- {
if buf[i] == '\n' {
lineCount++
if lineCount > lines {
targetPos := pos + int64(i) + 1
return targetPos, size, nil
}
}
}
remaining -= chunkSize
}
return lineCount, nil
return 0, 0, nil
}

View File

@@ -2,16 +2,19 @@ package action
import (
"context"
"fmt"
"io"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/utils/ptr"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
@@ -23,28 +26,10 @@ import (
// 2. if already connect to cluster
// 2.1 disconnect from cluster
// 2.2 same as step 1
func (svr *Server) Proxy(req *rpc.ConnectRequest, resp rpc.Daemon_ProxyServer) (e error) {
defer func() {
util.InitLoggerForServer(true)
log.SetOutput(svr.LogFile)
config.Debug = false
}()
out := io.MultiWriter(newProxyWarp(resp), svr.LogFile)
func (svr *Server) Proxy(req *rpc.ProxyRequest, resp rpc.Daemon_ProxyServer) (e error) {
logger := plog.GetLoggerForClient(int32(log.InfoLevel), io.MultiWriter(newProxyWarp(resp), svr.LogFile))
config.Image = req.Image
config.Debug = req.Level == int32(log.DebugLevel)
util.InitLoggerForClient(config.Debug)
log.SetOutput(out)
ctx := resp.Context()
connect := &handler.ConnectOptions{
Namespace: req.Namespace,
Headers: req.Headers,
PortMap: req.PortMap,
Workloads: req.Workloads,
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
Engine: config.Engine(req.Engine),
OriginKubeconfigPath: req.OriginKubeconfigPath,
ImagePullSecretName: req.ImagePullSecretName,
}
ctx := plog.WithLogger(resp.Context(), logger)
var sshConf = ssh.ParseSshFromRPC(req.SshJump)
file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
@@ -61,65 +46,62 @@ func (svr *Server) Proxy(req *rpc.ConnectRequest, resp rpc.Daemon_ProxyServer) (
if err != nil {
return err
}
connect := &handler.ConnectOptions{
Namespace: req.Namespace,
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
Engine: config.Engine(req.Engine),
OriginKubeconfigPath: req.OriginKubeconfigPath,
ImagePullSecretName: req.ImagePullSecretName,
}
err = connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
if err != nil {
return err
}
var workloads []string
workloads, err = util.NormalizedResource(ctx, connect.GetFactory(), connect.GetClientset(), connect.Namespace, connect.Workloads)
workloads, err = util.NormalizedResource(ctx, connect.GetFactory(), connect.GetClientset(), req.Namespace, req.Workloads)
if err != nil {
return err
}
defer func() {
if e != nil && svr.connect != nil {
_ = svr.connect.LeaveAllProxyResources(context.Background())
_ = svr.connect.LeaveAllProxyResources(plog.WithLogger(context.Background(), logger))
}
}()
daemonClient := svr.GetClient(false)
if daemonClient == nil {
return fmt.Errorf("daemon is not avaliable")
}
if svr.connect != nil {
isSameCluster, _ := util.IsSameCluster(
ctx,
svr.connect.GetClientset().CoreV1().ConfigMaps(svr.connect.Namespace), svr.connect.Namespace,
connect.GetClientset().CoreV1().ConfigMaps(connect.Namespace), connect.Namespace,
)
if isSameCluster {
// same cluster, do nothing
log.Infof("Connected to cluster")
} else {
log.Infof("Disconnecting from another cluster...")
var disconnectResp rpc.Daemon_DisconnectClient
disconnectResp, err = daemonClient.Disconnect(ctx, &rpc.DisconnectRequest{
KubeconfigBytes: ptr.To(req.KubeconfigBytes),
Namespace: ptr.To(connect.Namespace),
SshJump: sshConf.ToRPC(),
})
if err != nil {
return err
}
err = util.CopyAndConvertGRPCStream[rpc.DisconnectResponse, rpc.ConnectResponse](
disconnectResp,
resp,
func(response *rpc.DisconnectResponse) *rpc.ConnectResponse {
return &rpc.ConnectResponse{Message: response.Message}
},
)
if err != nil {
return err
}
util.InitLoggerForClient(config.Debug)
log.SetOutput(out)
}
cli, err := svr.GetClient(false)
if err != nil {
return errors.Wrap(err, "daemon is not available")
}
if svr.connect == nil {
log.Debugf("Connectting to cluster")
var connResp rpc.Daemon_ConnectClient
connResp, err = daemonClient.Connect(ctx, req)
plog.G(ctx).Debugf("Connecting to cluster")
var connResp rpc.Daemon_ConnectClient
connResp, err = cli.Connect(ctx, convert(req))
if err != nil {
return err
}
err = util.CopyGRPCStream[rpc.ConnectResponse](connResp, resp)
if err != nil {
if status.Code(err) != codes.AlreadyExists {
return err
}
plog.G(ctx).Infof("Disconnecting from another cluster...")
var disconnectResp rpc.Daemon_DisconnectClient
disconnectResp, err = cli.Disconnect(ctx, &rpc.DisconnectRequest{ID: ptr.To[int32](0)})
if err != nil {
return err
}
err = util.CopyAndConvertGRPCStream[rpc.DisconnectResponse, rpc.ConnectResponse](
disconnectResp,
resp,
func(response *rpc.DisconnectResponse) *rpc.ConnectResponse {
return &rpc.ConnectResponse{Message: response.Message}
},
)
if err != nil {
return err
}
connResp, err = cli.Connect(ctx, convert(req))
if err != nil {
return err
}
@@ -127,13 +109,11 @@ func (svr *Server) Proxy(req *rpc.ConnectRequest, resp rpc.Daemon_ProxyServer) (
if err != nil {
return err
}
util.InitLoggerForClient(config.Debug)
log.SetOutput(out)
}
err = svr.connect.CreateRemoteInboundPod(ctx, workloads, req.Headers, req.PortMap)
err = svr.connect.CreateRemoteInboundPod(ctx, req.Namespace, workloads, req.Headers, req.PortMap)
if err != nil {
log.Errorf("Failed to inject inbound sidecar: %v", err)
plog.G(ctx).Errorf("Failed to inject inbound sidecar: %v", err)
return err
}
return nil
@@ -144,7 +124,7 @@ type proxyWarp struct {
}
func (r *proxyWarp) Write(p []byte) (n int, err error) {
_ = r.server.Send(&rpc.ConnectResponse{
_ = r.server.Send(&rpc.ProxyResponse{
Message: string(p),
})
return len(p), nil
@@ -153,3 +133,20 @@ func (r *proxyWarp) Write(p []byte) (n int, err error) {
func newProxyWarp(server rpc.Daemon_ProxyServer) io.Writer {
return &proxyWarp{server: server}
}
func convert(req *rpc.ProxyRequest) *rpc.ConnectRequest {
return &rpc.ConnectRequest{
KubeconfigBytes: req.KubeconfigBytes,
Namespace: req.Namespace,
Engine: req.Engine,
ExtraRoute: req.ExtraRoute,
SshJump: req.SshJump,
TransferImage: req.TransferImage,
Image: req.Image,
ImagePullSecretName: req.ImagePullSecretName,
Foreground: req.Foreground,
Level: req.Level,
OriginKubeconfigPath: req.OriginKubeconfigPath,
ManagerNamespace: req.ManagerNamespace,
}
}

View File

@@ -7,26 +7,25 @@ import (
log "github.com/sirupsen/logrus"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/dns"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func (svr *Server) Quit(req *rpc.QuitRequest, resp rpc.Daemon_QuitServer) error {
defer func() {
util.InitLoggerForServer(true)
log.SetOutput(svr.LogFile)
config.Debug = false
}()
util.InitLoggerForClient(config.Debug)
log.SetOutput(io.MultiWriter(newQuitWarp(resp), svr.LogFile))
logger := plog.GetLoggerForClient(int32(log.InfoLevel), io.MultiWriter(newQuitWarp(resp), svr.LogFile))
ctx := context.Background()
if resp != nil {
ctx = resp.Context()
}
ctx = plog.WithLogger(ctx, logger)
if svr.clone != nil {
err := svr.clone.Cleanup()
err := svr.clone.Cleanup(ctx)
if err != nil {
log.Errorf("Cleanup clone failed: %v", err)
plog.G(ctx).Errorf("Cleanup clone failed: %v", err)
}
svr.clone = nil
}
@@ -34,7 +33,7 @@ func (svr *Server) Quit(req *rpc.QuitRequest, resp rpc.Daemon_QuitServer) error
connects := handler.Connects(svr.secondaryConnect).Append(svr.connect)
for _, conn := range connects.Sort() {
if conn != nil {
conn.Cleanup()
conn.Cleanup(ctx)
}
}
svr.secondaryConnect = nil

View File

@@ -5,27 +5,19 @@ import (
log "github.com/sirupsen/logrus"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
func (svr *Server) Remove(req *rpc.RemoveRequest, resp rpc.Daemon_RemoveServer) error {
defer func() {
util.InitLoggerForServer(true)
log.SetOutput(svr.LogFile)
config.Debug = false
}()
out := io.MultiWriter(newRemoveWarp(resp), svr.LogFile)
util.InitLoggerForClient(config.Debug)
log.SetOutput(out)
logger := plog.GetLoggerForClient(int32(log.InfoLevel), io.MultiWriter(newRemoveWarp(resp), svr.LogFile))
ctx := plog.WithLogger(resp.Context(), logger)
if svr.clone != nil {
err := svr.clone.Cleanup(req.Workloads...)
err := svr.clone.Cleanup(ctx, req.Workloads...)
svr.clone = nil
return err
} else {
log.Info("No clone resource found")
logger.Info("No clone resource found")
}
return nil
}

View File

@@ -6,26 +6,15 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func (svr *Server) Reset(req *rpc.ResetRequest, resp rpc.Daemon_ResetServer) error {
defer func() {
util.InitLoggerForServer(true)
log.SetOutput(svr.LogFile)
config.Debug = false
}()
out := io.MultiWriter(newResetWarp(resp), svr.LogFile)
util.InitLoggerForClient(config.Debug)
log.SetOutput(out)
connect := &handler.ConnectOptions{
Namespace: req.Namespace,
}
logger := plog.GetLoggerForClient(int32(log.InfoLevel), io.MultiWriter(newResetWarp(resp), svr.LogFile))
file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
if err != nil {
@@ -37,17 +26,18 @@ func (svr *Server) Reset(req *rpc.ResetRequest, resp rpc.Daemon_ResetServer) err
DefValue: file,
})
var sshConf = ssh.ParseSshFromRPC(req.SshJump)
var ctx = resp.Context()
var ctx = plog.WithLogger(resp.Context(), logger)
var path string
path, err = ssh.SshJump(ctx, sshConf, flags, false)
if err != nil {
return err
}
connect := &handler.ConnectOptions{}
err = connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
if err != nil {
return err
}
err = connect.Reset(ctx, req.Workloads)
err = connect.Reset(ctx, req.Namespace, req.Workloads)
if err != nil {
return err
}

View File

@@ -1,7 +1,6 @@
package action
import (
"path/filepath"
"sync"
"time"
@@ -9,7 +8,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/metadata/metadatainformer"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
)
@@ -18,7 +16,7 @@ type Server struct {
rpc.UnimplementedDaemonServer
Cancel func()
GetClient func(isSudo bool) rpc.DaemonClient
GetClient func(isSudo bool) (rpc.DaemonClient, error)
IsSudo bool
LogFile *lumberjack.Logger
Lock sync.Mutex
@@ -33,7 +31,3 @@ type Server struct {
ID string
}
func GetDaemonLogPath() string {
return filepath.Join(config.DaemonPath, config.LogFile)
}

View File

@@ -6,12 +6,12 @@ import (
"sync"
"github.com/containernetworking/cni/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/core"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/tun"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
@@ -31,7 +31,7 @@ func (svr *Server) SshStart(ctx context.Context, req *rpc.SshStartRequest) (resp
var clientCIDR *net.IPNet
clientIP, clientCIDR, err = net.ParseCIDR(req.ClientIP)
if err != nil {
log.Errorf("Failed to parse network CIDR: %v", err)
plog.G(ctx).Errorf("Failed to parse network CIDR: %v", err)
return
}
if serverIP == "" {
@@ -45,7 +45,7 @@ func (svr *Server) SshStart(ctx context.Context, req *rpc.SshStartRequest) (resp
}()
r := core.Route{
ServeNodes: []string{
Listeners: []string{
"tun://127.0.0.1:8422?net=" + DefaultServerIP,
"tcp://:10800",
},
@@ -54,7 +54,7 @@ func (svr *Server) SshStart(ctx context.Context, req *rpc.SshStartRequest) (resp
var servers []core.Server
servers, err = handler.Parse(r)
if err != nil {
log.Errorf("Failed to parse route: %v", err)
plog.G(ctx).Errorf("Failed to parse route: %v", err)
return
}
var ctx1 context.Context
@@ -62,7 +62,7 @@ func (svr *Server) SshStart(ctx context.Context, req *rpc.SshStartRequest) (resp
go func() {
err := handler.Run(ctx1, servers)
if err != nil {
log.Errorf("Failed to run route: %v", err)
plog.G(ctx).Errorf("Failed to run route: %v", err)
}
}()
serverIP = DefaultServerIP
@@ -86,7 +86,7 @@ func (svr *Server) SshStart(ctx context.Context, req *rpc.SshStartRequest) (resp
GW: nil,
})
if err != nil {
log.Errorf("Failed to add route: %v", err)
plog.G(ctx).Errorf("Failed to add route: %v", err)
return
}

View File

@@ -73,7 +73,7 @@ func genStatus(connect *handler.ConnectOptions, mode string, index int32) *rpc.S
Cluster: util.GetKubeconfigCluster(connect.GetFactory()),
Mode: mode,
Kubeconfig: connect.OriginKubeconfigPath,
Namespace: connect.Namespace,
Namespace: connect.OriginNamespace,
Status: status,
Netif: tunName,
}
@@ -108,18 +108,21 @@ func gen(ctx context.Context, connect *handler.ConnectOptions, clone *handler.Cl
var proxyRule []*rpc.ProxyRule
for _, rule := range virtual.Rules {
proxyRule = append(proxyRule, &rpc.ProxyRule{
Headers: rule.Headers,
LocalTunIPv4: rule.LocalTunIPv4,
LocalTunIPv6: rule.LocalTunIPv6,
CurrentDevice: util.If(isFargateMode, connect.IsMe(util.ConvertWorkloadToUid(virtual.Uid), rule.Headers), v4 == rule.LocalTunIPv4 && v6 == rule.LocalTunIPv6),
PortMap: useSecondPort(rule.PortMap),
Headers: rule.Headers,
LocalTunIPv4: rule.LocalTunIPv4,
LocalTunIPv6: rule.LocalTunIPv6,
CurrentDevice: util.If(isFargateMode,
connect.IsMe(virtual.Namespace, util.ConvertWorkloadToUid(virtual.Uid), rule.Headers),
v4 == rule.LocalTunIPv4 && v6 == rule.LocalTunIPv6,
),
PortMap: useSecondPort(rule.PortMap),
})
}
proxyList = append(proxyList, &rpc.Proxy{
ClusterID: connect.GetClusterID(),
Cluster: util.GetKubeconfigCluster(connect.GetFactory()),
Kubeconfig: connect.OriginKubeconfigPath,
Namespace: connect.Namespace,
Namespace: virtual.Namespace,
Workload: virtual.Uid,
RuleList: proxyRule,
})
@@ -133,7 +136,7 @@ func gen(ctx context.Context, connect *handler.ConnectOptions, clone *handler.Cl
clusterID = connect.GetClusterID()
cluster = util.GetKubeconfigCluster(connect.GetFactory())
kubeconfig = connect.OriginKubeconfigPath
namespace = connect.Namespace
namespace = connect.OriginNamespace
}
cloneList = append(cloneList, &rpc.Clone{
ClusterID: clusterID,

View File

@@ -6,27 +6,19 @@ import (
log "github.com/sirupsen/logrus"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
func (svr *Server) Stop(req *rpc.QuitRequest, resp rpc.Daemon_QuitServer) error {
defer func() {
util.InitLoggerForServer(true)
log.SetOutput(svr.LogFile)
config.Debug = false
}()
out := io.MultiWriter(newStopWarp(resp), svr.LogFile)
util.InitLoggerForClient(config.Debug)
log.SetOutput(out)
logger := plog.GetLoggerForClient(int32(log.InfoLevel), io.MultiWriter(newStopWarp(resp), svr.LogFile))
ctx := plog.WithLogger(resp.Context(), logger)
if svr.connect == nil {
log.Info("No connect")
plog.G(ctx).Info("No connect")
return nil
}
svr.connect.Cleanup()
svr.connect.Cleanup(ctx)
svr.t = time.Time{}
svr.connect = nil
return nil

Some files were not shown because too many files have changed in this diff Show More