Compare commits

...

413 Commits

Author SHA1 Message Date
naison
47b2f0006c hotfix: fix --remote-kubeconfig override temp kubeconfig (#699)
* hotfix: fix --remote-kubeconfig override temp kubeconfig

* hotfix: ignore ssh set env failed

* hotfix: use unix timestamp
2025-08-10 20:05:35 +08:00
naison
f59aa8aca1 feat: update krew index version to refs/tags/v2.9.4 (#696)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-08-10 15:22:50 +08:00
naison
1b5ff03463 Update charts/index.yaml (#697)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-08-10 15:22:36 +08:00
naison
d40e69e781 feat: cmd status show remote-kubeconfig name (#695)
* feat: cmd status show remote-kubeconfig name
2025-08-10 14:23:01 +08:00
naison
d2a6d78331 feat: add flags --kubeconfig-json (#694) 2025-08-09 22:29:05 +08:00
naison
5b0758be69 Update charts/index.yaml (#693)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-08-07 08:13:14 +08:00
naison
9fb5d01f28 feat: update krew index version to refs/tags/v2.9.3 (#692)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-08-07 08:13:00 +08:00
naison
4ddff991bc hotfix: fix cmd run (#691)
* hotfix: fix cmd run

* hotfix: docs
2025-08-06 23:47:57 +08:00
naison
faecc95653 hotfix: fix cmd run (#690) 2025-08-06 23:43:26 +08:00
naison
7411deab75 Update charts/index.yaml (#689)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-08-06 19:37:14 +08:00
naison
1b8b6f8390 feat: update krew index version to refs/tags/v2.9.2 (#688)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-08-06 19:36:58 +08:00
naison
6197138ad6 refactor: refactor code (#687) 2025-08-06 18:38:40 +08:00
fengcaiwen
a13540a258 docs: add status 2025-08-05 11:12:32 +08:00
naison
3a45a33fb9 Update charts/index.yaml (#686)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-08-04 23:39:27 +08:00
naison
d49a0dc3e5 feat: update krew index version to refs/tags/v2.9.1 (#685)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-08-04 23:39:10 +08:00
naison
b3b13fce86 refactor: rename cmd dev to run (#684)
* refactor: code

* refactor: optimize code
2025-08-04 23:00:38 +08:00
naison
2d5653ee2b hotfix: ipv6 skip checksum verification (#682) 2025-08-04 21:16:53 +08:00
naison
c4d28fd497 hotfix: heartbeats with gen icmp packet (#683) 2025-08-04 21:16:38 +08:00
naison
6a82b12646 Update charts/index.yaml (#679)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-07-27 22:05:14 +08:00
naison
b2c7fb07d3 feat: update krew index version to refs/tags/v2.9.0 (#678)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-07-27 22:05:02 +08:00
naison
05905bb8ba feat: proxy mode support multiple cluster (#677)
* feat: proxy mode support multiple cluster

* feat: ut

* feat: update readme

* feat: ut

* refactor: rename

* refactor: update service
2025-07-27 21:22:08 +08:00
naison
38584da9d3 refactor: remove options netstack (#673)
* refactor: remove options netstack

* refactor: remove options netstack

* refactor: forward chain use gvisor tcp

* refactor: docs

* refactor: remove forwarder options

* refactor: optimize code

* refactor: remove node type "tcp://"

* hotfix: packet read from tun needs to handle by gvisor

* hotfix: fix charts

* refactor: remove parameter engine
2025-07-27 17:26:14 +08:00
naison
5a97a5d6e2 refactor: remove no sense code (#675) 2025-07-23 19:55:32 +08:00
naison
1823567949 hotfix: fix waiting for envoy rule to work taking too long (#674) 2025-07-23 19:52:12 +08:00
naison
43023d080f Update charts/index.yaml (#671)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-07-10 11:31:41 +08:00
naison
9cd8b32c6b feat: update krew index version to refs/tags/v2.8.1 (#670)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-07-10 11:31:30 +08:00
naison
4e568fe9e3 hotfix: fix CVE (#669)
* hotfix: fix CVE

* feat: prefer use cmd rather than magic dns to set dns on linux

* feat: update go work sum

* feat: update ut
2025-07-10 10:35:48 +08:00
naison
31ead176c6 Update charts/index.yaml (#667)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-07-05 22:29:46 +08:00
naison
620a14f066 feat: update krew index version to refs/tags/v2.8.0 (#666)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-07-05 22:29:37 +08:00
naison
211c9309b2 feat: handle local conn with gvisor (#665)
* feat: handle local conn with gvisor

* feat: remove udp route map

* feat: optimize code

* feat: length

* feat: works

* feat: should works

* feat: optimize code

* feat: optimize code

* feat: gudp not set remark

* feat: ut

* feat: set to default value 0

* feat: send reset to gvisor tcp forward request if error

* feat: not need to create firewall rule on windows

* feat: typo
2025-07-05 21:43:44 +08:00
naison
62725d8011 Update charts/index.yaml (#664)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-07-04 20:54:17 +08:00
naison
8eb646dfb8 feat: update krew index version to refs/tags/v2.7.21 (#663)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-07-04 20:54:04 +08:00
naison
e490f72a78 hotfix: swap envoy rule header nil to last position (#662) 2025-07-04 19:53:16 +08:00
naison
61a33ff5bd Update charts/index.yaml (#660)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-06-26 12:17:02 +08:00
naison
0cdb530cbd feat: update krew index version to refs/tags/v2.7.20 (#659)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-06-26 12:16:45 +08:00
naison
b5235a3a26 chore: update cmd example 2025-06-26 03:24:12 +00:00
naison
f14d074417 hotfix: add cmd once for generate tls cert after helm installed (#657)
* hotfix: add cmd once for generate tls cert after helm installed

* hotfix: update scale

* hotfix: update scale

* hotfix: fix bugs

* hotfix: print

* feat: add role for get cidr

* feat: add --image options for cmd once

* feat: add role watch pod

* feat: filter api-server
2025-06-26 11:08:42 +08:00
naison
4021480ad5 docs: add donate link 2025-06-18 22:04:39 +08:00
naison
4593ddcf24 Update charts/index.yaml (#655)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-06-18 18:42:43 +08:00
naison
480471efc5 feat: update krew index version to refs/tags/v2.7.19 (#654)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-06-18 18:25:44 +08:00
mazhong
edd2450880 fix: preserve env HOME on unix (#653) 2025-06-18 17:34:36 +08:00
mazhong
b3af39f840 fix: linux startup must use sudo (#651) 2025-06-18 16:48:50 +08:00
naison
9010d05198 refactor: optimize code (#650) 2025-06-18 10:16:30 +08:00
naison
de9bbcd1b0 Update charts/index.yaml (#648)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-06-14 15:14:34 +08:00
naison
0fa136cb7a feat: update krew index version to refs/tags/v2.7.18 (#647)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-06-14 15:14:22 +08:00
naison
306ba9be8f refactor: restore logic (#646) 2025-06-14 13:30:26 +08:00
naison
6ca22822f9 feat: support restore from local config (#645)
* refactor: optimize dhcp

* feat: support recover from config

* feat: optimize code

* feat: fix bug

* feat: fix bug

* feat: rename
2025-06-14 13:01:24 +08:00
naison
33fba01904 Update charts/index.yaml (#644)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-06-12 14:04:12 +08:00
naison
05be5c317e feat: update krew index version to refs/tags/v2.7.17 (#643)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-06-12 14:04:00 +08:00
naison
46d15c5742 refactor: optimize cmd disconnect (#642) 2025-06-12 13:15:41 +08:00
naison
4949df56ef hotfix: fix dev/clone mode bug (#640) 2025-06-12 12:59:39 +08:00
naison
072e67ce6c refactor: optimize cmd disconnect (#641) 2025-06-12 12:59:08 +08:00
naison
11bb5584ea Update charts/index.yaml (#639)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-06-10 23:16:53 +08:00
naison
bbbcebd9d5 feat: update krew index version to refs/tags/v2.7.16 (#638)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-06-10 23:16:25 +08:00
naison
423254b172 Revert "chore: add sync image to ccr.ccs.tencentyun.com (#636)" 2025-06-10 22:24:10 +08:00
naison
507da8a44c feat: proxy mode use traffic-manager pod image (#635) 2025-06-10 19:02:04 +08:00
naison
bfed866c04 chore: add sync image to ccr.ccs.tencentyun.com (#636) 2025-06-10 14:55:20 +08:00
naison
bdfa4f6d16 hotfix: grpc connect to daemon process ignore http_proxy and https_proxy (#633) 2025-06-09 14:59:03 +08:00
naison
5785ba0f42 Update charts/index.yaml (#632)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-06-07 13:37:58 +08:00
naison
1e6475379f feat: update krew index version to refs/tags/v2.7.15 (#631)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-06-07 13:37:41 +08:00
naison
bb991fc1d7 refactor: use grpc stream send cancel operation (#629)
* refactor: use grpc stream send cancel operation

* refactor: ssh jump remove flags
2025-06-07 12:24:28 +08:00
naison
4f3e443bca feat: update 2025-06-07 12:14:56 +08:00
naison
8d64dab79f feat: update envoy image tag to v1.34.1 2025-06-07 12:14:56 +08:00
naison
2614669671 chore: spelling mistake (#628) 2025-06-04 18:01:57 +08:00
naison
2f16b31eb6 chore: add issue template (#627) 2025-06-04 17:57:03 +08:00
naison
bdeb3d6d09 Update charts/index.yaml (#626) 2025-06-04 15:17:27 +08:00
naison
ab2a8d1821 feat: update krew index version to refs/tags/v2.7.14 (#625) 2025-06-04 15:17:13 +08:00
naison
fd59ed242c refactor: use origin workload of proxy mode (#621) 2025-06-04 14:30:24 +08:00
naison
a750327d9e hotfix: ignore some known top dns server on macOS 2025-06-04 06:26:09 +00:00
naison
69c5ed6c98 ut: add cache for setup minikube on macos 2025-06-03 17:23:37 +08:00
naison
d1dd44be35 ut: setup minikube on macos 2025-06-03 16:52:44 +08:00
fengcaiwen
229eb747a4 refactor: remove target-kubeconfig of clone mode 2025-06-02 19:05:36 +08:00
kubenetworks
dd3ba1c059 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-06-01 19:35:12 +08:00
wencaiwulue
db5ea99133 feat: update krew index version to refs/tags/v2.7.13 2025-06-01 19:34:43 +08:00
naison
fcbe2d64f7 chore: add ut for center install 2025-06-01 18:42:32 +08:00
fengcaiwen
a0c0860051 hotfix: fix center install cause mutate webhook not works 2025-06-01 18:39:02 +08:00
wencaiwulue
e374d6b51d feat: update krew index version to refs/tags/v2.7.12 2025-05-23 11:46:30 +08:00
kubenetworks
9703a12bc2 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-05-23 11:46:18 +08:00
naison
f9f52d1001 Merge pull request #612
ut: fix ut
2025-05-23 10:53:37 +08:00
naison
51c16989fe ut: fix ut 2025-05-23 02:52:37 +00:00
naison
75c609211b refactor: use informer to list&watch pod&service ip for adding to route table (#610) 2025-05-23 10:09:06 +08:00
naison
6d545dc5c9 hotfix: remove cidr if contains api-server ip 2025-05-20 22:12:19 +08:00
naison
b17da3cbcb feat: update krew index version to refs/tags/v2.7.11 (#607) 2025-05-18 17:32:49 +08:00
naison
d1108ebd86 Update charts/index.yaml (#608) 2025-05-18 17:32:35 +08:00
naison
792839a2d4 feat: support dump service into hosts in center cluster mode (#605) 2025-05-18 16:20:34 +08:00
fengcaiwen
f493931b41 hotfix: remove job before install 2025-05-18 16:20:13 +08:00
wencaiwulue
7df065ef93 feat: update krew index version to refs/tags/v2.7.10 2025-05-14 21:23:16 +08:00
kubenetworks
c265b3581c Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-05-14 21:23:03 +08:00
naison
f802e03d01 hotfix: add heartbeat to manager in the pod 2025-05-14 20:22:57 +08:00
wencaiwulue
c08cb461dd feat: update krew index version to refs/tags/v2.7.9 2025-05-12 17:18:47 +08:00
kubenetworks
1a2649a02a Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-05-12 17:18:34 +08:00
naison
facd6bdb3d hotfix: fix create temp kubeconfig but name contains path separator (#599) 2025-05-12 16:28:15 +08:00
naison
a1117dee62 hotfix: handle not found route packet with gVisor instead of drop it 2025-05-12 15:49:40 +08:00
naison
b28eaef6a7 chore(mod): upgrade purego version to v0.8.3 2025-05-12 15:47:48 +08:00
naison
46aebef01f refactor: remove temp kubeconfig before daemon quit 2025-05-12 15:46:21 +08:00
naison
3791f48737 hotfix: fix create temp kubeconfig 2025-05-12 15:32:02 +08:00
wencaiwulue
fc76b70713 feat: update krew index version to refs/tags/v2.7.8 2025-05-11 00:17:22 +08:00
kubenetworks
e990dc1d0f Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-05-11 00:16:45 +08:00
naison
d636449073 feat: set read/write timeout to 60s for remote tcp conn (#590) 2025-05-10 23:02:31 +08:00
fengcaiwen
e85e1a6c40 refactor: show port-forward log 2025-05-10 18:05:44 +08:00
wencaiwulue
40d09716c4 feat: update krew index version to refs/tags/v2.7.7 2025-05-09 14:44:31 +08:00
kubenetworks
63792172bd Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-05-09 14:44:17 +08:00
naison
ca6a2be70f refactor: optimize get pod/service network cidr logic (#585) 2025-05-09 13:06:32 +08:00
naison
e21fc8cda9 hotfix: duplicated definition of symbol dlopen on go1.23.9 2025-05-09 13:04:45 +08:00
wencaiwulue
1f4698c6f8 feat: update krew index version to refs/tags/v2.7.6 2025-05-08 10:09:00 +08:00
kubenetworks
efea780edf Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-05-08 10:08:49 +08:00
wencaiwulue
bdb21f8964 feat: update krew index version to refs/tags/v2.7.5 2025-05-07 17:00:00 +08:00
naison
e33d2f1928 hotfix: fix init dir 2025-05-07 16:08:56 +08:00
wencaiwulue
e6df115933 feat: update krew index version to refs/tags/v2.7.5 2025-05-07 10:36:28 +08:00
kubenetworks
549e56cd05 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-05-07 10:36:17 +08:00
fengcaiwen
54ed2b711f hotfix: fix init dir permission deny 2025-05-07 09:12:17 +08:00
wencaiwulue
56b81574ac feat: update krew index version to refs/tags/v2.7.4 2025-05-07 08:47:19 +08:00
kubenetworks
ce2b7a010e Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-05-07 08:47:05 +08:00
fengcaiwen
5df0c3ffdc hotfix: fix init dir permission deny 2025-05-07 00:17:35 +08:00
naison
8b0e87592a hotfix: fix init dir permission deny (#573) 2025-05-07 00:01:44 +08:00
naison
31a42c1fa7 feat: update krew index version to refs/tags/v2.7.3 (#571) 2025-05-06 23:55:53 +08:00
naison
ee0957a5c9 Update charts/index.yaml (#572) 2025-05-06 23:55:40 +08:00
naison
206d74c331 feat: use dns query as port-forward health check (#570) 2025-05-06 22:20:15 +08:00
naison
53ed72dee3 Merge pull request #567 from kubenetworks/refactor/refactor-code
refactor: refactor code
2025-04-29 22:16:59 +08:00
fengcaiwen
323235f268 refactor: optimize code 2025-04-29 21:53:34 +08:00
fengcaiwen
6af6622bd3 refactor: change server log level to info 2025-04-29 21:50:08 +08:00
fengcaiwen
18ef72fc20 refactor: forward only one port 2025-04-29 21:48:14 +08:00
fengcaiwen
fe08448249 refactor: split user and root daemon log 2025-04-29 21:40:46 +08:00
fengcaiwen
ebaa4098f1 refactor: change temp kubeconfig to ~/.kubevpn/tmp 2025-04-29 21:39:45 +08:00
fengcaiwen
9ba873494f feat: add heartbeats ping pod ip 2025-04-29 21:34:58 +08:00
naison
da40f3315b Merge pull request #566 from kubenetworks/hotfix/fix-bugs
hotfix: fix bugs
2025-04-27 23:19:40 +08:00
fengcaiwen
c4540b1930 refactor: use tcp conn instead of packet conn 2025-04-27 23:03:45 +08:00
fengcaiwen
a6ec321e46 hotfix: cmp running pod image tag and client version 2025-04-27 23:02:34 +08:00
fengcaiwen
79f8aca7df hotfix: close ssh session 2025-04-27 23:02:06 +08:00
fengcaiwen
6edfc3127d hotfix: quit sudo daemon before user daemon 2025-04-27 23:01:27 +08:00
naison
bed0a9168c Update charts/index.yaml (#564)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-04-25 23:40:24 +08:00
naison
d5ee35bfa8 feat: update krew index version to refs/tags/v2.7.2 (#563)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-04-25 23:39:56 +08:00
naison
9661a122bd refactor: optimize code (#561) 2025-04-25 19:37:03 +08:00
naison
28657e3832 refactor: remove deprecated options of config flags (#560) 2025-04-24 22:44:20 +08:00
naison
6a8a197f48 hotfix: close ssh client if ctx done (#559) 2025-04-24 22:41:24 +08:00
naison
31186fc1d9 refactor: only ssh jump in user daemon (#558) 2025-04-24 22:39:03 +08:00
naison
fca3baf47e refactor: optimize code (#557) 2025-04-23 15:00:00 +08:00
naison
1cae5d270b refactor: optimize ssh logic (#555) 2025-04-21 22:19:31 +08:00
naison
a3556a263d refactor: add additional [2]byte for packet length (#554) 2025-04-21 21:51:01 +08:00
naison
dd80717d8d refactor: return error if get nil daemon client (#553) 2025-04-20 15:49:03 +08:00
naison
537b2940fe perf: route packet by each tcp conn (#548) 2025-04-19 19:14:39 +08:00
naison
9aae88d54b hotfix: set recv/send buffer size 1024k for adding ip to route table on macos (#552) 2025-04-19 15:35:43 +08:00
naison
100a8df723 refactor: revert pr #517 (#551) 2025-04-19 12:35:09 +08:00
naison
48e30b4344 refactor: use go workspace for syncthing gui (#549) 2025-04-19 12:09:06 +08:00
naison
c9f1ce6522 chore: upgrade coredns version (#550) 2025-04-19 10:06:56 +08:00
naison
c42e3475f9 Update charts/index.yaml (#547)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-04-15 23:36:27 +08:00
naison
4fb338b5fc feat: update krew index version to refs/tags/v2.7.1 (#546)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-04-15 23:36:09 +08:00
naison
15243b3935 hotfix: remove closed conn from route map (#545) 2025-04-15 21:33:41 +08:00
naison
f0f9459976 hotfix: set mtu on windows (#544) 2025-04-15 21:32:47 +08:00
naison
ee7d5fa6f9 Update charts/index.yaml (#538) 2025-04-12 14:04:52 +08:00
naison
e393f8371e feat: update krew index version to refs/tags/v2.7.0 (#537)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-04-12 14:04:23 +08:00
naison
ca333fcdaf feat: encrypt with tls 1.3 (#522) 2025-04-12 12:30:05 +08:00
naison
7e4e9e1e0d refactor: add more log detect connect namespace (#536) 2025-04-12 10:53:27 +08:00
naison
58ee2df1a3 docs: add readme.md for helm charts (#534) 2025-04-11 22:06:00 +08:00
naison
15200f1caf refactor: add more log (#533)
* feat: add more log
2025-04-11 21:12:19 +08:00
naison
23baab449c refactor: optimize code (#531) 2025-04-11 19:13:06 +08:00
naison
0ddcaa8acc hotfix: fix bug (#530) 2025-04-11 19:12:15 +08:00
naison
0c122473ce hotfix: fix parse dig command output (#529) 2025-04-11 18:50:14 +08:00
naison
d08f74a57e hotfix: optimize code (#528)
* hotfix: optimize code
2025-04-10 22:53:28 +08:00
naison
cd66bb7907 feat: add log if drop packet (#527)
* feat: add log if drop packet
2025-04-09 22:19:37 +08:00
naison
f303616554 hotfix: fix []byte leak (#525) 2025-04-09 21:08:33 +08:00
naison
3973b85d25 hotfix: remove label (#524) 2025-04-08 22:02:07 +08:00
naison
4fd1f014bd refactor: adjust log level (#523) 2025-04-08 22:01:06 +08:00
naison
fe62cf6c4d hotfix: install missing command dig (#521) 2025-04-07 13:00:13 +08:00
naison
c5900d070c Update charts/index.yaml (#520)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-04-06 21:17:57 +08:00
naison
d84ca66cfb feat: update krew index version to refs/tags/v2.6.0 (#519)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-04-06 21:17:41 +08:00
naison
60c3030e65 hotfix: use echo instead of sysctl to set ipv4 ip_forward feature (#518) 2025-04-06 18:34:34 +08:00
naison
ea574a756b feat: support gcp auth (#517)
* feat: support gcp auth
2025-04-06 17:36:10 +08:00
naison
e8735a68be refactor: optimize logic (#515)
* refactor: optimize logic
2025-04-05 21:48:18 +08:00
naison
d55d290677 feat: update krew index version to refs/tags/v2.5.1 (#513)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-04-04 00:20:33 +08:00
naison
45435bcc48 Update charts/index.yaml (#514)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-04-04 00:04:07 +08:00
naison
dbe9f91ee0 hotfix: ut (#512)
* hotfix: ut
2025-04-03 23:00:03 +08:00
naison
b3d2e1e838 refactor: optimize code (#511) 2025-04-03 21:41:35 +08:00
yuyicai
fa0b343401 feat: change dockerfile (#491)
* feat: change dockerfile

Signed-off-by: yuyicai <yuyicai@hotmail.com>

* chore: change makefile for container test

Signed-off-by: yuyicai <yuyicai@hotmail.com>

* ut

* feat: install openssl

Signed-off-by: yuyicai <yuyicai@hotmail.com>

---------

Signed-off-by: yuyicai <yuyicai@hotmail.com>
Co-authored-by: naison <895703375@qq.com>
2025-04-03 21:10:31 +08:00
naison
a1bb338cdb refactor: optimize code (#510)
* refactor: rename
2025-04-03 20:51:55 +08:00
naison
dbc9df070b feat: add options netstack to helm charts (#509) 2025-04-03 20:45:12 +08:00
naison
804708aabe feat: add options --connect-namespace to proxy and dev mode (#508) 2025-04-03 20:44:59 +08:00
naison
21087fc708 hotfix: fix upgrade bug (#507)
* hotfix: fix upgrade bug
2025-04-03 20:44:43 +08:00
naison
94db7846d8 hotfix: fix detect helm ns but still use -n namespace (#506)
* hotfix: fix detect helm ns but still use -n namespace
2025-04-02 19:20:31 +08:00
naison
e205b77e41 Update charts/index.yaml (#505)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-03-31 14:02:55 +08:00
naison
2927261390 feat: update krew index version to refs/tags/v2.5.0 (#504)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-03-31 14:02:40 +08:00
naison
8f37488207 hotfix: fix upgrade logic (#503) 2025-03-31 12:50:10 +08:00
naison
d05a53a77f Update charts/index.yaml (#502)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-03-30 22:17:12 +08:00
naison
a2df9f7b59 feat: update krew index version to refs/tags/v2.4.3 (#501)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-03-30 22:14:33 +08:00
naison
cd68b1fb00 hotfix: gen envoy rule id by ns and resource uid (#500)
* hotfix: gen envoy rule id by ns and uid
2025-03-30 20:57:11 +08:00
naison
208f607f03 hotfix: fix dns slow (#499) 2025-03-30 11:56:57 +08:00
naison
116a1f1983 feat: detect namespace kubevpn installed by helm (#498) 2025-03-30 11:54:40 +08:00
naison
d191c927f4 feat: add helm to go mod (#497) 2025-03-30 11:52:21 +08:00
naison
a030dc582b feat: support connect one namespace but proxy workload in another namespace (#496) 2025-03-30 11:50:11 +08:00
naison
08bcbe1611 refactor: split connect and proxy mode (#495) 2025-03-30 11:46:37 +08:00
naison
fb428403a2 hotfix: set get running pod timeout 10s to 5s (#494) 2025-03-30 11:43:37 +08:00
naison
4f4bbd79f2 chore: optimize ut (#493)
* chore: optimize ut
2025-03-25 22:33:07 +08:00
naison
1ec3ca4637 hotfix: fix clone mode bug (#492) 2025-03-24 21:53:02 +08:00
yuyicai
484a5cafe4 Merge pull request #490 from kubenetworks/chart-releaser-qqesqc5oa54qow4n
Update index.yaml
2025-03-23 21:25:11 +08:00
yuyicai
b62a6b0185 Merge pull request #489 from kubenetworks/feat/update-krew-index-version
feat: update krew index version to refs/tags/v2.4.2
2025-03-23 21:24:34 +08:00
kubenetworks
90898c8047 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-03-23 12:53:36 +00:00
yuyicai
c06daf68e8 feat: update krew index version to refs/tags/v2.4.2 2025-03-23 12:52:53 +00:00
naison
d65da7ba66 chore(ut): add more ut (#475)
* chore: add more ut
2025-03-23 19:32:45 +08:00
naison
2ac187eb64 hotfix: delete old pod (#488) 2025-03-23 17:13:11 +08:00
naison
b46f7a9877 refactor: divide log to session and backend (#487)
* refactor: divide log to session and backend
2025-03-23 13:59:10 +08:00
naison
a5622b9439 chore: update sample bookinfo resource (#486) 2025-03-23 12:40:14 +08:00
yuyicai
0e8f655673 Merge pull request #483 from kubenetworks/docs-install-from-script
docs: install-from-script
2025-03-18 21:37:25 +08:00
yuyicai
f7250649af docs: install-from-script
Signed-off-by: yuyicai <yuyicai@hotmail.com>
2025-03-18 20:54:47 +08:00
yuyicai
cbaff5e623 Merge pull request #481 from kubenetworks/install-kubevpn-by-shell-script
chore: install kubevpn by shell script
2025-03-17 22:30:02 +08:00
yuyicai
6aee9f0882 chore: install kubevpn by shell script
Signed-off-by: yuyicai <yuyicai@hotmail.com>
2025-03-17 22:09:17 +08:00
yuyicai
1f63a15e01 Merge pull request #480 from kubenetworks/update-bookinfo-to-v1-20-2
chore: upgrade bookinfo to v1.20.2, add arm64 image
2025-03-17 22:05:44 +08:00
yuyicai
a65c26e446 chore: upgrade bookinfo to v1.20.2, add arm64 image
Signed-off-by: yuyicai <yuyicai@hotmail.com>
2025-03-17 21:04:03 +08:00
naison
f5566f6ec2 feat: update krew index version to refs/tags/v2.4.1 (#478)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-03-16 17:55:36 +08:00
naison
543e2d716d Update charts/index.yaml (#479)
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
Co-authored-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-03-16 17:55:15 +08:00
naison
f267443c61 hotfix: delete pod force if check newer spec but have old env (#477) 2025-03-16 17:05:30 +08:00
naison
b6f90812f7 hotfix: restore service target port while leave resource in gvisor mode (#476) 2025-03-16 17:04:55 +08:00
naison
b5ea7b2016 chore: update github action (#474) 2025-03-14 22:24:32 +08:00
naison
30f904d7bb feat: update krew index version to refs/tags/v2.4.0 (#473)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2025-03-14 22:17:20 +08:00
kubenetworks
fde001009e Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-03-14 14:16:56 +00:00
naison
6908991461 refactor: optimize code (#472) 2025-03-14 21:12:11 +08:00
naison
031c2134d8 hotfix: also cleanup in user daemon if error occurs (#471) 2025-03-14 21:02:19 +08:00
naison
77570575ca refactor: upgrade deploy image if client version is incompatibility with image tag (#470) 2025-03-14 20:52:22 +08:00
Spongebob
a70ce62762 feat: add snap store release support (#469) 2025-03-14 19:23:41 +08:00
naison
5edd70452c feat: batch write route msg to unix socket: 'route ip+net: sysctl: no buffer space available' (#466) 2025-03-13 11:07:18 +08:00
naison
24d16b2791 hotfix: fix bugs (#468) 2025-03-13 11:04:49 +08:00
naison
6820dbb30d hotfix: do reverse operation if operation cancelled (#465) 2025-03-12 18:36:12 +08:00
naison
ee26880bf5 refactor: use unix library to add route table and setup ip addr instead of use command route or ifconfig (#463) 2025-03-12 13:08:59 +08:00
naison
05b76094f0 feat: support service type externalName (#464) 2025-03-12 00:26:00 +08:00
yuyicai
2e79a331b4 Merge pull request #461 from kubenetworks/proxy-tun-arch-image
docs: add proxy tun arch image
2025-03-11 11:28:13 +08:00
yuyicai
ec5efc8253 docs: add proxy tun arch image
Signed-off-by: yuyicai <yuyicai@hotmail.com>
2025-03-11 10:11:57 +08:00
naison
4547e84de9 Merge pull request #460 from kubenetworks/chore/update-bookinfo-resource
chore: update bookinfo resource
2025-03-10 22:00:30 +08:00
naison
f0694efeda Merge pull request #459 from kubenetworks/feat/extra-domain-support-ingress-record
feat: options extra-domain support ingress record
2025-03-10 19:39:18 +08:00
naison
8df6da1871 feat: options extra-domain support ingress record 2025-03-10 11:10:04 +00:00
yuyicai
ec7d939f8d Merge pull request #458 from kubenetworks/check-if-need-upgrade-server-image
feat: check if need to upgrade image
2025-03-09 16:28:03 +08:00
yuyicai
a682dfbc2c feat: check if need to upgrade image
Signed-off-by: yuyicai <yuyicai@hotmail.com>
2025-03-09 16:01:56 +08:00
naison
a16c1ef007 Merge pull request #457 from kubenetworks/feat/setup-testcase-on-windows
feat: use kind instead of minikube on macos
2025-03-09 11:11:45 +08:00
naison
ec88fd82f0 feat: use kind on macos 2025-03-09 02:44:25 +00:00
yuyicai
3457a79328 Merge pull request #456 from kubenetworks/change-default-container-image
feat: change default container image
2025-03-09 10:29:58 +08:00
yuyicai
2780f67dd6 feat: change default container image
Signed-off-by: yuyicai <yuyicai@hotmail.com>
2025-03-08 23:13:45 +08:00
naison
24b2195036 Merge pull request #455 from kubenetworks/chore/update-comment
chore: update comment
2025-03-07 17:06:47 +08:00
naison
d61d08694a chore: update comment 2025-03-07 09:04:10 +00:00
naison
f81c7ec3ce Merge pull request #454 from kubenetworks/feat/cmd-alias-support-env-KUBEVPNCONFIG
feat: cmd alias support env KUBEVPNCONFIG
2025-03-07 15:58:04 +08:00
naison
168db06979 feat: cmd alias support env KUBEVPNCONFIG 2025-03-07 07:57:06 +00:00
naison
8ad7463fc7 Merge pull request #453 from kubenetworks/hotfix/fix-cmd-ssh-npe
hotfix: fix cmd ssh resize terminal size npe
2025-03-07 15:38:18 +08:00
naison
8926577885 hotfix: fix cmd ssh resize terminal size npe 2025-03-07 07:37:39 +00:00
naison
0f94f58310 Merge pull request #452 from kubenetworks/refactor/optimize-code
refactor: optimize code
2025-03-07 10:54:40 +08:00
naison
210767d908 refactor: optimize code 2025-03-07 02:53:52 +00:00
yuyicai
ae9c23550f Merge pull request #451 from kubenetworks/build-ghcr-latest-container-image
chore: add ghcr.io latest container image
2025-03-07 09:29:36 +08:00
yuyicai
2f9a025f5b chore: add ghcr.io latest container image
Signed-off-by: yuyicai <yuyicai@hotmail.com>
2025-03-06 22:18:01 +08:00
yuyicai
4d5c4fa426 Merge pull request #449 from kubenetworks/tun-ip-cidr
feat: change tun ip cidr
2025-03-06 21:31:28 +08:00
fengcaiwen
3a4bfa9241 feat: panic if parse network cidr error 2025-03-06 09:14:13 +08:00
yuyicai
db09cbbb6e feat: update tun cidr for kubevpn-traffic-manager
Signed-off-by: yuyicai <yuyicai@hotmail.com>
2025-03-05 23:14:57 +08:00
yuyicai
a87cbf1e9a feat: change tun ip cidr
Signed-off-by: yuyicai <yuyicai@hotmail.com>
2025-03-05 22:27:43 +08:00
naison
547501fc41 Merge pull request #443 from kubenetworks/hotfix/fix-auto-upgrade-deploy-image
hotfix: fix auto upgrade deploy image
2025-02-25 22:25:00 +08:00
naison
7051f24313 hotfix: fix upgrade deploy image 2025-02-25 14:22:55 +00:00
naison
153fe3e5e7 Merge pull request #442 from kubenetworks/feat/update-krew-index-version
feat: update krew index version to refs/tags/v2.3.13
2025-02-23 22:32:32 +08:00
kubenetworks
78914e8765 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-02-23 14:30:35 +00:00
wencaiwulue
2fbfb080e0 feat: update krew index version to refs/tags/v2.3.13 2025-02-23 14:29:43 +00:00
naison
86585214d4 Merge pull request #438 from kubenetworks/hotfix/default-use-spdy-not-websocket-to-portforward
hotfix: default use spdy not websocket protocol to portforward
2025-02-23 21:42:21 +08:00
naison
867aefbc3a Merge pull request #441 from kubenetworks/hotfix/fix-daemon-process-unexpected-exit-on-linux
fix: fix daemon process unexpected exit on linux
2025-02-23 21:39:59 +08:00
fengcaiwen
2037d3b05f fix: fix daemon process unexpected exit on linux 2025-02-23 21:37:52 +08:00
naison
794fd861ba Merge pull request #440 from kubenetworks/hotfix/fix-podlabel-find-service-in-fargate-mode
use match not equal to find svc by pod label in fargate mode
2025-02-22 20:18:54 +08:00
naison
d10a4e3aef use match not equal to find svc by pod label in fargate mode 2025-02-22 12:00:56 +00:00
naison
5b39275f5b hotfix: default use spdy not websocket to portforward 2025-02-21 14:39:21 +00:00
naison
de38a35189 Revert "chore: upload charts to repo charts"
This reverts commit 2793ab20e6.
2025-02-13 10:40:33 +00:00
naison
04c0b33516 Merge pull request #436 from kubenetworks/feat/update-krew-index-version
feat: update krew index version to refs/tags/v2.3.12
2025-02-13 15:50:56 +08:00
kubenetworks
ffdefce23c Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-02-13 07:46:06 +00:00
wencaiwulue
2a3b4d89f7 feat: update krew index version to refs/tags/v2.3.12 2025-02-13 07:20:30 +00:00
naison
b1abafd7f4 Merge pull request #435 from kubenetworks/feat/add-cmd-image-copy
feat: add cmd image copy
2025-02-13 12:50:16 +08:00
naison
12f29f2528 Merge pull request #434 from kubenetworks/hotfix/fix-cmd-ssh-terminal-bug
hotfix: fix ssh terminal bug
2025-02-13 12:50:06 +08:00
naison
7f3f0305e4 feat: add cmd image copy 2025-02-13 04:48:39 +00:00
naison
c947472d47 hotfix: fix ssh terminal bug 2025-02-13 04:46:59 +00:00
naison
4013846cab Merge pull request #433 from kubenetworks/hotfix/use-default-krb5-config
hotfix: use default krb5 config and not cancel context after handle new local connection of PortmapUtil, otherwise ssh.client stop channel also closed
2025-02-13 11:48:14 +08:00
fengcaiwen
399bc4efe0 hotfix: not cancel context after handle new local connection of PortmapUtil, otherwise ssh.client stop channel also closed 2025-02-12 23:27:45 +08:00
fengcaiwen
24367b1b82 hotfix: use default krb5 config 2025-02-12 22:20:51 +08:00
naison
1a32d7a58e Merge pull request #432 from kubenetworks/chore/add-upload-charts-to-repo-charts
chore: upload charts to repo charts
2025-02-09 16:32:35 +08:00
naison
2793ab20e6 chore: upload charts to repo charts 2025-02-09 08:31:00 +00:00
naison
528ac55325 Merge pull request #431 from kubenetworks/chore/upgrade-go-mod-library
chore: upgrade go mod library
2025-02-09 11:19:10 +08:00
fengcaiwen
3896fd1642 chore: upgrade go mod library 2025-02-09 11:07:10 +08:00
naison
819b20bbdb Merge pull request #430 from kubenetworks/chore/upgrade-go-mod-library
chore: upgrade go mod library
2025-02-08 21:38:17 +08:00
fengcaiwen
2fc0bb3f0c chore: upgrade go mod library 2025-02-08 20:45:20 +08:00
naison
a6730613e7 Merge pull request #429 from kubenetworks/hotfix/add-platform-for-cmd-ssh
hotfix: add platform for cmd ssh
2025-02-08 20:12:06 +08:00
naison
3ad0b5d1a3 hotfix: add platform for cmd ssh 2025-02-08 12:04:25 +00:00
naison
3c2b7943b5 Merge pull request #427 from kubenetworks/feat/update-krew-index-version
feat: update krew index version to refs/tags/v2.3.11
2025-02-03 17:25:46 +08:00
kubenetworks
b2f5fc6ac1 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-02-03 09:24:54 +00:00
wencaiwulue
768e8b1931 feat: update krew index version to refs/tags/v2.3.11 2025-02-03 09:24:09 +00:00
naison
abe1bcafd6 Merge pull request #426 from kubenetworks/refactor/cmd-dev-use-exec-command-insead-of-library
refactor: dev mode use exec command instead of library
2025-02-03 14:21:55 +08:00
fengcaiwen
07cfb8b02e refactor: dev mode use exec command instead of library 2025-02-03 14:14:31 +08:00
naison
11a89d8609 Merge pull request #424 from kubenetworks/feat/use-regctl-copy-image-on-local-pc
feat: use regctl copy image on local pc
2025-01-29 14:05:47 +08:00
naison
98baec8253 feat: use regctl copy image on local pc 2025-01-29 06:03:36 +00:00
naison
1d40843e99 Merge pull request #423 from kubenetworks/docs/update-arch-image
docs: update arch image
2025-01-29 11:56:28 +08:00
naison
be327d571b docs: update arch image 2025-01-29 03:55:46 +00:00
naison
8c96431328 Merge pull request #422 from kubenetworks/refactor/logic-create-outbound-pod
refactor: create outbound pod
2025-01-25 21:51:37 +08:00
naison
666a69cdfb refactor: create outbound pod 2025-01-25 13:50:47 +00:00
naison
9a922ae084 Merge pull request #421 from kubenetworks/refactor/refactor-cmd-ssh-client
refactor: cmd ssh client
2025-01-25 20:28:42 +08:00
naison
f55a65e04c refactor: cmd ssh client 2025-01-25 12:27:51 +00:00
naison
a3c166dc7b Merge pull request #420 from kubenetworks/docs/update-gvsior-mesh-arch
docs: update gvisor mesh arch
2025-01-25 10:32:37 +08:00
naison
7426541e0f docs: update gvisor mesh arch 2025-01-25 02:28:27 +00:00
naison
d70ac3418e Merge pull request #418 from kubenetworks/feat/update-krew-index-version
feat: update krew index version to refs/tags/v2.3.10
2025-01-24 21:38:19 +08:00
kubenetworks
5c502c9d5f Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-01-24 13:36:35 +00:00
wencaiwulue
c7d8e381f4 feat: update krew index version to refs/tags/v2.3.10 2025-01-24 13:35:42 +00:00
naison
5ac2588e5d Merge pull request #417 from kubenetworks/docs/add-docs-gvisor-service-mode-proxy-arch
docs: add gvisor service proxy mode arch
2025-01-24 18:53:00 +08:00
naison
e0e45cf84e docs: add gvisor service proxy mode arch 2025-01-24 10:52:18 +00:00
naison
ebfb7168d2 Merge pull request #416 from kubenetworks/feat/add-image-pull-secret-name
feat: add image pull secret name
2025-01-24 14:53:06 +08:00
naison
caee039ffd feat: add image pull secret name 2025-01-24 06:51:34 +00:00
naison
3d4c8be963 Merge pull request #415 from kubenetworks/feat/not-realy-on-cap-net-admin-and-privliaged
feat: support AWS Fargate cluster
2025-01-18 21:26:04 +08:00
fengcaiwen
c6f59e46c9 hotfix: use pod label to match service selector for finding service by pod 2025-01-18 16:23:08 +08:00
fengcaiwen
7d028fc950 feat: proxy mode support proxy multiple workloads 2025-01-18 11:13:09 +08:00
fengcaiwen
12920650ba feat: aws fargate mode works 2025-01-18 11:10:20 +08:00
fengcaiwen
2e96247e74 feat: add cmd uninstall and rename cmd reset 2025-01-18 11:06:07 +08:00
fengcaiwen
b6cfba7db9 feat: fargate mode works basic 2025-01-18 10:52:51 +08:00
naison
8b771e82b5 docs: add supported by JETBRAINS (#411) 2025-01-05 20:25:41 +08:00
naison
d737a6b434 feat: update krew index version to refs/tags/v2.3.9 (#410)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-12-21 23:31:26 +08:00
kubenetworks
420fcd4abb Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-12-21 15:29:42 +00:00
naison
fd786caa0f hotfix: disable net.DefaultResolver.PreferGo (#409) 2024-12-21 22:51:03 +08:00
Constantin Revenko
d3c2ddecc4 return system resolver for sending requests to KubeAPI (#407)
Co-authored-by: Константин Ревенко <konstantin.revenko@mediascope.net>
2024-12-21 22:46:20 +08:00
naison
2e8d251b20 refactor: optimize alias output (#408) 2024-12-21 22:24:03 +08:00
naison
6cd7837d28 feat: update krew index version to refs/tags/v2.3.8 (#406)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-12-19 22:26:33 +08:00
kubenetworks
652a60ce1f Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-12-19 14:19:38 +00:00
naison
fad55dce28 feat(log): log trace if panic (#405) 2024-12-18 20:50:47 +08:00
naison
68d550a80d hotfix: use 64k buffer to read tun device packet for windows tun device mtu 65535 (#404) 2024-12-17 11:46:09 +08:00
Constantin Revenko
51166477c2 Fix panic when removing CIDRs containing API server IP addresses (#403)
* add func removeCIDRsContainingIPs

* remove comments

---------

Co-authored-by: Константин Ревенко <konstantin.revenko@mediascope.net>
2024-12-17 10:10:16 +08:00
naison
4476a38883 feat: update krew index version to refs/tags/v2.3.7 (#400)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-12-15 01:25:55 +08:00
kubenetworks
6597331740 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-12-14 17:25:08 +00:00
naison
6e594fa5a5 hotfix: ignore print cancel GRPC msg (#399) 2024-12-13 23:34:54 +08:00
naison
f046e474af hotfix: return error if resolve extra-domain ip is empty (#397) 2024-12-13 18:26:14 +08:00
naison
062c69de0e hotfix: add traffic-manager pod ip to route table (#396) 2024-12-13 18:21:01 +08:00
naison
b9c1f2a814 hotfix: fix print grpc msg bug (#395) 2024-12-13 18:16:20 +08:00
naison
5599dc6bdd refactor: optimize code (#393) 2024-12-11 21:04:09 +08:00
naison
d068125897 feat: update krew index version to refs/tags/v2.3.6 (#391)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-12-09 20:45:32 +08:00
kubenetworks
959d285294 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-12-09 11:52:05 +00:00
naison
d165dacd20 feat: use origin probe (#390) 2024-12-09 19:13:43 +08:00
naison
9ebc95352a hotfix: envoy control-plane detect enable ipv6 or not to add route (#389) 2024-12-09 18:50:44 +08:00
naison
d9d4091905 feat: update krew index version to refs/tags/v2.3.5 (#388)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-12-06 22:41:24 +08:00
kubenetworks
7618ae30ca Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-12-06 14:40:12 +00:00
naison
1dc3c057a7 hotfix: detect enable ipv6 for envoy (#387)
* hotfix: detect enable ipv6 for envoy

* hotfix: detect pod enable ipv6 for envoy

* hotfix: optimize code
2024-12-06 22:03:37 +08:00
naison
81f62eab31 refactor: refactor print GRPC message (#386) 2024-12-06 19:29:11 +08:00
naison
d9a978d330 hotfix: ignore setup ipv6 failed if not enable ipv6 (#385) 2024-12-05 15:05:54 +08:00
naison
c95cb5ba6c feat: update krew index version to refs/tags/v2.3.4 (#383)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-11-29 21:06:40 +08:00
kubenetworks
d418da83b0 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-11-29 13:03:24 +00:00
naison
24a97de5dc hotfix: add more resolver on macOS (#382) 2024-11-29 20:25:26 +08:00
naison
481b720da6 hotfix: close gvisor endpoint if tcp conn closed (#378) 2024-11-23 17:08:03 +08:00
naison
a1247995e7 feat: update krew index version to refs/tags/v2.3.3 (#377)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-11-22 22:54:56 +08:00
kubenetworks
7cb86d70b0 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-11-22 14:54:14 +00:00
naison
9edf0122a7 feat: alias add description (#376) 2024-11-22 22:09:35 +08:00
naison
5a0533c0fc feat: add gvisor endpoint log (#375) 2024-11-22 22:06:44 +08:00
naison
17a13a2672 hotfix: add idle timeout 120s for gvisor udp forwarder connection (#374) 2024-11-22 22:03:56 +08:00
naison
98c22ba9b7 refactor: refactor code (#373) 2024-11-22 22:00:50 +08:00
naison
880f842203 feat: update krew index version to refs/tags/v2.3.2 (#372)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-11-18 19:52:58 +08:00
kubenetworks
ab09f9e71c Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-11-18 11:52:12 +00:00
naison
ef16641675 refactor: refactor code (#371) 2024-11-18 18:47:54 +08:00
naison
d9a9000d7b hotfix: fix can not ping its own tun IP on windows 2024-11-18 10:43:59 +00:00
naison
a1212f5144 feat: update krew index version to refs/tags/v2.3.1 (#370)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-11-15 21:39:14 +08:00
kubenetworks
f4c22f3073 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-11-15 13:36:37 +00:00
naison
2aa7812cb1 feat: use gvisor parse network packet in pod (#369) 2024-11-15 20:56:10 +08:00
naison
cad5d23d33 feat: update krew index version to refs/tags/v2.2.22 (#367)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-10-30 16:52:55 +08:00
kubenetworks
85e8bd76d2 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-10-30 08:46:09 +00:00
naison
a243842052 hotfix: fix port-forward retry bug (#366) 2024-10-30 10:30:48 +08:00
naison
6e052a5a0b feat: logs lines support '-' sign means starting log file lines (#365) 2024-10-30 09:10:20 +08:00
naison
f966cd29d7 feat: add number of lines to logs (#364) 2024-10-29 18:44:41 +08:00
naison
bfb7ac441d feat: update krew index version to refs/tags/v2.2.21 (#362)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-10-25 22:41:23 +08:00
kubenetworks
0cc8b04bab Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-10-25 14:10:26 +00:00
naison
65ae890842 feat: add APIServer ip to route table if ssh info is not empty (#361) 2024-10-25 21:25:42 +08:00
naison
aa881a589e hotfix: fix ssh and port-forward retry bug (#360)
Co-authored-by: fengcaiwen <fengcaiwen@bytedance.com>
2024-10-25 21:25:03 +08:00
naison
07292fcde5 feat: update krew index version to refs/tags/v2.2.20 (#358)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-10-20 12:02:45 +08:00
kubenetworks
3071ff2439 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-10-20 04:00:07 +00:00
naison
a64eaf66da hotfix: fix port forward and ssh (#357)
* hotfix(port-forward): fix port-forward bug and ssh reconnect bug

* hotfix: remove larger overlapping cidrs for adding routes

* feat: retry port-forward if get pod err is not forbidden

* hotfix: fix ssh and port-forward

* feat: add more log

* hotfix: set go default resolver PreferGo option to true
2024-10-20 11:23:49 +08:00
naison
9238e9914a feat(alias): show available alias name (#354) 2024-10-18 16:15:35 +08:00
naison
6e4aeb288a hotfix: ssh daemon (#352) 2024-10-14 09:16:24 +08:00
wencaiwulue
105c3967e1 feat: update krew index version to refs/tags/v2.2.19 2024-10-10 12:12:34 +08:00
naison
5dae60ffbc hotfix: sleep 200ms reconnect 2024-10-10 11:35:39 +08:00
wencaiwulue
875cb8dc8c feat: update krew index version to refs/tags/v2.2.19 2024-10-10 11:07:19 +08:00
naison
15103837a7 hotfix: fix ssh re-connect logic 2024-10-10 10:28:58 +08:00
wencaiwulue
baf5b79a24 feat: update krew index version to refs/tags/v2.2.19 2024-10-10 08:48:44 +08:00
kubenetworks
5618500e66 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-10-10 00:47:09 +00:00
fengcaiwen
d28096d9fa hotfix: upgrade github action 2024-10-10 08:11:43 +08:00
fengcaiwen
bc960987ea hotfix: set version to tag if commitid is empty in github action workflow 2024-10-09 23:14:24 +08:00
fengcaiwen
1005075367 feat: upgrade go version in Dockerfile 2024-10-09 22:06:46 +08:00
fengcaiwen
8f4de1968a feat: upgrade dlv 2024-10-09 22:04:54 +08:00
fengcaiwen
a93f0b1667 feat: upgrade github action 2024-10-09 21:57:44 +08:00
fengcaiwen
941373a902 feat: upgrade syncthing gui 2024-10-09 21:50:32 +08:00
naison
605fe047ca feat: upgrade syncthing version 2024-10-09 21:50:32 +08:00
naison
4d075b29b3 feat: upgrade go version to 1.23 2024-10-09 21:50:32 +08:00
naison
d141ec869b fix: fix dns on linux (#336)
* fix: fix dns on linux

* feat: detect run in Github action or not to setup DNS
2024-10-09 19:17:50 +08:00
naison
e2757d3916 hotfix: fix setup docker failed on macos (#334) 2024-10-08 10:37:53 +08:00
naison
9d917ae9cb docs: update doc (#333) 2024-09-14 20:01:58 +08:00
naison
0763e8a201 hotfix: fix upgrade on windows (#330)
* hotfix: fix upgrade on windows
2024-09-13 14:12:04 +08:00
naison
274116e44f feat: update krew index version to refs/tags/v2.2.18 (#329)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-09-10 17:57:33 +08:00
kubenetworks
ed375be157 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-09-10 09:39:12 +00:00
naison
be8ef7a127 hotfix: use pro-bing to send heartbeats instead of generating icmp packet (#328) 2024-09-10 16:56:25 +08:00
naison
2bfa82d936 docs: update readme.md (#327) 2024-09-08 15:37:18 +08:00
naison
394bc1a0e4 chore: add link to install tools (#326) 2024-09-07 09:26:20 +08:00
naison
e64b9a3311 feat: use scoop to install kubevpn on Windows (#325) 2024-09-07 08:44:37 +08:00
naison
f9bbaeb3cf chore: update command usage (#324) 2024-09-06 21:28:35 +08:00
naison
ac918b5009 feat: update krew index version to refs/tags/v2.2.17 (#319)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-08-03 16:54:54 +08:00
naison
69b6fa6318 hotfix: fix interface conversion panic (#318) 2024-08-03 16:05:33 +08:00
kubenetworks
63be89bf25 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-08-03 07:45:55 +00:00
naison
c4fb3c5ca0 refactor: remove useless heartbeats (#316) 2024-08-03 15:14:43 +08:00
naison
947d50af85 feat: add syncthing re-connect (#315) 2024-08-03 15:02:48 +08:00
naison
0826f2e20c refactor: refactor log make it more formal (#314) 2024-08-03 15:01:16 +08:00
naison
9f62e02f96 hotfix: use pod ip as dns server if service ip is unreachable (#313) 2024-08-02 12:07:10 +08:00
naison
a3b8c1586d refactor: refactor ssh structure (#311) 2024-07-27 10:37:48 +08:00
naison
675ce2a52f feat: update krew index version to refs/tags/v2.2.16 (#310)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-07-26 22:09:06 +08:00
kubenetworks
79e524e319 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-07-26 13:43:50 +00:00
naison
49adeac14c refactor: refactor command dev (#309) 2024-07-26 21:11:59 +08:00
naison
9283c2f8f7 refactor: shrink dev mode container options (#308) 2024-07-25 16:42:21 +08:00
naison
a48750c048 hotfix: fix clone sync init no permission (#307) 2024-07-24 18:44:15 +08:00
naison
bbf3914f1e hotfix: fix upgrade (#305) 2024-07-23 20:54:06 +08:00
naison
f13e21a049 refactor: refactor code (#306) 2024-07-23 19:11:58 +08:00
naison
a37bfc28da hotfix: fix upgrade use rename but cross device (#304) 2024-07-21 21:08:17 +08:00
naison
862238f65f feat: update krew index version to refs/tags/v2.2.15 (#301)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-07-19 23:03:54 +08:00
kubenetworks
18d6f67a5d Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-07-19 15:03:14 +00:00
naison
4ae09a9dd2 refactor: remove useless code (#300) 2024-07-19 22:28:58 +08:00
naison
1feaacaba9 refactor: refactor code (#299) 2024-07-19 22:25:23 +08:00
naison
bc7d205695 refactor: refactor DHCP logic (#298) 2024-07-19 22:07:35 +08:00
naison
78de74bf08 feat: enable tun ip forward on Windows (#297) 2024-07-19 22:06:14 +08:00
naison
8c0f2098c9 feat: update krew index version to refs/tags/v2.2.14 (#296)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-07-12 23:29:48 +08:00
kubenetworks
44320a792e Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-07-12 15:24:28 +00:00
8077 changed files with 622985 additions and 658463 deletions

.github/ISSUE_TEMPLATE/01-feature.yml (new file)

@@ -0,0 +1,28 @@
+name: Feature request
+description: File a new feature request
+labels: ["enhancement", "needs-triage"]
+body:
+  - type: textarea
+    id: feature
+    attributes:
+      label: Feature description
+      description: Please describe the behavior you'd like to see.
+    validations:
+      required: true
+  - type: textarea
+    id: problem-usecase
+    attributes:
+      label: Problem or use case
+      description: Please explain which problem this would solve, or what the use case is for the feature. Keep in mind that it's more likely to be implemented if it's generally useful for a larger number of users.
+    validations:
+      required: true
+  - type: textarea
+    id: alternatives
+    attributes:
+      label: Alternatives or workarounds
+      description: Please describe any alternatives or workarounds you have considered and, possibly, rejected.
+    validations:
+      required: true

.github/ISSUE_TEMPLATE/02-bug.yml (new file)

@@ -0,0 +1,47 @@
+name: Bug report
+description: File a new bug report
+labels: ["bug", "needs-triage"]
+body:
+  - type: textarea
+    id: what-happened
+    attributes:
+      label: What happened?
+      description: Also tell us, what did you expect to happen, and any steps we might use to reproduce the problem.
+      placeholder: Tell us what you see!
+    validations:
+      required: true
+  - type: input
+    id: version
+    attributes:
+      label: KubeVPN client version
+      description: What version of KubeVPN client are you running?
+      placeholder: v2.7.14
+    validations:
+      required: true
+  - type: input
+    id: server
+    attributes:
+      label: KubeVPN server Image tag
+      description: What version of KubeVPN server image tag are you running?
+      placeholder: v2.7.14
+    validations:
+      required: true
+  - type: input
+    id: platform
+    attributes:
+      label: Platform & operating system
+      description: On what platform(s) are you seeing the problem?
+      placeholder: Linux arm64
+    validations:
+      required: true
+  - type: textarea
+    id: logs
+    attributes:
+      label: Relevant log output
+      description: Please copy and paste any relevant log output or crash backtrace. This will be automatically formatted into code, so no need for backticks.
+      render: shell

.github/krew.yaml

@@ -5,12 +5,11 @@ metadata:
 spec:
   version: {{ .TagName }}
   homepage: https://github.com/kubenetworks/kubevpn
-  shortDescription: "A vpn tunnel tools which can connect to kubernetes cluster network"
+  shortDescription: "KubeVPN offers a Cloud Native Dev Environment that connects to kubernetes cluster network"
   description: |
-    KubeVPN is Cloud Native Dev Environment, connect to kubernetes cluster network, you can access remote kubernetes
-    cluster network, remote
-    kubernetes cluster service can also access your local service. and more, you can run your kubernetes pod on local Docker
-    container with same environment、volume、and network. you can develop your application on local PC totally.
+    KubeVPN offers a Cloud-Native Dev Environment that seamlessly connects to your Kubernetes cluster network.
+    Gain access to the Kubernetes cluster network effortlessly using service names or Pod IP/Service IP. Facilitate the interception of inbound traffic from remote Kubernetes cluster services to your local PC through a service mesh and more.
+    For instance, you have the flexibility to run your Kubernetes pod within a local Docker container, ensuring an identical environment, volume, and network setup. With KubeVPN, empower yourself to develop applications entirely on your local PC!
   platforms:
   - selector:

@@ -10,12 +10,12 @@ jobs:
   linux:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
       - name: Set up Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
-          go-version: '1.22'
+          go-version: '1.23'
           check-latest: true
       - name: Setup Minikube
@@ -24,6 +24,8 @@ jobs:
         uses: medyagh/setup-minikube@latest
         with:
           cache: true
+          cpus: 'max'
+          memory: 'max'
       - name: Kubernetes info
         run: |
@@ -33,20 +35,22 @@ jobs:
       - name: Install demo bookinfo
         run: |
-          minikube image load --remote istio/examples-bookinfo-details-v1:1.16.2
-          minikube image load --remote istio/examples-bookinfo-ratings-v1:1.16.2
-          minikube image load --remote istio/examples-bookinfo-reviews-v1:1.16.2
-          minikube image load --remote istio/examples-bookinfo-productpage-v1:1.16.2
-          minikube image load --remote naison/authors:latest
-          minikube image load --remote nginx:latest
-          minikube image load --remote naison/kubevpn:test
+          minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-details-v1:1.20.2
+          minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-ratings-v1:1.20.2
+          minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-reviews-v1:1.20.2
+          minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-productpage-v1:1.20.2
+          minikube image load --remote ghcr.io/kubenetworks/authors:latest
+          minikube image load --remote ghcr.io/kubenetworks/nginx:latest
           minikube image ls
           eval $(minikube docker-env)
           kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
       - name: Build
         run: |
-          export VERSION=test
+          export VERSION=${{github.event.pull_request.head.sha}}
+          if [[ -z "$VERSION" ]]; then
+            export VERSION=${{ github.sha }}
+          fi
           make kubevpn-linux-amd64
           chmod +x ./bin/kubevpn
           cp ./bin/kubevpn /usr/local/bin/kubevpn
@@ -54,8 +58,7 @@ jobs:
- name: Wait for pods reviews to be ready
run: |
kubectl wait pods -l app=reviews --for=condition=Ready --timeout=3600s
kubectl wait pods -l app=productpage --for=condition=Ready --timeout=3600s
kubectl wait --for=condition=Ready pods --all --timeout=3600s
kubectl get svc -A -o wide
kubectl get pod -A -o wide
kubectl get all -o wide


@@ -11,12 +11,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.23'
check-latest: true
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -31,7 +31,7 @@ jobs:
run: |
RELEASE_VERSION=${GITHUB_REF#refs/*/}
PREVERSION=$(git for-each-ref --sort='-creatordate' --format='%(refname:lstrip=2)' --count=50 'refs/tags/*' | grep -v 'rc' | awk 'NR==2')
echo ${PREVERSION}
echo ${RELEASE_VERSION}
echo ${PREVERSION}
echo "$(./.github/release-note.sh ${PREVERSION} ${RELEASE_VERSION})" > release_note.md
- name: Create Release
@@ -53,13 +53,13 @@ jobs:
git reset --hard
- name: Upload RELEASE_VERSION
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v4
with:
name: RELEASE_VERSION
path: RELEASE_VERSION
- name: Upload UPLOAD_URL
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v4
with:
name: UPLOAD_URL
path: UPLOAD_URL
@@ -104,16 +104,16 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.23'
check-latest: true
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Helm tool installer
uses: Azure/setup-helm@v1
uses: azure/setup-helm@v4
with:
version: "v3.6.3"
- name: Change chart version
@@ -129,7 +129,7 @@ jobs:
tar --transform 's/^charts\/kubevpn/kubevpn/' -zcf kubevpn-${CHART_VERSION}.tgz charts/kubevpn
shasum -a 256 kubevpn-${CHART_VERSION}.tgz | awk '{print $1}' > kubevpn-${CHART_VERSION}.tgz-SHA256
- name: Download UPLOAD_URL
uses: actions/download-artifact@v2
uses: actions/download-artifact@v4
with:
name: UPLOAD_URL
- name: Get Release UPLOAD_URL
@@ -178,12 +178,12 @@ jobs:
needs: release-helm-chart
steps:
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.23'
check-latest: true
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Configure Git
@@ -191,7 +191,7 @@ jobs:
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Install Helm
uses: azure/setup-helm@v3
uses: azure/setup-helm@v4
- name: Change chart version
run: |
VERSION=${GITHUB_REF#refs/*/}
@@ -220,4 +220,47 @@ jobs:
--charts-repo https://github.com/$owner/$repo \
--pages-branch master \
--pages-index-path charts/index.yaml \
--push
--pr
snapcraft:
runs-on: ubuntu-24.04
needs: [ github-pages-deploy ]
env:
SNAPCRAFT_STORE_CREDENTIALS: ${{ secrets.SNAPCRAFT_TOKEN }}
steps:
- name: Check out Git repository
uses: actions/checkout@v3
- name: Install Snapcraft
uses: samuelmeuli/action-snapcraft@v3
- name: Setup LXD
uses: canonical/setup-lxd@main
- name: Use Snapcraft
run: |
RELEASE_VERSION=${GITHUB_REF#refs/*/}
sed -i s#CRAFT_ARCH_BUILD_VERSION#$RELEASE_VERSION#g snap/snapcraft.yaml
snapcraft
snapcraft upload --release=stable kubevpn_${RELEASE_VERSION}_amd64.snap
snapcraft-arm:
runs-on: ubuntu-24.04-arm
needs: [ github-pages-deploy ]
env:
SNAPCRAFT_STORE_CREDENTIALS: ${{ secrets.SNAPCRAFT_TOKEN }}
steps:
- name: Check out Git repository
uses: actions/checkout@v3
- name: Install Snapcraft
uses: samuelmeuli/action-snapcraft@v3
- name: Setup LXD
uses: canonical/setup-lxd@main
- name: Use Snapcraft
run: |
RELEASE_VERSION=${GITHUB_REF#refs/*/}
sed -i s#CRAFT_ARCH_BUILD_VERSION#$RELEASE_VERSION#g snap/snapcraft.yaml
snapcraft
snapcraft upload --release=stable kubevpn_${RELEASE_VERSION}_arm64.snap


@@ -10,29 +10,30 @@ jobs:
image:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.23'
check-latest: true
- name: Push image to docker hub
run: |
echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USER }} --password-stdin
echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
docker buildx create --use
export VERSION=test
make container
export VERSION=${{github.event.pull_request.head.sha}}
if [[ -z "$VERSION" ]]; then
export VERSION=${{ github.sha }}
fi
make container-test
linux:
runs-on: ubuntu-latest
needs: [ "image" ]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.23'
check-latest: true
- name: Setup Minikube
id: minikube
@@ -40,6 +41,8 @@ jobs:
uses: medyagh/setup-minikube@latest
with:
cache: true
cpus: 'max'
memory: 'max'
- name: Kubernetes info
run: |
@@ -48,20 +51,22 @@ jobs:
kubectl get pods -n kube-system -o wide
- name: Install demo bookinfo
run: |
minikube image load --remote istio/examples-bookinfo-details-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-ratings-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-reviews-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-productpage-v1:1.16.2
minikube image load --remote naison/authors:latest
minikube image load --remote nginx:latest
minikube image load --remote naison/kubevpn:test
minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-details-v1:1.20.2
minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-ratings-v1:1.20.2
minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-reviews-v1:1.20.2
minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-productpage-v1:1.20.2
minikube image load --remote ghcr.io/kubenetworks/authors:latest
minikube image load --remote ghcr.io/kubenetworks/nginx:latest
minikube image ls
eval $(minikube docker-env)
kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
- name: Build
run: |
export VERSION=test
export VERSION=${{github.event.pull_request.head.sha}}
if [[ -z "$VERSION" ]]; then
export VERSION=${{ github.sha }}
fi
make kubevpn-linux-amd64
chmod +x ./bin/kubevpn
cp ./bin/kubevpn /usr/local/bin/kubevpn
@@ -69,8 +74,7 @@ jobs:
- name: Wait for pods reviews to be ready
run: |
kubectl wait pods -l app=reviews --for=condition=Ready --timeout=3600s
kubectl wait pods -l app=productpage --for=condition=Ready --timeout=3600s
kubectl wait --for=condition=Ready pods --all --timeout=3600s
kubectl get svc -A -o wide
kubectl get pod -A -o wide
kubectl get all -o wide
@@ -83,50 +87,58 @@ jobs:
run: make ut
macos:
runs-on: macos-12
runs-on: macos-13
needs: [ "image" ]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.23'
check-latest: true
- name: Set up Docker
uses: crazy-max/ghaction-setup-docker@v3
# https://github.com/crazy-max/ghaction-setup-docker/issues/108
- name: Set up QEMU
uses: docker/actions-toolkit/.github/actions/macos-setup-qemu@19ca9ade20f5da695f76a10988d6532058575f82
- name: Set up Docker
uses: docker/setup-docker-action@v4
with:
daemon-config: |
{
"debug": true,
"features": {
"containerd-snapshotter": true
}
}
- uses: azure/setup-kubectl@v4
- name: Install minikube
run: |
set -x
docker version
brew install minikube
minikube start --driver=docker
kubectl get pod -A -o wide
minikube kubectl -- get pod -A -o wide
timeout-minutes: 30
uses: medyagh/setup-minikube@latest
with:
cache: true
cpus: 'max'
memory: 'max'
- name: Kubernetes info
run: |
kubectl config view --flatten --raw
kubectl get pod -A -o wide
kubectl cluster-info
cat ~/.kube/config
kubectl get pods -n kube-system -o wide
- name: Install demo bookinfo
run: |
minikube image load --remote istio/examples-bookinfo-details-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-ratings-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-reviews-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-productpage-v1:1.16.2
minikube image load --remote naison/authors:latest
minikube image load --remote nginx:latest
minikube image load --remote naison/kubevpn:test
minikube image ls
eval $(minikube docker-env)
kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
- name: Build
run: |
export VERSION=test
export VERSION=${{github.event.pull_request.head.sha}}
if [[ -z "$VERSION" ]]; then
export VERSION=${{ github.sha }}
fi
make kubevpn-darwin-amd64
chmod +x ./bin/kubevpn
cp ./bin/kubevpn /usr/local/bin/kubevpn
@@ -134,8 +146,7 @@ jobs:
- name: Wait for pods reviews to be ready
run: |
kubectl wait pods -l app=reviews --for=condition=Ready --timeout=3600s
kubectl wait pods -l app=productpage --for=condition=Ready --timeout=3600s
kubectl wait --for=condition=Ready pods --all --timeout=3600s
kubectl get svc -A -o wide || true
kubectl get pod -A -o wide || true
kubectl get all -o wide || true
@@ -148,23 +159,41 @@ jobs:
windows:
runs-on: windows-latest
env:
VERSION: ${{ github.event.pull_request.head.sha || github.sha }}
needs: [ "image" ]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.23'
- name: Set up Docker
uses: crazy-max/ghaction-setup-docker@v3
uses: docker/setup-docker-action@v4
with:
daemon-config: |
{
"debug": true,
"features": {
"containerd-snapshotter": true
}
}
- run: |
docker info --format '{{.OSType}}'
choco install kind
kind create cluster
kubectl cluster-info
kubectl config view --flatten --raw
- run: |
choco install minikube
minikube start --driver=docker
choco install make
- name: Build
run: make kubevpn-windows-amd64
run: |
make kubevpn-windows-amd64
./bin/kubevpn.exe version
./bin/kubevpn.exe status


@@ -23,12 +23,12 @@ jobs:
arch: 386
steps:
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.22'
go-version: '1.23'
check-latest: true
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Build kubevpn
run: |


@@ -17,13 +17,12 @@ NAMESPACE ?= naison
REPOSITORY ?= kubevpn
IMAGE ?= $(REGISTRY)/$(NAMESPACE)/$(REPOSITORY):$(VERSION)
IMAGE_LATEST ?= docker.io/naison/kubevpn:latest
IMAGE_TEST ?= docker.io/naison/kubevpn:test
IMAGE_GH ?= ghcr.io/kubenetworks/kubevpn:$(VERSION)
IMAGE_GH_LATEST ?= ghcr.io/kubenetworks/kubevpn:latest
# Setup the -ldflags option for go build here, interpolate the variable values
# add '-tag noassets' for syncthing gui
LDFLAGS=-tags noassets --ldflags "-s -w\
-X ${BASE}/pkg/config.Image=${IMAGE} \
LDFLAGS=--ldflags "-s -w\
-X ${BASE}/pkg/config.Image=${IMAGE_GH} \
-X ${BASE}/pkg/config.Version=${VERSION} \
-X ${BASE}/pkg/config.GitCommit=${GIT_COMMIT} \
-X ${BASE}/pkg/config.GitHubOAuthToken=${GitHubOAuthToken} \
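For context: each `-X` flag above overwrites a package-level string variable at link time, which is how the image name and version get baked into the binary. A minimal self-contained sketch of the mechanism (illustrative only, not the actual `pkg/config` code):

```go
// build with: go build -ldflags "-X main.Version=v2.9.4" .
package main

import "fmt"

// Version is a placeholder overwritten by the linker via -ldflags "-X".
var Version = "dev"

func main() {
	fmt.Println("version:", Version) // prints "version: v2.9.4" when built as above
}
```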
@@ -87,16 +86,16 @@ kubevpn-linux-386:
.PHONY: container
container:
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE} -t ${IMAGE_LATEST} -t ${IMAGE_GH} -f $(BUILD_DIR)/Dockerfile --push .
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE} -t ${IMAGE_LATEST} -t ${IMAGE_GH} -t ${IMAGE_GH_LATEST} -f $(BUILD_DIR)/Dockerfile --push .
############################ build local
.PHONY: container-local
container-local: kubevpn-linux-amd64
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE_LATEST} -f $(BUILD_DIR)/local.Dockerfile --push .
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE_LATEST} -t ${IMAGE_GH_LATEST} -f $(BUILD_DIR)/local.Dockerfile --push .
.PHONY: container-test
container-test: kubevpn-linux-amd64
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE_TEST} -f $(BUILD_DIR)/test.Dockerfile --push .
docker build -t ${IMAGE_GH} -f $(BUILD_DIR)/test.Dockerfile --push .
.PHONY: version
version:
@@ -108,7 +107,7 @@ gen:
.PHONY: ut
ut:
go test -tags=noassets -coverprofile=coverage.txt -coverpkg=./... -v ./... -timeout=60m
go test -p=1 -v -timeout=60m -coverprofile=coverage.txt -coverpkg=./... ./...
.PHONY: cover
cover: ut

README.md

@@ -9,6 +9,7 @@
[![Releases][7]](https://github.com/kubenetworks/kubevpn/releases)
[![GoDoc](https://godoc.org/github.com/kubenetworks/kubevpn?status.png)](https://pkg.go.dev/github.com/wencaiwulue/kubevpn/v2)
[![codecov](https://codecov.io/gh/wencaiwulue/kubevpn/graph/badge.svg?token=KMDSINSDEP)](https://codecov.io/gh/wencaiwulue/kubevpn)
[![Snapcraft](https://snapcraft.io/kubevpn/badge.svg)](https://snapcraft.io/kubevpn)
[1]: https://img.shields.io/github/actions/workflow/status/kubenetworks/kubevpn/release.yml?logo=github
@@ -38,45 +39,54 @@ For instance, you have the flexibility to run your Kubernetes pod within a local
environment, volume, and network setup.
With KubeVPN, empower yourself to develop applications entirely on your local PC!
![arch](docs/en/images/kubevpn-proxy-tun-arch.svg)
## Content
1. [QuickStart](./README.md#quickstart)
2. [Functions](./README.md#functions)
3. [FAQ](./README.md#faq)
4. [Architecture](./README.md#architecture)
5. [Contributions](./README.md#Contributions)
3. [Architecture](./README.md#architecture)
4. [Contributions](./README.md#Contributions)
## QuickStart
#### Install from brew
### Install from script ( macOS / Linux)
```shell
curl -fsSL https://kubevpn.dev/install.sh | sh
```
### Install from [brew](https://brew.sh/) (macOS / Linux)
```shell
brew install kubevpn
```
#### Install from custom krew index
### Install from [snap](https://snapcraft.io/kubevpn) (Linux)
```shell
(
kubectl krew index add kubevpn https://github.com/kubenetworks/kubevpn.git && \
kubectl krew install kubevpn/kubevpn && kubectl kubevpn
)
sudo snap install kubevpn
```
#### Install from GitHub release
[LINK](https://github.com/kubenetworks/kubevpn/releases/latest)
#### Install from build it manually
### Install from [scoop](https://scoop.sh/) (Windows)
```shell
(
git clone https://github.com/kubenetworks/kubevpn.git && \
cd kubevpn && make kubevpn && ./bin/kubevpn
)
scoop bucket add extras
scoop install kubevpn
```
### Install from [krew](https://krew.sigs.k8s.io/) (Windows / macOS / Linux)
```shell
kubectl krew index add kubevpn https://github.com/kubenetworks/kubevpn.git
kubectl krew install kubevpn/kubevpn
kubectl kubevpn
```
### Install from GitHub release (Windows / macOS / Linux)
[https://github.com/kubenetworks/kubevpn/releases/latest](https://github.com/kubenetworks/kubevpn/releases/latest)
### Install bookinfo as a demo application
```shell
@@ -93,54 +103,55 @@ kubectl delete -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/
### Connect to k8s cluster network
Use command `kubevpn connect` to connect to the k8s cluster network. The `Password:` prompt requires your computer
password, to enable root operations (creating a tun device).
```shell
➜ ~ kubevpn connect
Password:
start to connect
get cidr from cluster info...
get cidr from cluster info ok
get cidr from cni...
wait pod cni-net-dir-kubevpn to be running timeout, reason , ignore
get cidr from svc...
get cidr from svc ok
get cidr successfully
traffic manager not exist, try to create it...
label namespace default
create serviceAccount kubevpn-traffic-manager
create roles kubevpn-traffic-manager
create roleBinding kubevpn-traffic-manager
create service kubevpn-traffic-manager
create deployment kubevpn-traffic-manager
pod kubevpn-traffic-manager-66d969fd45-9zlbp is Pending
Starting connect
Getting network CIDR from cluster info...
Getting network CIDR from CNI...
Getting network CIDR from services...
Labeling Namespace default
Creating ServiceAccount kubevpn-traffic-manager
Creating Roles kubevpn-traffic-manager
Creating RoleBinding kubevpn-traffic-manager
Creating Service kubevpn-traffic-manager
Creating MutatingWebhookConfiguration kubevpn-traffic-manager
Creating Deployment kubevpn-traffic-manager
Pod kubevpn-traffic-manager-66d969fd45-9zlbp is Pending
Container Reason Message
control-plane ContainerCreating
vpn ContainerCreating
webhook ContainerCreating
pod kubevpn-traffic-manager-66d969fd45-9zlbp is Running
Pod kubevpn-traffic-manager-66d969fd45-9zlbp is Running
Container Reason Message
control-plane ContainerRunning
vpn ContainerRunning
webhook ContainerRunning
Creating mutatingWebhook_configuration for kubevpn-traffic-manager
update ref count successfully
port forward ready
tunnel connected
dns service ok
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
Now you can access resources in the kubernetes cluster !
➜ ~
```
Once connected to the cluster network, use command `kubevpn status` to check the status
```shell
➜ ~ kubevpn status
ID Mode Cluster Kubeconfig Namespace Status
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
CURRENT CONNECTION ID CLUSTER KUBECONFIG NAMESPACE STATUS NETIF
* 03dc50feb8c3 ccijorbccotmqodvr189g /Users/naison/.kube/config default connected utun4
➜ ~
```
Take the IP `172.29.2.134` of pod `productpage-788df7ff7f-jpkcs`
```shell
➜ ~ kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
@@ -152,6 +163,8 @@ ratings-77b6cd4499-zvl6c 1/1 Running 0
reviews-85c88894d9-vgkxd 1/1 Running 0 24d 172.29.2.249 192.168.0.5 <none> <none>
```
Use `ping` to test the connection; it works
```shell
➜ ~ ping 172.29.2.134
PING 172.29.2.134 (172.29.2.134): 56 data bytes
@@ -165,18 +178,22 @@ PING 172.29.2.134 (172.29.2.134): 56 data bytes
round-trip min/avg/max/stddev = 54.293/55.380/56.270/0.728 ms
```
Take the IP `172.21.10.49` of service `productpage`
```shell
➜ ~ kubectl get services -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
authors ClusterIP 172.21.5.160 <none> 9080/TCP 114d app=authors
details ClusterIP 172.21.6.183 <none> 9080/TCP 114d app=details
kubernetes ClusterIP 172.21.0.1 <none> 443/TCP 319d <none>
kubevpn-traffic-manager ClusterIP 172.21.2.86 <none> 8422/UDP,10800/TCP,9002/TCP,80/TCP 2m28s app=kubevpn-traffic-manager
kubevpn-traffic-manager ClusterIP 172.21.2.86 <none> 10801/TCP,9002/TCP,80/TCP 2m28s app=kubevpn-traffic-manager
productpage ClusterIP 172.21.10.49 <none> 9080/TCP 114d app=productpage
ratings ClusterIP 172.21.3.247 <none> 9080/TCP 114d app=ratings
reviews ClusterIP 172.21.8.24 <none> 9080/TCP 114d app=reviews
```
Use command `curl` to test the service connection
```shell
➜ ~ curl 172.21.10.49:9080
<!DOCTYPE html>
@@ -188,8 +205,18 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
<meta name="viewport" content="width=device-width, initial-scale=1">
```
Works as well~
### Domain resolve
Supports k8s DNS name resolution.
A Pod/Service named `productpage` in the `default` namespace can be resolved by any of the following names:
- `productpage`
- `productpage.default`
- `productpage.default.svc.cluster.local`
```shell
➜ ~ curl productpage.default.svc.cluster.local:9080
<!DOCTYPE html>
@@ -222,52 +249,63 @@ use [Domain resolve](./README.md#domain-resolve)
### Connect to multiple kubernetes cluster network
```shell
➜ ~ kubevpn status
ID Mode Cluster Kubeconfig Namespace Status
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
```
```shell
➜ ~ kubevpn connect -n default --kubeconfig ~/.kube/dev_config --lite
start to connect
got cidr from cache
get cidr successfully
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
adding route...
dns service ok
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
```
Already connected to cluster `ccijorbccotmqodvr189g`
```shell
➜ ~ kubevpn status
ID Mode Cluster Kubeconfig Namespace Status
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
1 lite ccidd77aam2dtnc3qnddg /Users/naison/.kube/dev_config default Connected
CURRENT CONNECTION ID CLUSTER KUBECONFIG NAMESPACE STATUS NETIF
* 03dc50feb8c3 ccijorbccotmqodvr189g /Users/naison/.kube/config default connected utun4
```
Then connect to another cluster, `ccidd77aam2dtnc3qnddg`
```shell
➜ ~ kubevpn connect -n default --kubeconfig ~/.kube/dev_config
Starting connect
Got network CIDR from cache
Use exist traffic manager
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
Now you can access resources in the kubernetes cluster !
```
Use command `kubevpn status` to check the connection status
```shell
➜ ~ kubevpn status
CURRENT CONNECTION ID CLUSTER KUBECONFIG NAMESPACE STATUS NETIF
03dc50feb8c3 ccijorbccotmqodvr189g /Users/naison/.kube/config default connected utun4
* 86bfdef0ed05 ccidd77aam2dtnc3qnddg /Users/naison/.kube/dev_config default connected utun5
➜ ~
```
### Reverse proxy
Use command `kubevpn proxy` to proxy all inbound traffic to your local computer.
```shell
➜ ~ kubevpn proxy deployment/productpage
already connect to cluster
start to create remote inbound pod for deployment/productpage
workload default/deployment/productpage is controlled by a controller
rollout status for deployment/productpage
Connected to cluster
Injecting inbound sidecar for deployment/productpage
Checking rollout status for deployment/productpage
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
deployment "productpage" successfully rolled out
rollout status for deployment/productpage successfully
create remote inbound pod for deployment/productpage successfully
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
Rollout successfully for deployment/productpage
Now you can access resources in the kubernetes cluster !
➜ ~
```
Show status
```shell
➜ ~ kubevpn status
CURRENT CONNECTION ID CLUSTER KUBECONFIG NAMESPACE STATUS NETIF
* 03dc50feb8c3 ccijorbccotmqodvr189g /Users/naison/.kube/config default connected utun4
CONNECTION ID NAMESPACE NAME HEADERS PORTS CURRENT PC
03dc50feb8c3 default deployments.apps/productpage * 9080->9080 true
➜ ~
```
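The small local service that receives the proxied traffic is defined earlier in the README and elided from this view. A minimal sketch of such a server (assuming Go's standard `net/http`; the log line mirrors the `>>Received request` output in the sample below):

```go
// Minimal local HTTP service for the proxy demo (illustrative sketch).
package main

import (
	"fmt"
	"net/http"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Matches the ">>Received request" lines shown in the sample output.
		fmt.Printf(">>Received request: %s %s from %s\n", r.Method, r.URL.Path, r.RemoteAddr)
		fmt.Fprint(w, "Hello world!")
	})
	// Listen on the same port the deployment exposes in the cluster.
	_ = http.ListenAndServe(":9080", nil)
}
```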
@@ -307,20 +345,20 @@ then run it
export selector=productpage
export pod=`kubectl get pods -l app=${selector} -n default -o jsonpath='{.items[0].metadata.name}'`
export pod_ip=`kubectl get pod $pod -n default -o jsonpath='{.status.podIP}'`
curl -v -H "a: 1" http://$pod_ip:9080/health
curl -v -H "foo: bar" http://$pod_ip:9080/health
```
The response would look like below
```
curl -v -H "a: 1" http://$pod_ip:9080/health
curl -v -H "foo: bar" http://$pod_ip:9080/health
* Trying 192.168.72.77:9080...
* Connected to 192.168.72.77 (192.168.72.77) port 9080 (#0)
> GET /health HTTP/1.1
> Host: 192.168.72.77:9080
> User-Agent: curl/7.87.0
> Accept: */*
> a: 1
> foo: bar
>
>>Received request: GET /health from xxx.xxx.xxx.xxx:52974
* Mark bundle as not supporting multiuse
@@ -344,26 +382,33 @@ Hello world!%
### Reverse proxy with mesh
Support HTTP, GRPC and WebSocket etc. with specific header `"a: 1"` will route to your local machine
Support HTTP, GRPC and WebSocket etc. with specific header `"foo: bar"` will route to your local machine
```shell
➜ ~ kubevpn proxy deployment/productpage --headers a=1
already connect to cluster
start to create remote inbound pod for deployment/productpage
patch workload default/deployment/productpage with sidecar
rollout status for deployment/productpage
➜ ~ kubevpn proxy deployment/productpage --headers foo=bar
Connected to cluster
Injecting inbound sidecar for deployment/productpage
Checking rollout status for deployment/productpage
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
deployment "productpage" successfully rolled out
rollout status for deployment/productpage successfully
create remote inbound pod for deployment/productpage successfully
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
Rollout successfully for deployment/productpage
Now you can access resources in the kubernetes cluster !
➜ ~
```
first access without header "a: 1", it will access existing pod on kubernetes cluster.
Show status
```shell
➜ ~ kubevpn status
CURRENT CONNECTION ID CLUSTER KUBECONFIG NAMESPACE STATUS NETIF
* 03dc50feb8c3 ccijorbccotmqodvr189g /Users/naison/.kube/config default connected utun4
CONNECTION ID NAMESPACE NAME HEADERS PORTS CURRENT PC
03dc50feb8c3 default deployments.apps/productpage foo=bar 9080->9080 true
➜ ~
```
first access without header "foo: bar", it will access existing pod on kubernetes cluster.
```shell
➜ ~ curl productpage:9080
@@ -377,10 +422,10 @@ first access without header "a: 1", it will access existing pod on kubernetes cl
...
```
Now let's access local service with header `"a: 1"`
Now let's access local service with header `"foo: bar"`
```shell
➜ ~ curl productpage:9080 -H "a: 1"
➜ ~ curl productpage:9080 -H "foo: bar"
>>Received request: GET / from xxx.xxx.xxx.xxx:51296
Hello world!
```
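Any HTTP client that sets the header is routed the same way as curl; a hedged Go sketch (service name, port, and header taken from the demo above):

```go
// Sketch: call the in-cluster service; the "foo: bar" header makes the mesh
// route this request to the locally proxied copy instead of the remote pod.
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "http://productpage:9080/", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("foo", "bar") // drop this header and the request stays in-cluster

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // expected: Hello world!
}
```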
@@ -389,35 +434,35 @@ If you want to cancel proxy, just run command:
```shell
➜ ~ kubevpn leave deployments/productpage
leave workload deployments/productpage
workload default/deployments/productpage is controlled by a controller
leave workload deployments/productpage successfully
Leaving workload deployments/productpage
Checking rollout status for deployments/productpage
Waiting for deployment "productpage" rollout to finish: 0 out of 1 new replicas have been updated...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Rollout successfully for deployments/productpage
```
### Dev mode in local Docker 🐳
### Run mode in local Docker 🐳
Run the Kubernetes pod in a local Docker container, cooperating with the service mesh to intercept traffic with
the specified header (or all traffic) to your local machine.
```shell
➜ ~ kubevpn dev deployment/authors --headers a=1 -it --rm --entrypoint sh
connectting to cluster
start to connect
got cidr from cache
get cidr successfully
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
dns service ok
start to create remote inbound pod for Deployment.apps/authors
patch workload default/Deployment.apps/authors with sidecar
rollout status for Deployment.apps/authors
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
➜ ~ kubevpn run deployment/authors --headers foo=bar --entrypoint sh
Starting connect
Got network CIDR from cache
Use exist traffic manager
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
Injecting inbound sidecar for deployment/authors
Patching workload deployment/authors
Checking rollout status for deployment/authors
Waiting for deployment "authors" rollout to finish: 0 out of 1 new replicas have been updated...
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
deployment "authors" successfully rolled out
rollout status for Deployment.apps/authors successfully
create remote inbound pod for Deployment.apps/authors successfully
Rollout successfully for Deployment.apps/authors
tar: removing leading '/' from member names
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/4563987760170736212:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading `/' from member names
@@ -457,16 +502,14 @@ OK: 8 MiB in 19 packages
{"status":"Authors is healthy"} /opt/microservices # echo "continue testing pod access..."
continue testing pod access...
/opt/microservices # exit
prepare to exit, cleaning up
update ref count successfully
tun device closed
leave resource: deployments.apps/authors
workload default/deployments.apps/authors is controlled by a controller
leave resource: deployments.apps/authors successfully
clean up successfully
prepare to exit, cleaning up
update ref count successfully
clean up successfully
Created container: default_authors
Wait container default_authors to be running...
Container default_authors is running now
Disconnecting from the cluster...
Leaving workload deployments.apps/authors
Disconnecting from the cluster...
Performing cleanup operations
Clearing DNS settings
➜ ~
```
@@ -488,7 +531,7 @@ Here is how to access pod in local docker container
```shell
export authors_pod=`kubectl get pods -l app=authors -n default -o jsonpath='{.items[0].metadata.name}'`
export authors_pod_ip=`kubectl get pod $authors_pod -n default -o jsonpath='{.status.podIP}'`
curl -kv -H "a: 1" http://$authors_pod_ip:80/health
curl -kv -H "foo: bar" http://$authors_pod_ip:80/health
```
Verify the logs of the nginx container
@@ -500,22 +543,20 @@ docker logs $(docker ps --format '{{.Names}}' | grep nginx_default_kubevpn)
If you just want to start up a docker image, there is a simpler way:
```shell
kubevpn dev deployment/authors --no-proxy -it --rm
kubevpn run deployment/authors --no-proxy
```
Example
```shell
➜ ~ kubevpn dev deployment/authors --no-proxy -it --rm
connectting to cluster
start to connect
got cidr from cache
get cidr successfully
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
dns service ok
➜ ~ kubevpn run deployment/authors --no-proxy
Starting connect
Got network CIDR from cache
Use exist traffic manager
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
tar: removing leading '/' from member names
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/5631078868924498209:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading `/' from member names
@@ -533,10 +574,10 @@ Created main container: authors_default_kubevpn_ff34b
Now the main process will hang up to show you log.
If you want to specify the image to start the container locally, you can use the parameter `--docker-image`. When the
If you want to specify the image to start the container locally, you can use the parameter `--dev-image`. When the
image does not exist locally, it will be pulled from the corresponding image registry. If you want to specify startup
parameters, you can use the `--entrypoint` parameter, replacing it with the command you want to execute, such
as `--entrypoint /bin/bash`, for more parameters, see `kubevpn dev --help`.
as `--entrypoint /bin/bash`, for more parameters, see `kubevpn run --help`.
### DinD ( Docker in Docker ) use kubevpn in Docker
@@ -548,57 +589,46 @@ need to special parameter `--network` (inner docker) for sharing network and pid
Example:
```shell
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 naison/kubevpn:v2.0.0
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 ghcr.io/kubenetworks/kubevpn:latest
```
```shell
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 naison/kubevpn:v2.0.0
Unable to find image 'naison/kubevpn:v2.0.0' locally
v2.0.0: Pulling from naison/kubevpn
445a6a12be2b: Already exists
bd6c670dd834: Pull complete
64a7297475a2: Pull complete
33fa2e3224db: Pull complete
e008f553422a: Pull complete
5132e0110ddc: Pull complete
5b2243de1f1a: Pull complete
662a712db21d: Pull complete
4f4fb700ef54: Pull complete
33f0298d1d4f: Pull complete
Digest: sha256:115b975a97edd0b41ce7a0bc1d8428e6b8569c91a72fe31ea0bada63c685742e
Status: Downloaded newer image for naison/kubevpn:v2.0.0
root@d0b3dab8912a:/app# kubevpn dev deployment/authors --headers user=naison -it --entrypoint sh
----------------------------------------------------------------------------------
Warn: Use sudo to execute command kubevpn can not use user env KUBECONFIG.
Because of sudo user env and user env are different.
Current env KUBECONFIG value:
----------------------------------------------------------------------------------
hostname is d0b3dab8912a
connectting to cluster
start to connect
got cidr from cache
get cidr successfully
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
dns service ok
start to create remote inbound pod for Deployment.apps/authors
patch workload default/Deployment.apps/authors with sidecar
rollout status for Deployment.apps/authors
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 ghcr.io/kubenetworks/kubevpn:latest
Unable to find image 'ghcr.io/kubenetworks/kubevpn:latest' locally
latest: Pulling from ghcr.io/kubenetworks/kubevpn
9c704ecd0c69: Already exists
4987d0a976b5: Pull complete
8aa94c4fc048: Pull complete
526fee014382: Pull complete
6c1c2bedceb6: Pull complete
97ac845120c5: Pull complete
ca82aef6a9eb: Pull complete
1fd9534c7596: Pull complete
588bd802eb9c: Pull complete
Digest: sha256:368db2e0d98f6866dcefd60512960ce1310e85c24a398fea2a347905ced9507d
Status: Downloaded newer image for ghcr.io/kubenetworks/kubevpn:latest
WARNING: image with reference ghcr.io/kubenetworks/kubevpn was found but does not match the specified platform: wanted linux/amd64, actual: linux/arm64
root@5732124e6447:/app# kubevpn run deployment/authors --headers user=naison --entrypoint sh
hostname is 5732124e6447
Starting connect
Got network CIDR from cache
Use exist traffic manager
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
Injecting inbound sidecar for deployment/authors
Patching workload deployment/authors
Checking rollout status for deployment/authors
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
deployment "authors" successfully rolled out
rollout status for Deployment.apps/authors successfully
create remote inbound pod for Deployment.apps/authors successfully
Rollout successfully for Deployment.apps/authors
tar: removing leading '/' from member names
/tmp/6460902982794789917:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading `/' from member names
tar: Removing leading `/' from hard link targets
/tmp/5028895788722532426:/var/run/secrets/kubernetes.io/serviceaccount
network mode is container:d0b3dab8912a
Network mode is container:d0b3dab8912a
Created container: nginx_default_kubevpn_6df63
Wait container nginx_default_kubevpn_6df63 to be running...
Container nginx_default_kubevpn_6df63 is running now
@@ -607,7 +637,7 @@ Created main container: authors_default_kubevpn_6df5f
/opt/microservices # ps -ef
PID USER TIME COMMAND
1 root 0:00 {bash} /usr/bin/qemu-x86_64 /bin/bash /bin/bash
14 root 0:02 {kubevpn} /usr/bin/qemu-x86_64 /usr/local/bin/kubevpn kubevpn dev deployment/authors --headers
14 root 0:02 {kubevpn} /usr/bin/qemu-x86_64 /usr/local/bin/kubevpn kubevpn run deployment/authors --headers
25 root 0:01 {kubevpn} /usr/bin/qemu-x86_64 /usr/local/bin/kubevpn /usr/local/bin/kubevpn daemon
37 root 0:04 {kubevpn} /usr/bin/qemu-x86_64 /usr/local/bin/kubevpn /usr/local/bin/kubevpn daemon --sudo
53 root 0:00 nginx: master process nginx -g daemon off;
@@ -653,21 +683,19 @@ OK: 8 MiB in 19 packages
>> Container Received request: GET / from 127.0.0.1:41230
Hello world!/opt/microservices #
/opt/microservices # curl authors:9080/health -H "a: 1"
>>Received request: GET /health from 223.254.0.109:57930
/opt/microservices # curl authors:9080/health -H "foo: bar"
>>Received request: GET /health from 198.19.0.109:57930
Hello world!/opt/microservices #
/opt/microservices # curl localhost:9080/health
{"status":"Authors is healthy"}/opt/microservices # exit
prepare to exit, cleaning up
update ref count successfully
tun device closed
leave resource: deployments.apps/authors
workload default/deployments.apps/authors is controlled by a controller
leave resource: deployments.apps/authors successfully
clean up successfully
prepare to exit, cleaning up
update ref count successfully
clean up successfully
Created container: default_authors
Wait container default_authors to be running...
Container default_authors is running now
Disconnecting from the cluster...
Leaving workload deployments.apps/authors
Disconnecting from the cluster...
Performing cleanup operations
Clearing DNS settings
root@d0b3dab8912a:/app# exit
exit
➜ ~
@@ -677,10 +705,10 @@ during test, check what container is running
```text
➜ ~ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
1cd576b51b66 naison/authors:latest "sh" 4 minutes ago Up 4 minutes authors_default_kubevpn_6df5f
56a6793df82d nginx:latest "/docker-entrypoint.…" 4 minutes ago Up 4 minutes nginx_default_kubevpn_6df63
d0b3dab8912a naison/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
1cd576b51b66 naison/authors:latest "sh" 4 minutes ago Up 4 minutes authors_default_kubevpn_6df5f
56a6793df82d nginx:latest "/docker-entrypoint.…" 4 minutes ago Up 4 minutes nginx_default_kubevpn_6df63
d0b3dab8912a ghcr.io/kubenetworks/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
➜ ~
```
@@ -692,10 +720,13 @@ kubectl delete -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/
### Multiple Protocol
Supports OSI model layers 3 and above, with protocols like `ICMP`, `TCP`, and `UDP`; a connectivity sketch follows the list below.
- TCP
- UDP
- ICMP
- GRPC
- gRPC
- Thrift
- WebSocket
- HTTP
- ...
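A quick way to sanity-check non-HTTP protocols over the tunnel, as a hedged Go sketch (the TCP target reuses the sample `productpage` service IP from this README; the UDP target is purely illustrative, since a UDP write alone only proves the packet was sent):

```go
// Connectivity sanity check over the kubevpn tunnel (illustrative sketch).
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// TCP: once this succeeds, anything riding on TCP (HTTP, gRPC, Thrift, WebSocket) can work.
	if c, err := net.DialTimeout("tcp", "172.21.10.49:9080", 3*time.Second); err == nil {
		fmt.Println("tcp: reachable")
		c.Close()
	} else {
		fmt.Println("tcp: failed:", err)
	}

	// UDP is connectionless: pair this write with an in-cluster listener for a real end-to-end test.
	if c, err := net.Dial("udp", "172.21.10.49:9080"); err == nil {
		_, _ = c.Write([]byte("ping"))
		fmt.Println("udp: datagram sent")
		c.Close()
	}
}
```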
@@ -706,169 +737,9 @@ kubectl delete -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/
- Linux
- Windows
## FAQ
### 1, What should I do if the dependent image cannot be pulled, or the internal environment cannot access docker.io?
Answer: there are two solutions to this problem
- Solution 1: From a network that can access docker.io, transfer the image shown by the command `kubevpn version` to your own
private image registry, and then add the option `--image` to specify that image when starting the command.
Example:
``` shell
➜ ~ kubevpn version
KubeVPN: CLI
Version: v2.0.0
Daemon: v2.0.0
Image: docker.io/naison/kubevpn:v2.0.0
Branch: feature/daemon
Git commit: 7c3a87e14e05c238d8fb23548f95fa1dd6e96936
Built time: 2023-09-30 22:01:51
Built OS/Arch: darwin/arm64
Built Go version: go1.20.5
```
The image is `docker.io/naison/kubevpn:v2.0.0`; transfer this image to your private docker registry
```text
docker pull docker.io/naison/kubevpn:v2.0.0
docker tag docker.io/naison/kubevpn:v2.0.0 [docker registry]/[namespace]/[repo]:[tag]
docker push [docker registry]/[namespace]/[repo]:[tag]
```
Then you can use this image, as follows:
```text
➜ ~ kubevpn connect --image [docker registry]/[namespace]/[repo]:[tag]
got cidr from cache
traffic manager not exist, try to create it...
pod [kubevpn-traffic-manager] status is Running
...
```
- Solution 2: Use the option `--transfer-image`; enabling this flag will automatically transfer the default image to the
address specified by `--image`.
Example
```shell
➜ ~ kubevpn connect --transfer-image --image nocalhost-team-docker.pkg.coding.net/nocalhost/public/kubevpn:v2.0.0
v2.0.0: Pulling from naison/kubevpn
Digest: sha256:450446850891eb71925c54a2fab5edb903d71103b485d6a4a16212d25091b5f4
Status: Image is up to date for naison/kubevpn:v2.0.0
The push refers to repository [nocalhost-team-docker.pkg.coding.net/nocalhost/public/kubevpn]
ecc065754c15: Preparing
f2b6c07cb397: Pushed
448eaa16d666: Pushed
f5507edfc283: Pushed
3b6ea9aa4889: Pushed
ecc065754c15: Pushed
feda785382bb: Pushed
v2.0.0: digest: sha256:85d29ebb53af7d95b9137f8e743d49cbc16eff1cdb9983128ab6e46e0c25892c size: 2000
start to connect
got cidr from cache
get cidr successfully
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
dns service ok
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
➜ ~
```
### 2, When using `kubevpn dev`, I got error code 137; how to resolve it?
```text
dns service ok
tar: Removing leading `/' from member names
tar: Removing leading `/' from hard link targets
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/7375606548554947868:/var/run/secrets/kubernetes.io/serviceaccount
Created container: server_vke-system_kubevpn_0db84
Wait container server_vke-system_kubevpn_0db84 to be running...
Container server_vke-system_kubevpn_0db84 is running on port 8888/tcp: 6789/tcp:6789 now
$ Status: , Code: 137
prepare to exit, cleaning up
port-forward occurs error, err: lost connection to pod, retrying
update ref count successfully
ref-count is zero, prepare to clean up resource
clean up successfully
```
This is because your docker-desktop's allocated resources are less than the pod's requested resources, so it was OOM killed. You
can allocate more resources in your docker-desktop settings: `Preferences --> Resources --> Memory`.
### 3, Using WSL ( Windows Subsystem for Linux ) Docker, in mode `kubevpn dev`, I cannot connect to the cluster network; how to solve this problem?
Answer:
This is because WSL's Docker uses Windows's network, so even if you start a container in WSL, that container will not use the WSL
network, but the Windows network.
Solution:
- 1): Install docker in WSL, instead of using Windows Docker-desktop
- 2): Use command `kubevpn connect` on Windows, and then start `kubevpn dev` in WSL
- 3): Start a container using command `kubevpn connect` on Windows, and then
start `kubevpn dev --network container:$CONTAINER_ID` in WSL
### 4, After using command `kubevpn dev` to enter develop mode, I can't access the kubernetes api-server, and the error `172.17.0.1:443 connect refused` occurs; how to solve this problem?
Answer:
The k8s network subnet may conflict with the docker subnet.
Solution:
- Use option `--connect-mode container` when starting command `kubevpn dev`
- Modify `~/.docker/daemon.json` to add a non-conflicting subnet, e.g. `"bip": "172.15.0.1/24"`.
```shell
➜ ~ cat ~/.docker/daemon.json
{
"builder": {
"gc": {
"defaultKeepStorage": "20GB",
"enabled": true
}
},
"experimental": false,
"features": {
"buildkit": true
},
"insecure-registries": [
]
}
```
Add a non-conflicting subnet, e.g. 172.15.0.1/24
```shell
➜ ~ cat ~/.docker/daemon.json
{
"builder": {
"gc": {
"defaultKeepStorage": "20GB",
"enabled": true
}
},
"experimental": false,
"features": {
"buildkit": true
},
"insecure-registries": [
],
"bip": "172.15.0.1/24"
}
```
Restart docker and retry
## Architecture
Architecture can be found [here](/docs/en/Architecture.md).
[architecture](https://kubevpn.dev/docs/architecture/connect).
## Contributions
@@ -879,4 +750,10 @@ If you want to debug this project on local PC. Please follow the steps bellow:
- Start the daemon and sudo daemon processes in IDE debug mode. (Essentially two gRPC servers)
- Add breakpoint to file `pkg/daemon/action/connect.go:21`.
- Open another terminal run `make kubevpn`.
- Then run `./bin/kubevpn connect` and it will hit breakpoint.
- Then run `./bin/kubevpn connect` and it will hit breakpoint.
### Supported by
[![JetBrains logo.](https://resources.jetbrains.com/storage/products/company/brand/logos/jetbrains.svg)](https://jb.gg/OpenSourceSupport)
### [Donate](https://kubevpn.dev/docs/donate)


@@ -9,6 +9,7 @@
[![Releases][7]](https://github.com/kubenetworks/kubevpn/releases)
[![GoDoc](https://godoc.org/github.com/kubenetworks/kubevpn?status.png)](https://pkg.go.dev/github.com/wencaiwulue/kubevpn/v2)
[![codecov](https://codecov.io/gh/wencaiwulue/kubevpn/graph/badge.svg?token=KMDSINSDEP)](https://codecov.io/gh/wencaiwulue/kubevpn)
[![Snapcraft](https://snapcraft.io/kubevpn/badge.svg)](https://snapcraft.io/kubevpn)
[1]: https://img.shields.io/github/actions/workflow/status/kubenetworks/kubevpn/release.yml?logo=github
@@ -33,46 +34,55 @@ KubeVPN offers a Cloud Native Dev Environment. By connecting to the cloud kubernetes network, local
Docker
simulates the k8s pod runtime to run containers locally (with the same environment variables, disk, and network).
![Architecture](docs/en/images/kubevpn-proxy-tun-arch.svg)
## Contents
1. [Quick start](./README_ZH.md#快速开始)
2. [Functions](./README_ZH.md#功能)
3. [FAQ](./README_ZH.md#问答)
4. [Architecture](./README_ZH.md#架构)
5. [Contributions](./README_ZH.md#贡献代码)
3. [Architecture](./README_ZH.md#架构)
4. [Contributions](./README_ZH.md#贡献代码)
## Quick start
#### Install via brew
### Install via script ( macOS / Linux)
```shell
curl -fsSL https://kubevpn.dev/install.sh | sh
```
### Install via [brew](https://brew.sh/) (macOS / Linux)
```shell
brew install kubevpn
```
#### Install from a custom krew index
### Install via [snap](https://snapcraft.io/kubevpn) (Linux)
```shell
(
kubectl krew index add kubevpn https://github.com/kubenetworks/kubevpn.git && \
kubectl krew install kubevpn/kubevpn && kubectl kubevpn
)
sudo snap install kubevpn
```
#### Download the pre-built binary from Github release
[LINK](https://github.com/kubenetworks/kubevpn/releases/latest)
#### Build the binary yourself
### Install via [scoop](https://scoop.sh/) (Windows)
```shell
(
git clone https://github.com/kubenetworks/kubevpn.git && \
cd kubevpn && make kubevpn && ./bin/kubevpn
)
scoop bucket add extras
scoop install kubevpn
```
#### Install bookinfo as a demo application
### Install via [krew](https://krew.sigs.k8s.io/) (Windows / macOS / Linux)
```shell
kubectl krew index add kubevpn https://github.com/kubenetworks/kubevpn.git
kubectl krew install kubevpn/kubevpn
kubectl kubevpn
```
### Download from Github release (Windows / macOS / Linux)
[https://github.com/kubenetworks/kubevpn/releases/latest](https://github.com/kubenetworks/kubevpn/releases/latest)
### Install bookinfo as a demo application
```shell
kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
@@ -82,44 +92,49 @@ kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/s
### Connect to the cluster network
Use command `kubevpn connect` to connect to the cluster. Note that you need to enter your computer password here, because `root` privileges are required (to create a virtual network device).
```shell
➜ ~ kubevpn connect
Password:
start to connect
get cidr from cluster info...
get cidr from cluster info ok
get cidr from cni...
wait pod cni-net-dir-kubevpn to be running timeout, reason , ignore
get cidr from svc...
get cidr from svc ok
get cidr successfully
traffic manager not exist, try to create it...
label namespace default
create serviceAccount kubevpn-traffic-manager
create roles kubevpn-traffic-manager
create roleBinding kubevpn-traffic-manager
create service kubevpn-traffic-manager
create deployment kubevpn-traffic-manager
pod kubevpn-traffic-manager-66d969fd45-9zlbp is Pending
Starting connect
Getting network CIDR from cluster info...
Getting network CIDR from CNI...
Getting network CIDR from services...
Labeling Namespace default
Creating ServiceAccount kubevpn-traffic-manager
Creating Roles kubevpn-traffic-manager
Creating RoleBinding kubevpn-traffic-manager
Creating Service kubevpn-traffic-manager
Creating MutatingWebhookConfiguration kubevpn-traffic-manager
Creating Deployment kubevpn-traffic-manager
Pod kubevpn-traffic-manager-66d969fd45-9zlbp is Pending
Container Reason Message
control-plane ContainerCreating
vpn ContainerCreating
webhook ContainerCreating
pod kubevpn-traffic-manager-66d969fd45-9zlbp is Running
Pod kubevpn-traffic-manager-66d969fd45-9zlbp is Running
Container Reason Message
control-plane ContainerRunning
vpn ContainerRunning
webhook ContainerRunning
Creating mutatingWebhook_configuration for kubevpn-traffic-manager
update ref count successfully
port forward ready
tunnel connected
dns service ok
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
Now you can access resources in the kubernetes cluster !
➜ ~
```
You are now connected to the cluster. Use command `kubevpn status` to check the status.
```shell
➜ ~ kubevpn status
CURRENT CONNECTION ID CLUSTER KUBECONFIG NAMESPACE STATUS NETIF
* 03dc50feb8c3 ccijorbccotmqodvr189g /Users/naison/.kube/config default connected utun4
➜ ~
```
@@ -134,6 +149,8 @@ ratings-77b6cd4499-zvl6c 1/1 Running 0
reviews-85c88894d9-vgkxd 1/1 Running 0 24d 172.29.2.249 192.168.0.5 <none> <none>
```
Pick a pod IP, for example the IP `172.29.2.134` of pod `productpage-788df7ff7f-jpkcs`
```shell
➜ ~ ping 172.29.2.134
PING 172.29.2.134 (172.29.2.134): 56 data bytes
@@ -147,18 +164,22 @@ PING 172.29.2.134 (172.29.2.134): 56 data bytes
round-trip min/avg/max/stddev = 54.293/55.380/56.270/0.728 ms
```
The ping should go through directly, which means the local machine can access the cluster network.
```shell
➜ ~ kubectl get services -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
authors ClusterIP 172.21.5.160 <none> 9080/TCP 114d app=authors
details ClusterIP 172.21.6.183 <none> 9080/TCP 114d app=details
kubernetes ClusterIP 172.21.0.1 <none> 443/TCP 319d <none>
kubevpn-traffic-manager ClusterIP 172.21.2.86 <none> 8422/UDP,10800/TCP,9002/TCP,80/TCP 2m28s app=kubevpn-traffic-manager
kubevpn-traffic-manager ClusterIP 172.21.2.86 <none> 10800/TCP,9002/TCP,80/TCP 2m28s app=kubevpn-traffic-manager
productpage ClusterIP 172.21.10.49 <none> 9080/TCP 114d app=productpage
ratings ClusterIP 172.21.3.247 <none> 9080/TCP 114d app=ratings
reviews ClusterIP 172.21.8.24 <none> 9080/TCP 114d app=reviews
```
Pick a service IP, for example the IP `172.21.10.49` of service `productpage`, and try accessing the `productpage` service
```shell
➜ ~ curl 172.21.10.49:9080
<!DOCTYPE html>
@@ -170,8 +191,16 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
<meta name="viewport" content="width=device-width, initial-scale=1">
```
As you can see, it is also accessible; in other words, the cluster's pods and services can now be reached locally~
### Domain resolution
Supports k8s DNS resolution. For example, a Pod or Service named `productpage` in the `default` namespace can be resolved by the following names:
- `productpage`
- `productpage.default`
- `productpage.default.svc.cluster.local`
```shell
➜ ~ curl productpage.default.svc.cluster.local:9080
<!DOCTYPE html>
@@ -183,8 +212,15 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
<meta name="viewport" content="width=device-width, initial-scale=1">
```
As you can see, the name resolves correctly and the expected content is returned.
### Short domain resolution
When connected to a namespace, services in it can be accessed directly by `service` name; to access services in other namespaces, include the namespace as part of the domain, using names like the following:
- `productpage.default`
- `productpage.default.svc.cluster.local`
```shell
➜ ~ curl productpage:9080
<!DOCTYPE html>
@@ -196,57 +232,72 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
...
```
As you can see, cluster resources can be accessed directly by service name.
### Connect to multiple cluster networks
```shell
➜ ~ kubevpn status
ID Mode Cluster Kubeconfig Namespace Status
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
```
```shell
➜ ~ kubevpn connect -n default --kubeconfig ~/.kube/dev_config --lite
start to connect
got cidr from cache
get cidr successfully
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
adding route...
dns service ok
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
```
You are already connected to one cluster, `ccijorbccotmqodvr189g`
```shell
➜ ~ kubevpn status
ID Mode Cluster Kubeconfig Namespace Status
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
1 lite ccidd77aam2dtnc3qnddg /Users/naison/.kube/dev_config default Connected
CURRENT CONNECTION ID CLUSTER KUBECONFIG NAMESPACE STATUS NETIF
* 03dc50feb8c3 ccijorbccotmqodvr189g /Users/naison/.kube/config default connected utun4
```
```shell
➜ ~ kubevpn connect -n default --kubeconfig ~/.kube/dev_config
Starting connect
Got network CIDR from cache
Use exist traffic manager
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
Now you can access resources in the kubernetes cluster !
```
Use command `kubevpn status` to check the current connection status.
```shell
➜ ~ kubevpn status
CURRENT CONNECTION ID CLUSTER KUBECONFIG NAMESPACE STATUS NETIF
03dc50feb8c3 ccijorbccotmqodvr189g /Users/naison/.kube/config default connected utun4
* 86bfdef0ed05 ccidd77aam2dtnc3qnddg /Users/naison/.kube/dev_config default connected utun5
➜ ~
```
Now you are connected to multiple clusters.
### Reverse proxy
Use command `kubevpn proxy` to proxy all inbound traffic to your local computer.
```shell
➜ ~ kubevpn proxy deployment/productpage
already connect to cluster
start to create remote inbound pod for deployment/productpage
workload default/deployment/productpage is controlled by a controller
rollout status for deployment/productpage
Connected to cluster
Injecting inbound sidecar for deployment/productpage
Checking rollout status for deployment/productpage
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
deployment "productpage" successfully rolled out
rollout status for deployment/productpage successfully
create remote inbound pod for deployment/productpage successfully
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
Rollout successfully for deployment/productpage
Now you can access resources in the kubernetes cluster !
➜ ~
```
Check the status
```shell
➜ ~ kubevpn status
CURRENT CONNECTION ID CLUSTER KUBECONFIG NAMESPACE STATUS NETIF
* 03dc50feb8c3 ccijorbccotmqodvr189g /Users/naison/.kube/config default connected utun4
CONNECTION ID NAMESPACE NAME HEADERS PORTS CURRENT PC
03dc50feb8c3 default deployments.apps/productpage * 9080->9080 true
➜ ~
```
Now start a service locally with `go` to receive the traffic.
```go
package main
@@ -263,6 +314,8 @@ func main() {
}
```
Access the `productpage` service in the cluster directly by `service` name.
```shell
➜ ~ curl productpage:9080
Hello world!%
@@ -270,27 +323,38 @@ Hello world!%
Hello world!%
```
As you can see, the request hits the service on your local computer directly.
### Reverse proxy with service mesh
Supports HTTP, GRPC, WebSocket, etc.; traffic carrying the specified header `"a: 1"` will be routed to your local machine
Supports HTTP, GRPC, WebSocket, etc.; traffic carrying the specified header `"foo: bar"` will be routed to your local machine
```shell
➜ ~ kubevpn proxy deployment/productpage --headers a=1
already connect to cluster
start to create remote inbound pod for deployment/productpage
patch workload default/deployment/productpage with sidecar
rollout status for deployment/productpage
➜ ~ kubevpn proxy deployment/productpage --headers foo=bar
Connected to cluster
Injecting inbound sidecar for deployment/productpage
Checking rollout status for deployment/productpage
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
deployment "productpage" successfully rolled out
rollout status for deployment/productpage successfully
create remote inbound pod for deployment/productpage successfully
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
Rollout successfully for deployment/productpage
Now you can access resources in the kubernetes cluster !
➜ ~
```
Query the status
```shell
➜ ~ kubevpn status
CURRENT CONNECTION ID CLUSTER KUBECONFIG NAMESPACE STATUS NETIF
* 03dc50feb8c3 ccijorbccotmqodvr189g /Users/naison/.kube/config default connected utun4
CONNECTION ID NAMESPACE NAME HEADERS PORTS CURRENT PC
03dc50feb8c3 default deployments.apps/productpage foo=bar 9080->9080 true
➜ ~
```
Access the cluster resource without the header, and you can see the response comes from the service in the cluster.
```shell
➜ ~ curl productpage:9080
<!DOCTYPE html>
@@ -303,8 +367,10 @@ create remote inbound pod for deployment/productpage successfully
...
```
Access the cluster resource with the specific header, and you can see the response comes from the local service.
```shell
➜ ~ curl productpage:9080 -H "a: 1"
➜ ~ curl productpage:9080 -H "foo: bar"
Hello world!%
```
@@ -312,35 +378,35 @@ Hello world!%
```shell
➜ ~ kubevpn leave deployments/productpage
leave workload deployments/productpage
workload default/deployments/productpage is controlled by a controller
leave workload deployments/productpage successfully
Leaving workload deployments/productpage
Checking rollout status for deployments/productpage
Waiting for deployment "productpage" rollout to finish: 0 out of 1 new replicas have been updated...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Rollout successfully for deployments/productpage
```
### Enter dev mode locally 🐳
### Enter run mode locally 🐳
Run the Kubernetes pod in a local Docker container and, combined with the service mesh, intercept traffic carrying the specified header (or all traffic) to your local machine. This mode depends on a local
Docker installation.
```shell
➜ ~ kubevpn dev deployment/authors --headers a=1 -it --rm --entrypoint sh
connectting to cluster
start to connect
got cidr from cache
get cidr successfully
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
dns service ok
start to create remote inbound pod for Deployment.apps/authors
patch workload default/Deployment.apps/authors with sidecar
rollout status for Deployment.apps/authors
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
➜ ~ kubevpn run deployment/authors --headers foo=bar --entrypoint sh
Starting connect
Got network CIDR from cache
Use exist traffic manager
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
Injecting inbound sidecar for deployment/authors
Patching workload deployment/authors
Checking rollout status for deployment/authors
Waiting for deployment "authors" rollout to finish: 0 out of 1 new replicas have been updated...
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
deployment "authors" successfully rolled out
rollout status for Deployment.apps/authors successfully
create remote inbound pod for Deployment.apps/authors successfully
Rollout successfully for Deployment.apps/authors
tar: removing leading '/' from member names
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/4563987760170736212:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading `/' from member names
@@ -377,17 +443,17 @@ OK: 8 MiB in 19 packages
/opt/microservices # 2023/09/30 13:41:58 Start listening http port 9080 ...
/opt/microservices # curl localhost:9080/health
{"status":"Authors is healthy"}/opt/microservices # exit
prepare to exit, cleaning up
update ref count successfully
tun device closed
leave resource: deployments.apps/authors
workload default/deployments.apps/authors is controlled by a controller
leave resource: deployments.apps/authors successfully
clean up successfully
prepare to exit, cleaning up
update ref count successfully
clean up successfully
{"status":"Authors is healthy"} /opt/microservices # echo "continue testing pod access..."
continue testing pod access...
/opt/microservices # exit
Created container: default_authors
Wait container default_authors to be running...
Container default_authors is running now
Disconnecting from the cluster...
Leaving workload deployments.apps/authors
Disconnecting from the cluster...
Performing cleanup operations
Clearing DNS settings
➜ ~
```
@@ -406,22 +472,20 @@ fc04e42799a5 nginx:latest "/docker-entrypoint.…" 37 sec
If you just want to start the image locally, there is a simpler way:
```shell
kubevpn dev deployment/authors --no-proxy -it --rm
kubevpn run deployment/authors --no-proxy
```
For example:
```shell
➜ ~ kubevpn dev deployment/authors --no-proxy -it --rm
connectting to cluster
start to connect
got cidr from cache
get cidr successfully
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
dns service ok
➜ ~ kubevpn run deployment/authors --no-proxy
Starting connect
Got network CIDR from cache
Use exist traffic manager
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
tar: removing leading '/' from member names
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/5631078868924498209:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading `/' from member names
@@ -439,9 +503,9 @@ Created main container: authors_default_kubevpn_ff34b
At this point the program will block, showing logs by default.
If you want to specify the image used to start the container locally, use the `--docker-image` flag; when the image does not exist locally,
If you want to specify the image used to start the container locally, use the `--dev-image` flag; when the image does not exist locally,
it will be pulled from the corresponding registry. If you want to specify startup arguments, use the `--entrypoint`
flag to replace the command to execute, e.g. `--entrypoint /bin/bash`. For more flags, see `kubevpn dev --help`.
flag to replace the command to execute, e.g. `--entrypoint /bin/bash`. For more flags, see `kubevpn run --help`.
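For instance, a hedged sketch combining both flags (the image name below is an illustrative assumption):
```shell
# Use a custom local image and an interactive shell as the entrypoint
kubevpn run deployment/authors --dev-image ubuntu:22.04 --entrypoint /bin/bash
```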
### DinD (Docker in Docker): using kubevpn inside Docker
@@ -452,57 +516,46 @@ Created main container: authors_default_kubevpn_ff34b
For example:
```shell
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 naison/kubevpn:v2.0.0
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 ghcr.io/kubenetworks/kubevpn:latest
```
```shell
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 naison/kubevpn:v2.0.0
Unable to find image 'naison/kubevpn:v2.0.0' locally
v2.0.0: Pulling from naison/kubevpn
445a6a12be2b: Already exists
bd6c670dd834: Pull complete
64a7297475a2: Pull complete
33fa2e3224db: Pull complete
e008f553422a: Pull complete
5132e0110ddc: Pull complete
5b2243de1f1a: Pull complete
662a712db21d: Pull complete
4f4fb700ef54: Pull complete
33f0298d1d4f: Pull complete
Digest: sha256:115b975a97edd0b41ce7a0bc1d8428e6b8569c91a72fe31ea0bada63c685742e
Status: Downloaded newer image for naison/kubevpn:v2.0.0
root@d0b3dab8912a:/app# kubevpn dev deployment/authors --headers user=naison -it --entrypoint sh
----------------------------------------------------------------------------------
Warn: Use sudo to execute command kubevpn can not use user env KUBECONFIG.
Because of sudo user env and user env are different.
Current env KUBECONFIG value:
----------------------------------------------------------------------------------
hostname is d0b3dab8912a
connectting to cluster
start to connect
got cidr from cache
get cidr successfully
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
dns service ok
start to create remote inbound pod for Deployment.apps/authors
patch workload default/Deployment.apps/authors with sidecar
rollout status for Deployment.apps/authors
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 ghcr.io/kubenetworks/kubevpn:latest
Unable to find image 'ghcr.io/kubenetworks/kubevpn:latest' locally
latest: Pulling from ghcr.io/kubenetworks/kubevpn
9c704ecd0c69: Already exists
4987d0a976b5: Pull complete
8aa94c4fc048: Pull complete
526fee014382: Pull complete
6c1c2bedceb6: Pull complete
97ac845120c5: Pull complete
ca82aef6a9eb: Pull complete
1fd9534c7596: Pull complete
588bd802eb9c: Pull complete
Digest: sha256:368db2e0d98f6866dcefd60512960ce1310e85c24a398fea2a347905ced9507d
Status: Downloaded newer image for ghcr.io/kubenetworks/kubevpn:latest
WARNING: image with reference ghcr.io/kubenetworks/kubevpn was found but does not match the specified platform: wanted linux/amd64, actual: linux/arm64
root@5732124e6447:/app# kubevpn run deployment/authors --headers user=naison --entrypoint sh
hostname is 5732124e6447
Starting connect
Got network CIDR from cache
Use exist traffic manager
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
Injecting inbound sidecar for deployment/authors
Patching workload deployment/authors
Checking rollout status for deployment/authors
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
deployment "authors" successfully rolled out
rollout status for Deployment.apps/authors successfully
create remote inbound pod for Deployment.apps/authors successfully
Rollout successfully for Deployment.apps/authors
tar: removing leading '/' from member names
/tmp/6460902982794789917:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading `/' from member names
tar: Removing leading `/' from hard link targets
/tmp/5028895788722532426:/var/run/secrets/kubernetes.io/serviceaccount
network mode is container:d0b3dab8912a
Network mode is container:d0b3dab8912a
Created container: nginx_default_kubevpn_6df63
Wait container nginx_default_kubevpn_6df63 to be running...
Container nginx_default_kubevpn_6df63 is running now
@@ -511,7 +564,7 @@ Created main container: authors_default_kubevpn_6df5f
/opt/microservices # ps -ef
PID USER TIME COMMAND
1 root 0:00 {bash} /usr/bin/qemu-x86_64 /bin/bash /bin/bash
14 root 0:02 {kubevpn} /usr/bin/qemu-x86_64 /usr/local/bin/kubevpn kubevpn dev deployment/authors --headers
14 root 0:02 {kubevpn} /usr/bin/qemu-x86_64 /usr/local/bin/kubevpn kubevpn run deployment/authors --headers
25 root 0:01 {kubevpn} /usr/bin/qemu-x86_64 /usr/local/bin/kubevpn /usr/local/bin/kubevpn daemon
37 root 0:04 {kubevpn} /usr/bin/qemu-x86_64 /usr/local/bin/kubevpn /usr/local/bin/kubevpn daemon --sudo
53 root 0:00 nginx: master process nginx -g daemon off;
@@ -519,77 +572,82 @@ PID USER TIME COMMAND
Executing busybox-1.33.1-r3.trigger
OK: 8 MiB in 19 packagesnx: worker process
/opt/microservices #
/opt/microservices # cat > hello.go <<EOF
package main
import (
"fmt"
"io"
"net/http"
)
func main() {
http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
_, _ = io.WriteString(writer, "Hello world!")
fmt.Println(">> Container Received request: %s %s from %s\n", request.Method, request.RequestURI, request.RemoteAddr)
})
fmt.Println("Start listening http port 9080 ...")
_ = http.ListenAndServe(":9080", nil)
}
EOF
/opt/microservices # go build hello.go
/opt/microservices #
/opt/microservices # ls -alh
total 12M
drwxr-xr-x 1 root root 26 Nov 4 10:29 .
drwxr-xr-x 1 root root 26 Oct 18 2021 ..
-rwxr-xr-x 1 root root 6.3M Oct 18 2021 app
-rwxr-xr-x 1 root root 5.8M Nov 4 10:29 hello
-rw-r--r-- 1 root root 387 Nov 4 10:28 hello.go
/opt/microservices #
/opt/microservices # apk add curl
OK: 8 MiB in 19 packages
/opt/microservices # curl localhost:80
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
/opt/microservices # ./hello &
/opt/microservices # Start listening http port 9080 ...
[2]+ Done ./hello
/opt/microservices # curl localhost:9080
>> Container Received request: GET / from 127.0.0.1:41230
Hello world!/opt/microservices #
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
/opt/microservices # ls
app
/opt/microservices # ls -alh
total 6M
drwxr-xr-x 2 root root 4.0K Oct 18 2021 .
drwxr-xr-x 1 root root 4.0K Oct 18 2021 ..
-rwxr-xr-x 1 root root 6.3M Oct 18 2021 app
/opt/microservices # ./app &
/opt/microservices # 2023/09/30 14:27:32 Start listening http port 9080 ...
/opt/microservices # curl authors:9080/health
/opt/microservices # curl authors:9080/health
{"status":"Authors is healthy"}/opt/microservices #
/opt/microservices # curl authors:9080/health -H "foo: bar"
>>Received request: GET /health from 198.19.0.109:57930
Hello world!/opt/microservices #
/opt/microservices # curl localhost:9080/health
{"status":"Authors is healthy"}/opt/microservices # exit
prepare to exit, cleaning up
update ref count successfully
tun device closed
leave resource: deployments.apps/authors
workload default/deployments.apps/authors is controlled by a controller
leave resource: deployments.apps/authors successfully
clean up successfully
prepare to exit, cleaning up
update ref count successfully
clean up successfully
Created container: default_authors
Wait container default_authors to be running...
Container default_authors is running now
Disconnecting from the cluster...
Leaving workload deployments.apps/authors
Disconnecting from the cluster...
Performing cleanup operations
Clearing DNS settings
root@d0b3dab8912a:/app# exit
exit
➜ ~
```
As you can see, three containers were actually started locally with `Docker`.
```text
➜ ~ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
1cd576b51b66 naison/authors:latest "sh" 4 minutes ago Up 4 minutes authors_default_kubevpn_6df5f
56a6793df82d nginx:latest "/docker-entrypoint.…" 4 minutes ago Up 4 minutes nginx_default_kubevpn_6df63
d0b3dab8912a naison/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
1cd576b51b66 naison/authors:latest "sh" 4 minutes ago Up 4 minutes authors_default_kubevpn_6df5f
56a6793df82d nginx:latest "/docker-entrypoint.…" 4 minutes ago Up 4 minutes nginx_default_kubevpn_6df63
d0b3dab8912a ghcr.io/kubenetworks/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
➜ ~
```
### Multiple protocols supported
Protocols at OSI layer 3 and above are supported (a usage sketch follows the list), for example:
- TCP
- UDP
- ICMP
- GRPC
- gRPC
- Thrift
- WebSocket
- HTTP
- ...
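A hedged sketch of exercising several of these protocols once connected; the pod label and DNS service IP below are illustrative assumptions:
```shell
# TCP: plain HTTP against a service name
curl productpage:9080
# ICMP: ping a pod IP looked up via kubectl (label is illustrative)
export POD_IP=$(kubectl get pods -l app=productpage -o jsonpath="{.items[0].status.podIP}")
ping $POD_IP
# UDP: query the in-cluster DNS directly (service IP is illustrative)
dig @10.96.0.10 productpage.default.svc.cluster.local
```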
@@ -600,164 +658,9 @@ d0b3dab8912a naison/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago
- Linux
- Windows
## FAQ
### 1. What if the required image cannot be pulled, or docker.io is unreachable from an internal network?
Answer: there are two ways to solve this.
- First: on a network that can reach docker.io, copy the image shown by `kubevpn version`
  to your own private registry, then add `--image <new image>` when starting the command.
For example:
``` shell
➜ ~ kubevpn version
KubeVPN: CLI
Version: v2.0.0
Daemon: v2.0.0
Image: docker.io/naison/kubevpn:v2.0.0
Branch: feature/daemon
Git commit: 7c3a87e14e05c238d8fb23548f95fa1dd6e96936
Built time: 2023-09-30 22:01:51
Built OS/Arch: darwin/arm64
Built Go version: go1.20.5
```
The image is `docker.io/naison/kubevpn:v2.0.0`; copy it to your own registry.
```text
docker pull docker.io/naison/kubevpn:v2.0.0
docker tag docker.io/naison/kubevpn:v2.0.0 [docker registry]/[namespace]/[repo]:[tag]
docker push [docker registry]/[namespace]/[repo]:[tag]
```
Then you can use this image, as follows:
```text
➜ ~ kubevpn connect --image [docker registry]/[namespace]/[repo]:[tag]
got cidr from cache
traffic manager not exist, try to create it...
pod [kubevpn-traffic-manager] status is Running
...
```
- Second: use the `--transfer-image` option, which automatically copies the image to the address specified by `--image`.
For example:
```shell
➜ ~ kubevpn connect --transfer-image --image nocalhost-team-docker.pkg.coding.net/nocalhost/public/kubevpn:v2.0.0
v2.0.0: Pulling from naison/kubevpn
Digest: sha256:450446850891eb71925c54a2fab5edb903d71103b485d6a4a16212d25091b5f4
Status: Image is up to date for naison/kubevpn:v2.0.0
The push refers to repository [nocalhost-team-docker.pkg.coding.net/nocalhost/public/kubevpn]
ecc065754c15: Preparing
f2b6c07cb397: Pushed
448eaa16d666: Pushed
f5507edfc283: Pushed
3b6ea9aa4889: Pushed
ecc065754c15: Pushed
feda785382bb: Pushed
v2.0.0: digest: sha256:85d29ebb53af7d95b9137f8e743d49cbc16eff1cdb9983128ab6e46e0c25892c size: 2000
start to connect
got cidr from cache
get cidr successfully
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
dns service ok
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
➜ ~
```
### 2. When entering dev mode with `kubevpn dev`, an error 137 occurs. How do I fix it?
```text
dns service ok
tar: Removing leading `/' from member names
tar: Removing leading `/' from hard link targets
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/7375606548554947868:/var/run/secrets/kubernetes.io/serviceaccount
Created container: server_vke-system_kubevpn_0db84
Wait container server_vke-system_kubevpn_0db84 to be running...
Container server_vke-system_kubevpn_0db84 is running on port 8888/tcp: 6789/tcp:6789 now
$ Status: , Code: 137
prepare to exit, cleaning up
port-forward occurs error, err: lost connection to pod, retrying
update ref count successfully
ref-count is zero, prepare to clean up resource
clean up successfully
```
This is because the resources allocated to `Docker-desktop` are smaller than what the container needs at startup, so it was killed by the OOM killer.
You can increase the resources allocated to `Docker-desktop`
under `Preferences --> Resources --> Memory`.
### 3. When using WSL (Windows Subsystem for Linux) Docker and entering dev mode with `kubevpn dev`, the terminal cannot reach the cluster network. Why, and how do I fix it?
Answer: Docker on WSL uses the Windows host network, so even if the container is started inside WSL, it uses the Windows
network rather than the WSL network.
Solutions:
- 1): Install Docker inside WSL instead of using the Windows Docker-desktop.
- 2): Run `kubevpn connect` on the Windows host, then use `kubevpn dev` inside WSL to enter dev mode.
- 3): Start a container on the Windows host, run `kubevpn connect` inside it, then in WSL
  use `kubevpn dev --network container:$CONTAINER_ID` (see the sketch below).
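A minimal sketch of option 3, assuming the container name `kubevpn-net` (illustrative):
```shell
# On the Windows host: run a privileged container and connect from inside it
docker run -d --privileged --name kubevpn-net \
  -v ~/.kube/config:/root/.kube/config \
  ghcr.io/kubenetworks/kubevpn:latest sleep infinity
docker exec -it kubevpn-net kubevpn connect
# In WSL: enter dev mode sharing that container's network namespace
kubevpn dev deployment/authors --network container:$(docker inspect -f '{{.Id}}' kubevpn-net)
```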
### 4. After entering dev mode with `kubevpn dev`, the container network is unreachable and the error `172.17.0.1:443 connect refused` appears. How do I fix it?
Answer: most likely the k8s container network CIDR conflicts with the docker network CIDR.
Solutions:
- Use the `--connect-mode container` flag to connect inside a container, which also resolves this issue.
- Edit `~/.docker/daemon.json` to add a non-conflicting network, e.g. `"bip": "172.15.0.1/24"`.
```shell
➜ ~ cat ~/.docker/daemon.json
{
"builder": {
"gc": {
"defaultKeepStorage": "20GB",
"enabled": true
}
},
"experimental": false,
"features": {
"buildkit": true
},
"insecure-registries": [
]
}
```
Add a non-conflicting CIDR:
```shell
➜ ~ cat ~/.docker/daemon.json
{
"builder": {
"gc": {
"defaultKeepStorage": "20GB",
"enabled": true
}
},
"experimental": false,
"features": {
"buildkit": true
},
"insecure-registries": [
],
"bip": "172.15.0.1/24"
}
```
Restart docker and retry.
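On Linux with systemd that is typically the following command; Docker Desktop users can restart from the GUI instead:
```shell
sudo systemctl restart docker
```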
## Architecture
Architecture details can be found [here](/docs/en/Architecture.md).
[Architecture](https://kubevpn.dev/docs/architecture/connect)
## Contributing
@@ -768,4 +671,10 @@ clean up successfully
- Use your favorite IDE to start and debug the daemon and sudo daemon background processes (essentially two GRPC servers).
- Add a breakpoint at `pkg/daemon/action/connect.go:21`.
- Open a new terminal and run `make kubevpn`.
- Then run `./bin/kubevpn connect`; this will hit the breakpoint.
### Supporters
[![JetBrains logo.](https://resources.jetbrains.com/storage/products/company/brand/logos/jetbrains.svg)](https://jb.gg/OpenSourceSupport)
### [Donate](https://kubevpn.dev/zh/docs/donate/)
@@ -1,45 +1,22 @@
FROM envoyproxy/envoy:v1.25.0 AS envoy
FROM golang:1.22 AS builder
FROM envoyproxy/envoy:v1.34.1 AS envoy
FROM golang:1.23 AS builder
ARG BASE=github.com/wencaiwulue/kubevpn
COPY . /go/src/$BASE
WORKDIR /go/src/$BASE
RUN go env -w GO111MODULE=on && go env -w GOPROXY=https://goproxy.cn,direct
RUN make kubevpn
RUN go install github.com/go-delve/delve/cmd/dlv@latest
FROM ubuntu:latest
FROM debian:bookworm-slim
ARG BASE=github.com/wencaiwulue/kubevpn
RUN sed -i s@/security.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list \
&& sed -i s@/archive.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list
RUN apt-get clean && apt-get update && apt-get install -y wget dnsutils vim curl \
net-tools iptables iputils-ping lsof iproute2 tcpdump binutils traceroute conntrack socat iperf3 \
apt-transport-https ca-certificates curl
RUN if [ $(uname -m) = "x86_64" ]; then \
echo "The architecture is AMD64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && chmod +x kubectl && mv kubectl /usr/local/bin; \
elif [ $(uname -m) = "aarch64" ]; then \
echo "The architecture is ARM64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" && chmod +x kubectl && mv kubectl /usr/local/bin; \
else \
echo "Unsupported architecture."; \
fi
ENV TZ=Asia/Shanghai \
DEBIAN_FRONTEND=noninteractive
RUN apt update \
&& apt install -y tzdata \
&& ln -fs /usr/share/zoneinfo/${TZ} /etc/localtime \
&& echo ${TZ} > /etc/timezone \
&& dpkg-reconfigure --frontend noninteractive tzdata \
RUN apt-get update && apt-get install -y iptables dnsutils \
&& apt-get autoremove -y \
&& apt-get clean -y \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY --from=builder /go/src/$BASE/bin/kubevpn /usr/local/bin/kubevpn
COPY --from=builder /go/bin/dlv /usr/local/bin/dlv
COPY --from=envoy /usr/local/bin/envoy /usr/local/bin/envoy
@@ -1,6 +1,6 @@
FROM golang:1.22 as delve
RUN curl --location --output delve-1.20.1.tar.gz https://github.com/go-delve/delve/archive/v1.20.1.tar.gz \
&& tar xzf delve-1.20.1.tar.gz
RUN cd delve-1.20.1 && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o /go/dlv -ldflags '-extldflags "-static"' ./cmd/dlv/
FROM golang:1.23 as delve
RUN curl --location --output delve-1.23.1.tar.gz https://github.com/go-delve/delve/archive/v1.23.1.tar.gz \
&& tar xzf delve-1.23.1.tar.gz
RUN cd delve-1.23.1 && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o /go/dlv -ldflags '-extldflags "-static"' ./cmd/dlv/
FROM busybox
COPY --from=delve /go/dlv /bin/dlv
@@ -1,4 +1,4 @@
FROM golang:1.22 AS builder
FROM golang:1.23 AS builder
RUN go env -w GO111MODULE=on && go env -w GOPROXY=https://goproxy.cn,direct
RUN go install github.com/go-delve/delve/cmd/dlv@latest
@@ -11,16 +11,6 @@ RUN apt-get clean && apt-get update && apt-get install -y wget dnsutils vim curl
net-tools iptables iputils-ping lsof iproute2 tcpdump binutils traceroute conntrack socat iperf3 \
apt-transport-https ca-certificates curl
RUN if [ $(uname -m) = "x86_64" ]; then \
echo "The architecture is AMD64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && chmod +x kubectl && mv kubectl /usr/local/bin; \
elif [ $(uname -m) = "aarch64" ]; then \
echo "The architecture is ARM64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" && chmod +x kubectl && mv kubectl /usr/local/bin; \
else \
echo "Unsupported architecture."; \
fi
ENV TZ=Asia/Shanghai \
DEBIAN_FRONTEND=noninteractive
RUN apt update \
@@ -1,5 +1,22 @@
FROM naison/kubevpn:latest
FROM envoyproxy/envoy:v1.34.1 AS envoy
FROM golang:1.23 AS builder
ARG BASE=github.com/wencaiwulue/kubevpn
COPY . /go/src/$BASE
WORKDIR /go/src/$BASE
RUN make kubevpn
FROM debian:bookworm-slim
ARG BASE=github.com/wencaiwulue/kubevpn
RUN apt-get update && apt-get install -y iptables dnsutils \
&& apt-get autoremove -y \
&& apt-get clean -y \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY bin/kubevpn /usr/local/bin/kubevpn
COPY --from=builder /go/src/$BASE/bin/kubevpn /usr/local/bin/kubevpn
COPY --from=envoy /usr/local/bin/envoy /usr/local/bin/envoy
@@ -1,6 +1,652 @@
apiVersion: v1
entries:
kubevpn:
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.9.4
created: "2025-08-10T07:02:14.35432727Z"
description: A Helm chart for KubeVPN
digest: 331ae4c250ba070f1928b2dd248631fb5e842e576f83fe85c4591186fecaf84e
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.9.4/kubevpn-2.9.4.tgz
version: 2.9.4
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.9.3
created: "2025-08-06T16:24:32.06997396Z"
description: A Helm chart for KubeVPN
digest: 1e0e611ac03a86ed07e5766747902b5f495e8365136bf38d63abf513f5dce18e
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.9.3/kubevpn-2.9.3.tgz
version: 2.9.3
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.9.2
created: "2025-08-06T11:30:00.252464538Z"
description: A Helm chart for KubeVPN
digest: dbdda813051ab2bb37c93ea58c3722c11730ebd66dc68fc9b9fb6f2c9e5fa3a6
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.9.2/kubevpn-2.9.2.tgz
version: 2.9.2
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.9.1
created: "2025-08-04T15:38:16.369392863Z"
description: A Helm chart for KubeVPN
digest: 16834555367165a0411bafbf15d63d50f28434ad4aada8a9cf5622cc51fd6a30
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.9.1/kubevpn-2.9.1.tgz
version: 2.9.1
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.9.0
created: "2025-07-27T13:58:23.265991449Z"
description: A Helm chart for KubeVPN
digest: a7f7b8dd5a05c1f251d01cdf0bb697d98904b41d3a24ae569966c41a7a576701
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.9.0/kubevpn-2.9.0.tgz
version: 2.9.0
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.8.1
created: "2025-07-10T03:19:24.597366484Z"
description: A Helm chart for KubeVPN
digest: 94c8af0d1731b8936c60c7334f1b38dd760fa40b383094a46dc47b20da7b6154
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.8.1/kubevpn-2.8.1.tgz
version: 2.8.1
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.8.0
created: "2025-07-05T14:28:46.57918859Z"
description: A Helm chart for KubeVPN
digest: e1cd48c247366a3751b35b10baaed4d81891cc6d4a7b290468356f14fe9b22c6
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.8.0/kubevpn-2.8.0.tgz
version: 2.8.0
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.21
created: "2025-07-04T12:51:41.427883149Z"
description: A Helm chart for KubeVPN
digest: 3f98084d53d9c49ae3acfcaecb5cca8f75629a17172c0ec83404067b9c2537af
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.21/kubevpn-2.7.21.tgz
version: 2.7.21
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.20
created: "2025-06-26T04:09:53.467005029Z"
description: A Helm chart for KubeVPN
digest: 692beb037244d85f730b8bccffcc7fdbfc421b2e8fc7965fa8db0574a674539b
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.20/kubevpn-2.7.20.tgz
version: 2.7.20
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.19
created: "2025-06-18T10:22:35.562802687Z"
description: A Helm chart for KubeVPN
digest: df20091ef30e5666b5e859a91fa95fe88e195b3bf822154dbf2b9773ffca767c
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.19/kubevpn-2.7.19.tgz
version: 2.7.19
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.18
created: "2025-06-14T06:12:56.525323112Z"
description: A Helm chart for KubeVPN
digest: 5e58ac6222a9de298719246e644d2c95098d55ee479e5415153fa460b2562960
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.18/kubevpn-2.7.18.tgz
version: 2.7.18
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.17
created: "2025-06-12T06:01:29.967945392Z"
description: A Helm chart for KubeVPN
digest: 2b680300b9d203fe2df91fe19876908477e9216569065e54d6746d88fb9c6014
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.17/kubevpn-2.7.17.tgz
version: 2.7.17
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.16
created: "2025-06-10T15:10:07.72781521Z"
description: A Helm chart for KubeVPN
digest: cf5f4af9e33ba051a3cdb861f9c74ff8b8552c94fd64d13143ada7cbbeb8681d
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.16/kubevpn-2.7.16.tgz
version: 2.7.16
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.15
created: "2025-06-07T05:29:43.999105856Z"
description: A Helm chart for KubeVPN
digest: 030abfc966892a0ee644ceeb641e39d098eb94447f87cb1b59eba11fcd12f783
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.15/kubevpn-2.7.15.tgz
version: 2.7.15
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.14
created: "2025-06-04T07:14:42.336616618Z"
description: A Helm chart for KubeVPN
digest: dd1d35c9a7cb6411793cbf00a784aa0ded32de9354f0db3559014e339c6985d4
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.14/kubevpn-2.7.14.tgz
version: 2.7.14
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.13
created: "2025-06-01T11:26:39.769366159Z"
description: A Helm chart for KubeVPN
digest: 167b52f7f59b0a6c01c2660d1a0a9a71c315eeeabc0cf9ff1047ef5dfd4a06e6
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.13/kubevpn-2.7.13.tgz
version: 2.7.13
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.12
created: "2025-05-23T03:38:02.484001975Z"
description: A Helm chart for KubeVPN
digest: b9e28ceda8bb07b42ec37eb2d6b283496d83645479b2f1f4e921d9c462eeb54e
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.12/kubevpn-2.7.12.tgz
version: 2.7.12
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.11
created: "2025-05-18T09:03:21.60777933Z"
description: A Helm chart for KubeVPN
digest: ee30c2533dff51fa389767e56931583cdfff8c5fca7d6c9698f521c6fc508d42
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.11/kubevpn-2.7.11.tgz
version: 2.7.11
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.10
created: "2025-05-14T13:08:51.09371872Z"
description: A Helm chart for KubeVPN
digest: fd23dd5bf0c3a9343d73276c4997a34027a93c1a88667265d92297630579d165
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.10/kubevpn-2.7.10.tgz
version: 2.7.10
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.9
created: "2025-05-12T09:14:52.66116293Z"
description: A Helm chart for KubeVPN
digest: 56e022017177603290575849553c2e9c19f6a1691288dbd67c32a2fdcbde0834
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.9/kubevpn-2.7.9.tgz
version: 2.7.9
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.8
created: "2025-05-10T15:46:13.342045201Z"
description: A Helm chart for KubeVPN
digest: bfab5a7e4e1e795071a7ce3fd7713b517aa447d967ec58500e5a551564869109
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.8/kubevpn-2.7.8.tgz
version: 2.7.8
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.7
created: "2025-05-09T06:43:01.403047355Z"
description: A Helm chart for KubeVPN
digest: 14b3e7873aa71fa7a380631c83be8df1dfb8d0ccb49eb6746aa4f83e3df934f6
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.7/kubevpn-2.7.7.tgz
version: 2.7.7
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.6
created: "2025-05-07T11:46:09.644201893Z"
description: A Helm chart for KubeVPN
digest: 2146d5245440dff7d551ccc745aa1d9476d4f42053ff8a80f33f835d8da57712
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.6/kubevpn-2.7.6.tgz
version: 2.7.6
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.5
created: "2025-05-07T01:56:15.201307242Z"
description: A Helm chart for KubeVPN
digest: 34799e9605b3048aac75484bb32fb6c70f9e7eb7470e9b77c51be075a548c25e
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.5/kubevpn-2.7.5.tgz
version: 2.7.5
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.4
created: "2025-05-06T17:01:13.789138284Z"
description: A Helm chart for KubeVPN
digest: 5c6f2d1a178e917ac83ec72d0a46de9a0ff68f80a3aeb813d15dfb92c8ad36be
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.4/kubevpn-2.7.4.tgz
version: 2.7.4
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.3
created: "2025-05-06T15:40:24.505449375Z"
description: A Helm chart for KubeVPN
digest: 86ef4b1de6ea15f6738824f7c389a891f53500b9163b1288847172eb7dc6817e
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.3/kubevpn-2.7.3.tgz
version: 2.7.3
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.2
created: "2025-04-25T15:40:08.296727519Z"
description: A Helm chart for KubeVPN
digest: 8711dae30f4ff9bc9cea018fa16ae70087a17af42262f7f31c43950a34fffa08
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.2/kubevpn-2.7.2.tgz
version: 2.7.2
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.1
created: "2025-04-15T15:18:20.818055207Z"
description: A Helm chart for KubeVPN
digest: 79c40c942fd2cfcca63dd82921e04871680838f01717c6fcb3ee06bfb7f59535
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.1/kubevpn-2.7.1.tgz
version: 2.7.1
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.7.0
created: "2025-04-12T05:37:01.063235951Z"
description: A Helm chart for KubeVPN
digest: a4b4de15f474fba43367fc7239c31e2020a6a1e0e3b29e02eb653cb9922b02e8
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.0/kubevpn-2.7.0.tgz
version: 2.7.0
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.6.0
created: "2025-04-06T12:54:49.852649414Z"
description: A Helm chart for KubeVPN
digest: 58d930de19ac808e9f0ee501fe6f74b6f38376692708fc94fe7200496d9c5ca2
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.6.0/kubevpn-2.6.0.tgz
version: 2.6.0
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.5.1
created: "2025-04-03T15:46:28.062220333Z"
description: A Helm chart for KubeVPN
digest: 6daf003256c42bb0db414eb17eb06294e46d33bc6c63f01419012a37318d0d2f
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.5.1/kubevpn-2.5.1.tgz
version: 2.5.1
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.5.0
created: "2025-03-31T05:36:16.050204161Z"
description: A Helm chart for KubeVPN
digest: 301137b1599c232efd61ce9360e0a60da89e0a5c2eb076750bf461b38d26cfaf
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.5.0/kubevpn-2.5.0.tgz
version: 2.5.0
- annotations:
app: kubevpn
apiVersion: v2
appVersion: v2.4.3
created: "2025-03-30T13:48:42.333380676Z"
description: A Helm chart for KubeVPN
digest: 8ef28a43cb3d04f071445cf7d1199aba7392d78e1941707bab82853c5541c93c
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.4.3/kubevpn-2.4.3.tgz
version: 2.4.3
- apiVersion: v2
appVersion: v2.4.2
created: "2025-03-23T12:53:35.793492243Z"
description: A Helm chart for KubeVPN
digest: c627f69ac904ddb41c396909873425d85264fb3393d550fa1b0e8d2abfc402e9
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.4.2/kubevpn-2.4.2.tgz
version: 2.4.2
- apiVersion: v2
appVersion: v2.4.1
created: "2025-03-16T09:48:30.691242519Z"
description: A Helm chart for KubeVPN
digest: 1766431ce46b43758353928188cc993832e41cd0e352c9bc7991390bbbf41b04
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.4.1/kubevpn-2.4.1.tgz
version: 2.4.1
- apiVersion: v2
appVersion: v2.4.0
created: "2025-03-14T14:16:56.392516206Z"
description: A Helm chart for KubeVPN
digest: ffece68d3234ba629e02456fd3b0d31b5d2d1330c4c7f5d82ac2e0e1e97d82f3
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.4.0/kubevpn-2.4.0.tgz
version: 2.4.0
- apiVersion: v2
appVersion: v2.3.13
created: "2025-02-23T14:30:35.221348419Z"
description: A Helm chart for KubeVPN
digest: e79cdd07eae2ba3f36997debf898b091e1e68412fde7a34e823bad902e803105
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.13/kubevpn-2.3.13.tgz
version: 2.3.13
- apiVersion: v2
appVersion: v2.3.12
created: "2025-02-13T07:46:06.029130129Z"
description: A Helm chart for KubeVPN
digest: 0b7d9f8b4cd306377e4452a9d86530387afcae379e11665909b90e15f2d82a04
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.12/kubevpn-2.3.12.tgz
version: 2.3.12
- apiVersion: v2
appVersion: v2.3.11
created: "2025-02-03T09:24:54.033585049Z"
description: A Helm chart for KubeVPN
digest: a54a2ed19e6f4aa5c274186d6b188c0230244582055905155c4620ebe8864838
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.11/kubevpn-2.3.11.tgz
version: 2.3.11
- apiVersion: v2
appVersion: v2.3.10
created: "2025-01-24T13:36:34.489289734Z"
description: A Helm chart for KubeVPN
digest: 987b73399637eee01570492115114696fdb054074507f0d16e47d077e4ea770c
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.10/kubevpn-2.3.10.tgz
version: 2.3.10
- apiVersion: v2
appVersion: v2.3.9
created: "2024-12-21T15:29:42.173109915Z"
description: A Helm chart for KubeVPN
digest: 0f9dd91504c1d1c3149cca785f0a9d72ef860d002ee73590f41e3d8decc99365
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.9/kubevpn-2.3.9.tgz
version: 2.3.9
- apiVersion: v2
appVersion: v2.3.8
created: "2024-12-19T14:19:38.126241384Z"
description: A Helm chart for KubeVPN
digest: 84239f1bce053eaa9314e53b820ad0ba32bbc51c37dcac6ae8abd03bef6f7fd2
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.8/kubevpn-2.3.8.tgz
version: 2.3.8
- apiVersion: v2
appVersion: v2.3.7
created: "2024-12-14T17:25:08.398840622Z"
description: A Helm chart for KubeVPN
digest: 437faa6cd98e81c4ad2c1b48c9ef7a33e7d435cf6343c5cc2c88ea251b2a545b
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.7/kubevpn-2.3.7.tgz
version: 2.3.7
- apiVersion: v2
appVersion: v2.3.6
created: "2024-12-09T11:52:04.779835011Z"
description: A Helm chart for KubeVPN
digest: 7b23d14f6aea4410d68911d202199f15c88cb96cef8edbd94d4a95e9b9254bf7
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.6/kubevpn-2.3.6.tgz
version: 2.3.6
- apiVersion: v2
appVersion: v2.3.5
created: "2024-12-06T14:40:11.685095653Z"
description: A Helm chart for KubeVPN
digest: c2a85f446af834b60308b1384e6cae5662229c34370053319c0f759f650a1cb5
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.5/kubevpn-2.3.5.tgz
version: 2.3.5
- apiVersion: v2
appVersion: v2.3.4
created: "2024-11-29T13:03:24.255324387Z"
description: A Helm chart for KubeVPN
digest: 2804aa624f6139695f3fb723bdc6ba087492bcd8810baf7196a1ae88bd2a62b5
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.4/kubevpn-2.3.4.tgz
version: 2.3.4
- apiVersion: v2
appVersion: v2.3.3
created: "2024-11-22T14:54:13.795282085Z"
description: A Helm chart for KubeVPN
digest: 33cbbc9312e7b7e415fb14f80f17df50d305194617bcf75d1501227cb90b8f32
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.3/kubevpn-2.3.3.tgz
version: 2.3.3
- apiVersion: v2
appVersion: v2.3.2
created: "2024-11-18T11:52:12.076510627Z"
description: A Helm chart for KubeVPN
digest: cdb38ab84bf1649ac4280f6996060c49a095f9c056044cd5f691e7bf4f259dad
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.2/kubevpn-2.3.2.tgz
version: 2.3.2
- apiVersion: v2
appVersion: v2.3.1
created: "2024-11-15T13:36:37.056311943Z"
description: A Helm chart for KubeVPN
digest: 10c1200241309be4ec2eb88e9689ebbf96704c8fad270e6fda30047135aeccf2
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.1/kubevpn-2.3.1.tgz
version: 2.3.1
- apiVersion: v2
appVersion: v2.2.22
created: "2024-10-30T08:46:08.845218523Z"
description: A Helm chart for KubeVPN
digest: c2dc336383d7de2fb97cfd40a15e9f6c29a9a598484b88515a98bcaeb4925eda
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.22/kubevpn-2.2.22.tgz
version: 2.2.22
- apiVersion: v2
appVersion: v2.2.21
created: "2024-10-25T14:10:25.545716679Z"
description: A Helm chart for KubeVPN
digest: 98ae51247535525ff6a10b5f493d8bfc573af62759432f7aa54dd7eb6edeffd5
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.21/kubevpn-2.2.21.tgz
version: 2.2.21
- apiVersion: v2
appVersion: v2.2.20
created: "2024-10-20T04:00:07.263734809Z"
description: A Helm chart for KubeVPN
digest: 7863701dff5b3fce0795ee8e0b73044b7c88f8777c86a65adc1f5563123565dc
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.20/kubevpn-2.2.20.tgz
version: 2.2.20
- apiVersion: v2
appVersion: v2.2.19
created: "2024-10-10T00:47:08.858011096Z"
description: A Helm chart for KubeVPN
digest: be2c672081307c03b7fe6b635d524c8f3f73d70ae3316efa85e781a62c25a46d
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.19/kubevpn-2.2.19.tgz
version: 2.2.19
- apiVersion: v2
appVersion: v2.2.18
created: "2024-09-10T09:39:11.71407425Z"
description: A Helm chart for KubeVPN
digest: 2d953103425ca2a087a2d521c9297662f97b72e78cf831e947942f292bbcc643
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.18/kubevpn-2.2.18.tgz
version: 2.2.18
- apiVersion: v2
appVersion: v2.2.17
created: "2024-08-03T07:45:55.228743946Z"
description: A Helm chart for KubeVPN
digest: 476317ad82b2c59a623e1fca968c09a28554ebcabec337c1c363e7296bb27514
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.17/kubevpn-2.2.17.tgz
version: 2.2.17
- apiVersion: v2
appVersion: v2.2.16
created: "2024-07-26T13:43:50.473565863Z"
description: A Helm chart for KubeVPN
digest: 6cdb809d04687197a8defbf4349871c505ac699924833fecc210d8a6d82a9f20
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.16/kubevpn-2.2.16.tgz
version: 2.2.16
- apiVersion: v2
appVersion: v2.2.15
created: "2024-07-19T15:03:13.558586823Z"
description: A Helm chart for KubeVPN
digest: 279b24976cef25e1dd8a4cd612a7c6a5767cecd4ba386ccab80fc00db76117e7
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.15/kubevpn-2.2.15.tgz
version: 2.2.15
- apiVersion: v2
appVersion: v2.2.14
created: "2024-07-12T15:24:27.825047662Z"
description: A Helm chart for KubeVPN
digest: 52ab9b89ea3773792bf3839e4a7c23a9ea60a6c72547024dc0907c973a8d34b3
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.14/kubevpn-2.2.14.tgz
version: 2.2.14
- apiVersion: v2
appVersion: v2.2.13
created: "2024-07-05T15:08:40.140645659Z"
@@ -121,4 +767,4 @@ entries:
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.2/kubevpn-2.2.2.tgz
version: 2.2.2
generated: "2024-07-05T15:08:40.140749013Z"
generated: "2025-08-10T07:02:14.35462193Z"
@@ -4,3 +4,5 @@ description: A Helm chart for KubeVPN
type: application
version: 0.1.0
appVersion: "1.16.0"
annotations:
app: kubevpn
charts/kubevpn/README.md Normal file
@@ -0,0 +1,36 @@
# Helm charts for KubeVPN server
Installing the kubevpn server with helm means using cluster mode: all users share this instance.
- Please make sure users have permission on the `kubevpn` namespace.
- Otherwise, it will fall back to creating a `kubevpn` deployment in the user's own namespace.
## Add helm repository kubevpn
```shell
helm repo add kubevpn https://kubenetworks.github.io/charts
```
## Install with default mode
```shell
helm install kubevpn kubevpn/kubevpn -n kubevpn --create-namespace
```
In China, you can use the Tencent image registry:
```shell
helm install kubevpn kubevpn/kubevpn --set image.repository=ccr.ccs.tencentyun.com/kubevpn/kubevpn -n kubevpn --create-namespace
```
## AWS Fargate cluster
```shell
helm install kubevpn kubevpn/kubevpn -n kubevpn --create-namespace
```
*Proxy/ServiceMesh mode only supports k8s services*
```shell
kubevpn proxy service/authors
```
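To remove the cluster-mode server later, a standard helm uninstall works, assuming the release and namespace used above:
```shell
helm uninstall kubevpn -n kubevpn
```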
@@ -1,4 +1,4 @@
1. Connect to cluster network by running these commands:
kubevpn connect --namespace {{ .Release.Namespace }}
export POD_IP=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kubevpn.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].status.podIP}")
kubevpn connect --namespace {{ include "kubevpn.namespace" . }}
export POD_IP=$(kubectl get pods --namespace {{ include "kubevpn.namespace" . }} -l "app.kubernetes.io/name={{ include "kubevpn.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].status.podIP}")
ping $POD_IP
@@ -61,3 +61,22 @@ Create the name of the service account to use
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Namespace
1. prefer the namespace specified via -n
2. otherwise fall back to the default namespace kubevpn
*/}}
{{- define "kubevpn.namespace" -}}
{{- if .Release.Namespace }}
{{- if eq .Release.Namespace "default" }}
{{- .Values.namespace }}
{{- else }}
{{- .Release.Namespace }}
{{- end }}
{{- else if .Values.namespace }}
{{- .Values.namespace }}
{{- else }}
{{- .Values.namespace }}
{{- end }}
{{- end }}
@@ -2,6 +2,7 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ include "kubevpn.namespace" . }}
data:
DHCP: ""
DHCP6: ""
@@ -2,6 +2,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ include "kubevpn.namespace" . }}
labels:
{{- include "kubevpn.labels" . | nindent 4 }}
spec:
@@ -31,34 +32,12 @@ spec:
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- args:
- |2-
sysctl -w net.ipv4.ip_forward=1
sysctl -w net.ipv6.conf.all.disable_ipv6=0
sysctl -w net.ipv6.conf.all.forwarding=1
update-alternatives --set iptables /usr/sbin/iptables-legacy
iptables -F
ip6tables -F
iptables -P INPUT ACCEPT
ip6tables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
ip6tables -P FORWARD ACCEPT
iptables -t nat -A POSTROUTING -s ${CIDR4} -o eth0 -j MASQUERADE
ip6tables -t nat -A POSTROUTING -s ${CIDR6} -o eth0 -j MASQUERADE
kubevpn serve -L "tcp://:10800" -L "tun://:8422?net=${TunIPv4}" -L "gtcp://:10801" -L "gudp://:10802" --debug=true
command:
- /bin/sh
- -c
env:
- name: CIDR4
value: 223.254.0.0/16
- name: CIDR6
value: efff:ffff:ffff:ffff::/64
- name: TunIPv4
value: 223.254.0.100/16
- name: TunIPv6
value: efff:ffff:ffff:ffff:ffff:ffff:ffff:9999/64
- command:
- kubevpn
args:
- server
- -l gtcp://:10801
- -l gudp://:10802
envFrom:
- secretRef:
name: {{ include "kubevpn.fullname" . }}
@@ -66,24 +45,13 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: vpn
ports:
- containerPort: {{ .Values.service.port8422 }}
name: 8422-for-udp
protocol: UDP
- containerPort: {{ .Values.service.port10800 }}
name: 10800-for-tcp
- containerPort: {{ .Values.service.port10801 }}
name: 10801-for-tcp
protocol: TCP
resources:
{{- toYaml .Values.resources | nindent 12 }}
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
runAsUser: 0
- args:
- control-plane
- --watchDirectoryFilename
- /etc/envoy/envoy-config.yaml
command:
- kubevpn
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
@@ -95,10 +63,6 @@ spec:
protocol: TCP
resources:
{{- toYaml .Values.resourcesSmall | nindent 12 }}
volumeMounts:
- mountPath: /etc/envoy
name: envoy-config
readOnly: true
- args:
- webhook
command:
@@ -106,6 +70,11 @@ spec:
envFrom:
- secretRef:
name: {{ include "kubevpn.fullname" . }}
env:
- name: "POD_NAMESPACE"
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: webhook
@@ -3,6 +3,7 @@ apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ include "kubevpn.namespace" . }}
labels:
{{- include "kubevpn.labels" . | nindent 4 }}
spec:

kind: Job
metadata:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ include "kubevpn.namespace" . }}
labels:
{{- include "kubevpn.labels" . | nindent 4 }}
annotations:
@@ -31,42 +32,7 @@ spec:
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- /bin/bash
- -c
args:
- |2-
echo "Label namespace {{ .Release.Namespace }}"
kubectl label ns {{ .Release.Namespace }} ns={{ .Release.Namespace }}
echo "Generating https certificate"
openssl req -x509 -nodes -days 36500 -newkey rsa:2048 -subj "/CN={{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}.svc" -addext "subjectAltName=DNS:{{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local,DNS:{{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}.svc" -keyout server.key -out server.crt
export TLS_CRT=$(cat server.crt | base64 | tr -d '\n')
echo "Patch mutatingwebhookconfigurations {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}"
kubectl patch mutatingwebhookconfigurations {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }} -p "{\"webhooks\":[{\"name\":\"{{ include "kubevpn.fullname" . }}.naison.io\",\"sideEffects\":\"None\",\"admissionReviewVersions\":[\"v1\", \"v1beta1\"],\"clientConfig\":{\"service\":{\"namespace\":\"{{ .Release.Namespace }}\",\"name\":\"{{ include "kubevpn.fullname" . }}\"},\"caBundle\":\"$TLS_CRT\"}}]}"
export TLS_KEY=$(cat server.key | base64 | tr -d '\n')
echo "Patch secret {{ include "kubevpn.fullname" . }}"
kubectl patch secret {{ include "kubevpn.fullname" . }} -n {{ .Release.Namespace }} -p "{\"data\":{\"tls_key\":\"$TLS_KEY\",\"tls_crt\":\"$TLS_CRT\"}}"
echo "Restart the pods..."
kubectl scale -n {{ .Release.Namespace }} --replicas=0 deployment/{{ include "kubevpn.fullname" . }}
kubectl scale -n {{ .Release.Namespace }} --replicas=1 deployment/{{ include "kubevpn.fullname" . }}
export POOLS=$(kubectl get cm {{ include "kubevpn.fullname" . }} -n {{ .Release.Namespace }} -o jsonpath='{.data.IPv4_POOLS}')
if [[ -z "${POOLS// }" ]];then
echo "Cidr is empty"
echo "Get pod cidr..."
export POD_CIDR=$(kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}' | tr -s '\n' ' ')
echo "Get service cidr..."
export SVC_CIDR=$(echo '{"apiVersion":"v1","kind":"Service","metadata":{"name":"kubevpn-get-svc-cidr-{{ .Release.Namespace }}", "namespace": "{{ .Release.Namespace }}"},"spec":{"clusterIP":"1.1.1.1","ports":[{"port":443}]}}' | kubectl apply -f - 2>&1 | sed 's/.*valid IPs is //')
echo "Pod cidr: $POD_CIDR, service cidr: $SVC_CIDR"
echo "Patch configmap {{ include "kubevpn.fullname" . }}"
kubectl patch configmap {{ include "kubevpn.fullname" . }} -n {{ .Release.Namespace }} -p "{\"data\":{\"IPv4_POOLS\":\"$POD_CIDR $SVC_CIDR\"}}"
else
echo "Cidr is NOT empty"
fi
echo "Done~"
exit 0
- kubevpn
- once
- --image
- "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
@@ -1,7 +1,8 @@
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
name: {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}
namespace: {{ include "kubevpn.namespace" . }}
webhooks:
- admissionReviewVersions:
- v1
@@ -10,15 +11,13 @@ webhooks:
caBundle: {{ .Values.tls.crt }}
service:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "kubevpn.namespace" . }}
path: /pods
port: 80
failurePolicy: Ignore
matchPolicy: Equivalent
name: {{ include "kubevpn.fullname" . }}.naison.io
namespaceSelector:
matchLabels:
ns: {{ .Release.Namespace }}
namespaceSelector: { }
objectSelector: { }
reinvocationPolicy: Never
rules:

kind: Role
metadata:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ include "kubevpn.namespace" . }}
rules:
- apiGroups:
- ""
@@ -20,10 +21,10 @@ rules:
- delete
- apiGroups: [ "" ]
resources: [ "namespaces" ]
resourceNames: [{{ .Release.Namespace }}]
resourceNames: [{{ include "kubevpn.namespace" . }}]
verbs:
- get
- patch
- update
- apiGroups: [ "apps" ]
resources: [ "deployments/scale", "deployments" ]
resourceNames:
@@ -42,23 +43,42 @@ rules:
- get
- update
- patch
- list
# for get network cidr
- apiGroups:
- ""
resources:
- pods/exec
verbs:
- create
- apiGroups:
- ""
resources:
- pods
- pods/log
verbs:
- list
- get
- create
- delete
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
name: {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
resourceNames:
- {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
- {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}
verbs:
- get
- list
- patch
- update
- apiGroups:
- ""
resources:
@@ -66,4 +86,11 @@ rules:
verbs:
- get
- list
- watch
- watch
# for get network cidr
- apiGroups:
- ""
resources:
- pods
verbs:
- list

kind: RoleBinding
metadata:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ include "kubevpn.namespace" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -9,18 +10,18 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "kubevpn.fullname" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "kubevpn.namespace" . }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
name: {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}
subjects:
- kind: ServiceAccount
name: {{ include "kubevpn.fullname" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "kubevpn.namespace" . }}
roleRef:
kind: ClusterRole
name: {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
name: {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}
apiGroup: rbac.authorization.k8s.io

apiVersion: v1
data:
tls_crt: {{ .Values.tls.crt }}
tls_key: {{ .Values.tls.key }}
kind: Secret
metadata:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ include "kubevpn.namespace" . }}
type: Opaque
stringData:
tls_crt: {{ .Values.tls.crt }}
tls_key: {{ .Values.tls.key }}
tls_server_name: {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}
@@ -2,19 +2,16 @@ apiVersion: v1
kind: Service
metadata:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ include "kubevpn.namespace" . }}
labels:
{{- include "kubevpn.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- name: 8422-for-udp
port: {{ .Values.service.port8422 }}
protocol: UDP
targetPort: 8422
- name: 10800-for-tcp
port: {{ .Values.service.port10800 }}
- name: 10801-for-tcp
port: {{ .Values.service.port10801 }}
protocol: TCP
targetPort: 10800
targetPort: 10801
- name: 9002-for-envoy
port: {{ .Values.service.port9002 }}
protocol: TCP
@@ -3,6 +3,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "kubevpn.serviceAccountName" . }}
namespace: {{ include "kubevpn.namespace" . }}
labels:
{{- include "kubevpn.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}

# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# default namespace
namespace: kubevpn
replicaCount: 1
image:
repository: naison/kubevpn
repository: ghcr.io/kubenetworks/kubevpn
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
@@ -21,8 +24,9 @@ cidr:
service: ""
tls:
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURXVENDQWtHZ0F3SUJBZ0lJU0NmUDdHeHVhUkl3RFFZSktvWklodmNOQVFFTEJRQXdNREV1TUN3R0ExVUUKQXd3bGEzVmlaWFp3YmkxMGNtRm1abWxqTFcxaGJtRm5aWEl0WTJGQU1UY3dOamsyTnpjd01EQWVGdzB5TkRBeQpNRE14TWpReE5EQmFGdzB5TlRBeU1ESXhNalF4TkRCYU1DMHhLekFwQmdOVkJBTU1JbXQxWW1WMmNHNHRkSEpoClptWnBZeTF0WVc1aFoyVnlRREUzTURZNU5qYzNNREF3Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXcKZ2dFS0FvSUJBUURzVnNleEVpVG00dmlleUhEeU5SbldKbXNiaFBWV24yTkgvNi9wUGVBT3ZUbXgwSDdHUnZJLwpzMzVoZW9EWExhdFVmaDlXT1hXdzRqaGZsdUdWQWlzZGs2Y2ZkS1hVVzJheXpRbFpZd1ZMTzdUUHFoeWF0UHVpCmpRYVB2bUErRGNYMHJRc2Y3SFJwVWhjVTJ1QTJ4WGhZNy9QWWFUdzhkU0NTTHFTK2ZLM3poc0lONTFrYnIzdG4KU2FKcWFybDNhSU82N1JvdmNZbmxERG9XTzFwS1ZSUmROVkM1anVtREJOSWdOam5TSTY5QTFydzR0REkwdjcxWQpPRmhjYnUwNnFVdkNNU1JzR3F5ZkhOeUlXakVvcnk4Wk0xVExlcnZhTk12WlFTRndRNk5SRExHYXNlbTBlNTRXCmVublA0OVpIR1FhTjllYnJQSkJuL2pQQ3p0NlFDMkg5QWdNQkFBR2plakI0TUE0R0ExVWREd0VCL3dRRUF3SUYKb0RBVEJnTlZIU1VFRERBS0JnZ3JCZ0VGQlFjREFUQU1CZ05WSFJNQkFmOEVBakFBTUI4R0ExVWRJd1FZTUJhQQpGQVA3WmhvcGsvbEc3MVNCMk42QkpKdDI2eXhuTUNJR0ExVWRFUVFiTUJtQ0YydDFZbVYyY0c0dGRISmhabVpwCll5MXRZVzVoWjJWeU1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQVhYWk1WazhhQWwwZTlqUWRQTDc3ZVZOL3kKY1ZZZzRBVDlhdkh0UXV2UkZnOU80Z3JMaFVDQnoyN25wdlZZcHNMbmdEMTFRTXpYdHlsRDNMNDJNQ3V0Wnk5VQorL1BCL291ajQzWkZUckJDbk9DZDl6elE2MXZSL1RmbUFrTUhObTNZYjE1OGt2V0ZhNVlBdytRVi9vRDNUcGlXClREVTZXNkxvRFg5N0lNSFk0L3VLNTNzbXVLMjh5VzduSVVrbnpqN3h5UzVOWTFZaVNUN0w2ZFZ0VVppR1FUK00KRk16ODVRcTJOTWVXU1lKTmhhQVk5WEpwMXkrcEhoeWpPVFdjSEFNYmlPR29mODM5N1R6YmUyWHdNQ3BGMWc5NwpMaHZERnNsNzcyOWs1NFJVb1d2ZjFIVFFxL2R6cVBQTTNhWGpTbXFWUEV2Zk5qeGNhZnFnNHBaRmdzYzEKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJREpEQ0NBZ3lnQXdJQkFnSUlJMmROaFBKY0Uxc3dEUVlKS29aSWh2Y05BUUVMQlFBd01ERXVNQ3dHQTFVRQpBd3dsYTNWaVpYWndiaTEwY21GbVptbGpMVzFoYm1GblpYSXRZMkZBTVRjd05qazJOemN3TURBZUZ3MHlOREF5Ck1ETXhNalF4TkRCYUZ3MHlOVEF5TURJeE1qUXhOREJhTURBeExqQXNCZ05WQkFNTUpXdDFZbVYyY0c0dGRISmgKWm1acFl5MXRZVzVoWjJWeUxXTmhRREUzTURZNU5qYzNNREF3Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQgpEd0F3Z2dFS0FvSUJBUURBQVpBdEZaTzJEZG9BVTUxWnRiVjI0QkVGN3RkakMzTzBPdEE2UURYTlVwNWlZZGdjCjdORVlGZE55YXltTWZMUVFGTWZqZFcxNWpDQ0N4KzFFMm1KQTVZa0ZFcXJTeDA3Z1pkKy9hcU13ZkhDT0ZTM0UKSUROdzBKYlBGVHZuSGsyZHVXby8zT1BnVmpONWw2UTBWaE10WkJEc2haVHVvSUhWaTJZcldDdnNkMU9mWFVyMwo0Y0ZJUkJ2OW5mNDIzdWthajYxdisrRDd6K3Y0bEN4R0JtUDhpYXFaNFVlckxIdWF2N1hQUnZ4QmQzNDBGY2diCm5TZVUxTXZmcTgvOUg4VTRzeWRGaUpZVUs1RFhkWU15NEw0RlMvbXZRaWR1TU5lWUw1Y2xHSXZTNGFzQjl2QlMKM0ZIY1IrQk1xVzFQWUdDc2YyL0RvdVNRVVNhcnB5VU5aczZKQWdNQkFBR2pRakJBTUE0R0ExVWREd0VCL3dRRQpBd0lDcERBUEJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJRRCsyWWFLWlA1UnU5VWdkamVnU1NiCmR1c3NaekFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBVGFNR0NLK2YxSmdKaXplVjlla3ZhckhDZHpmZzJNZkQKV2pCeFUzMXNabE1vZU9GS0hPdndjMVliTzVNTStHTGM0bGhMS2VHV1pwQmVRV0lFamo4V01wa3k2M2VtUUl5eQpOT2hjdVBUTFhCQ0JkS1lhUU1LcU9mN3c4MEw2cVRKclFER0t0a0MxVzEwbFJzbUd0TEtBbDVjU0w4VFRSZVhXCjhiNXRGOFd5Yms1Vm12VWtxdEpkSVNJTjdVOG5nV21WRUVOZFcvckNqclI5TllaSXZBZk9mS1Zrc1JuZEJaQ0kKOXdxVUI2K2JITEJBWjNpV293ZFhpRGhLMSt5Z2ZwNnpUcW9LRmxOWi8rRTNkS0tpbStyZFFGSmIvNTNvU2xaaApwMkVkT1ZNYU1mRjh1ZFhDdE44WjZnVHpPWkJxN1pmWjVpMlU1eFQ2aFNxRjFjT1ZuQS9idmc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBN0ZiSHNSSWs1dUw0bnNodzhqVVoxaVpyRzRUMVZwOWpSLyt2NlQzZ0RyMDVzZEIrCnhrYnlQN04rWVhxQTF5MnJWSDRmVmpsMXNPSTRYNWJobFFJckhaT25IM1NsMUZ0bXNzMEpXV01GU3p1MHo2b2MKbXJUN29vMEdqNzVnUGczRjlLMExIK3gwYVZJWEZOcmdOc1Y0V08vejJHazhQSFVna2k2a3ZueXQ4NGJDRGVkWgpHNjk3WjBtaWFtcTVkMmlEdXUwYUwzR0o1UXc2Rmp0YVNsVVVYVFZRdVk3cGd3VFNJRFk1MGlPdlFOYThPTFF5Ck5MKzlXRGhZWEc3dE9xbEx3akVrYkJxc254emNpRm94S0s4dkdUTlV5M3E3MmpUTDJVRWhjRU9qVVF5eG1ySHAKdEh1ZUZucDV6K1BXUnhrR2pmWG02enlRWi80endzN2VrQXRoL1FJREFRQUJBb0lCQVFEWkRaWVdsS0JaZ0Nodgp3NHlmbFk4bDgyQzVCSEpCM041VWVJbjVmejh3cWk2N2xNMXBraXpYdmlTYXArUitPczQ0S2lEamtwLzVGTHBMCmFBbkRUUnVGN1Y0MmNHNEFTdlZWenlMLytnWVpvenNhNFpPbHJnUFF0UTVLbzhCR0hXWXBvV2N2S1gxOFlNMGIKOVN5b2dORlhkUUNSUjR6dnhXNWxjdnNRaXZkRFNFTUJhbW00bFpEM0ZtUm5HVGlpaUVNSis2SFdlR1lBS1RMSgoxN0NnejZaWjg1bGtUZ0dxeEUrWkQwNDJGYWdJZlJORVI0QmZOMlp6NU5CU3RnMTJFdUpXWmRGcWpxSHlwbnNjCjNjbEd0U1Z5VStvWUFUWnV5Y2VMNVIwZUdzdTB6ZHhLT3ZzSm9yVWZ0dlMrUGovclJxWHVjOVdXSkFLU1FDVm0Ka1I1Y2M4ak5Bb0dCQU8wYkVrNTdtZWYwcXNKT0U3TFlFV1hRRFZiTmhnZ1E2eTlBZlNnVjZDMFFDdC93YkVGaQo0Rm41bTdhSHdqZUJ5OFJnMGhGbTdVNThCb3FyNnBQNFp6MEhwY1ZSTmVLeTF6R0wreFRJRXFnTXQxei9TYVE0CkIwWEZ4Ulg3d2pjeit2OC9GOVdsOElLbHhBWjhxNXd6aHNFUVVYcVIxTzF1T2FjRktSdXg3OU1UQW9HQkFQOHMKRVJBa1R3WEV3UU9ya2dQOW5tTHZLYXMwM0J6UXUrOFBtQWlsOGFmbVR5ZEFWdzJKaHBwOFlUQzl6NDM3VXU4Ngpta2lOVHpRL3MvQ1lCNEdJVVFCMEdlTDJtc2VjdWNaUHhTSW10dElSOWY4bjk2NEpuL3RtVUd4VXRFaWhWdER4ClZCdFBiWmNzc2E5VVVCRFVqRnZJSUdPTGlqSVdxbW8zM3htT0tJaXZBb0dCQU5HV2k0RWFtdnBCK1N1V3JxejUKZDYrQzBEZTVwcys4Zk5nZzdrRWYxRUw1R2xQSGh6bnBPQjN3bWFjb3JCSTZ4cTlKVW9lVmJ4RmdhcnZycVlpeApIRGtEYUpKWjdnTDlTV0YvdGlzeGkrUkdrVk5BU28xQ0JaTzBkVG13ZUlZcGlhWlUxREhENUN6b2NMVzNRRTdyCjhTTDUxTHcrNm5RU2FoM3NYdUVmVWJwSEFvR0JBTk1FNlROMUkxaDg1cldYVEJBNnk2RzdjTFVoNktsM3dRTW8KM1N6QnRyK0h5WXVIUExaNE5iVktDTUhiSm1xZkhXMnpBK1hkM2xNeUh5ZG5Ra1hQcWxUNnJuR3dTRDJ0RVVDNwp0U1hSNkR4L0YvVWpZME1zdUgyWmxnYVFZZXJ5YWE0dTlNUUZBbmNUUWZuaGVya0FYUGFGNEtzUnVYNUVtamR1Cjd2UGVTUTBIQW9HQUM0ZlJmZnFFM3RRdWxSeUJVeHhKNHlPaWJiVlpCV1hxWHRzMU0wczdsZ1YxaGVrYis1VmMKVTZ3MFh2T0pTaEZPaGF6UVdseVZUejhmSVdSa1BXa2MzSzE1dWx6cmh6NWZVa0dYOEw0OGMrTHlaSzZ1M2ZRVgpyL1pRV3JsYlZSWlhRVGhuaGhOM1Jodm96SlZZV0lpckVyMGp3VmRaQWRUYW1XZEpTQ3J4WE1NPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
# will auto generate in job
crt: ''''''
key: ''''''
serviceAccount:
# Specifies whether a service account should be created
@@ -40,20 +44,18 @@ podLabels:
podSecurityContext: { }
# fsGroup: 2000
securityContext: { }
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
runAsUser: 0
runAsGroup: 0
service:
type: ClusterIP
port8422: 8422
port9002: 9002
port10800: 10800
port10801: 10801
port80: 80
port53: 53
@@ -89,15 +91,7 @@ autoscaling:
# targetMemoryUtilizationPercentage: 80
# Additional volumes on the output Deployment definition.
volumes:
- configMap:
defaultMode: 420
items:
- key: ENVOY_CONFIG
path: envoy-config.yaml
name: kubevpn-traffic-manager
optional: false
name: envoy-config
volumes: {}
# Additional volumeMounts on the output Deployment definition.
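The chart now leaves tls.crt/tls.key empty ("will auto generate in job") instead of shipping a baked-in pair. A minimal sketch of how such a job could mint and base64-encode a self-signed pair, using only the Go standard library; this is an illustration, not the chart's actual generator (see the once command later in this diff):

package main

import (
    "crypto/rand"
    "crypto/rsa"
    "crypto/x509"
    "crypto/x509/pkix"
    "encoding/base64"
    "encoding/pem"
    "fmt"
    "math/big"
    "time"
)

func main() {
    key, err := rsa.GenerateKey(rand.Reader, 2048)
    if err != nil {
        panic(err)
    }
    // CommonName matches the traffic-manager identity in the old defaults.
    tmpl := &x509.Certificate{
        SerialNumber: big.NewInt(time.Now().Unix()),
        Subject:      pkix.Name{CommonName: "kubevpn-traffic-manager"},
        DNSNames:     []string{"kubevpn-traffic-manager"},
        NotBefore:    time.Now(),
        NotAfter:     time.Now().AddDate(1, 0, 0),
        KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
        ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
    }
    der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
    if err != nil {
        panic(err)
    }
    crt := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})
    pk := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
    // The old defaults were stored base64-encoded, so encode the same way.
    fmt.Println(base64.StdEncoding.EncodeToString(crt))
    fmt.Println(base64.StdEncoding.EncodeToString(pk))
}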

View File

@@ -1,6 +1,7 @@
package cmds
import (
"errors"
"fmt"
"io"
"os"
@@ -21,15 +22,16 @@ import (
// CmdAlias
/**
Name: test
Description: this is a test environment
Needs: test1
Flags:
- connect
- --kubeconfig=~/.kube/config
- --namespace=test
- --lite
---
Name: test1
Description: this is another test environment
Flags:
- connect
- --kubeconfig=~/.kube/jumper_config
@@ -43,9 +45,15 @@ func CmdAlias(f cmdutil.Factory) *cobra.Command {
Short: i18n.T("Config file alias to execute command simply"),
Long: templates.LongDesc(i18n.T(`
Config file alias to execute command simply, just like ssh alias config
Please point to an existing, complete config file:
1. Via the command-line flag --kubevpnconfig
2. Via the KUBEVPNCONFIG environment variable
3. In your home directory as ~/.kubevpn/config.yaml
It will read ~/.kubevpn/config.yaml file as config, also support special file path
by flag -f. It also support depends relationship, like one cluster api server needs to
by flag -f. It also supports depends relationship, like one cluster api server needs to
access via another cluster: use the Needs syntax, and it will act on the needed cluster first
and then on the target cluster
`)),
@@ -53,20 +61,29 @@ func CmdAlias(f cmdutil.Factory) *cobra.Command {
If you have following config in your ~/.kubevpn/config.yaml
Name: dev
Description: This is dev k8s environment
Needs: jumper
Flags:
- connect
- --kubeconfig=~/.kube/config
- --namespace=default
- --lite
---
Name: jumper
Description: This is jumper k8s environment
Flags:
- connect
- --kubeconfig=~/.kube/jumper_config
- --namespace=test
- --extra-hosts=xxx.com
Name: all-in-one
Description: use the flag '--kubeconfig-json'; no need to specify a kubeconfig path
Flags:
- connect
- --kubeconfig-json={"apiVersion":"v1","clusters":[{"cluster":{"certificate-authority-data":"LS0tLS1CRU..."}}]}
- --namespace=test
- --extra-hosts=xxx.com
Config file supports four fields: Name, Description, Needs, Flags
@@ -75,6 +92,9 @@ func CmdAlias(f cmdutil.Factory) *cobra.Command {
# kubevpn alias jumper, just connect to cluster jumper
kubevpn alias jumper
# supports the flag '--kubeconfig-json'; it saves the kubeconfig into ~/.kubevpn/temp/[ALIAS_NAME]
kubevpn alias all-in-one
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
if localFile != "" {
@@ -92,12 +112,20 @@ func CmdAlias(f cmdutil.Factory) *cobra.Command {
if err != nil {
return err
}
for _, config := range configs {
c := exec.Command(name, config.Flags...)
for _, conf := range configs {
err = ParseArgs(cmd, &conf)
if err != nil {
return err
}
c := exec.Command(name, conf.Flags...)
c.Stdout = os.Stdout
c.Stdin = os.Stdin
c.Stderr = os.Stderr
fmt.Println(c.Args)
fmt.Println(fmt.Sprintf("Name: %s", conf.Name))
if conf.Description != "" {
fmt.Println(fmt.Sprintf("Description: %s", conf.Description))
}
fmt.Println(fmt.Sprintf("Command: %v", c.Args))
err = c.Run()
if err != nil {
return err
@@ -106,7 +134,7 @@ func CmdAlias(f cmdutil.Factory) *cobra.Command {
return nil
},
}
cmd.Flags().StringVarP(&localFile, "file", "f", config.GetConfigFilePath(), "Config file location")
cmd.Flags().StringVarP(&localFile, "kubevpnconfig", "f", util.If(os.Getenv("KUBEVPNCONFIG") != "", os.Getenv("KUBEVPNCONFIG"), config.GetConfigFile()), "Path to the kubevpnconfig file to use for CLI requests.")
cmd.Flags().StringVarP(&remoteAddr, "remote", "r", "", "Remote config file, eg: https://raw.githubusercontent.com/kubenetworks/kubevpn/master/pkg/config/config.yaml")
return cmd
}
@@ -122,7 +150,7 @@ func ParseAndGet(localFile, remoteAddr string, aliasName string) ([]Config, erro
path = remoteAddr
content, err = util.DownloadFileStream(path)
} else {
path = config.GetConfigFilePath()
path = config.GetConfigFile()
content, err = os.ReadFile(path)
}
if err != nil {
@@ -137,7 +165,13 @@ func ParseAndGet(localFile, remoteAddr string, aliasName string) ([]Config, erro
return nil, err
}
if len(configs) == 0 {
err = fmt.Errorf("can not found any alias for name %s, please check your config file %s", aliasName, path)
var names []string
for _, c := range list {
if c.Name != "" {
names = append(names, c.Name)
}
}
err = errors.New(fmt.Sprintf("Can't find any alias for the name: '%s', avaliable: \n[\"%s\"]\nPlease check config file: %s", aliasName, strings.Join(names, "\", \""), path))
return nil, err
}
return configs, nil
@@ -180,11 +214,12 @@ func GetConfigs(configs []Config, name string) ([]Config, error) {
return result, nil
}
}
return nil, fmt.Errorf("detect loop jump: %s, please check your config", strings.Join(append(set, name), " -> "))
return nil, fmt.Errorf("loop jump detected: %s. verify your configuration", strings.Join(append(set, name), " -> "))
}
type Config struct {
Name string `yaml:"Name"`
Needs string `yaml:"Needs,omitempty"`
Flags []string `yaml:"Flags,omitempty"`
Name string `yaml:"Name"`
Description string `yaml:"Description"`
Needs string `yaml:"Needs,omitempty"`
Flags []string `yaml:"Flags,omitempty"`
}
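For readers following the Needs handling above: resolution walks the Needs chain, runs dependencies first, and the improved error fires when the chain revisits a name. A self-contained sketch of that logic under the Config struct shown above; the real GetConfigs may differ in detail:

package main

import (
    "fmt"
    "strings"
)

type Config struct {
    Name  string
    Needs string
    Flags []string
}

// resolve returns the configs to run, dependencies first, and reports a
// cycle in the same style as the "loop jump detected" error above.
func resolve(all []Config, name string, seen []string) ([]Config, error) {
    for _, s := range seen {
        if s == name {
            return nil, fmt.Errorf("loop jump detected: %s. verify your configuration", strings.Join(append(seen, name), " -> "))
        }
    }
    for _, c := range all {
        if c.Name != name {
            continue
        }
        if c.Needs == "" {
            return []Config{c}, nil
        }
        deps, err := resolve(all, c.Needs, append(seen, name))
        if err != nil {
            return nil, err
        }
        return append(deps, c), nil // needed cluster first, target last
    }
    return nil, fmt.Errorf("no alias named %s", name)
}

func main() {
    configs := []Config{{Name: "dev", Needs: "jumper"}, {Name: "jumper"}}
    order, _ := resolve(configs, "dev", nil) // error ignored for brevity
    for _, c := range order {
        fmt.Println(c.Name) // prints jumper, then dev
    }
}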

View File

@@ -1,9 +1,11 @@
package cmds
import (
"log"
"context"
"reflect"
"testing"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
func TestAlias(t *testing.T) {
@@ -22,7 +24,7 @@ Flags:
- --extra-hosts=xxx.com`
_, err := ParseConfig([]byte(str))
if err != nil {
log.Fatal(err)
plog.G(context.Background()).Fatal(err)
}
}
@@ -42,7 +44,7 @@ Flags:
- --extra-hosts=xxx.com`
_, err := ParseConfig([]byte(str))
if err != nil {
log.Fatal(err)
plog.G(context.Background()).Fatal(err)
}
}
@@ -206,11 +208,11 @@ Flags:
for _, datum := range data {
configs, err := ParseConfig([]byte(datum.Config))
if err != nil {
log.Fatal(err)
plog.G(context.Background()).Fatal(err)
}
getConfigs, err := GetConfigs(configs, datum.Run)
if err != nil && !datum.ExpectError {
log.Fatal(err)
plog.G(context.Background()).Fatal(err)
} else if err != nil {
}
if datum.ExpectError {
@@ -221,7 +223,7 @@ Flags:
c = append(c, config.Name)
}
if !reflect.DeepEqual(c, datum.ExpectOrder) {
log.Fatalf("not match, expect: %v, real: %v", datum.ExpectOrder, c)
plog.G(context.Background()).Fatalf("Not match, expect: %v, real: %v", datum.ExpectOrder, c)
}
}
}
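The tests above switch from the global logrus logger to plog.G(ctx). pkg/log is not shown in this diff; assuming it mirrors the containerd-style pattern of stashing a logrus entry in a context, it plausibly looks like this sketch (illustrative only):

package plog

import (
    "context"

    "github.com/sirupsen/logrus"
)

type loggerKey struct{}

// G returns the logger stored in ctx, or a standard-logger entry if none is set.
func G(ctx context.Context) *logrus.Entry {
    if ctx != nil {
        if e, ok := ctx.Value(loggerKey{}).(*logrus.Entry); ok {
            return e
        }
    }
    return logrus.NewEntry(logrus.StandardLogger())
}

// WithLogger returns a copy of ctx carrying e, for the G lookup above.
func WithLogger(ctx context.Context, e *logrus.Entry) context.Context {
    return context.WithValue(ctx, loggerKey{}, e)
}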

View File

@@ -1,173 +0,0 @@
package cmds
import (
"fmt"
"io"
"os"
pkgerr "github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
utilcomp "k8s.io/kubectl/pkg/util/completion"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
// CmdClone multiple cluster operate, can start up one deployment to another cluster
// kubectl exec POD_NAME -c CONTAINER_NAME /sbin/killall5 or ephemeralcontainers
func CmdClone(f cmdutil.Factory) *cobra.Command {
var options = handler.CloneOptions{}
var sshConf = &util.SshConfig{}
var extraRoute = &handler.ExtraRouteInfo{}
var transferImage bool
var syncDir string
cmd := &cobra.Command{
Use: "clone",
Short: i18n.T("Clone workloads to target-kubeconfig cluster with same volume、env、and network"),
Long: templates.LongDesc(i18n.T(`
Clone workloads to target-kubeconfig cluster with same volume、env、and network
In this way, you can startup another deployment in same cluster or not, but with different image version,
it also support service mesh proxy. only traffic with special header will hit to cloned_resource.
`)),
Example: templates.Examples(i18n.T(`
# clone
- clone deployment in current cluster and current namespace
kubevpn clone deployment/productpage
- clone deployment in current cluster with different namespace
kubevpn clone deployment/productpage -n test
- clone deployment to another cluster
kubevpn clone deployment/productpage --target-kubeconfig ~/.kube/other-kubeconfig
- clone multiple workloads
kubevpn clone deployment/authors deployment/productpage
or
kubevpn clone deployment authors productpage
# clone with mesh, traffic with header a=1, will hit cloned workloads, otherwise hit origin workloads
kubevpn clone deployment/productpage --headers a=1
# clone workloads which api-server behind of bastion host or ssh jump host
kubevpn clone deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem --headers a=1
# It also support ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn clone service/productpage --ssh-alias <alias> --headers a=1
# Support ssh auth GSSAPI
kubevpn clone service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
kubevpn clone service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
kubevpn clone service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
// not support temporally
if options.Engine == config.EngineGvisor {
return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
}
// startup daemon process and sudo process
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
_, _ = fmt.Fprintf(os.Stdout, "You must specify the type of resource to proxy. %s\n\n", cmdutil.SuggestAPIResources("kubevpn"))
fullCmdName := cmd.Parent().CommandPath()
usageString := "Required resource not specified."
if len(fullCmdName) > 0 && cmdutil.IsSiblingCommandExists(cmd, "explain") {
usageString = fmt.Sprintf("%s\nUse \"%s explain <resource>\" for a detailed description of that resource (e.g. %[2]s explain pods).", usageString, fullCmdName)
}
return cmdutil.UsageErrorf(cmd, usageString)
}
// special empty string, eg: --target-registry ""
options.IsChangeTargetRegistry = cmd.Flags().Changed("target-registry")
if syncDir != "" {
local, remote, err := util.ParseDirMapping(syncDir)
if err != nil {
return pkgerr.Wrapf(err, "options 'sync' is invalid, %s", syncDir)
}
options.LocalDir = local
options.RemoteDir = remote
} else {
options.RemoteDir = config.DefaultRemoteDir
}
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
if err != nil {
return err
}
logLevel := log.ErrorLevel
if config.Debug {
logLevel = log.DebugLevel
}
req := &rpc.CloneRequest{
KubeconfigBytes: string(bytes),
Namespace: ns,
Headers: options.Headers,
Workloads: args,
ExtraRoute: extraRoute.ToRPC(),
OriginKubeconfigPath: util.GetKubeConfigPath(f),
Engine: string(options.Engine),
SshJump: sshConf.ToRPC(),
TargetKubeconfig: options.TargetKubeconfig,
TargetNamespace: options.TargetNamespace,
TargetContainer: options.TargetContainer,
TargetImage: options.TargetImage,
TargetRegistry: options.TargetRegistry,
IsChangeTargetRegistry: options.IsChangeTargetRegistry,
TransferImage: transferImage,
Image: config.Image,
Level: int32(logLevel),
LocalDir: options.LocalDir,
RemoteDir: options.RemoteDir,
}
cli := daemon.GetClient(false)
resp, err := cli.Clone(cmd.Context(), req)
if err != nil {
return err
}
for {
recv, err := resp.Recv()
if err == io.EOF {
break
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
return nil
} else if err != nil {
return err
}
fmt.Fprint(os.Stdout, recv.GetMessage())
}
util.Print(os.Stdout, "Now clone workloads running successfully on other cluster, enjoy it :)")
return nil
},
}
cmd.Flags().StringToStringVarP(&options.Headers, "headers", "H", map[string]string{}, "Traffic with special headers (use `and` to match all headers) with reverse it to local PC, If not special, redirect all traffic to local PC. eg: --headers a=1 --headers b=2")
cmd.Flags().BoolVar(&config.Debug, "debug", false, "Enable debug mode or not, true or false")
cmd.Flags().StringVar(&config.Image, "image", config.Image, "Use this image to startup container")
cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
cmd.Flags().StringVar((*string)(&options.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
cmd.Flags().StringVar(&options.TargetImage, "target-image", "", "Clone container use this image to startup container, if not special, use origin image")
cmd.Flags().StringVar(&options.TargetContainer, "target-container", "", "Clone container use special image to startup this container, if not special, use origin image")
cmd.Flags().StringVar(&options.TargetNamespace, "target-namespace", "", "Clone workloads in this namespace, if not special, use origin namespace")
cmd.Flags().StringVar(&options.TargetKubeconfig, "target-kubeconfig", "", "Clone workloads will create in this cluster, if not special, use origin cluster")
cmd.Flags().StringVar(&options.TargetRegistry, "target-registry", "", "Clone workloads will create this registry domain to replace origin registry, if not special, use origin registry")
cmd.Flags().StringVar(&syncDir, "sync", "", "Sync local dir to remote pod dir. format: LOCAL_DIR:REMOTE_DIR, eg: ~/code:/app/code")
handler.AddExtraRoute(cmd.Flags(), extraRoute)
util.AddSshFlags(cmd.Flags(), sshConf)
cmd.ValidArgsFunction = utilcomp.ResourceTypeAndNameCompletionFunc(f)
return cmd
}

View File

@@ -1,99 +0,0 @@
package cmds
import (
"fmt"
"os"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdConfig(f cmdutil.Factory) *cobra.Command {
cmd := &cobra.Command{
Use: "config",
Short: "Proxy kubeconfig which behind of ssh jump server",
}
cmd.AddCommand(cmdConfigAdd(f))
cmd.AddCommand(cmdConfigRemove(f))
return cmd
}
func cmdConfigAdd(f cmdutil.Factory) *cobra.Command {
var sshConf = &util.SshConfig{}
cmd := &cobra.Command{
Use: "add",
Short: i18n.T("Proxy kubeconfig"),
Long: templates.LongDesc(i18n.T(`proxy kubeconfig which behind of ssh jump server`)),
Example: templates.Examples(i18n.T(`
# proxy api-server which api-server behind of bastion host or ssh jump host
kubevpn config add --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# It also support ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn config add --ssh-alias <alias>
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
// startup daemon process and sudo process
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
if err != nil {
return err
}
req := &rpc.ConfigAddRequest{
KubeconfigBytes: string(bytes),
Namespace: ns,
SshJump: sshConf.ToRPC(),
}
cli := daemon.GetClient(false)
resp, err := cli.ConfigAdd(cmd.Context(), req)
if err != nil {
return err
}
fmt.Fprint(os.Stdout, resp.ClusterID)
return nil
},
}
util.AddSshFlags(cmd.Flags(), sshConf)
return cmd
}
func cmdConfigRemove(f cmdutil.Factory) *cobra.Command {
cmd := &cobra.Command{
Use: "remove",
Short: i18n.T("Remove proxy kubeconfig"),
Long: templates.LongDesc(i18n.T(`
Remove proxy kubeconfig which behind of ssh jump server
`)),
Example: templates.Examples(i18n.T(`
# remove proxy api-server which api-server behind of bastion host or ssh jump host
kubevpn config remove --kubeconfig /var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/947048961.kubeconfig
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
// startup daemon process and sudo process
return daemon.StartupDaemon(cmd.Context())
},
Args: cobra.MatchAll(cobra.OnlyValidArgs, cobra.ExactArgs(1)),
RunE: func(cmd *cobra.Command, args []string) error {
req := &rpc.ConfigRemoveRequest{
ClusterID: args[0],
}
cli := daemon.GetClient(false)
_, err := cli.ConfigRemove(cmd.Context(), req)
if err != nil {
return err
}
return nil
},
}
return cmd
}

View File

@@ -3,11 +3,11 @@ package cmds
import (
"context"
"fmt"
"io"
"os"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
@@ -19,14 +19,18 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
)
func CmdConnect(f cmdutil.Factory) *cobra.Command {
var connect = &handler.ConnectOptions{}
var extraRoute = &handler.ExtraRouteInfo{}
var sshConf = &util.SshConfig{}
var transferImage, foreground, lite bool
var sshConf = &pkgssh.SshConfig{}
var transferImage, foreground bool
var imagePullSecretName string
var managerNamespace string
cmd := &cobra.Command{
Use: "connect",
Short: i18n.T("Connect to kubernetes cluster network"),
@@ -34,9 +38,9 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
Connect to kubernetes cluster network
After connect to kubernetes cluster network, you can ping PodIP or
curl ServiceIP in local PC, it also support k8s dns resolve.
curl ServiceIP in local PC, it also supports k8s DNS resolution.
Like: curl authors/authors.default/authors.default.svc/authors.default.svc.cluster.local.
So you can startup your application in local PC. depends on anything in
So you can start up your application in local PC. Depending on anything in
the k8s cluster is ok; connect to it just like inside the k8s cluster.
`)),
Example: templates.Examples(i18n.T(`
@@ -46,7 +50,7 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
# Connect to api-server behind of bastion host or ssh jump host
kubevpn connect --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# It also support ProxyJump, like
# It also supports ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
@@ -61,109 +65,107 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
kubevpn connect --ssh-jump "--ssh-addr jump.naison.org --ssh-username naison --gssapi-password xxx" --ssh-username root --ssh-addr 127.0.0.1:22 --ssh-keyfile ~/.ssh/dst.pem
`)),
PreRunE: func(cmd *cobra.Command, args []string) error {
plog.InitLoggerForClient()
// startup daemon process and sudo process
err := daemon.StartupDaemon(cmd.Context())
if err != nil {
return err
}
// not support temporally
if connect.Engine == config.EngineGvisor {
return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
if transferImage {
err = regctl.TransferImageWithRegctl(cmd.Context(), config.OriginImage, config.Image)
}
return nil
return err
},
RunE: func(cmd *cobra.Command, args []string) error {
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
if err != nil {
return err
}
logLevel := log.ErrorLevel
if config.Debug {
logLevel = log.DebugLevel
if !sshConf.IsEmpty() {
if ip := util.GetAPIServerFromKubeConfigBytes(bytes); ip != nil {
extraRoute.ExtraCIDR = append(extraRoute.ExtraCIDR, ip.String())
}
}
req := &rpc.ConnectRequest{
KubeconfigBytes: string(bytes),
Namespace: ns,
ExtraRoute: extraRoute.ToRPC(),
Engine: string(connect.Engine),
OriginKubeconfigPath: util.GetKubeConfigPath(f),
SshJump: sshConf.ToRPC(),
TransferImage: transferImage,
Image: config.Image,
Level: int32(logLevel),
SshJump: sshConf.ToRPC(),
TransferImage: transferImage,
Image: config.Image,
ImagePullSecretName: imagePullSecretName,
Level: int32(util.If(config.Debug, log.DebugLevel, log.InfoLevel)),
ManagerNamespace: managerNamespace,
}
// if is foreground, send to sudo daemon server
cli := daemon.GetClient(false)
if lite {
resp, err := cli.ConnectFork(cmd.Context(), req)
if err != nil {
return err
}
for {
recv, err := resp.Recv()
if err == io.EOF {
break
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
return nil
} else if err != nil {
return err
}
fmt.Fprint(os.Stdout, recv.GetMessage())
}
} else {
resp, err := cli.Connect(cmd.Context(), req)
if err != nil {
return err
}
for {
recv, err := resp.Recv()
if err == io.EOF {
break
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
return nil
} else if err != nil {
return err
}
fmt.Fprint(os.Stdout, recv.GetMessage())
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
var resp grpc.BidiStreamingClient[rpc.ConnectRequest, rpc.ConnectResponse]
resp, err = cli.Connect(context.Background())
if err != nil {
return err
}
err = resp.Send(req)
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.ConnectResponse](cmd.Context(), resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
}
return err
}
if !foreground {
util.Print(os.Stdout, "Now you can access resources in the kubernetes cluster, enjoy it :)")
_, _ = fmt.Fprintln(os.Stdout, config.Slogan)
} else {
<-cmd.Context().Done()
disconnect, err := cli.Disconnect(context.Background(), &rpc.DisconnectRequest{
KubeconfigBytes: ptr.To(string(bytes)),
Namespace: ptr.To(ns),
SshJump: sshConf.ToRPC(),
})
err = disconnect(cli, bytes, ns, sshConf)
if err != nil {
log.Errorf("disconnect error: %v", err)
return err
}
for {
recv, err := disconnect.Recv()
if err == io.EOF {
break
} else if err != nil {
log.Errorf("receive disconnect error: %v", err)
return err
}
log.Info(recv.Message)
}
log.Info("disconnect successfully")
_, _ = fmt.Fprint(os.Stdout, "Disconnect completed")
}
return nil
},
}
cmd.Flags().BoolVar(&config.Debug, "debug", false, "enable debug mode or not, true or false")
cmd.Flags().StringVar(&config.Image, "image", config.Image, "use this image to startup container")
cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
cmd.Flags().StringVar((*string)(&connect.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName)
cmd.Flags().BoolVar(&foreground, "foreground", false, "Hang up")
cmd.Flags().BoolVar(&lite, "lite", false, "connect to multiple cluster in lite mode, you needs to special this options")
cmd.Flags().StringVar(&managerNamespace, "manager-namespace", "", "The namespace where the traffic manager is to be found. Only works in cluster mode (install kubevpn server by helm)")
handler.AddExtraRoute(cmd.Flags(), extraRoute)
util.AddSshFlags(cmd.Flags(), sshConf)
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
return cmd
}
func disconnect(cli rpc.DaemonClient, bytes []byte, ns string, sshConf *pkgssh.SshConfig) error {
resp, err := cli.Disconnect(context.Background())
if err != nil {
plog.G(context.Background()).Errorf("Disconnect error: %v", err)
return err
}
err = resp.Send(&rpc.DisconnectRequest{
KubeconfigBytes: ptr.To(string(bytes)),
Namespace: ptr.To(ns),
SshJump: sshConf.ToRPC(),
})
if err != nil {
plog.G(context.Background()).Errorf("Disconnect error: %v", err)
return err
}
err = util.PrintGRPCStream[rpc.DisconnectResponse](nil, resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
}
return err
}
return nil
}
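The connect and disconnect paths above now share util.PrintGRPCStream to drain server messages. Its exact signature is not shown in this diff; a plausible generic shape, under the assumption that every response type exposes GetMessage() and that a nil context is tolerated (one caller above passes nil):

package util

import (
    "context"
    "fmt"
    "io"
    "os"
)

type recvStream[T any] interface {
    Recv() (*T, error)
}

// PrintGRPCStream drains a gRPC stream and prints each message until EOF.
func PrintGRPCStream[T any](ctx context.Context, stream recvStream[T]) error {
    for {
        // Tolerate a nil ctx, as the disconnect helper above passes nil.
        if ctx != nil && ctx.Err() != nil {
            return ctx.Err()
        }
        msg, err := stream.Recv()
        if err == io.EOF {
            return nil
        }
        if err != nil {
            return err // callers map codes.Canceled to a clean exit
        }
        // Assumes the generated response type exposes GetMessage().
        if m, ok := any(msg).(interface{ GetMessage() string }); ok {
            fmt.Fprint(os.Stdout, m.GetMessage())
        }
    }
}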

View File

@@ -0,0 +1,98 @@
package cmds
import (
"bytes"
"fmt"
"os"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/printers"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
)
func CmdConnection(f cmdutil.Factory) *cobra.Command {
cmd := &cobra.Command{
Use: "connection",
Short: "Connection management",
Aliases: []string{"conn"},
}
cmd.AddCommand(cmdConnectionList(f))
cmd.AddCommand(cmdConnectionUse(f))
return cmd
}
func cmdConnectionList(f cmdutil.Factory) *cobra.Command {
cmd := &cobra.Command{
Use: "list",
Short: i18n.T("List all connections"),
Aliases: []string{"ls"},
Long: templates.LongDesc(i18n.T(`List all connections to the cluster network`)),
Example: templates.Examples(i18n.T(`
# list all connections
kubevpn connection ls
# list connections by alias conn
kubevpn conn ls
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
// startup daemon process and sudo process
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
req := &rpc.ConnectionListRequest{}
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
resp, err := cli.ConnectionList(cmd.Context(), req)
if err != nil {
return err
}
var sb = new(bytes.Buffer)
w := printers.GetNewTabWriter(sb)
genConnectMsg(w, resp.CurrentConnectionID, resp.List)
_ = w.Flush()
_, _ = fmt.Fprint(os.Stdout, sb.String())
return nil
},
}
return cmd
}
func cmdConnectionUse(f cmdutil.Factory) *cobra.Command {
cmd := &cobra.Command{
Use: "use",
Short: i18n.T("Use a specific connection"),
Long: templates.LongDesc(i18n.T(`
Use a specific connection.
`)),
Example: templates.Examples(i18n.T(`
# use a specific connection; change the current connection to the given ID, cmd sync/unsync will use this connection
kubevpn connection use 03dc50feb8c3
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
// startup daemon process and sudo process
return daemon.StartupDaemon(cmd.Context())
},
Args: cobra.MatchAll(cobra.OnlyValidArgs, cobra.ExactArgs(1)),
RunE: func(cmd *cobra.Command, args []string) error {
req := &rpc.ConnectionUseRequest{
ConnectionID: args[0],
}
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
_, err = cli.ConnectionUse(cmd.Context(), req)
if err != nil {
return err
}
return nil
},
}
return cmd
}
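genConnectMsg is not part of this diff; to make the tabwriter plumbing above concrete, here is an illustrative listing with invented row fields and columns (the real rpc message types may differ):

package main

import (
    "fmt"
    "os"
    "text/tabwriter"
)

type row struct{ ID, Cluster, Namespace string }

func main() {
    current := "03dc50feb8c3" // the ID format used in the examples above
    list := []row{
        {"03dc50feb8c3", "dev", "default"},
        {"9a1b2c3d4e5f", "jumper", "test"}, // hypothetical second connection
    }
    w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
    fmt.Fprintln(w, "CURRENT\tID\tCLUSTER\tNAMESPACE")
    for _, r := range list {
        marker := ""
        if r.ID == current {
            marker = "*" // mark the connection that sync/unsync will use
        }
        fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", marker, r.ID, r.Cluster, r.Namespace)
    }
    _ = w.Flush()
}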

View File

@@ -2,11 +2,9 @@ package cmds
import (
"context"
"time"
"github.com/docker/docker/libnetwork/resolvconf"
miekgdns "github.com/miekg/dns"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
@@ -15,14 +13,12 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/controlplane"
"github.com/wencaiwulue/kubevpn/v2/pkg/dns"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdControlPlane(_ cmdutil.Factory) *cobra.Command {
var (
watchDirectoryFilename string
port uint = 9002
)
func CmdControlPlane(f cmdutil.Factory) *cobra.Command {
var port uint = 9002
cmd := &cobra.Command{
Use: "control-plane",
Hidden: true,
@@ -31,23 +27,18 @@ func CmdControlPlane(_ cmdutil.Factory) *cobra.Command {
Control-plane is an Envoy xDS server that distributes Envoy route configuration
`)),
RunE: func(cmd *cobra.Command, args []string) error {
util.InitLoggerForServer(config.Debug)
go util.StartupPProf(0)
go func(ctx context.Context) {
go util.StartupPProfForServer(0)
go func() {
conf, err := miekgdns.ClientConfigFromFile(resolvconf.Path())
if err != nil {
return
plog.G(context.Background()).Fatal(err)
}
for ctx.Err() == nil {
dns.ListenAndServe("udp", ":53", conf)
time.Sleep(time.Second * 5)
}
}(cmd.Context())
err := controlplane.Main(cmd.Context(), watchDirectoryFilename, port, log.StandardLogger())
plog.G(context.Background()).Fatal(dns.ListenAndServe("udp", ":53", conf))
}()
err := controlplane.Main(cmd.Context(), f, port, plog.G(context.Background()))
return err
},
}
cmd.Flags().StringVarP(&watchDirectoryFilename, "watchDirectoryFilename", "w", "/etc/envoy/envoy-config.yaml", "full path to directory to watch for files")
cmd.Flags().BoolVar(&config.Debug, "debug", false, "true/false")
return cmd
}
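The control-plane now also serves DNS on :53, reading upstreams from resolv.conf. The pkg/dns server is not shown here; a minimal forwarding resolver built on the same miekg/dns primitives, as a sketch of what ListenAndServe plausibly does:

package main

import (
    "net"

    miekgdns "github.com/miekg/dns"
)

func main() {
    conf, err := miekgdns.ClientConfigFromFile("/etc/resolv.conf")
    if err != nil {
        panic(err)
    }
    upstream := net.JoinHostPort(conf.Servers[0], conf.Port)
    // Relay every query to the first upstream from resolv.conf.
    miekgdns.HandleFunc(".", func(w miekgdns.ResponseWriter, r *miekgdns.Msg) {
        c := new(miekgdns.Client)
        resp, _, err := c.Exchange(r, upstream)
        if err != nil {
            m := new(miekgdns.Msg)
            m.SetRcode(r, miekgdns.RcodeServerFailure)
            _ = w.WriteMsg(m)
            return
        }
        _ = w.WriteMsg(resp)
    })
    srv := &miekgdns.Server{Addr: ":53", Net: "udp"}
    if err := srv.ListenAndServe(); err != nil {
        panic(err)
    }
}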

View File

@@ -1,140 +0,0 @@
package cmds
import (
"fmt"
"os"
"strings"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericiooptions"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/completion"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/cp"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
var cpExample = templates.Examples(i18n.T(`
# !!!Important Note!!!
# Requires that the 'tar' binary is present in your container
# image. If 'tar' is not present, 'kubectl cp' will fail.
#
# For advanced use cases, such as symlinks, wildcard expansion or
# file mode preservation, consider using 'kubectl exec'.
# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace <some-namespace>
tar cf - /tmp/foo | kubectl exec -i -n <some-namespace> <some-pod> -- tar xf - -C /tmp/bar
# Copy /tmp/foo from a remote pod to /tmp/bar locally
kubectl exec -n <some-namespace> <some-pod> -- tar cf - /tmp/foo | tar xf - -C /tmp/bar
# Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in the default namespace
kubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir
# Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific container
kubectl cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container>
# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace <some-namespace>
kubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar
# Copy /tmp/foo from a remote pod to /tmp/bar locally
kubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar
# copy reverse proxy api-server behind of bastion host or ssh jump host
kubevpn cp deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# It also support ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn cp deployment/productpage:/tmp/foo /tmp/bar --ssh-alias <alias>
# Support ssh auth GSSAPI
kubevpn cp deployment/productpage:/tmp/foo /tmp/bar --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
kubevpn cp deployment/productpage:/tmp/foo /tmp/bar --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
kubevpn cp deployment/productpage:/tmp/foo /tmp/bar --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`,
))
func CmdCp(f cmdutil.Factory) *cobra.Command {
o := cp.NewCopyOptions(genericiooptions.IOStreams{
In: os.Stdin,
Out: os.Stdout,
ErrOut: os.Stderr,
})
var sshConf = &util.SshConfig{}
cmd := &cobra.Command{
Use: "cp <file-spec-src> <file-spec-dest>",
DisableFlagsInUseLine: true,
Hidden: true,
Short: i18n.T("Copy files and directories to and from containers"),
Long: i18n.T("Copy files and directories to and from containers. Different between kubectl cp is it will de-reference symbol link."),
Example: cpExample,
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
cmdutil.CheckErr(util.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false))
var comps []string
if len(args) == 0 {
if strings.IndexAny(toComplete, "/.~") == 0 {
// Looks like a path, do nothing
} else if strings.Contains(toComplete, ":") {
// TODO: complete remote files in the pod
} else if idx := strings.Index(toComplete, "/"); idx > 0 {
// complete <namespace>/<pod>
namespace := toComplete[:idx]
template := "{{ range .items }}{{ .metadata.namespace }}/{{ .metadata.name }}: {{ end }}"
comps = completion.CompGetFromTemplate(&template, f, namespace, []string{"pod"}, toComplete)
} else {
// Complete namespaces followed by a /
for _, ns := range completion.CompGetResource(f, "namespace", toComplete) {
comps = append(comps, fmt.Sprintf("%s/", ns))
}
// Complete pod names followed by a :
for _, pod := range completion.CompGetResource(f, "pod", toComplete) {
comps = append(comps, fmt.Sprintf("%s:", pod))
}
// Finally, provide file completion if we need to.
// We only do this if:
// 1- There are other completions found (if there are no completions,
// the shell will do file completion itself)
// 2- If there is some input from the user (or else we will end up
// listing the entire content of the current directory which could
// be too many choices for the user)
if len(comps) > 0 && len(toComplete) > 0 {
if files, err := os.ReadDir("."); err == nil {
for _, file := range files {
filename := file.Name()
if strings.HasPrefix(filename, toComplete) {
if file.IsDir() {
filename = fmt.Sprintf("%s/", filename)
}
// We are completing a file prefix
comps = append(comps, filename)
}
}
}
} else if len(toComplete) == 0 {
// If the user didn't provide any input to complete,
// we provide a hint that a path can also be used
comps = append(comps, "./", "/")
}
}
}
return comps, cobra.ShellCompDirectiveNoSpace
},
Run: func(cmd *cobra.Command, args []string) {
cmdutil.CheckErr(o.Complete(f, cmd, args))
cmdutil.CheckErr(o.Validate())
cmdutil.CheckErr(o.Run())
},
}
cmdutil.AddContainerVarFlags(cmd, &o.Container, o.Container)
cmd.Flags().BoolVarP(&o.NoPreserve, "no-preserve", "", false, "The copied file/directory's ownership and permissions will not be preserved in the container")
cmd.Flags().IntVarP(&o.MaxTries, "retries", "", 0, "Set number of retries to complete a copy operation from a container. Specify 0 to disable or any negative value for infinite retrying. The default is 0 (no retry).")
util.AddSshFlags(cmd.Flags(), sshConf)
return cmd
}

View File

@@ -16,11 +16,11 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/action"
"github.com/wencaiwulue/kubevpn/v2/pkg/dns"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdDaemon(_ cmdutil.Factory) *cobra.Command {
func CmdDaemon(cmdutil.Factory) *cobra.Command {
var opt = &daemon.SvrOption{}
cmd := &cobra.Command{
Use: "daemon",
@@ -35,10 +35,13 @@ func CmdDaemon(_ cmdutil.Factory) *cobra.Command {
if opt.IsSudo {
go util.StartupPProf(config.SudoPProfPort)
_ = os.RemoveAll("/etc/resolver")
_ = dns.CleanupHosts()
_ = util.CleanupTempKubeConfigFile()
} else {
go util.StartupPProf(config.PProfPort)
}
return initLogfile(action.GetDaemonLogPath())
return initLogfile(config.GetDaemonLogPath(opt.IsSudo))
},
RunE: func(cmd *cobra.Command, args []string) (err error) {
defer opt.Stop()
@@ -49,7 +52,7 @@ func CmdDaemon(_ cmdutil.Factory) *cobra.Command {
if opt.IsSudo {
for _, profile := range pprof.Profiles() {
func() {
file, e := os.Create(filepath.Join(config.PprofPath, profile.Name()))
file, e := os.Create(filepath.Join(config.GetPProfPath(), profile.Name()))
if e != nil {
return
}

View File

@@ -1,166 +0,0 @@
package cmds
import (
"fmt"
"os"
dockercomp "github.com/docker/cli/cli/command/completion"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/completion"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/dev"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdDev(f cmdutil.Factory) *cobra.Command {
client, cli, err := util.GetClient()
if err != nil {
log.Fatal(err)
}
var options = &dev.Options{
Factory: f,
NoProxy: false,
Cli: client,
DockerCli: cli,
ExtraRouteInfo: handler.ExtraRouteInfo{},
}
var sshConf = &util.SshConfig{}
var transferImage bool
cmd := &cobra.Command{
Use: "dev TYPE/NAME [-c CONTAINER] [flags] -- [args...]",
Short: i18n.T("Startup your kubernetes workloads in local Docker container"),
Long: templates.LongDesc(i18n.T(`
Startup your kubernetes workloads in local Docker container with same volume、env、and network
## What did i do:
- Download volume which MountPath point to, mount to docker container
- Connect to cluster network, set network to docker container
- Get all environment with command (env), set env to docker container
`)),
Example: templates.Examples(i18n.T(`
# Develop workloads
- develop deployment
kubevpn dev deployment/productpage
- develop service
kubevpn dev service/productpage
# Develop workloads with mesh, traffic with header a=1, will hit local PC, otherwise no effect
kubevpn dev service/productpage --headers a=1
# Develop workloads without proxy traffic
kubevpn dev service/productpage --no-proxy
# Develop workloads which api-server behind of bastion host or ssh jump host
kubevpn dev deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# It also support ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn dev deployment/productpage --ssh-alias <alias>
# Switch to terminal mode; send stdin to 'bash' and sends stdout/stderror from 'bash' back to the client
kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev -i -t --entrypoint /bin/bash
or
kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev -it --entrypoint /bin/bash
# Support ssh auth GSSAPI
kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab -it --entrypoint /bin/bash
kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache -it --entrypoint /bin/bash
kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD> -it --entrypoint /bin/bash
`)),
ValidArgsFunction: completion.ResourceTypeAndNameCompletionFunc(f),
Args: cobra.MatchAll(cobra.OnlyValidArgs),
DisableFlagsInUseLine: true,
PreRunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
fmt.Fprintf(os.Stdout, "You must specify the type of resource to proxy. %s\n\n", cmdutil.SuggestAPIResources("kubevpn"))
fullCmdName := cmd.Parent().CommandPath()
usageString := "Required resource not specified."
if len(fullCmdName) > 0 && cmdutil.IsSiblingCommandExists(cmd, "explain") {
usageString = fmt.Sprintf("%s\nUse \"%s explain <resource>\" for a detailed description of that resource (e.g. %[2]s explain pods).", usageString, fullCmdName)
}
return cmdutil.UsageErrorf(cmd, usageString)
}
err = cmd.Flags().Parse(args[1:])
if err != nil {
return err
}
util.InitLogger(false)
// not support temporally
if options.Engine == config.EngineGvisor {
return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
}
err = daemon.StartupDaemon(cmd.Context())
if err != nil {
return err
}
return util.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false)
},
RunE: func(cmd *cobra.Command, args []string) error {
options.Workload = args[0]
for i, arg := range args {
if arg == "--" && i != len(args)-1 {
options.Copts.Args = args[i+1:]
break
}
}
defer func() {
for _, function := range options.GetRollbackFuncList() {
if function != nil {
if er := function(); er != nil {
log.Errorf("roll back failed, error: %s", er.Error())
}
}
}
}()
err = dev.DoDev(cmd.Context(), options, sshConf, cmd.Flags(), f, transferImage)
return err
},
}
cmd.Flags().SortFlags = false
cmd.Flags().StringToStringVarP(&options.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to local PC, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to local PC, format is k=v, like: k1=v1,k2=v2")
cmd.Flags().BoolVar(&config.Debug, "debug", false, "enable debug mode or not, true or false")
cmd.Flags().StringVar(&config.Image, "image", config.Image, "use this image to startup container")
cmd.Flags().BoolVar(&options.NoProxy, "no-proxy", false, "Whether proxy remote workloads traffic into local or not, true: just startup container on local without inject containers to intercept traffic, false: intercept traffic and forward to local")
cmdutil.AddContainerVarFlags(cmd, &options.ContainerName, options.ContainerName)
cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc("container", completion.ContainerCompletionFunc(f)))
cmd.Flags().StringVar((*string)(&options.ConnectMode), "connect-mode", string(dev.ConnectModeHost), "Connect to kubernetes network in container or in host, eg: ["+string(dev.ConnectModeContainer)+"|"+string(dev.ConnectModeHost)+"]")
cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
cmd.Flags().StringVar((*string)(&options.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
// diy docker options
cmd.Flags().StringVar(&options.DevImage, "dev-image", "", "Use to startup docker container, Default is pod image")
// origin docker options
dev.AddDockerFlags(options, cmd.Flags(), cli)
_ = cmd.RegisterFlagCompletionFunc(
"env",
func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return os.Environ(), cobra.ShellCompDirectiveNoFileComp
},
)
_ = cmd.RegisterFlagCompletionFunc(
"env-file",
func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return nil, cobra.ShellCompDirectiveDefault
},
)
_ = cmd.RegisterFlagCompletionFunc(
"network",
dockercomp.NetworkNames(cli),
)
handler.AddExtraRoute(cmd.Flags(), &options.ExtraRouteInfo)
util.AddSshFlags(cmd.Flags(), sshConf)
return cmd
}

View File

@@ -1,10 +1,9 @@
package cmds
import (
"context"
"fmt"
"io"
"os"
"strconv"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
@@ -16,11 +15,12 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdDisconnect(f cmdutil.Factory) *cobra.Command {
var all = false
var clusterIDs []string
cmd := &cobra.Command{
Use: "disconnect",
Short: i18n.T("Disconnect from kubernetes cluster network"),
@@ -29,62 +29,57 @@ func CmdDisconnect(f cmdutil.Factory) *cobra.Command {
This command disconnects from the cluster. After using command 'kubevpn connect',
you can use this command to disconnect from a specific cluster.
before disconnect, it will leave proxy resource and clone resource if resource depends on this cluster
after disconnect it will also cleanup dns and host
- Before disconnecting, it will leave proxied and synced resources that depend on this cluster.
- After disconnecting, it will also clean up DNS and the hosts file.
`)),
Example: templates.Examples(i18n.T(`
# disconnect from cluster network and restore proxy resource
kubevpn disconnect
# disconnect from a specific connection ID
kubevpn disconnect 03dc50feb8c3
# disconnect from all clusters
kubevpn disconnect --all
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
plog.InitLoggerForClient()
err = daemon.StartupDaemon(cmd.Context())
return err
},
Args: cobra.MatchAll(cobra.OnlyValidArgs),
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) > 0 && all {
return fmt.Errorf("either specify --all or ID, not both")
if len(args) == 0 && !all {
return fmt.Errorf("either specify --all or connecetion ID")
}
if len(clusterIDs) > 0 && all {
return fmt.Errorf("either specify --all or cluster-id, not both")
}
if len(args) == 0 && !all && len(clusterIDs) == 0 {
return fmt.Errorf("either specify --all or ID or cluster-id")
}
var ids *int32
var id string
if len(args) > 0 {
integer, err := strconv.Atoi(args[0])
if err != nil {
return fmt.Errorf("invalid ID: %s: %v", args[0], err)
}
ids = pointer.Int32(int32(integer))
id = args[0]
}
client, err := daemon.GetClient(false).Disconnect(
cmd.Context(),
&rpc.DisconnectRequest{
ID: ids,
ClusterIDs: clusterIDs,
All: pointer.Bool(all),
},
)
var resp *rpc.DisconnectResponse
for {
resp, err = client.Recv()
if err == io.EOF {
break
} else if err == nil {
fmt.Fprint(os.Stdout, resp.Message)
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
break
} else {
return err
}
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
fmt.Fprint(os.Stdout, "disconnect successfully")
req := &rpc.DisconnectRequest{
ConnectionID: &id,
All: pointer.Bool(all),
}
resp, err := cli.Disconnect(context.Background())
if err != nil {
return err
}
err = resp.Send(req)
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.DisconnectResponse](cmd.Context(), resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
}
return err
}
_, _ = fmt.Fprint(os.Stdout, "Disconnect completed")
return nil
},
}
cmd.Flags().BoolVar(&all, "all", all, "Disconnect all cluster, disconnect from all cluster network")
cmd.Flags().StringArrayVar(&clusterIDs, "cluster-id", []string{}, "Cluster id, command status -o yaml/json will show cluster-id")
return cmd
}

View File

@@ -1,109 +0,0 @@
package cmds
import (
"cmp"
"encoding/json"
"os"
"slices"
"strings"
"github.com/spf13/cobra"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/printers"
cmdget "k8s.io/kubectl/pkg/cmd/get"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/scheme"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
)
func CmdGet(f cmdutil.Factory) *cobra.Command {
var printFlags = cmdget.NewGetPrintFlags()
cmd := &cobra.Command{
Use: "get",
Hidden: true,
Short: i18n.T("Get cluster resources which connected"),
Long: templates.LongDesc(i18n.T(`Get cluster resources which connected`)),
Example: templates.Examples(i18n.T(`
# Get resource to k8s cluster network
kubevpn get pods
# Get api-server behind of bastion host or ssh jump host
kubevpn get deployment --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# It also support ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn get service --ssh-alias <alias>
`)),
PreRunE: func(cmd *cobra.Command, args []string) error {
// startup daemon process and sudo process
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
namespace, _, err := f.ToRawKubeConfigLoader().Namespace()
if err != nil {
return err
}
client, err := daemon.GetClient(true).Get(
cmd.Context(),
&rpc.GetRequest{
Namespace: namespace,
Resource: args[0],
},
)
if err != nil {
return err
}
w := printers.GetNewTabWriter(os.Stdout)
var toPrinter = func() (printers.ResourcePrinterFunc, error) {
var flags = printFlags.Copy()
_ = flags.EnsureWithNamespace()
printer, err := flags.ToPrinter()
if err != nil {
return nil, err
}
printer, err = printers.NewTypeSetter(scheme.Scheme).WrapToPrinter(printer, nil)
if err != nil {
return nil, err
}
outputOption := cmd.Flags().Lookup("output").Value.String()
if strings.Contains(outputOption, "custom-columns") || outputOption == "yaml" || strings.Contains(outputOption, "json") {
} else {
printer = &cmdget.TablePrinter{Delegate: printer}
}
return printer.PrintObj, nil
}
var list []*v1.PartialObjectMetadata
for _, m := range client.Metadata {
var data v1.PartialObjectMetadata
err = json.Unmarshal([]byte(m), &data)
if err != nil {
continue
}
list = append(list, &data)
}
slices.SortStableFunc(list, func(a, b *v1.PartialObjectMetadata) int {
compare := cmp.Compare(a.GetNamespace(), b.GetNamespace())
if compare == 0 {
return cmp.Compare(a.GetName(), b.GetName())
}
return compare
})
for _, m := range list {
printer, err := toPrinter()
if err != nil {
return err
}
_ = printer.PrintObj(m, w)
}
return w.Flush()
},
}
printFlags.AddFlags(cmd)
return cmd
}

View File

@@ -0,0 +1,43 @@
package cmds
import (
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
)
func CmdImageCopy(cmdutil.Factory) *cobra.Command {
var imageCmd = &cobra.Command{
Use: "image <cmd>",
Short: "copy images",
}
copyCmd := &cobra.Command{
Use: "copy <src_image_ref> <dst_image_ref>",
Aliases: []string{"cp"},
Short: "copy or re-tag image",
Long: `Copy or re-tag an image. This works between registries and only pulls layers
that do not exist at the target. In the same registry it attempts to mount
the layers between repositories, and within the same repository it only
sends the manifest with the new tag.`,
Example: `
# copy an image
kubevpn image copy ghcr.io/kubenetworks/kubevpn:latest registry.example.org/kubevpn/kubevpn:latest
# re-tag an image
kubevpn image copy ghcr.io/kubenetworks/kubevpn:latest ghcr.io/kubenetworks/kubevpn:v2.3.4`,
Args: cobra.MatchAll(cobra.ExactArgs(2), cobra.OnlyValidArgs),
PreRunE: func(cmd *cobra.Command, args []string) error {
plog.InitLoggerForClient()
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
err := regctl.TransferImageWithRegctl(cmd.Context(), args[0], args[1])
return err
},
}
imageCmd.AddCommand(copyCmd)
return imageCmd
}
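TransferImageWithRegctl's body is not in this diff, but the long description above matches regclient's ImageCopy behavior. A sketch of the likely core, assuming the upstream regclient API (github.com/regclient/regclient); the exact options kubevpn passes are not confirmed here:

package main

import (
    "context"
    "fmt"
    "os"

    "github.com/regclient/regclient"
    "github.com/regclient/regclient/types/ref"
)

func copyImage(ctx context.Context, src, dst string) error {
    srcRef, err := ref.New(src)
    if err != nil {
        return err
    }
    dstRef, err := ref.New(dst)
    if err != nil {
        return err
    }
    rc := regclient.New(regclient.WithDockerCreds()) // reuse local docker logins
    defer rc.Close(ctx, srcRef)
    // Only layers missing at the target are pushed; same-registry copies
    // attempt cross-repository mounts, as the long description says.
    return rc.ImageCopy(ctx, srcRef, dstRef)
}

func main() {
    // usage: copyimage SRC_REF DST_REF
    if err := copyImage(context.Background(), os.Args[1], os.Args[2]); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}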

View File

@@ -1,9 +1,7 @@
package cmds
import (
"fmt"
"io"
"os"
"context"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
@@ -14,6 +12,7 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdLeave(f cmdutil.Factory) *cobra.Command {
@@ -33,27 +32,39 @@ func CmdLeave(f cmdutil.Factory) *cobra.Command {
# leave proxy resource and restore it to origin
kubevpn leave deployment/authors
`)),
Args: cobra.MatchAll(cobra.OnlyValidArgs, cobra.MinimumNArgs(1)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
leave, err := daemon.GetClient(false).Leave(cmd.Context(), &rpc.LeaveRequest{
Workloads: args,
})
_, ns, err := util.ConvertToKubeConfigBytes(f)
if err != nil {
return err
}
for {
recv, err := leave.Recv()
if err == io.EOF {
return nil
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
return nil
} else if err != nil {
return err
}
fmt.Fprint(os.Stdout, recv.GetMessage())
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
req := &rpc.LeaveRequest{
Namespace: ns,
Workloads: args,
}
resp, err := cli.Leave(context.Background())
if err != nil {
return err
}
err = resp.Send(req)
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.LeaveResponse](cmd.Context(), resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
}
return err
}
return nil
},
}
return leaveCmd

View File

@@ -1,41 +0,0 @@
package cmds
import (
"fmt"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
)
func CmdList(f cmdutil.Factory) *cobra.Command {
cmd := &cobra.Command{
Use: "list",
Short: i18n.T("List proxy resources"),
Long: templates.LongDesc(i18n.T(`List proxy resources`)),
Example: templates.Examples(i18n.T(`
# list proxy resources
kubevpn list
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
client, err := daemon.GetClient(true).List(
cmd.Context(),
&rpc.ListRequest{},
)
if err != nil {
return err
}
fmt.Println(client.GetMessage())
return nil
},
Hidden: true,
}
return cmd
}

View File

@@ -1,10 +1,6 @@
package cmds
import (
"fmt"
"io"
"os"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -14,6 +10,8 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdLogs(f cmdutil.Factory) *cobra.Command {
@@ -31,30 +29,34 @@ func CmdLogs(f cmdutil.Factory) *cobra.Command {
kubevpn logs -f
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
plog.InitLoggerForClient()
// startup daemon process and sudo process
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
client, err := daemon.GetClient(true).Logs(cmd.Context(), req)
cli, err := daemon.GetClient(true)
if err != nil {
return err
}
var resp *rpc.LogResponse
for {
resp, err = client.Recv()
if err == io.EOF {
break
} else if err == nil {
fmt.Fprintln(os.Stdout, resp.Message)
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
resp, err := cli.Logs(cmd.Context())
if err != nil {
return err
}
err = resp.Send(req)
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.LogResponse](cmd.Context(), resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
} else {
return err
}
return err
}
return nil
},
}
cmd.Flags().BoolVarP(&req.Follow, "follow", "f", false, "Specify if the logs should be streamed.")
cmd.Flags().Int32VarP(&req.Lines, "lines", "l", 10, "Lines of recent log file to display.")
return cmd
}

26
cmd/kubevpn/cmds/once.go Normal file
View File

@@ -0,0 +1,26 @@
package cmds
import (
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
)
func CmdOnce(factory cmdutil.Factory) *cobra.Command {
cmd := &cobra.Command{
Use: "once",
Short: i18n.T("Generate TLS once and restart deployment"),
Long: templates.LongDesc(i18n.T(`Generate TLS once and restart deployment for helm installation.`)),
RunE: func(cmd *cobra.Command, args []string) (err error) {
return handler.Once(cmd.Context(), factory)
},
Hidden: true,
DisableFlagsInUseLine: true,
}
cmd.Flags().StringVar(&config.Image, "image", config.Image, "use this image to startup container")
return cmd
}

View File

@@ -3,7 +3,6 @@ package cmds
import (
"context"
"fmt"
"io"
"os"
log "github.com/sirupsen/logrus"
@@ -19,14 +18,20 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
)
func CmdProxy(f cmdutil.Factory) *cobra.Command {
var connect = handler.ConnectOptions{}
var headers = make(map[string]string)
var portmap []string
var extraRoute = &handler.ExtraRouteInfo{}
var sshConf = &util.SshConfig{}
var sshConf = &pkgssh.SshConfig{}
var transferImage, foreground bool
var imagePullSecretName string
var managerNamespace string
cmd := &cobra.Command{
Use: "proxy",
Short: i18n.T("Proxy kubernetes workloads inbound traffic into local PC"),
@@ -53,25 +58,25 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
or
kubevpn proxy deployment authors productpage
# Reverse proxy with mesh, traffic with header a=1 will hit local PC, otherwise no effect
kubevpn proxy service/productpage --headers a=1
# Reverse proxy with mesh, traffic with header foo=bar will hit local PC, otherwise no effect
kubevpn proxy deployment/productpage --headers foo=bar
# Reverse proxy with mesh, traffic with header a=1 and b=2 will hit local PC, otherwise no effect
kubevpn proxy service/productpage --headers a=1 --headers b=2
# Reverse proxy with mesh, traffic with header foo=bar and env=dev will hit local PC, otherwise no effect
kubevpn proxy deployment/productpage --headers foo=bar --headers env=dev
# Connect to api-server behind a bastion host or ssh jump host and proxy kubernetes resource traffic into local PC
kubevpn proxy deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem --headers a=1
kubevpn proxy deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem --headers foo=bar
# It also support ProxyJump, like
# It also supports ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn proxy service/productpage --ssh-alias <alias> --headers a=1
kubevpn proxy deployment/productpage --ssh-alias <alias> --headers foo=bar
# Support ssh auth GSSAPI
kubevpn proxy service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
kubevpn proxy service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
kubevpn proxy service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
kubevpn proxy deployment/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
kubevpn proxy deployment/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
kubevpn proxy deployment/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
# Support port map, you can proxy container port to local port by command:
kubevpn proxy deployment/productpage --portmap 80:8080
@@ -85,105 +90,107 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
# Auto proxy container port to same local port, and auto detect protocol
kubevpn proxy deployment/productpage
`)),
Args: cobra.MatchAll(cobra.OnlyValidArgs, cobra.MinimumNArgs(1)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
plog.InitLoggerForClient()
if err = daemon.StartupDaemon(cmd.Context()); err != nil {
return err
}
// not supported temporarily
if connect.Engine == config.EngineGvisor {
return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
if transferImage {
err = regctl.TransferImageWithRegctl(cmd.Context(), config.OriginImage, config.Image)
}
return err
},
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
fmt.Fprintf(os.Stdout, "You must specify the type of resource to proxy. %s\n\n", cmdutil.SuggestAPIResources("kubevpn"))
fullCmdName := cmd.Parent().CommandPath()
usageString := "Required resource not specified."
if len(fullCmdName) > 0 && cmdutil.IsSiblingCommandExists(cmd, "explain") {
usageString = fmt.Sprintf("%s\nUse \"%s explain <resource>\" for a detailed description of that resource (e.g. %[2]s explain pods).", usageString, fullCmdName)
}
return cmdutil.UsageErrorf(cmd, usageString)
}
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
if err != nil {
return err
}
// TODO: wrap the doConnect method? internally use the client to send it to the daemon
cli := daemon.GetClient(false)
logLevel := log.ErrorLevel
if config.Debug {
logLevel = log.DebugLevel
if !sshConf.IsEmpty() {
if ip := util.GetAPIServerFromKubeConfigBytes(bytes); ip != nil {
extraRoute.ExtraCIDR = append(extraRoute.ExtraCIDR, ip.String())
}
}
client, err := cli.Proxy(
cmd.Context(),
&rpc.ConnectRequest{
KubeconfigBytes: string(bytes),
Namespace: ns,
Headers: connect.Headers,
PortMap: connect.PortMap,
Workloads: args,
ExtraRoute: extraRoute.ToRPC(),
Engine: string(connect.Engine),
SshJump: sshConf.ToRPC(),
TransferImage: transferImage,
Image: config.Image,
Level: int32(logLevel),
OriginKubeconfigPath: util.GetKubeConfigPath(f),
},
)
// TODO: wrap the doConnect method? internally use the client to send it to the daemon
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
var resp *rpc.ConnectResponse
for {
resp, err = client.Recv()
if err == io.EOF {
break
} else if err == nil {
fmt.Fprint(os.Stdout, resp.Message)
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
return nil
} else {
return err
}
req := &rpc.ProxyRequest{
KubeconfigBytes: string(bytes),
Namespace: ns,
Headers: headers,
PortMap: portmap,
Workloads: args,
ExtraRoute: extraRoute.ToRPC(),
SshJump: sshConf.ToRPC(),
TransferImage: transferImage,
Image: config.Image,
ImagePullSecretName: imagePullSecretName,
Level: int32(util.If(config.Debug, log.DebugLevel, log.InfoLevel)),
OriginKubeconfigPath: util.GetKubeConfigPath(f),
ManagerNamespace: managerNamespace,
}
util.Print(os.Stdout, "Now you can access resources in the kubernetes cluster, enjoy it :)")
resp, err := cli.Proxy(context.Background())
if err != nil {
return err
}
err = resp.Send(req)
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.ConnectResponse](cmd.Context(), resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
}
return err
}
_, _ = fmt.Fprintln(os.Stdout, config.Slogan)
// hangup
if foreground {
// leave from cluster resources
<-cmd.Context().Done()
stream, err := cli.Leave(context.Background(), &rpc.LeaveRequest{
Workloads: args,
})
var resp *rpc.LeaveResponse
for {
resp, err = stream.Recv()
if err == io.EOF {
return nil
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
return nil
} else if err != nil {
return err
}
fmt.Fprint(os.Stdout, resp.Message)
err = leave(cli, ns, args)
if err != nil {
return err
}
}
return nil
},
}
cmd.Flags().StringToStringVarP(&connect.Headers, "headers", "H", map[string]string{}, "Traffic with special headers (use `and` to match all headers) will be reversed to the local PC. If not specified, all traffic is redirected to the local PC. eg: --headers a=1 --headers b=2")
cmd.Flags().StringArrayVar(&connect.PortMap, "portmap", []string{}, "Port map, map container port to local port, format: [tcp/udp]/containerPort:localPort. If not specified, localPort uses containerPort. eg: tcp/80:8080 or udp/5000:5001 or 80 or 80:8080")
cmd.Flags().BoolVar(&config.Debug, "debug", false, "Enable debug mode or not, true or false")
cmd.Flags().StringVar(&config.Image, "image", config.Image, "Use this image to startup container")
cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to the image specified by flag `--image`, default: "+config.Image)
cmd.Flags().StringVar((*string)(&connect.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
cmd.Flags().StringToStringVarP(&headers, "headers", "H", map[string]string{}, "Traffic with special headers (use `and` to match all headers) will be reversed to the local PC. If not specified, all traffic is redirected to the local PC. format: <KEY>=<VALUE> eg: --headers foo=bar --headers env=dev")
cmd.Flags().StringArrayVar(&portmap, "portmap", []string{}, "Port map, map container port to local port, format: [tcp/udp]/containerPort:localPort. If not specified, localPort uses containerPort. eg: tcp/80:8080 or udp/5000:5001 or 80 or 80:8080")
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName)
cmd.Flags().BoolVar(&foreground, "foreground", false, "Run in foreground and hang up until cancelled")
cmd.Flags().StringVar(&managerNamespace, "manager-namespace", "", "The namespace where the traffic manager is to be found. Only works in cluster mode (install kubevpn server by helm)")
handler.AddExtraRoute(cmd.Flags(), extraRoute)
util.AddSshFlags(cmd.Flags(), sshConf)
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
cmd.ValidArgsFunction = utilcomp.ResourceTypeAndNameCompletionFunc(f)
return cmd
}
func leave(cli rpc.DaemonClient, ns string, args []string) error {
req := &rpc.LeaveRequest{
Namespace: ns,
Workloads: args,
}
resp, err := cli.Leave(context.Background())
if err != nil {
return err
}
err = resp.Send(req)
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.LeaveResponse](nil, resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
}
return err
}
return nil
}
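Note that this leave helper deliberately uses context.Background() for the RPC and a nil context for the stream printer: in the --foreground path it only runs after <-cmd.Context().Done() fires, so the command context is already cancelled by then (that PrintGRPCStream tolerates a nil context is assumed here).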

View File

@@ -3,7 +3,6 @@ package cmds
import (
"context"
"fmt"
"io"
"os"
"github.com/spf13/cobra"
@@ -23,17 +22,17 @@ func CmdQuit(f cmdutil.Factory) *cobra.Command {
Use: "quit",
Short: i18n.T("Quit kubevpn daemon grpc server"),
Long: templates.LongDesc(i18n.T(`
Disconnect from cluster, leave proxy resources, quit daemon grpc server and cleanup dns/host
Disconnect from cluster, leave proxy resources, quit daemon grpc server and cleanup dns/hosts
`)),
Example: templates.Examples(i18n.T(`
# before quit kubevpn, it will leave proxy resources to origin and disconnect from cluster
kubevpn quit
`)),
RunE: func(cmd *cobra.Command, args []string) error {
_ = quit(cmd.Context(), false)
_ = quit(cmd.Context(), true)
_ = quit(cmd.Context(), false)
util.CleanExtensionLib()
fmt.Fprint(os.Stdout, "quit successfully")
_, _ = fmt.Fprint(os.Stdout, "Exited")
return nil
},
}
@@ -41,26 +40,24 @@ func CmdQuit(f cmdutil.Factory) *cobra.Command {
}
func quit(ctx context.Context, isSudo bool) error {
cli := daemon.GetClient(isSudo)
if cli == nil {
return nil
}
client, err := cli.Quit(ctx, &rpc.QuitRequest{})
cli, err := daemon.GetClient(isSudo)
if err != nil {
return err
}
var resp *rpc.QuitResponse
for {
resp, err = client.Recv()
if err == io.EOF {
break
} else if err == nil {
fmt.Fprint(os.Stdout, resp.Message)
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
resp, err := cli.Quit(context.Background())
if err != nil {
return err
}
err = resp.Send(&rpc.QuitRequest{})
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.QuitResponse](ctx, resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
} else {
return err
}
return err
}
return nil
}

View File

@@ -1,58 +0,0 @@
package cmds
import (
"fmt"
"io"
"os"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
)
func CmdRemove(f cmdutil.Factory) *cobra.Command {
var cmd = &cobra.Command{
Use: "remove",
Short: "Remove clone resource",
Long: templates.LongDesc(i18n.T(`
Remove clone resource
This command is designed to remove clone resources. After using command 'kubevpn clone xxx',
it will generate and create a new resource in the target k8s cluster with the name format [resource_name]_clone_xxxxx;
use this command to remove these created resources.
`)),
Example: templates.Examples(i18n.T(`
# leave proxy resources to origin
kubevpn remove deployment/authors
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
leave, err := daemon.GetClient(false).Remove(cmd.Context(), &rpc.RemoveRequest{
Workloads: args,
})
if err != nil {
return err
}
for {
recv, err := leave.Recv()
if err == io.EOF {
return nil
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
return nil
} else if err != nil {
return err
}
fmt.Fprint(os.Stdout, recv.GetMessage())
}
},
}
return cmd
}

View File

@@ -1,119 +1,92 @@
package cmds
import (
"fmt"
"io"
"os"
"context"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"k8s.io/utils/ptr"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdReset(f cmdutil.Factory) *cobra.Command {
var sshConf = &util.SshConfig{}
var sshConf = &pkgssh.SshConfig{}
cmd := &cobra.Command{
Use: "reset",
Short: "Reset all resource create by kubevpn in k8s cluster",
Short: "Reset workloads to origin status",
Long: templates.LongDesc(i18n.T(`
Reset all resource create by kubevpn in k8s cluster
Reset workloads to origin status
Reset will delete all resources created by kubevpn in the k8s cluster, like deployment, service, serviceAccount...
and it will also delete local development docker containers and docker networks, delete hosts entries added by kubevpn,
and clean up dns.
Reset will remove injected container envoy-proxy and vpn, and restore service mesh rules.
`)),
Example: templates.Examples(i18n.T(`
# Reset default namespace
kubevpn reset
# Reset default namespace workload deployment/productpage
kubevpn reset deployment/productpage
# Reset another namespace test
kubevpn reset -n test
# Reset another namespace test workload deployment/productpage
kubevpn reset deployment/productpage -n test
# Reset cluster whose api-server is behind a bastion host or ssh jump host
kubevpn reset --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# Reset workload deployment/productpage whose api-server is behind a bastion host or ssh jump host
kubevpn reset deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# It also support ProxyJump, like
# It also supports ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn reset --ssh-alias <alias>
kubevpn reset deployment/productpage --ssh-alias <alias>
# Support ssh auth GSSAPI
kubevpn reset --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
kubevpn reset --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
kubevpn reset --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
kubevpn reset deployment/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
kubevpn reset deployment/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
kubevpn reset deployment/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`)),
PreRunE: func(cmd *cobra.Command, args []string) error {
plog.InitLoggerForClient()
return daemon.StartupDaemon(cmd.Context())
},
Args: cobra.MatchAll(cobra.ExactArgs(1)),
RunE: func(cmd *cobra.Command, args []string) error {
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
if err != nil {
return err
}
cli := daemon.GetClient(false)
disconnect, err := cli.Disconnect(cmd.Context(), &rpc.DisconnectRequest{
KubeconfigBytes: ptr.To(string(bytes)),
Namespace: ptr.To(ns),
SshJump: sshConf.ToRPC(),
})
if err != nil {
log.Warnf("failed to disconnect from cluster: %v", err)
} else {
_ = printDisconnectResp(disconnect)
}
req := &rpc.ResetRequest{
KubeconfigBytes: string(bytes),
Namespace: ns,
SshJump: sshConf.ToRPC(),
}
resp, err := cli.Reset(cmd.Context(), req)
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
err = printResetResp(resp)
return err
req := &rpc.ResetRequest{
KubeconfigBytes: string(bytes),
Namespace: ns,
Workloads: args,
SshJump: sshConf.ToRPC(),
}
resp, err := cli.Reset(context.Background())
if err != nil {
return err
}
err = resp.Send(req)
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.ResetResponse](cmd.Context(), resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
}
return err
}
return nil
},
}
util.AddSshFlags(cmd.Flags(), sshConf)
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
return cmd
}
func printResetResp(resp rpc.Daemon_ResetClient) error {
for {
recv, err := resp.Recv()
if err == io.EOF {
return nil
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
return nil
} else if err != nil {
return err
}
fmt.Fprint(os.Stdout, recv.GetMessage())
}
}
func printDisconnectResp(disconnect rpc.Daemon_DisconnectClient) error {
for {
recv, err := disconnect.Recv()
if err == io.EOF {
return nil
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
return nil
} else if err != nil {
return err
}
fmt.Fprint(os.Stdout, recv.GetMessage())
}
}

View File

@@ -5,6 +5,7 @@ import (
"strings"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/rest"
restclient "k8s.io/client-go/rest"
@@ -16,6 +17,7 @@ import (
"k8s.io/utils/ptr"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func NewKubeVPNCommand() *cobra.Command {
@@ -31,7 +33,7 @@ func NewKubeVPNCommand() *cobra.Command {
}
flags := cmd.PersistentFlags()
configFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
configFlags := genericclioptions.NewConfigFlags(true)
configFlags.WrapConfigFn = func(c *rest.Config) *rest.Config {
if path, ok := os.LookupEnv(config.EnvSSHJump); ok {
kubeconfigBytes, err := os.ReadFile(path)
@@ -56,30 +58,30 @@ func NewKubeVPNCommand() *cobra.Command {
CmdDisconnect(factory),
CmdProxy(factory),
CmdLeave(factory),
CmdClone(factory),
CmdRemove(factory),
CmdDev(factory),
CmdSync(factory),
CmdUnsync(factory),
CmdRun(factory),
// Hidden, Server Commands (DO NOT USE IT !!!)
CmdControlPlane(factory),
CmdServe(factory),
CmdServer(factory),
CmdDaemon(factory),
CmdWebhook(factory),
CmdSyncthing(factory),
CmdOnce(factory),
},
},
{
Message: "Management commands:",
Commands: []*cobra.Command{
CmdStatus(factory),
CmdList(factory),
CmdAlias(factory),
CmdGet(factory),
CmdConfig(factory),
CmdConnection(factory),
CmdSSH(factory),
CmdSSHDaemon(factory),
CmdImageCopy(factory),
CmdLogs(factory),
CmdCp(factory),
CmdReset(factory),
CmdUninstall(factory),
CmdQuit(factory),
},
},
@@ -97,8 +99,13 @@ func NewKubeVPNCommand() *cobra.Command {
return cmd
}
func AddKubeconfigJsonFlag(flags *pflag.FlagSet, kubeconfigJson *string) {
flags.StringVar(kubeconfigJson, "kubeconfig-json", ptr.Deref[string](kubeconfigJson, ""), "Json format of kubeconfig to use for CLI requests.")
}
type warp struct {
*genericclioptions.ConfigFlags
KubeconfigJson string
}
func (f *warp) ToRawKubeConfigLoader() clientcmd.ClientConfig {
@@ -106,5 +113,11 @@ func (f *warp) ToRawKubeConfigLoader() clientcmd.ClientConfig {
home := homedir.HomeDir()
f.KubeConfig = ptr.To(strings.Replace(*f.KubeConfig, "~", home, 1))
}
if strings.TrimSpace(f.KubeconfigJson) != "" {
path, err := util.ConvertToTempKubeconfigFile([]byte(f.KubeconfigJson), "")
if err == nil {
f.KubeConfig = ptr.To(path)
}
}
return f.ConfigFlags.ToRawKubeConfigLoader()
}
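This wrapper is what makes --kubeconfig-json work everywhere: the inline JSON is materialized into a temp kubeconfig file and the standard --kubeconfig loading takes over. A rough sketch of the helper, assuming the second argument is a target path where "" means pick a temp file; the real util.ConvertToTempKubeconfigFile may also validate or normalize the config:

package util

import "os"

func ConvertToTempKubeconfigFile(kubeconfigBytes []byte, path string) (string, error) {
	if path == "" {
		f, err := os.CreateTemp("", "kubeconfig-*.yaml")
		if err != nil {
			return "", err
		}
		path = f.Name()
		_ = f.Close()
	}
	// JSON is a subset of YAML, so clientcmd loads the file either way.
	if err := os.WriteFile(path, kubeconfigBytes, 0o600); err != nil {
		return "", err
	}
	return path, nil
}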

151
cmd/kubevpn/cmds/run.go Normal file
View File

@@ -0,0 +1,151 @@
package cmds
import (
"context"
"github.com/docker/cli/cli/command"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/completion"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/run"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
)
func CmdRun(f cmdutil.Factory) *cobra.Command {
var options = &run.Options{
NoProxy: false,
ExtraRouteInfo: handler.ExtraRouteInfo{},
}
var sshConf = &pkgssh.SshConfig{}
var transferImage bool
var imagePullSecretName string
var managerNamespace string
cmd := &cobra.Command{
Use: "run TYPE/NAME [-c CONTAINER] [flags] -- [args...]",
Short: i18n.T("Run kubernetes workloads in local Docker container"),
Long: templates.LongDesc(i18n.T(`
Run kubernetes workloads in a local Docker container with the same volumes, env, and network
## What it does:
- Download the volumes which MountPath points to, and mount them into the docker container
- Connect to the cluster network, and set that network for the docker container
- Get all environment variables with command (env), and set them in the docker container
`)),
Example: templates.Examples(i18n.T(`
# Run workloads
- run a deployment in the current namespace
kubevpn run deployment/productpage
# Run workloads with mesh, traffic with header foo=bar will hit local PC, otherwise no effect
kubevpn run deployment/productpage --headers foo=bar
# Run workloads without proxy traffic
kubevpn run deployment/productpage --no-proxy
# Run workloads whose api-server is behind a bastion host or ssh jump host
kubevpn run deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# It also supports ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn run deployment/productpage --ssh-alias <alias>
# Switch to terminal mode; sends stdin to 'bash' and sends stdout/stderr from 'bash' back to the client
kubevpn run deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev --entrypoint /bin/bash
# Support ssh auth GSSAPI
kubevpn run deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab --entrypoint /bin/bash
kubevpn run deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache --entrypoint /bin/bash
kubevpn run deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD> --entrypoint /bin/bash
`)),
ValidArgsFunction: completion.ResourceTypeAndNameCompletionFunc(f),
Args: cobra.MatchAll(cobra.OnlyValidArgs, cobra.MinimumNArgs(1)),
DisableFlagsInUseLine: true,
PreRunE: func(cmd *cobra.Command, args []string) error {
err := cmd.Flags().Parse(args[1:])
if err != nil {
return err
}
plog.InitLoggerForClient()
err = daemon.StartupDaemon(cmd.Context())
if err != nil {
return err
}
if transferImage {
err = regctl.TransferImageWithRegctl(cmd.Context(), config.OriginImage, config.Image)
}
if err != nil {
return err
}
if sshConf.IsEmpty() {
return nil
}
bytes, _, err := util.ConvertToKubeConfigBytes(f)
if err != nil {
return err
}
return pkgssh.SshJumpAndSetEnv(cmd.Context(), sshConf, bytes, false)
},
RunE: func(cmd *cobra.Command, args []string) error {
options.Workload = args[0]
for i, arg := range args {
if arg == "--" && i != len(args)-1 {
options.ContainerOptions.Args = args[i+1:]
break
}
}
if err := options.InitClient(f); err != nil {
return err
}
conf, hostConfig, err := run.Parse(cmd.Flags(), options.ContainerOptions)
if err != nil {
return err
}
defer func() {
for _, function := range options.GetRollbackFuncList() {
if function != nil {
if err := function(); err != nil {
plog.G(context.Background()).Errorf("Rollback failed, error: %s", err.Error())
}
}
}
}()
return options.Main(cmd.Context(), sshConf, conf, hostConfig, imagePullSecretName, managerNamespace)
},
}
cmd.Flags().SortFlags = false
cmd.Flags().StringToStringVarP(&options.Headers, "headers", "H", map[string]string{}, "Traffic with special headers will be reversed to the local PC; you should start up your service after the workloads are reversed successfully. If not specified, all traffic is redirected to the local PC. Format is k=v, like: k1=v1,k2=v2")
cmd.Flags().BoolVar(&options.NoProxy, "no-proxy", false, "Whether to proxy remote workloads traffic into local or not. true: just start up the container locally without injecting containers to intercept traffic; false: intercept traffic and forward it to local")
cmdutil.AddContainerVarFlags(cmd, &options.ContainerName, options.ContainerName)
cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc("container", completion.ContainerCompletionFunc(f)))
cmd.Flags().StringVar((*string)(&options.ConnectMode), "connect-mode", string(run.ConnectModeHost), "Connect to kubernetes network in container or in host, eg: ["+string(run.ConnectModeContainer)+"|"+string(run.ConnectModeHost)+"]")
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName)
cmd.Flags().StringVar(&managerNamespace, "manager-namespace", "", "The namespace where the traffic manager is to be found. Only works in cluster mode (install kubevpn server by helm)")
// diy docker options
cmd.Flags().StringVar(&options.DevImage, "dev-image", "", "Used to start up the docker container. Default is the pod image")
// -- origin docker options -- start
options.ContainerOptions = run.AddFlags(cmd.Flags())
cmd.Flags().StringVar(&options.RunOptions.Pull, "pull", run.PullImageMissing, `Pull image before running ("`+run.PullImageAlways+`"|"`+run.PullImageMissing+`"|"`+run.PullImageNever+`")`)
command.AddPlatformFlag(cmd.Flags(), &options.RunOptions.Platform)
// -- origin docker options -- end
handler.AddExtraRoute(cmd.Flags(), &options.ExtraRouteInfo)
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
return cmd
}
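The RunE above splits argv at the first "--": everything after it becomes the container command, everything before it stays with cobra. A standalone illustration of that split (splitRunArgs is a hypothetical name, not part of the codebase):

package main

import "fmt"

// splitRunArgs mirrors the loop in CmdRun: when "--" appears and is not the
// last token, the remainder becomes the container args.
func splitRunArgs(args []string) (workload string, containerArgs []string) {
	workload = args[0]
	for i, arg := range args {
		if arg == "--" && i != len(args)-1 {
			containerArgs = args[i+1:]
			break
		}
	}
	return workload, containerArgs
}

func main() {
	w, rest := splitRunArgs([]string{"deployment/productpage", "--entrypoint", "/bin/bash", "--", "-c", "sleep 3600"})
	fmt.Println(w, rest) // deployment/productpage [-c sleep 3600]
}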

View File

@@ -1,59 +0,0 @@
package cmds
import (
"math/rand"
"runtime"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"go.uber.org/automaxprocs/maxprocs"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/core"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdServe(_ cmdutil.Factory) *cobra.Command {
var route = &core.Route{}
cmd := &cobra.Command{
Use: "serve",
Hidden: true,
Short: "Server side, startup traffic manager, forward inbound and outbound traffic",
Long: templates.LongDesc(i18n.T(`
Server side, startup traffic manager, forward inbound and outbound traffic.
`)),
Example: templates.Examples(i18n.T(`
# serve node
kubevpn serve -L "tcp://:10800" -L "tun://127.0.0.1:8422?net=223.254.0.123/32"
`)),
PreRun: func(*cobra.Command, []string) {
util.InitLoggerForServer(config.Debug)
runtime.GOMAXPROCS(0)
go util.StartupPProf(0)
},
RunE: func(cmd *cobra.Command, args []string) error {
rand.Seed(time.Now().UnixNano())
_, _ = maxprocs.Set(maxprocs.Logger(nil))
ctx := cmd.Context()
err := handler.Complete(ctx, route)
if err != nil {
return err
}
servers, err := handler.Parse(*route)
if err != nil {
log.Errorf("parse server failed: %v", err)
return err
}
return handler.Run(ctx, servers)
},
}
cmd.Flags().StringArrayVarP(&route.ServeNodes, "node", "L", []string{}, "Startup node server. eg: tcp://localhost:1080")
cmd.Flags().StringVarP(&route.ChainNode, "chain", "F", "", "Forward chain. eg: tcp://192.168.1.100:2345")
cmd.Flags().BoolVar(&config.Debug, "debug", false, "Enable debug log or not")
return cmd
}

View File

@@ -0,0 +1,59 @@
package cmds
import (
"math/rand"
"os"
"runtime"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"go.uber.org/automaxprocs/maxprocs"
glog "gvisor.dev/gvisor/pkg/log"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/core"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdServer(cmdutil.Factory) *cobra.Command {
var route = &core.Route{}
cmd := &cobra.Command{
Use: "server",
Hidden: true,
Short: "Server side, startup traffic manager, forward inbound and outbound traffic",
Long: templates.LongDesc(i18n.T(`
Server side, startup traffic manager, forward inbound and outbound traffic.
`)),
Example: templates.Examples(i18n.T(`
# server listener
kubevpn server -l "gtcp://:10801" -l "tun://?net=198.19.0.123/32"
`)),
PreRun: func(*cobra.Command, []string) {
runtime.GOMAXPROCS(0)
go util.StartupPProfForServer(config.PProfPort)
glog.SetTarget(plog.ServerEmitter{Writer: &glog.Writer{Next: os.Stderr}})
},
RunE: func(cmd *cobra.Command, args []string) error {
rand.Seed(time.Now().UnixNano())
_, _ = maxprocs.Set(maxprocs.Logger(nil))
ctx := cmd.Context()
logger := plog.InitLoggerForServer()
logger.SetLevel(util.If(config.Debug, log.DebugLevel, log.InfoLevel))
servers, err := handler.Parse(*route)
if err != nil {
plog.G(ctx).Errorf("Parse server failed: %v", err)
return err
}
return handler.Run(plog.WithLogger(ctx, logger), servers)
},
}
cmd.Flags().StringArrayVarP(&route.Listeners, "listener", "l", []string{}, "Startup listener server. eg: tcp://localhost:1080")
cmd.Flags().BoolVar(&config.Debug, "debug", false, "Enable debug log or not")
return cmd
}
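Note the PreRun wiring here: glog.SetTarget routes gvisor's own netstack logging through plog.ServerEmitter onto stderr, so gvisor logs and kubevpn's server logs end up on a single stream in one format.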

View File

@@ -6,11 +6,10 @@ import (
"fmt"
"io"
"os"
"strconv"
"strings"
"github.com/containerd/containerd/platforms"
"github.com/google/uuid"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/net/websocket"
@@ -20,14 +19,19 @@ import (
"k8s.io/kubectl/pkg/util/term"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
// CmdSSH
// Remember to use network mask 32, because ssh uses the unique network cidr 223.255.0.0/16
func CmdSSH(_ cmdutil.Factory) *cobra.Command {
var sshConf = &util.SshConfig{}
var ExtraCIDR []string
// Remember to use network mask 32, because ssh uses the unique network CIDR 198.18.0.0/16
func CmdSSH(cmdutil.Factory) *cobra.Command {
var sshConf = &pkgssh.SshConfig{}
var extraCIDR []string
var platform string
var lite bool
cmd := &cobra.Command{
Use: "ssh",
Short: "Ssh to jump server",
@@ -38,7 +42,7 @@ func CmdSSH(_ cmdutil.Factory) *cobra.Command {
# Jump to server behind a bastion host or ssh jump host
kubevpn ssh --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# It also support ProxyJump, like
# It also supports ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────┘
@@ -50,9 +54,14 @@ func CmdSSH(_ cmdutil.Factory) *cobra.Command {
kubevpn ssh --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`)),
PreRunE: func(cmd *cobra.Command, args []string) error {
plog.InitLoggerForClient()
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
plat, err := platforms.Parse(platform)
if err != nil {
return err
}
config, err := websocket.NewConfig("ws://test/ws", "http://test")
if err != nil {
return err
@@ -61,26 +70,29 @@ func CmdSSH(_ cmdutil.Factory) *cobra.Command {
if !terminal.IsTerminal(fd) {
return fmt.Errorf("stdin is not a terminal")
}
state, err := terminal.MakeRaw(fd)
if err != nil {
return fmt.Errorf("terminal make raw: %s", err)
}
defer terminal.Restore(fd, state)
width, height, err := terminal.GetSize(fd)
if err != nil {
return fmt.Errorf("terminal get size: %s", err)
}
marshal, err := json.Marshal(sshConf)
sessionID := uuid.NewString()
ssh := handler.Ssh{
Config: *sshConf,
ExtraCIDR: extraCIDR,
Width: width,
Height: height,
Platform: platforms.Format(platforms.Normalize(plat)),
SessionID: sessionID,
Lite: lite,
}
marshal, err := json.Marshal(ssh)
if err != nil {
return err
}
sessionID := uuid.NewString()
config.Header.Set("ssh", string(marshal))
config.Header.Set("extra-cidr", strings.Join(ExtraCIDR, ","))
config.Header.Set("terminal-width", strconv.Itoa(width))
config.Header.Set("terminal-height", strconv.Itoa(height))
config.Header.Set("session-id", sessionID)
client := daemon.GetTCPClient(true)
if client == nil {
return fmt.Errorf("client is nil")
}
conn, err := websocket.NewClient(config, client)
if err != nil {
return err
@@ -91,25 +103,54 @@ func CmdSSH(_ cmdutil.Factory) *cobra.Command {
go func() {
errChan <- monitorSize(cmd.Context(), sessionID)
}()
readyCtx, cancelFunc := context.WithCancel(cmd.Context())
checker := func(log string) bool {
isReady := strings.Contains(log, fmt.Sprintf(handler.SshTerminalReadyFormat, sessionID))
if isReady {
cancelFunc()
}
return isReady
}
var state *terminal.State
go func() {
select {
case <-cmd.Context().Done():
return
case <-readyCtx.Done():
}
if state, err = terminal.MakeRaw(fd); err != nil {
plog.G(context.Background()).Errorf("terminal make raw: %s", err)
}
}()
go func() {
_, err := io.Copy(conn, os.Stdin)
errChan <- err
}()
go func() {
_, err := io.Copy(os.Stdout, conn)
_, err := io.Copy(io.MultiWriter(os.Stdout, util.NewWriter(checker)), conn)
errChan <- err
}()
defer func() {
if state != nil {
terminal.Restore(fd, state)
}
}()
select {
case err = <-errChan:
case err := <-errChan:
return err
case <-cmd.Context().Done():
return cmd.Context().Err()
}
},
}
util.AddSshFlags(cmd.Flags(), sshConf)
cmd.Flags().StringArrayVar(&ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
cmd.Flags().StringArrayVar(&extraCIDR, "extra-cidr", []string{}, "Extra network CIDR string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
cmd.Flags().StringVar(&platform, "platform", util.If(os.Getenv("KUBEVPN_DEFAULT_PLATFORM") != "", os.Getenv("KUBEVPN_DEFAULT_PLATFORM"), "linux/amd64"), "Set ssh server platform if the kubevpn command needs to be installed")
cmd.Flags().BoolVar(&lite, "lite", false, "Connect to ssh server in lite mode. Mode \"lite\": designed to only connect to the ssh server. Mode \"full\": not only connects to the ssh server, but also creates a two-way tunnel to communicate with the inner ip")
return cmd
}
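The readiness dance above (checker plus readyCtx) relies on util.NewWriter tapping the output stream so the terminal is only switched to raw mode once the server prints the session-ready marker. A minimal sketch of such a writer, assuming it simply feeds each chunk to the callback (the real helper may buffer across chunk boundaries, which this sketch does not):

package util

import "io"

type callbackWriter struct {
	check func(string) bool
}

// NewWriter returns an io.Writer that passes every chunk to check; used with
// io.MultiWriter so the stream is inspected without disturbing stdout.
func NewWriter(check func(string) bool) io.Writer {
	return &callbackWriter{check: check}
}

func (w *callbackWriter) Write(p []byte) (int, error) {
	w.check(string(p)) // the checker cancels readyCtx once the marker appears
	return len(p), nil
}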
@@ -146,7 +187,7 @@ func monitorSize(ctx context.Context, sessionID string) error {
return nil
}
if err = encoder.Encode(&size); err != nil {
log.Errorf("Encode resize: %s", err)
plog.G(ctx).Errorf("Encode resize: %s", err)
return err
}
}

View File

@@ -14,8 +14,8 @@ import (
)
// CmdSSHDaemon
// set local tun ip 223.254.0.1/32, remember to use mask 32
func CmdSSHDaemon(_ cmdutil.Factory) *cobra.Command {
// set local tun ip 198.19.0.1/32, remember to use mask 32
func CmdSSHDaemon(cmdutil.Factory) *cobra.Command {
var clientIP string
cmd := &cobra.Command{
Use: "ssh-daemon",
@@ -24,23 +24,22 @@ func CmdSSHDaemon(_ cmdutil.Factory) *cobra.Command {
Long: templates.LongDesc(i18n.T(`Ssh daemon server`)),
Example: templates.Examples(i18n.T(`
# SSH daemon server
kubevpn ssh-daemon --client-ip 223.254.0.123/32
kubevpn ssh-daemon --client-ip 198.19.0.123/32
`)),
PreRunE: func(cmd *cobra.Command, args []string) error {
err := daemon.StartupDaemon(cmd.Context())
return err
},
RunE: func(cmd *cobra.Command, args []string) error {
client, err := daemon.GetClient(true).SshStart(
cmd.Context(),
&rpc.SshStartRequest{
ClientIP: clientIP,
},
)
cli, err := daemon.GetClient(true)
if err != nil {
return err
}
_, err = fmt.Fprint(os.Stdout, client.ServerIP)
resp, err := cli.SshStart(cmd.Context(), &rpc.SshStartRequest{ClientIP: clientIP})
if err != nil {
return err
}
_, err = fmt.Fprint(os.Stdout, resp.ServerIP)
return err
},
}

View File

@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/liggitt/tabwriter"
@@ -21,6 +22,8 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
@@ -37,16 +40,16 @@ func CmdStatus(f cmdutil.Factory) *cobra.Command {
var format string
cmd := &cobra.Command{
Use: "status",
Short: i18n.T("Show connect status and list proxy/clone resource"),
Short: i18n.T("Show connect status and list proxy/sync resource"),
Long: templates.LongDesc(i18n.T(`
Show connect status and list proxy/clone resource
Show connect status and list proxy/sync resource
Show connect status and list proxy or clone resource, you can check connect status by field status and netif.
Show connect status and list proxy or sync resource, you can check connect status by field status and netif.
If netif is empty, it means the tun device is closed, so it's unhealthy. It will also show route info; if proxying workloads,
it shows not only your own proxy resources, other route info will also be displayed.
`)),
Example: templates.Examples(i18n.T(`
# show status for connect status and list proxy/clone resource
# show status for connect status and list proxy/sync resource
kubevpn status
# query status by alias config name dev_new
@@ -59,30 +62,30 @@ func CmdStatus(f cmdutil.Factory) *cobra.Command {
kubevpn status -o yaml
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
plog.InitLoggerForClient()
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
var clusterIDs []string
var connectionIDs []string
if aliasName != "" {
configs, err := ParseAndGet(localFile, remoteAddr, aliasName)
if err != nil {
return err
}
for _, config := range configs {
clusterID, err := GetClusterIDByConfig(cmd, config)
for _, conf := range configs {
connectionID, err := GetConnectionIDByConfig(cmd, conf)
if err != nil {
return err
}
clusterIDs = append(clusterIDs, clusterID)
connectionIDs = append(connectionIDs, connectionID)
}
}
resp, err := daemon.GetClient(false).Status(
cmd.Context(),
&rpc.StatusRequest{
ClusterIDs: clusterIDs,
},
)
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
resp, err := cli.Status(cmd.Context(), &rpc.StatusRequest{ConnectionIDs: connectionIDs})
if err != nil {
return err
}
@@ -90,12 +93,12 @@ func CmdStatus(f cmdutil.Factory) *cobra.Command {
if err != nil {
return err
}
fmt.Fprint(os.Stdout, output)
_, _ = fmt.Fprint(os.Stdout, output)
return nil
},
}
cmd.Flags().StringVar(&aliasName, "alias", "", "Alias name, query connect status by alias config name")
cmd.Flags().StringVarP(&localFile, "file", "f", config.GetConfigFilePath(), "Config file location")
cmd.Flags().StringVarP(&localFile, "kubevpnconfig", "f", util.If(os.Getenv("KUBEVPNCONFIG") != "", os.Getenv("KUBEVPNCONFIG"), config.GetConfigFile()), "Path to the kubevpnconfig file to use for CLI requests.")
cmd.Flags().StringVarP(&remoteAddr, "remote", "r", "", "Remote config file, eg: https://raw.githubusercontent.com/kubenetworks/kubevpn/master/pkg/config/config.yaml")
cmd.Flags().StringVarP(&format, "output", "o", FormatTable, fmt.Sprintf("Output format. One of: (%s, %s, %s)", FormatJson, FormatYaml, FormatTable))
return cmd
@@ -107,7 +110,7 @@ func genOutput(status *rpc.StatusResponse, format string) (string, error) {
if len(status.List) == 0 {
return "", nil
}
marshal, err := json.Marshal(status.List)
marshal, err := json.Marshal(status)
if err != nil {
return "", err
}
@@ -117,7 +120,7 @@ func genOutput(status *rpc.StatusResponse, format string) (string, error) {
if len(status.List) == 0 {
return "", nil
}
marshal, err := yaml.Marshal(status.List)
marshal, err := yaml.Marshal(status)
if err != nil {
return "", err
}
@@ -125,18 +128,19 @@ func genOutput(status *rpc.StatusResponse, format string) (string, error) {
default:
var sb = new(bytes.Buffer)
w := printers.GetNewTabWriter(sb)
genConnectMsg(w, status.List)
genConnectMsg(w, status.CurrentConnectionID, status.List)
genProxyMsg(w, status.List)
genCloneMsg(w, status.List)
genSyncMsg(w, status.List)
_ = w.Flush()
return sb.String(), nil
}
}
func genConnectMsg(w *tabwriter.Writer, status []*rpc.Status) {
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", "ID", "Mode", "Cluster", "Kubeconfig", "Namespace", "Status", "Netif")
func genConnectMsg(w *tabwriter.Writer, currentConnectionID string, status []*rpc.Status) {
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", "CURRENT", "CONNECTION ID", "CLUSTER", "KUBECONFIG", "NAMESPACE", "STATUS", "NETIF")
for _, c := range status {
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%s\t%s\n", c.ID, c.Mode, c.Cluster, c.Kubeconfig, c.Namespace, c.Status, c.Netif)
current := util.If[string](c.ConnectionID == currentConnectionID, "*", "")
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", current, c.ConnectionID, c.Cluster, c.Kubeconfig, c.Namespace, c.Status, c.Netif)
}
}
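For orientation, with fixtures like those in the tests further down (and assuming the first connection is the current one), the connect table renders along these lines; column spacing is tabwriter's, approximated here:

CURRENT  CONNECTION ID  CLUSTER                KUBECONFIG                                              NAMESPACE   STATUS     NETIF
*        9c775fd22b84   ccm6epn7qvcplhs3o8p00  /Users/bytedance/.kube/test-feiyan-config-private-new  vke-system  connected  utun4
         38e6a2f11443   ccnepblsebp68ivej4a20  /Users/bytedance/.kube/dev_fy_config_new                vke-system  connected  utun5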
@@ -154,7 +158,7 @@ func genProxyMsg(w *tabwriter.Writer, list []*rpc.Status) {
_, _ = fmt.Fprintf(w, "\n")
w.SetRememberedWidths(nil)
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", "ID", "Name", "Headers", "IP", "PortMap", "CurrentPC")
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", strings.Repeat(" ", len("CURRENT")), "CONNECTION ID", "NAMESPACE", "NAME", "HEADERS", "PORTS", "CURRENT PC")
for _, c := range list {
for _, proxy := range c.ProxyList {
for _, rule := range proxy.RuleList {
@@ -169,11 +173,12 @@ func genProxyMsg(w *tabwriter.Writer, list []*rpc.Status) {
for k, v := range rule.PortMap {
portmap = append(portmap, fmt.Sprintf("%d->%d", k, v))
}
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%v\n",
c.ID,
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%v\n",
"",
c.ConnectionID,
proxy.Namespace,
proxy.Workload,
strings.Join(headers, ","),
rule.LocalTunIPv4,
strings.Join(portmap, ","),
rule.CurrentDevice,
)
@@ -182,10 +187,10 @@ func genProxyMsg(w *tabwriter.Writer, list []*rpc.Status) {
}
}
func genCloneMsg(w *tabwriter.Writer, list []*rpc.Status) {
func genSyncMsg(w *tabwriter.Writer, list []*rpc.Status) {
var needsPrint bool
for _, status := range list {
if len(status.CloneList) != 0 {
if len(status.SyncList) != 0 {
needsPrint = true
break
}
@@ -196,11 +201,11 @@ func genCloneMsg(w *tabwriter.Writer, list []*rpc.Status) {
_, _ = fmt.Fprintf(w, "\n")
w.SetRememberedWidths(nil)
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", "ID", "Name", "Headers", "ToName", "ToKubeconfig", "ToNamespace", "SyncthingGUI")
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", strings.Repeat(" ", len("CURRENT")), "CONNECTION ID", "NAMESPACE", "NAME", "HEADERS", "TO NAME", "SYNCTHING GUI")
for _, c := range list {
for _, clone := range c.CloneList {
//_, _ = fmt.Fprintf(w, "%s\n", clone.Workload)
for _, rule := range clone.RuleList {
for _, sync := range c.SyncList {
//_, _ = fmt.Fprintf(w, "%s\n", sync.Workload)
for _, rule := range sync.RuleList {
var headers []string
for k, v := range rule.Headers {
headers = append(headers, fmt.Sprintf("%s=%s", k, v))
@@ -208,30 +213,32 @@ func genCloneMsg(w *tabwriter.Writer, list []*rpc.Status) {
if len(headers) == 0 {
headers = []string{"*"}
}
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%s\t%s\n",
c.ID,
clone.Workload,
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
"",
c.ConnectionID,
sync.Namespace,
sync.Workload,
strings.Join(headers, ","),
rule.DstWorkload,
rule.DstKubeconfig,
rule.DstNamespace,
clone.SyncthingGUIAddr,
sync.SyncthingGUIAddr,
)
}
}
}
}
func GetClusterIDByConfig(cmd *cobra.Command, config Config) (string, error) {
func GetConnectionIDByConfig(cmd *cobra.Command, config Config) (string, error) {
flags := flag.NewFlagSet("", flag.ContinueOnError)
var sshConf = &util.SshConfig{}
util.AddSshFlags(flags, sshConf)
var sshConf = &pkgssh.SshConfig{}
pkgssh.AddSshFlags(flags, sshConf)
handler.AddExtraRoute(flags, &handler.ExtraRouteInfo{})
configFlags := genericclioptions.NewConfigFlags(false).WithDeprecatedPasswordFlag()
configFlags := genericclioptions.NewConfigFlags(true)
configFlags.AddFlags(flags)
matchVersionFlags := cmdutil.NewMatchVersionFlags(&warp{ConfigFlags: configFlags})
var kubeconfigJson string
AddKubeconfigJsonFlag(flags, &kubeconfigJson)
matchVersionFlags := cmdutil.NewMatchVersionFlags(&warp{ConfigFlags: configFlags, KubeconfigJson: kubeconfigJson})
matchVersionFlags.AddFlags(flags)
factory := cmdutil.NewFactory(matchVersionFlags)
f := cmdutil.NewFactory(matchVersionFlags)
for _, command := range cmd.Parent().Commands() {
command.Flags().VisitAll(func(f *flag.Flag) {
@@ -245,36 +252,81 @@ func GetClusterIDByConfig(cmd *cobra.Command, config Config) (string, error) {
_ = flags.Set(flag.Name, value)
return nil
})
bytes, ns, err := util.ConvertToKubeConfigBytes(factory)
kubeConfigBytes, ns, err := util.ConvertToKubeConfigBytes(f)
if err != nil {
return "", err
}
file, err := util.ConvertToTempKubeconfigFile(bytes)
if err != nil {
return "", err
var file string
defer os.Remove(file)
if !sshConf.IsEmpty() {
file, err = pkgssh.SshJump(cmd.Context(), sshConf, kubeConfigBytes, "", false)
} else {
file, err = util.ConvertToTempKubeconfigFile(kubeConfigBytes, "")
}
flags = flag.NewFlagSet("", flag.ContinueOnError)
flags.AddFlag(&flag.Flag{
Name: "kubeconfig",
DefValue: file,
})
flags.AddFlag(&flag.Flag{
Name: "namespace",
DefValue: ns,
})
var path string
path, err = util.SshJump(cmd.Context(), sshConf, flags, false)
if err != nil {
return "", err
}
var c = &handler.ConnectOptions{}
err = c.InitClient(util.InitFactoryByPath(path, ns))
err = c.InitClient(util.InitFactoryByPath(file, ns))
if err != nil {
return "", err
}
err = c.InitDHCP(cmd.Context())
id, err := util.GetConnectionID(cmd.Context(), c.GetClientset().CoreV1().Namespaces(), ns)
if err != nil {
return "", err
}
return c.GetClusterID(), nil
return id, nil
}
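How util.GetConnectionID derives the ID is not shown in this diff; judging by the test fixtures below (ConnectionID "9c775fd22b84" is the trailing segment of the former ClusterID UUID), it appears to come from the namespace UID. A sketch under that inference only, not a confirmed implementation:

package util

import (
	"context"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1 "k8s.io/client-go/kubernetes/typed/core/v1"
)

// GetConnectionID (inferred): read the namespace and use the last segment of
// its UID as a short, stable per-namespace connection identifier.
func GetConnectionID(ctx context.Context, namespaces v1.NamespaceInterface, ns string) (string, error) {
	namespace, err := namespaces.Get(ctx, ns, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	parts := strings.Split(string(namespace.UID), "-")
	return parts[len(parts)-1], nil
}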
func ParseArgs(cmd *cobra.Command, conf *Config) error {
var str string
for i := 0; i < len(conf.Flags); i++ {
kubeconfigJson, err := parseKubeconfigJson(cmd, []string{conf.Flags[i]})
if err != nil {
return err
}
if kubeconfigJson != "" {
str = kubeconfigJson
conf.Flags = append(conf.Flags[:i], conf.Flags[i+1:]...)
i--
}
}
if str == "" {
return nil
}
file, err := util.ConvertToTempKubeconfigFile([]byte(str), filepath.Join(config.GetTempPath(), conf.Name))
if err != nil {
return err
}
conf.Flags = append(conf.Flags, fmt.Sprintf("%s=%s", "--kubeconfig", file))
return nil
}
func parseKubeconfigJson(cmd *cobra.Command, args []string) (string, error) {
flags := flag.NewFlagSet("", flag.ContinueOnError)
var sshConf = &pkgssh.SshConfig{}
pkgssh.AddSshFlags(flags, sshConf)
handler.AddExtraRoute(flags, &handler.ExtraRouteInfo{})
configFlags := genericclioptions.NewConfigFlags(true)
configFlags.AddFlags(flags)
var kubeconfigJson string
AddKubeconfigJsonFlag(flags, &kubeconfigJson)
matchVersionFlags := cmdutil.NewMatchVersionFlags(&warp{ConfigFlags: configFlags})
matchVersionFlags.AddFlags(flags)
for _, command := range cmd.Parent().Commands() {
command.Flags().VisitAll(func(f *flag.Flag) {
if flags.Lookup(f.Name) == nil && flags.ShorthandLookup(f.Shorthand) == nil {
flags.AddFlag(f)
}
})
}
err := flags.ParseAll(args, func(flag *flag.Flag, value string) error {
_ = flags.Set(flag.Name, value)
return nil
})
return kubeconfigJson, err
}
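ParseArgs above is the alias-side counterpart of --kubeconfig-json: it pulls the inline JSON out of the alias config's stored flags, materializes it as a temp kubeconfig named after the alias (config.GetTempPath()/<name>), and substitutes a plain --kubeconfig flag so the rest of the flow is unchanged.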

View File

@@ -7,65 +7,57 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
)
func TestPrintProxyAndClone(t *testing.T) {
func TestPrintProxyAndSync(t *testing.T) {
var status = &rpc.StatusResponse{
List: []*rpc.Status{
{
ID: 0,
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Mode: "full",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Status: "Connected",
Netif: "utun4",
ConnectionID: "9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Status: "connected",
Netif: "utun4",
ProxyList: []*rpc.Proxy{
{
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Workload: "deployment.apps/authors",
ConnectionID: "9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Workload: "deployment.apps/authors",
RuleList: []*rpc.ProxyRule{
{
Headers: map[string]string{"user": "naison"},
LocalTunIPv4: "223.254.0.103",
LocalTunIPv6: "efff:ffff:ffff:ffff:ffff:ffff:ffff:999d",
LocalTunIPv4: "198.19.0.103",
LocalTunIPv6: "2001:2::999d",
CurrentDevice: false,
PortMap: map[int32]int32{8910: 8910},
},
},
},
},
CloneList: []*rpc.Clone{
SyncList: []*rpc.Sync{
{
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Workload: "deployment.apps/ratings",
RuleList: []*rpc.CloneRule{{
Headers: map[string]string{"user": "naison"},
DstClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
DstCluster: "ccm6epn7qvcplhs3o8p00",
DstKubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
DstNamespace: "vke-system",
DstWorkload: "deployment.apps/ratings-clone-5ngn6",
ConnectionID: "9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Workload: "deployment.apps/ratings",
RuleList: []*rpc.SyncRule{{
Headers: map[string]string{"user": "naison"},
DstWorkload: "deployment.apps/ratings-sync-5ngn6",
}},
},
},
},
{
ID: 1,
ClusterID: "c08cae70-0021-46c9-a1dc-38e6a2f11443",
Cluster: "ccnepblsebp68ivej4a20",
Mode: "full",
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
Namespace: "vke-system",
Status: "Connected",
Netif: "utun5",
ProxyList: []*rpc.Proxy{},
CloneList: []*rpc.Clone{},
ConnectionID: "38e6a2f11443",
Cluster: "ccnepblsebp68ivej4a20",
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
Namespace: "vke-system",
Status: "connected",
Netif: "utun5",
ProxyList: []*rpc.Proxy{},
SyncList: []*rpc.Sync{},
},
},
}
@@ -80,45 +72,41 @@ func TestPrintProxy(t *testing.T) {
var status = &rpc.StatusResponse{
List: []*rpc.Status{
{
ID: 0,
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Mode: "full",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Status: "Connected",
Netif: "utun4",
ConnectionID: "9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Status: "connected",
Netif: "utun4",
ProxyList: []*rpc.Proxy{
{
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Workload: "deployment.apps/authors",
ConnectionID: "9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Workload: "deployment.apps/authors",
RuleList: []*rpc.ProxyRule{
{
Headers: map[string]string{"user": "naison"},
LocalTunIPv4: "223.254.0.103",
LocalTunIPv6: "efff:ffff:ffff:ffff:ffff:ffff:ffff:999d",
LocalTunIPv4: "198.19.0.103",
LocalTunIPv6: "2001:2::999d",
CurrentDevice: false,
PortMap: map[int32]int32{8910: 8910},
},
},
},
},
CloneList: []*rpc.Clone{},
SyncList: []*rpc.Sync{},
},
{
ID: 1,
ClusterID: "c08cae70-0021-46c9-a1dc-38e6a2f11443",
Cluster: "ccnepblsebp68ivej4a20",
Mode: "full",
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
Namespace: "vke-system",
Status: "Connected",
Netif: "utun5",
ProxyList: []*rpc.Proxy{},
CloneList: []*rpc.Clone{},
ConnectionID: "38e6a2f11443",
Cluster: "ccnepblsebp68ivej4a20",
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
Namespace: "vke-system",
Status: "connected",
Netif: "utun5",
ProxyList: []*rpc.Proxy{},
SyncList: []*rpc.Sync{},
},
},
}
@@ -129,48 +117,40 @@ func TestPrintProxy(t *testing.T) {
fmt.Println(output)
}
func TestPrintClone(t *testing.T) {
func TestPrintSync(t *testing.T) {
var status = &rpc.StatusResponse{
List: []*rpc.Status{
{
ID: 0,
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Mode: "full",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Status: "Connected",
Netif: "utun4",
ProxyList: []*rpc.Proxy{},
CloneList: []*rpc.Clone{
ConnectionID: "9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Status: "connected",
Netif: "utun4",
ProxyList: []*rpc.Proxy{},
SyncList: []*rpc.Sync{
{
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Workload: "deployment.apps/ratings",
RuleList: []*rpc.CloneRule{{
Headers: map[string]string{"user": "naison"},
DstClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
DstCluster: "ccm6epn7qvcplhs3o8p00",
DstKubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
DstNamespace: "vke-system",
DstWorkload: "deployment.apps/ratings-clone-5ngn6",
ConnectionID: "9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Workload: "deployment.apps/ratings",
RuleList: []*rpc.SyncRule{{
Headers: map[string]string{"user": "naison"},
DstWorkload: "deployment.apps/ratings-sync-5ngn6",
}},
},
},
},
{
ID: 1,
ClusterID: "c08cae70-0021-46c9-a1dc-38e6a2f11443",
Cluster: "ccnepblsebp68ivej4a20",
Mode: "full",
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
Namespace: "vke-system",
Status: "Connected",
Netif: "utun5",
ProxyList: []*rpc.Proxy{},
CloneList: []*rpc.Clone{},
ConnectionID: "38e6a2f11443",
Cluster: "ccnepblsebp68ivej4a20",
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
Namespace: "vke-system",
Status: "connected",
Netif: "utun5",
ProxyList: []*rpc.Proxy{},
SyncList: []*rpc.Sync{},
},
},
}
@@ -185,28 +165,24 @@ func TestPrint(t *testing.T) {
var status = &rpc.StatusResponse{
List: []*rpc.Status{
{
ID: 0,
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Mode: "full",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Status: "Connected",
Netif: "utun4",
ProxyList: []*rpc.Proxy{},
CloneList: []*rpc.Clone{},
ConnectionID: "9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Status: "connected",
Netif: "utun4",
ProxyList: []*rpc.Proxy{},
SyncList: []*rpc.Sync{},
},
{
ID: 1,
ClusterID: "c08cae70-0021-46c9-a1dc-38e6a2f11443",
Cluster: "ccnepblsebp68ivej4a20",
Mode: "full",
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
Namespace: "vke-system",
Status: "Connected",
Netif: "utun5",
ProxyList: []*rpc.Proxy{},
CloneList: []*rpc.Clone{},
ConnectionID: "38e6a2f11443",
Cluster: "ccnepblsebp68ivej4a20",
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
Namespace: "vke-system",
Status: "connected",
Netif: "utun5",
ProxyList: []*rpc.Proxy{},
SyncList: []*rpc.Sync{},
},
},
}

153
cmd/kubevpn/cmds/sync.go Normal file

@@ -0,0 +1,153 @@
package cmds
import (
"context"
"fmt"
"os"
pkgerr "github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
utilcomp "k8s.io/kubectl/pkg/util/completion"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
)
// CmdSync starts a copy of a workload in the cluster, keeps a local dir
// synced into it, and optionally routes only header-matched traffic to it.
func CmdSync(f cmdutil.Factory) *cobra.Command {
var options = handler.SyncOptions{}
var sshConf = &pkgssh.SshConfig{}
var extraRoute = &handler.ExtraRouteInfo{}
var transferImage bool
var syncDir string
var imagePullSecretName string
cmd := &cobra.Command{
Use: "sync",
Short: i18n.T("Sync workloads run in current namespace with same volume、env、and network"),
Long: templates.LongDesc(i18n.T(`
Sync local dir to workloads which run in current namespace with same volume、env、and network
In this way, you can startup another deployment in current namespace, but with different image version,
it also supports service mesh proxy. only traffic with special header will hit to sync resource.
`)),
Example: templates.Examples(i18n.T(`
# Sync deployment in the current namespace, syncing ~/code to /code/app
kubevpn sync deployment/productpage --sync ~/code:/code/app
# Sync with mesh: traffic with header foo=bar hits the synced workload, otherwise the origin workload
kubevpn sync deployment/productpage --sync ~/code:/code/app --headers foo=bar
# Sync workloads whose api-server is behind a bastion host or ssh jump host
kubevpn sync deployment/productpage --sync ~/code:/code/app --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem --headers foo=bar
# It also supports ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn sync deployment/productpage --sync ~/code:/code/app --ssh-alias <alias> --headers foo=bar
# Support ssh auth GSSAPI
kubevpn sync deployment/productpage --sync ~/code:/code/app --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
kubevpn sync deployment/productpage --sync ~/code:/code/app --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
kubevpn sync deployment/productpage --sync ~/code:/code/app --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`)),
Args: cobra.MatchAll(cobra.OnlyValidArgs, cobra.MinimumNArgs(1)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
plog.InitLoggerForClient()
// startup daemon process and sudo process
err = daemon.StartupDaemon(cmd.Context())
if err != nil {
return err
}
if transferImage {
err = regctl.TransferImageWithRegctl(cmd.Context(), config.OriginImage, config.Image)
}
return err
},
RunE: func(cmd *cobra.Command, args []string) error {
if syncDir != "" {
local, remote, err := util.ParseDirMapping(syncDir)
if err != nil {
return pkgerr.Wrapf(err, "option 'sync' is invalid: %s", syncDir)
}
options.LocalDir = local
options.RemoteDir = remote
} else {
options.RemoteDir = config.DefaultRemoteDir
}
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
if err != nil {
return err
}
if !sshConf.IsEmpty() {
if ip := util.GetAPIServerFromKubeConfigBytes(bytes); ip != nil {
extraRoute.ExtraCIDR = append(extraRoute.ExtraCIDR, ip.String())
}
}
req := &rpc.SyncRequest{
KubeconfigBytes: string(bytes),
Namespace: ns,
Headers: options.Headers,
Workloads: args,
ExtraRoute: extraRoute.ToRPC(),
OriginKubeconfigPath: util.GetKubeConfigPath(f),
SshJump: sshConf.ToRPC(),
TargetContainer: options.TargetContainer,
TargetImage: options.TargetImage,
TransferImage: transferImage,
Image: config.Image,
ImagePullSecretName: imagePullSecretName,
Level: int32(util.If(config.Debug, log.DebugLevel, log.InfoLevel)),
LocalDir: options.LocalDir,
RemoteDir: options.RemoteDir,
}
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
resp, err := cli.Sync(context.Background())
if err != nil {
return err
}
err = resp.Send(req)
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.SyncResponse](cmd.Context(), resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
}
return err
}
_, _ = fmt.Fprintln(os.Stdout, config.Slogan)
return nil
},
}
cmd.Flags().StringToStringVarP(&options.Headers, "headers", "H", map[string]string{}, "Traffic with the specified headers (all headers must match) is redirected to the synced workloads in the target cluster; if not specified, all traffic is redirected. eg: --headers foo=bar --headers env=dev")
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName)
cmdutil.AddContainerVarFlags(cmd, &options.TargetContainer, options.TargetContainer)
cmd.Flags().StringVar(&options.TargetImage, "target-image", "", "Sync container use this image to startup container, if not special, use origin image")
cmd.Flags().StringVar(&syncDir, "sync", "", "Sync local dir to remote pod dir. format: LOCAL_DIR:REMOTE_DIR, eg: ~/code:/app/code")
handler.AddExtraRoute(cmd.Flags(), extraRoute)
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
cmd.ValidArgsFunction = utilcomp.ResourceTypeAndNameCompletionFunc(f)
return cmd
}
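The `--sync` flag value is parsed by `util.ParseDirMapping` into the two halves of a `LOCAL_DIR:REMOTE_DIR` mapping. A minimal sketch of such a parser, splitting on the last colon (`parseDirMapping` here is hypothetical, not the actual implementation, which may handle further edge cases):

```go
package main

import (
	"fmt"
	"strings"
)

// parseDirMapping splits a "LOCAL_DIR:REMOTE_DIR" flag value on the
// last colon, so a Windows-style local path like `C:\code` keeps its
// drive-letter colon. Hypothetical; the real util.ParseDirMapping may differ.
func parseDirMapping(s string) (local, remote string, err error) {
	idx := strings.LastIndex(s, ":")
	if idx <= 0 || idx == len(s)-1 {
		return "", "", fmt.Errorf("invalid dir mapping %q, expected LOCAL_DIR:REMOTE_DIR", s)
	}
	return s[:idx], s[idx+1:], nil
}

func main() {
	local, remote, err := parseDirMapping("~/code:/app/code")
	if err != nil {
		panic(err)
	}
	fmt.Println(local, remote) // ~/code /app/code
}
```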


@@ -10,7 +10,7 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdSyncthing(_ cmdutil.Factory) *cobra.Command {
func CmdSyncthing(cmdutil.Factory) *cobra.Command {
var detach bool
var dir string
cmd := &cobra.Command{
@@ -18,7 +18,7 @@ func CmdSyncthing(_ cmdutil.Factory) *cobra.Command {
Short: i18n.T("Syncthing"),
Long: templates.LongDesc(i18n.T(`Syncthing`)),
RunE: func(cmd *cobra.Command, args []string) (err error) {
go util.StartupPProf(0)
go util.StartupPProfForServer(0)
return syncthing.StartServer(cmd.Context(), detach, dir)
},
Hidden: true,


@@ -0,0 +1,108 @@
package cmds
import (
"context"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"k8s.io/utils/ptr"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdUninstall(f cmdutil.Factory) *cobra.Command {
var sshConf = &pkgssh.SshConfig{}
cmd := &cobra.Command{
Use: "uninstall",
Short: "Uninstall all resource create by kubevpn in k8s cluster",
Long: templates.LongDesc(i18n.T(`
Uninstall all resource create by kubevpn in k8s cluster
Uninstall will delete all resources create by kubevpn in k8s cluster, like deployment, service, serviceAccount...
and it will also delete local develop docker containers, docker networks. delete hosts entry added by kubevpn,
cleanup DNS settings.
`)),
Example: templates.Examples(i18n.T(`
# Uninstall from the default namespace
kubevpn uninstall
# Uninstall from another namespace, test
kubevpn uninstall -n test
# Uninstall from a cluster whose api-server is behind a bastion host or ssh jump host
kubevpn uninstall --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# It also supports ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn uninstall --ssh-alias <alias>
# Support ssh auth GSSAPI
kubevpn uninstall --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
kubevpn uninstall --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
kubevpn uninstall --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`)),
PreRunE: func(cmd *cobra.Command, args []string) error {
plog.InitLoggerForClient()
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
if err != nil {
return err
}
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
disconnectResp, err := cli.Disconnect(context.Background())
if err != nil {
plog.G(cmd.Context()).Warnf("Failed to disconnect from cluster: %v", err)
} else {
err = disconnectResp.Send(&rpc.DisconnectRequest{
KubeconfigBytes: ptr.To(string(bytes)),
Namespace: ptr.To(ns),
SshJump: sshConf.ToRPC(),
})
if err != nil {
plog.G(cmd.Context()).Warnf("Failed to disconnect from cluster: %v", err)
}
_ = util.PrintGRPCStream[rpc.DisconnectResponse](cmd.Context(), disconnectResp)
}
req := &rpc.UninstallRequest{
KubeconfigBytes: string(bytes),
Namespace: ns,
SshJump: sshConf.ToRPC(),
}
resp, err := cli.Uninstall(context.Background())
if err != nil {
return err
}
err = resp.Send(req)
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.UninstallResponse](cmd.Context(), resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
}
return err
}
return nil
},
}
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
return cmd
}


@@ -0,0 +1,63 @@
package cmds
import (
"context"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdUnsync(f cmdutil.Factory) *cobra.Command {
var cmd = &cobra.Command{
Use: "unsync",
Short: "unsync target resource",
Long: templates.LongDesc(i18n.T(`
Remove sync resource
This command is design to remove sync resources, after use command 'kubevpn sync xxx',
it will generate and create a new resource in target k8s cluster with format [resource_name]_sync_xxxxx,
use this command to remove this created resources.
`)),
Example: templates.Examples(i18n.T(`
# leave proxy resources to origin
kubevpn unsync deployment/authors-sync-645d7
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
cli, err := daemon.GetClient(false)
if err != nil {
return err
}
req := &rpc.UnsyncRequest{
Workloads: args,
}
resp, err := cli.Unsync(context.Background())
if err != nil {
return err
}
err = resp.Send(req)
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.UnsyncResponse](cmd.Context(), resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
}
return err
}
return nil
},
}
return cmd
}
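`util.PrintGRPCStream[rpc.UnsyncResponse]` drains the server-side stream and prints the daemon's log lines until the stream ends. A minimal self-contained sketch of what such a generic printer could look like, assuming each response type exposes a `GetMessage() string` accessor (the real signature and message shape are assumptions):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"io"
)

type fakeResp struct{ msg string }

func (f fakeResp) GetMessage() string { return f.msg }

// printGRPCStream drains a server-side gRPC stream until io.EOF,
// printing each message; ctx cancellation stops it early.
func printGRPCStream[T interface{ GetMessage() string }](ctx context.Context, recv func() (T, error)) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		msg, err := recv()
		if errors.Is(err, io.EOF) {
			return nil // server closed the stream normally
		}
		if err != nil {
			return err
		}
		fmt.Print(msg.GetMessage())
	}
}

func main() {
	// Simulate a two-message stream followed by EOF.
	queue := []fakeResp{{"step 1\n"}, {"step 2\n"}}
	recv := func() (fakeResp, error) {
		if len(queue) == 0 {
			return fakeResp{}, io.EOF
		}
		m := queue[0]
		queue = queue[1:]
		return m, nil
	}
	if err := printGRPCStream(context.Background(), recv); err != nil {
		fmt.Println("error:", err)
	}
}
```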


@@ -1,25 +1,58 @@
package cmds
import (
"fmt"
"net/http"
"os"
"github.com/spf13/cobra"
"golang.org/x/oauth2"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/upgrade"
)
func CmdUpgrade(_ cmdutil.Factory) *cobra.Command {
func CmdUpgrade(cmdutil.Factory) *cobra.Command {
cmd := &cobra.Command{
Use: "upgrade",
Short: i18n.T("Upgrade kubevpn client to latest version"),
Long: templates.LongDesc(i18n.T(`
Upgrade the kubevpn client to the latest version: automatically download and install the latest kubevpn from GitHub,
disconnect all from k8s cluster, leave all resources, remove all clone resource, and then,
disconnect everything from the k8s cluster, leave all proxied resources, remove all sync resources, and then
upgrade the local daemon grpc server to the latest version.
`)),
RunE: func(cmd *cobra.Command, args []string) error {
return upgrade.Main(cmd.Context())
const (
envLatestUrl = "KUBEVPN_LATEST_VERSION_URL"
)
plog.InitLoggerForClient()
var client = http.DefaultClient
if config.GitHubOAuthToken != "" {
client = oauth2.NewClient(cmd.Context(), oauth2.StaticTokenSource(&oauth2.Token{AccessToken: config.GitHubOAuthToken, TokenType: "Bearer"}))
}
var url = os.Getenv(envLatestUrl)
if url == "" {
var latestVersion string
var needsUpgrade bool
var err error
url, latestVersion, needsUpgrade, err = upgrade.NeedsUpgrade(cmd.Context(), client, config.Version)
if err != nil {
return err
}
if !needsUpgrade {
_, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("Already up to date, don't needs to upgrade, version: %s", latestVersion))
return nil
}
_, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("Current version is: %s less than latest version: %s, needs to upgrade", config.Version, latestVersion))
_ = os.Setenv(envLatestUrl, url)
_ = quit(cmd.Context(), true)
_ = quit(cmd.Context(), false)
}
return upgrade.Main(cmd.Context(), client, url)
},
}
return cmd
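`upgrade.NeedsUpgrade` is expected to resolve the latest release and decide whether the running binary is older. The version-comparison half can be sketched with `github.com/hashicorp/go-version`, which is already in go.mod (the helper below is hypothetical, not the real `NeedsUpgrade`, which also queries the GitHub API for the download URL):

```go
package main

import (
	"fmt"

	goversion "github.com/hashicorp/go-version"
)

// needsUpgrade reports whether latest is strictly newer than current.
// Sketch only; tags with a leading "v" (e.g. v2.9.4) parse fine.
func needsUpgrade(current, latest string) (bool, error) {
	cur, err := goversion.NewVersion(current)
	if err != nil {
		return false, err
	}
	lat, err := goversion.NewVersion(latest)
	if err != nil {
		return false, err
	}
	return cur.LessThan(lat), nil
}

func main() {
	ok, err := needsUpgrade("v2.9.3", "v2.9.4")
	if err != nil {
		panic(err)
	}
	fmt.Println(ok) // true
}
```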


@@ -64,12 +64,13 @@ func init() {
}
func getDaemonVersion() string {
cli := daemon.GetClient(false)
if cli != nil {
version, err := cli.Version(context.Background(), &rpc.VersionRequest{})
if err == nil {
return version.Version
}
cli, err := daemon.GetClient(false)
if err != nil {
return "unknown"
}
return "unknown"
version, err := cli.Version(context.Background(), &rpc.VersionRequest{})
if err != nil {
return "unknown"
}
return version.Version
}


@@ -1,11 +1,16 @@
package cmds
import (
"fmt"
"os"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/dhcp"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/webhook"
)
@@ -22,11 +27,19 @@ func CmdWebhook(f cmdutil.Factory) *cobra.Command {
`)),
Args: cobra.MaximumNArgs(0),
PreRun: func(cmd *cobra.Command, args []string) {
util.InitLoggerForServer(true)
go util.StartupPProf(0)
go util.StartupPProfForServer(0)
},
RunE: func(cmd *cobra.Command, args []string) error {
return webhook.Main(f)
ns := os.Getenv(config.EnvPodNamespace)
if ns == "" {
return fmt.Errorf("failed to get pod namespace")
}
clientset, err := f.KubernetesClientSet()
if err != nil {
return err
}
manager := dhcp.NewDHCPManager(clientset, ns)
return webhook.Main(manager)
},
}
return cmd
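The webhook now reads its own namespace from `config.EnvPodNamespace`, presumably injected via the downward API. A minimal sketch of that resolution, with the in-cluster serviceaccount file as a common fallback (the env key name and the fallback are assumptions, not kubevpn's actual behavior):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// podNamespace resolves the namespace the webhook pod runs in.
// Sketch under the assumption that the deployment injects it via the
// downward API (fieldRef: metadata.namespace); the serviceaccount
// file is a common in-cluster fallback.
func podNamespace(envKey string) (string, error) {
	if ns := os.Getenv(envKey); ns != "" {
		return ns, nil
	}
	b, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
	if err != nil {
		return "", fmt.Errorf("failed to get pod namespace: %w", err)
	}
	return strings.TrimSpace(string(b)), nil
}

func main() {
	ns, err := podNamespace("POD_NAMESPACE") // key name is an assumption
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("running in namespace:", ns)
}
```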


@@ -1,10 +1,11 @@
package main
import (
_ "net/http/pprof"
ctrl "sigs.k8s.io/controller-runtime"
_ "k8s.io/client-go/plugin/pkg/client/auth"
_ "net/http/pprof"
"github.com/wencaiwulue/kubevpn/v2/cmd/kubevpn/cmds"
)


@@ -1,10 +1,15 @@
## Architecture
### Connect mode
Create a tunnel with port-forward, add a route to a virtual interface (like tun0), and forward traffic through the
tunnel to the remote traffic manager.
![connect-mode](/docs/en/images/connect-mode.drawio.svg)
### Reverse mode
Based on connect mode: inject a container into the workload's controller, and use iptables to block all inbound
traffic and forward it to local through the tunnel.
```text
┌──────────┐ ┌─────────┌──────────┐ ┌──────────┐
@@ -20,7 +25,10 @@ base on connect mode, inject a container to controller, use iptables to block al
```
### Mesh mode
Based on reverse mode, using envoy as the proxy: if the headers carry the specified key-value pair, traffic is routed
to the local machine; if not, it goes to the origin service.
```text
┌──────────┐ ┌─────────┌────────────┐ ┌──────────┐
│ ServiceA ├───►│ sidecar ├─► ServiceB │─►┌─►│ ServiceC │
@@ -28,8 +36,10 @@ base on reverse mode, using envoy as proxy, if headers have special key-value pa
│ │ cloud
─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─┘─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┘ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─
│ │ local
header: a=1
header: foo=bar
┌───┘──────┐ │
│ ServiceB'├─────────────┘
└──────────┘
```
![arch.svg](/docs/en/images/proxy-arch.svg)
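The mesh-mode decision reduces to: does the request carry every configured header pair? A minimal sketch of that predicate in Go (illustrative only; in kubevpn the matching is performed by the injected envoy sidecar, not by application code):

```go
package main

import (
	"fmt"
	"net/http"
)

// matchAll reports whether every configured header pair is present on
// the request, mirroring the documented behaviour where only matching
// traffic is routed to the local/synced workload.
func matchAll(h http.Header, rules map[string]string) bool {
	for k, v := range rules {
		if h.Get(k) != v {
			return false
		}
	}
	return true
}

func main() {
	rules := map[string]string{"foo": "bar"}
	req, _ := http.NewRequest(http.MethodGet, "http://serviceb/", nil)
	req.Header.Set("foo", "bar")
	if matchAll(req.Header, rules) {
		fmt.Println("route to local ServiceB'")
	} else {
		fmt.Println("route to origin ServiceB")
	}
}
```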

File diff suppressed because one or more lines are too long (new image, 372 KiB)

File diff suppressed because one or more lines are too long (new image, 488 KiB)

343
go.mod

@@ -1,23 +1,26 @@
module github.com/wencaiwulue/kubevpn/v2
go 1.22.1
go 1.23.2
require (
github.com/cilium/ipam v0.0.0-20230509084518-fd66eae7909b
github.com/containerd/containerd v1.7.14
github.com/containerd/containerd v1.7.27
github.com/containernetworking/cni v1.1.2
github.com/coredns/caddy v1.1.1
github.com/coredns/coredns v1.11.2
github.com/distribution/reference v0.6.0
github.com/docker/cli v26.0.0+incompatible
github.com/docker/docker v26.0.0+incompatible
github.com/docker/cli v27.5.1+incompatible
github.com/docker/docker v27.5.1+incompatible
github.com/docker/go-connections v0.5.0
github.com/docker/go-units v0.5.0
github.com/docker/libcontainer v2.2.1+incompatible
github.com/envoyproxy/go-control-plane v0.12.0
github.com/fsnotify/fsnotify v1.7.0
github.com/envoyproxy/go-control-plane v0.13.4
github.com/envoyproxy/go-control-plane/envoy v1.32.4
github.com/fsnotify/fsnotify v1.8.0
github.com/gliderlabs/ssh v0.3.8
github.com/golang/protobuf v1.5.4
github.com/google/go-cmp v0.7.0
github.com/google/gopacket v1.1.19
github.com/google/uuid v1.6.0
github.com/hashicorp/go-version v1.6.0
github.com/hashicorp/go-version v1.7.0
github.com/hpcloud/tail v1.0.0
github.com/jcmturner/gofork v1.7.6
github.com/jcmturner/gokrb5/v8 v8.4.4
@@ -25,252 +28,254 @@ require (
github.com/libp2p/go-netroute v0.2.1
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de
github.com/mattbaird/jsonpatch v0.0.0-20240118010651-0ba75a80ca38
github.com/miekg/dns v1.1.58
github.com/moby/sys/signal v0.7.0
github.com/moby/term v0.5.0
github.com/opencontainers/image-spec v1.1.0
github.com/miekg/dns v1.1.64
github.com/moby/term v0.5.2
github.com/opencontainers/image-spec v1.1.1
github.com/pkg/errors v0.9.1
github.com/prometheus-community/pro-bing v0.4.0
github.com/regclient/regclient v0.8.0
github.com/schollz/progressbar/v3 v3.14.2
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.8.0
github.com/spf13/pflag v1.0.5
github.com/syncthing/syncthing v1.27.7
github.com/thejerf/suture/v4 v4.0.5
go.uber.org/automaxprocs v1.5.3
golang.org/x/crypto v0.23.0
golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0
golang.org/x/net v0.25.0
golang.org/x/oauth2 v0.18.0
golang.org/x/sync v0.7.0
golang.org/x/sys v0.20.0
golang.org/x/text v0.15.0
golang.org/x/time v0.5.0
golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.6
github.com/syncthing/syncthing v1.29.2
github.com/thejerf/suture/v4 v4.0.6
go.uber.org/automaxprocs v1.6.0
golang.org/x/crypto v0.37.0
golang.org/x/net v0.39.0
golang.org/x/oauth2 v0.28.0
golang.org/x/sys v0.32.0
golang.org/x/term v0.31.0
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
golang.zx2c4.com/wireguard v0.0.0-20220920152132-bb719d3a6e2c
golang.zx2c4.com/wireguard/windows v0.5.3
google.golang.org/grpc v1.62.1
google.golang.org/protobuf v1.33.0
google.golang.org/grpc v1.71.1
google.golang.org/protobuf v1.36.6
gopkg.in/natefinch/lumberjack.v2 v2.2.1
gvisor.dev/gvisor v0.0.0-20240403010941-979aae3d2c21
k8s.io/api v0.31.0-alpha.0
k8s.io/apimachinery v0.31.0-alpha.0
k8s.io/cli-runtime v0.29.3
k8s.io/client-go v0.31.0-alpha.0
k8s.io/klog/v2 v2.120.1
k8s.io/kubectl v0.29.3
k8s.io/utils v0.0.0-20240310230437-4693a0247e57
sigs.k8s.io/controller-runtime v0.17.1-0.20240327193027-21368602d84b
sigs.k8s.io/kustomize/api v0.16.0
gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987
helm.sh/helm/v4 v4.0.0-20250324191910-0199b748aaea
k8s.io/api v0.32.3
k8s.io/apimachinery v0.32.3
k8s.io/cli-runtime v0.32.3
k8s.io/client-go v0.32.3
k8s.io/klog/v2 v2.130.1
k8s.io/kubectl v0.32.3
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e
sigs.k8s.io/controller-runtime v0.20.4
sigs.k8s.io/kustomize/api v0.19.0
sigs.k8s.io/yaml v1.4.0
tags.cncf.io/container-device-interface v0.7.0
tailscale.com v1.74.1
)
require (
cel.dev/expr v0.15.0 // indirect
cloud.google.com/go/compute v1.25.1 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
dario.cat/mergo v1.0.0 // indirect
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.29 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
cel.dev/expr v0.19.1 // indirect
dario.cat/mergo v1.0.1 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/DataDog/appsec-internal-go v1.5.0 // indirect
github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.0 // indirect
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.0 // indirect
github.com/DataDog/datadog-go/v5 v5.5.0 // indirect
github.com/DataDog/go-libddwaf/v2 v2.4.2 // indirect
github.com/DataDog/go-sqllexer v0.0.11 // indirect
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/DataDog/sketches-go v1.4.4 // indirect
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Microsoft/hcsshim v0.12.2 // indirect
github.com/antonmedv/expr v1.15.5 // indirect
github.com/apparentlymart/go-cidr v1.1.0 // indirect
github.com/aws/aws-sdk-go v1.51.12 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.3.0 // indirect
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
github.com/Masterminds/squirrel v1.5.4 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Microsoft/hcsshim v0.12.9 // indirect
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/calmh/incontainer v1.0.0 // indirect
github.com/calmh/xdr v1.1.0 // indirect
github.com/ccding/go-stun v0.1.4 // indirect
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
github.com/calmh/xdr v1.2.0 // indirect
github.com/ccding/go-stun v0.1.5 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect
github.com/cncf/xds/go v0.0.0-20240329184929-0c46c01016dc // indirect
github.com/cilium/ebpf v0.16.0 // indirect
github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/dnstap/golang-dnstap v0.4.0 // indirect
github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.1 // indirect
github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/ebitengine/purego v0.7.0 // indirect
github.com/emicklei/go-restful/v3 v3.12.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect
github.com/evanphx/json-patch v5.9.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/ebitengine/purego v0.8.3 // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/farsightsec/golang-framestream v0.3.0 // indirect
github.com/fatih/camelcase v1.0.0 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
github.com/fvbommel/sortorder v1.1.0 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gaissmai/bart v0.11.1 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
github.com/go-errors/errors v1.5.1 // indirect
github.com/go-ldap/ldap/v3 v3.4.6 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 // indirect
github.com/go-ldap/ldap/v3 v3.4.10 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonpointer v0.21.1 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/go-openapi/swag v0.23.1 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20240402174815-29b9bb013b0f // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 // indirect
github.com/google/pprof v0.0.0-20250202011525-fc3143867406 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.3 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/greatroar/blobloom v0.7.2 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/gosuri/uitable v0.0.4 // indirect
github.com/greatroar/blobloom v0.8.0 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
github.com/hdevalence/ed25519consensus v0.2.0 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
github.com/illarion/gonotify/v2 v2.0.3 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/infobloxopen/go-trees v0.0.0-20221216143356-66ceba885ebc // indirect
github.com/jackpal/gateway v1.0.14 // indirect
github.com/jackpal/gateway v1.0.16 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jinzhu/gorm v1.9.16 // indirect
github.com/jmoiron/sqlx v1.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 // indirect
github.com/jsimonetti/rtnetlink v1.4.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.17.7 // indirect
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect
github.com/mailru/easyjson v0.9.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mdlayher/netlink v1.7.2 // indirect
github.com/mdlayher/socket v0.5.0 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/sys/symlink v0.2.0 // indirect
github.com/moby/sys/user v0.1.0 // indirect
github.com/moby/spdystream v0.5.0 // indirect
github.com/moby/sys/sequential v0.6.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/onsi/ginkgo/v2 v2.17.1 // indirect
github.com/onsi/ginkgo/v2 v2.22.2 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 // indirect
github.com/openzipkin/zipkin-go v0.4.2 // indirect
github.com/oschwald/geoip2-golang v1.9.0 // indirect
github.com/oschwald/maxminddb-golang v1.12.0 // indirect
github.com/outcaste-io/ristretto v0.2.3 // indirect
github.com/pelletier/go-toml/v2 v2.0.9 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/philhofer/fwd v1.1.2 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/prometheus/client_golang v1.19.0 // indirect
github.com/prometheus/client_golang v1.21.1 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.52.2 // indirect
github.com/prometheus/procfs v0.13.0 // indirect
github.com/quic-go/quic-go v0.42.0 // indirect
github.com/prometheus/common v0.63.0 // indirect
github.com/prometheus/procfs v0.16.0 // indirect
github.com/quic-go/quic-go v0.50.1 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rubenv/sql-migrate v1.7.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
github.com/shirou/gopsutil/v3 v3.24.3 // indirect
github.com/shirou/gopsutil/v4 v4.25.1 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/spf13/cast v1.7.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/stretchr/testify v1.9.0 // indirect
github.com/stretchr/testify v1.10.0 // indirect
github.com/syncthing/notify v0.0.0-20210616190510-c6b7342338d2 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 // indirect
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/tinylib/msgp v1.1.9 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.9.0 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.etcd.io/etcd/api/v3 v3.5.13 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.13 // indirect
go.etcd.io/etcd/client/v3 v3.5.13 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
go.opentelemetry.io/otel v1.24.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect
go.opentelemetry.io/otel/metric v1.24.0 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.starlark.net v0.0.0-20240329153429-e6e8e7ce1b7a // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/mock v0.4.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/mod v0.17.0 // indirect
golang.org/x/term v0.20.0 // indirect
golang.org/x/tools v0.21.0 // indirect
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/api v0.172.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
gopkg.in/DataDog/dd-trace-go.v1 v1.62.0 // indirect
gopkg.in/evanphx/json-patch.v5 v5.9.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
go.opentelemetry.io/otel v1.35.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect
go.opentelemetry.io/otel/metric v1.35.0 // indirect
go.opentelemetry.io/otel/sdk v1.35.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect
go.opentelemetry.io/otel/trace v1.35.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/mock v0.5.0 // indirect
go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect
golang.org/x/exp v0.0.0-20250207012021-f9890c6ad9f3 // indirect
golang.org/x/mod v0.23.0 // indirect
golang.org/x/sync v0.13.0 // indirect
golang.org/x/text v0.24.0 // indirect
golang.org/x/time v0.11.0 // indirect
golang.org/x/tools v0.30.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250409194420-de1ac958c67a // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.31.0-alpha.0 // indirect
k8s.io/component-base v0.31.0-alpha.0 // indirect
k8s.io/kube-openapi v0.0.0-20240322212309-b815d8309940 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/kustomize/kyaml v0.16.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
k8s.io/apiextensions-apiserver v0.32.3 // indirect
k8s.io/apiserver v0.32.3 // indirect
k8s.io/component-base v0.32.3 // indirect
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
oras.land/oras-go/v2 v2.5.0 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
)

914
go.sum

File diff suppressed because it is too large.

6
go.work Normal file

@@ -0,0 +1,6 @@
go 1.23.2
use (
.
./pkg/syncthing/auto
)

403
go.work.sum Normal file

@@ -0,0 +1,403 @@
4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs=
4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU=
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA=
fyne.io/systray v1.11.0/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs=
github.com/Abirdcfly/dupword v0.0.11/go.mod h1:wH8mVGuf3CP5fsBTkfWwwwKTjDnVVCxtU8d8rgeVYXA=
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU=
github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0=
github.com/Antonboom/errname v0.1.9/go.mod h1:nLTcJzevREuAsgTbG85UsuiWpMpAqbKD1HNZ29OzE58=
github.com/Antonboom/nilnil v0.1.4/go.mod h1:iOov/7gRcXkeEU+EMGpBu2ORih3iyVEiWjeste1SJm8=
github.com/AudriusButkevicius/recli v0.0.7-0.20220911121932-d000ce8fbf0f/go.mod h1:Nhfib1j/VFnLrXL9cHgA+/n2O6P5THuWelOnbfPNd78=
github.com/Djarvur/go-err113 v0.1.0/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0/go.mod h1:b3g59n2Y+T5xmcxJL+UEG2f8cQploZm1mR/v6BW0mU0=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/Masterminds/vcs v1.13.3/go.mod h1:TiE7xuEjl1N4j016moRd6vezp6e6Lz23gypeXfzXeW8=
github.com/Microsoft/cosesign1go v1.2.0/go.mod h1:1La/HcGw19rRLhPW0S6u55K6LKfti+GQSgGCtrfhVe8=
github.com/Microsoft/didx509go v0.0.3/go.mod h1:wWt+iQsLzn3011+VfESzznLIp/Owhuj7rLF7yLglYbk=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/OpenPeeDeeP/depguard v1.1.1/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc=
github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
github.com/akavel/rsrc v0.10.2/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw=
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
github.com/alecthomas/kong v1.6.0/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I=
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/ashanbrown/forbidigo v1.5.1/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU=
github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI=
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno=
github.com/aws/aws-sdk-go-v2/config v1.26.5/go.mod h1:DxHrz6diQJOc9EwDslVRh84VjjrE17g+pVZXUeSxaDU=
github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.64/go.mod h1:4Q7R9MFpXRdjO3YnAfUTdnuENs32WzBkASt6VxSYDYQ=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.25/go.mod h1:SUbB4wcbSEyCvqBxv/O/IBf93RbEze7U7OnoTlpPB+g=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.28/go.mod h1:spfrICMD6wCAhjhzHuy6DOZZ+LAIY10UxhUmLzpJTTs=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.2/go.mod h1:4tfW5l4IAB32VWCDEBxCRtR9T4BWy4I4kr1spr8NgZM=
github.com/aws/aws-sdk-go-v2/service/s3 v1.33.0/go.mod h1:J9kLNzEiHSeGMyN7238EjJmBpCniVzFda75Gxl/NqB8=
github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM=
github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8=
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U=
github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
github.com/bazelbuild/rules_go v0.44.2/go.mod h1:Dhcz716Kqg1RHNWos+N6MlXNkjNP2EwZQ0LukRKJfMs=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI=
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k=
github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo=
github.com/bramvdbogaerde/go-scp v1.4.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ=
github.com/breml/bidichk v0.2.4/go.mod h1:7Zk0kRFt1LIZxtQdl9W9JwGAcLTTkOs+tN7wuEYGJ3s=
github.com/breml/errchkjson v0.3.1/go.mod h1:XroxrzKjdiutFyW3nWhw34VGg7kiMsDQox73yWCGI2U=
github.com/butuzov/ireturn v0.2.0/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
github.com/cavaliergopher/cpio v1.0.1/go.mod h1:pBdaqQjnvXxdS/6CvNDwIANIFSP0xRKI16PX4xejRQc=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ=
github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8/go.mod h1:gakxgyXaaPkxvLw1XQxNGK4I37ys9iBRzNUx/B7pUCo=
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
github.com/containerd/btrfs/v2 v2.0.0/go.mod h1:swkD/7j9HApWpzl8OHfrHNxppPd9l44DFZdF94BUj9k=
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc=
github.com/containerd/continuity v0.4.4/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM=
github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
github.com/containerd/imgcrypt v1.1.8/go.mod h1:x6QvFIkMyO2qGIY2zXc88ivEzcbgvLdWjoZyGqDap5U=
github.com/containerd/nri v0.8.0/go.mod h1:uSkgBrCdEtAiEz4vnrq8gmAC4EnVAM5Klt0OuK5rZYQ=
github.com/containerd/protobuild v0.3.0/go.mod h1:5mNMFKKAwCIAkFBPiOdtRx2KiQlyEJeMXnL5R1DsWu8=
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
github.com/containerd/zfs v1.1.0/go.mod h1:oZF9wBnrnQjpWLaPKEinrx3TQ9a+W/RJO7Zb41d8YLE=
github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4=
github.com/containers/ocicrypt v1.1.10/go.mod h1:YfzSSr06PTHQwSTUKqDSjish9BeW1E4HUmreluQcMd8=
github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc=
github.com/daixiang0/gci v0.10.1/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI=
github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps=
github.com/dave/astrid v0.0.0-20170323122508-8c2895878b14/go.mod h1:Sth2QfxfATb/nW4EsrSi2KyJmbcniZ8TgTaji17D6ms=
github.com/dave/brenda v1.1.0/go.mod h1:4wCUr6gSlu5/1Tk7akE5X7UorwiQ8Rij0SKH3/BGMOM=
github.com/dave/courtney v0.4.0/go.mod h1:3WSU3yaloZXYAxRuWt8oRyVb9SaRiMBt5Kz/2J227tM=
github.com/dave/patsy v0.0.0-20210517141501-957256f50cba/go.mod h1:qfR88CgEGLoiqDaE+xxDCi5QA5v4vUoW0UCX2Nd5Tlc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c=
github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A=
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq40=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
github.com/elastic/crd-ref-docs v0.0.12/go.mod h1:X83mMBdJt05heJUYiS3T0yJ/JkCuliuhSUNav5Gjo/U=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0=
github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
github.com/evanw/esbuild v0.19.11/go.mod h1:D2vIQZqV/vIf/VRHtViaUtViZmG7o+kKmlBfVQuRi48=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI=
github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/go-critic/go-critic v0.8.0/go.mod h1:5TjdkPI9cu/yKbYS96BTsslihjKd6zg6vd8O9RZXj2s=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU=
github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw=
github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ=
github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4=
github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA=
github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ=
github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig=
github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-yaml v1.12.0/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
github.com/godror/godror v0.40.4/go.mod h1:i8YtVTHUJKfFT3wTat4A9UoqScUtZXiYB9Rf3SVARgc=
github.com/godror/knownpb v0.1.1/go.mod h1:4nRFbQo1dDuwKnblRXDxrfCFYeT4hjg3GjMqef58eRE=
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/mock v1.7.0-rc.1/go.mod h1:s42URUywIqd+OcERslBJvOjepvNymP31m3q8d/GkuRs=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ=
github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs=
github.com/golangci/golangci-lint v1.52.2/go.mod h1:S5fhC5sHM5kE22/HcATKd1XLWQxX+y7mHj8B5H91Q/0=
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
github.com/golangci/misspell v0.4.0/go.mod h1:W6O/bwV6lGDxUCChm2ykw9NQdd5bYd1Xkjo88UcWyJc=
github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs=
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
github.com/google/go-containerregistry v0.20.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
github.com/google/go-github/v56 v56.0.0/go.mod h1:D8cdcX98YWJvi7TLo7zM4/h8ZTx6u6fwGEkCdisopo0=
github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2/go.mod h1:nOFQdrUlIlx6M6ODdSpBj1NVA+VgLC6kmw60mkw34H4=
github.com/google/rpmpack v0.5.0/go.mod h1:uqVAUVQLq8UY2hCDfmJ/+rtO3aw7qyhc90rCVEabEfI=
github.com/google/subcommands v1.0.2-0.20190508160503-636abe8753b8/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
github.com/goreleaser/chglog v0.5.0/go.mod h1:Ri46M3lrMuv76FHszs3vtABR8J8k1w9JHYAzxeeOl28=
github.com/goreleaser/fileglob v1.3.0/go.mod h1:Jx6BoXv3mbYkEzwm9THo7xbr5egkAraxkGorbJb4RxU=
github.com/goreleaser/nfpm/v2 v2.33.1/go.mod h1:8wwWWvJWmn84xo/Sqiv0aMvEGTHlHZTXTEuVSgQpkIM=
github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk=
github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc=
github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM=
github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak=
github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hanwen/go-fuse/v2 v2.3.0/go.mod h1:xKwi1cF7nXAOBCXujD5ie0ZKsxc8GGSA1rlMJc+8IJs=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inetaf/tcpproxy v0.0.0-20240214030015-3ce58045626c/go.mod h1:Di7LXRyUcnvAcLicFhtM9/MlZl/TNgRSDHORM2c6CMI=
github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI=
github.com/intel/goresctrl v0.5.0/go.mod h1:mIe63ggylWYr0cU/l8n11FAkesqfvuP3oktIsxvu0T0=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4=
github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc=
github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/josephspurrier/goversioninfo v1.4.0/go.mod h1:JWzv5rKQr+MmW+LvM412ToT/IkYDZjaclF2pKDss8IY=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE=
github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
github.com/junk1tm/musttag v0.5.0/go.mod h1:PcR7BA+oREQYvHwgjIDmw3exJeds5JzRcvEJTfjrA0M=
github.com/kisielk/errcheck v1.6.3/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw=
github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I=
github.com/kunwardeep/paralleltest v1.0.6/go.mod h1:Y0Y0XISdZM5IKm3TREQMZ6iteqn1YuwCsJO/0kL9Zes=
github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA=
github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0=
github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4=
github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY=
github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU=
github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4=
github.com/lestrrat-go/jwx v1.2.29/go.mod h1:hU8k2l6WF0ncx20uQdOmik/Gjg6E3/wIRtXSNFeZuB8=
github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM=
github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJooIbHWCufXvOcT6E7Stq81gU+CSQ=
github.com/lxn/win v0.0.0-20210218163916-a377121e959e/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk=
github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE=
github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc=
github.com/maruel/panicparse/v2 v2.4.0/go.mod h1:nOY2OKe8csO3F3SA5+hsxot05JLgukrF54B9x88fVp4=
github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/maxbrunsfeld/counterfeiter/v6 v6.8.1/go.mod h1:eyp4DdUJAKkr9tvxR3jWhw2mDK7CWABMG5r9uyaKC7I=
github.com/maxmind/geoipupdate/v6 v6.1.0/go.mod h1:cZYCDzfMzTY4v6dKRdV7KTB6SStxtn3yFkiJ1btTGGc=
github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o=
github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE=
github.com/mgechev/revive v1.3.1/go.mod h1:YlD6TTWl2B8A103R9KWJSPVI9DrEf+oqr15q21Ld+5I=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/mohae/deepcopy v0.0.0-20170308212314-bb9b5e7adda9/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE=
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
github.com/nelsam/hel/v2 v2.3.3/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w=
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
github.com/nishanths/exhaustive v0.10.0/go.mod h1:IbwrGdVMizvDcIxPYGVdQn5BqWJaOwpCvg4RGb8r/TA=
github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c=
github.com/nunnatsa/ginkgolinter v0.11.2/go.mod h1:dJIGXYXbkBswqa/pIzG0QlVTTDSBMxDoCFwhsl4Uras=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/open-policy-agent/opa v0.68.0/go.mod h1:5E5SvaPwTpwt2WM177I9Z3eT7qUpmOGjk1ZdHs+TZ4w=
github.com/opencontainers/runc v1.1.14/go.mod h1:E4C2z+7BxR7GHXp0hAY53mek+x49X1LjPNeMTfRGvOA=
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI=
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/oschwald/geoip2-golang v1.11.0/go.mod h1:P9zG+54KPEFOliZ29i7SeYZ/GM6tfEL+rgSn03hYuUo=
github.com/oschwald/maxminddb-golang v1.13.1/go.mod h1:K4pgV9N/GcK694KSTmVSDTODk4IsCNThNdTmnaBZ/F8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ=
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
github.com/polyfloyd/go-errorlint v1.4.1/go.mod h1:k6fU/+fQe38ednoZS51T7gSIGQW1y94d6TkSr35OzH8=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff/go.mod h1:FvE8dtQ1Ww63IlyKBn1V4s+zMwF9kHkVNkQBR1pM4CU=
github.com/puzpuzpuz/xsync/v3 v3.4.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/quasilyte/go-ruleguard v0.3.19/go.mod h1:lHSn69Scl48I7Gt9cX3VrbsZYvYiBYszZOZW4A+oTEw=
github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng=
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
github.com/riywo/loginshell v0.0.0-20200815045211-7d26008be1ab/go.mod h1:/PfPXh0EntGc3QAAyUaviy4S9tzy4Zp0e2ilq4voC6E=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww=
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50=
github.com/ryanrolds/sqlclosecheck v0.4.0/go.mod h1:TBRRjzL31JONc9i4XMinicuo+s+E8yKZ5FN8X3G6CKQ=
github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs=
github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ=
github.com/sashamelentyev/usestdlibvars v1.23.0/go.mod h1:YPwr/Y1LATzHI93CqoPUN/2BzGQ/6N/cl/KwgR0B/aU=
github.com/securego/gosec/v2 v2.15.0/go.mod h1:VOjTrZOkUtSDt2QLSJmQBMWnvwiQPEjg0l+5juIqGk8=
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4=
github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY=
github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg=
github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo=
github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I=
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M=
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/studio-b12/gowebdav v0.9.0/go.mod h1:bHA7t77X/QFExdeAnDzK6vKM34kEZAcE1OX4MfiwjkE=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk=
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e/go.mod h1:XrBNfAFN+pwoWuksbFS9Ccxnopa15zJGgXRFN90l3K4=
github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8=
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg=
github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41/go.mod h1:/roCdA6gg6lQyw/Oz6gIIGu3ggJKYhF+WC/AQReE5XQ=
github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ=
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8=
github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8=
github.com/tailscale/mkctr v0.0.0-20240628074852-17ca944da6ba/go.mod h1:DxnqIXBplij66U2ZkL688xy07q97qQ83P+TVueLiHq4=
github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU=
github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y=
github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4=
github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg=
github.com/tc-hib/winres v0.2.1/go.mod h1:C/JaNhH3KBvhNKVbvdlDWkbMDO9H4fKKDaN7/07SSuk=
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/tcnksm/go-httpstat v0.2.0/go.mod h1:s3JVJFtQxtBEBC9dwcdTTXS9xFnM3SXAZwPG41aurT8=
github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg=
github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ=
github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE=
github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM=
github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI=
github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264=
github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
github.com/urfave/cli v1.22.16/go.mod h1:EeJR6BKodywf4zciqrdw6hpCPk68JO9z5LazXZMn5Po=
github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY=
github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
github.com/veraison/go-cose v1.1.0/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4=
github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
github.com/willabides/kongplete v0.4.0/go.mod h1:0P0jtWD9aTsqPSUAl4de35DLghrr57XcayPyvqSi2X8=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk=
github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA=
github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
gitlab.com/bosi/decorder v0.2.3/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE=
gitlab.com/digitalxero/go-conventional-commit v1.0.7/go.mod h1:05Xc2BFsSyC5tKhK0y+P3bs0AwUtNuTp+mTpbCU/DZ0=
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28=
go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E=
go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50=
go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY=
go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
golang.org/x/exp/typeparams v0.0.0-20240119083558-1b970713d09a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E=
golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0=
golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1/go.mod h1:5KF+wpkbTSbGcR9zteSqZV6fqFOWBl4Yde8En8MryZA=
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs=
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
k8s.io/code-generator v0.32.3/go.mod h1:+mbiYID5NLsBuqxjQTygKM/DAdKpAjvBzrJd64NU1G8=
k8s.io/component-helpers v0.32.3/go.mod h1:utTBXk8lhkJewBKNuNf32Xl3KT/0VV19DmiXU/SV4Ao=
k8s.io/cri-api v0.27.1/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0=
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/kms v0.32.3/go.mod h1:Bk2evz/Yvk0oVrvm4MvZbgq8BD34Ksxs2SRHn4/UiOM=
k8s.io/metrics v0.32.3/go.mod h1:9R1Wk5cb+qJpCQon9h52mgkVCcFeYxcY+YkumfwHVCU=
mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js=
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8/go.mod h1:Oh/d7dEtzsNHGOq1Cdv8aMm3KdKhVvPbRQcM8WFpBR8=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/controller-tools v0.15.1-0.20240618033008-7824932b0cab/go.mod h1:egedX5jq2KrZ3A2zaOz3e2DSsh5BhFyyjvNcBRIQel8=
sigs.k8s.io/kustomize/kustomize/v5 v5.5.0/go.mod h1:AeFCmgCrXzmvjWWaeZCyBp6XzG1Y0w1svYus8GhJEOE=
software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
tags.cncf.io/container-device-interface v0.8.1/go.mod h1:Apb7N4VdILW0EVdEMRYXIDVRZfNJZ+kmEUss2kRRQ6Y=
tags.cncf.io/container-device-interface/specs-go v0.8.0/go.mod h1:BhJIkjjPh4qpys+qm4DAYtUyryaTDg9zris+AczXyws=

install.sh Executable file

@@ -0,0 +1,235 @@
#!/bin/sh
# KubeVPN installation script
# This script installs KubeVPN CLI to your system
# Created for https://github.com/kubenetworks/kubevpn
# curl -fsSL https://kubevpn.dev/install.sh | sh
set -e
# Colors and formatting
YELLOW='\033[0;33m'
GREEN='\033[0;32m'
RED='\033[0;31m'
BLUE='\033[0;34m'
BOLD='\033[1m'
RESET='\033[0m'
# Installation configuration
INSTALL_DIR=${INSTALL_DIR:-"/usr/local/bin"}
GITHUB_REPO="kubenetworks/kubevpn"
GITHUB_URL="https://github.com/${GITHUB_REPO}"
VERSION_URL="https://raw.githubusercontent.com/kubenetworks/kubevpn/refs/heads/master/plugins/stable.txt"
ZIP_FILE="kubevpn.zip"
# printf '%b' interprets the \033 color escapes on every POSIX sh; plain echo does not
log() {
printf '%b\n' "${BLUE}${BOLD}==> ${RESET}$1"
}
success() {
printf '%b\n' "${GREEN}${BOLD}==> $1${RESET}"
}
warn() {
printf '%b\n' "${YELLOW}${BOLD}==> $1${RESET}"
}
error() {
printf '%b\n' "${RED}${BOLD}==> $1${RESET}"
}
get_system_info() {
OS=$(uname | tr '[:upper:]' '[:lower:]')
log "Detected OS: ${OS}"
case $OS in
linux | darwin) ;;
msys_nt | msys | cygwin)
error "Windows is not supported, please install KubeVPN manually using scoop. More info: ${GITHUB_URL}"
exit 1
;;
*)
error "Unsupported operating system: ${OS}"
exit 1
;;
esac
ARCH=$(uname -m)
case $ARCH in
x86_64)
ARCH="amd64"
;;
aarch64 | arm64)
ARCH="arm64"
;;
i386 | i686)
ARCH="386"
;;
*)
error "Unsupported architecture: ${ARCH}"
exit 1
;;
esac
log "Detected architecture: ${ARCH}"
}
check_requirements() {
if command -v curl >/dev/null 2>&1; then
DOWNLOADER="curl"
elif command -v wget >/dev/null 2>&1; then
DOWNLOADER="wget"
else
error "Either curl or wget is required for installation"
exit 1
fi
if ! command -v unzip >/dev/null 2>&1; then
error "unzip is required but not installed"
exit 1
fi
if [ ! -d "$INSTALL_DIR" ]; then
log "Installation directory $INSTALL_DIR does not exist, attempting to create it"
if ! mkdir -p "$INSTALL_DIR" 2>/dev/null; then
if ! command -v sudo >/dev/null 2>&1 && ! command -v su >/dev/null 2>&1; then
error "Cannot create $INSTALL_DIR and neither sudo nor su is available"
exit 1
fi
fi
fi
if [ ! -w "$INSTALL_DIR" ] && ! command -v sudo >/dev/null 2>&1 && ! command -v su >/dev/null 2>&1; then
error "No write permission to $INSTALL_DIR and neither sudo nor su is available"
exit 1
fi
}
get_latest_version() {
log "Fetching the latest release version..."
if [ "$DOWNLOADER" = "curl" ]; then
VERSION=$(curl -s "$VERSION_URL")
else
VERSION=$(wget -qO- "$VERSION_URL")
fi
if [ -z "$VERSION" ]; then
error "Could not determine the latest version"
exit 1
fi
VERSION=$(echo "$VERSION" | tr -d 'v' | tr -d '\n')
success "Latest version: ${VERSION}"
}
download_binary() {
DOWNLOAD_URL="$GITHUB_URL/releases/download/v${VERSION}/kubevpn_v${VERSION}_${OS}_${ARCH}.zip"
log "Downloading KubeVPN binary from $DOWNLOAD_URL"
if [ "$DOWNLOADER" = "curl" ]; then
curl -L -o "$ZIP_FILE" "$DOWNLOAD_URL" || {
error "Failed to download KubeVPN"
exit 1
}
else
wget -O "$ZIP_FILE" "$DOWNLOAD_URL" || {
error "Failed to download KubeVPN"
exit 1
}
fi
}
install_binary() {
log "Installing KubeVPN..."
TMP_DIR=$(mktemp -d)
BINARY="$TMP_DIR/bin/kubevpn"
unzip -o -q "$ZIP_FILE" -d "$TMP_DIR"
if [ -f "$TMP_DIR/checksums.txt" ]; then
EXPECTED_CHECKSUM=$(cat "$TMP_DIR/checksums.txt" | awk '{print $1}')
if command -v shasum >/dev/null 2>&1; then
ACTUAL_CHECKSUM=$(shasum -a 256 "$BINARY" | awk '{print $1}')
elif command -v sha256sum >/dev/null 2>&1; then
ACTUAL_CHECKSUM=$(sha256sum "$BINARY" | awk '{print $1}')
else
warn "No checksum tool available, skipping verification"
ACTUAL_CHECKSUM=$EXPECTED_CHECKSUM
fi
[ "$ACTUAL_CHECKSUM" = "$EXPECTED_CHECKSUM" ] || {
error "Checksum verification failed (Expected: $EXPECTED_CHECKSUM, Got: $ACTUAL_CHECKSUM)"
# Clean up
rm -rf "$TMP_DIR"
rm -f "$ZIP_FILE"
exit 1
}
fi
# Check if we need sudo
if [ -w "$INSTALL_DIR" ]; then
mv "$BINARY" "$INSTALL_DIR/kubevpn"
chmod +x "$INSTALL_DIR/kubevpn"
else
warn "Elevated permissions required to install to $INSTALL_DIR"
if command -v sudo >/dev/null 2>&1; then
sudo mv "$BINARY" "$INSTALL_DIR/kubevpn"
sudo chmod +x "$INSTALL_DIR/kubevpn"
else
su -c "mv \"$BINARY\" \"$INSTALL_DIR/kubevpn\" && chmod +x \"$INSTALL_DIR/kubevpn\""
fi
fi
# Clean up
rm -f "$ZIP_FILE"
rm -rf "$TMP_DIR"
}
verify_installation() {
if [ -x "$INSTALL_DIR/kubevpn" ]; then
VERSION_OUTPUT=$("$INSTALL_DIR/kubevpn" version 2>&1 || echo "unknown")
success "KubeVPN installed successfully"
log "$VERSION_OUTPUT"
log "KubeVPN has been installed to: $INSTALL_DIR/kubevpn"
# Check if the installed binary is in PATH
if command -v kubevpn >/dev/null 2>&1; then
FOUND_PATH=$(command -v kubevpn)
if [ "$FOUND_PATH" != "$INSTALL_DIR/kubevpn" ]; then
warn "Another kubevpn binary was found in your PATH at: $FOUND_PATH"
warn "Make sure $INSTALL_DIR is in your PATH to use the newly installed version"
fi
else
warn "Make sure $INSTALL_DIR is in your PATH to use kubevpn"
fi
echo ""
log "To connect to a Kubernetes cluster:"
if [ "$FOUND_PATH" != "$INSTALL_DIR/kubevpn" ]; then
echo " $INSTALL_DIR/kubevpn connect"
else
echo " kubevpn connect"
fi
echo ""
log "For more information, visit:"
echo " $GITHUB_URL"
success "Done! enjoy KubeVPN 🚀"
else
error "KubeVPN installation failed"
exit 1
fi
}
main() {
log "Starting KubeVPN installation..."
get_system_info
check_requirements
get_latest_version
download_binary
install_binary
verify_installation
}
main
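Usage note (a sketch based on the INSTALL_DIR default at the top of the script, not separately documented behavior): because INSTALL_DIR is only defaulted when unset, installing into a user-writable directory avoids the sudo/su fallback entirely:

curl -fsSL https://kubevpn.dev/install.sh | INSTALL_DIR="$HOME/.local/bin" sh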


@@ -2,8 +2,6 @@ package config
import (
"net"
"os"
"path/filepath"
"sync"
"time"
@@ -14,48 +12,58 @@ const (
// configmap name
ConfigMapPodTrafficManager = "kubevpn-traffic-manager"
// helm app name kubevpn
HelmAppNameKubevpn = "kubevpn"
// default installed namespace
DefaultNamespaceKubevpn = "kubevpn"
// config map keys
KeyDHCP = "DHCP"
KeyDHCP6 = "DHCP6"
KeyEnvoy = "ENVOY_CONFIG"
KeyClusterIPv4POOLS = "IPv4_POOLS"
KeyRefCount = "REF_COUNT"
// secret keys
// TLSCertKey is the key for tls certificates in a TLS secret.
TLSCertKey = "tls_crt"
// TLSPrivateKeyKey is the key for the private key field in a TLS secret.
TLSPrivateKeyKey = "tls_key"
// TLSServerName for tls config server name
TLSServerName = "tls_server_name"
// container name
ContainerSidecarEnvoyProxy = "envoy-proxy"
ContainerSidecarControlPlane = "control-plane"
ContainerSidecarWebhook = "webhook"
ContainerSidecarVPN = "vpn"
ContainerSidecarSyncthing = "syncthing"
VolumeEnvoyConfig = "envoy-config"
VolumeSyncthing = "syncthing"
VolumeSyncthing = "syncthing"
innerIPv4Pool = "223.254.0.100/16"
// Reason: in a docker environment, the docker gateway and subnet must not conflict with the inner pool, nor with docker's own 172.17 network;
// otherwise requests will not get through.
// Problem this solves: in k8s, the service named kubernetes has this cluster IP:
// ➜ ~ kubectl get service kubernetes
//NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
//kubernetes ClusterIP 172.17.0.1 <none> 443/TCP 190d
//
// ➜ ~ docker network inspect bridge | jq '.[0].IPAM.Config'
//[
// {
// "Subnet": "172.17.0.0/16",
// "Gateway": "172.17.0.1"
// }
//]
// Without creating a dedicated network, this kubernetes service cannot be reached.
dockerInnerIPv4Pool = "223.255.0.100/16"
// IPv4Pool is used as tun ip
// 198.19.0.0/16 network is part of the 198.18.0.0/15 (reserved for benchmarking).
// https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
// so we split it into 2 parts: 198.18.0.0/15 --> [198.18.0.0/16, 198.19.0.0/16]
IPv4Pool = "198.19.0.0/16"
// 2001:2::/64 network is part of the 2001:2::/48 (reserved for benchmarking)
// https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml
IPv6Pool = "2001:2::/64"
/*
reason: docker's default 172.17.0.0/16 network conflicts with the k8s service named kubernetes
➜ ~ kubectl get service kubernetes
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 172.17.0.1 <none> 443/TCP 190d
//The IPv6 address prefixes FE80::/10 and FF02::/16 are not routable
innerIPv6Pool = "efff:ffff:ffff:ffff:ffff:ffff:ffff:9999/64"
➜ ~ docker network inspect bridge | jq '.[0].IPAM.Config'
[
{
"Subnet": "172.17.0.0/16",
"Gateway": "172.17.0.1"
}
]
*/
DockerIPv4Pool = "198.18.0.1/16"
DefaultNetDir = "/etc/cni/net.d"
@@ -70,14 +78,8 @@ const (
EnvPodNamespace = "POD_NAMESPACE"
// header name
HeaderPodName = "POD_NAME"
HeaderPodNamespace = "POD_NAMESPACE"
HeaderIPv4 = "IPv4"
HeaderIPv6 = "IPv6"
// api
APIRentIP = "/rent/ip"
APIReleaseIP = "/release/ip"
HeaderIPv4 = "IPv4"
HeaderIPv6 = "IPv6"
KUBECONFIG = "kubeconfig"
@@ -89,30 +91,22 @@ const (
SudoPProfPort = 33345
PProfDir = "pprof"
// startup by KubeVPN
EnvStartSudoKubeVPNByKubeVPN = "DEPTH_SIGNED_BY_NAISON"
EnvSSHJump = "SSH_JUMP_BY_KUBEVPN"
EnvSSHJump = "SSH_JUMP_BY_KUBEVPN"
// transport mode
ConfigKubeVPNTransportEngine = "transport-engine"
// hosts entry key word
HostsKeyWord = "# Add by KubeVPN"
)
var (
// Image inject --ldflags -X
Image = "docker.io/naison/kubevpn:latest"
Image = "ghcr.io/kubenetworks/kubevpn:latest"
Version = "latest"
GitCommit = ""
// GitHubOAuthToken --ldflags -X
GitHubOAuthToken = ""
OriginImage = "docker.io/naison/kubevpn:" + Version
DaemonPath string
HomePath string
PprofPath string
OriginImage = "ghcr.io/kubenetworks/kubevpn:" + Version
)
var (
@@ -127,25 +121,31 @@ var (
)
func init() {
RouterIP, CIDR, _ = net.ParseCIDR(innerIPv4Pool)
RouterIP6, CIDR6, _ = net.ParseCIDR(innerIPv6Pool)
DockerRouterIP, DockerCIDR, _ = net.ParseCIDR(dockerInnerIPv4Pool)
dir, _ := os.UserHomeDir()
DaemonPath = filepath.Join(dir, HOME, Daemon)
HomePath = filepath.Join(dir, HOME)
PprofPath = filepath.Join(dir, HOME, Daemon, PProfDir)
var err error
RouterIP, CIDR, err = net.ParseCIDR(IPv4Pool)
if err != nil {
panic(err)
}
RouterIP6, CIDR6, err = net.ParseCIDR(IPv6Pool)
if err != nil {
panic(err)
}
DockerRouterIP, DockerCIDR, err = net.ParseCIDR(DockerIPv4Pool)
if err != nil {
panic(err)
}
}
var Debug bool
var (
SmallBufferSize = (1 << 13) - 1 // 8KB small buffer
MediumBufferSize = (1 << 15) - 1 // 32KB medium buffer
LargeBufferSize = (1 << 16) - 1 // 64KB large buffer
SmallBufferSize = 8 * 1024 // 8KB small buffer
MediumBufferSize = 32 * 1024 // 32KB medium buffer
LargeBufferSize = 64 * 1024 // 64KB large buffer
)
var (
KeepAliveTime = 180 * time.Second
KeepAliveTime = 60 * time.Second
DialTimeout = 15 * time.Second
HandshakeTimeout = 5 * time.Second
ConnectTimeout = 5 * time.Second
@@ -154,30 +154,65 @@ var (
)
var (
// network layer ip needs 20 bytes
// transport layer UDP header needs 8 bytes
// UDP over TCP header needs 22 bytes
DefaultMTU = 1500 - 20 - 8 - 21
// DefaultMTU
/**
+--------------------------------------------------------------------+
| Original IP Packet from TUN |
+-------------------+------------------------------------------------+
| IP Header (20B) | Payload (MTU size) |
+-------------------+------------------------------------------------+
After adding custom 2-byte header:
+----+-------------------+-------------------------------------------+
| LH | IP Header (20B) | Payload |
+----+-------------------+-------------------------------------------+
| 2B | 20B | 1453 - 20 = 1433B |
+----+-------------------+-------------------------------------------+
TLS 1.3 Record Structure Breakdown:
+---------------------+--------------------------+-------------------+
| TLS Header (5B) | Encrypted Data (N) | Auth Tag (16B) |
+---------------------+--------------------------+-------------------+
| Content Type (1) | ↑ | AEAD Authentication
| Version (2) | Encrypted Payload | (e.g. AES-GCM) |
| Length (2) | (Original Data + LH2) | |
+---------------------+--------------------------+-------------------+
|←------- 5B --------→|←---- Length Field ------→|←----- 16B -------→|
Final Ethernet Frame:
+--------+----------------+----------------+-----------------------+--------+
| EthHdr | IP Header | TCP Header | TLS Components |
| (14B) | (20B) | (20B) +---------+-------------+--------+
| | | | Hdr(5B) | Data+LH2 | Tag(16)|
+--------+----------------+----------------+---------+-------------+--------+
|←------------------- Total 1500B Ethernet Frame --------------------------→|
ipv4: 20
ipv6: 40
mtu = 1500 - ip header(20/40 v4/v6) - tcp header (20) - tls1.3(5+1+16) - packet over tcp(length(2)+remark(1)) = 1415
*/
DefaultMTU = 1500 - max(20, 40) - 20 - (5 + 1 + 16) - (2 + 1)
)
var (
LPool = &sync.Pool{
SPool = &sync.Pool{
New: func() interface{} {
return make([]byte, SmallBufferSize)
},
}
MPool = sync.Pool{
New: func() any {
return make([]byte, MediumBufferSize)
},
}
LPool = sync.Pool{
New: func() any {
return make([]byte, LargeBufferSize)
},
}
)
var SPool = sync.Pool{
New: func() any {
return make([]byte, 2)
},
}
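A minimal usage sketch for these pools (import path taken from other files in this diff; the payload is illustrative): borrow a slice, use it, and return the same slice so every pooled buffer keeps its fixed size.

package main

import (
	"bytes"

	"github.com/wencaiwulue/kubevpn/v2/pkg/config"
)

func main() {
	buf := config.LPool.Get().([]byte) // borrow a 64KB buffer from the large pool
	defer config.LPool.Put(buf)        // return the very same slice for reuse
	n, _ := bytes.NewReader([]byte("payload")).Read(buf)
	_ = buf[:n] // only the first n bytes are valid
}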
type Engine string
const (
EngineGvisor Engine = "gvisor"
EngineMix Engine = "mix"
EngineRaw Engine = "raw"
)
const Slogan = "Now you can access resources in the kubernetes cluster !"


@@ -1,19 +1,20 @@
# Here is an example kubevpn config file; please adapt it to your own setup.
# Three fields are supported: Name, Needs, Flags
# Exec command: kubevpn alias qa <===> kubevpn connect --kubeconfig=~/.kube/jumper_config --namespace=default
# Simple is Good ~
# Just keep it simple
Name: dev
Description: This is dev k8s environment, needs jump by qa env
Needs: qa
Flags:
- connect
- --kubeconfig=~/.kube/config
- --namespace=default
- --lite
---
Name: qa
Description: This is QA k8s environment
Flags:
- connect
- --kubeconfig=~/.kube/jumper_config
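For instance, with this file in place, kubevpn alias dev should resolve the Needs chain and run qa first; roughly equivalent to (a sketch derived from the entries above, not captured output):

kubevpn connect --kubeconfig=~/.kube/jumper_config
kubevpn connect --kubeconfig=~/.kube/config --namespace=default --lite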


@@ -11,42 +11,60 @@ import (
const (
HOME = ".kubevpn"
Daemon = "daemon"
Log = "log"
SockPath = "daemon.sock"
SudoSockPath = "sudo_daemon.sock"
SockPath = "user_daemon.sock"
SudoSockPath = "root_daemon.sock"
PidPath = "daemon.pid"
SudoPidPath = "sudo_daemon.pid"
PidPath = "user_daemon.pid"
SudoPidPath = "root_daemon.pid"
LogFile = "daemon.log"
KubeVPNRestorePatchKey = "kubevpn-probe-restore-patch"
UserLogFile = "user_daemon.log"
SudoLogFile = "root_daemon.log"
ConfigFile = "config.yaml"
TempDir = "temp"
DBFile = "db"
)
//go:embed config.yaml
var config []byte
var (
homePath string
daemonPath string
logPath string
//go:embed config.yaml
config []byte
)
func init() {
err := os.MkdirAll(DaemonPath, 0755)
dir, err := os.UserHomeDir()
if err != nil {
panic(err)
}
err = os.Chmod(DaemonPath, 0755)
if err != nil {
panic(err)
}
err = os.MkdirAll(PprofPath, 0755)
if err != nil {
panic(err)
}
err = os.Chmod(PprofPath, 0755)
if err != nil {
panic(err)
homePath = filepath.Join(dir, HOME)
daemonPath = filepath.Join(dir, HOME, Daemon)
logPath = filepath.Join(dir, HOME, Log)
var paths = []string{homePath, daemonPath, logPath, GetPProfPath(), GetSyncthingPath(), GetTempPath()}
for _, path := range paths {
_, err = os.Stat(path)
if errors.Is(err, os.ErrNotExist) {
err = os.MkdirAll(path, 0755)
if err != nil {
panic(err)
}
err = os.Chmod(path, 0755)
if err != nil {
panic(err)
}
} else if err != nil {
panic(err)
}
}
path := filepath.Join(HomePath, ConfigFile)
path := filepath.Join(homePath, ConfigFile)
_, err = os.Stat(path)
if errors.Is(err, os.ErrNotExist) {
err = os.WriteFile(path, config, 0644)
@@ -61,7 +79,7 @@ func GetSockPath(isSudo bool) string {
if isSudo {
name = SudoSockPath
}
return filepath.Join(DaemonPath, name)
return filepath.Join(daemonPath, name)
}
func GetPidPath(isSudo bool) string {
@@ -69,17 +87,32 @@ func GetPidPath(isSudo bool) string {
if isSudo {
name = SudoPidPath
}
return filepath.Join(DaemonPath, name)
return filepath.Join(daemonPath, name)
}
func GetSyncthingPath() string {
return filepath.Join(DaemonPath, SyncthingDir)
return filepath.Join(daemonPath, SyncthingDir)
}
func GetSyncthingGUIPath() string {
return filepath.Join(DaemonPath, SyncthingDir, SyncthingGUIDir)
func GetConfigFile() string {
return filepath.Join(homePath, ConfigFile)
}
func GetConfigFilePath() string {
return filepath.Join(HomePath, ConfigFile)
func GetTempPath() string {
return filepath.Join(homePath, TempDir)
}
func GetDaemonLogPath(isSudo bool) string {
if isSudo {
return filepath.Join(logPath, SudoLogFile)
}
return filepath.Join(logPath, UserLogFile)
}
func GetPProfPath() string {
return filepath.Join(daemonPath, PProfDir)
}
func GetDBPath() string {
return filepath.Join(daemonPath, DBFile)
}


@@ -9,12 +9,9 @@ import (
const (
SyncthingDir = "syncthing"
SyncthingGUIDir = "gui"
DefaultRemoteDir = "/kubevpn-data"
// EnvDisableSyncthingLog disables the syncthing log, because syncthing cannot take a custom output writer; it only writes to os.Stdout or io.Discard
EnvDisableSyncthingLog = "LOGGER_DISCARD"
SyncthingAPIKey = "kubevpn"
)
var LocalCert tls.Certificate


@@ -2,14 +2,18 @@ package controlplane
import (
"fmt"
"strconv"
"strings"
"time"
v31 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
accesslogfilev3 "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/file/v3"
corsv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/cors/v3"
grpcwebv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/grpc_web/v3"
routerv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3"
@@ -22,27 +26,72 @@ import (
"github.com/envoyproxy/go-control-plane/pkg/cache/types"
"github.com/envoyproxy/go-control-plane/pkg/resource/v3"
"github.com/envoyproxy/go-control-plane/pkg/wellknown"
log "github.com/sirupsen/logrus"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/wrapperspb"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
type Virtual struct {
Uid string // group.resource.name
Ports []corev1.ContainerPort
Rules []*Rule
Namespace string
Uid string // group.resource.name
Ports []ContainerPort
Rules []*Rule
}
type ContainerPort struct {
// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
// named port in a pod must have a unique name. Name for the port that can be
// referred to by services.
// +optional
Name string `json:"name,omitempty"`
// EnvoyListenerPort is the port the envoy listener binds to on the host.
// If specified, this must be a valid port number, 0 < x < 65536.
// A non-zero value enables fargate mode.
// +optional
EnvoyListenerPort int32 `json:"envoyListenerPort,omitempty"`
// Number of port to expose on the pod's IP address.
// This must be a valid port number, 0 < x < 65536.
ContainerPort int32 `json:"containerPort"`
// Protocol for port. Must be UDP, TCP, or SCTP.
// Defaults to "TCP".
// +optional
// +default="TCP"
Protocol corev1.Protocol `json:"protocol,omitempty"`
}
func ConvertContainerPort(ports ...corev1.ContainerPort) []ContainerPort {
var result []ContainerPort
for _, port := range ports {
result = append(result, ContainerPort{
Name: port.Name,
EnvoyListenerPort: 0,
ContainerPort: port.ContainerPort,
Protocol: port.Protocol,
})
}
return result
}
type Rule struct {
Headers map[string]string
LocalTunIPv4 string
LocalTunIPv6 string
PortMap map[int32]int32
// For non-privileged mode (AWS Fargate mode): without cap NET_ADMIN and privileged: true we cannot use an OSI layer 3 proxy.
// containerPort -> envoyRulePort:localPort
// envoyRulePort: envoy forwards traffic to localhost:envoyRulePort
// localPort: the local pc listens on localhost:localPort
// Via an ssh reverse tunnel, the envoy rule endpoint localhost:envoyRulePort is forwarded to localhost:localPort on the local pc.
// localPort is required; envoyRulePort is optional (see the parsing sketch after this struct).
PortMap map[int32]string
}
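A hypothetical helper (not part of this diff) that makes the PortMap value convention concrete; it mirrors the strings.Index/strconv.Atoi handling in To() below:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// splitPortPair parses "envoyRulePort:localPort"; a bare "port" is treated as
// localPort only, since envoyRulePort is optional per the comment above.
func splitPortPair(s string) (envoyRulePort, localPort int32, err error) {
	if i := strings.Index(s, ":"); i > 0 {
		e, err := strconv.Atoi(s[:i])
		if err != nil {
			return 0, 0, err
		}
		l, err := strconv.Atoi(s[i+1:])
		if err != nil {
			return 0, 0, err
		}
		return int32(e), int32(l), nil
	}
	l, err := strconv.Atoi(s)
	return 0, int32(l), err
}

func main() {
	fmt.Println(splitPortPair("15006:8080")) // 15006 8080 <nil>
	fmt.Println(splitPortPair("8080"))       // 0 8080 <nil>
}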
func (a *Virtual) To() (
func (a *Virtual) To(enableIPv6 bool, logger *log.Logger) (
listeners []types.Resource,
clusters []types.Resource,
routes []types.Resource,
@@ -50,21 +99,57 @@ func (a *Virtual) To() (
) {
//clusters = append(clusters, OriginCluster())
for _, port := range a.Ports {
listenerName := fmt.Sprintf("%s_%v_%s", a.Uid, port.ContainerPort, port.Protocol)
isFargateMode := port.EnvoyListenerPort != 0
listenerName := fmt.Sprintf("%s_%s_%v_%s", a.Namespace, a.Uid, util.If(isFargateMode, port.EnvoyListenerPort, port.ContainerPort), port.Protocol)
routeName := listenerName
listeners = append(listeners, ToListener(listenerName, routeName, port.ContainerPort, port.Protocol))
listeners = append(listeners, ToListener(listenerName, routeName, util.If(isFargateMode, port.EnvoyListenerPort, port.ContainerPort), port.Protocol, isFargateMode))
var rr []*route.Route
for _, rule := range a.Rules {
for _, ip := range []string{rule.LocalTunIPv4, rule.LocalTunIPv6} {
clusterName := fmt.Sprintf("%s_%v", ip, rule.PortMap[port.ContainerPort])
var ips []string
if enableIPv6 {
ips = []string{rule.LocalTunIPv4, rule.LocalTunIPv6}
} else {
ips = []string{rule.LocalTunIPv4}
}
ports := rule.PortMap[port.ContainerPort]
if isFargateMode {
if strings.Index(ports, ":") > 0 {
ports = strings.Split(ports, ":")[0]
} else {
logger.Errorf("fargate mode port should have two pair: %s", ports)
}
}
envoyRulePort, _ := strconv.Atoi(ports)
for _, ip := range ips {
clusterName := fmt.Sprintf("%s_%v", ip, envoyRulePort)
clusters = append(clusters, ToCluster(clusterName))
endpoints = append(endpoints, ToEndPoint(clusterName, ip, rule.PortMap[port.ContainerPort]))
endpoints = append(endpoints, ToEndPoint(clusterName, ip, int32(envoyRulePort)))
rr = append(rr, ToRoute(clusterName, rule.Headers))
}
}
rr = append(rr, DefaultRoute())
clusters = append(clusters, OriginCluster())
// if isFargateMode is true, we need to add a default route to the container port, because use_original_dst does not work in this mode
if isFargateMode {
// all IPs should be IPv4 127.0.0.1 and IPv6 ::1
var ips = sets.New[string]()
for _, rule := range a.Rules {
if enableIPv6 {
ips.Insert(rule.LocalTunIPv4, rule.LocalTunIPv6)
} else {
ips.Insert(rule.LocalTunIPv4)
}
}
for _, ip := range ips.UnsortedList() {
defaultClusterName := fmt.Sprintf("%s_%v", ip, port.ContainerPort)
clusters = append(clusters, ToCluster(defaultClusterName))
endpoints = append(endpoints, ToEndPoint(defaultClusterName, ip, port.ContainerPort))
rr = append(rr, DefaultRouteToCluster(defaultClusterName))
}
} else {
rr = append(rr, DefaultRoute())
clusters = append(clusters, OriginCluster())
}
routes = append(routes, &route.RouteConfiguration{
Name: routeName,
VirtualHosts: []*route.VirtualHost{
@@ -219,7 +304,30 @@ func DefaultRoute() *route.Route {
}
}
func ToListener(listenerName string, routeName string, port int32, p corev1.Protocol) *listener.Listener {
func DefaultRouteToCluster(clusterName string) *route.Route {
return &route.Route{
Match: &route.RouteMatch{
PathSpecifier: &route.RouteMatch_Prefix{
Prefix: "/",
},
},
Action: &route.Route_Route{
Route: &route.RouteAction{
ClusterSpecifier: &route.RouteAction_Cluster{
Cluster: clusterName,
},
Timeout: durationpb.New(0),
IdleTimeout: durationpb.New(0),
MaxStreamDuration: &route.RouteAction_MaxStreamDuration{
MaxStreamDuration: durationpb.New(0),
GrpcTimeoutHeaderMax: durationpb.New(0),
},
},
},
}
}
func ToListener(listenerName string, routeName string, port int32, p corev1.Protocol, isFargateMode bool) *listener.Listener {
var protocol core.SocketAddress_Protocol
switch p {
case corev1.ProtocolTCP:
@@ -283,6 +391,14 @@ func ToListener(listenerName string, routeName string, port int32, p corev1.Prot
UpgradeConfigs: []*httpconnectionmanager.HttpConnectionManager_UpgradeConfig{{
UpgradeType: "websocket",
}},
AccessLog: []*v31.AccessLog{{
Name: wellknown.FileAccessLog,
ConfigType: &v31.AccessLog_TypedConfig{
TypedConfig: anyFunc(&accesslogfilev3.FileAccessLog{
Path: "/dev/stdout",
}),
},
}},
}
tcpConfig := &tcpproxy.TcpProxy{
@@ -295,7 +411,7 @@ func ToListener(listenerName string, routeName string, port int32, p corev1.Prot
return &listener.Listener{
Name: listenerName,
TrafficDirection: core.TrafficDirection_INBOUND,
BindToPort: &wrapperspb.BoolValue{Value: false},
BindToPort: &wrapperspb.BoolValue{Value: util.If(isFargateMode, true, false)},
UseOriginalDst: &wrapperspb.BoolValue{Value: true},
Address: &core.Address{


@@ -2,15 +2,16 @@ package controlplane
import (
"context"
"fmt"
"github.com/envoyproxy/go-control-plane/pkg/cache/v3"
serverv3 "github.com/envoyproxy/go-control-plane/pkg/server/v3"
"github.com/fsnotify/fsnotify"
log "github.com/sirupsen/logrus"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
func Main(ctx context.Context, filename string, port uint, logger *log.Logger) error {
func Main(ctx context.Context, factory cmdutil.Factory, port uint, logger *log.Logger) error {
snapshotCache := cache.NewSnapshotCache(false, cache.IDHash{}, logger)
proc := NewProcessor(snapshotCache, logger)
@@ -23,33 +24,20 @@ func Main(ctx context.Context, filename string, port uint, logger *log.Logger) e
notifyCh := make(chan NotifyMessage, 100)
notifyCh <- NotifyMessage{
Operation: Create,
FilePath: filename,
}
watcher, err := fsnotify.NewWatcher()
if err != nil {
return fmt.Errorf("failed to create file watcher: %v", err)
}
defer watcher.Close()
err = watcher.Add(filename)
if err != nil {
return fmt.Errorf("failed to add file: %s to wather: %v", filename, err)
}
notifyCh <- NotifyMessage{}
go func() {
errChan <- Watch(watcher, filename, notifyCh)
errChan <- Watch(ctx, factory, notifyCh)
}()
for {
select {
case msg := <-notifyCh:
err = proc.ProcessFile(msg)
err := proc.ProcessFile(msg)
if err != nil {
log.Errorf("failed to process file: %v", err)
plog.G(ctx).Errorf("Failed to process file: %v", err)
return err
}
case err = <-errChan:
case err := <-errChan:
return err
case <-ctx.Done():
return ctx.Err()


@@ -3,10 +3,8 @@ package controlplane
import (
"context"
"encoding/json"
"fmt"
"math"
"math/rand"
"os"
"reflect"
"strconv"
"time"
@@ -14,20 +12,22 @@ import (
"github.com/envoyproxy/go-control-plane/pkg/cache/types"
"github.com/envoyproxy/go-control-plane/pkg/cache/v3"
"github.com/envoyproxy/go-control-plane/pkg/resource/v3"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
utilcache "k8s.io/apimachinery/pkg/util/cache"
"sigs.k8s.io/yaml"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
type Processor struct {
cache cache.SnapshotCache
logger *logrus.Logger
logger *log.Logger
version int64
expireCache *utilcache.Expiring
}
func NewProcessor(cache cache.SnapshotCache, log *logrus.Logger) *Processor {
func NewProcessor(cache cache.SnapshotCache, log *log.Logger) *Processor {
return &Processor{
cache: cache,
logger: log,
@@ -45,24 +45,33 @@ func (p *Processor) newVersion() string {
}
func (p *Processor) ProcessFile(file NotifyMessage) error {
configList, err := ParseYaml(file.FilePath)
configList, err := ParseYaml(file.Content)
if err != nil {
p.logger.Errorf("error parsing yaml file: %+v", err)
p.logger.Errorf("failed to parse config file: %v", err)
return err
}
enableIPv6, _ := util.DetectSupportIPv6()
for _, config := range configList {
if len(config.Uid) == 0 {
continue
}
lastConfig, ok := p.expireCache.Get(config.Uid)
var marshal []byte
marshal, err = json.Marshal(config)
if err != nil {
p.logger.Errorf("failed to marshal config: %v", err)
return err
}
uid := util.GenEnvoyUID(config.Namespace, config.Uid)
lastConfig, ok := p.expireCache.Get(uid)
if ok && reflect.DeepEqual(lastConfig.(*Virtual), config) {
marshal, _ := json.Marshal(config)
p.logger.Debugf("config are same, not needs to update, config: %s", string(marshal))
p.logger.Infof("no need to update, config: %s", string(marshal))
continue
}
p.logger.Debugf("update config, version %d, config %v", p.version, config)
listeners, clusters, routes, endpoints := config.To()
p.logger.Infof("update config, version: %d, config: %s", p.version, marshal)
listeners, clusters, routes, endpoints := config.To(enableIPv6, p.logger)
resources := map[resource.Type][]types.Resource{
resource.ListenerType: listeners, // listeners
resource.RouteType: routes, // routes
@@ -71,38 +80,33 @@ func (p *Processor) ProcessFile(file NotifyMessage) error {
resource.RuntimeType: {}, // runtimes
resource.SecretType: {}, // secrets
}
var snapshot *cache.Snapshot
snapshot, err = cache.NewSnapshot(p.newVersion(), resources)
if err != nil {
p.logger.Errorf("snapshot inconsistency: %v, err: %v", snapshot, err)
p.logger.Errorf("failed to snapshot inconsistency: %v", err)
return err
}
if err = snapshot.Consistent(); err != nil {
p.logger.Errorf("snapshot inconsistency: %v, err: %v", snapshot, err)
p.logger.Errorf("failed to snapshot inconsistency: %v", err)
return err
}
p.logger.Debugf("will serve snapshot %+v, nodeID: %s", snapshot, config.Uid)
if err = p.cache.SetSnapshot(context.Background(), config.Uid, snapshot); err != nil {
p.logger.Infof("will serve snapshot %+v, nodeID: %s", snapshot, uid)
err = p.cache.SetSnapshot(context.Background(), uid, snapshot)
if err != nil {
p.logger.Errorf("snapshot error %q for %v", err, snapshot)
return err
}
p.expireCache.Set(config.Uid, config, time.Minute*5)
p.expireCache.Set(uid, config, time.Minute*5)
}
return nil
}
func ParseYaml(file string) ([]*Virtual, error) {
func ParseYaml(content string) ([]*Virtual, error) {
var virtualList = make([]*Virtual, 0)
yamlFile, err := os.ReadFile(file)
if err != nil {
return nil, fmt.Errorf("Error reading YAML file: %s\n", err)
}
err = yaml.Unmarshal(yamlFile, &virtualList)
err := yaml.Unmarshal([]byte(content), &virtualList)
if err != nil {
return nil, err
}


@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"net"
"time"
clusterservice "github.com/envoyproxy/go-control-plane/envoy/service/cluster/v3"
discoverygrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
@@ -13,8 +14,10 @@ import (
runtimeservice "github.com/envoyproxy/go-control-plane/envoy/service/runtime/v3"
secretservice "github.com/envoyproxy/go-control-plane/envoy/service/secret/v3"
serverv3 "github.com/envoyproxy/go-control-plane/pkg/server/v3"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/keepalive"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
const (
@@ -22,8 +25,17 @@ const (
)
func RunServer(ctx context.Context, server serverv3.Server, port uint) error {
grpcServer := grpc.NewServer(grpc.MaxConcurrentStreams(grpcMaxConcurrentStreams))
grpcOpts := []grpc.ServerOption{
grpc.MaxConcurrentStreams(grpcMaxConcurrentStreams),
grpc.KeepaliveParams(keepalive.ServerParameters{
Time: 15 * time.Second,
Timeout: 5 * time.Second,
}),
grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
MinTime: 15 * time.Second,
PermitWithoutStream: true,
})}
grpcServer := grpc.NewServer(grpcOpts...)
var lc net.ListenConfig
listener, err := lc.Listen(ctx, "tcp", fmt.Sprintf(":%d", port))
if err != nil {
@@ -38,6 +50,6 @@ func RunServer(ctx context.Context, server serverv3.Server, port uint) error {
secretservice.RegisterSecretDiscoveryServiceServer(grpcServer, server)
runtimeservice.RegisterRuntimeDiscoveryServiceServer(grpcServer, server)
log.Infof("management server listening on %d", port)
plog.G(ctx).Infof("Management server listening on %d", port)
return grpcServer.Serve(listener)
}
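One note on the keepalive pairing above: EnforcementPolicy.MinTime (15s) is the minimum ping interval the server tolerates; clients that ping more often are dropped with a GOAWAY (too_many_pings), so client-side keepalive Time should be at least 15s to match.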


@@ -1,62 +1,82 @@
package controlplane
import (
"fmt"
"context"
"time"
"github.com/fsnotify/fsnotify"
)
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
informerv1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
type OperationType int
const (
Create OperationType = iota
Remove
Modify
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
type NotifyMessage struct {
Operation OperationType
FilePath string
Content string
}
func Watch(watcher *fsnotify.Watcher, filename string, notifyCh chan<- NotifyMessage) error {
ticker := time.NewTicker(time.Second * 5)
defer ticker.Stop()
for {
select {
case event, ok := <-watcher.Events:
if !ok {
return fmt.Errorf("watcher has closed")
}
if event.Op&fsnotify.Write == fsnotify.Write {
notifyCh <- NotifyMessage{
Operation: Modify,
FilePath: event.Name,
}
} else if event.Op&fsnotify.Create == fsnotify.Create {
notifyCh <- NotifyMessage{
Operation: Create,
FilePath: event.Name,
}
} else if event.Op&fsnotify.Remove == fsnotify.Remove {
notifyCh <- NotifyMessage{
Operation: Remove,
FilePath: event.Name,
}
}
func Watch(ctx context.Context, f cmdutil.Factory, notifyCh chan<- NotifyMessage) error {
namespace, _, err := f.ToRawKubeConfigLoader().Namespace()
if err != nil {
return err
}
restConfig, err := f.ToRESTConfig()
if err != nil {
return err
}
conf := rest.CopyConfig(restConfig)
conf.QPS = 1
conf.Burst = 2
clientSet, err := kubernetes.NewForConfig(conf)
if err != nil {
plog.G(ctx).Errorf("Failed to create clientset: %v", err)
return err
}
cmIndexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
cmInformer := informerv1.NewFilteredConfigMapInformer(clientSet, namespace, 0, cmIndexers, func(options *metav1.ListOptions) {
options.FieldSelector = fields.OneTermEqualSelector("metadata.name", config.ConfigMapPodTrafficManager).String()
})
cmTicker := time.NewTicker(time.Second * 5)
_, err = cmInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
cmTicker.Reset(time.Nanosecond * 1)
},
UpdateFunc: func(oldObj, newObj interface{}) {
cmTicker.Reset(time.Nanosecond * 1)
},
DeleteFunc: func(obj interface{}) {
cmTicker.Reset(time.Nanosecond * 1)
},
})
if err != nil {
plog.G(ctx).Errorf("Failed to add service event handler: %v", err)
return err
}
case err, ok := <-watcher.Errors:
if !ok {
return fmt.Errorf("watcher error closed")
}
return err
case <-ticker.C:
notifyCh <- NotifyMessage{
Operation: Modify,
FilePath: filename,
go cmInformer.Run(ctx.Done())
defer cmTicker.Stop()
for ; ctx.Err() == nil; <-cmTicker.C {
cmTicker.Reset(time.Second * 5)
cmList := cmInformer.GetIndexer().List()
if len(cmList) == 0 {
continue
}
for _, cm := range cmList {
configMap, ok := cm.(*v1.ConfigMap)
if ok {
if configMap.Data == nil {
configMap.Data = make(map[string]string)
}
notifyCh <- NotifyMessage{Content: configMap.Data[config.KeyEnvoy]}
continue
}
}
}
return ctx.Err()
}

pkg/core/bufferedtcp.go Normal file

@@ -0,0 +1,52 @@
package core
import (
"context"
"errors"
"net"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
type bufferedTCP struct {
net.Conn
Chan chan *DatagramPacket
closed bool
}
func NewBufferedTCP(conn net.Conn) net.Conn {
c := &bufferedTCP{
Conn: conn,
Chan: make(chan *DatagramPacket, MaxSize),
}
go c.Run()
return c
}
func (c *bufferedTCP) Write(b []byte) (n int, err error) {
if c.closed {
return 0, errors.New("tcp channel is closed")
}
if len(b) == 0 {
return 0, nil
}
buf := config.LPool.Get().([]byte)[:]
n = copy(buf, b)
c.Chan <- newDatagramPacket(buf, n)
return n, nil
}
func (c *bufferedTCP) Run() {
for buf := range c.Chan {
_, err := c.Conn.Write(buf.Data[:buf.DataLength])
config.LPool.Put(buf.Data[:])
if err != nil {
plog.G(context.Background()).Errorf("[TCP] Write packet failed: %v", err)
_ = c.Conn.Close()
c.closed = true
return
}
}
}
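
The channel-backed writer decouples producers from a slow or blocking connection: Write only enqueues a pooled copy of the payload, the goroutine started by NewBufferedTCP flushes it, and a failed flush closes the conn so later Writes fail fast. A minimal usage sketch, assuming pkg/core is importable as shown and using net.Pipe as a stand-in for a real TCP conn:

package main

import (
	"fmt"
	"net"
	"time"

	"github.com/wencaiwulue/kubevpn/v2/pkg/core" // assumed import path
)

func main() {
	client, server := net.Pipe() // synchronous in-memory pair
	buffered := core.NewBufferedTCP(client)

	go func() {
		buf := make([]byte, 64)
		n, _ := server.Read(buf)
		fmt.Printf("server got %d bytes\n", n)
	}()

	// Write returns as soon as the payload is queued.
	if _, err := buffered.Write([]byte("ping")); err != nil {
		fmt.Println("write failed:", err)
	}
	time.Sleep(100 * time.Millisecond) // give the flusher goroutine a moment
}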

View File

@@ -1,97 +0,0 @@
package core
import (
"context"
"errors"
"math"
"net"
)
var (
// ErrorEmptyChain is an error that implies the chain is empty.
ErrorEmptyChain = errors.New("empty chain")
)
type Chain struct {
Retries int
node *Node
}
func NewChain(retry int, node *Node) *Chain {
return &Chain{Retries: retry, node: node}
}
func (c *Chain) Node() *Node {
return c.node
}
func (c *Chain) IsEmpty() bool {
return c == nil || c.node == nil
}
func (c *Chain) DialContext(ctx context.Context) (conn net.Conn, err error) {
for i := 0; i < int(math.Max(float64(1), float64(c.Retries))); i++ {
conn, err = c.dial(ctx)
if err == nil {
break
}
}
return
}
func (c *Chain) dial(ctx context.Context) (net.Conn, error) {
if c.IsEmpty() {
return nil, ErrorEmptyChain
}
conn, err := c.getConn(ctx)
if err != nil {
return nil, err
}
var cc net.Conn
cc, err = c.Node().Client.ConnectContext(ctx, conn)
if err != nil {
_ = conn.Close()
return nil, err
}
return cc, nil
}
func (*Chain) resolve(addr string) string {
if host, port, err := net.SplitHostPort(addr); err == nil {
if ips, err := net.LookupIP(host); err == nil && len(ips) > 0 {
return net.JoinHostPort(ips[0].String(), port)
}
}
return addr
}
func (c *Chain) getConn(ctx context.Context) (net.Conn, error) {
if c.IsEmpty() {
return nil, ErrorEmptyChain
}
return c.Node().Client.Dial(ctx, c.resolve(c.Node().Addr))
}
type Handler interface {
Handle(ctx context.Context, conn net.Conn)
}
type Client struct {
Connector
Transporter
}
type Connector interface {
ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error)
}
type Transporter interface {
Dial(ctx context.Context, addr string) (net.Conn, error)
}
type Server struct {
Listener net.Listener
Handler Handler
}

pkg/core/forwarder.go (Normal file, 96 lines)
View File

@@ -0,0 +1,96 @@
package core
import (
"context"
"errors"
"net"
)
var (
// ErrorEmptyForwarder is an error that implies the forwarder is empty.
ErrorEmptyForwarder = errors.New("empty forwarder")
)
type Forwarder struct {
retries int
node *Node
}
func NewForwarder(retry int, node *Node) *Forwarder {
return &Forwarder{retries: retry, node: node}
}
func (c *Forwarder) Node() *Node {
return c.node
}
func (c *Forwarder) IsEmpty() bool {
return c == nil || c.node == nil
}
func (c *Forwarder) DialContext(ctx context.Context) (conn net.Conn, err error) {
for i := 0; i < max(1, c.retries); i++ {
conn, err = c.dial(ctx)
if err == nil {
break
}
}
return
}
func (c *Forwarder) dial(ctx context.Context) (net.Conn, error) {
if c.IsEmpty() {
return nil, ErrorEmptyForwarder
}
conn, err := c.getConn(ctx)
if err != nil {
return nil, err
}
var cc net.Conn
cc, err = c.Node().Client.ConnectContext(ctx, conn)
if err != nil {
_ = conn.Close()
return nil, err
}
return cc, nil
}
func (*Forwarder) resolve(addr string) string {
if host, port, err := net.SplitHostPort(addr); err == nil {
if ips, err := net.LookupIP(host); err == nil && len(ips) > 0 {
return net.JoinHostPort(ips[0].String(), port)
}
}
return addr
}
func (c *Forwarder) getConn(ctx context.Context) (net.Conn, error) {
if c.IsEmpty() {
return nil, ErrorEmptyForwarder
}
return c.Node().Client.Dial(ctx, c.Node().Addr)
}
type Handler interface {
Handle(ctx context.Context, conn net.Conn)
}
type Client struct {
Connector
Transporter
}
type Connector interface {
ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error)
}
type Transporter interface {
Dial(ctx context.Context, addr string) (net.Conn, error)
}
type Server struct {
Listener net.Listener
Handler Handler
}
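
Forwarder keeps Chain's retry behavior (DialContext attempts the dial up to retries times through the node's Client) but, unlike Chain.getConn, dials the node address without resolving it first. A usage sketch with stand-in Connector and Transporter implementations; the real tunnel connectors live elsewhere in pkg/core, and the import path is assumed:

package main

import (
	"context"
	"fmt"
	"net"

	"github.com/wencaiwulue/kubevpn/v2/pkg/core" // assumed import path
)

// plainTransporter dials a raw TCP connection; plainConnector performs no
// handshake and hands the connection straight back. Both are illustrative
// stand-ins for the real tunnel implementations.
type plainTransporter struct{}

func (plainTransporter) Dial(ctx context.Context, addr string) (net.Conn, error) {
	var d net.Dialer
	return d.DialContext(ctx, "tcp", addr)
}

type plainConnector struct{}

func (plainConnector) ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error) {
	return conn, nil
}

func main() {
	node, err := core.ParseNode("tcp://127.0.0.1:10800")
	if err != nil {
		panic(err)
	}
	node.Client = &core.Client{Connector: plainConnector{}, Transporter: plainTransporter{}}

	// Up to 5 dial attempts before giving up.
	conn, err := core.NewForwarder(5, node).DialContext(context.Background())
	if err != nil {
		fmt.Println("dial failed after retries:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}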

View File

@@ -0,0 +1,19 @@
package core
import (
"context"
"gvisor.dev/gvisor/pkg/tcpip/stack"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
func ICMPForwarder(ctx context.Context, s *stack.Stack) func(stack.TransportEndpointID, *stack.PacketBuffer) bool {
return func(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool {
defer pkt.DecRef()
plog.G(ctx).Infof("[TUN-ICMP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
)
return true
}
}

pkg/core/gvisorlocalstack.go (Executable file, 104 lines)
View File

@@ -0,0 +1,104 @@
package core
import (
"context"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/link/packetsocket"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/tcpip/transport/raw"
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
func NewLocalStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
nicID := tcpip.NICID(1)
s := stack.New(stack.Options{
NetworkProtocols: []stack.NetworkProtocolFactory{
ipv4.NewProtocol,
ipv6.NewProtocol,
},
TransportProtocols: []stack.TransportProtocolFactory{
tcp.NewProtocol,
udp.NewProtocol,
},
Clock: tcpip.NewStdClock(),
AllowPacketEndpointWrite: true,
HandleLocal: false, // if set to true, ping local ip will fail
// Enable raw sockets for users with sufficient
// privileges.
RawFactory: raw.EndpointFactory{},
})
// set handler for TCP UDP ICMP
s.SetTransportProtocolHandler(tcp.ProtocolNumber, LocalTCPForwarder(ctx, s))
s.SetTransportProtocolHandler(udp.ProtocolNumber, LocalUDPForwarder(ctx, s))
s.SetTransportProtocolHandler(header.ICMPv4ProtocolNumber, ICMPForwarder(ctx, s))
s.SetTransportProtocolHandler(header.ICMPv6ProtocolNumber, ICMPForwarder(ctx, s))
s.SetRouteTable([]tcpip.Route{
{
Destination: header.IPv4EmptySubnet,
NIC: nicID,
},
{
Destination: header.IPv6EmptySubnet,
NIC: nicID,
},
})
s.CreateNICWithOptions(nicID, packetsocket.New(tun), stack.NICOptions{
Disabled: false,
Context: ctx,
})
s.SetPromiscuousMode(nicID, true)
s.SetSpoofing(nicID, true)
// Enable SACK Recovery.
{
opt := tcpip.TCPSACKEnabled(true)
if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
plog.G(ctx).Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %v", tcp.ProtocolNumber, opt, opt, err)
}
}
// Set default TTLs as required by socket/netstack.
{
opt := tcpip.DefaultTTLOption(64)
if err := s.SetNetworkProtocolOption(ipv4.ProtocolNumber, &opt); err != nil {
plog.G(ctx).Fatalf("SetNetworkProtocolOption(%d, &%T(%d)): %v", ipv4.ProtocolNumber, opt, opt, err)
}
if err := s.SetNetworkProtocolOption(ipv6.ProtocolNumber, &opt); err != nil {
plog.G(ctx).Fatalf("SetNetworkProtocolOption(%d, &%T(%d)): %v", ipv6.ProtocolNumber, opt, opt, err)
}
}
// Enable Receive Buffer Auto-Tuning.
{
opt := tcpip.TCPModerateReceiveBufferOption(true)
if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
plog.G(ctx).Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %v", tcp.ProtocolNumber, opt, opt, err)
}
}
{
if err := s.SetForwardingDefaultAndAllNICs(ipv4.ProtocolNumber, true); err != nil {
plog.G(ctx).Fatalf("Set IPv4 forwarding: %v", err)
}
if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, true); err != nil {
plog.G(ctx).Fatalf("Set IPv6 forwarding: %v", err)
}
}
{
option := tcpip.TCPModerateReceiveBufferOption(true)
if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &option); err != nil {
plog.G(ctx).Fatalf("Set TCP moderate receive buffer: %v", err)
}
}
return s
}
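
A sketch of binding NewLocalStack to a link endpoint. In kubevpn the packets come from the TUN device; here an in-memory channel endpoint stands in, and the pkg/core import path is assumed:

package main

import (
	"context"

	"gvisor.dev/gvisor/pkg/tcpip"
	"gvisor.dev/gvisor/pkg/tcpip/link/channel"

	"github.com/wencaiwulue/kubevpn/v2/pkg/core" // assumed import path
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// In-memory link endpoint: 512-packet queue, 1500-byte MTU, random MAC.
	endpoint := channel.New(512, 1500, tcpip.GetRandMacAddr())

	// NewLocalStack registers the local TCP/UDP/ICMP forwarders above and
	// enables forwarding, promiscuous mode and spoofing on NIC 1.
	s := core.NewLocalStack(ctx, endpoint)
	defer s.Destroy()

	// Traffic would now be driven via endpoint.InjectInbound / endpoint.ReadContext.
}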

View File

@@ -0,0 +1,84 @@
package core
import (
"context"
"fmt"
"io"
"net"
"time"
"github.com/pkg/errors"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"gvisor.dev/gvisor/pkg/waiter"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
func LocalTCPForwarder(ctx context.Context, s *stack.Stack) func(stack.TransportEndpointID, *stack.PacketBuffer) bool {
return tcp.NewForwarder(s, 0, 100000, func(request *tcp.ForwarderRequest) {
ctx = context.Background()
id := request.ID()
plog.G(ctx).Infof("[TUN-TCP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
)
w := &waiter.Queue{}
endpoint, tErr := request.CreateEndpoint(w)
if tErr != nil {
plog.G(ctx).Errorf("[TUN-TCP] Failed to create endpoint: %v", tErr)
request.Complete(true)
return
}
defer endpoint.Close()
conn := gonet.NewTCPConn(w, endpoint)
defer conn.Close()
var err error
defer func() {
if err != nil && !errors.Is(err, io.EOF) {
request.Complete(true)
} else {
request.Complete(false)
}
}()
// 2, dial proxy
var host string
if id.LocalAddress.To4() != (tcpip.Address{}) {
host = "127.0.0.1"
} else {
host = net.IPv6loopback.String()
}
port := fmt.Sprintf("%d", id.LocalPort)
var d = net.Dialer{Timeout: time.Second * 5}
var remote net.Conn
remote, err = d.DialContext(ctx, "tcp", net.JoinHostPort(host, port))
if err != nil {
plog.G(ctx).Errorf("[TUN-TCP] Failed to connect addr %s: %v", net.JoinHostPort(host, port), err)
return
}
defer remote.Close()
errChan := make(chan error, 2)
go func() {
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
written, err2 := io.CopyBuffer(remote, conn, buf)
plog.G(ctx).Infof("[TUN-TCP] Write length %d data to remote", written)
errChan <- err2
}()
go func() {
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
written, err2 := io.CopyBuffer(conn, remote, buf)
plog.G(ctx).Infof("[TUN-TCP] Read length %d data from remote", written)
errChan <- err2
}()
err = <-errChan
if err != nil && !errors.Is(err, io.EOF) {
plog.G(ctx).Errorf("[TUN-TCP] Disconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err)
}
}).HandlePacket
}

View File

@@ -0,0 +1,57 @@
package core
import (
"context"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
"gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
type gvisorLocalHandler struct {
// read from tcp conn write to gvisor inbound
gvisorInbound <-chan *Packet
// write to tcp conn
gvisorOutbound chan<- *Packet
outbound chan<- *Packet
errChan chan error
}
func handleGvisorPacket(gvisorInbound <-chan *Packet, outbound chan<- *Packet) *gvisorLocalHandler {
return &gvisorLocalHandler{
gvisorInbound: gvisorInbound,
outbound: outbound,
errChan: make(chan error, 1),
}
}
func (h *gvisorLocalHandler) Run(ctx context.Context) {
endpoint := channel.New(tcp.DefaultReceiveBufferSize, uint32(config.DefaultMTU), tcpip.GetRandMacAddr())
// to support IPv6, skip checksum verification
// vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic.go:763
endpoint.LinkEPCapabilities = stack.CapabilityRXChecksumOffload
go func() {
defer util.HandleCrash()
readFromGvisorInboundWriteToEndpoint(ctx, h.gvisorInbound, endpoint)
util.SafeClose(h.errChan)
}()
go func() {
defer util.HandleCrash()
readFromEndpointWriteToTun(ctx, endpoint, h.outbound)
util.SafeClose(h.errChan)
}()
s := NewLocalStack(ctx, sniffer.NewWithPrefix(endpoint, "[gVISOR] "))
defer s.Destroy()
select {
case <-h.errChan:
return
case <-ctx.Done():
return
}
}

View File

@@ -0,0 +1,67 @@
package core
import (
"context"
"gvisor.dev/gvisor/pkg/buffer"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
"gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func readFromEndpointWriteToTun(ctx context.Context, endpoint *channel.Endpoint, out chan<- *Packet) {
for ctx.Err() == nil {
pkt := endpoint.ReadContext(ctx)
if pkt != nil {
sniffer.LogPacket("[gVISOR] ", sniffer.DirectionSend, pkt.NetworkProtocolNumber, pkt)
data := pkt.ToView().AsSlice()
buf := config.LPool.Get().([]byte)[:]
n := copy(buf[1:], data)
buf[0] = 0
out <- NewPacket(buf[:], n+1, nil, nil)
}
}
}
func readFromGvisorInboundWriteToEndpoint(ctx context.Context, in <-chan *Packet, endpoint *channel.Endpoint) {
for ctx.Err() == nil {
var packet *Packet
select {
case packet = <-in:
if packet == nil {
return
}
case <-ctx.Done():
return
}
// Try to determine network protocol number, default zero.
var protocol tcpip.NetworkProtocolNumber
// the TUN interface has IFF_NO_PI enabled, so we need to
// determine the protocol from the IP version field
if util.IsIPv4(packet.data[1:packet.length]) {
protocol = header.IPv4ProtocolNumber
} else if util.IsIPv6(packet.data[1:packet.length]) {
protocol = header.IPv6ProtocolNumber
} else {
plog.G(ctx).Errorf("[TCP-GVISOR] Unknown packet")
config.LPool.Put(packet.data[:])
continue
}
pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
ReserveHeaderBytes: 0,
Payload: buffer.MakeWithData(packet.data[1:packet.length]),
})
config.LPool.Put(packet.data[:])
sniffer.LogPacket("[gVISOR] ", sniffer.DirectionRecv, protocol, pkt)
endpoint.InjectInbound(protocol, pkt)
pkt.DecRef()
}
}
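
Both relay directions prepend a one-byte flag to the raw IP packet: the reader above writes flag 0 for TUN-bound traffic, and the TCP-side dispatcher later in this diff injects packets carrying flag 1 into gVisor (the peer that sets flag 1 is outside this excerpt, so treat the flag semantics as inferred). A standalone sketch of the framing plus the same IPv4 header sniff the relay performs:

package main

import (
	"fmt"

	"golang.org/x/net/ipv4"
)

// frame prepends the route flag: payload[0] is the flag and the IP packet
// starts at payload[1], mirroring buf[0]/buf[1:] in the relay code.
func frame(packet []byte, toGvisor bool) []byte {
	buf := make([]byte, len(packet)+1)
	if toGvisor {
		buf[0] = 1 // inferred: inject into the gVisor stack
	}
	copy(buf[1:], packet)
	return buf
}

func main() {
	// A minimal 20-byte IPv4 header (version 4, IHL 5), illustrative only.
	pkt := make([]byte, 20)
	pkt[0] = 0x45

	framed := frame(pkt, true)
	hdr, err := ipv4.ParseHeader(framed[1:]) // same parse as the relay
	fmt.Println("flag:", framed[0], "header:", hdr, "err:", err)
}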

View File

@@ -0,0 +1,127 @@
package core
import (
"context"
"io"
"net"
"time"
"github.com/pkg/errors"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
"gvisor.dev/gvisor/pkg/waiter"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func LocalUDPForwarder(ctx context.Context, s *stack.Stack) func(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool {
return udp.NewForwarder(s, func(request *udp.ForwarderRequest) {
id := request.ID()
plog.G(ctx).Infof("[TUN-UDP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
)
src := &net.UDPAddr{
IP: id.RemoteAddress.AsSlice(),
Port: int(id.RemotePort),
}
var ip net.IP
if id.LocalAddress.To4() != (tcpip.Address{}) {
ip = net.ParseIP("127.0.0.1")
} else {
ip = net.IPv6loopback
}
dst := &net.UDPAddr{
IP: ip,
Port: int(id.LocalPort),
}
w := &waiter.Queue{}
endpoint, tErr := request.CreateEndpoint(w)
if tErr != nil {
plog.G(ctx).Errorf("[TUN-UDP] Failed to create endpoint to dst: %s: %v", dst.String(), tErr)
return
}
// dial dst
remote, err1 := net.DialUDP("udp", nil, dst)
if err1 != nil {
plog.G(ctx).Errorf("[TUN-UDP] Failed to connect dst: %s: %v", dst.String(), err1)
return
}
conn := gonet.NewUDPConn(w, endpoint)
go func() {
defer conn.Close()
defer remote.Close()
errChan := make(chan error, 2)
go func() {
defer util.HandleCrash()
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
var written int
var err error
for {
err = conn.SetReadDeadline(time.Now().Add(time.Second * 120))
if err != nil {
break
}
var read int
read, _, err = conn.ReadFrom(buf[:])
if err != nil {
break
}
written += read
err = remote.SetWriteDeadline(time.Now().Add(time.Second * 120))
if err != nil {
break
}
_, err = remote.Write(buf[:read])
if err != nil {
break
}
}
plog.G(ctx).Infof("[TUN-UDP] Write length %d data from src: %s -> dst: %s", written, src, dst)
errChan <- err
}()
go func() {
defer util.HandleCrash()
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
var err error
var written int
for {
err = remote.SetReadDeadline(time.Now().Add(time.Second * 120))
if err != nil {
break
}
var n int
n, _, err = remote.ReadFromUDP(buf[:])
if err != nil {
break
}
written += n
err = conn.SetWriteDeadline(time.Now().Add(time.Second * 120))
if err != nil {
break
}
_, err = conn.Write(buf[:n])
if err != nil {
break
}
}
plog.G(ctx).Infof("[TUN-UDP] Read length %d data from dst: %s -> src: %s", written, dst, src)
errChan <- err
}()
err1 = <-errChan
if err1 != nil && !errors.Is(err1, io.EOF) {
plog.G(ctx).Errorf("[TUN-UDP] Disconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err1)
}
}()
}).HandlePacket
}

View File

@@ -3,7 +3,6 @@ package core
import (
"context"
log "github.com/sirupsen/logrus"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/link/packetsocket"
@@ -13,18 +12,12 @@ import (
"gvisor.dev/gvisor/pkg/tcpip/transport/raw"
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
var _ stack.UniqueID = (*id)(nil)
type id struct {
}
func (i id) UniqueID() uint64 {
return 1
}
func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
nicID := tcpip.NICID(1)
s := stack.New(stack.Options{
NetworkProtocols: []stack.NetworkProtocolFactory{
ipv4.NewProtocol,
@@ -40,35 +33,36 @@ func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
// Enable raw sockets for users with sufficient
// privileges.
RawFactory: raw.EndpointFactory{},
UniqueID: id{},
})
// set handler for TCP UDP ICMP
s.SetTransportProtocolHandler(tcp.ProtocolNumber, TCPForwarder(s))
s.SetTransportProtocolHandler(udp.ProtocolNumber, UDPForwarder(s))
s.SetTransportProtocolHandler(tcp.ProtocolNumber, TCPForwarder(ctx, s))
s.SetTransportProtocolHandler(udp.ProtocolNumber, UDPForwarder(ctx, s))
s.SetTransportProtocolHandler(header.ICMPv4ProtocolNumber, ICMPForwarder(ctx, s))
s.SetTransportProtocolHandler(header.ICMPv6ProtocolNumber, ICMPForwarder(ctx, s))
s.SetRouteTable([]tcpip.Route{
{
Destination: header.IPv4EmptySubnet,
NIC: 1,
NIC: nicID,
},
{
Destination: header.IPv6EmptySubnet,
NIC: 1,
NIC: nicID,
},
})
s.CreateNICWithOptions(1, packetsocket.New(tun), stack.NICOptions{
s.CreateNICWithOptions(nicID, packetsocket.New(tun), stack.NICOptions{
Disabled: false,
Context: ctx,
})
s.SetPromiscuousMode(1, true)
s.SetSpoofing(1, true)
s.SetPromiscuousMode(nicID, true)
s.SetSpoofing(nicID, true)
// Enable SACK Recovery.
{
opt := tcpip.TCPSACKEnabled(true)
if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
log.Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %v", tcp.ProtocolNumber, opt, opt, err)
plog.G(ctx).Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %v", tcp.ProtocolNumber, opt, opt, err)
}
}
@@ -76,10 +70,10 @@ func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
{
opt := tcpip.DefaultTTLOption(64)
if err := s.SetNetworkProtocolOption(ipv4.ProtocolNumber, &opt); err != nil {
log.Fatalf("SetNetworkProtocolOption(%d, &%T(%d)): %v", ipv4.ProtocolNumber, opt, opt, err)
plog.G(ctx).Fatalf("SetNetworkProtocolOption(%d, &%T(%d)): %v", ipv4.ProtocolNumber, opt, opt, err)
}
if err := s.SetNetworkProtocolOption(ipv6.ProtocolNumber, &opt); err != nil {
log.Fatalf("SetNetworkProtocolOption(%d, &%T(%d)): %v", ipv6.ProtocolNumber, opt, opt, err)
plog.G(ctx).Fatalf("SetNetworkProtocolOption(%d, &%T(%d)): %v", ipv6.ProtocolNumber, opt, opt, err)
}
}
@@ -87,23 +81,23 @@ func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
{
opt := tcpip.TCPModerateReceiveBufferOption(true)
if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
log.Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %v", tcp.ProtocolNumber, opt, opt, err)
plog.G(ctx).Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %v", tcp.ProtocolNumber, opt, opt, err)
}
}
{
if err := s.SetForwardingDefaultAndAllNICs(ipv4.ProtocolNumber, true); err != nil {
log.Fatalf("set ipv4 forwarding: %v", err)
plog.G(ctx).Fatalf("Set IPv4 forwarding: %v", err)
}
if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, true); err != nil {
log.Fatalf("set ipv6 forwarding: %v", err)
plog.G(ctx).Fatalf("Set IPv6 forwarding: %v", err)
}
}
{
option := tcpip.TCPModerateReceiveBufferOption(true)
if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &option); err != nil {
log.Fatalf("set TCP moderate receive buffer: %v", err)
plog.G(ctx).Fatalf("Set TCP moderate receive buffer: %v", err)
}
}
return s

View File

@@ -1,137 +1,75 @@
package core
import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"time"
log "github.com/sirupsen/logrus"
"gvisor.dev/gvisor/pkg/tcpip"
"github.com/pkg/errors"
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"gvisor.dev/gvisor/pkg/waiter"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
var GvisorTCPForwardAddr string
func TCPForwarder(s *stack.Stack) func(stack.TransportEndpointID, *stack.PacketBuffer) bool {
func TCPForwarder(ctx context.Context, s *stack.Stack) func(stack.TransportEndpointID, *stack.PacketBuffer) bool {
return tcp.NewForwarder(s, 0, 100000, func(request *tcp.ForwarderRequest) {
defer request.Complete(false)
id := request.ID()
log.Debugf("[TUN-TCP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
plog.G(ctx).Infof("[TUN-TCP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
)
node, err := ParseNode(GvisorTCPForwardAddr)
if err != nil {
log.Debugf("[TUN-TCP] Error: can not parse gvisor tcp forward addr %s: %v", GvisorTCPForwardAddr, err)
return
}
node.Client = &Client{
Connector: GvisorTCPTunnelConnector(),
Transporter: TCPTransporter(),
}
forwardChain := NewChain(5, node)
remote, err := forwardChain.dial(context.Background())
if err != nil {
log.Debugf("[TUN-TCP] Error: failed to dial remote conn: %v", err)
return
}
if err = WriteProxyInfo(remote, id); err != nil {
log.Debugf("[TUN-TCP] Error: failed to write proxy info: %v", err)
return
}
w := &waiter.Queue{}
endpoint, tErr := request.CreateEndpoint(w)
if tErr != nil {
log.Debugf("[TUN-TCP] Error: can not create endpoint: %v", tErr)
plog.G(ctx).Errorf("[TUN-TCP] Failed to create endpoint: %v", tErr)
request.Complete(true)
return
}
conn := gonet.NewTCPConn(w, endpoint)
defer conn.Close()
var err error
defer func() {
if err != nil && !errors.Is(err, io.EOF) {
request.Complete(true)
} else {
request.Complete(false)
}
}()
// 2, dial proxy
host := id.LocalAddress.String()
port := fmt.Sprintf("%d", id.LocalPort)
var remote net.Conn
var d = net.Dialer{Timeout: time.Second * 5}
remote, err = d.DialContext(ctx, "tcp", net.JoinHostPort(host, port))
if err != nil {
plog.G(ctx).Errorf("[TUN-TCP] Failed to connect addr %s: %v", net.JoinHostPort(host, port), err)
return
}
defer remote.Close()
errChan := make(chan error, 2)
go func() {
i := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(i[:])
written, err2 := io.CopyBuffer(remote, conn, i)
log.Debugf("[TUN-TCP] Debug: write length %d data to remote", written)
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
written, err2 := io.CopyBuffer(remote, conn, buf)
plog.G(ctx).Infof("[TUN-TCP] Write length %d data to remote", written)
errChan <- err2
}()
go func() {
i := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(i[:])
written, err2 := io.CopyBuffer(conn, remote, i)
log.Debugf("[TUN-TCP] Debug: read length %d data from remote", written)
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
written, err2 := io.CopyBuffer(conn, remote, buf)
plog.G(ctx).Infof("[TUN-TCP] Read length %d data from remote", written)
errChan <- err2
}()
err = <-errChan
if err != nil && !errors.Is(err, io.EOF) {
log.Debugf("[TUN-TCP] Error: dsiconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err)
plog.G(ctx).Errorf("[TUN-TCP] Disconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err)
}
}).HandlePacket
}
func WriteProxyInfo(conn net.Conn, id stack.TransportEndpointID) error {
var b bytes.Buffer
i := config.SPool.Get().([]byte)[:]
defer config.SPool.Put(i[:])
binary.BigEndian.PutUint16(i, id.LocalPort)
b.Write(i)
binary.BigEndian.PutUint16(i, id.RemotePort)
b.Write(i)
b.WriteByte(byte(id.LocalAddress.Len()))
b.Write(id.LocalAddress.AsSlice())
b.WriteByte(byte(id.RemoteAddress.Len()))
b.Write(id.RemoteAddress.AsSlice())
_, err := b.WriteTo(conn)
return err
}
// ParseProxyInfo parses the proxy info header ([20]byte) from conn
func ParseProxyInfo(conn net.Conn) (id stack.TransportEndpointID, err error) {
var n int
var port = make([]byte, 2)
// local port
if n, err = io.ReadFull(conn, port); err != nil || n != 2 {
return
}
id.LocalPort = binary.BigEndian.Uint16(port)
// remote port
if n, err = io.ReadFull(conn, port); err != nil || n != 2 {
return
}
id.RemotePort = binary.BigEndian.Uint16(port)
// local address
if n, err = io.ReadFull(conn, port[:1]); err != nil || n != 1 {
return
}
var localAddress = make([]byte, port[0])
if n, err = io.ReadFull(conn, localAddress); err != nil || n != len(localAddress) {
return
}
id.LocalAddress = tcpip.AddrFromSlice(localAddress)
// remote address
if n, err = io.ReadFull(conn, port[:1]); err != nil || n != 1 {
return
}
var remoteAddress = make([]byte, port[0])
if n, err = io.ReadFull(conn, remoteAddress); err != nil || n != len(remoteAddress) {
return
}
id.RemoteAddress = tcpip.AddrFromSlice(remoteAddress)
return
}
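
For reference, WriteProxyInfo and ParseProxyInfo (the UDP handler later in this diff consumes them from pkg/util) exchange the endpoint tuple as localPort(2), remotePort(2), lenLocalAddr(1), localAddr, lenRemoteAddr(1), remoteAddr, all big-endian. A standalone round-trip re-implementation of that wire format, for illustration only:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

func encode(localPort, remotePort uint16, local, remote []byte) []byte {
	var b bytes.Buffer
	_ = binary.Write(&b, binary.BigEndian, localPort)
	_ = binary.Write(&b, binary.BigEndian, remotePort)
	b.WriteByte(byte(len(local)))
	b.Write(local)
	b.WriteByte(byte(len(remote)))
	b.Write(remote)
	return b.Bytes()
}

func decode(r io.Reader) (localPort, remotePort uint16, local, remote []byte, err error) {
	if err = binary.Read(r, binary.BigEndian, &localPort); err != nil {
		return
	}
	if err = binary.Read(r, binary.BigEndian, &remotePort); err != nil {
		return
	}
	var n [1]byte
	if _, err = io.ReadFull(r, n[:]); err != nil {
		return
	}
	local = make([]byte, n[0])
	if _, err = io.ReadFull(r, local); err != nil {
		return
	}
	if _, err = io.ReadFull(r, n[:]); err != nil {
		return
	}
	remote = make([]byte, n[0])
	_, err = io.ReadFull(r, remote)
	return
}

func main() {
	r := bytes.NewReader(encode(80, 51234, []byte{10, 0, 0, 1}, []byte{10, 0, 0, 2}))
	lp, rp, l, rem, err := decode(r)
	fmt.Println(lp, rp, l, rem, err) // 80 51234 [10 0 0 1] [10 0 0 2] <nil>
}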

View File

@@ -2,101 +2,87 @@ package core
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"time"
"sync"
log "github.com/sirupsen/logrus"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
"gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
type gvisorTCPTunnelConnector struct {
type gvisorTCPHandler struct {
// map[srcIP]net.Conn
routeMapTCP *sync.Map
}
func GvisorTCPTunnelConnector() Connector {
return &gvisorTCPTunnelConnector{}
}
func (c *gvisorTCPTunnelConnector) ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error) {
switch con := conn.(type) {
case *net.TCPConn:
err := con.SetNoDelay(true)
if err != nil {
return nil, err
}
err = con.SetKeepAlive(true)
if err != nil {
return nil, err
}
err = con.SetKeepAlivePeriod(15 * time.Second)
if err != nil {
return nil, err
}
}
return conn, nil
}
type gvisorTCPHandler struct{}
func GvisorTCPHandler() Handler {
return &gvisorTCPHandler{}
return &gvisorTCPHandler{
routeMapTCP: RouteMapTCP,
}
}
func (h *gvisorTCPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
defer tcpConn.Close()
log.Debugf("[TUN-TCP] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
// 1, get proxy info
endpointID, err := ParseProxyInfo(tcpConn)
if err != nil {
log.Debugf("[TUN-TCP] Error: failed to parse proxy info: %v", err)
return
}
log.Debugf("[TUN-TCP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
endpointID.LocalPort, endpointID.LocalAddress.String(), endpointID.RemotePort, endpointID.RemoteAddress.String(),
)
// 2, dial proxy
host := endpointID.LocalAddress.String()
port := fmt.Sprintf("%d", endpointID.LocalPort)
var remote net.Conn
remote, err = net.DialTimeout("tcp", net.JoinHostPort(host, port), time.Second*5)
if err != nil {
log.Debugf("[TUN-TCP] Error: failed to connect addr %s: %v", net.JoinHostPort(host, port), err)
return
}
cancel, cancelFunc := context.WithCancel(ctx)
defer cancelFunc()
plog.G(ctx).Infof("[TUN-GVISOR] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
h.handle(cancel, tcpConn)
}
func (h *gvisorTCPHandler) handle(ctx context.Context, tcpConn net.Conn) {
endpoint := channel.New(tcp.DefaultReceiveBufferSize, uint32(config.DefaultMTU), tcpip.GetRandMacAddr())
// to support IPv6, skip checksum verification
// vendor/gvisor.dev/gvisor/pkg/tcpip/stack/nic.go:763
endpoint.LinkEPCapabilities = stack.CapabilityRXChecksumOffload
errChan := make(chan error, 2)
go func() {
i := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(i[:])
written, err2 := io.CopyBuffer(remote, tcpConn, i)
log.Debugf("[TUN-TCP] Debug: write length %d data to remote", written)
errChan <- err2
defer util.HandleCrash()
h.readFromTCPConnWriteToEndpoint(ctx, NewBufferedTCP(tcpConn), endpoint)
util.SafeClose(errChan)
}()
go func() {
i := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(i[:])
written, err2 := io.CopyBuffer(tcpConn, remote, i)
log.Debugf("[TUN-TCP] Debug: read length %d data from remote", written)
errChan <- err2
defer util.HandleCrash()
h.readFromEndpointWriteToTCPConn(ctx, tcpConn, endpoint)
util.SafeClose(errChan)
}()
err = <-errChan
if err != nil && !errors.Is(err, io.EOF) {
log.Debugf("[TUN-TCP] Error: dsiconnect: %s >-<: %s: %v", tcpConn.LocalAddr(), remote.RemoteAddr(), err)
s := NewStack(ctx, sniffer.NewWithPrefix(endpoint, "[gVISOR] "))
defer s.Destroy()
select {
case <-errChan:
return
case <-ctx.Done():
return
}
}
func GvisorTCPListener(addr string) (net.Listener, error) {
log.Debug("gvisor tcp listen addr", addr)
plog.G(context.Background()).Infof("Gvisor TCP listening addr: %s", addr)
laddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
return nil, err
}
ln, err := net.ListenTCP("tcp", laddr)
listener, err := net.ListenTCP("tcp", laddr)
if err != nil {
return nil, err
}
return &tcpKeepAliveListener{TCPListener: ln}, nil
serverConfig, err := util.GetTlsServerConfig(nil)
if err != nil {
if errors.Is(err, util.ErrNoTLSConfig) {
plog.G(context.Background()).Warn("tls config not found in config, use raw tcp mode")
return &tcpKeepAliveListener{TCPListener: listener}, nil
}
plog.G(context.Background()).Errorf("failed to get tls server config: %v", err)
_ = listener.Close()
return nil, err
}
plog.G(context.Background()).Debugf("Use tls mode")
return tls.NewListener(&tcpKeepAliveListener{TCPListener: listener}, serverConfig), nil
}
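
A minimal accept loop over the new listener: it serves TLS when util.GetTlsServerConfig finds certificates and falls back to raw TCP otherwise. The pkg/core import path is assumed:

package main

import (
	"context"

	"github.com/wencaiwulue/kubevpn/v2/pkg/core" // assumed import path
)

func main() {
	ctx := context.Background()

	listener, err := core.GvisorTCPListener(":10800")
	if err != nil {
		panic(err)
	}
	defer listener.Close()

	handler := core.GvisorTCPHandler() // per-connection gVisor stack, shared route map
	for {
		conn, err := listener.Accept()
		if err != nil {
			return
		}
		go handler.Handle(ctx, conn)
	}
}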

pkg/core/gvisortunendpoint.go (Executable file, 150 lines)
View File

@@ -0,0 +1,150 @@
package core
import (
"context"
"net"
"github.com/google/gopacket/layers"
"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
"gvisor.dev/gvisor/pkg/buffer"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
"gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func (h *gvisorTCPHandler) readFromEndpointWriteToTCPConn(ctx context.Context, conn net.Conn, endpoint *channel.Endpoint) {
tcpConn, _ := newGvisorUDPConnOverTCP(ctx, conn)
for ctx.Err() == nil {
pkt := endpoint.ReadContext(ctx)
if pkt != nil {
sniffer.LogPacket("[gVISOR] ", sniffer.DirectionSend, pkt.NetworkProtocolNumber, pkt)
data := pkt.ToView().AsSlice()
buf := config.LPool.Get().([]byte)[:]
n := copy(buf[1:], data)
buf[0] = 0
_, err := tcpConn.Write(buf[:n+1])
config.LPool.Put(buf[:])
if err != nil {
plog.G(ctx).Errorf("[TUN-GVISOR] Failed to write data to tun device: %v", err)
}
}
}
}
// tun --> dispatcher
func (h *gvisorTCPHandler) readFromTCPConnWriteToEndpoint(ctx context.Context, conn net.Conn, endpoint *channel.Endpoint) {
tcpConn, _ := newGvisorUDPConnOverTCP(ctx, conn)
defer h.removeFromRouteMapTCP(ctx, conn)
for ctx.Err() == nil {
buf := config.LPool.Get().([]byte)[:]
read, err := tcpConn.Read(buf[:])
if err != nil {
plog.G(ctx).Errorf("[TCP-GVISOR] Failed to read from tcp conn: %v", err)
config.LPool.Put(buf[:])
return
}
if read == 0 {
plog.G(ctx).Warnf("[TCP-GVISOR] Read from tcp conn length is %d", read)
config.LPool.Put(buf[:])
continue
}
// Try to determine network protocol number, default zero.
var protocol tcpip.NetworkProtocolNumber
var ipProtocol int
var src, dst net.IP
// the TUN interface has IFF_NO_PI enabled, so we need to
// determine the protocol from the IP version field
if util.IsIPv4(buf[1:read]) {
protocol = header.IPv4ProtocolNumber
ipHeader, err := ipv4.ParseHeader(buf[1:read])
if err != nil {
plog.G(ctx).Errorf("Failed to parse IPv4 header: %v", err)
config.LPool.Put(buf[:])
continue
}
ipProtocol = ipHeader.Protocol
src = ipHeader.Src
dst = ipHeader.Dst
} else if util.IsIPv6(buf[1:read]) {
protocol = header.IPv6ProtocolNumber
ipHeader, err := ipv6.ParseHeader(buf[1:read])
if err != nil {
plog.G(ctx).Errorf("[TCP-GVISOR] Failed to parse IPv6 header: %s", err.Error())
config.LPool.Put(buf[:])
continue
}
ipProtocol = ipHeader.NextHeader
src = ipHeader.Src
dst = ipHeader.Dst
} else {
plog.G(ctx).Errorf("[TCP-GVISOR] Unknown packet")
config.LPool.Put(buf[:])
continue
}
h.addToRouteMapTCP(ctx, src, conn)
// inner IPs like 198.19.0.100/102/103 connect to each other
// for issue 594: sometimes the k8s service network CIDR also uses 198.19.x.x, e.g. 198.19.151.170
// if dst is found in the route map, treat the packet as inner communication
// otherwise treat the packet as destined for a k8s service/pod IP
if c, found := h.routeMapTCP.Load(dst.String()); found {
plog.G(ctx).Debugf("[TCP-GVISOR] Find TCP route SRC: %s to DST: %s -> %s", src, dst, c.(net.Conn).RemoteAddr())
dgram := newDatagramPacket(buf, read)
err = dgram.Write(c.(net.Conn))
config.LPool.Put(buf[:])
if err != nil {
plog.G(ctx).Errorf("[TCP-GVISOR] Failed to write to %s <- %s : %s", c.(net.Conn).RemoteAddr(), c.(net.Conn).LocalAddr(), err)
}
} else if buf[0] == 1 {
pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
ReserveHeaderBytes: 0,
Payload: buffer.MakeWithData(buf[1:read]),
})
config.LPool.Put(buf[:])
sniffer.LogPacket("[gVISOR] ", sniffer.DirectionRecv, protocol, pkt)
endpoint.InjectInbound(protocol, pkt)
pkt.DecRef()
plog.G(ctx).Debugf("[TCP-GVISOR] Write to gvisor. SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, layers.IPProtocol(ipProtocol).String(), read)
} else {
util.SafeWrite(TCPPacketChan, &Packet{
data: buf[:],
length: read,
src: src,
dst: dst,
}, func(v *Packet) {
config.LPool.Put(buf[:])
plog.G(ctx).Debugf("[TCP-TUN] Drop packet. SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, layers.IPProtocol(ipProtocol).String(), read)
})
}
}
}
func (h *gvisorTCPHandler) addToRouteMapTCP(ctx context.Context, src net.IP, tcpConn net.Conn) {
value, loaded := h.routeMapTCP.LoadOrStore(src.String(), tcpConn)
if loaded {
if value.(net.Conn) != tcpConn {
h.routeMapTCP.Store(src.String(), tcpConn)
plog.G(ctx).Infof("[TUN-GVISOR] Replace route map TCP: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
}
} else {
plog.G(ctx).Infof("[TUN-GVISOR] Add new route map TCP: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
}
}
func (h *gvisorTCPHandler) removeFromRouteMapTCP(ctx context.Context, tcpConn net.Conn) {
h.routeMapTCP.Range(func(key, value any) bool {
if value.(net.Conn) == tcpConn {
h.routeMapTCP.Delete(key)
plog.G(ctx).Infof("[TCP-GVISOR] Delete to DST %s by conn %s from globle route map TCP", key, tcpConn.LocalAddr())
}
return true
})
}

View File

@@ -2,81 +2,118 @@ package core
import (
"context"
"errors"
"io"
"net"
"time"
log "github.com/sirupsen/logrus"
"github.com/pkg/errors"
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
"gvisor.dev/gvisor/pkg/waiter"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
var GvisorUDPForwardAddr string
func UDPForwarder(s *stack.Stack) func(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool {
func UDPForwarder(ctx context.Context, s *stack.Stack) func(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool {
return udp.NewForwarder(s, func(request *udp.ForwarderRequest) {
endpointID := request.ID()
log.Debugf("[TUN-UDP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
endpointID.LocalPort, endpointID.LocalAddress.String(), endpointID.RemotePort, endpointID.RemoteAddress.String(),
id := request.ID()
plog.G(ctx).Infof("[TUN-UDP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
)
src := &net.UDPAddr{
IP: id.RemoteAddress.AsSlice(),
Port: int(id.RemotePort),
}
dst := &net.UDPAddr{
IP: id.LocalAddress.AsSlice(),
Port: int(id.LocalPort),
}
w := &waiter.Queue{}
endpoint, tErr := request.CreateEndpoint(w)
if tErr != nil {
log.Debugf("[TUN-UDP] Error: can not create endpoint: %v", tErr)
plog.G(ctx).Errorf("[TUN-UDP] Failed to create endpoint to dst: %s: %v", dst.String(), tErr)
return
}
node, err := ParseNode(GvisorUDPForwardAddr)
if err != nil {
log.Debugf("[TUN-UDP] Error: parse gviosr udp forward addr %s: %v", GvisorUDPForwardAddr, err)
// dial dst
remote, err1 := net.DialUDP("udp", nil, dst)
if err1 != nil {
plog.G(ctx).Errorf("[TUN-UDP] Failed to connect dst: %s: %v", dst.String(), err1)
return
}
node.Client = &Client{
Connector: GvisorUDPOverTCPTunnelConnector(endpointID),
Transporter: TCPTransporter(),
}
forwardChain := NewChain(5, node)
ctx := context.Background()
c, err := forwardChain.getConn(ctx)
if err != nil {
log.Debugf("[TUN-UDP] Error: can not get conn: %v", err)
return
}
if err = WriteProxyInfo(c, endpointID); err != nil {
log.Debugf("[TUN-UDP] Error: can not write proxy info: %v", err)
return
}
remote, err := node.Client.ConnectContext(ctx, c)
if err != nil {
log.Debugf("[TUN-UDP] Error: can not connect: %v", err)
return
}
conn := gonet.NewUDPConn(w, endpoint)
go func() {
defer conn.Close()
defer remote.Close()
errChan := make(chan error, 2)
go func() {
i := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(i[:])
written, err2 := io.CopyBuffer(remote, conn, i)
log.Debugf("[TUN-UDP] Debug: write length %d data to remote", written)
errChan <- err2
defer util.HandleCrash()
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
var written int
var err error
for {
err = conn.SetReadDeadline(time.Now().Add(time.Second * 120))
if err != nil {
break
}
var read int
read, _, err = conn.ReadFrom(buf[:])
if err != nil {
break
}
written += read
err = remote.SetWriteDeadline(time.Now().Add(time.Second * 120))
if err != nil {
break
}
_, err = remote.Write(buf[:read])
if err != nil {
break
}
}
plog.G(ctx).Infof("[TUN-UDP] Write length %d data from src: %s -> dst: %s", written, src, dst)
errChan <- err
}()
go func() {
i := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(i[:])
written, err2 := io.CopyBuffer(conn, remote, i)
log.Debugf("[TUN-UDP] Debug: read length %d data from remote", written)
errChan <- err2
defer util.HandleCrash()
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
var err error
var written int
for {
err = remote.SetReadDeadline(time.Now().Add(time.Second * 120))
if err != nil {
break
}
var n int
n, _, err = remote.ReadFromUDP(buf[:])
if err != nil {
break
}
written += n
err = conn.SetWriteDeadline(time.Now().Add(time.Second * 120))
if err != nil {
break
}
_, err = conn.Write(buf[:n])
if err != nil {
break
}
}
plog.G(ctx).Infof("[TUN-UDP] Read length %d data from dst: %s -> src: %s", written, dst, src)
errChan <- err
}()
err = <-errChan
if err != nil && !errors.Is(err, io.EOF) {
log.Debugf("[TUN-UDP] Error: dsiconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err)
err1 = <-errChan
if err1 != nil && !errors.Is(err1, io.EOF) {
plog.G(ctx).Errorf("[TUN-UDP] Disconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err1)
}
}()
}).HandlePacket

View File

@@ -3,44 +3,18 @@ package core
import (
"context"
"fmt"
"io"
"net"
"time"
log "github.com/sirupsen/logrus"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"github.com/pkg/errors"
"gvisor.dev/gvisor/pkg/tcpip"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
type gvisorUDPOverTCPTunnelConnector struct {
Id stack.TransportEndpointID
}
func GvisorUDPOverTCPTunnelConnector(endpointID stack.TransportEndpointID) Connector {
return &gvisorUDPOverTCPTunnelConnector{
Id: endpointID,
}
}
func (c *gvisorUDPOverTCPTunnelConnector) ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error) {
switch con := conn.(type) {
case *net.TCPConn:
err := con.SetNoDelay(true)
if err != nil {
return nil, err
}
err = con.SetKeepAlive(true)
if err != nil {
return nil, err
}
err = con.SetKeepAlivePeriod(15 * time.Second)
if err != nil {
return nil, err
}
}
return newGvisorFakeUDPTunnelConnOverTCP(ctx, conn)
}
type gvisorUDPHandler struct{}
func GvisorUDPHandler() Handler {
@@ -49,63 +23,73 @@ func GvisorUDPHandler() Handler {
func (h *gvisorUDPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
defer tcpConn.Close()
log.Debugf("[TUN-UDP] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
plog.G(ctx).Debugf("[TUN-UDP] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
// 1, get proxy info
endpointID, err := ParseProxyInfo(tcpConn)
id, err := util.ParseProxyInfo(tcpConn)
if err != nil {
log.Warningf("[TUN-UDP] Error: Failed to parse proxy info: %v", err)
plog.G(ctx).Errorf("[TUN-UDP] Failed to parse proxy info: %v", err)
return
}
log.Debugf("[TUN-UDP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
endpointID.LocalPort, endpointID.LocalAddress.String(), endpointID.RemotePort, endpointID.RemoteAddress.String(),
plog.G(ctx).Infof("[TUN-UDP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress: %s",
id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
)
// 2, dial proxy
addr := &net.UDPAddr{
IP: endpointID.LocalAddress.AsSlice(),
Port: int(endpointID.LocalPort),
IP: id.LocalAddress.AsSlice(),
Port: int(id.LocalPort),
}
var network string
if id.LocalAddress.To4() != (tcpip.Address{}) {
network = "udp4"
} else {
network = "udp6"
}
var remote *net.UDPConn
remote, err = net.DialUDP("udp", nil, addr)
remote, err = net.DialUDP(network, nil, addr)
if err != nil {
log.Debugf("[TUN-UDP] Error: failed to connect addr %s: %v", addr.String(), err)
plog.G(ctx).Errorf("[TUN-UDP] Failed to connect addr %s: %v", addr.String(), err)
return
}
handle(ctx, tcpConn, remote)
}
// fake UDP connection over TCP
type gvisorFakeUDPTunnelConn struct {
type gvisorUDPConnOverTCP struct {
// tcp connection
net.Conn
ctx context.Context
}
func newGvisorFakeUDPTunnelConnOverTCP(ctx context.Context, conn net.Conn) (net.Conn, error) {
return &gvisorFakeUDPTunnelConn{ctx: ctx, Conn: conn}, nil
func newGvisorUDPConnOverTCP(ctx context.Context, conn net.Conn) (net.Conn, error) {
return &gvisorUDPConnOverTCP{ctx: ctx, Conn: conn}, nil
}
func (c *gvisorFakeUDPTunnelConn) Read(b []byte) (int, error) {
func (c *gvisorUDPConnOverTCP) Read(b []byte) (int, error) {
select {
case <-c.ctx.Done():
return 0, c.ctx.Err()
default:
dgram, err := readDatagramPacket(c.Conn, b)
datagram, err := readDatagramPacket(c.Conn, b)
if err != nil {
return 0, err
}
return int(dgram.DataLength), nil
return int(datagram.DataLength), nil
}
}
func (c *gvisorFakeUDPTunnelConn) Write(b []byte) (int, error) {
dgram := newDatagramPacket(b)
if err := dgram.Write(c.Conn); err != nil {
func (c *gvisorUDPConnOverTCP) Write(b []byte) (int, error) {
buf := config.LPool.Get().([]byte)[:]
n := copy(buf, b)
defer config.LPool.Put(buf)
packet := newDatagramPacket(buf, n)
if err := packet.Write(c.Conn); err != nil {
return 0, err
}
return len(b), nil
}
func (c *gvisorFakeUDPTunnelConn) Close() error {
func (c *gvisorUDPConnOverTCP) Close() error {
if cc, ok := c.Conn.(interface{ CloseRead() error }); ok {
_ = cc.CloseRead()
}
@@ -116,7 +100,7 @@ func (c *gvisorFakeUDPTunnelConn) Close() error {
}
func GvisorUDPListener(addr string) (net.Listener, error) {
log.Debug("gvisor UDP over TCP listen addr", addr)
plog.G(context.Background()).Infof("Gvisor UDP over TCP listening addr: %s", addr)
laddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
return nil, err
@@ -130,77 +114,59 @@ func GvisorUDPListener(addr string) (net.Listener, error) {
func handle(ctx context.Context, tcpConn net.Conn, udpConn *net.UDPConn) {
defer udpConn.Close()
log.Debugf("[TUN-UDP] Debug: %s <-> %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
plog.G(ctx).Debugf("[TUN-UDP] %s <-> %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
errChan := make(chan error, 2)
go func() {
b := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(b[:])
for {
select {
case <-ctx.Done():
return
default:
}
defer util.HandleCrash()
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
for ctx.Err() == nil {
err := tcpConn.SetReadDeadline(time.Now().Add(time.Second * 30))
if err != nil {
log.Debugf("[TUN-UDP] Error: set read deadline failed: %v", err)
errChan <- err
errChan <- errors.WithMessage(err, "set read deadline failed")
return
}
dgram, err := readDatagramPacket(tcpConn, b[:])
datagram, err := readDatagramPacket(tcpConn, buf)
if err != nil {
log.Debugf("[TUN-UDP] Debug: %s -> 0 : %v", tcpConn.RemoteAddr(), err)
errChan <- err
errChan <- errors.WithMessage(err, "read datagram packet failed")
return
}
if dgram.DataLength == 0 {
log.Debugf("[TUN-UDP] Error: length is zero")
if datagram.DataLength == 0 {
errChan <- fmt.Errorf("length of read packet is zero")
return
}
err = udpConn.SetWriteDeadline(time.Now().Add(time.Second * 30))
if err != nil {
log.Debugf("[TUN-UDP] Error: set write deadline failed: %v", err)
errChan <- err
errChan <- errors.WithMessage(err, "set write deadline failed")
return
}
if _, err = udpConn.Write(dgram.Data); err != nil {
log.Debugf("[TUN-UDP] Error: %s -> %s : %s", tcpConn.RemoteAddr(), "localhost:8422", err)
errChan <- err
if _, err = udpConn.Write(datagram.Data[:datagram.DataLength]); err != nil {
errChan <- errors.WithMessage(err, "write datagram packet failed")
return
}
log.Debugf("[TUN-UDP] Debug: %s >>> %s length: %d", tcpConn.RemoteAddr(), "localhost:8422", dgram.DataLength)
plog.G(ctx).Debugf("[TUN-UDP] %s >>> %s length: %d", tcpConn.RemoteAddr(), udpConn.RemoteAddr(), datagram.DataLength)
}
}()
go func() {
b := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(b[:])
for {
select {
case <-ctx.Done():
return
default:
}
defer util.HandleCrash()
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
for ctx.Err() == nil {
err := udpConn.SetReadDeadline(time.Now().Add(time.Second * 30))
if err != nil {
log.Debugf("[TUN-UDP] Error: set read deadline failed: %v", err)
errChan <- err
errChan <- errors.WithMessage(err, "set read deadline failed")
return
}
n, _, err := udpConn.ReadFrom(b[:])
n, _, err := udpConn.ReadFrom(buf[:])
if err != nil {
log.Debugf("[TUN-UDP] Error: %s : %s", tcpConn.RemoteAddr(), err)
errChan <- err
errChan <- errors.WithMessage(err, "read datagram packet failed")
return
}
if n == 0 {
log.Debugf("[TUN-UDP] Error: length is zero")
errChan <- fmt.Errorf("length of read packet is zero")
return
}
@@ -208,23 +174,21 @@ func handle(ctx context.Context, tcpConn net.Conn, udpConn *net.UDPConn) {
// pipe from peer to tunnel
err = tcpConn.SetWriteDeadline(time.Now().Add(time.Second * 30))
if err != nil {
log.Debugf("[TUN-UDP] Error: set write deadline failed: %v", err)
errChan <- errors.WithMessage(err, "set write deadline failed")
return
}
packet := newDatagramPacket(buf, n)
if err = packet.Write(tcpConn); err != nil {
errChan <- err
return
}
dgram := newDatagramPacket(b[:n])
if err = dgram.Write(tcpConn); err != nil {
log.Debugf("[TUN-UDP] Error: %s <- %s : %s", tcpConn.RemoteAddr(), dgram.Addr(), err)
errChan <- err
return
}
log.Debugf("[TUN-UDP] Debug: %s <<< %s length: %d", tcpConn.RemoteAddr(), dgram.Addr(), len(dgram.Data))
plog.G(ctx).Debugf("[TUN-UDP] %s <<< %s length: %d", tcpConn.RemoteAddr(), tcpConn.LocalAddr(), packet.DataLength)
}
}()
err := <-errChan
if err != nil {
log.Debugf("[TUN-UDP] Error: %v", err)
if err != nil && !errors.Is(err, io.EOF) {
plog.G(ctx).Errorf("[TUN-UDP] %v", err)
}
log.Debugf("[TUN-UDP] Debug: %s >-< %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
plog.G(ctx).Debugf("[TUN-UDP] %s >-< %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
return
}
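
handle carries discrete UDP datagrams over the TCP stream with newDatagramPacket/readDatagramPacket, whose implementation is not shown in this diff. A standalone sketch assuming a conventional 2-byte big-endian length prefix (the prefix size is an assumption, not confirmed by this excerpt):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeDatagram frames one datagram: assumed 2-byte big-endian length, then payload.
func writeDatagram(w io.Writer, data []byte) error {
	var hdr [2]byte
	binary.BigEndian.PutUint16(hdr[:], uint16(len(data)))
	if _, err := w.Write(hdr[:]); err != nil {
		return err
	}
	_, err := w.Write(data)
	return err
}

// readDatagram reads exactly one framed datagram back off the stream.
func readDatagram(r io.Reader) ([]byte, error) {
	var hdr [2]byte
	if _, err := io.ReadFull(r, hdr[:]); err != nil {
		return nil, err
	}
	data := make([]byte, binary.BigEndian.Uint16(hdr[:]))
	_, err := io.ReadFull(r, data)
	return data, err
}

func main() {
	var stream bytes.Buffer
	_ = writeDatagram(&stream, []byte("dns query"))
	_ = writeDatagram(&stream, []byte("dns reply"))
	for {
		d, err := readDatagram(&stream)
		if err != nil {
			break // io.EOF once the stream is drained
		}
		fmt.Printf("datagram: %q\n", d)
	}
}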

View File

@@ -7,9 +7,7 @@ import (
"strings"
)
var (
ErrorInvalidNode = errors.New("invalid node")
)
var ErrorInvalidNode = errors.New("invalid node")
type Node struct {
Addr string
@@ -29,12 +27,13 @@ func ParseNode(s string) (*Node, error) {
if err != nil {
return nil, err
}
return &Node{
node := &Node{
Addr: u.Host,
Remote: strings.Trim(u.EscapedPath(), "/"),
Values: u.Query(),
Protocol: u.Scheme,
}, nil
}
return node, nil
}
// Get returns node parameter specified by key.
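ParseNode usage, for reference; the pkg/core import path is assumed:

package main

import (
	"fmt"

	"github.com/wencaiwulue/kubevpn/v2/pkg/core" // assumed import path
)

func main() {
	// A node URL carries the transport scheme, address, an optional remote
	// path and arbitrary query parameters.
	node, err := core.ParseNode("tcp://127.0.0.1:10800/remote?timeout=5s")
	if err != nil {
		panic(err)
	}
	fmt.Println(node.Protocol)              // tcp
	fmt.Println(node.Addr)                  // 127.0.0.1:10800
	fmt.Println(node.Remote)                // remote
	fmt.Println(node.Values.Get("timeout")) // 5s
}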

Some files were not shown because too many files have changed in this diff.