Compare commits

...

326 Commits

Author SHA1 Message Date
naison
86585214d4 Merge pull request #438 from kubenetworks/hotfix/default-use-spdy-not-websocket-to-portforward
hotfix: default use spdy not websocket protocol to portforward
2025-02-23 21:42:21 +08:00
naison
867aefbc3a Merge pull request #441 from kubenetworks/hotfix/fix-daemon-process-unexpected-exit-on-linux
fix: fix daemon process unexpected exit on linux
2025-02-23 21:39:59 +08:00
fengcaiwen
2037d3b05f fix: fix daemon process unexpected exit on linux 2025-02-23 21:37:52 +08:00
naison
794fd861ba Merge pull request #440 from kubenetworks/hotfix/fix-podlabel-find-service-in-fargate-mode
use match not equal to find svc by pod label in fargate mode
2025-02-22 20:18:54 +08:00
naison
d10a4e3aef use match not equal to find svc by pod label in fargate mode 2025-02-22 12:00:56 +00:00
naison
5b39275f5b hotfix: default use spdy not websocket to portforward 2025-02-21 14:39:21 +00:00
naison
de38a35189 Revert "chore: upload charts to repo charts"
This reverts commit 2793ab20e6.
2025-02-13 10:40:33 +00:00
naison
04c0b33516 Merge pull request #436 from kubenetworks/feat/update-krew-index-version
feat: update krew index version to refs/tags/v2.3.12
2025-02-13 15:50:56 +08:00
kubenetworks
ffdefce23c Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-02-13 07:46:06 +00:00
wencaiwulue
2a3b4d89f7 feat: update krew index version to refs/tags/v2.3.12 2025-02-13 07:20:30 +00:00
naison
b1abafd7f4 Merge pull request #435 from kubenetworks/feat/add-cmd-image-copy
feat: add cmd image copy
2025-02-13 12:50:16 +08:00
naison
12f29f2528 Merge pull request #434 from kubenetworks/hotfix/fix-cmd-ssh-terminal-bug
hotfix: fix ssh terminal bug
2025-02-13 12:50:06 +08:00
naison
7f3f0305e4 feat: add cmd image copy 2025-02-13 04:48:39 +00:00
naison
c947472d47 hotfix: fix ssh terminal bug 2025-02-13 04:46:59 +00:00
naison
4013846cab Merge pull request #433 from kubenetworks/hotfix/use-default-krb5-config
hotfix: use default krb5 config and not cancel context after handle new local connection of PortmapUtil, otherwise ssh.client stop channel also closed
2025-02-13 11:48:14 +08:00
fengcaiwen
399bc4efe0 hotfix: not cancel context after handle new local connection of PortmapUtil, otherwise ssh.client stop channel also closed 2025-02-12 23:27:45 +08:00
fengcaiwen
24367b1b82 hotfix: use default krb5 config 2025-02-12 22:20:51 +08:00
naison
1a32d7a58e Merge pull request #432 from kubenetworks/chore/add-upload-charts-to-repo-charts
chore: upload charts to repo charts
2025-02-09 16:32:35 +08:00
naison
2793ab20e6 chore: upload charts to repo charts 2025-02-09 08:31:00 +00:00
naison
528ac55325 Merge pull request #431 from kubenetworks/chore/upgrade-go-mod-library
chore: upgrade go mod library
2025-02-09 11:19:10 +08:00
fengcaiwen
3896fd1642 chore: upgrade go mod library 2025-02-09 11:07:10 +08:00
naison
819b20bbdb Merge pull request #430 from kubenetworks/chore/upgrade-go-mod-library
chore: upgrade go mod library
2025-02-08 21:38:17 +08:00
fengcaiwen
2fc0bb3f0c chore: upgrade go mod library 2025-02-08 20:45:20 +08:00
naison
a6730613e7 Merge pull request #429 from kubenetworks/hotfix/add-platform-for-cmd-ssh
hotfix: add platform for cmd ssh
2025-02-08 20:12:06 +08:00
naison
3ad0b5d1a3 hotfix: add platform for cmd ssh 2025-02-08 12:04:25 +00:00
naison
3c2b7943b5 Merge pull request #427 from kubenetworks/feat/update-krew-index-version
feat: update krew index version to refs/tags/v2.3.11
2025-02-03 17:25:46 +08:00
kubenetworks
b2f5fc6ac1 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-02-03 09:24:54 +00:00
wencaiwulue
768e8b1931 feat: update krew index version to refs/tags/v2.3.11 2025-02-03 09:24:09 +00:00
naison
abe1bcafd6 Merge pull request #426 from kubenetworks/refactor/cmd-dev-use-exec-command-insead-of-library
refactor: dev mode use exec command instead of library
2025-02-03 14:21:55 +08:00
fengcaiwen
07cfb8b02e refactor: dev mode use exec command instead of library 2025-02-03 14:14:31 +08:00
naison
11a89d8609 Merge pull request #424 from kubenetworks/feat/use-regctl-copy-image-on-local-pc
feat: use regctl copy image on local pc
2025-01-29 14:05:47 +08:00
naison
98baec8253 feat: use regctl copy image on local pc 2025-01-29 06:03:36 +00:00
naison
1d40843e99 Merge pull request #423 from kubenetworks/docs/update-arch-image
docs: update arch image
2025-01-29 11:56:28 +08:00
naison
be327d571b docs: update arch image 2025-01-29 03:55:46 +00:00
naison
8c96431328 Merge pull request #422 from kubenetworks/refactor/logic-create-outbound-pod
refactor: create outbound pod
2025-01-25 21:51:37 +08:00
naison
666a69cdfb refactor: create outbound pod 2025-01-25 13:50:47 +00:00
naison
9a922ae084 Merge pull request #421 from kubenetworks/refactor/refactor-cmd-ssh-client
refactor: cmd ssh client
2025-01-25 20:28:42 +08:00
naison
f55a65e04c refactor: cmd ssh client 2025-01-25 12:27:51 +00:00
naison
a3c166dc7b Merge pull request #420 from kubenetworks/docs/update-gvsior-mesh-arch
docs: update gvisor mesh arch
2025-01-25 10:32:37 +08:00
naison
7426541e0f docs: update gvisor mesh arch 2025-01-25 02:28:27 +00:00
naison
d70ac3418e Merge pull request #418 from kubenetworks/feat/update-krew-index-version
feat: update krew index version to refs/tags/v2.3.10
2025-01-24 21:38:19 +08:00
kubenetworks
5c502c9d5f Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2025-01-24 13:36:35 +00:00
wencaiwulue
c7d8e381f4 feat: update krew index version to refs/tags/v2.3.10 2025-01-24 13:35:42 +00:00
naison
5ac2588e5d Merge pull request #417 from kubenetworks/docs/add-docs-gvisor-service-mode-proxy-arch
docs: add gvisor service proxy mode arch
2025-01-24 18:53:00 +08:00
naison
e0e45cf84e docs: add gvisor service proxy mode arch 2025-01-24 10:52:18 +00:00
naison
ebfb7168d2 Merge pull request #416 from kubenetworks/feat/add-image-pull-secret-name
feat: add image pull secret name
2025-01-24 14:53:06 +08:00
naison
caee039ffd feat: add image pull secret name 2025-01-24 06:51:34 +00:00
naison
3d4c8be963 Merge pull request #415 from kubenetworks/feat/not-realy-on-cap-net-admin-and-privliaged
feat: support AWS Fargate cluster
2025-01-18 21:26:04 +08:00
fengcaiwen
c6f59e46c9 hotfix: use pod label to match service selector for finding service by pod 2025-01-18 16:23:08 +08:00
fengcaiwen
7d028fc950 feat: proxy mode support proxy multiple workloads 2025-01-18 11:13:09 +08:00
fengcaiwen
12920650ba feat: aws fargate mode works 2025-01-18 11:10:20 +08:00
fengcaiwen
2e96247e74 feat: add cmd uninstall and rename cmd reset 2025-01-18 11:06:07 +08:00
fengcaiwen
b6cfba7db9 feat: fargate mode works basic 2025-01-18 10:52:51 +08:00
naison
8b771e82b5 docs: add supported by JETBRAINS (#411) 2025-01-05 20:25:41 +08:00
naison
d737a6b434 feat: update krew index version to refs/tags/v2.3.9 (#410)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-12-21 23:31:26 +08:00
kubenetworks
420fcd4abb Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-12-21 15:29:42 +00:00
naison
fd786caa0f hotfix: disable net.DefaultResolver.PreferGo (#409) 2024-12-21 22:51:03 +08:00
Constantin Revenko
d3c2ddecc4 return system resolver for sending requests to KubeAPI (#407)
Co-authored-by: Константин Ревенко <konstantin.revenko@mediascope.net>
2024-12-21 22:46:20 +08:00
naison
2e8d251b20 refactor: optimize alias output (#408) 2024-12-21 22:24:03 +08:00
naison
6cd7837d28 feat: update krew index version to refs/tags/v2.3.8 (#406)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-12-19 22:26:33 +08:00
kubenetworks
652a60ce1f Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-12-19 14:19:38 +00:00
naison
fad55dce28 feat(log): log trace if panic (#405) 2024-12-18 20:50:47 +08:00
naison
68d550a80d hotfix: use 64k buffer to read tun device packet for windows tun device mtu 65535 (#404) 2024-12-17 11:46:09 +08:00
Constantin Revenko
51166477c2 Fix panic when removing CIDRs containing API server IP addresses (#403)
* add func removeCIDRsContainingIPs

* remove comments

---------

Co-authored-by: Константин Ревенко <konstantin.revenko@mediascope.net>
2024-12-17 10:10:16 +08:00
naison
4476a38883 feat: update krew index version to refs/tags/v2.3.7 (#400)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-12-15 01:25:55 +08:00
kubenetworks
6597331740 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-12-14 17:25:08 +00:00
naison
6e594fa5a5 hotfix: ignore print cancel GRPC msg (#399) 2024-12-13 23:34:54 +08:00
naison
f046e474af hotfix: return error if resolve extra-domain ip is empty (#397) 2024-12-13 18:26:14 +08:00
naison
062c69de0e hotfix: add traffic-manager pod ip to route table (#396) 2024-12-13 18:21:01 +08:00
naison
b9c1f2a814 hotfix: fix print grpc msg bug (#395) 2024-12-13 18:16:20 +08:00
naison
5599dc6bdd refactor: optimize code (#393) 2024-12-11 21:04:09 +08:00
naison
d068125897 feat: update krew index version to refs/tags/v2.3.6 (#391)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-12-09 20:45:32 +08:00
kubenetworks
959d285294 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-12-09 11:52:05 +00:00
naison
d165dacd20 feat: use origin probe (#390) 2024-12-09 19:13:43 +08:00
naison
9ebc95352a hotfix: envoy control-plane detect enable ipv6 or not to add route (#389) 2024-12-09 18:50:44 +08:00
naison
d9d4091905 feat: update krew index version to refs/tags/v2.3.5 (#388)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-12-06 22:41:24 +08:00
kubenetworks
7618ae30ca Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-12-06 14:40:12 +00:00
naison
1dc3c057a7 hotfix: detect enable ipv6 for envoy (#387)
* hotfix: detect enable ipv6 for envoy

* hotfix: detect pod enable ipv6 for envoy

* hotfix: optimize code
2024-12-06 22:03:37 +08:00
naison
81f62eab31 refactor: refactor print GRPC message (#386) 2024-12-06 19:29:11 +08:00
naison
d9a978d330 hotfix: ignore setup ipv6 failed if not enable ipv6 (#385) 2024-12-05 15:05:54 +08:00
naison
c95cb5ba6c feat: update krew index version to refs/tags/v2.3.4 (#383)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-11-29 21:06:40 +08:00
kubenetworks
d418da83b0 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-11-29 13:03:24 +00:00
naison
24a97de5dc hotfix: add more resolver on macOS (#382) 2024-11-29 20:25:26 +08:00
naison
481b720da6 hotfix: close gvisor endpoint if tcp conn closed (#378) 2024-11-23 17:08:03 +08:00
naison
a1247995e7 feat: update krew index version to refs/tags/v2.3.3 (#377)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-11-22 22:54:56 +08:00
kubenetworks
7cb86d70b0 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-11-22 14:54:14 +00:00
naison
9edf0122a7 feat: alias add description (#376) 2024-11-22 22:09:35 +08:00
naison
5a0533c0fc feat: add gvisor endpoint log (#375) 2024-11-22 22:06:44 +08:00
naison
17a13a2672 hotfix: add idle timeout 120s for gvisor udp forwarder connection (#374) 2024-11-22 22:03:56 +08:00
naison
98c22ba9b7 refactor: refactor code (#373) 2024-11-22 22:00:50 +08:00
naison
880f842203 feat: update krew index version to refs/tags/v2.3.2 (#372)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-11-18 19:52:58 +08:00
kubenetworks
ab09f9e71c Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-11-18 11:52:12 +00:00
naison
ef16641675 refactor: refactor code (#371) 2024-11-18 18:47:54 +08:00
naison
d9a9000d7b hotfix: fix can not ping itself tun IP on windows 2024-11-18 10:43:59 +00:00
naison
a1212f5144 feat: update krew index version to refs/tags/v2.3.1 (#370)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-11-15 21:39:14 +08:00
kubenetworks
f4c22f3073 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-11-15 13:36:37 +00:00
naison
2aa7812cb1 feat: use gvisor parse network packet in pod (#369) 2024-11-15 20:56:10 +08:00
naison
cad5d23d33 feat: update krew index version to refs/tags/v2.2.22 (#367)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-10-30 16:52:55 +08:00
kubenetworks
85e8bd76d2 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-10-30 08:46:09 +00:00
naison
a243842052 hotfix: fix port-forward retry bug (#366) 2024-10-30 10:30:48 +08:00
naison
6e052a5a0b feat: logs lines support '-' sign means starting log file lines (#365) 2024-10-30 09:10:20 +08:00
naison
f966cd29d7 feat: add number of lines to logs (#364) 2024-10-29 18:44:41 +08:00
naison
bfb7ac441d feat: update krew index version to refs/tags/v2.2.21 (#362)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-10-25 22:41:23 +08:00
kubenetworks
0cc8b04bab Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-10-25 14:10:26 +00:00
naison
65ae890842 feat: add APIServer ip to route table if ssh info is not empty (#361) 2024-10-25 21:25:42 +08:00
naison
aa881a589e hotfix: fix ssh and port-forward retry bug (#360)
Co-authored-by: fengcaiwen <fengcaiwen@bytedance.com>
2024-10-25 21:25:03 +08:00
naison
07292fcde5 feat: update krew index version to refs/tags/v2.2.20 (#358)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-10-20 12:02:45 +08:00
kubenetworks
3071ff2439 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-10-20 04:00:07 +00:00
naison
a64eaf66da hotfix: fix port forward and ssh (#357)
* hotfix(portfoward): fix port-forward bug and ssh reconnect bug

* hotfix: remove larger overlapping cidrs for adding routes

* feat: retry port-forward if get pod err is not forbidden

* hotfix: fix ssh and port-forward

* feat: add more log

* hotfix: set go default resolver PreferGo option to true
2024-10-20 11:23:49 +08:00
naison
9238e9914a feat(alias): show available alias name (#354) 2024-10-18 16:15:35 +08:00
naison
6e4aeb288a hotfix: ssh daemon (#352) 2024-10-14 09:16:24 +08:00
wencaiwulue
105c3967e1 feat: update krew index version to refs/tags/v2.2.19 2024-10-10 12:12:34 +08:00
naison
5dae60ffbc hotfix: sleep 200ms reconnect 2024-10-10 11:35:39 +08:00
wencaiwulue
875cb8dc8c feat: update krew index version to refs/tags/v2.2.19 2024-10-10 11:07:19 +08:00
naison
15103837a7 hotfix: fix ssh re-connect logic 2024-10-10 10:28:58 +08:00
wencaiwulue
baf5b79a24 feat: update krew index version to refs/tags/v2.2.19 2024-10-10 08:48:44 +08:00
kubenetworks
5618500e66 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-10-10 00:47:09 +00:00
fengcaiwen
d28096d9fa hotfix: upgrade github action 2024-10-10 08:11:43 +08:00
fengcaiwen
bc960987ea hotfix: set version to tag if commitid is empty in github action workflow 2024-10-09 23:14:24 +08:00
fengcaiwen
1005075367 feat: upgrade go version in Dockerfile 2024-10-09 22:06:46 +08:00
fengcaiwen
8f4de1968a feat: upgrade dlv 2024-10-09 22:04:54 +08:00
fengcaiwen
a93f0b1667 feat: upgrade github action 2024-10-09 21:57:44 +08:00
fengcaiwen
941373a902 feat: upgrade syncthing gui 2024-10-09 21:50:32 +08:00
naison
605fe047ca feat: upgrade syncthing version 2024-10-09 21:50:32 +08:00
naison
4d075b29b3 feat: upgrade go version to 1.23 2024-10-09 21:50:32 +08:00
naison
d141ec869b fix: fix dns on linux (#336)
* fix: fix dns on linux

* feat: detect run in Github action or not to setup DNS
2024-10-09 19:17:50 +08:00
naison
e2757d3916 hotfix: fix setup docker failed on macos (#334) 2024-10-08 10:37:53 +08:00
naison
9d917ae9cb docs: update doc (#333) 2024-09-14 20:01:58 +08:00
naison
0763e8a201 hotfix: fix upgrade on windows (#330)
* hotfix: fix upgrade on windows
2024-09-13 14:12:04 +08:00
naison
274116e44f feat: update krew index version to refs/tags/v2.2.18 (#329)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-09-10 17:57:33 +08:00
kubenetworks
ed375be157 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-09-10 09:39:12 +00:00
naison
be8ef7a127 hotfix: use pro-bing to send heartbeats instead of generating icmp packet (#328) 2024-09-10 16:56:25 +08:00
naison
2bfa82d936 docs: update readme.md (#327) 2024-09-08 15:37:18 +08:00
naison
394bc1a0e4 chore: add link to install tools (#326) 2024-09-07 09:26:20 +08:00
naison
e64b9a3311 feat: use scoop to install kubevpn on Windows (#325) 2024-09-07 08:44:37 +08:00
naison
f9bbaeb3cf chore: update command usage (#324) 2024-09-06 21:28:35 +08:00
naison
ac918b5009 feat: update krew index version to refs/tags/v2.2.17 (#319)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-08-03 16:54:54 +08:00
naison
69b6fa6318 hotfix: fix interface conversion panic (#318) 2024-08-03 16:05:33 +08:00
kubenetworks
63be89bf25 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-08-03 07:45:55 +00:00
naison
c4fb3c5ca0 refactor: remove useless heartbeats (#316) 2024-08-03 15:14:43 +08:00
naison
947d50af85 feat: add syncthing re-connect (#315) 2024-08-03 15:02:48 +08:00
naison
0826f2e20c refactor: refactor log make it more formal (#314) 2024-08-03 15:01:16 +08:00
naison
9f62e02f96 hotfix: use pod ip as dns server if service ip is unreachable (#313) 2024-08-02 12:07:10 +08:00
naison
a3b8c1586d refactor: refactor ssh structure (#311) 2024-07-27 10:37:48 +08:00
naison
675ce2a52f feat: update krew index version to refs/tags/v2.2.16 (#310)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-07-26 22:09:06 +08:00
kubenetworks
79e524e319 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-07-26 13:43:50 +00:00
naison
49adeac14c refactor: refactor command dev (#309) 2024-07-26 21:11:59 +08:00
naison
9283c2f8f7 refactor: shrink dev mode container options (#308) 2024-07-25 16:42:21 +08:00
naison
a48750c048 hotfix: fix clone sync init no permission (#307) 2024-07-24 18:44:15 +08:00
naison
bbf3914f1e hotfix: fix upgrade (#305) 2024-07-23 20:54:06 +08:00
naison
f13e21a049 refactor: refactor code (#306) 2024-07-23 19:11:58 +08:00
naison
a37bfc28da hotfix: fix upgrade use rename but cross device (#304) 2024-07-21 21:08:17 +08:00
naison
862238f65f feat: update krew index version to refs/tags/v2.2.15 (#301)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-07-19 23:03:54 +08:00
kubenetworks
18d6f67a5d Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-07-19 15:03:14 +00:00
naison
4ae09a9dd2 refactor: remove useless code (#300) 2024-07-19 22:28:58 +08:00
naison
1feaacaba9 refactor: refactor code (#299) 2024-07-19 22:25:23 +08:00
naison
bc7d205695 refactor: refactor DHCP logic (#298) 2024-07-19 22:07:35 +08:00
naison
78de74bf08 feat: enable tun ip forward on Windows (#297) 2024-07-19 22:06:14 +08:00
naison
8c0f2098c9 feat: update krew index version to refs/tags/v2.2.14 (#296)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-07-12 23:29:48 +08:00
kubenetworks
44320a792e Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-07-12 15:24:28 +00:00
naison
0e2a8f1ce6 feat: exec and port-forward using websocket if available (#295) 2024-07-12 22:09:50 +08:00
naison
b0a6a0d054 hotfix: fix ssh bug (#294) 2024-07-12 22:08:17 +08:00
naison
62b0de99f9 hotfix: typo (#293) 2024-07-08 21:42:51 +08:00
naison
295a7a709e feat: update krew index version to refs/tags/v2.2.13 (#292)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-07-05 23:11:33 +08:00
kubenetworks
8d400fd698 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-07-05 15:08:40 +00:00
naison
5f0fe6668a feat: cmd version use short commitid (#291) 2024-07-05 22:21:19 +08:00
naison
993be34b70 hotfix: delete from hosts when service is deleted (#290) 2024-07-05 22:01:43 +08:00
naison
8093cb125a hotfix: fix list and watch pod/service to add route bug (#289) 2024-07-05 22:00:59 +08:00
naison
d3542b840a hotfix: fix log rotate (#288) 2024-07-05 22:00:20 +08:00
naison
d2faffc2c7 hotfix: fix log rotate (#287) 2024-07-05 21:59:33 +08:00
naison
d2648aabed hotfix: fix log rotate (#286) 2024-07-05 21:58:09 +08:00
naison
0e87705e5e hotfix: add timeout to ssh dial (#285) 2024-07-05 21:57:35 +08:00
naison
2d947f965f docs: update README.md (#284) 2024-07-03 23:24:24 +08:00
naison
35ef5a8c88 docs: add use brew to install kubevpn (#282) 2024-06-30 14:53:02 +08:00
naison
ce750d9c74 feat: update krew index version to refs/tags/v2.2.12 (#281)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-06-29 23:38:45 +08:00
kubenetworks
207445640e Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-06-29 15:36:12 +00:00
naison
e9327ec572 fix: fix watch pod/svc resource version too old (#280) 2024-06-29 11:56:31 +08:00
naison
deb4ec98f5 fix: disconnect if operation connect has cancelled (#279) 2024-06-29 11:56:21 +08:00
naison
5cd7ef4a0a feat: update krew index version to refs/tags/v2.2.11 (#278)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-06-27 22:01:24 +08:00
naison
d6f833fc0b feat: only use tag judge daemon version (#277) 2024-06-26 22:11:05 +08:00
naison
faa6229aef feat: update krew index version to refs/tags/v2.2.11 (#276)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-06-24 10:11:53 +08:00
naison
98d88ac542 feat: makefile var use env (#275)
* feat: makefile var use env
2024-06-24 09:32:58 +08:00
naison
680e95fd7f feat: update krew index version to refs/tags/v2.2.11 (#273)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-06-22 11:07:38 +08:00
naison
4aeee5f8d8 feat: discard syncthing log (#272) 2024-06-22 10:33:37 +08:00
naison
28d2e78d04 feat: update krew index version to refs/tags/v2.2.11 (#271)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-06-21 22:18:09 +08:00
kubenetworks
d8e0cbcc3d Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-06-21 14:13:54 +00:00
naison
ed4c6bbe2f feat: add syncthing for clone mode (#270)
* feat: add syncthing for clone mode

---------

Co-authored-by: wencaiwulue <895703375@qq.com>
2024-06-21 21:27:20 +08:00
naison
a45688115c feat: push image to ghcr (#269) 2024-06-14 21:17:50 +08:00
naison
35f0568b09 doc: add contribution to readme (#267) 2024-05-31 11:35:14 +08:00
naison
2ec20f7d1d refactor: upgrade (#266) 2024-05-24 10:54:12 +08:00
naison
a26abab8ce refactor: command get (#265) 2024-05-24 10:53:58 +08:00
naison
9be029e65e feat: update krew index version to refs/tags/v2.2.10 (#262)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-05-21 19:58:17 +08:00
naison
6fed288e67 chore: add label to get cidr pod spec (#261)
* chore: add label to get cidr pod spec

* chore: add label selector
2024-05-21 19:14:08 +08:00
naison
80e3aa154c chore: add label to get cidr pod spec (#260) 2024-05-21 18:51:54 +08:00
naison
38b9ad1991 hotfix: fix get cidr npe (#259) 2024-05-21 18:40:55 +08:00
naison
eaacf3954f feat: update krew index version to refs/tags/v2.2.10 (#258)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-05-21 14:47:24 +08:00
kubenetworks
12a12bcda7 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-05-21 06:46:20 +00:00
naison
28f6d54398 hotfix: remove PriorityClassName from deploy spec (#257) 2024-05-21 14:15:59 +08:00
naison
a23b197554 feat: update krew index version to refs/tags/v2.2.9 (#254)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-05-17 09:36:20 +08:00
naison
a0ca862d59 chore: update alias config example (#253) 2024-05-17 08:58:50 +08:00
naison
7dd762b853 feat: update krew index version to refs/tags/v2.2.9 (#252)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-05-16 14:37:34 +08:00
naison
78762cd9e5 hotfix: fix clone mode use jumped kubeconfig (#251) 2024-05-16 10:53:58 +08:00
kubenetworks
e58a9bf69e Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-05-14 11:50:55 +00:00
naison
a10b1b2526 feat: update krew index version to refs/tags/v2.2.9 (#250)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-05-14 19:50:23 +08:00
naison
331423c308 feat: not set ipv6 if ipv6 is not enable on linux (#248)
* feat: not set ipv6 if ipv6 is not enable on linux
2024-05-14 14:17:34 +08:00
naison
e5c1ea4b9b hotfix: fix set ipv4 on bsd (#249) 2024-05-14 13:02:26 +08:00
naison
cc032c4a6d feat: write pprof to file when daemon quit (#247) 2024-05-14 11:19:58 +08:00
naison
984ab2ce89 hotfix: cancel ctx (#246) 2024-05-14 10:31:08 +08:00
naison
3e51bf0f4d hotfix: close chan (#245) 2024-05-13 19:58:56 +08:00
naison
e7f00f5899 hotfix: cleanup in time when connect lite mode (#243) 2024-05-13 10:14:54 +08:00
naison
70d5723e97 refactor: refactor daemon process (#242)
* refactor: refactor daemon process

* hotfix: add fsnotify to watch pid file
2024-05-10 21:39:14 +08:00
naison
5da018db2a feat: update krew index version to refs/tags/v2.2.8 (#241)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-05-07 19:35:00 +08:00
naison
a0137ad485 hotfix: fix alias (#240) 2024-05-07 19:02:39 +08:00
naison
37552d3db9 feat: update krew index version to refs/tags/v2.2.8 (#239)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-05-03 23:51:27 +08:00
kubenetworks
5ac8eac923 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-05-03 15:50:14 +00:00
naison
d2d411a1cb feat: support ssh jump inline (#238)
* feat: support ssh jump inline

* chore: add more comment
2024-05-03 22:28:25 +08:00
naison
d16bdf8fea hotfix: use flag gssapi as default value (#237) 2024-05-01 09:53:00 +08:00
naison
ca18dab08f refactor: refactor cmd status (#236)
Co-authored-by: wencaiwulue <895703375@qq.com>
2024-04-30 22:02:48 +08:00
naison
706afb348d feat: update krew index version to refs/tags/v2.2.7 (#235)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-04-27 20:47:48 +08:00
kubenetworks
def6c7dfdd Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-04-27 12:11:36 +00:00
naison
e64dd428ab chore: ut use macos12 not latest (#234) 2024-04-27 19:39:21 +08:00
naison
9df4efb98b refactor: refactor cmd alias status and disconnect (#233)
* refactor: refactor cmd alias status and disconnect

---------

Co-authored-by: wencaiwulue <895703375@qq.com>
2024-04-27 18:22:34 +08:00
naison
6f6d338656 hotfix: fix not add route bug (#228) 2024-04-27 09:58:54 +08:00
naison
f93b06ea1c feat: cmd alias config file support special (#232) 2024-04-27 09:58:38 +08:00
naison
ada4b51035 hotfix: fix proxy with service mesh failed on latest minikube (#231) 2024-04-26 16:11:16 +08:00
naison
679d097e83 hotfix: add cmd status missing field netif (#230) 2024-04-25 11:44:04 +08:00
naison
c7b437c5d8 hotfix: fix cmd status not correct when tun is gone (#229) 2024-04-25 10:48:46 +08:00
naison
43dad39cca chore: run ut coverage on linux (#227) 2024-04-24 18:54:35 +08:00
naison
d428ee42bc hotfix: add portmap to envoy rule (#224) 2024-04-24 17:32:28 +08:00
naison
0e569fe1a4 hotfix: fix dns method genereateHostsEntry NPE (#226) 2024-04-24 17:32:18 +08:00
naison
fe7be90d0b hotfix: fix tun device no buffer space available (#225) 2024-04-24 17:32:04 +08:00
naison
bacc8cdc26 refactor: remove foreground in daemon (#223) 2024-04-20 20:54:33 +08:00
naison
9c62504489 feat: add command alias (#222)
* feat: add command alias

* feat: kube config path support homedir ~
2024-04-20 19:42:20 +08:00
naison
6060bd8120 feat: update krew index version to refs/tags/v2.2.6 (#221)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-04-16 13:44:43 +08:00
kubenetworks
2cd4de52f4 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-04-16 05:44:32 +00:00
naison
03ac484069 hotfix: downgrade tun driver version (#220)
Co-authored-by: wencaiwulue <895703375@qq.com>
2024-04-16 13:08:13 +08:00
naison
c7b4499503 feat: update krew index version to refs/tags/v2.2.5 (#219)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-04-14 16:54:13 +08:00
kubenetworks
9a7466479b Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-04-14 08:46:14 +00:00
naison
31d7e4debb refactor: refactor dns (#218)
* refactor: refactor dns

* refactor: optimize forward dns server

* refactor: add short domain test

* refactor: fix remove nameserver from resolver bug

---------

Co-authored-by: wencaiwulue <895703375@qq.com>
2024-04-13 16:39:26 +08:00
Pengfei Jiang
52f1d38e56 fix(connect): fix connect foreground exit failed (#214)
* fix(connect): fix connect foreground exit failed

* fix(*): fix lint

* fix: disconnect by kubeconfig instead of id

---------

Co-authored-by: 江鹏飞 <jiangpengfei.jiangpf@bytedance.com>
2024-04-12 23:47:14 +08:00
naison
acd4de313f fix: fix dev mode connect-mode is container -p not works (#217)
Co-authored-by: wencaiwulue <895703375@qq.com>
2024-04-12 21:45:50 +08:00
naison
8dbb80be7c hotfix: fix tun bug (#216)
Co-authored-by: wencaiwulue <895703375@qq.com>
2024-04-12 21:45:31 +08:00
naison
45491f185d hotfix: fix ping bug (#213)
Co-authored-by: wencaiwulue <895703375@qq.com>
2024-04-06 13:16:13 +08:00
naison
4eeecd5255 refactor: refactor get cidr logic (#211)
Co-authored-by: wencaiwulue <895703375@qq.com>
2024-04-05 11:03:52 +08:00
naison
87166494c0 refactor: update go mod library (#210)
refactor: update go mod library and refactor dev logic

Co-authored-by: wencaiwulue <895703375@qq.com>
2024-04-04 12:04:02 +08:00
naison
91b3a2fbdf feat: update krew index version to refs/tags/v2.2.4 (#206)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-04-02 14:04:56 +08:00
kubenetworks
b7615f57c3 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-04-02 05:15:00 +00:00
naison
e5438b297a fix: fix proxy mode bug (#205) 2024-04-02 12:43:40 +08:00
naison
d3aeae7573 hotfix: fix flag context not works bug (#204) 2024-04-01 11:44:59 +08:00
naison
aacdc8a6d0 feat: update gvisor with tag go and go with 1.22 (#202) 2024-03-31 22:42:31 +08:00
naison
fadfd00927 hotfix: fix cpu usage high cause by in loop to call <-time.Tick(xxx) (#201)
Co-authored-by: wencaiwulue <895703375@qq.com>
2024-03-31 11:05:34 +08:00
naison
600e35b8d7 refactor: ssh logic (#194)
* feat: optimize ssh
2024-03-08 19:16:29 +08:00
naison
f3d1c99a04 docs: add codecov badge (#193) 2024-03-05 11:41:07 +08:00
naison
18a5569054 feat: add ut coverage (#192) 2024-03-05 10:02:31 +08:00
naison
1baa1de13f feat: update krew index version to refs/tags/v2.2.3 (#191)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-03-04 13:10:30 +08:00
naison
dcda747d0e fix: fix portmap bug (#190) 2024-03-04 12:38:05 +08:00
naison
2fd6427242 feat: update krew index version to refs/tags/v2.2.3 (#189)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-03-04 00:31:36 +08:00
naison
dc270ca846 fix: fix set hosts bug (#188)
Co-authored-by: wencaiwulue <895703375@qq.com>
2024-03-03 23:58:21 +08:00
naison
ab0cd80b39 feat: update krew index version to refs/tags/v2.2.3 (#187)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-03-03 19:55:31 +08:00
kubenetworks
e920133c88 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-03-03 11:52:38 +00:00
naison
0d64dc7b10 refactor: optimize dns code (#186)
Co-authored-by: wencaiwulue <895703375@qq.com>
2024-03-03 17:31:52 +08:00
naison
96845ba37a feat: disconnect-by-extra-route-dependency (#185)
Co-authored-by: wencaiwulue <895703375@qq.com>
2024-03-03 15:22:26 +08:00
naison
0730cb12b7 refactor: move ssh jump to util (#184)
Co-authored-by: wencaiwulue <895703375@qq.com>
2024-03-03 11:49:44 +08:00
naison
e232bf902e refactor: optimize ssh and set dns server logic (#183)
Co-authored-by: wencaiwulue <895703375@qq.com>
2024-03-03 00:57:47 +08:00
naison
1bc269d901 fix: fix proxy mode add portmap options (#182)
Co-authored-by: wencaiwulue <895703375@qq.com>
2024-03-03 00:53:25 +08:00
naison
a8826b3334 feat: add extra node ip to route table (#179)
* feat: add extra node ip to route table
2024-02-27 18:00:52 +08:00
naison
7c560df82b feat: last connect will first disconnect (#180) 2024-02-27 11:52:47 +08:00
Anton Patsev
939cc8547f Add missing commas, correction of spelling errors (#178) 2024-02-26 00:11:54 +08:00
naison
fed7108eec refactor: move reset command to daemon (#175)
* feat: move cmd reset to daemon

* feat: move cmd reset to daemon

---------

Co-authored-by: wencaiwulue <895703375@qq.com>
2024-02-25 15:49:31 +08:00
naison
2fdfc1d88d feat: proxy mode add portmap option (#176)
Co-authored-by: wencaiwulue <895703375@qq.com>
2024-02-23 21:17:24 +08:00
naison
64cd7709e8 fix: fix helm chart (#177)
Co-authored-by: wencaiwulue <895703375@qq.com>
2024-02-23 21:16:52 +08:00
naison
5773b69367 fix: close udp conn if timeout (#173) 2024-02-19 22:09:46 +08:00
naison
c689f47664 fix: fix remove pid (#172) 2024-02-18 19:28:46 +08:00
naison
1f32a129b6 feat: update krew index version to refs/tags/v2.2.2 (#171)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-02-17 20:11:07 +08:00
naison
01e3456ad3 fix: ssh portmap redo ssh dial (#170) 2024-02-17 19:25:35 +08:00
naison
46fcf5521f feat: update krew index version to refs/tags/v2.2.2 (#169)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-02-15 21:46:04 +08:00
kubenetworks
26f53209c6 Update charts/index.yaml
Signed-off-by: kubenetworks <kubenetworks@users.noreply.github.com>
2024-02-15 13:35:35 +00:00
fengcaiwen
454f67b6c4 feat: github action add release helm chart 2024-02-15 20:57:41 +08:00
naison
bd5c3c4cf6 feat: empty helm charts image tag (#150) 2024-02-15 13:28:28 +08:00
naison
991a840db2 feat: add manifest helm chart (#145)
* feat: add manifest helm chart

* fix: typo

* feat: helm chart

* feat: add github action to release helm chart

---------

Co-authored-by: wencaiwulue <895703375@qq.com>
2024-02-15 12:44:17 +08:00
naison
3ad6127132 feat: optimize code (#149)
Co-authored-by: wencaiwulue <895703375@qq.com>
2024-02-15 11:38:14 +08:00
naison
14e91d5110 feat: update unsafe pkg of go mod (#148) 2024-02-07 21:03:38 +08:00
naison
4abc5f004a fix: fix some bug and optimize ssh logic (#147) 2024-02-07 20:36:10 +08:00
naison
59abb16136 fix: fix proxy mode option image not work bug (#146) 2024-02-06 22:07:44 +08:00
naison
6a232473cd feat: auto upgrade daemon (#144) 2024-02-02 22:24:23 +08:00
naison
878a8190e3 chore: update readme godoc (#143) 2024-02-01 23:03:02 +08:00
naison
d0978aa5b7 fix: fix delete socks file and pid file not work bug (#142) 2024-02-01 22:47:03 +08:00
naison
073c249e96 fix: fix rotate log bug (#141) 2024-02-01 22:22:26 +08:00
naison
78c8afb456 fix: fix ssh hang up issue (#140) 2024-01-30 21:44:17 +08:00
naison
0384de250a fix: add \n before append host entry (#139) 2024-01-29 23:08:50 +08:00
naison
9be04cc149 feat: dhcp server use grpc instead of http (#138) 2024-01-28 14:42:37 +08:00
naison
f9ef4c8dad fix: add lock to handle webhook event 2024-01-27 16:33:02 +08:00
naison
c09ac8f536 fix: add lock in case of rent same ip (#136)
Co-authored-by: 冯才文 <fengcaiwen@buns-macbook-pro.local>
2024-01-27 00:34:18 +08:00
3deep5me
14731fe8e8 Add more documentation about connect mode (#134)
* add disclaimer to short domain resolve

* updated introduction

* Added content menu for Readme

* changed picture for connect mode to draw.io

* removed wireguard from picture
2024-01-26 00:47:56 +08:00
naison
dc33331a8c feat: add reference doc (#131) 2024-01-18 10:24:52 +08:00
naison
879bdbc03d feat: update krew index version to refs/tags/v2.2.1 (#129)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-01-17 19:57:59 +08:00
naison
8eeb420245 feat: set IPv6 addr only if IPv6 is enabled on Windows (#128) 2024-01-17 19:11:29 +08:00
naison
847c2c8cc1 feat: update go mod to v2 (#127)
* feat: update go mod to v2

* feat: typo
2024-01-14 18:00:12 +08:00
naison
988c2e7fdc feat: update krew index version to refs/tags/v2.2.1 (#126)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-01-12 11:22:45 +08:00
naison
8c55d39af2 feat: set envoy log dynamic (#125)
* feat: set envoy log level dynamic

* feat: set log level
2024-01-12 10:47:49 +08:00
naison
e2cb639c6e feat: set envoy log level dynamic (#124) 2024-01-12 10:29:43 +08:00
naison
f9a67a2773 feat: update krew index version to refs/tags/v2.2.1 (#122)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-01-11 21:16:12 +08:00
naison
cbf3cdff42 fix: add service clusterIP (#121) 2024-01-11 20:43:42 +08:00
naison
d35656f3df feat: update krew index version to refs/tags/v2.2.1 (#118)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-01-10 21:00:49 +08:00
naison
9f97a9202d feat: set dns searchList by LUID not powershell.exe (#117) 2024-01-10 20:16:50 +08:00
naison
ae2b97a4b4 feat: update krew index version to refs/tags/v2.2.1 (#115)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-01-10 12:38:12 +08:00
fengcaiwen
156ee998cd feat: replace envoy config tunIP if header and uid is same 2024-01-10 12:05:32 +08:00
naison
b2a6e602e6 feat: update krew index version to refs/tags/v2.2.1 (#113)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2024-01-09 16:36:48 +08:00
fengcaiwen
8650e4ecf9 hotfix: fix too many portforward and too many watch service 2024-01-09 16:01:43 +08:00
naison
4a2abc24da chore: update README (#111)
* chore: update README

* chore: rename org name
2024-01-01 18:01:54 +08:00
naison
a66fbb1637 feat: upgrade client-go version to v0.29.0 (#109)
* feat: upgrade client-go version to v0.29.0

* feat: upgrade coredns version

* chore: update README.md
2024-01-01 16:45:54 +08:00
naison
c3c6864b47 fix: remove svc from hosts if svc deleted (#107) 2023-12-29 23:07:07 +08:00
naison
80ffd2f468 feat: update krew index version to refs/tags/v2.2.0 (#106)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2023-12-18 20:28:39 +08:00
naison
0ad6b103cb fix: add hosts only if not exists 2023-12-18 11:53:35 +00:00
fengcaiwen
d9977a5c11 fix: setup docker 2023-12-17 09:59:24 +08:00
naison
6fbae091ec feat: update krew index version to refs/tags/v2.2.0 (#105)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2023-12-16 23:57:01 +08:00
fengcaiwen
8505c26830 fix: close file 2023-12-16 23:27:58 +08:00
fengcaiwen
7c53cbc79b fix: download bug 2023-12-16 23:21:55 +08:00
fengcaiwen
c0da61cd4b fix: fix hosts bug 2023-12-16 22:52:49 +08:00
naison
1644201978 feat: optimize readme 2023-12-11 06:35:25 +00:00
naison
91ee5be981 feat: update krew index version to refs/tags/v2.1.3 (#104)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2023-12-10 19:36:46 +08:00
fengcaiwen
74bb3d3746 feat: update README.md 2023-12-10 18:54:00 +08:00
naison
51bb3b8700 fix: fix linux /etc/resolv.conf not restore bug (#103)
* fix: fix linux /etc/resolv.conf not restore bug

* fix: fix linux /etc/resolv.conf not restore bug
2023-12-10 18:53:07 +08:00
naison
c18b56eb2a fix: device or resource busy (#102) 2023-12-10 17:52:41 +08:00
naison
de050c2944 feat: resize ssh terminal (#101) 2023-12-01 22:49:21 +08:00
naison
49876dee05 feat: update krew index version to refs/tags/v2.1.2 (#100)
Co-authored-by: wencaiwulue <wencaiwulue@users.noreply.github.com>
2023-11-28 21:47:15 +08:00
7739 changed files with 1021089 additions and 243857 deletions

.github/krew.yaml (27 changed lines)

@@ -4,20 +4,19 @@ metadata:
name: kubevpn
spec:
version: {{ .TagName }}
-homepage: https://github.com/KubeNetworks/kubevpn
-shortDescription: "A vpn tunnel tools which can connect to kubernetes cluster network"
+homepage: https://github.com/kubenetworks/kubevpn
+shortDescription: "KubeVPN offers a Cloud Native Dev Environment that connects to kubernetes cluster network"
description: |
-KubeVPN is Cloud Native Dev Environment, connect to kubernetes cluster network, you can access remote kubernetes
-cluster network, remote
-kubernetes cluster service can also access your local service. and more, you can run your kubernetes pod on local Docker
-container with same environment、volume、and network. you can develop your application on local PC totally.
+KubeVPN offers a Cloud-Native Dev Environment that seamlessly connects to your Kubernetes cluster network.
+Gain access to the Kubernetes cluster network effortlessly using service names or Pod IP/Service IP. Facilitate the interception of inbound traffic from remote Kubernetes cluster services to your local PC through a service mesh and more.
+For instance, you have the flexibility to run your Kubernetes pod within a local Docker container, ensuring an identical environment, volume, and network setup. With KubeVPN, empower yourself to develop applications entirely on your local PC!
platforms:
- selector:
matchLabels:
os: windows
arch: amd64
-{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_amd64.zip" .TagName }}
+{{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_amd64.zip" .TagName }}
files:
- from: ./bin/kubevpn.exe
to: .
@@ -28,7 +27,7 @@ spec:
matchLabels:
os: windows
arch: arm64
-{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_arm64.zip" .TagName }}
+{{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_arm64.zip" .TagName }}
files:
- from: ./bin/kubevpn.exe
to: .
@@ -39,7 +38,7 @@ spec:
matchLabels:
os: windows
arch: 386
-{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_386.zip" .TagName }}
+{{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_386.zip" .TagName }}
files:
- from: ./bin/kubevpn.exe
to: .
@@ -50,7 +49,7 @@ spec:
matchLabels:
os: linux
arch: amd64
-{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_amd64.zip" .TagName }}
+{{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_amd64.zip" .TagName }}
files:
- from: ./bin/kubevpn
to: .
@@ -61,7 +60,7 @@ spec:
matchLabels:
os: linux
arch: arm64
-{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_arm64.zip" .TagName }}
+{{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_arm64.zip" .TagName }}
files:
- from: ./bin/kubevpn
to: .
@@ -72,7 +71,7 @@ spec:
matchLabels:
os: linux
arch: 386
-{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_386.zip" .TagName }}
+{{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_386.zip" .TagName }}
files:
- from: ./bin/kubevpn
to: .
@@ -83,7 +82,7 @@ spec:
matchLabels:
os: darwin
arch: amd64
-{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_darwin_amd64.zip" .TagName }}
+{{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_darwin_amd64.zip" .TagName }}
files:
- from: ./bin/kubevpn
to: .
@@ -94,7 +93,7 @@ spec:
matchLabels:
os: darwin
arch: arm64
-{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_darwin_arm64.zip" .TagName }}
+{{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_darwin_arm64.zip" .TagName }}
files:
- from: ./bin/kubevpn
to: .


@@ -16,49 +16,49 @@ KubeVPN ${RELEASE} is available now ! 🎉
**Mac** (x86-64/Intel)
\`\`\`
-curl -Lo kubevpn.zip https://github.com/KubeNetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_darwin_amd64.zip && unzip -d kubevpn kubevpn.zip
+curl -Lo kubevpn.zip https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_darwin_amd64.zip && unzip -d kubevpn kubevpn.zip
\`\`\`
**Mac** (AArch64/Apple M1 silicon)
\`\`\`
-curl -Lo kubevpn.zip https://github.com/KubeNetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_darwin_arm64.zip && unzip -d kubevpn kubevpn.zip
+curl -Lo kubevpn.zip https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_darwin_arm64.zip && unzip -d kubevpn kubevpn.zip
\`\`\`
**Linux** (x86-64)
\`\`\`
-curl -Lo kubevpn.zip https://github.com/KubeNetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_linux_amd64.zip && unzip -d kubevpn kubevpn.zip
+curl -Lo kubevpn.zip https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_linux_amd64.zip && unzip -d kubevpn kubevpn.zip
\`\`\`
**Linux** (AArch64)
\`\`\`
-curl -Lo kubevpn.zip https://github.com/KubeNetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_linux_arm64.zip && unzip -d kubevpn kubevpn.zip
+curl -Lo kubevpn.zip https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_linux_arm64.zip && unzip -d kubevpn kubevpn.zip
\`\`\`
**Linux** (i386)
\`\`\`
-curl -Lo kubevpn.zip https://github.com/KubeNetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_linux_386.zip && unzip -d kubevpn kubevpn.zip
+curl -Lo kubevpn.zip https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_linux_386.zip && unzip -d kubevpn kubevpn.zip
\`\`\`
**Windows** (x86-64)
\`\`\`
-curl -LO https://github.com/KubeNetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_windows_amd64.zip
+curl -LO https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_windows_amd64.zip
\`\`\`
**Windows** (AArch64)
\`\`\`
-curl -LO https://github.com/KubeNetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_windows_arm64.zip
+curl -LO https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_windows_arm64.zip
\`\`\`
**Windows** (i386)
\`\`\`
-curl -LO https://github.com/KubeNetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_windows_386.zip
+curl -LO https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_windows_386.zip
\`\`\`
## Checksums
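For concreteness, here is the darwin/arm64 command from the template above with ${RELEASE} expanded to a tag that appears in this commit range (v2.3.12); any other release tag works the same way:

```
RELEASE=v2.3.12
curl -Lo kubevpn.zip https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_darwin_arm64.zip && unzip -d kubevpn kubevpn.zip
```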

.github/workflows/coverage.yml (new file, 79 lines)

@@ -0,0 +1,79 @@
name: Coverage
on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]
jobs:
  linux:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: '1.23'
          check-latest: true
      - name: Setup Minikube
        id: minikube
        timeout-minutes: 30
        uses: medyagh/setup-minikube@latest
        with:
          cache: true
      - name: Kubernetes info
        run: |
          kubectl cluster-info
          cat ~/.kube/config
          kubectl get pods -n kube-system -o wide
      - name: Install demo bookinfo
        run: |
          minikube image load --remote istio/examples-bookinfo-details-v1:1.16.2
          minikube image load --remote istio/examples-bookinfo-ratings-v1:1.16.2
          minikube image load --remote istio/examples-bookinfo-reviews-v1:1.16.2
          minikube image load --remote istio/examples-bookinfo-productpage-v1:1.16.2
          minikube image load --remote naison/authors:latest
          minikube image load --remote nginx:latest
          minikube image load --remote naison/kubevpn:test
          minikube image ls
          eval $(minikube docker-env)
          kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
      - name: Build
        run: |
          export VERSION=${{github.event.pull_request.head.sha}}
          if [[ -z "$VERSION" ]]; then
            export VERSION=${{ github.sha }}
          fi
          make kubevpn-linux-amd64
          chmod +x ./bin/kubevpn
          cp ./bin/kubevpn /usr/local/bin/kubevpn
          kubevpn version
      - name: Wait for pods reviews to be ready
        run: |
          kubectl wait pods -l app=reviews --for=condition=Ready --timeout=3600s
          kubectl wait pods -l app=productpage --for=condition=Ready --timeout=3600s
          kubectl get svc -A -o wide
          kubectl get pod -A -o wide
          kubectl get all -o wide
          kubectl get nodes -o yaml
          ifconfig
          route -n
          sudo ln /usr/bin/resolvectl /usr/bin/systemd-resolve
      - name: Test
        run: make ut
      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1 # v4.0.2
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          verbose: true
          slug: wencaiwulue/kubevpn
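This job can be approximated locally. A minimal sketch, assuming a running minikube, Go 1.23, and a checkout of the repo; `git rev-parse HEAD` stands in for the `${{ github.sha }}` expression the workflow uses:

```
minikube start
kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
kubectl wait pods -l app=reviews --for=condition=Ready --timeout=3600s
export VERSION=$(git rev-parse HEAD)   # stand-in for ${{ github.sha }}
make kubevpn-linux-amd64
sudo cp ./bin/kubevpn /usr/local/bin/kubevpn
kubevpn version
make ut
```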


@@ -11,18 +11,19 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Set up Go
-uses: actions/setup-go@v2
+uses: actions/setup-go@v5
with:
-go-version: '1.20'
+go-version: '1.23'
check-latest: true
- name: Checkout code
-uses: actions/checkout@v2
+uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Push image to docker hub
run: |
echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USER }} --password-stdin
echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
docker buildx create --use
make container
@@ -52,13 +53,13 @@ jobs:
git reset --hard
- name: Upload RELEASE_VERSION
-uses: actions/upload-artifact@v2
+uses: actions/upload-artifact@v4
with:
name: RELEASE_VERSION
path: RELEASE_VERSION
- name: Upload UPLOAD_URL
-uses: actions/upload-artifact@v2
+uses: actions/upload-artifact@v4
with:
name: UPLOAD_URL
path: UPLOAD_URL
@@ -95,13 +96,128 @@ jobs:
labels: |
report
automated pr
# team-reviewers: |
# owners
# maintainers
draft: false
# - name: Update new version in krew-index
# uses: rajatjindal/krew-release-bot@v0.0.43
# with:
# krew_template_file: .github/krew.yaml
# debug: true
release-helm-chart:
name: Release KubeVPN Helm Chart
needs: [ build ]
runs-on: ubuntu-latest
steps:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.23'
check-latest: true
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Helm tool installer
uses: azure/setup-helm@v4
with:
version: "v3.6.3"
- name: Change chart version
run: |
VERSION=${GITHUB_REF#refs/*/}
CHART_VERSION=${VERSION/#v/}
sed -i "s/^appVersion:.*$/appVersion: \"${VERSION}\"/;s/^version:.*$/version: ${CHART_VERSION}/" charts/kubevpn/Chart.yaml
sed -i "s/tag:.*$/tag: \"${VERSION}\"/" charts/kubevpn/values.yaml
- name: Tar chart
run: |
VERSION=${GITHUB_REF#refs/*/}
CHART_VERSION=${VERSION/#v/}
tar --transform 's/^charts\/kubevpn/kubevpn/' -zcf kubevpn-${CHART_VERSION}.tgz charts/kubevpn
shasum -a 256 kubevpn-${CHART_VERSION}.tgz | awk '{print $1}' > kubevpn-${CHART_VERSION}.tgz-SHA256
- name: Download UPLOAD_URL
uses: actions/download-artifact@v4
with:
name: UPLOAD_URL
- name: Get Release UPLOAD_URL
id: get_release_info
run: |
UploadUrl=$(cat ./UPLOAD_URL)
echo "::set-output name=upload_url::$UploadUrl"
- name: Get assert name
id: get_assert_info
run: |
VERSION=${GITHUB_REF#refs/*/}
CHART_VERSION=${VERSION/#v/}
AssertName=kubevpn-${CHART_VERSION}.tgz
echo "::set-output name=assert_name::$AssertName"
- name: Get assert SHA256 name
id: get_assert_info_sha256
run: |
VERSION=${GITHUB_REF#refs/*/}
CHART_VERSION=${VERSION/#v/}
AssertName=kubevpn-${CHART_VERSION}.tgz-SHA256
echo "::set-output name=assert_name::$AssertName"
- name: Upload Release Asset KubeVPN Server Chart
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.get_release_info.outputs.upload_url }}
asset_path: ${{ steps.get_assert_info.outputs.assert_name }}
asset_name: ${{ steps.get_assert_info.outputs.assert_name }}
asset_content_type: application/octet-stream
- name: Upload Release Asset KubeVPN Chart SHA256
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.get_release_info.outputs.upload_url }}
asset_path: ${{ steps.get_assert_info_sha256.outputs.assert_name }}
asset_name: ${{ steps.get_assert_info_sha256.outputs.assert_name }}
asset_content_type: application/octet-stream
github-pages-deploy:
name: Release Helm Chart To branch master
permissions:
contents: write
runs-on: ubuntu-latest
needs: release-helm-chart
steps:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.23'
check-latest: true
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Install Helm
uses: azure/setup-helm@v4
- name: Change chart version
run: |
VERSION=${GITHUB_REF#refs/*/}
CHART_VERSION=${VERSION/#v/}
sed -i "s/^appVersion:.*$/appVersion: \"${VERSION}\"/;s/^version:.*$/version: ${CHART_VERSION}/" charts/kubevpn/Chart.yaml
sed -i "s/tag:.*$/tag: \"${VERSION}\"/" charts/kubevpn/values.yaml
- name: Package and upload helm chart
run: |
# download helm chart releaser
curl -sSLo cr.tar.gz "https://github.com/helm/chart-releaser/releases/download/v1.6.1/chart-releaser_1.6.1_linux_amd64.tar.gz"
tar -xzf cr.tar.gz
rm -f cr.tar.gz
owner=$(cut -d '/' -f 1 <<< "$GITHUB_REPOSITORY")
repo=$(cut -d '/' -f 2 <<< "$GITHUB_REPOSITORY")
# package chart
./cr package charts/$repo
# update index and push to github pages
git config user.email "$owner@users.noreply.github.com"
git config user.name "$owner"
./cr index \
--owner "$owner" \
--git-repo "$repo" \
--token "${{ secrets.CREATE_HELM_PR }}" \
--release-name-template "v{{ .Version }}" \
--index-path ./index.yaml \
--charts-repo https://github.com/$owner/$repo \
--pages-branch master \
--pages-index-path charts/index.yaml \
--push
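The cr invocation above packages charts/kubevpn and maintains the index at charts/index.yaml on the master branch, matching the recurring "Update charts/index.yaml" commits in the log. A hypothetical consumer-side sketch — the repo URL below is an assumption inferred from --charts-repo and --pages-index-path, so verify it against the project docs:

```
# Assumed chart index location; inferred from the workflow, not confirmed by this diff
helm repo add kubevpn https://raw.githubusercontent.com/kubenetworks/kubevpn/master/charts
helm repo update
helm install kubevpn kubevpn/kubevpn
```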


@@ -10,33 +10,37 @@ jobs:
image:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v4
- name: Set up Go
-uses: actions/setup-go@v2
+uses: actions/setup-go@v5
with:
-go-version: '1.20'
+go-version: '1.23'
check-latest: true
- name: Push image to docker hub
run: |
echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USER }} --password-stdin
echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
docker buildx create --use
-export VERSION=test
-make container
+export VERSION=${{github.event.pull_request.head.sha}}
+if [[ -z "$VERSION" ]]; then
+export VERSION=${{ github.sha }}
+fi
+make container-test
linux:
runs-on: ubuntu-latest
needs: [ "image" ]
steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v4
- name: Set up Go
-uses: actions/setup-go@v2
+uses: actions/setup-go@v5
with:
-go-version: '1.20'
+go-version: '1.23'
check-latest: true
- name: Setup Minikube
id: minikube
timeout-minutes: 30
-uses: medyagh/setup-minikube@master
+uses: medyagh/setup-minikube@latest
with:
cache: true
@@ -56,11 +60,14 @@ jobs:
minikube image load --remote naison/kubevpn:test
minikube image ls
eval $(minikube docker-env)
-kubectl apply -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
- name: Build
run: |
-export VERSION=test
+export VERSION=${{github.event.pull_request.head.sha}}
+if [[ -z "$VERSION" ]]; then
+export VERSION=${{ github.sha }}
+fi
make kubevpn-linux-amd64
chmod +x ./bin/kubevpn
cp ./bin/kubevpn /usr/local/bin/kubevpn
@@ -79,21 +86,26 @@ jobs:
sudo ln /usr/bin/resolvectl /usr/bin/systemd-resolve
- name: Test
-run: go test -v -failfast ./... -timeout=60m
+run: make ut
macos:
-runs-on: macos-latest
+runs-on: macos-13
needs: [ "image" ]
steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v4
- name: Set up Go
-uses: actions/setup-go@v2
+uses: actions/setup-go@v5
with:
-go-version: '1.20'
+go-version: '1.23'
check-latest: true
# https://github.com/crazy-max/ghaction-setup-docker/issues/108
- name: Set up QEMU
uses: docker/actions-toolkit/.github/actions/macos-setup-qemu@19ca9ade20f5da695f76a10988d6532058575f82
- name: Set up Docker
-uses: crazy-max/ghaction-setup-docker@v1.4.0
+uses: crazy-max/ghaction-setup-docker@v3
- name: Install minikube
run: |
@@ -121,11 +133,14 @@ jobs:
minikube image load --remote naison/kubevpn:test
minikube image ls
eval $(minikube docker-env)
-kubectl apply -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
- name: Build
run: |
export VERSION=test
export VERSION=${{github.event.pull_request.head.sha}}
if [[ -z "$VERSION" ]]; then
export VERSION=${{ github.sha }}
fi
make kubevpn-darwin-amd64
chmod +x ./bin/kubevpn
cp ./bin/kubevpn /usr/local/bin/kubevpn
@@ -143,21 +158,21 @@ jobs:
netstat -anr
- name: Test
run: go test -v -failfast ./... -timeout=60m
run: make ut
windows:
runs-on: windows-latest
needs: [ "image" ]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.20'
go-version: '1.23'
- name: Set up Docker
uses: crazy-max/ghaction-setup-docker@v1.4.0
uses: crazy-max/ghaction-setup-docker@v3
- run: |
docker info --format '{{.OSType}}'
- run: |
@@ -166,14 +181,4 @@ jobs:
choco install make
- name: Build
run: make kubevpn-windows-amd64
upload-coverage-reports-to-codecov:
runs-on: ubuntu-latest
if: ${{ always() }}
needs: [ "linux", "macos", "windows" ]
steps:
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
run: make kubevpn-windows-amd64

View File

@@ -23,12 +23,12 @@ jobs:
arch: 386
steps:
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: '1.20'
go-version: '1.23'
check-latest: true
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Build kubevpn
run: |

2
.gitignore vendored
View File

@@ -1,8 +1,6 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`

View File

@@ -1,25 +1,27 @@
VERSION ?= $(shell git tag -l --sort=v:refname | tail -1)
GIT_COMMIT := $(shell git describe --match=NeVeRmAtCh --always --abbrev=40)
BUILD_TIME := $(shell date +"%Y-%m-%dT%H:%M:%SZ")
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
GIT_COMMIT ?= $(shell git describe --match=NeVeRmAtCh --always --abbrev=7)
BUILD_TIME ?= $(shell date +"%Y-%m-%dT%H:%M:%SZ")
BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
GOOS := $(shell go env GOHOSTOS)
GOARCH := $(shell go env GOHOSTARCH)
TARGET := kubevpn-${GOOS}-${GOARCH}
OS_ARCH := ${GOOS}/${GOARCH}
BASE := github.com/wencaiwulue/kubevpn
BASE := github.com/wencaiwulue/kubevpn/v2
FOLDER := ${BASE}/cmd/kubevpn
BUILD_DIR := ./build
OUTPUT_DIR := ./bin
BUILD_DIR ?= ./build
OUTPUT_DIR ?= ./bin
REGISTRY ?= docker.io
NAMESPACE ?= naison
REPOSITORY ?= kubevpn
IMAGE ?= $(REGISTRY)/$(NAMESPACE)/$(REPOSITORY):$(VERSION)
IMAGE_DEFAULT = docker.io/naison/kubevpn:latest
IMAGE_LATEST ?= docker.io/naison/kubevpn:latest
IMAGE_GH ?= ghcr.io/kubenetworks/kubevpn:$(VERSION)
# Setup the -ldflags option for go build here, interpolate the variable values
LDFLAGS=--ldflags "\
# add '-tags noassets' for syncthing gui
LDFLAGS=-tags noassets --ldflags "-s -w\
-X ${BASE}/pkg/config.Image=${IMAGE} \
-X ${BASE}/pkg/config.Version=${VERSION} \
-X ${BASE}/pkg/config.GitCommit=${GIT_COMMIT} \
@@ -84,21 +86,29 @@ kubevpn-linux-386:
.PHONY: container
container:
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE} -t ${IMAGE_DEFAULT} -f $(BUILD_DIR)/Dockerfile --push .
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE} -t ${IMAGE_LATEST} -t ${IMAGE_GH} -f $(BUILD_DIR)/Dockerfile --push .
############################ build local
.PHONY: container-local
container-local: kubevpn-linux-amd64
docker buildx build --platform linux/amd64,linux/arm64 -t docker.io/naison/kubevpn:latest -f $(BUILD_DIR)/local.Dockerfile --push .
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE_LATEST} -f $(BUILD_DIR)/local.Dockerfile --push .
.PHONY: container-test
container-test: kubevpn-linux-amd64
docker buildx build --platform linux/amd64,linux/arm64 -t docker.io/naison/kubevpn:test -f $(BUILD_DIR)/test.Dockerfile --push .
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE} -t ${IMAGE_GH} -f $(BUILD_DIR)/test.Dockerfile --push .
.PHONY: version
version:
go run github.com/wencaiwulue/kubevpn/pkg/util/krew
go run ${BASE}/pkg/util/krew
.PHONY: gen
gen:
go generate ./...
.PHONY: ut
ut:
go test -tags=noassets -coverprofile=coverage.txt -coverpkg=./... -v ./... -timeout=60m
.PHONY: cover
cover: ut
go tool cover -html=coverage.txt
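
The two new targets chain together: `ut` runs the tests with the `noassets` build tag and writes `coverage.txt`, and `cover` renders that file. A typical local loop:

```shell
# Run unit tests with coverage, then open the HTML report.
make ut
make cover
```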

641
README.md
View File

@@ -1,128 +1,144 @@
![kubevpn](https://raw.githubusercontent.com/wencaiwulue/kubevpn/master/samples/flat_log.png)
[![GitHub Workflow][1]](https://github.com/KubeNetworks/kubevpn/actions)
[![Go Version][2]](https://github.com/KubeNetworks/kubevpn/blob/master/go.mod)
[![Go Report][3]](https://goreportcard.com/badge/github.com/KubeNetworks/kubevpn)
[![Maintainability][4]](https://codeclimate.com/github/KubeNetworks/kubevpn/maintainability)
[![GitHub License][5]](https://github.com/KubeNetworks/kubevpn/blob/main/LICENSE)
[![GitHub Workflow][1]](https://github.com/kubenetworks/kubevpn/actions)
[![Go Version][2]](https://github.com/kubenetworks/kubevpn/blob/master/go.mod)
[![Go Report][3]](https://goreportcard.com/report/github.com/wencaiwulue/kubevpn)
[![Maintainability][4]](https://codeclimate.com/github/kubenetworks/kubevpn/maintainability)
[![GitHub License][5]](https://github.com/kubenetworks/kubevpn/blob/main/LICENSE)
[![Docker Pulls][6]](https://hub.docker.com/r/naison/kubevpn)
[![Releases][7]](https://github.com/KubeNetworks/kubevpn/releases)
[![GoDoc](https://godoc.org/github.com/KubeNetworks/kubevpn?status.png)](https://godoc.org/github.com/KubeNetworks/kubevpn)
[![Test coverage](http://gocover.io/_badge/github.com/KubeNetworks/kubevpn)](https://gocover.io/github.com/KubeNetworks/kubevpn)
[![Releases][7]](https://github.com/kubenetworks/kubevpn/releases)
[![GoDoc](https://godoc.org/github.com/kubenetworks/kubevpn?status.png)](https://pkg.go.dev/github.com/wencaiwulue/kubevpn/v2)
[![codecov](https://codecov.io/gh/wencaiwulue/kubevpn/graph/badge.svg?token=KMDSINSDEP)](https://codecov.io/gh/wencaiwulue/kubevpn)
[1]: https://img.shields.io/github/actions/workflow/status/kubenetworks/kubevpn/release.yml?logo=github
[1]: https://img.shields.io/github/actions/workflow/status/KubeNetworks/kubevpn/release.yml?logo=github
[2]: https://img.shields.io/github/go-mod/go-version/kubenetworks/kubevpn?logo=go
[2]: https://img.shields.io/github/go-mod/go-version/KubeNetworks/kubevpn?logo=go
[3]: https://img.shields.io/badge/go%20report-A+-brightgreen.svg?style=flat
[3]: https://goreportcard.com/badge/github.com/wencaiwulue/kubevpn?style=flat
[4]: https://api.codeclimate.com/v1/badges/b5b30239174fc6603aca/maintainability
[5]: https://img.shields.io/github/license/KubeNetworks/kubevpn
[5]: https://img.shields.io/github/license/kubenetworks/kubevpn
[6]: https://img.shields.io/docker/pulls/naison/kubevpn?logo=docker
[7]: https://img.shields.io/github/v/release/KubeNetworks/kubevpn?logo=smartthings
[7]: https://img.shields.io/github/v/release/kubenetworks/kubevpn?logo=smartthings
# KubeVPN
[中文](README_ZH.md) | [English](README.md) | [Wiki](https://github.com/KubeNetworks/kubevpn/wiki/Architecture)
[中文](README_ZH.md) | [English](README.md) | [Wiki](https://github.com/kubenetworks/kubevpn/wiki/Architecture)
KubeVPN is Cloud Native Dev Environment, connect to kubernetes cluster network, you can access remote kubernetes
cluster network, remote
kubernetes cluster service can also access your local service. and more, you can run your kubernetes pod on local Docker
container with same environment、volume、and network. you can develop your application on local PC totally.
KubeVPN offers a Cloud-Native Dev Environment that seamlessly connects to your Kubernetes cluster network.
Gain access to the Kubernetes cluster network effortlessly using service names or Pod IP/Service IP. Facilitate the
interception of inbound traffic from remote Kubernetes cluster services to your local PC through a service mesh and
more.
For instance, you have the flexibility to run your Kubernetes pod within a local Docker container, ensuring an identical
environment, volume, and network setup.
With KubeVPN, empower yourself to develop applications entirely on your local PC!
## Content
1. [QuickStart](./README.md#quickstart)
2. [Functions](./README.md#functions)
3. [Architecture](./README.md#architecture)
4. [Contributions](./README.md#Contributions)
## QuickStart
#### Install from GitHub release
[LINK](https://github.com/KubeNetworks/kubevpn/releases/latest)
#### Install from custom krew index
### Install from [brew](https://brew.sh/) (macOS / Linux)
```shell
(
kubectl krew index add kubevpn https://github.com/KubeNetworks/kubevpn.git && \
kubectl krew install kubevpn/kubevpn && kubectl kubevpn
)
brew install kubevpn
```
#### Install from build it manually
### Install from [scoop](https://scoop.sh/) (Windows)
```shell
(
git clone https://github.com/KubeNetworks/kubevpn.git && \
cd kubevpn && make kubevpn && ./bin/kubevpn
)
scoop bucket add extras
scoop install kubevpn
```
### Install from [krew](https://krew.sigs.k8s.io/) (Windows / macOS / Linux)
```shell
kubectl krew index add kubevpn https://github.com/kubenetworks/kubevpn.git
kubectl krew install kubevpn/kubevpn
kubectl kubevpn
```
### Install from GitHub release (Windows / macOS / Linux)
[https://github.com/kubenetworks/kubevpn/releases/latest](https://github.com/kubenetworks/kubevpn/releases/latest)
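
For a manual install from a release asset, something like the following works; the asset name is illustrative (copy the real one for your OS/arch from the releases page):

```shell
# Hypothetical asset name -- check the release page for the exact file.
curl -sSLo kubevpn.zip "https://github.com/kubenetworks/kubevpn/releases/latest/download/kubevpn_darwin_amd64.zip"
unzip kubevpn.zip   # archive layout may differ between releases
sudo install -m 0755 kubevpn /usr/local/bin/kubevpn
kubevpn version
```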
### Install bookinfo as demo application
```shell
kubectl apply -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
```
To clean up after the test:
```shell
kubectl delete -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
kubectl delete -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
```
## Functions
### Connect to k8s cluster network
Use the command `kubevpn connect` to connect to the k8s cluster network. The `Password:` prompt requires your computer
password, because root privileges are needed to create a tun device.
```shell
➜ ~ kubevpn connect
Password:
start to connect
get cidr from cluster info...
get cidr from cluster info ok
get cidr from cni...
wait pod cni-net-dir-kubevpn to be running timeout, reason , ignore
get cidr from svc...
get cidr from svc ok
get cidr successfully
traffic manager not exist, try to create it...
label namespace default
create serviceAccount kubevpn-traffic-manager
create roles kubevpn-traffic-manager
create roleBinding kubevpn-traffic-manager
create service kubevpn-traffic-manager
create deployment kubevpn-traffic-manager
pod kubevpn-traffic-manager-66d969fd45-9zlbp is Pending
Starting connect
Getting network CIDR from cluster info...
Getting network CIDR from CNI...
Getting network CIDR from services...
Labeling Namespace default
Creating ServiceAccount kubevpn-traffic-manager
Creating Roles kubevpn-traffic-manager
Creating RoleBinding kubevpn-traffic-manager
Creating Service kubevpn-traffic-manager
Creating MutatingWebhookConfiguration kubevpn-traffic-manager
Creating Deployment kubevpn-traffic-manager
Pod kubevpn-traffic-manager-66d969fd45-9zlbp is Pending
Container Reason Message
control-plane ContainerCreating
vpn ContainerCreating
webhook ContainerCreating
pod kubevpn-traffic-manager-66d969fd45-9zlbp is Running
Pod kubevpn-traffic-manager-66d969fd45-9zlbp is Running
Container Reason Message
control-plane ContainerRunning
vpn ContainerRunning
webhook ContainerRunning
Creating mutatingWebhook_configuration for kubevpn-traffic-manager
update ref count successfully
port forward ready
tunnel connected
dns service ok
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
+----------------------------------------------------------+
| Now you can access resources in the kubernetes cluster ! |
+----------------------------------------------------------+
➜ ~
```
Now you are connected to the cluster network; use the command `kubevpn status` to check the status.
```shell
➜ ~ kubevpn status
ID Mode Cluster Kubeconfig Namespace Status
0 full ccijorbccotmqodvr189g /Users/bytedance/.kube/config default Connected
ID Mode Cluster Kubeconfig Namespace Status
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
➜ ~
```
Use the IP of pod `productpage-788df7ff7f-jpkcs`, here `172.29.2.134`:
```shell
➜ ~ kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
@@ -134,6 +150,8 @@ ratings-77b6cd4499-zvl6c 1/1 Running 0
reviews-85c88894d9-vgkxd 1/1 Running 0 24d 172.29.2.249 192.168.0.5 <none> <none>
```
Use `ping` to test the connection; it works:
```shell
➜ ~ ping 172.29.2.134
PING 172.29.2.134 (172.29.2.134): 56 data bytes
@@ -147,6 +165,8 @@ PING 172.29.2.134 (172.29.2.134): 56 data bytes
round-trip min/avg/max/stddev = 54.293/55.380/56.270/0.728 ms
```
Use the IP of service `productpage`, here `172.21.10.49`:
```shell
➜ ~ kubectl get services -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
@@ -159,6 +179,8 @@ ratings ClusterIP 172.21.3.247 <none> 9080/TCP
reviews ClusterIP 172.21.8.24 <none> 9080/TCP 114d app=reviews
```
Use `curl` to test the service connection:
```shell
➜ ~ curl 172.21.10.49:9080
<!DOCTYPE html>
@@ -170,8 +192,18 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
<meta name="viewport" content="width=device-width, initial-scale=1">
```
Seems good too~
### Domain resolve
Supports k8s DNS name resolution. A Pod/Service named `productpage` in the `default` namespace can be resolved by the following names:
- `productpage`
- `productpage.default`
- `productpage.default.svc.cluster.local`
```shell
➜ ~ curl productpage.default.svc.cluster.local:9080
<!DOCTYPE html>
@@ -185,7 +217,8 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
### Short domain resolve
To access the service in the cluster, service name or you can use the short domain name, such as `productpage.default.svc.cluster.local`
To access a service in the cluster, you can use just the service name as a short domain, such
as `productpage`
```shell
➜ ~ curl productpage:9080
@@ -198,50 +231,94 @@ To access the service in the cluster, service name or you can use the short doma
...
```
***Disclaimer:*** This only works in the namespace where kubevpn-traffic-manager is deployed. Otherwise,
use [Domain resolve](./README.md#domain-resolve).
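
For workloads in other namespaces, fall back to the namespaced forms. A minimal sketch, where `demo` is a hypothetical namespace used only for illustration:

```shell
# Short names resolve only in the traffic-manager's namespace;
# elsewhere, qualify the service name with its namespace.
curl productpage.demo:9080
curl productpage.demo.svc.cluster.local:9080
```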
### Connect to multiple kubernetes cluster network
- Mode `lite`: can connect to multiple cluster networks; designed only for connecting to multiple clusters.
- Mode `full`: not only connects to the cluster network, but also supports proxying workloads' inbound traffic to the local PC.
Already connected to cluster `ccijorbccotmqodvr189g` in mode `full`:
```shell
➜ ~ kubevpn status
ID Mode Cluster Kubeconfig Namespace Status
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
```
Then connect to another cluster `ccidd77aam2dtnc3qnddg` in mode `lite`:
```shell
➜ ~ kubevpn connect -n default --kubeconfig ~/.kube/dev_config --lite
Starting connect
Got network CIDR from cache
Use exist traffic manager
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
+----------------------------------------------------------+
| Now you can access resources in the kubernetes cluster ! |
+----------------------------------------------------------+
```
Use the command `kubevpn status` to check the connection status:
```shell
➜ ~ kubevpn status
ID Mode Cluster Kubeconfig Namespace Status
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
1 lite ccidd77aam2dtnc3qnddg /Users/naison/.kube/dev_config default Connected
➜ ~
```
### Reverse proxy
Use the command `kubevpn proxy` to proxy all inbound traffic to the local computer:
```shell
➜ ~ kubevpn proxy deployment/productpage
already connect to cluster
start to create remote inbound pod for deployment/productpage
workload default/deployment/productpage is controlled by a controller
rollout status for deployment/productpage
Connected to cluster
Injecting inbound sidecar for deployment/productpage
Checking rollout status for deployment/productpage
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
deployment "productpage" successfully rolled out
rollout status for deployment/productpage successfully
create remote inbound pod for deployment/productpage successfully
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
Rollout successfully for deployment/productpage
+----------------------------------------------------------+
| Now you can access resources in the kubernetes cluster ! |
+----------------------------------------------------------+
➜ ~
```
For local testing, save the following code as `hello.go`
```go
package main
import (
"fmt"
"io"
"net/http"
"fmt"
"io"
"net/http"
)
func main() {
http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
_, _ = io.WriteString(writer, "Hello world!")
fmt.Printf(">>Received request: %s %s from %s\n", request.Method, request.RequestURI, request.RemoteAddr)
})
_ = http.ListenAndServe(":9080", nil)
http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
_, _ = io.WriteString(writer, "Hello world!")
fmt.Printf(">>Received request: %s %s from %s\n", request.Method, request.RequestURI, request.RemoteAddr)
})
_ = http.ListenAndServe(":9080", nil)
}
```
and compile it
```
go build hello.go
```
then run it
```
./hello &
```
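
Before sending traffic through the proxy, it can help to confirm the local server answers directly; the hello server above listens on `:9080`:

```shell
# Sanity check against the local hello server.
curl localhost:9080
# Hello world!
```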
@@ -250,20 +327,20 @@ then run it
export selector=productpage
export pod=`kubectl get pods -l app=${selector} -n default -o jsonpath='{.items[0].metadata.name}'`
export pod_ip=`kubectl get pod $pod -n default -o jsonpath='{.status.podIP}'`
curl -v -H "a: 1" http://$pod_ip:9080/health
curl -v -H "foo: bar" http://$pod_ip:9080/health
```
The response should look like this:
```
curl -v -H "a: 1" http://$pod_ip:9080/health
curl -v -H "foo: bar" http://$pod_ip:9080/health
* Trying 192.168.72.77:9080...
* Connected to 192.168.72.77 (192.168.72.77) port 9080 (#0)
> GET /health HTTP/1.1
> Host: 192.168.72.77:9080
> User-Agent: curl/7.87.0
> Accept: */*
> a: 1
> foo: bar
>
>>Received request: GET /health from xxx.xxx.xxx.xxx:52974
* Mark bundle as not supporting multiuse
@@ -285,30 +362,25 @@ Hello world!%
Hello world!%
```
### Reverse proxy with mesh
Support HTTP, GRPC and WebSocket etc. with specific header `"a: 1"` will route to your local machine
Supports HTTP, gRPC, WebSocket, etc.; traffic with the specific header `"foo: bar"` will be routed to your local machine.
```shell
➜ ~ kubevpn proxy deployment/productpage --headers a=1
already connect to cluster
start to create remote inbound pod for deployment/productpage
patch workload default/deployment/productpage with sidecar
rollout status for deployment/productpage
➜ ~ kubevpn proxy deployment/productpage --headers foo=bar
Connected to cluster
Injecting inbound sidecar for deployment/productpage
Checking rollout status for deployment/productpage
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
deployment "productpage" successfully rolled out
rollout status for deployment/productpage successfully
create remote inbound pod for deployment/productpage successfully
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
Rollout successfully for deployment/productpage
+----------------------------------------------------------+
| Now you can access resources in the kubernetes cluster ! |
+----------------------------------------------------------+
➜ ~
```
first access without header "a: 1", it will access existing pod on kubernetes cluster.
First, access without the header "foo: bar"; the request will hit the existing pod on the kubernetes cluster.
```shell
➜ ~ curl productpage:9080
@@ -322,38 +394,47 @@ first access without header "a: 1", it will access existing pod on kubernetes cl
...
```
Now let's access local service with header `"a: 1"`
Now let's access the local service with the header `"foo: bar"`
```shell
➜ ~ curl productpage:9080 -H "a: 1"
➜ ~ curl productpage:9080 -H "foo: bar"
>>Received request: GET / from xxx.xxx.xxx.xxx:51296
Hello world!
```
If you want to cancel the proxy, just run:
```shell
➜ ~ kubevpn leave deployments/productpage
Leaving workload deployments/productpage
Checking rollout status for deployments/productpage
Waiting for deployment "productpage" rollout to finish: 0 out of 1 new replicas have been updated...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Rollout successfully for deployments/productpage
```
### Dev mode in local Docker 🐳
Run the Kubernetes pod in a local Docker container, cooperating with the service mesh to intercept traffic with the
specified header (or all traffic) to the local machine.
```shell
➜ ~ kubevpn dev deployment/authors --headers a=1 -it --rm --entrypoint sh
connectting to cluster
start to connect
got cidr from cache
get cidr successfully
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
dns service ok
start to create remote inbound pod for Deployment.apps/authors
patch workload default/Deployment.apps/authors with sidecar
rollout status for Deployment.apps/authors
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
➜ ~ kubevpn dev deployment/authors --headers foo=bar --entrypoint sh
Starting connect
Got network CIDR from cache
Use exist traffic manager
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
Injecting inbound sidecar for deployment/authors
Patching workload deployment/authors
Checking rollout status for deployment/authors
Waiting for deployment "authors" rollout to finish: 0 out of 1 new replicas have been updated...
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
deployment "authors" successfully rolled out
rollout status for Deployment.apps/authors successfully
create remote inbound pod for Deployment.apps/authors successfully
Rollout successfully for Deployment.apps/authors
tar: removing leading '/' from member names
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/4563987760170736212:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading `/' from member names
@@ -393,23 +474,21 @@ OK: 8 MiB in 19 packages
{"status":"Authors is healthy"} /opt/microservices # echo "continue testing pod access..."
continue testing pod access...
/opt/microservices # exit
prepare to exit, cleaning up
update ref count successfully
tun device closed
leave resource: deployments.apps/authors
workload default/deployments.apps/authors is controlled by a controller
leave resource: deployments.apps/authors successfully
clean up successfully
prepare to exit, cleaning up
update ref count successfully
clean up successfully
Created container: default_authors
Wait container default_authors to be running...
Container default_authors is running now
Disconnecting from the cluster...
Leaving workload deployments.apps/authors
Disconnecting from the cluster...
Performing cleanup operations
Clearing DNS settings
➜ ~
```
You can see that it starts two containers with docker, mapping to the pod's two containers, and they share ports on the
same network, so you can use `localhost:port` to access the other container. Moreover, all environment variables,
volumes, and the network are the same as in the remote kubernetes pod; it is truly consistent with the kubernetes
runtime. This makes developing on a local PC come true.
```shell
➜ ~ docker ps
@@ -419,39 +498,37 @@ fc04e42799a5 nginx:latest "/docker-entrypoint.…" 37 sec
➜ ~
```
Here is how to access the pod in the local docker container:
```shell
export authors_pod=`kubectl get pods -l app=authors -n default -o jsonpath='{.items[0].metadata.name}'`
export authors_pod_ip=`kubectl get pod $authors_pod -n default -o jsonpath='{.status.podIP}'`
curl -kv -H "a: 1" http://$authors_pod_ip:80/health
curl -kv -H "foo: bar" http://$authors_pod_ip:80/health
```
Verify the logs of the nginx container:
```shell
docker logs $(docker ps --format '{{.Names}}' | grep nginx_default_kubevpn)
```
If you just want to start up a docker image, you can use simple way like this:
If you just want to start up a docker image, you can use a simple way like this:
```shell
kubevpn dev deployment/authors --no-proxy -it --rm
kubevpn dev deployment/authors --no-proxy
```
Example
```shell
➜ ~ kubevpn dev deployment/authors --no-proxy -it --rm
connectting to cluster
start to connect
got cidr from cache
get cidr successfully
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
dns service ok
➜ ~ kubevpn dev deployment/authors --no-proxy
Starting connect
Got network CIDR from cache
Use exist traffic manager
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
tar: removing leading '/' from member names
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/5631078868924498209:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading `/' from member names
@@ -469,7 +546,7 @@ Created main container: authors_default_kubevpn_ff34b
Now the main process will hang up to show you log.
If you want to specify the image to start the container locally, you can use the parameter `--docker-image`. When the
If you want to specify the image to start the container locally, you can use the parameter `--dev-image`. When the
image does not exist locally, it will be pulled from the corresponding registry. If you want to specify startup
parameters, you can use the `--entrypoint` parameter, replacing it with the command you want to execute, such
as `--entrypoint /bin/bash`; for more parameters, see `kubevpn dev --help`.
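
Putting those flags together, a hypothetical invocation that swaps in a custom image and shell could look like this (the image tag is illustrative):

```shell
# --dev-image and --entrypoint as described above.
kubevpn dev deployment/authors --dev-image ubuntu:22.04 --entrypoint /bin/bash
```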
@@ -477,63 +554,53 @@ as `--entrypoint /bin/bash`, for more parameters, see `kubevpn dev --help`.
### DinD ( Docker in Docker ) use kubevpn in Docker
If you want to start the development mode locally using Docker in Docker (DinD), because the program will read and
write the `/tmp` directory, you need to manually add the parameter `-v /tmp:/tmp` (outer docker) and other thing is you
write the `/tmp` directory, you need to manually add the parameter `-v /tmp:/tmp` (outer docker); another thing is
that you need the special parameter `--network` (inner docker) for sharing network and pid.
Example:
```shell
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 naison/kubevpn:v2.0.0
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 naison/kubevpn:latest
```
```shell
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 naison/kubevpn:v2.0.0
Unable to find image 'naison/kubevpn:v2.0.0' locally
v2.0.0: Pulling from naison/kubevpn
445a6a12be2b: Already exists
bd6c670dd834: Pull complete
64a7297475a2: Pull complete
33fa2e3224db: Pull complete
e008f553422a: Pull complete
5132e0110ddc: Pull complete
5b2243de1f1a: Pull complete
662a712db21d: Pull complete
4f4fb700ef54: Pull complete
33f0298d1d4f: Pull complete
Digest: sha256:115b975a97edd0b41ce7a0bc1d8428e6b8569c91a72fe31ea0bada63c685742e
Status: Downloaded newer image for naison/kubevpn:v2.0.0
root@d0b3dab8912a:/app# kubevpn dev deployment/authors --headers user=naison -it --entrypoint sh
----------------------------------------------------------------------------------
Warn: Use sudo to execute command kubevpn can not use user env KUBECONFIG.
Because of sudo user env and user env are different.
Current env KUBECONFIG value:
----------------------------------------------------------------------------------
hostname is d0b3dab8912a
connectting to cluster
start to connect
got cidr from cache
get cidr successfully
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
dns service ok
start to create remote inbound pod for Deployment.apps/authors
patch workload default/Deployment.apps/authors with sidecar
rollout status for Deployment.apps/authors
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 naison/kubevpn:latest
Unable to find image 'naison/kubevpn:latest' locally
latest: Pulling from naison/kubevpn
9c704ecd0c69: Already exists
4987d0a976b5: Pull complete
8aa94c4fc048: Pull complete
526fee014382: Pull complete
6c1c2bedceb6: Pull complete
97ac845120c5: Pull complete
ca82aef6a9eb: Pull complete
1fd9534c7596: Pull complete
588bd802eb9c: Pull complete
Digest: sha256:368db2e0d98f6866dcefd60512960ce1310e85c24a398fea2a347905ced9507d
Status: Downloaded newer image for naison/kubevpn:latest
WARNING: image with reference naison/kubevpn was found but does not match the specified platform: wanted linux/amd64, actual: linux/arm64
root@5732124e6447:/app# kubevpn dev deployment/authors --headers user=naison --entrypoint sh
hostname is 5732124e6447
Starting connect
Got network CIDR from cache
Use exist traffic manager
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
Injecting inbound sidecar for deployment/authors
Patching workload deployment/authors
Checking rollout status for deployment/authors
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
deployment "authors" successfully rolled out
rollout status for Deployment.apps/authors successfully
create remote inbound pod for Deployment.apps/authors successfully
Rollout successfully for Deployment.apps/authors
tar: removing leading '/' from member names
/tmp/6460902982794789917:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading `/' from member names
tar: Removing leading `/' from hard link targets
/tmp/5028895788722532426:/var/run/secrets/kubernetes.io/serviceaccount
network mode is container:d0b3dab8912a
Network mode is container:d0b3dab8912a
Created container: nginx_default_kubevpn_6df63
Wait container nginx_default_kubevpn_6df63 to be running...
Container nginx_default_kubevpn_6df63 is running now
@@ -588,49 +655,50 @@ OK: 8 MiB in 19 packages
>> Container Received request: GET / from 127.0.0.1:41230
Hello world!/opt/microservices #
/opt/microservices # curl authors:9080/health -H "a: 1"
/opt/microservices # curl authors:9080/health -H "foo: bar"
>>Received request: GET /health from 223.254.0.109:57930
Hello world!/opt/microservices #
/opt/microservices # curl localhost:9080/health
{"status":"Authors is healthy"}/opt/microservices # exit
prepare to exit, cleaning up
update ref count successfully
tun device closed
leave resource: deployments.apps/authors
workload default/deployments.apps/authors is controlled by a controller
leave resource: deployments.apps/authors successfully
clean up successfully
prepare to exit, cleaning up
update ref count successfully
clean up successfully
Created container: default_authors
Wait container default_authors to be running...
Container default_authors is running now
Disconnecting from the cluster...
Leaving workload deployments.apps/authors
Disconnecting from the cluster...
Performing cleanup operations
Clearing DNS settings
root@d0b3dab8912a:/app# exit
exit
➜ ~
```
During the test, check which containers are running:
```text
➜ ~ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
1cd576b51b66 naison/authors:latest "sh" 4 minutes ago Up 4 minutes authors_default_kubevpn_6df5f
56a6793df82d nginx:latest "/docker-entrypoint.…" 4 minutes ago Up 4 minutes nginx_default_kubevpn_6df63
d0b3dab8912a naison/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
➜ ~
```
* To clean up after the test:
```shell
kubectl delete -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
kubectl delete -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
```
### Multiple Protocol
Supports OSI model layers 3 and above, with protocols like `ICMP`, `TCP`, and `UDP`; a quick smoke test follows the list.
- TCP
- UDP
- ICMP
- GRPC
- gRPC
- Thrift
- WebSocket
- HTTP
- ...
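
A quick smoke test over a connected tunnel, reusing the bookinfo sample from earlier (the pod IP is illustrative; take a real one from `kubectl get pods -o wide`):

```shell
# ICMP: ping a pod IP.
ping -c 3 172.29.2.134
# TCP/HTTP: curl a service by its cluster DNS name.
curl productpage:9080
# UDP: cluster DNS lookups go over UDP by default.
nslookup productpage.default.svc.cluster.local
```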
@@ -641,166 +709,23 @@ kubectl delete -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/
- Linux
- Windows
On the Windows platform, you need to
install [PowerShell](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.2)
in advance.
## Architecture
## FAQ
![arch.svg](docs/en/images/proxy-arch.svg)
Architecture can be found [here](/docs/en/Architecture.md)
and [website](https://www.kubevpn.cn/docs/architecture/connect).
### 1, What should I do if the dependent image cannot be pulled, or the internal environment cannot access docker.io?
## Contributions
Answer: here are two solutions to this problem
Always welcome. Even just opening an issue is appreciated.
- Solution 1: In a network that can access docker.io, transfer the image shown by the command `kubevpn version` to your own
private image registry, and then add the option `--image` to specify that image when starting the command.
Example:
If you want to debug this project on your local PC, please follow the steps below:
``` shell
➜ ~ kubevpn version
KubeVPN: CLI
Version: v2.0.0
DaemonVersion: v2.0.0
Image: docker.io/naison/kubevpn:v2.0.0
Branch: feature/daemon
Git commit: 7c3a87e14e05c238d8fb23548f95fa1dd6e96936
Built time: 2023-09-30 22:01:51
Built OS/Arch: darwin/arm64
Built Go version: go1.20.5
```
- Start the daemon and sudo daemon processes in IDE debug mode (essentially two gRPC servers).
- Add a breakpoint at `pkg/daemon/action/connect.go:21`.
- Open another terminal and run `make kubevpn`.
- Then run `./bin/kubevpn connect` and it will hit the breakpoint (the terminal half is sketched below).
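
In terminal form, the non-IDE half of that loop might look like this (the gRPC daemons are assumed to already be running under the IDE debugger):

```shell
# Build the CLI, then drive the connect path so the breakpoint fires.
make kubevpn
./bin/kubevpn connect
```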
The image is `docker.io/naison/kubevpn:v2.0.0`; transfer this image to the private docker registry:
### Supported by
```text
docker pull docker.io/naison/kubevpn:v2.0.0
docker tag docker.io/naison/kubevpn:v2.0.0 [docker registry]/[namespace]/[repo]:[tag]
docker push [docker registry]/[namespace]/[repo]:[tag]
```
Then you can use this image, as follows:
```text
➜ ~ kubevpn connect --image [docker registry]/[namespace]/[repo]:[tag]
got cidr from cache
traffic manager not exist, try to create it...
pod [kubevpn-traffic-manager] status is Running
...
```
- Solution 2: Use the option `--transfer-image`; enabling this flag will automatically transfer the image from the default
address to the address specified by `--image`.
Example
```shell
➜ ~ kubevpn connect --transfer-image --image nocalhost-team-docker.pkg.coding.net/nocalhost/public/kubevpn:v2.0.0
v2.0.0: Pulling from naison/kubevpn
Digest: sha256:450446850891eb71925c54a2fab5edb903d71103b485d6a4a16212d25091b5f4
Status: Image is up to date for naison/kubevpn:v2.0.0
The push refers to repository [nocalhost-team-docker.pkg.coding.net/nocalhost/public/kubevpn]
ecc065754c15: Preparing
f2b6c07cb397: Pushed
448eaa16d666: Pushed
f5507edfc283: Pushed
3b6ea9aa4889: Pushed
ecc065754c15: Pushed
feda785382bb: Pushed
v2.0.0: digest: sha256:85d29ebb53af7d95b9137f8e743d49cbc16eff1cdb9983128ab6e46e0c25892c size: 2000
start to connect
got cidr from cache
get cidr successfully
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
dns service ok
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
➜ ~
```
### 2, When using `kubevpn dev`, I got error code 137; how to resolve it?
```text
dns service ok
tar: Removing leading `/' from member names
tar: Removing leading `/' from hard link targets
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/7375606548554947868:/var/run/secrets/kubernetes.io/serviceaccount
Created container: server_vke-system_kubevpn_0db84
Wait container server_vke-system_kubevpn_0db84 to be running...
Container server_vke-system_kubevpn_0db84 is running on port 8888/tcp: 6789/tcp:6789 now
$ Status: , Code: 137
prepare to exit, cleaning up
port-forward occurs error, err: lost connection to pod, retrying
update ref count successfully
ref-count is zero, prepare to clean up resource
clean up successfully
```
This is because the resources allocated to your docker-desktop are less than the resources the pod requests, so it was
OOM killed. You can add more resources in your docker-desktop settings: `Preferences --> Resources --> Memory`.
### 3, Using WSL (Windows Subsystem for Linux) Docker, mode `kubevpn dev` cannot connect to the cluster network; how to solve this problem?
Answer:
this is because WSL's Docker uses the Windows network, so even if you start a container in WSL, the container will not
use the WSL network, but the Windows network.
Solution:
- 1): install docker in WSL, instead of using Windows Docker-desktop
- 2): use the command `kubevpn connect` on Windows, and then start `kubevpn dev` in WSL
- 3): start a container with the command `kubevpn connect` on Windows, and then
start `kubevpn dev --network container:$CONTAINER_ID` in WSL
### 4, After using the command `kubevpn dev` to enter develop mode, I can't access the kubernetes api-server and get the error `172.17.0.1:443 connect refused`; how to solve this problem?
Answer:
Maybe the k8s network subnet conflicts with the docker subnet.
Solution:
- Use the option `--connect-mode container` when starting the command `kubevpn dev` (see the sketch after this list).
- Modify `~/.docker/daemon.json` to add a non-conflicting subnet, e.g. `"bip": "172.15.0.1/24"`.
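
For the first option, the flag goes straight onto the dev command; a sketch reusing the earlier workload:

```shell
# Run the connect logic inside a container, sidestepping
# the conflicting docker bridge subnet on the host.
kubevpn dev deployment/authors --connect-mode container
```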
```shell
➜ ~ cat ~/.docker/daemon.json
{
"builder": {
"gc": {
"defaultKeepStorage": "20GB",
"enabled": true
}
},
"experimental": false,
"features": {
"buildkit": true
},
"insecure-registries": [
]
}
```
Add a non-conflicting subnet, e.g. `172.15.0.1/24`:
```shell
➜ ~ cat ~/.docker/daemon.json
{
"builder": {
"gc": {
"defaultKeepStorage": "20GB",
"enabled": true
}
},
"experimental": false,
"features": {
"buildkit": true
},
"insecure-registries": [
],
"bip": "172.15.0.1/24"
}
```
Restart docker and retry.
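
On Linux, that restart is typically one command; Docker Desktop users restart from the UI instead:

```shell
# Apply the new "bip" subnet by restarting the docker daemon.
sudo systemctl restart docker
```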
[![JetBrains logo.](https://resources.jetbrains.com/storage/products/company/brand/logos/jetbrains.svg)](https://jb.gg/OpenSourceSupport)

View File

@@ -1,112 +1,123 @@
![kubevpn](https://raw.githubusercontent.com/wencaiwulue/kubevpn/master/samples/flat_log.png)
[![GitHub Workflow][1]](https://github.com/KubeNetworks/kubevpn/actions)
[![Go Version][2]](https://github.com/KubeNetworks/kubevpn/blob/master/go.mod)
[![Go Report][3]](https://goreportcard.com/badge/github.com/KubeNetworks/kubevpn)
[![Maintainability][4]](https://codeclimate.com/github/KubeNetworks/kubevpn/maintainability)
[![GitHub License][5]](https://github.com/KubeNetworks/kubevpn/blob/main/LICENSE)
[![GitHub Workflow][1]](https://github.com/kubenetworks/kubevpn/actions)
[![Go Version][2]](https://github.com/kubenetworks/kubevpn/blob/master/go.mod)
[![Go Report][3]](https://goreportcard.com/report/github.com/wencaiwulue/kubevpn)
[![Maintainability][4]](https://codeclimate.com/github/kubenetworks/kubevpn/maintainability)
[![GitHub License][5]](https://github.com/kubenetworks/kubevpn/blob/main/LICENSE)
[![Docker Pulls][6]](https://hub.docker.com/r/naison/kubevpn)
[![Releases][7]](https://github.com/KubeNetworks/kubevpn/releases)
[![GoDoc](https://godoc.org/github.com/KubeNetworks/kubevpn?status.png)](https://godoc.org/github.com/KubeNetworks/kubevpn)
[![Test coverage](http://gocover.io/_badge/github.com/KubeNetworks/kubevpn)](https://gocover.io/github.com/KubeNetworks/kubevpn)
[![Releases][7]](https://github.com/kubenetworks/kubevpn/releases)
[![GoDoc](https://godoc.org/github.com/kubenetworks/kubevpn?status.png)](https://pkg.go.dev/github.com/wencaiwulue/kubevpn/v2)
[![codecov](https://codecov.io/gh/wencaiwulue/kubevpn/graph/badge.svg?token=KMDSINSDEP)](https://codecov.io/gh/wencaiwulue/kubevpn)
[1]: https://img.shields.io/github/actions/workflow/status/KubeNetworks/kubevpn/release.yml?logo=github
[1]: https://img.shields.io/github/actions/workflow/status/kubenetworks/kubevpn/release.yml?logo=github
[2]: https://img.shields.io/github/go-mod/go-version/KubeNetworks/kubevpn?logo=go
[2]: https://img.shields.io/github/go-mod/go-version/kubenetworks/kubevpn?logo=go
[3]: https://img.shields.io/badge/go%20report-A+-brightgreen.svg?style=flat
[3]: https://goreportcard.com/badge/github.com/wencaiwulue/kubevpn?style=flat
[4]: https://api.codeclimate.com/v1/badges/b5b30239174fc6603aca/maintainability
[5]: https://img.shields.io/github/license/KubeNetworks/kubevpn
[5]: https://img.shields.io/github/license/kubenetworks/kubevpn
[6]: https://img.shields.io/docker/pulls/naison/kubevpn?logo=docker
[7]: https://img.shields.io/github/v/release/KubeNetworks/kubevpn?logo=smartthings
[7]: https://img.shields.io/github/v/release/kubenetworks/kubevpn?logo=smartthings
# KubeVPN
[English](README.md) | [中文](README_ZH.md) | [维基](https://github.com/KubeNetworks/kubevpn/wiki/%E6%9E%B6%E6%9E%84)
[English](README.md) | [中文](README_ZH.md) | [维基](https://github.com/kubenetworks/kubevpn/wiki/%E6%9E%B6%E6%9E%84)
KubeVPN 一个云原生开发工具, 可以在本地连接云端 kubernetes 网络的工具,可以在本地直接访问远端集群的服务。也可以在远端集群访问到本地服务,便于调试及开发。同时还可以使用开发模式,直接在本地使用 Docker
将远程容器运行在本地
KubeVPN 提供一个云原生开发环境。通过连接云端 kubernetes 网络,可以在本地使用 k8s dns 或者 Pod IP / Service IP
直接访问远端集群中的服务。拦截远端集群中的工作负载的入流量到本地电脑,配合服务网格便于调试及开发。同时还可以使用开发模式,直接在本地使用
Docker
模拟 k8s pod runtime 将容器运行在本地 (具有相同的环境变量,磁盘和网络)。
## 内容
1. [快速开始](./README_ZH.md#快速开始)
2. [功能](./README_ZH.md#功能)
3. [架构](./README_ZH.md#架构)
4. [贡献代码](./README_ZH.md#贡献代码)
## 快速开始
#### 从 Github release 下载编译好的二进制文件
[链接](https://github.com/KubeNetworks/kubevpn/releases/latest)
#### 从 自定义 Krew 仓库安装
### 使用 [brew](https://brew.sh/) 安装 (macOS / Linux)
```shell
(
kubectl krew index add kubevpn https://github.com/KubeNetworks/kubevpn.git && \
kubectl krew install kubevpn/kubevpn && kubectl kubevpn
)
brew install kubevpn
```
#### 自己构建二进制文件
### 使用 [scoop](https://scoop.sh/) (Windows)
```shell
(
git clone https://github.com/KubeNetworks/kubevpn.git && \
cd kubevpn && make kubevpn && ./bin/kubevpn
)
scoop bucket add extras
scoop install kubevpn
```
#### 安装 bookinfo 作为 demo 应用
### 使用 [krew](https://krew.sigs.k8s.io/) (Windows / macOS / Linux)
```shell
kubectl apply -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
kubectl krew index add kubevpn https://github.com/kubenetworks/kubevpn.git
kubectl krew install kubevpn/kubevpn
kubectl kubevpn
```
### 从 Github release 下载 (Windows / macOS / Linux)
[https://github.com/kubenetworks/kubevpn/releases/latest](https://github.com/kubenetworks/kubevpn/releases/latest)
### 安装 bookinfo 作为 demo 应用
```shell
kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
```
## 功能
### 链接到集群网络
使用命令 `kubevpn connect` 链接到集群,请注意这里需要输入电脑密码。因为需要 `root` 权限。(创建虚拟网卡)
```shell
➜ ~ kubevpn connect
Password:
start to connect
get cidr from cluster info...
get cidr from cluster info ok
get cidr from cni...
wait pod cni-net-dir-kubevpn to be running timeout, reason , ignore
get cidr from svc...
get cidr from svc ok
get cidr successfully
traffic manager not exist, try to create it...
label namespace default
create serviceAccount kubevpn-traffic-manager
create roles kubevpn-traffic-manager
create roleBinding kubevpn-traffic-manager
create service kubevpn-traffic-manager
create deployment kubevpn-traffic-manager
pod kubevpn-traffic-manager-66d969fd45-9zlbp is Pending
Starting connect
Getting network CIDR from cluster info...
Getting network CIDR from CNI...
Getting network CIDR from services...
Labeling Namespace default
Creating ServiceAccount kubevpn-traffic-manager
Creating Roles kubevpn-traffic-manager
Creating RoleBinding kubevpn-traffic-manager
Creating Service kubevpn-traffic-manager
Creating MutatingWebhookConfiguration kubevpn-traffic-manager
Creating Deployment kubevpn-traffic-manager
Pod kubevpn-traffic-manager-66d969fd45-9zlbp is Pending
Container Reason Message
control-plane ContainerCreating
vpn ContainerCreating
webhook ContainerCreating
pod kubevpn-traffic-manager-66d969fd45-9zlbp is Running
Pod kubevpn-traffic-manager-66d969fd45-9zlbp is Running
Container Reason Message
control-plane ContainerRunning
vpn ContainerRunning
webhook ContainerRunning
Creating mutatingWebhook_configuration for kubevpn-traffic-manager
update ref count successfully
port forward ready
tunnel connected
dns service ok
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
+----------------------------------------------------------+
| Now you can access resources in the kubernetes cluster ! |
+----------------------------------------------------------+
➜ ~
```
提示已经链接到集群了。使用命令 `kubevpn status` 检查一下状态。
```shell
➜ ~ kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
@@ -118,6 +129,8 @@ ratings-77b6cd4499-zvl6c 1/1 Running 0
reviews-85c88894d9-vgkxd 1/1 Running 0 24d 172.29.2.249 192.168.0.5 <none> <none>
```
找一个 pod 的 IP,比如 `productpage-788df7ff7f-jpkcs` 的 IP `172.29.2.134`
```shell
➜ ~ ping 172.29.2.134
PING 172.29.2.134 (172.29.2.134): 56 data bytes
@@ -131,6 +144,8 @@ PING 172.29.2.134 (172.29.2.134): 56 data bytes
round-trip min/avg/max/stddev = 54.293/55.380/56.270/0.728 ms
```
测试应该可以直接 Ping 通,说明本地可以正常访问到集群网络了。
```shell
➜ ~ kubectl get services -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
@@ -143,6 +158,8 @@ ratings ClusterIP 172.21.3.247 <none> 9080/TCP
reviews ClusterIP 172.21.8.24 <none> 9080/TCP 114d app=reviews
```
找一个 service 的 IP,比如 `productpage` 的 IP `172.21.10.49`,试着访问一下服务 `productpage`
```shell
➜ ~ curl 172.21.10.49:9080
<!DOCTYPE html>
@@ -154,8 +171,16 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
<meta name="viewport" content="width=device-width, initial-scale=1">
```
可以看到也可以正常访问,也就是可以在本地访问到集群的 pod 和 service 了~
### 域名解析功能
支持 k8s dns 解析。比如一个名为 `productpage` 的 Pod 或者 Service 处于 `default` 命名空间下可以被如下域名正常解析到:
- `productpage`
- `productpage.default`
- `productpage.default.svc.cluster.local`
```shell
➜ ~ curl productpage.default.svc.cluster.local:9080
<!DOCTYPE html>
@@ -167,8 +192,15 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
<meta name="viewport" content="width=device-width, initial-scale=1">
```
可以看到能够被正常解析,并且返回相应内容。
### 短域名解析功能
连接到此命名空间下,可以直接使用 `service` name 的方式访问,否则访问其它命名空间下的服务,需要带上命名空间作为域名的一部分,使用如下的域名即可。
- `productpage.default`
- `productpage.default.svc.cluster.local`
```shell
➜ ~ curl productpage:9080
<!DOCTYPE html>
@@ -180,25 +212,71 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
...
```
可以看到直接使用 service name 的方式,可以正常访问到集群资源。
### 链接到多集群网络
有两个模式
- 模式 `lite`: 可以链接到多个集群网络,但是仅支持链接到多集群。
- 模式 `full`: 不仅支持链接到单个集群网络,还可以拦截工作负载流量到本地电脑。
可以看到已经链接到了一个集群 `ccijorbccotmqodvr189g`,是 `full` 模式
```shell
➜ ~ kubevpn status
ID Mode Cluster Kubeconfig Namespace Status
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
```
此时还可以使用 `lite` 模式链接到其它集群
```shell
➜ ~ kubevpn connect -n default --kubeconfig ~/.kube/dev_config --lite
Starting connect
Got network CIDR from cache
Use exist traffic manager
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
+----------------------------------------------------------+
| Now you can access resources in the kubernetes cluster ! |
+----------------------------------------------------------+
```
使用命令 `kubevpn status` 查看当前链接状态。
```shell
➜ ~ kubevpn status
ID Mode Cluster Kubeconfig Namespace Status
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
1 lite ccidd77aam2dtnc3qnddg /Users/naison/.kube/dev_config default Connected
➜ ~
```
可以看到连接到了多个集群。
### 反向代理
使用命令 `kubevpn proxy` 代理所有的入站流量到本地电脑。
```shell
➜ ~ kubevpn proxy deployment/productpage
already connect to cluster
start to create remote inbound pod for deployment/productpage
workload default/deployment/productpage is controlled by a controller
rollout status for deployment/productpage
Connected to cluster
Injecting inbound sidecar for deployment/productpage
Checking rollout status for deployment/productpage
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
deployment "productpage" successfully rolled out
rollout status for deployment/productpage successfully
create remote inbound pod for deployment/productpage successfully
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
Rollout successfully for deployment/productpage
+----------------------------------------------------------+
| Now you can access resources in the kubernetes cluster ! |
+----------------------------------------------------------+
➜ ~
```
此时在本地使用 `go` 启动一个服务,用于承接流量。
```go
package main
@@ -215,6 +293,8 @@ func main() {
}
```
使用 `service` name 的方式,直接访问集群中的 `productpage` 服务。
```shell
➜ ~ curl productpage:9080
Hello world!%
@@ -222,27 +302,28 @@ Hello world!%
Hello world!%
```
可以看到直接击中了本地电脑的服务。
### 反向代理支持 service mesh
支持 HTTP, GRPC 和 WebSocket 等, 携带了指定 header `"a: 1"` 的流量,将会路由到本地
支持 HTTP, GRPC 和 WebSocket 等, 携带了指定 header `"foo: bar"` 的流量,将会路由到本地
```shell
➜ ~ kubevpn proxy deployment/productpage --headers a=1
already connect to cluster
start to create remote inbound pod for deployment/productpage
patch workload default/deployment/productpage with sidecar
rollout status for deployment/productpage
➜ ~ kubevpn proxy deployment/productpage --headers foo=bar
Connected to cluster
Injecting inbound sidecar for deployment/productpage
Checking rollout status for deployment/productpage
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
deployment "productpage" successfully rolled out
rollout status for deployment/productpage successfully
create remote inbound pod for deployment/productpage successfully
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
Rollout successfully for deployment/productpage
+----------------------------------------------------------+
| Now you can access resources in the kubernetes cluster ! |
+----------------------------------------------------------+
➜ ~
```
不带 header 直接访问集群资源,可以看到返回的是集群中的服务内容。
```shell
➜ ~ curl productpage:9080
<!DOCTYPE html>
@@ -255,34 +336,46 @@ create remote inbound pod for deployment/productpage successfully
...
```
带上特定 header 访问集群资源,可以看到返回了本地服务的内容。
```shell
➜ ~ curl productpage:9080 -H "a: 1"
➜ ~ curl productpage:9080 -H "foo: bar"
Hello world!%
```
如果你需要取消代理流量,可以执行如下命令:
```shell
➜ ~ kubevpn leave deployments/productpage
Leaving workload deployments/productpage
Checking rollout status for deployments/productpage
Waiting for deployment "productpage" rollout to finish: 0 out of 1 new replicas have been updated...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Rollout successfully for deployments/productpage
```
### 本地进入开发模式 🐳
将 Kubernetes pod 运行在本地的 Docker 容器中,同时配合 service mesh, 拦截带有指定 header 的流量到本地,或者所有的流量到本地。这个开发模式依赖于本地 Docker。
将 Kubernetes pod 运行在本地的 Docker 容器中,同时配合 service mesh, 拦截带有指定 header 的流量到本地,或者所有的流量到本地。这个开发模式依赖于本地
Docker。
```shell
➜ ~ kubevpn dev deployment/authors --headers a=1 -it --rm --entrypoint sh
connectting to cluster
start to connect
got cidr from cache
get cidr successfully
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
dns service ok
start to create remote inbound pod for Deployment.apps/authors
patch workload default/Deployment.apps/authors with sidecar
rollout status for Deployment.apps/authors
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
➜ ~ kubevpn dev deployment/authors --headers foo=bar --entrypoint sh
Starting connect
Got network CIDR from cache
Use exist traffic manager
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
Injecting inbound sidecar for deployment/authors
Patching workload deployment/authors
Checking rollout status for deployment/authors
Waiting for deployment "authors" rollout to finish: 0 out of 1 new replicas have been updated...
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
deployment "authors" successfully rolled out
rollout status for Deployment.apps/authors successfully
create remote inbound pod for Deployment.apps/authors successfully
Rollout successfully for Deployment.apps/authors
tar: removing leading '/' from member names
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/4563987760170736212:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading `/' from member names
@@ -319,21 +412,22 @@ OK: 8 MiB in 19 packages
/opt/microservices # 2023/09/30 13:41:58 Start listening http port 9080 ...
/opt/microservices # curl localhost:9080/health
{"status":"Authors is healthy"}/opt/microservices # exit
prepare to exit, cleaning up
update ref count successfully
tun device closed
leave resource: deployments.apps/authors
workload default/deployments.apps/authors is controlled by a controller
leave resource: deployments.apps/authors successfully
clean up successfully
prepare to exit, cleaning up
update ref count successfully
clean up successfully
{"status":"Authors is healthy"} /opt/microservices # echo "continue testing pod access..."
continue testing pod access...
/opt/microservices # exit
Created container: default_authors
Wait container default_authors to be running...
Container default_authors is running now
Disconnecting from the cluster...
Leaving workload deployments.apps/authors
Disconnecting from the cluster...
Performing cleanup operations
Clearing DNS settings
➜ ~
```
此时本地会启动两个 container, 对应 pod 容器中的两个 container, 并且共享端口, 可以直接使用 localhost:port 的形式直接访问另一个 container,
此时本地会启动两个 container, 对应 pod 容器中的两个 container, 并且共享端口, 可以直接使用 localhost:port 的形式直接访问另一个
container,
并且, 所有的环境变量、挂载卷、网络条件都和 pod 一样, 真正做到与 kubernetes 运行环境一致。
```shell
@@ -347,22 +441,20 @@ fc04e42799a5 nginx:latest "/docker-entrypoint.…" 37 sec
如果你只是想在本地启动镜像,可以用一种简单的方式:
```shell
kubevpn dev deployment/authors --no-proxy -it --rm
kubevpn dev deployment/authors --no-proxy
```
例如:
```shell
➜ ~ kubevpn dev deployment/authors --no-proxy -it --rm
connectting to cluster
start to connect
got cidr from cache
get cidr successfully
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
dns service ok
➜ ~ kubevpn dev deployment/authors --no-proxy
Starting connect
Got network CIDR from cache
Use exist traffic manager
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
tar: removing leading '/' from member names
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/5631078868924498209:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading `/' from member names
@@ -380,68 +472,59 @@ Created main container: authors_default_kubevpn_ff34b
此时程序会挂起,默认为显示日志
如果你想指定在本地启动容器的镜像, 可以使用参数 `--docker-image`, 当本地不存在该镜像时, 会从对应的镜像仓库拉取。如果你想指定启动参数,可以使用 `--entrypoint`
如果你想指定在本地启动容器的镜像, 可以使用参数 `--dev-image`, 当本地不存在该镜像时,
会从对应的镜像仓库拉取。如果你想指定启动参数,可以使用 `--entrypoint`
参数,替换为你想要执行的命令,比如 `--entrypoint /bin/bash`, 更多使用参数,请参见 `kubevpn dev --help`.
### DinD ( Docker in Docker ) 在 Docker 中使用 kubevpn
If you want to start dev mode locally using Docker in Docker (DinD), you must add `-v /tmp:/tmp` manually, because the program reads and writes the `/tmp` directory.
One more thing to note: when using DinD mode, you also need to specify the `--network` flag so the containers share the network and PID namespaces.
For example:
```shell
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 naison/kubevpn:latest
```
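Inside such a container, the `--network` flag takes `container:` plus the ID of the container kubevpn runs in; `hostname` prints that ID, as the transcript below shows. A sketch:
```shell
# run inside the kubevpn container; hostname prints the container ID
CONTAINER_ID=$(hostname)
kubevpn dev deployment/authors --headers user=naison --entrypoint sh --network container:$CONTAINER_ID
```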
```shell
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 naison/kubevpn:latest
Unable to find image 'naison/kubevpn:latest' locally
latest: Pulling from naison/kubevpn
9c704ecd0c69: Already exists
4987d0a976b5: Pull complete
8aa94c4fc048: Pull complete
526fee014382: Pull complete
6c1c2bedceb6: Pull complete
97ac845120c5: Pull complete
ca82aef6a9eb: Pull complete
1fd9534c7596: Pull complete
588bd802eb9c: Pull complete
Digest: sha256:368db2e0d98f6866dcefd60512960ce1310e85c24a398fea2a347905ced9507d
Status: Downloaded newer image for naison/kubevpn:latest
WARNING: image with reference naison/kubevpn was found but does not match the specified platform: wanted linux/amd64, actual: linux/arm64
root@5732124e6447:/app# kubevpn dev deployment/authors --headers user=naison --entrypoint sh
hostname is 5732124e6447
Starting connect
Got network CIDR from cache
Use exist traffic manager
Forwarding port...
Connected tunnel
Adding route...
Configured DNS service
Injecting inbound sidecar for deployment/authors
Patching workload deployment/authors
Checking rollout status for deployment/authors
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
deployment "authors" successfully rolled out
rollout status for Deployment.apps/authors successfully
create remote inbound pod for Deployment.apps/authors successfully
Rollout successfully for Deployment.apps/authors
tar: removing leading '/' from member names
/tmp/6460902982794789917:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading `/' from member names
tar: Removing leading `/' from hard link targets
/tmp/5028895788722532426:/var/run/secrets/kubernetes.io/serviceaccount
Network mode is container:d0b3dab8912a
Created container: nginx_default_kubevpn_6df63
Wait container nginx_default_kubevpn_6df63 to be running...
Container nginx_default_kubevpn_6df63 is running now
Executing busybox-1.33.1-r3.trigger
OK: 8 MiB in 19 packages
/opt/microservices #
/opt/microservices # cat > hello.go <<EOF
package main
import (
"fmt"
"io"
"net/http"
)
func main() {
http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
_, _ = io.WriteString(writer, "Hello world!")
fmt.Println(">> Container Received request: %s %s from %s\n", request.Method, request.RequestURI, request.RemoteAddr)
})
fmt.Println("Start listening http port 9080 ...")
_ = http.ListenAndServe(":9080", nil)
}
EOF
/opt/microservices # go build hello.go
/opt/microservices #
/opt/microservices # ls -alh
total 12M
drwxr-xr-x 1 root root 26 Nov 4 10:29 .
drwxr-xr-x 1 root root 26 Oct 18 2021 ..
-rwxr-xr-x 1 root root 6.3M Oct 18 2021 app
-rwxr-xr-x 1 root root 5.8M Nov 4 10:29 hello
-rw-r--r-- 1 root root 387 Nov 4 10:28 hello.go
/opt/microservices #
/opt/microservices # apk add curl
OK: 8 MiB in 19 packages
/opt/microservices # curl localhost:80
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
/opt/microservices # ./hello &
/opt/microservices # Start listening http port 9080 ...
[2]+ Done ./hello
/opt/microservices # curl localhost:9080
>> Container Received request: GET / from 127.0.0.1:41230
Hello world!/opt/microservices #
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
/opt/microservices # ls
app
/opt/microservices # ls -alh
total 6M
drwxr-xr-x 2 root root 4.0K Oct 18 2021 .
drwxr-xr-x 1 root root 4.0K Oct 18 2021 ..
-rwxr-xr-x 1 root root 6.3M Oct 18 2021 app
/opt/microservices # ./app &
/opt/microservices # 2023/09/30 14:27:32 Start listening http port 9080 ...
/opt/microservices # curl authors:9080/health
{"status":"Authors is healthy"}/opt/microservices #
/opt/microservices # curl authors:9080/health -H "foo: bar"
>>Received request: GET /health from 223.254.0.109:57930
Hello world!/opt/microservices #
/opt/microservices # curl localhost:9080/health
{"status":"Authors is healthy"}/opt/microservices # exit
Created container: default_authors
Wait container default_authors to be running...
Container default_authors is running now
Disconnecting from the cluster...
Leaving workload deployments.apps/authors
Disconnecting from the cluster...
Performing cleanup operations
Clearing DNS settings
root@d0b3dab8912a:/app# exit
exit
➜ ~
```
You can see that three containers were actually started locally via `Docker`.
```text
➜ ~ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
1cd576b51b66 naison/authors:latest "sh" 4 minutes ago Up 4 minutes authors_default_kubevpn_6df5f
56a6793df82d nginx:latest "/docker-entrypoint.…" 4 minutes ago Up 4 minutes nginx_default_kubevpn_6df63
d0b3dab8912a naison/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
d0b3dab8912a naison/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
➜ ~
```
### Multiple protocols supported
Protocols at layer 3 of the OSI model and above are supported, for example:
- TCP
- UDP
- ICMP
- gRPC
- Thrift
- WebSocket
- HTTP
- ...
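Once connected, any of these can be exercised directly against in-cluster addresses (a sketch; the label selector and port are illustrative):
```shell
export POD_IP=$(kubectl get pods -l app=authors -o jsonpath='{.items[0].status.podIP}')
ping $POD_IP                      # ICMP
curl http://$POD_IP:9080/health   # HTTP over TCP
```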
### Supported platforms
- Linux
- Windows
On Windows, [PowerShell](https://docs.microsoft.com/zh-cn/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.2) needs to be installed.
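One way to install it (a sketch; assumes the winget package manager is available):
```shell
winget install --id Microsoft.PowerShell --source winget
```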
## Architecture
![arch.svg](docs/en/images/proxy-arch.svg)
Architecture details can be found [here](/docs/en/Architecture.md) and on the [website](https://www.kubevpn.cn/docs/architecture/connect).
## FAQ
### 1. What if the required images cannot be pulled, or docker.io is unreachable from the internal network?
Answer: there are two ways to solve this.
- First, in a network that can access docker.io, transfer the image shown by `kubevpn version` to your own private registry, then add `--image <new image>` when starting the command.
For example:
```shell
➜ ~ kubevpn version
KubeVPN: CLI
Version: v2.0.0
DaemonVersion: v2.0.0
Image: docker.io/naison/kubevpn:v2.0.0
Branch: feature/daemon
Git commit: 7c3a87e14e05c238d8fb23548f95fa1dd6e96936
Built time: 2023-09-30 22:01:51
Built OS/Arch: darwin/arm64
Built Go version: go1.20.5
```
The image is `docker.io/naison/kubevpn:v2.0.0`; transfer this image to your own registry.
```text
docker pull docker.io/naison/kubevpn:v2.0.0
docker tag docker.io/naison/kubevpn:v2.0.0 [registry]/[namespace]/[repo]:[tag]
docker push [registry]/[namespace]/[repo]:[tag]
```
Then you can use this image, as follows:
```text
➜ ~ kubevpn connect --image [docker registry]/[namespace]/[repo]:[tag]
got cidr from cache
traffic manager not exist, try to create it...
pod [kubevpn-traffic-manager] status is Running
...
```
- Second, use the `--transfer-image` flag, which automatically transfers the image to the registry specified by `--image`.
For example:
```shell
➜ ~ kubevpn connect --transfer-image --image nocalhost-team-docker.pkg.coding.net/nocalhost/public/kubevpn:v2.0.0
v2.0.0: Pulling from naison/kubevpn
Digest: sha256:450446850891eb71925c54a2fab5edb903d71103b485d6a4a16212d25091b5f4
Status: Image is up to date for naison/kubevpn:v2.0.0
The push refers to repository [nocalhost-team-docker.pkg.coding.net/nocalhost/public/kubevpn]
ecc065754c15: Preparing
f2b6c07cb397: Pushed
448eaa16d666: Pushed
f5507edfc283: Pushed
3b6ea9aa4889: Pushed
ecc065754c15: Pushed
feda785382bb: Pushed
v2.0.0: digest: sha256:85d29ebb53af7d95b9137f8e743d49cbc16eff1cdb9983128ab6e46e0c25892c size: 2000
start to connect
got cidr from cache
get cidr successfully
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
dns service ok
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
➜ ~
```
### 2. When entering dev mode via `kubevpn dev`, error code 137 appears. How do I fix it?
```text
dns service ok
tar: Removing leading `/' from member names
tar: Removing leading `/' from hard link targets
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/7375606548554947868:/var/run/secrets/kubernetes.io/serviceaccount
Created container: server_vke-system_kubevpn_0db84
Wait container server_vke-system_kubevpn_0db84 to be running...
Container server_vke-system_kubevpn_0db84 is running on port 8888/tcp: 6789/tcp:6789 now
$ Status: , Code: 137
prepare to exit, cleaning up
port-forward occurs error, err: lost connection to pod, retrying
update ref count successfully
ref-count is zero, prepare to clean up resource
clean up successfully
```
This is because the resources allocated to Docker Desktop are smaller than the resources the container needs at startup, so the container is killed by the OOM killer. You can increase the resources
granted to Docker Desktop under `Preferences --> Resources --> Memory`.
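Exit code 137 is 128 + 9, i.e. the container's main process was killed with SIGKILL, which is what the OOM killer sends. You can confirm this from Docker (a sketch; `<container>` stands for the dev container's name or ID):
```shell
docker inspect <container> --format '{{.State.ExitCode}} {{.State.OOMKilled}}'
# 137 true
```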
### 3. When using Docker on WSL (Windows Subsystem for Linux) and entering dev mode via `kubevpn dev`, the terminal cannot connect to the cluster network. Why, and how do I fix it?
Answer: Docker on WSL uses the network of the Windows host, so even if the container is started in WSL, it uses the Windows network rather than the WSL network.
Solutions:
- 1): Install Docker inside WSL; do not use the Windows Docker Desktop.
- 2): Run `kubevpn connect` on the Windows host, then run `kubevpn dev` in WSL to enter dev mode.
- 3): Start a container on the Windows host, run `kubevpn connect` inside that container, then run `kubevpn dev --network container:$CONTAINER_ID` in WSL (see the sketch below).
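A sketch of option 3 (the container name `kubevpn-net` is illustrative; the docker run flags follow the DinD example above):
```shell
# On the Windows host: start a container and connect from inside it
docker run -it --privileged --name kubevpn-net -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config naison/kubevpn:latest
root@kubevpn-net:/app# kubevpn connect
# In WSL: enter dev mode, sharing that container's network
kubevpn dev deployment/authors --network container:kubevpn-net
```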
### 4. After entering dev mode via `kubevpn dev`, the container network is unreachable and the error `172.17.0.1:443 connect: connection refused` appears. How do I fix it?
Answer: most likely the Kubernetes container network CIDR conflicts with the Docker network CIDR.
Solutions:
- Use the flag `--connect-mode container` to connect from inside a container; this also solves the problem.
- Alternatively, edit `~/.docker/daemon.json` to add a non-conflicting network, e.g. `"bip": "172.15.0.1/24"`.
```shell
➜ ~ cat ~/.docker/daemon.json
{
"builder": {
"gc": {
"defaultKeepStorage": "20GB",
"enabled": true
}
},
"experimental": false,
"features": {
"buildkit": true
},
"insecure-registries": [
]
}
```
Add a non-conflicting CIDR:
```shell
➜ ~ cat ~/.docker/daemon.json
{
"builder": {
"gc": {
"defaultKeepStorage": "20GB",
"enabled": true
}
},
"experimental": false,
"features": {
"buildkit": true
},
"insecure-registries": [
],
"bip": "172.15.0.1/24"
}
```
Restart Docker, then repeat the operation.
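For example (a sketch; the `systemctl` command assumes a systemd-based Linux host, while Docker Desktop is restarted from its tray menu instead):
```shell
sudo systemctl restart docker
kubevpn connect   # then connect again
```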
### Supporters
[![JetBrains logo.](https://resources.jetbrains.com/storage/products/company/brand/logos/jetbrains.svg)](https://jb.gg/OpenSourceSupport)
TODO.MD (deleted file)
## TODO
- [x] Access the cluster network
- [x] Domain name resolution
- [x] Reverse proxy for multiple services
- [x] Short domain name resolution
- [x] Optimize the DHCP feature
- [x] Support more workload kinds, e.g. statefulset, replicaset...
- [ ] Support IPv6
- [x] Implement the SOCKS5 protocol ourselves
- [ ] Consider whether to keep the openvpn tap/tun driver as a fallback
- [x] Add TLS for better security
- [ ] Write a CNI network plugin that provides VPN functionality directly
- [x] Optimize the reconnection logic
- [x] Support service mesh
- [x] Service mesh supports multiple ports
- [x] Replace envoy with a proxy written by ourselves
- [ ] Optimize performance; consider using IPC on Windows
- [x] Write our own control plane
- [x] Consider whether to separate the control plane from the service
- [x] Write unit tests and improve the GitHub Actions
- [x] Switch Linux and macOS to the WireGuard library as well
- [x] Detect utun devices with duplicate routes and disable them (`sudo ifconfig utun1 down`)
@@ -1,5 +1,5 @@
FROM envoyproxy/envoy:v1.25.0 AS envoy
FROM golang:1.23 AS builder
ARG BASE=github.com/wencaiwulue/kubevpn
COPY . /go/src/$BASE
@@ -16,7 +16,18 @@ ARG BASE=github.com/wencaiwulue/kubevpn
RUN sed -i s@/security.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list \
&& sed -i s@/archive.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list
RUN apt-get clean && apt-get update && apt-get install -y wget dnsutils vim curl \
net-tools iptables iputils-ping lsof iproute2 tcpdump binutils traceroute conntrack socat iperf3 \
apt-transport-https ca-certificates curl
RUN if [ $(uname -m) = "x86_64" ]; then \
echo "The architecture is AMD64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && chmod +x kubectl && mv kubectl /usr/local/bin; \
elif [ $(uname -m) = "aarch64" ]; then \
echo "The architecture is ARM64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" && chmod +x kubectl && mv kubectl /usr/local/bin; \
else \
echo "Unsupported architecture."; \
fi
ENV TZ=Asia/Shanghai \
DEBIAN_FRONTEND=noninteractive
@@ -1,6 +1,6 @@
FROM golang:1.23 as delve
RUN curl --location --output delve-1.23.1.tar.gz https://github.com/go-delve/delve/archive/v1.23.1.tar.gz \
&& tar xzf delve-1.23.1.tar.gz
RUN cd delve-1.23.1 && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o /go/dlv -ldflags '-extldflags "-static"' ./cmd/dlv/
FROM busybox
COPY --from=delve /go/dlv /bin/dlv
@@ -1,4 +1,4 @@
FROM golang:1.23 AS builder
RUN go env -w GO111MODULE=on && go env -w GOPROXY=https://goproxy.cn,direct
RUN go install github.com/go-delve/delve/cmd/dlv@latest
@@ -8,7 +8,18 @@ FROM ubuntu:latest
RUN sed -i s@/security.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list \
&& sed -i s@/archive.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list
RUN apt-get clean && apt-get update && apt-get install -y wget dnsutils vim curl \
net-tools iptables iputils-ping lsof iproute2 tcpdump binutils traceroute conntrack socat iperf3 \
apt-transport-https ca-certificates curl
RUN if [ $(uname -m) = "x86_64" ]; then \
echo "The architecture is AMD64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && chmod +x kubectl && mv kubectl /usr/local/bin; \
elif [ $(uname -m) = "aarch64" ]; then \
echo "The architecture is ARM64"; \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" && chmod +x kubectl && mv kubectl /usr/local/bin; \
else \
echo "Unsupported architecture."; \
fi
ENV TZ=Asia/Shanghai \
DEBIAN_FRONTEND=noninteractive
charts/index.yaml (new file)
@@ -0,0 +1,334 @@
apiVersion: v1
entries:
kubevpn:
- apiVersion: v2
appVersion: v2.3.12
created: "2025-02-13T07:46:06.029130129Z"
description: A Helm chart for KubeVPN
digest: 0b7d9f8b4cd306377e4452a9d86530387afcae379e11665909b90e15f2d82a04
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.12/kubevpn-2.3.12.tgz
version: 2.3.12
- apiVersion: v2
appVersion: v2.3.11
created: "2025-02-03T09:24:54.033585049Z"
description: A Helm chart for KubeVPN
digest: a54a2ed19e6f4aa5c274186d6b188c0230244582055905155c4620ebe8864838
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.11/kubevpn-2.3.11.tgz
version: 2.3.11
- apiVersion: v2
appVersion: v2.3.10
created: "2025-01-24T13:36:34.489289734Z"
description: A Helm chart for KubeVPN
digest: 987b73399637eee01570492115114696fdb054074507f0d16e47d077e4ea770c
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.10/kubevpn-2.3.10.tgz
version: 2.3.10
- apiVersion: v2
appVersion: v2.3.9
created: "2024-12-21T15:29:42.173109915Z"
description: A Helm chart for KubeVPN
digest: 0f9dd91504c1d1c3149cca785f0a9d72ef860d002ee73590f41e3d8decc99365
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.9/kubevpn-2.3.9.tgz
version: 2.3.9
- apiVersion: v2
appVersion: v2.3.8
created: "2024-12-19T14:19:38.126241384Z"
description: A Helm chart for KubeVPN
digest: 84239f1bce053eaa9314e53b820ad0ba32bbc51c37dcac6ae8abd03bef6f7fd2
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.8/kubevpn-2.3.8.tgz
version: 2.3.8
- apiVersion: v2
appVersion: v2.3.7
created: "2024-12-14T17:25:08.398840622Z"
description: A Helm chart for KubeVPN
digest: 437faa6cd98e81c4ad2c1b48c9ef7a33e7d435cf6343c5cc2c88ea251b2a545b
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.7/kubevpn-2.3.7.tgz
version: 2.3.7
- apiVersion: v2
appVersion: v2.3.6
created: "2024-12-09T11:52:04.779835011Z"
description: A Helm chart for KubeVPN
digest: 7b23d14f6aea4410d68911d202199f15c88cb96cef8edbd94d4a95e9b9254bf7
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.6/kubevpn-2.3.6.tgz
version: 2.3.6
- apiVersion: v2
appVersion: v2.3.5
created: "2024-12-06T14:40:11.685095653Z"
description: A Helm chart for KubeVPN
digest: c2a85f446af834b60308b1384e6cae5662229c34370053319c0f759f650a1cb5
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.5/kubevpn-2.3.5.tgz
version: 2.3.5
- apiVersion: v2
appVersion: v2.3.4
created: "2024-11-29T13:03:24.255324387Z"
description: A Helm chart for KubeVPN
digest: 2804aa624f6139695f3fb723bdc6ba087492bcd8810baf7196a1ae88bd2a62b5
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.4/kubevpn-2.3.4.tgz
version: 2.3.4
- apiVersion: v2
appVersion: v2.3.3
created: "2024-11-22T14:54:13.795282085Z"
description: A Helm chart for KubeVPN
digest: 33cbbc9312e7b7e415fb14f80f17df50d305194617bcf75d1501227cb90b8f32
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.3/kubevpn-2.3.3.tgz
version: 2.3.3
- apiVersion: v2
appVersion: v2.3.2
created: "2024-11-18T11:52:12.076510627Z"
description: A Helm chart for KubeVPN
digest: cdb38ab84bf1649ac4280f6996060c49a095f9c056044cd5f691e7bf4f259dad
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.2/kubevpn-2.3.2.tgz
version: 2.3.2
- apiVersion: v2
appVersion: v2.3.1
created: "2024-11-15T13:36:37.056311943Z"
description: A Helm chart for KubeVPN
digest: 10c1200241309be4ec2eb88e9689ebbf96704c8fad270e6fda30047135aeccf2
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.1/kubevpn-2.3.1.tgz
version: 2.3.1
- apiVersion: v2
appVersion: v2.2.22
created: "2024-10-30T08:46:08.845218523Z"
description: A Helm chart for KubeVPN
digest: c2dc336383d7de2fb97cfd40a15e9f6c29a9a598484b88515a98bcaeb4925eda
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.22/kubevpn-2.2.22.tgz
version: 2.2.22
- apiVersion: v2
appVersion: v2.2.21
created: "2024-10-25T14:10:25.545716679Z"
description: A Helm chart for KubeVPN
digest: 98ae51247535525ff6a10b5f493d8bfc573af62759432f7aa54dd7eb6edeffd5
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.21/kubevpn-2.2.21.tgz
version: 2.2.21
- apiVersion: v2
appVersion: v2.2.20
created: "2024-10-20T04:00:07.263734809Z"
description: A Helm chart for KubeVPN
digest: 7863701dff5b3fce0795ee8e0b73044b7c88f8777c86a65adc1f5563123565dc
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.20/kubevpn-2.2.20.tgz
version: 2.2.20
- apiVersion: v2
appVersion: v2.2.19
created: "2024-10-10T00:47:08.858011096Z"
description: A Helm chart for KubeVPN
digest: be2c672081307c03b7fe6b635d524c8f3f73d70ae3316efa85e781a62c25a46d
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.19/kubevpn-2.2.19.tgz
version: 2.2.19
- apiVersion: v2
appVersion: v2.2.18
created: "2024-09-10T09:39:11.71407425Z"
description: A Helm chart for KubeVPN
digest: 2d953103425ca2a087a2d521c9297662f97b72e78cf831e947942f292bbcc643
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.18/kubevpn-2.2.18.tgz
version: 2.2.18
- apiVersion: v2
appVersion: v2.2.17
created: "2024-08-03T07:45:55.228743946Z"
description: A Helm chart for KubeVPN
digest: 476317ad82b2c59a623e1fca968c09a28554ebcabec337c1c363e7296bb27514
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.17/kubevpn-2.2.17.tgz
version: 2.2.17
- apiVersion: v2
appVersion: v2.2.16
created: "2024-07-26T13:43:50.473565863Z"
description: A Helm chart for KubeVPN
digest: 6cdb809d04687197a8defbf4349871c505ac699924833fecc210d8a6d82a9f20
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.16/kubevpn-2.2.16.tgz
version: 2.2.16
- apiVersion: v2
appVersion: v2.2.15
created: "2024-07-19T15:03:13.558586823Z"
description: A Helm chart for KubeVPN
digest: 279b24976cef25e1dd8a4cd612a7c6a5767cecd4ba386ccab80fc00db76117e7
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.15/kubevpn-2.2.15.tgz
version: 2.2.15
- apiVersion: v2
appVersion: v2.2.14
created: "2024-07-12T15:24:27.825047662Z"
description: A Helm chart for KubeVPN
digest: 52ab9b89ea3773792bf3839e4a7c23a9ea60a6c72547024dc0907c973a8d34b3
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.14/kubevpn-2.2.14.tgz
version: 2.2.14
- apiVersion: v2
appVersion: v2.2.13
created: "2024-07-05T15:08:40.140645659Z"
description: A Helm chart for KubeVPN
digest: 610c5528952826839d5636b8bd940ac907ab0e70377e37538063cb53a5f75443
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.13/kubevpn-2.2.13.tgz
version: 2.2.13
- apiVersion: v2
appVersion: v2.2.12
created: "2024-06-29T15:36:12.429229459Z"
description: A Helm chart for KubeVPN
digest: a129ac0efda2e2967937407b904d59122e7b9725fb225c0bcbfdf2260337c032
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.12/kubevpn-2.2.12.tgz
version: 2.2.12
- apiVersion: v2
appVersion: v2.2.11
created: "2024-06-21T14:13:53.982206886Z"
description: A Helm chart for KubeVPN
digest: 3a7fa4cb3e1785da68e422ef151a3c7f621fbe76862b557ae2750af70d34e1ad
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.11/kubevpn-2.2.11.tgz
version: 2.2.11
- apiVersion: v2
appVersion: v2.2.10
created: "2024-05-21T06:46:20.368800554Z"
description: A Helm chart for KubeVPN
digest: 89be252c9eedb13560224550f06270f8be88049edfb0a46ca170ab5c8c493a6c
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.10/kubevpn-2.2.10.tgz
version: 2.2.10
- apiVersion: v2
appVersion: v2.2.9
created: "2024-05-14T11:50:54.700148975Z"
description: A Helm chart for KubeVPN
digest: e94debe7c904e21f791c1e3bb877ca8132888a3bb3c53beaa74e2ff1e7dd8769
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.9/kubevpn-2.2.9.tgz
version: 2.2.9
- apiVersion: v2
appVersion: v2.2.8
created: "2024-05-03T15:50:13.647253665Z"
description: A Helm chart for KubeVPN
digest: 9e18d0d02f123e5d8f096362daa5e6893d5db1e8447a632585ae23d6ce755489
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.8/kubevpn-2.2.8.tgz
version: 2.2.8
- apiVersion: v2
appVersion: v2.2.7
created: "2024-04-27T12:11:35.594701859Z"
description: A Helm chart for KubeVPN
digest: 3828f5b20d6bf4c0c7d94654cc33fd8d7b4c5f2aa20a3cc18d18b9298f459456
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.7/kubevpn-2.2.7.tgz
version: 2.2.7
- apiVersion: v2
appVersion: v2.2.6
created: "2024-04-16T05:44:31.777079658Z"
description: A Helm chart for KubeVPN
digest: 63668930b99e6c18f6dd77a25e5ce2d21579d52a83451f58be3bc0ca32678829
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.6/kubevpn-2.2.6.tgz
version: 2.2.6
- apiVersion: v2
appVersion: v2.2.5
created: "2024-04-14T08:46:13.877936123Z"
description: A Helm chart for KubeVPN
digest: 8509aeec7584935344bdf465efd8f0d5efb58ef1b7a31fd2738e5c2790f680c4
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.5/kubevpn-2.2.5.tgz
version: 2.2.5
- apiVersion: v2
appVersion: v2.2.4
created: "2024-04-02T05:15:00.372823536Z"
description: A Helm chart for KubeVPN
digest: 07e87e648b7ad5688146a356c93c1771e94485c2fd9d5441553d94ce6371c19f
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.4/kubevpn-2.2.4.tgz
version: 2.2.4
- apiVersion: v2
appVersion: v2.2.3
created: "2024-03-03T11:52:37.856463964Z"
description: A Helm chart for KubeVPN
digest: cb1b8c210259292488548853bdeb2eb9ef4c60d1643e0d6537174349514dc8e9
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.3/kubevpn-2.2.3.tgz
version: 2.2.3
- apiVersion: v2
appVersion: v2.2.2
created: "2024-02-15T13:35:35.121411893Z"
description: A Helm chart for KubeVPN
digest: b7589312eab83e50db9ae5703a30e76f0b40fd280c81d102a823aeeb61e14c1c
name: kubevpn
type: application
urls:
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.2/kubevpn-2.2.2.tgz
version: 2.2.2
generated: "2025-02-13T07:46:06.029354826Z"
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -0,0 +1,6 @@
apiVersion: v2
name: kubevpn
description: A Helm chart for KubeVPN
type: application
version: 0.1.0
appVersion: "1.16.0"
@@ -0,0 +1,4 @@
1. Connect to cluster network by running these commands:
kubevpn connect --namespace {{ .Release.Namespace }}
export POD_IP=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kubevpn.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].status.podIP}")
ping $POD_IP
@@ -0,0 +1,63 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "kubevpn.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kubevpn.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kubevpn.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "kubevpn.labels" -}}
helm.sh/chart: {{ include "kubevpn.chart" . }}
app: kubevpn-traffic-manager
{{ include "kubevpn.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "kubevpn.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kubevpn.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "kubevpn.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "kubevpn.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
@@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "kubevpn.fullname" . }}
data:
DHCP: ""
DHCP6: ""
ENVOY_CONFIG: ""
IPv4_POOLS: "{{ .Values.cidr.pod }} {{ .Values.cidr.service }}"
REF_COUNT: "0"
@@ -0,0 +1,133 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "kubevpn.fullname" . }}
labels:
{{- include "kubevpn.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "kubevpn.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "kubevpn.labels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "kubevpn.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- args:
- |2-
sysctl -w net.ipv4.ip_forward=1
sysctl -w net.ipv6.conf.all.disable_ipv6=0
sysctl -w net.ipv6.conf.all.forwarding=1
update-alternatives --set iptables /usr/sbin/iptables-legacy
iptables -F
ip6tables -F
iptables -P INPUT ACCEPT
ip6tables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
ip6tables -P FORWARD ACCEPT
iptables -t nat -A POSTROUTING -s ${CIDR4} -o eth0 -j MASQUERADE
ip6tables -t nat -A POSTROUTING -s ${CIDR6} -o eth0 -j MASQUERADE
kubevpn serve -L "tcp://:10800" -L "tun://:8422?net=${TunIPv4}" -L "gtcp://:10801" -L "gudp://:10802" --debug=true
command:
- /bin/sh
- -c
env:
- name: CIDR4
value: 223.254.0.0/16
- name: CIDR6
value: efff:ffff:ffff:ffff::/64
- name: TunIPv4
value: 223.254.0.100/16
- name: TunIPv6
value: efff:ffff:ffff:ffff:ffff:ffff:ffff:9999/64
envFrom:
- secretRef:
name: {{ include "kubevpn.fullname" . }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: vpn
ports:
- containerPort: {{ .Values.service.port8422 }}
name: 8422-for-udp
protocol: UDP
- containerPort: {{ .Values.service.port10800 }}
name: 10800-for-tcp
protocol: TCP
resources:
{{- toYaml .Values.resources | nindent 12 }}
securityContext:
capabilities:
add:
- NET_ADMIN
privileged: true
runAsUser: 0
- args:
- control-plane
- --watchDirectoryFilename
- /etc/envoy/envoy-config.yaml
command:
- kubevpn
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: control-plane
ports:
- containerPort: {{ .Values.service.port9002 }}
name: 9002-for-envoy
protocol: TCP
resources:
{{- toYaml .Values.resourcesSmall | nindent 12 }}
volumeMounts:
- mountPath: /etc/envoy
name: envoy-config
readOnly: true
- args:
- webhook
command:
- kubevpn
envFrom:
- secretRef:
name: {{ include "kubevpn.fullname" . }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: webhook
ports:
- containerPort: 80
name: 80-for-webhook
protocol: TCP
resources:
{{- toYaml .Values.resourcesSmall | nindent 12 }}
{{- with .Values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
@@ -0,0 +1,32 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "kubevpn.fullname" . }}
labels:
{{- include "kubevpn.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "kubevpn.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}
@@ -0,0 +1,72 @@
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "kubevpn.fullname" . }}
labels:
{{- include "kubevpn.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": post-install
"helm.sh/hook-delete-policy": before-hook-creation
spec:
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "kubevpn.labels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
restartPolicy: Never
serviceAccountName: {{ include "kubevpn.serviceAccountName" . }}
containers:
- name: label-ns
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- /bin/bash
- -c
args:
- |2-
echo "Label namespace {{ .Release.Namespace }}"
kubectl label ns {{ .Release.Namespace }} ns={{ .Release.Namespace }}
echo "Generating https certificate"
openssl req -x509 -nodes -days 36500 -newkey rsa:2048 -subj "/CN={{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}.svc" -addext "subjectAltName=DNS:{{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local,DNS:{{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}.svc" -keyout server.key -out server.crt
export TLS_CRT=$(cat server.crt | base64 | tr -d '\n')
echo "Patch mutatingwebhookconfigurations {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}"
kubectl patch mutatingwebhookconfigurations {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }} -p "{\"webhooks\":[{\"name\":\"{{ include "kubevpn.fullname" . }}.naison.io\",\"sideEffects\":\"None\",\"admissionReviewVersions\":[\"v1\", \"v1beta1\"],\"clientConfig\":{\"service\":{\"namespace\":\"{{ .Release.Namespace }}\",\"name\":\"{{ include "kubevpn.fullname" . }}\"},\"caBundle\":\"$TLS_CRT\"}}]}"
export TLS_KEY=$(cat server.key | base64 | tr -d '\n')
echo "Patch secret {{ include "kubevpn.fullname" . }}"
kubectl patch secret {{ include "kubevpn.fullname" . }} -n {{ .Release.Namespace }} -p "{\"data\":{\"tls_key\":\"$TLS_KEY\",\"tls_crt\":\"$TLS_CRT\"}}"
echo "Restart the pods..."
kubectl scale -n {{ .Release.Namespace }} --replicas=0 deployment/{{ include "kubevpn.fullname" . }}
kubectl scale -n {{ .Release.Namespace }} --replicas=1 deployment/{{ include "kubevpn.fullname" . }}
export POOLS=$(kubectl get cm {{ include "kubevpn.fullname" . }} -n {{ .Release.Namespace }} -o jsonpath='{.data.IPv4_POOLS}')
if [[ -z "${POOLS// }" ]];then
echo "Cidr is empty"
echo "Get pod cidr..."
export POD_CIDR=$(kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}' | tr -s '\n' ' ')
echo "Get service cidr..."
export SVC_CIDR=$(echo '{"apiVersion":"v1","kind":"Service","metadata":{"name":"kubevpn-get-svc-cidr-{{ .Release.Namespace }}", "namespace": "{{ .Release.Namespace }}"},"spec":{"clusterIP":"1.1.1.1","ports":[{"port":443}]}}' | kubectl apply -f - 2>&1 | sed 's/.*valid IPs is //')
echo "Pod cidr: $POD_CIDR, service cidr: $SVC_CIDR"
echo "Patch configmap {{ include "kubevpn.fullname" . }}"
kubectl patch configmap {{ include "kubevpn.fullname" . }} -n {{ .Release.Namespace }} -p "{\"data\":{\"IPv4_POOLS\":\"$POD_CIDR $SVC_CIDR\"}}"
else
echo "Cidr is NOT empty"
fi
echo "Done~"
exit 0
@@ -0,0 +1,36 @@
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
webhooks:
- admissionReviewVersions:
- v1
- v1beta1
clientConfig:
caBundle: {{ .Values.tls.crt }}
service:
name: {{ include "kubevpn.fullname" . }}
namespace: {{ .Release.Namespace }}
path: /pods
port: 80
failurePolicy: Ignore
matchPolicy: Equivalent
name: {{ include "kubevpn.fullname" . }}.naison.io
namespaceSelector:
matchLabels:
ns: {{ .Release.Namespace }}
objectSelector: { }
reinvocationPolicy: Never
rules:
- apiGroups:
- ""
apiVersions:
- v1
operations:
- CREATE
- DELETE
resources:
- pods
scope: Namespaced
sideEffects: None
timeoutSeconds: 15
@@ -0,0 +1,69 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "kubevpn.fullname" . }}
rules:
- apiGroups:
- ""
resourceNames:
- {{ include "kubevpn.fullname" . }}
resources:
- configmaps
- secrets
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups: [ "" ]
resources: [ "namespaces" ]
resourceNames: [{{ .Release.Namespace }}]
verbs:
- get
- patch
- apiGroups: [ "apps" ]
resources: [ "deployments/scale", "deployments" ]
resourceNames:
- {{ include "kubevpn.fullname" . }}
verbs:
- get
- update
- patch
- list
- apiGroups:
- ""
resources:
- services
verbs:
- create
- get
- update
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
resourceNames:
- {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
verbs:
- get
- list
- patch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
@@ -0,0 +1,26 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "kubevpn.fullname" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "kubevpn.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ include "kubevpn.fullname" . }}
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
subjects:
- kind: ServiceAccount
name: {{ include "kubevpn.fullname" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
apiGroup: rbac.authorization.k8s.io
@@ -0,0 +1,8 @@
apiVersion: v1
data:
tls_crt: {{ .Values.tls.crt }}
tls_key: {{ .Values.tls.key }}
kind: Secret
metadata:
name: {{ include "kubevpn.fullname" . }}
type: Opaque
@@ -0,0 +1,31 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "kubevpn.fullname" . }}
labels:
{{- include "kubevpn.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- name: 8422-for-udp
port: {{ .Values.service.port8422 }}
protocol: UDP
targetPort: 8422
- name: 10800-for-tcp
port: {{ .Values.service.port10800 }}
protocol: TCP
targetPort: 10800
- name: 9002-for-envoy
port: {{ .Values.service.port9002 }}
protocol: TCP
targetPort: 9002
- name: 80-for-webhook
port: {{ .Values.service.port80 }}
protocol: TCP
targetPort: 80
- name: 53-for-dns
port: {{ .Values.service.port53 }}
protocol: UDP
targetPort: 53
selector:
{{- include "kubevpn.selectorLabels" . | nindent 4 }}
@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "kubevpn.serviceAccountName" . }}
labels:
{{- include "kubevpn.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}
charts/kubevpn/values.yaml (new file)
@@ -0,0 +1,113 @@
# Default values for kubevpn.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: naison/kubevpn
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: [ ]
nameOverride: ""
fullnameOverride: "kubevpn-traffic-manager"
# this field takes effect if configured;
# if not configured, the value is fetched from the cluster automatically
cidr:
pod: ""
service: ""
tls:
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURXVENDQWtHZ0F3SUJBZ0lJU0NmUDdHeHVhUkl3RFFZSktvWklodmNOQVFFTEJRQXdNREV1TUN3R0ExVUUKQXd3bGEzVmlaWFp3YmkxMGNtRm1abWxqTFcxaGJtRm5aWEl0WTJGQU1UY3dOamsyTnpjd01EQWVGdzB5TkRBeQpNRE14TWpReE5EQmFGdzB5TlRBeU1ESXhNalF4TkRCYU1DMHhLekFwQmdOVkJBTU1JbXQxWW1WMmNHNHRkSEpoClptWnBZeTF0WVc1aFoyVnlRREUzTURZNU5qYzNNREF3Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXcKZ2dFS0FvSUJBUURzVnNleEVpVG00dmlleUhEeU5SbldKbXNiaFBWV24yTkgvNi9wUGVBT3ZUbXgwSDdHUnZJLwpzMzVoZW9EWExhdFVmaDlXT1hXdzRqaGZsdUdWQWlzZGs2Y2ZkS1hVVzJheXpRbFpZd1ZMTzdUUHFoeWF0UHVpCmpRYVB2bUErRGNYMHJRc2Y3SFJwVWhjVTJ1QTJ4WGhZNy9QWWFUdzhkU0NTTHFTK2ZLM3poc0lONTFrYnIzdG4KU2FKcWFybDNhSU82N1JvdmNZbmxERG9XTzFwS1ZSUmROVkM1anVtREJOSWdOam5TSTY5QTFydzR0REkwdjcxWQpPRmhjYnUwNnFVdkNNU1JzR3F5ZkhOeUlXakVvcnk4Wk0xVExlcnZhTk12WlFTRndRNk5SRExHYXNlbTBlNTRXCmVublA0OVpIR1FhTjllYnJQSkJuL2pQQ3p0NlFDMkg5QWdNQkFBR2plakI0TUE0R0ExVWREd0VCL3dRRUF3SUYKb0RBVEJnTlZIU1VFRERBS0JnZ3JCZ0VGQlFjREFUQU1CZ05WSFJNQkFmOEVBakFBTUI4R0ExVWRJd1FZTUJhQQpGQVA3WmhvcGsvbEc3MVNCMk42QkpKdDI2eXhuTUNJR0ExVWRFUVFiTUJtQ0YydDFZbVYyY0c0dGRISmhabVpwCll5MXRZVzVoWjJWeU1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQVhYWk1WazhhQWwwZTlqUWRQTDc3ZVZOL3kKY1ZZZzRBVDlhdkh0UXV2UkZnOU80Z3JMaFVDQnoyN25wdlZZcHNMbmdEMTFRTXpYdHlsRDNMNDJNQ3V0Wnk5VQorL1BCL291ajQzWkZUckJDbk9DZDl6elE2MXZSL1RmbUFrTUhObTNZYjE1OGt2V0ZhNVlBdytRVi9vRDNUcGlXClREVTZXNkxvRFg5N0lNSFk0L3VLNTNzbXVLMjh5VzduSVVrbnpqN3h5UzVOWTFZaVNUN0w2ZFZ0VVppR1FUK00KRk16ODVRcTJOTWVXU1lKTmhhQVk5WEpwMXkrcEhoeWpPVFdjSEFNYmlPR29mODM5N1R6YmUyWHdNQ3BGMWc5NwpMaHZERnNsNzcyOWs1NFJVb1d2ZjFIVFFxL2R6cVBQTTNhWGpTbXFWUEV2Zk5qeGNhZnFnNHBaRmdzYzEKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJREpEQ0NBZ3lnQXdJQkFnSUlJMmROaFBKY0Uxc3dEUVlKS29aSWh2Y05BUUVMQlFBd01ERXVNQ3dHQTFVRQpBd3dsYTNWaVpYWndiaTEwY21GbVptbGpMVzFoYm1GblpYSXRZMkZBTVRjd05qazJOemN3TURBZUZ3MHlOREF5Ck1ETXhNalF4TkRCYUZ3MHlOVEF5TURJeE1qUXhOREJhTURBeExqQXNCZ05WQkFNTUpXdDFZbVYyY0c0dGRISmgKWm1acFl5MXRZVzVoWjJWeUxXTmhRREUzTURZNU5qYzNNREF3Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQgpEd0F3Z2dFS0FvSUJBUURBQVpBdEZaTzJEZG9BVTUxWnRiVjI0QkVGN3RkakMzTzBPdEE2UURYTlVwNWlZZGdjCjdORVlGZE55YXltTWZMUVFGTWZqZFcxNWpDQ0N4KzFFMm1KQTVZa0ZFcXJTeDA3Z1pkKy9hcU13ZkhDT0ZTM0UKSUROdzBKYlBGVHZuSGsyZHVXby8zT1BnVmpONWw2UTBWaE10WkJEc2haVHVvSUhWaTJZcldDdnNkMU9mWFVyMwo0Y0ZJUkJ2OW5mNDIzdWthajYxdisrRDd6K3Y0bEN4R0JtUDhpYXFaNFVlckxIdWF2N1hQUnZ4QmQzNDBGY2diCm5TZVUxTXZmcTgvOUg4VTRzeWRGaUpZVUs1RFhkWU15NEw0RlMvbXZRaWR1TU5lWUw1Y2xHSXZTNGFzQjl2QlMKM0ZIY1IrQk1xVzFQWUdDc2YyL0RvdVNRVVNhcnB5VU5aczZKQWdNQkFBR2pRakJBTUE0R0ExVWREd0VCL3dRRQpBd0lDcERBUEJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJRRCsyWWFLWlA1UnU5VWdkamVnU1NiCmR1c3NaekFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBVGFNR0NLK2YxSmdKaXplVjlla3ZhckhDZHpmZzJNZkQKV2pCeFUzMXNabE1vZU9GS0hPdndjMVliTzVNTStHTGM0bGhMS2VHV1pwQmVRV0lFamo4V01wa3k2M2VtUUl5eQpOT2hjdVBUTFhCQ0JkS1lhUU1LcU9mN3c4MEw2cVRKclFER0t0a0MxVzEwbFJzbUd0TEtBbDVjU0w4VFRSZVhXCjhiNXRGOFd5Yms1Vm12VWtxdEpkSVNJTjdVOG5nV21WRUVOZFcvckNqclI5TllaSXZBZk9mS1Zrc1JuZEJaQ0kKOXdxVUI2K2JITEJBWjNpV293ZFhpRGhLMSt5Z2ZwNnpUcW9LRmxOWi8rRTNkS0tpbStyZFFGSmIvNTNvU2xaaApwMkVkT1ZNYU1mRjh1ZFhDdE44WjZnVHpPWkJxN1pmWjVpMlU1eFQ2aFNxRjFjT1ZuQS9idmc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBN0ZiSHNSSWs1dUw0bnNodzhqVVoxaVpyRzRUMVZwOWpSLyt2NlQzZ0RyMDVzZEIrCnhrYnlQN04rWVhxQTF5MnJWSDRmVmpsMXNPSTRYNWJobFFJckhaT25IM1NsMUZ0bXNzMEpXV01GU3p1MHo2b2MKbXJUN29vMEdqNzVnUGczRjlLMExIK3gwYVZJWEZOcmdOc1Y0V08vejJHazhQSFVna2k2a3ZueXQ4NGJDRGVkWgpHNjk3WjBtaWFtcTVkMmlEdXUwYUwzR0o1UXc2Rmp0YVNsVVVYVFZRdVk3cGd3VFNJRFk1MGlPdlFOYThPTFF5Ck5MKzlXRGhZWEc3dE9xbEx3akVrYkJxc254emNpRm94S0s4dkdUTlV5M3E3MmpUTDJVRWhjRU9qVVF5eG1ySHAKdEh1ZUZucDV6K1BXUnhrR2pmWG02enlRWi80endzN2VrQXRoL1FJREFRQUJBb0lCQVFEWkRaWVdsS0JaZ0Nodgp3NHlmbFk4bDgyQzVCSEpCM041VWVJbjVmejh3cWk2N2xNMXBraXpYdmlTYXArUitPczQ0S2lEamtwLzVGTHBMCmFBbkRUUnVGN1Y0MmNHNEFTdlZWenlMLytnWVpvenNhNFpPbHJnUFF0UTVLbzhCR0hXWXBvV2N2S1gxOFlNMGIKOVN5b2dORlhkUUNSUjR6dnhXNWxjdnNRaXZkRFNFTUJhbW00bFpEM0ZtUm5HVGlpaUVNSis2SFdlR1lBS1RMSgoxN0NnejZaWjg1bGtUZ0dxeEUrWkQwNDJGYWdJZlJORVI0QmZOMlp6NU5CU3RnMTJFdUpXWmRGcWpxSHlwbnNjCjNjbEd0U1Z5VStvWUFUWnV5Y2VMNVIwZUdzdTB6ZHhLT3ZzSm9yVWZ0dlMrUGovclJxWHVjOVdXSkFLU1FDVm0Ka1I1Y2M4ak5Bb0dCQU8wYkVrNTdtZWYwcXNKT0U3TFlFV1hRRFZiTmhnZ1E2eTlBZlNnVjZDMFFDdC93YkVGaQo0Rm41bTdhSHdqZUJ5OFJnMGhGbTdVNThCb3FyNnBQNFp6MEhwY1ZSTmVLeTF6R0wreFRJRXFnTXQxei9TYVE0CkIwWEZ4Ulg3d2pjeit2OC9GOVdsOElLbHhBWjhxNXd6aHNFUVVYcVIxTzF1T2FjRktSdXg3OU1UQW9HQkFQOHMKRVJBa1R3WEV3UU9ya2dQOW5tTHZLYXMwM0J6UXUrOFBtQWlsOGFmbVR5ZEFWdzJKaHBwOFlUQzl6NDM3VXU4Ngpta2lOVHpRL3MvQ1lCNEdJVVFCMEdlTDJtc2VjdWNaUHhTSW10dElSOWY4bjk2NEpuL3RtVUd4VXRFaWhWdER4ClZCdFBiWmNzc2E5VVVCRFVqRnZJSUdPTGlqSVdxbW8zM3htT0tJaXZBb0dCQU5HV2k0RWFtdnBCK1N1V3JxejUKZDYrQzBEZTVwcys4Zk5nZzdrRWYxRUw1R2xQSGh6bnBPQjN3bWFjb3JCSTZ4cTlKVW9lVmJ4RmdhcnZycVlpeApIRGtEYUpKWjdnTDlTV0YvdGlzeGkrUkdrVk5BU28xQ0JaTzBkVG13ZUlZcGlhWlUxREhENUN6b2NMVzNRRTdyCjhTTDUxTHcrNm5RU2FoM3NYdUVmVWJwSEFvR0JBTk1FNlROMUkxaDg1cldYVEJBNnk2RzdjTFVoNktsM3dRTW8KM1N6QnRyK0h5WXVIUExaNE5iVktDTUhiSm1xZkhXMnpBK1hkM2xNeUh5ZG5Ra1hQcWxUNnJuR3dTRDJ0RVVDNwp0U1hSNkR4L0YvVWpZME1zdUgyWmxnYVFZZXJ5YWE0dTlNUUZBbmNUUWZuaGVya0FYUGFGNEtzUnVYNUVtamR1Cjd2UGVTUTBIQW9HQUM0ZlJmZnFFM3RRdWxSeUJVeHhKNHlPaWJiVlpCV1hxWHRzMU0wczdsZ1YxaGVrYis1VmMKVTZ3MFh2T0pTaEZPaGF6UVdseVZUejhmSVdSa1BXa2MzSzE1dWx6cmh6NWZVa0dYOEw0OGMrTHlaSzZ1M2ZRVgpyL1pRV3JsYlZSWlhRVGhuaGhOM1Jodm96SlZZV0lpckVyMGp3VmRaQWRUYW1XZEpTQ3J4WE1NPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
serviceAccount:
# Specifies whether a service account should be created
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: { }
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: { }
podLabels:
podSecurityContext: { }
# fsGroup: 2000
securityContext: { }
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port8422: 8422
port9002: 9002
port10800: 10800
port80: 80
port53: 53
resources:
limits:
cpu: "2"
memory: 2Gi
requests:
cpu: 500m
memory: 512Mi
resourcesSmall:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 1
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
# Additional volumes on the output Deployment definition.
volumes:
- configMap:
defaultMode: 420
items:
- key: ENVOY_CONFIG
path: envoy-config.yaml
name: kubevpn-traffic-manager
optional: false
name: envoy-config
# Additional volumeMounts on the output Deployment definition.
volumeMounts: [ ]
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
nodeSelector: { }
tolerations: [ ]
affinity: { }
cmd/kubevpn/cmds/alias.go (new file)
@@ -0,0 +1,204 @@
package cmds
import (
"errors"
"fmt"
"io"
"os"
"os/exec"
"strings"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/util/sets"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
yaml "sigs.k8s.io/yaml/goyaml.v3"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
// CmdAlias
/**
Name: test
Description: this is a test environment
Needs: test1
Flags:
- connect
- --kubeconfig=~/.kube/config
- --namespace=test
- --lite
---
Name: test1
Description: this is another test environment
Flags:
- connect
- --kubeconfig=~/.kube/jumper_config
- --namespace=test
- --extra-hosts=xxx.com
*/
func CmdAlias(f cmdutil.Factory) *cobra.Command {
var localFile, remoteAddr string
cmd := &cobra.Command{
Use: "alias",
Short: i18n.T("Config file alias to execute command simply"),
Long: templates.LongDesc(i18n.T(`
Config file alias to execute command simply, just like ssh alias config
It will read ~/.kubevpn/config.yaml file as config, also support special file path
by flag -f. It also supports depends relationship, like one cluster api server needs to
access via another cluster, you can use syntax needs. it will do action to needs cluster first
and then do action to target cluster
`)),
Example: templates.Examples(i18n.T(`
If you have following config in your ~/.kubevpn/config.yaml
Name: dev
Needs: jumper
Flags:
- connect
- --kubeconfig=~/.kube/config
- --namespace=default
- --lite
---
Name: jumper
Flags:
- connect
- --kubeconfig=~/.kube/jumper_config
- --namespace=test
- --extra-hosts=xxx.com
Config file support three field: Name,Needs,Flags
# Use kubevpn alias config to simply execute command, connect to cluster network by order: jumper --> dev
kubevpn alias dev
# kubevpn alias jumper, just connect to cluster jumper
kubevpn alias jumper
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
if localFile != "" {
_, err = os.Stat(localFile)
}
return err
},
Args: cobra.MatchAll(cobra.ExactArgs(1)),
RunE: func(cmd *cobra.Command, args []string) error {
configs, err := ParseAndGet(localFile, remoteAddr, args[0])
if err != nil {
return err
}
name, err := os.Executable()
if err != nil {
return err
}
for _, conf := range configs {
c := exec.Command(name, conf.Flags...)
c.Stdout = os.Stdout
c.Stdin = os.Stdin
c.Stderr = os.Stderr
fmt.Println(fmt.Sprintf("Name: %s", conf.Name))
if conf.Description != "" {
fmt.Println(fmt.Sprintf("Description: %s", conf.Description))
}
fmt.Println(fmt.Sprintf("Command: %v", c.Args))
err = c.Run()
if err != nil {
return err
}
}
return nil
},
}
cmd.Flags().StringVarP(&localFile, "file", "f", config.GetConfigFilePath(), "Config file location")
cmd.Flags().StringVarP(&remoteAddr, "remote", "r", "", "Remote config file, eg: https://raw.githubusercontent.com/kubenetworks/kubevpn/master/pkg/config/config.yaml")
return cmd
}
func ParseAndGet(localFile, remoteAddr string, aliasName string) ([]Config, error) {
var content []byte
var err error
var path string
if localFile != "" {
path = localFile
content, err = os.ReadFile(path)
} else if remoteAddr != "" {
path = remoteAddr
content, err = util.DownloadFileStream(path)
} else {
path = config.GetConfigFilePath()
content, err = os.ReadFile(path)
}
if err != nil {
return nil, err
}
list, err := ParseConfig(content)
if err != nil {
return nil, err
}
configs, err := GetConfigs(list, aliasName)
if err != nil {
return nil, err
}
if len(configs) == 0 {
var names []string
for _, c := range list {
if c.Name != "" {
names = append(names, c.Name)
}
}
err = errors.New(fmt.Sprintf("Can't find any alias for the name: '%s', available: \n[\"%s\"]\nPlease check config file: %s", aliasName, strings.Join(names, "\", \""), path))
return nil, err
}
return configs, nil
}
func ParseConfig(file []byte) ([]Config, error) {
decoder := yaml.NewDecoder(strings.NewReader(string(file)))
var configs []Config
for {
var cfg Config
err := decoder.Decode(&cfg)
if err != nil {
if err == io.EOF {
break
}
return nil, err
}
configs = append(configs, cfg)
}
return configs, nil
}
func GetConfigs(configs []Config, name string) ([]Config, error) {
m := make(map[string]Config)
for _, config := range configs {
m[config.Name] = config
}
var result []Config
var set []string
for !sets.New[string](set...).Has(name) {
config, ok := m[name]
if ok {
result = append([]Config{config}, result...)
set = append(set, name)
name = config.Needs
if name == "" {
return result, nil
}
} else {
return result, nil
}
}
return nil, fmt.Errorf("loop jump detected: %s. verify your configuration", strings.Join(append(set, name), " -> "))
}
type Config struct {
Name string `yaml:"Name"`
Description string `yaml:"Description"`
Needs string `yaml:"Needs,omitempty"`
Flags []string `yaml:"Flags,omitempty"`
}
@@ -0,0 +1,227 @@
package cmds
import (
"log"
"reflect"
"testing"
)
func TestAlias(t *testing.T) {
str := `Name: test
Needs: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
---
Name: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com`
_, err := ParseConfig([]byte(str))
if err != nil {
log.Fatal(err)
}
}
func TestCheckLoop(t *testing.T) {
str := `Name: test
Needs: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
---
Name: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com`
_, err := ParseConfig([]byte(str))
if err != nil {
log.Fatal(err)
}
}
func TestLoop(t *testing.T) {
data := []struct {
Config string
Run string
ExpectError bool
ExpectOrder []string
}{
{
Config: `
Name: test
Needs: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
---
Name: test1
Needs: test
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com
`,
Run: "test",
ExpectError: true,
ExpectOrder: nil,
},
{
Config: `
Name: test
Needs: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
---
Name: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com
`,
Run: "test",
ExpectError: false,
ExpectOrder: []string{"test1", "test"},
},
{
Config: `
Name: test
Needs: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
---
Name: test1
Needs: test2
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com
`,
Run: "test",
ExpectError: false,
ExpectOrder: []string{"test1", "test"},
},
{
Config: `
Name: test
Needs: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
---
Name: test1
Needs: test2
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com
---
Name: test2
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com
`,
Run: "test",
ExpectError: false,
ExpectOrder: []string{"test2", "test1", "test"},
},
{
Config: `
Name: test
Needs: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
---
Name: test1
Needs: test2
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com
---
Name: test2
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com
`,
Run: "test2",
ExpectError: false,
ExpectOrder: []string{"test2"},
},
{
Config: `
Name: test
Needs: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
---
Name: test1
Needs: test2
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com
---
Name: test2
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com
`,
Run: "test1",
ExpectError: false,
ExpectOrder: []string{"test2", "test1"},
},
}
for _, datum := range data {
configs, err := ParseConfig([]byte(datum.Config))
if err != nil {
t.Fatal(err)
}
getConfigs, err := GetConfigs(configs, datum.Run)
if err != nil && !datum.ExpectError {
log.Fatal(err)
} else if err != nil {
}
if datum.ExpectError {
continue
}
var c []string
for _, config := range getConfigs {
c = append(c, config.Name)
}
if !reflect.DeepEqual(c, datum.ExpectOrder) {
log.Fatalf("Not match, expect: %v, real: %v", datum.ExpectOrder, c)
}
}
}

View File

@@ -2,9 +2,9 @@ package cmds
import (
"fmt"
"io"
"os"
pkgerr "github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
@@ -14,63 +14,77 @@ import (
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/daemon"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
)
// CmdClone operates across clusters: it can start up a copy of one deployment in another cluster
// kubectl exec POD_NAME -c CONTAINER_NAME /sbin/killall5 or ephemeralcontainers
func CmdClone(f cmdutil.Factory) *cobra.Command {
var options = handler.CloneOptions{}
var sshConf = &util.SshConfig{}
var sshConf = &pkgssh.SshConfig{}
var extraRoute = &handler.ExtraRouteInfo{}
var transferImage bool
var syncDir string
var imagePullSecretName string
cmd := &cobra.Command{
Use: "clone",
Short: i18n.T("Clone workloads to target-kubeconfig cluster with same volume、env、and network"),
Long: templates.LongDesc(i18n.T(`Clone workloads to target-kubeconfig cluster with same volume、env、and network`)),
Short: i18n.T("Clone workloads to run in target-kubeconfig cluster with same volume、env、and network"),
Long: templates.LongDesc(i18n.T(`
Clone workloads to run into target-kubeconfig cluster with same volume、env、and network
In this way, you can startup another deployment in same cluster or not, but with different image version,
it also supports service mesh proxy. only traffic with special header will hit to cloned_resource.
`)),
Example: templates.Examples(i18n.T(`
# clone
- clone deployment in current cluster and current namespace
- clone deployment to run in current cluster and current namespace
kubevpn clone deployment/productpage
- clone deployment in current cluster with different namespace
- clone deployment to run in current cluster with different namespace
kubevpn clone deployment/productpage -n test
- clone deployment to another cluster
- clone deployment to run in another cluster
kubevpn clone deployment/productpage --target-kubeconfig ~/.kube/other-kubeconfig
- clone multiple workloads
- clone multiple workloads to run in current cluster and current namespace
kubevpn clone deployment/authors deployment/productpage
or
kubevpn clone deployment authors productpage
# clone with mesh, traffic with header a=1, will hit cloned workloads, otherwise hit origin workloads
kubevpn clone deployment/productpage --headers a=1
# clone with mesh, traffic with header foo=bar, will hit cloned workloads, otherwise hit origin workloads
kubevpn clone deployment/productpage --headers foo=bar
# clone workloads which api-server behind of bastion host or ssh jump host
kubevpn clone deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem --headers a=1
kubevpn clone deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem --headers foo=bar
# it also support ProxyJump, like
# It also supports ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn clone service/productpage --ssh-alias <alias> --headers a=1
kubevpn clone service/productpage --ssh-alias <alias> --headers foo=bar
# Support ssh auth GSSAPI
kubevpn clone service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
kubevpn clone service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
kubevpn clone service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`)),
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
// not supported temporarily
if options.Engine == config.EngineGvisor {
return fmt.Errorf(`unsupported engine type: %s, supported ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
}
util.InitLoggerForClient(false)
// startup daemon process and sudo process
return daemon.StartupDaemon(cmd.Context())
err = daemon.StartupDaemon(cmd.Context())
if err != nil {
return err
}
if transferImage {
err = regctl.TransferImageWithRegctl(cmd.Context(), config.OriginImage, config.Image)
}
return err
},
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
@@ -85,19 +99,37 @@ func CmdClone(f cmdutil.Factory) *cobra.Command {
// special empty string, eg: --target-registry ""
options.IsChangeTargetRegistry = cmd.Flags().Changed("target-registry")
bytes, ns, err := util.ConvertToKubeconfigBytes(f)
if syncDir != "" {
local, remote, err := util.ParseDirMapping(syncDir)
if err != nil {
return pkgerr.Wrapf(err, "options 'sync' is invalid, %s", syncDir)
}
options.LocalDir = local
options.RemoteDir = remote
} else {
options.RemoteDir = config.DefaultRemoteDir
}
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
if err != nil {
return err
}
if !sshConf.IsEmpty() {
if ip := util.GetAPIServerFromKubeConfigBytes(bytes); ip != nil {
extraRoute.ExtraCIDR = append(extraRoute.ExtraCIDR, ip.String())
}
}
logLevel := log.InfoLevel
if config.Debug {
logLevel = log.DebugLevel
}
req := &rpc.CloneRequest{
KubeconfigBytes: string(bytes),
Namespace: ns,
Headers: options.Headers,
Workloads: args,
ExtraCIDR: options.ExtraCIDR,
ExtraDomain: options.ExtraDomain,
UseLocalDNS: options.UseLocalDNS,
OriginKubeconfigPath: util.GetKubeconfigPath(f),
ExtraRoute: extraRoute.ToRPC(),
OriginKubeconfigPath: util.GetKubeConfigPath(f),
Engine: string(options.Engine),
SshJump: sshConf.ToRPC(),
TargetKubeconfig: options.TargetKubeconfig,
@@ -108,44 +140,39 @@ func CmdClone(f cmdutil.Factory) *cobra.Command {
IsChangeTargetRegistry: options.IsChangeTargetRegistry,
TransferImage: transferImage,
Image: config.Image,
Level: int32(log.DebugLevel),
ImagePullSecretName: imagePullSecretName,
Level: int32(logLevel),
LocalDir: options.LocalDir,
RemoteDir: options.RemoteDir,
}
cli := daemon.GetClient(false)
resp, err := cli.Clone(cmd.Context(), req)
if err != nil {
return err
}
for {
recv, err := resp.Recv()
if err == io.EOF {
break
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
err = util.PrintGRPCStream[rpc.CloneResponse](resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
} else if err != nil {
return err
}
fmt.Fprint(os.Stdout, recv.GetMessage())
return err
}
util.Print(os.Stdout, "Now clone workloads running successfully on other cluster, enjoy it :)")
util.Print(os.Stdout, config.Slogan)
return nil
},
}
cmd.Flags().StringToStringVarP(&options.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to clone workloads, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to clone workloads, format is k=v, like: k1=v1,k2=v2")
cmd.Flags().BoolVar(&config.Debug, "debug", false, "Enable debug mode or not, true or false")
cmd.Flags().StringVar(&config.Image, "image", config.Image, "Use this image to startup container")
cmd.Flags().StringArrayVar(&options.ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
cmd.Flags().StringArrayVar(&options.ExtraDomain, "extra-domain", []string{}, "Extra domain string, the resolved ip will add to route table, eg: --extra-domain test.abc.com --extra-domain foo.test.com")
cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
cmd.Flags().StringVar((*string)(&options.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
cmd.Flags().BoolVar(&options.UseLocalDNS, "use-localdns", false, "if use-lcoaldns is true, kubevpn will start coredns listen at 53 to forward your dns queries. only support on linux now")
cmd.Flags().StringToStringVarP(&options.Headers, "headers", "H", map[string]string{}, "Traffic with special headers (use `and` to match all headers) with reverse it to target cluster cloned workloads, If not special, redirect all traffic to target cluster cloned workloads. eg: --headers foo=bar --headers env=dev")
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName, &options.Engine)
cmd.Flags().StringVar(&options.TargetImage, "target-image", "", "Clone container use this image to startup container, if not special, use origin image")
cmd.Flags().StringVar(&options.TargetContainer, "target-container", "", "Clone container use special image to startup this container, if not special, use origin image")
cmd.Flags().StringVar(&options.TargetNamespace, "target-namespace", "", "Clone workloads in this namespace, if not special, use origin namespace")
cmd.Flags().StringVar(&options.TargetKubeconfig, "target-kubeconfig", "", "Clone workloads will create in this cluster, if not special, use origin cluster")
cmd.Flags().StringVar(&options.TargetRegistry, "target-registry", "", "Clone workloads will create this registry domain to replace origin registry, if not special, use origin registry")
cmd.Flags().StringVar(&syncDir, "sync", "", "Sync local dir to remote pod dir. format: LOCAL_DIR:REMOTE_DIR, eg: ~/code:/app/code")
addSshFlags(cmd, sshConf)
handler.AddExtraRoute(cmd.Flags(), extraRoute)
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
cmd.ValidArgsFunction = utilcomp.ResourceTypeAndNameCompletionFunc(f)
return cmd
}
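The --sync flag above is parsed by util.ParseDirMapping into a LOCAL_DIR:REMOTE_DIR pair. A rough sketch of such a parser, under the assumption that it splits on the last colon (so a Windows drive letter like C:\code stays inside LOCAL_DIR); not the project's actual implementation, and it assumes fmt and strings are imported:
// Hypothetical sketch of parsing "LOCAL_DIR:REMOTE_DIR" (eg: ~/code:/app/code).
func parseDirMapping(s string) (local, remote string, err error) {
	idx := strings.LastIndex(s, ":")
	if idx <= 0 || idx == len(s)-1 {
		return "", "", fmt.Errorf("invalid dir mapping %q, expected LOCAL_DIR:REMOTE_DIR", s)
	}
	return s[:idx], s[idx+1:], nil
}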

View File

@@ -9,9 +9,10 @@ import (
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/daemon"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdConfig(f cmdutil.Factory) *cobra.Command {
@@ -25,27 +26,27 @@ func CmdConfig(f cmdutil.Factory) *cobra.Command {
}
func cmdConfigAdd(f cmdutil.Factory) *cobra.Command {
var sshConf = &util.SshConfig{}
var sshConf = &pkgssh.SshConfig{}
cmd := &cobra.Command{
Use: "add",
Short: "Proxy kubeconfig",
Short: i18n.T("Proxy kubeconfig"),
Long: templates.LongDesc(i18n.T(`proxy a kubeconfig which is behind an ssh jump server`)),
Example: templates.Examples(i18n.T(`
# proxy api-server which api-server behind of bastion host or ssh jump host
kubevpn config add --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# it also support ProxyJump, like
# It also supports ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn config add --ssh-alias <alias>
`)),
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
// startup daemon process and sudo process
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
bytes, ns, err := util.ConvertToKubeconfigBytes(f)
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
if err != nil {
return err
}
@@ -59,23 +60,25 @@ func cmdConfigAdd(f cmdutil.Factory) *cobra.Command {
if err != nil {
return err
}
fmt.Fprint(os.Stdout, resp.ClusterID)
_, _ = fmt.Fprint(os.Stdout, resp.ClusterID)
return nil
},
}
addSshFlags(cmd, sshConf)
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
return cmd
}
func cmdConfigRemove(f cmdutil.Factory) *cobra.Command {
cmd := &cobra.Command{
Use: "remove",
Short: "Remove proxy kubeconfig",
Long: templates.LongDesc(i18n.T(`Remove proxy kubeconfig which behind of ssh jump server`)),
Short: i18n.T("Remove proxy kubeconfig"),
Long: templates.LongDesc(i18n.T(`
Remove the proxy kubeconfig which is behind an ssh jump server
`)),
Example: templates.Examples(i18n.T(`
# remove proxy api-server which api-server behind of bastion host or ssh jump host
kubevpn config remove --kubeconfig /var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/947048961.kubeconfig
`)),
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
// startup daemon process and sudo process
return daemon.StartupDaemon(cmd.Context())

View File

@@ -1,33 +1,47 @@
package cmds
import (
"context"
"fmt"
"io"
"os"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"k8s.io/utils/ptr"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/daemon"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
)
func CmdConnect(f cmdutil.Factory) *cobra.Command {
var connect = &handler.ConnectOptions{}
var sshConf = &util.SshConfig{}
var extraRoute = &handler.ExtraRouteInfo{}
var sshConf = &pkgssh.SshConfig{}
var transferImage, foreground, lite bool
var imagePullSecretName string
cmd := &cobra.Command{
Use: "connect",
Short: i18n.T("Connect to kubernetes cluster network"),
Long: templates.LongDesc(i18n.T(`Connect to kubernetes cluster network`)),
Long: templates.LongDesc(i18n.T(`
Connect to kubernetes cluster network
After connecting to the kubernetes cluster network, you can ping a PodIP or
curl a ServiceIP from your local PC; it also supports k8s DNS resolution,
like: curl authors/authors.default/authors.default.svc/authors.default.svc.cluster.local.
So you can start up your application on the local PC: anything it depends on in the
k8s cluster is reachable, just as if it were running inside the cluster.
`)),
Example: templates.Examples(i18n.T(`
# Connect to k8s cluster network
kubevpn connect
@@ -35,7 +49,7 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
# Connect to api-server behind of bastion host or ssh jump host
kubevpn connect --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# it also support ProxyJump, like
# It also supports ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
@@ -45,82 +59,97 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
kubevpn connect --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
kubevpn connect --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
kubevpn connect --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`)),
# Support ssh jump inline
kubevpn connect --ssh-jump "--ssh-addr jump.naison.org --ssh-username naison --gssapi-password xxx" --ssh-username root --ssh-addr 127.0.0.1:22 --ssh-keyfile ~/.ssh/dst.pem
`)),
PreRunE: func(cmd *cobra.Command, args []string) error {
util.InitLoggerForClient(false)
// startup daemon process and sudo process
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
bytes, ns, err := util.ConvertToKubeconfigBytes(f)
err := daemon.StartupDaemon(cmd.Context())
if err != nil {
return err
}
if transferImage {
err = regctl.TransferImageWithRegctl(cmd.Context(), config.OriginImage, config.Image)
}
return err
},
RunE: func(cmd *cobra.Command, args []string) error {
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
if err != nil {
return err
}
if !sshConf.IsEmpty() {
if ip := util.GetAPIServerFromKubeConfigBytes(bytes); ip != nil {
extraRoute.ExtraCIDR = append(extraRoute.ExtraCIDR, ip.String())
}
}
logLevel := log.InfoLevel
if config.Debug {
logLevel = log.DebugLevel
}
req := &rpc.ConnectRequest{
KubeconfigBytes: string(bytes),
Namespace: ns,
ExtraCIDR: connect.ExtraCIDR,
ExtraDomain: connect.ExtraDomain,
UseLocalDNS: connect.UseLocalDNS,
ExtraRoute: extraRoute.ToRPC(),
Engine: string(connect.Engine),
OriginKubeconfigPath: util.GetKubeconfigPath(f),
OriginKubeconfigPath: util.GetKubeConfigPath(f),
SshJump: sshConf.ToRPC(),
TransferImage: transferImage,
Foreground: foreground,
Image: config.Image,
Level: int32(log.DebugLevel),
SshJump: sshConf.ToRPC(),
TransferImage: transferImage,
Image: config.Image,
ImagePullSecretName: imagePullSecretName,
Level: int32(logLevel),
}
// if foreground, send to sudo daemon server
cli := daemon.GetClient(false)
var resp grpc.ClientStream
if lite {
resp, err := cli.ConnectFork(cmd.Context(), req)
if err != nil {
return err
}
for {
recv, err := resp.Recv()
if err == io.EOF {
break
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
return nil
} else if err != nil {
return err
}
fmt.Fprint(os.Stdout, recv.GetMessage())
}
resp, err = cli.ConnectFork(cmd.Context(), req)
} else {
resp, err := cli.Connect(cmd.Context(), req)
resp, err = cli.Connect(cmd.Context(), req)
}
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.ConnectResponse](resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
}
return err
}
if !foreground {
util.Print(os.Stdout, config.Slogan)
} else {
<-cmd.Context().Done()
disconnect, err := cli.Disconnect(context.Background(), &rpc.DisconnectRequest{
KubeconfigBytes: ptr.To(string(bytes)),
Namespace: ptr.To(ns),
SshJump: sshConf.ToRPC(),
})
if err != nil {
log.Errorf("Disconnect error: %v", err)
return err
}
for {
recv, err := resp.Recv()
if err == io.EOF {
break
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
err = util.PrintGRPCStream[rpc.DisconnectResponse](disconnect)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
} else if err != nil {
return err
}
fmt.Fprint(os.Stdout, recv.GetMessage())
return err
}
}
if !req.Foreground {
util.Print(os.Stdout, "Now you can access resources in the kubernetes cluster, enjoy it :)")
_, _ = fmt.Fprint(os.Stdout, "Disconnect completed")
}
return nil
},
}
cmd.Flags().BoolVar(&config.Debug, "debug", false, "enable debug mode or not, true or false")
cmd.Flags().StringVar(&config.Image, "image", config.Image, "use this image to startup container")
cmd.Flags().StringArrayVar(&connect.ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
cmd.Flags().StringArrayVar(&connect.ExtraDomain, "extra-domain", []string{}, "Extra domain string, the resolved ip will add to route table, eg: --extra-domain test.abc.com --extra-domain foo.test.com")
cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
cmd.Flags().BoolVar(&connect.UseLocalDNS, "use-localdns", false, "if use-lcoaldns is true, kubevpn will start coredns listen at 53 to forward your dns queries. only support on linux now")
cmd.Flags().StringVar((*string)(&connect.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName, &connect.Engine)
cmd.Flags().BoolVar(&foreground, "foreground", false, "Hang up")
cmd.Flags().BoolVar(&lite, "lite", false, "connect to multiple cluster in lite mode, you needs to special this options")
cmd.Flags().BoolVar(&lite, "lite", false, "connect to multiple cluster in lite mode. mode \"lite\": design for only connecting to multiple cluster network. mode \"full\": not only connect to cluster network, it also supports proxy workloads inbound traffic to local PC.")
addSshFlags(cmd, sshConf)
handler.AddExtraRoute(cmd.Flags(), extraRoute)
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
return cmd
}
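Across these commands the manual resp.Recv() loops are replaced by a generic util.PrintGRPCStream helper. A minimal sketch of what such a helper could look like — an illustration, not the project's actual code; it relies only on grpc.ClientStream.RecvMsg and on the protobuf-generated GetMessage accessor:
package util

import (
	"fmt"
	"io"
	"os"

	"google.golang.org/grpc"
)

// message matches any protobuf response that carries a text payload.
type message interface{ GetMessage() string }

// PrintGRPCStream drains a server-side stream of T, printing each message.
// io.EOF means the server closed the stream normally; other errors (eg:
// codes.Canceled) are returned for the caller to classify.
// Hypothetical sketch, not the project's implementation.
func PrintGRPCStream[T any](stream grpc.ClientStream) error {
	for {
		var resp T
		if err := stream.RecvMsg(&resp); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		if m, ok := any(&resp).(message); ok {
			fmt.Fprint(os.Stdout, m.GetMessage())
		}
	}
}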

View File

@@ -1,13 +1,18 @@
package cmds
import (
"github.com/docker/docker/libnetwork/resolvconf"
miekgdns "github.com/miekg/dns"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/controlplane"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/controlplane"
"github.com/wencaiwulue/kubevpn/v2/pkg/dns"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdControlPlane(_ cmdutil.Factory) *cobra.Command {
@@ -18,12 +23,22 @@ func CmdControlPlane(_ cmdutil.Factory) *cobra.Command {
cmd := &cobra.Command{
Use: "control-plane",
Hidden: true,
Short: "Control-plane is a envoy xds server",
Long: `Control-plane is a envoy xds server, distribute envoy route configuration`,
Run: func(cmd *cobra.Command, args []string) {
util.InitLogger(config.Debug)
go util.StartupPProf(0)
controlplane.Main(watchDirectoryFilename, port, log.StandardLogger())
Short: i18n.T("Control-plane is a envoy xds server"),
Long: templates.LongDesc(i18n.T(`
Control-plane is a envoy xds server, distribute envoy route configuration
`)),
RunE: func(cmd *cobra.Command, args []string) error {
util.InitLoggerForServer(config.Debug)
go util.StartupPProfForServer(0)
go func() {
conf, err := miekgdns.ClientConfigFromFile(resolvconf.Path())
if err != nil {
log.Fatal(err)
}
log.Fatal(dns.ListenAndServe("udp", ":53", conf))
}()
err := controlplane.Main(cmd.Context(), watchDirectoryFilename, port, log.StandardLogger())
return err
},
}
cmd.Flags().StringVarP(&watchDirectoryFilename, "watchDirectoryFilename", "w", "/etc/envoy/envoy-config.yaml", "full path to directory to watch for files")

View File

@@ -6,15 +6,14 @@ import (
"strings"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/genericiooptions"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/completion"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/cp"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/cp"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
)
var cpExample = templates.Examples(i18n.T(`
@@ -46,7 +45,7 @@ var cpExample = templates.Examples(i18n.T(`
# copy reverse proxy api-server behind of bastion host or ssh jump host
kubevpn cp deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# it also support ProxyJump, like
# It also supports ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
@@ -60,20 +59,21 @@ var cpExample = templates.Examples(i18n.T(`
))
func CmdCp(f cmdutil.Factory) *cobra.Command {
o := cp.NewCopyOptions(genericclioptions.IOStreams{
o := cp.NewCopyOptions(genericiooptions.IOStreams{
In: os.Stdin,
Out: os.Stdout,
ErrOut: os.Stderr,
})
var sshConf = &util.SshConfig{}
var sshConf = &pkgssh.SshConfig{}
cmd := &cobra.Command{
Use: "cp <file-spec-src> <file-spec-dest>",
DisableFlagsInUseLine: true,
Hidden: true,
Short: i18n.T("Copy files and directories to and from containers"),
Long: i18n.T("Copy files and directories to and from containers. Different between kubectl cp is it will de-reference symbol link."),
Example: cpExample,
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
cmdutil.CheckErr(handler.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false))
cmdutil.CheckErr(pkgssh.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false))
var comps []string
if len(args) == 0 {
@@ -85,14 +85,14 @@ func CmdCp(f cmdutil.Factory) *cobra.Command {
// complete <namespace>/<pod>
namespace := toComplete[:idx]
template := "{{ range .items }}{{ .metadata.namespace }}/{{ .metadata.name }}: {{ end }}"
comps = completion.CompGetFromTemplate(&template, f, namespace, cmd, []string{"pod"}, toComplete)
comps = completion.CompGetFromTemplate(&template, f, namespace, []string{"pod"}, toComplete)
} else {
// Complete namespaces followed by a /
for _, ns := range completion.CompGetResource(f, cmd, "namespace", toComplete) {
for _, ns := range completion.CompGetResource(f, "namespace", toComplete) {
comps = append(comps, fmt.Sprintf("%s/", ns))
}
// Complete pod names followed by a :
for _, pod := range completion.CompGetResource(f, cmd, "pod", toComplete) {
for _, pod := range completion.CompGetResource(f, "pod", toComplete) {
comps = append(comps, fmt.Sprintf("%s:", pod))
}
@@ -135,11 +135,6 @@ func CmdCp(f cmdutil.Factory) *cobra.Command {
cmd.Flags().BoolVarP(&o.NoPreserve, "no-preserve", "", false, "The copied file/directory's ownership and permissions will not be preserved in the container")
cmd.Flags().IntVarP(&o.MaxTries, "retries", "", 0, "Set number of retries to complete a copy operation from a container. Specify 0 to disable or any negative value for infinite retrying. The default is 0 (no retry).")
// for ssh jumper host
cmd.Flags().StringVar(&sshConf.Addr, "ssh-addr", "", "Optional ssh jump server address to dial as <hostname>:<port>, eg: 127.0.0.1:22")
cmd.Flags().StringVar(&sshConf.User, "ssh-username", "", "Optional username for ssh jump server")
cmd.Flags().StringVar(&sshConf.Password, "ssh-password", "", "Optional password for ssh jump server")
cmd.Flags().StringVar(&sshConf.Keyfile, "ssh-keyfile", "", "Optional file with private key for SSH authentication")
cmd.Flags().StringVar(&sshConf.ConfigAlias, "ssh-alias", "", "Optional config alias with ~/.ssh/config for SSH authentication")
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
return cmd
}

View File

@@ -1,15 +1,24 @@
package cmds
import (
"crypto/rand"
"encoding/base64"
"errors"
"net/http"
"os"
"strconv"
"path/filepath"
"runtime/pprof"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/action"
"github.com/wencaiwulue/kubevpn/v2/pkg/dns"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdDaemon(_ cmdutil.Factory) *cobra.Command {
@@ -17,39 +26,65 @@ func CmdDaemon(_ cmdutil.Factory) *cobra.Command {
cmd := &cobra.Command{
Use: "daemon",
Short: i18n.T("Startup kubevpn daemon server"),
Long: i18n.T(`Startup kubevpn daemon server`),
Long: templates.LongDesc(i18n.T(`Startup kubevpn daemon server`)),
PreRunE: func(cmd *cobra.Command, args []string) error {
sockPath := daemon.GetSockPath(opt.IsSudo)
err := os.Remove(sockPath)
if err != nil && !errors.Is(err, os.ErrNotExist) {
b := make([]byte, 32)
if _, err := rand.Read(b); err != nil {
return err
}
pidPath := daemon.GetPidPath(opt.IsSudo)
err = os.Remove(pidPath)
if err != nil && !errors.Is(err, os.ErrNotExist) {
return err
opt.ID = base64.URLEncoding.EncodeToString(b)
if opt.IsSudo {
go util.StartupPProf(config.SudoPProfPort)
_ = os.RemoveAll("/etc/resolver")
_ = dns.CleanupHosts()
_ = util.CleanupTempKubeConfigFile()
} else {
go util.StartupPProf(config.PProfPort)
}
pid := os.Getpid()
err = os.WriteFile(pidPath, []byte(strconv.Itoa(pid)), os.ModePerm)
if err != nil {
return err
}
err = os.Chmod(pidPath, os.ModePerm)
return err
return initLogfile(action.GetDaemonLogPath())
},
RunE: func(cmd *cobra.Command, args []string) error {
RunE: func(cmd *cobra.Command, args []string) (err error) {
defer opt.Stop()
defer func() {
if errors.Is(err, http.ErrServerClosed) {
err = nil
}
if opt.IsSudo {
for _, profile := range pprof.Profiles() {
func() {
file, e := os.Create(filepath.Join(config.PprofPath, profile.Name()))
if e != nil {
return
}
defer file.Close()
e = profile.WriteTo(file, 1)
if e != nil {
return
}
}()
}
}
}()
return opt.Start(cmd.Context())
},
PostRun: func(cmd *cobra.Command, args []string) {
sockPath := daemon.GetSockPath(opt.IsSudo)
_ = os.Remove(sockPath)
pidPath := daemon.GetPidPath(opt.IsSudo)
_ = os.Remove(pidPath)
},
Hidden: true,
DisableFlagsInUseLine: true,
}
cmd.Flags().BoolVar(&opt.IsSudo, "sudo", false, "is sudo or not")
return cmd
}
func initLogfile(path string) error {
_, err := os.Lstat(path)
if os.IsNotExist(err) {
var f *os.File
f, err = os.Create(path)
if err != nil {
return err
}
_ = f.Close()
return os.Chmod(path, 0644)
}
return nil
}

View File

@@ -5,7 +5,6 @@ import (
"os"
"github.com/docker/cli/cli/command"
dockercomp "github.com/docker/cli/cli/command/completion"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
@@ -13,38 +12,34 @@ import (
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/daemon"
"github.com/wencaiwulue/kubevpn/pkg/dev"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/dev"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
)
func CmdDev(f cmdutil.Factory) *cobra.Command {
cli, dockerCli, err := util.GetClient()
if err != nil {
panic(err)
var options = &dev.Options{
NoProxy: false,
ExtraRouteInfo: handler.ExtraRouteInfo{},
}
var devOptions = &dev.Options{
Factory: f,
NoProxy: false,
ExtraCIDR: []string{},
Cli: cli,
DockerCli: dockerCli,
}
var sshConf = &util.SshConfig{}
var sshConf = &pkgssh.SshConfig{}
var transferImage bool
var imagePullSecretName string
cmd := &cobra.Command{
Use: "dev TYPE/NAME [-c CONTAINER] [flags] -- [args...]",
Short: i18n.T("Startup your kubernetes workloads in local Docker container"),
Long: templates.LongDesc(i18n.T(`
Startup your kubernetes workloads in local Docker container with same volume、env、and network
Startup your kubernetes workloads in a local Docker container with the same volume, env, and network
## What did i do:
- Download volume which MountPath point to, mount to docker container
- Connect to cluster network, set network to docker container
- Get all environment with command (env), set env to docker container
`)),
## What does it do:
- Download the volume which MountPath points to, and mount it into the docker container
- Connect to the cluster network, and apply that network to the docker container
- Get all environment variables with command (env), and set them in the docker container
`)),
Example: templates.Examples(i18n.T(`
# Develop workloads
- develop deployment
@@ -52,8 +47,8 @@ Startup your kubernetes workloads in local Docker container with same volume、e
- develop service
kubevpn dev service/productpage
# Develop workloads with mesh, traffic with header a=1, will hit local PC, otherwise no effect
kubevpn dev service/productpage --headers a=1
# Develop workloads with mesh, traffic with header foo=bar, will hit local PC, otherwise no effect
kubevpn dev service/productpage --headers foo=bar
# Develop workloads without proxy traffic
kubevpn dev service/productpage --no-proxy
@@ -61,28 +56,28 @@ Startup your kubernetes workloads in local Docker container with same volume、e
# Develop workloads which api-server behind of bastion host or ssh jump host
kubevpn dev deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# It also support ProxyJump, like
# It also supports ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn dev deployment/productpage --ssh-alias <alias>
# Switch to terminal mode; send stdin to 'bash' and sends stdout/stderror from 'bash' back to the client
kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev -i -t --entrypoint /bin/bash
kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev --entrypoint /bin/bash
or
kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev -it --entrypoint /bin/bash
kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev --entrypoint /bin/bash
# Support ssh auth GSSAPI
kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab -it --entrypoint /bin/bash
kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache -it --entrypoint /bin/bash
kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD> -it --entrypoint /bin/bash
`)),
kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab --entrypoint /bin/bash
kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache --entrypoint /bin/bash
kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD> --entrypoint /bin/bash
`)),
ValidArgsFunction: completion.ResourceTypeAndNameCompletionFunc(f),
Args: cobra.MatchAll(cobra.OnlyValidArgs),
DisableFlagsInUseLine: true,
PreRunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
fmt.Fprintf(os.Stdout, "You must specify the type of resource to proxy. %s\n\n", cmdutil.SuggestAPIResources("kubevpn"))
_, _ = fmt.Fprintf(os.Stdout, "You must specify the type of resource to proxy. %s\n\n", cmdutil.SuggestAPIResources("kubevpn"))
fullCmdName := cmd.Parent().CommandPath()
usageString := "Required resource not specified."
if len(fullCmdName) > 0 && cmdutil.IsSiblingCommandExists(cmd, "explain") {
@@ -90,92 +85,70 @@ Startup your kubernetes workloads in local Docker container with same volume、e
}
return cmdutil.UsageErrorf(cmd, usageString)
}
err = cmd.Flags().Parse(args[1:])
err := cmd.Flags().Parse(args[1:])
if err != nil {
return err
}
util.InitLogger(false)
// not supported temporarily
if devOptions.Engine == config.EngineGvisor {
return fmt.Errorf(`unsupported engine type: %s, supported ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
}
util.InitLoggerForClient(config.Debug)
err = daemon.StartupDaemon(cmd.Context())
if err != nil {
return err
}
return handler.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false)
if transferImage {
err = regctl.TransferImageWithRegctl(cmd.Context(), config.OriginImage, config.Image)
}
if err != nil {
return err
}
return pkgssh.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false)
},
RunE: func(cmd *cobra.Command, args []string) error {
devOptions.Workload = args[0]
options.Workload = args[0]
for i, arg := range args {
if arg == "--" && i != len(args)-1 {
devOptions.Copts.Args = args[i+1:]
options.ContainerOptions.Args = args[i+1:]
break
}
}
err = dev.DoDev(cmd.Context(), devOptions, sshConf, cmd.Flags(), f, transferImage)
for _, fun := range devOptions.GetRollbackFuncList() {
if fun != nil {
if err = fun(); err != nil {
log.Errorf("roll back failed, error: %s", err.Error())
defer func() {
for _, function := range options.GetRollbackFuncList() {
if function != nil {
if err := function(); err != nil {
log.Errorf("Rollback failed, error: %s", err.Error())
}
}
}
}()
if err := options.InitClient(f); err != nil {
return err
}
return err
conf, hostConfig, err := dev.Parse(cmd.Flags(), options.ContainerOptions)
if err != nil {
return err
}
return options.Main(cmd.Context(), sshConf, conf, hostConfig, imagePullSecretName)
},
}
cmd.Flags().SortFlags = false
cmd.Flags().StringToStringVarP(&devOptions.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to local PC, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to local PC, format is k=v, like: k1=v1,k2=v2")
cmd.Flags().BoolVar(&config.Debug, "debug", false, "enable debug mode or not, true or false")
cmd.Flags().StringVar(&config.Image, "image", config.Image, "use this image to startup container")
cmd.Flags().BoolVar(&devOptions.NoProxy, "no-proxy", false, "Whether proxy remote workloads traffic into local or not, true: just startup container on local without inject containers to intercept traffic, false: intercept traffic and forward to local")
cmdutil.AddContainerVarFlags(cmd, &devOptions.ContainerName, devOptions.ContainerName)
cmd.Flags().StringToStringVarP(&options.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to local PC, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to local PC, format is k=v, like: k1=v1,k2=v2")
cmd.Flags().BoolVar(&options.NoProxy, "no-proxy", false, "Whether proxy remote workloads traffic into local or not, true: just startup container on local without inject containers to intercept traffic, false: intercept traffic and forward to local")
cmdutil.AddContainerVarFlags(cmd, &options.ContainerName, options.ContainerName)
cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc("container", completion.ContainerCompletionFunc(f)))
cmd.Flags().StringArrayVar(&devOptions.ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
cmd.Flags().StringArrayVar(&devOptions.ExtraDomain, "extra-domain", []string{}, "Extra domain string, the resolved ip will add to route table, eg: --extra-domain test.abc.com --extra-domain foo.test.com")
cmd.Flags().StringVar((*string)(&devOptions.ConnectMode), "connect-mode", string(dev.ConnectModeHost), "Connect to kubernetes network in container or in host, eg: ["+string(dev.ConnectModeContainer)+"|"+string(dev.ConnectModeHost)+"]")
cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
cmd.Flags().StringVar((*string)(&devOptions.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
cmd.Flags().StringVar((*string)(&options.ConnectMode), "connect-mode", string(dev.ConnectModeHost), "Connect to kubernetes network in container or in host, eg: ["+string(dev.ConnectModeContainer)+"|"+string(dev.ConnectModeHost)+"]")
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName, &options.Engine)
// diy docker options
cmd.Flags().StringVar(&devOptions.DockerImage, "docker-image", "", "Overwrite the default K8s pod of the image")
// origin docker options
flags := cmd.Flags()
flags.SetInterspersed(false)
// These are flags not stored in Config/HostConfig
flags.BoolVarP(&devOptions.Options.Detach, "detach", "d", false, "Run container in background and print container ID")
flags.StringVar(&devOptions.Options.Name, "name", "", "Assign a name to the container")
flags.StringVar(&devOptions.Options.Pull, "pull", dev.PullImageMissing, `Pull image before running ("`+dev.PullImageAlways+`"|"`+dev.PullImageMissing+`"|"`+dev.PullImageNever+`")`)
flags.BoolVarP(&devOptions.Options.Quiet, "quiet", "q", false, "Suppress the pull output")
// Add an explicit help that doesn't have a `-h` to prevent the conflict
// with hostname
flags.Bool("help", false, "Print usage")
command.AddPlatformFlag(flags, &devOptions.Options.Platform)
command.AddTrustVerificationFlags(flags, &devOptions.Options.Untrusted, dockerCli.ContentTrustEnabled())
devOptions.Copts = dev.AddFlags(flags)
_ = cmd.RegisterFlagCompletionFunc(
"env",
func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return os.Environ(), cobra.ShellCompDirectiveNoFileComp
},
)
_ = cmd.RegisterFlagCompletionFunc(
"env-file",
func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return nil, cobra.ShellCompDirectiveDefault
},
)
_ = cmd.RegisterFlagCompletionFunc(
"network",
dockercomp.NetworkNames(nil),
)
addSshFlags(cmd, sshConf)
cmd.Flags().StringVar(&options.DevImage, "dev-image", "", "Use to startup docker container, Default is pod image")
// -- origin docker options -- start
options.ContainerOptions = dev.AddFlags(cmd.Flags())
cmd.Flags().StringVar(&options.RunOptions.Pull, "pull", dev.PullImageMissing, `Pull image before running ("`+dev.PullImageAlways+`"|"`+dev.PullImageMissing+`"|"`+dev.PullImageNever+`")`)
command.AddPlatformFlag(cmd.Flags(), &options.RunOptions.Platform)
// -- origin docker options -- end
handler.AddExtraRoute(cmd.Flags(), &options.ExtraRouteInfo)
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
return cmd
}
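The deferred loop above drains options.GetRollbackFuncList so every state change made during dev mode is undone on exit. A small sketch of this cleanup pattern with illustrative names (not the project's API); undo functions are often run in reverse registration order so the most recent change is reverted first:
package main

import (
	log "github.com/sirupsen/logrus"
)

// rollback collects undo functions as state-changing steps succeed.
type rollback struct {
	funcs []func() error
}

func (r *rollback) add(f func() error) { r.funcs = append(r.funcs, f) }

// run executes undo functions in reverse order; failures are logged
// but do not stop the remaining cleanups.
func (r *rollback) run() {
	for i := len(r.funcs) - 1; i >= 0; i-- {
		if err := r.funcs[i](); err != nil {
			log.Errorf("Rollback failed, error: %s", err.Error())
		}
	}
}

func main() {
	var r rollback
	defer r.run()
	// ... each setup step registers its undo:
	r.add(func() error { log.Info("undo step 1"); return nil })
	r.add(func() error { log.Info("undo step 2"); return nil })
}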

View File

@@ -2,7 +2,6 @@ package cmds
import (
"fmt"
"io"
"os"
"strconv"
@@ -14,31 +13,44 @@ import (
"k8s.io/kubectl/pkg/util/templates"
"k8s.io/utils/pointer"
"github.com/wencaiwulue/kubevpn/pkg/daemon"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdDisconnect(f cmdutil.Factory) *cobra.Command {
var all = false
var clusterIDs []string
cmd := &cobra.Command{
Use: "disconnect",
Short: i18n.T("Disconnect from kubernetes cluster network"),
Long: templates.LongDesc(i18n.T(`Disconnect from kubernetes cluster network`)),
Long: templates.LongDesc(i18n.T(`
Disconnect from kubernetes cluster network
This command disconnects from the cluster. After using command 'kubevpn connect',
you can use this command to disconnect from a specific cluster.
Before disconnecting, it leaves any proxy and clone resources that depend on this cluster;
after disconnecting, it also cleans up DNS and the hosts file.
`)),
Example: templates.Examples(i18n.T(`
# disconnect from cluster network and restore proxy resource
kubevpn disconnect
`)),
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
util.InitLoggerForClient(false)
err = daemon.StartupDaemon(cmd.Context())
return err
},
Args: cobra.MatchAll(cobra.OnlyValidArgs),
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) > 0 && all {
return fmt.Errorf("either specify --all or specific ID, not both")
return fmt.Errorf("either specify --all or ID, not both")
}
if len(args) == 0 && !all {
return fmt.Errorf("either specify --all or specific ID")
if len(clusterIDs) > 0 && all {
return fmt.Errorf("either specify --all or cluster-id, not both")
}
if len(args) == 0 && !all && len(clusterIDs) == 0 {
return fmt.Errorf("either specify --all or ID or cluster-id")
}
var ids *int32
if len(args) > 0 {
@@ -51,27 +63,26 @@ func CmdDisconnect(f cmdutil.Factory) *cobra.Command {
client, err := daemon.GetClient(false).Disconnect(
cmd.Context(),
&rpc.DisconnectRequest{
ID: ids,
All: pointer.Bool(all),
ID: ids,
ClusterIDs: clusterIDs,
All: pointer.Bool(all),
},
)
var resp *rpc.DisconnectResponse
for {
resp, err = client.Recv()
if err == io.EOF {
break
} else if err == nil {
fmt.Fprint(os.Stdout, resp.Message)
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
break
} else {
return err
}
if err != nil {
return err
}
fmt.Fprint(os.Stdout, "disconnect successfully")
err = util.PrintGRPCStream[rpc.DisconnectResponse](client)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
}
return err
}
_, _ = fmt.Fprint(os.Stdout, "Disconnect completed")
return nil
},
}
cmd.Flags().BoolVar(&all, "all", all, "Select all, disconnect from all cluster network")
cmd.Flags().BoolVar(&all, "all", all, "Disconnect all cluster, disconnect from all cluster network")
cmd.Flags().StringArrayVar(&clusterIDs, "cluster-id", []string{}, "Cluster id, command status -o yaml/json will show cluster-id")
return cmd
}
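One detail above: the request mixes k8s.io/utils/pointer (pointer.Bool) with the newer generic k8s.io/utils/ptr (ptr.To) seen in connect.go; ptr.To subsumes the typed helpers. Its essence is a one-line generic (sketch):
// ptr.To in k8s.io/utils/ptr is essentially this generic one-liner:
func To[T any](v T) *T { return &v }

// usage (replaces pointer.Bool / pointer.String etc.):
// all := To(true)      // *bool
// ns := To("default")  // *string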

View File

@@ -1,20 +1,27 @@
package cmds
import (
"fmt"
"cmp"
"encoding/json"
"os"
"slices"
"strings"
"github.com/spf13/cobra"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/printers"
cmdget "k8s.io/kubectl/pkg/cmd/get"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/scheme"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"sigs.k8s.io/yaml"
"github.com/wencaiwulue/kubevpn/pkg/daemon"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
)
func CmdGet(f cmdutil.Factory) *cobra.Command {
var printFlags = cmdget.NewGetPrintFlags()
cmd := &cobra.Command{
Use: "get",
Hidden: true,
@@ -27,38 +34,76 @@ func CmdGet(f cmdutil.Factory) *cobra.Command {
# Get api-server behind of bastion host or ssh jump host
kubevpn get deployment --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# it also support ProxyJump, like
# It also supports ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn get service --ssh-alias <alias>
`)),
`)),
PreRunE: func(cmd *cobra.Command, args []string) error {
// startup daemon process and sudo process
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
namespace, _, err := f.ToRawKubeConfigLoader().Namespace()
ns, _, err := f.ToRawKubeConfigLoader().Namespace()
if err != nil {
return err
}
client, err := daemon.GetClient(false).Get(
client, err := daemon.GetClient(true).Get(
cmd.Context(),
&rpc.GetRequest{
Namespace: namespace,
Namespace: ns,
Resource: args[0],
},
)
if err != nil {
return err
}
marshal, err := yaml.Marshal(client.Metadata)
if err != nil {
return err
w := printers.GetNewTabWriter(os.Stdout)
var toPrinter = func() (printers.ResourcePrinterFunc, error) {
var flags = printFlags.Copy()
_ = flags.EnsureWithNamespace()
printer, err := flags.ToPrinter()
if err != nil {
return nil, err
}
printer, err = printers.NewTypeSetter(scheme.Scheme).WrapToPrinter(printer, nil)
if err != nil {
return nil, err
}
outputOption := cmd.Flags().Lookup("output").Value.String()
if strings.Contains(outputOption, "custom-columns") || outputOption == "yaml" || strings.Contains(outputOption, "json") {
} else {
printer = &cmdget.TablePrinter{Delegate: printer}
}
return printer.PrintObj, nil
}
fmt.Fprint(os.Stdout, string(marshal))
return nil
var list []*v1.PartialObjectMetadata
for _, m := range client.Metadata {
var data v1.PartialObjectMetadata
err = json.Unmarshal([]byte(m), &data)
if err != nil {
continue
}
list = append(list, &data)
}
slices.SortStableFunc(list, func(a, b *v1.PartialObjectMetadata) int {
compare := cmp.Compare(a.GetNamespace(), b.GetNamespace())
if compare == 0 {
return cmp.Compare(a.GetName(), b.GetName())
}
return compare
})
for _, m := range list {
printer, err := toPrinter()
if err != nil {
return err
}
_ = printer.PrintObj(m, w)
}
return w.Flush()
},
}
printFlags.AddFlags(cmd)
return cmd
}

View File

@@ -0,0 +1,43 @@
package cmds
import (
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
)
func CmdImageCopy(_ cmdutil.Factory) *cobra.Command {
var imageCmd = &cobra.Command{
Use: "image <cmd>",
Short: "copy images",
}
copyCmd := &cobra.Command{
Use: "copy <src_image_ref> <dst_image_ref>",
Aliases: []string{"cp"},
Short: "copy or re-tag image",
Long: `Copy or re-tag an image. This works between registries and only pulls layers
that do not exist at the target. In the same registry it attempts to mount
the layers between repositories. Within the same repository it only
sends the manifest with the new tag.`,
Example: `
# copy an image
regctl image copy ghcr.io/kubenetworks/kubevpn:latest docker.io/naison/kubevpn:latest
# re-tag an image
regctl image copy registry.example.org/repo:v1.2.3 registry.example.org/repo:v1`,
Args: cobra.MatchAll(cobra.ExactArgs(2), cobra.OnlyValidArgs),
PreRunE: func(cmd *cobra.Command, args []string) error {
util.InitLoggerForClient(false)
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
err := regctl.TransferImageWithRegctl(cmd.Context(), args[0], args[1])
return err
},
}
imageCmd.AddCommand(copyCmd)
return imageCmd
}

View File

@@ -1,10 +1,6 @@
package cmds
import (
"fmt"
"io"
"os"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -12,19 +8,28 @@ import (
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/daemon"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdLeave(f cmdutil.Factory) *cobra.Command {
var leaveCmd = &cobra.Command{
Use: "leave",
Short: "Leave proxy resource",
Long: `leave proxy resource and restore it to origin`,
Short: i18n.T("Leave proxy resource"),
Long: templates.LongDesc(i18n.T(`
Leave proxy resource and restore it to origin
This command is used to leave proxy resources. After using command 'kubevpn proxy xxx',
you can use this command to leave those proxy resources.
You can only leave proxy resources that you proxied yourself.
When the last user leaves a proxy resource, the workload's containers are restored;
otherwise the containers [vpn, envoy-proxy] are kept until the last user leaves.
`)),
Example: templates.Examples(i18n.T(`
# leave proxy resource and restore it to origin
kubevpn leave deployment/authors
`)),
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
return daemon.StartupDaemon(cmd.Context())
},
@@ -35,17 +40,14 @@ func CmdLeave(f cmdutil.Factory) *cobra.Command {
if err != nil {
return err
}
for {
recv, err := leave.Recv()
if err == io.EOF {
err = util.PrintGRPCStream[rpc.LeaveResponse](leave)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
return nil
} else if err != nil {
return err
}
fmt.Fprint(os.Stdout, recv.GetMessage())
return err
}
return nil
},
}
return leaveCmd

View File

@@ -8,8 +8,8 @@ import (
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/daemon"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
)
func CmdList(f cmdutil.Factory) *cobra.Command {
@@ -20,7 +20,7 @@ func CmdList(f cmdutil.Factory) *cobra.Command {
Example: templates.Examples(i18n.T(`
# list proxy resources
kubevpn list
`)),
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
return daemon.StartupDaemon(cmd.Context())
},
@@ -35,6 +35,7 @@ func CmdList(f cmdutil.Factory) *cobra.Command {
fmt.Println(client.GetMessage())
return nil
},
Hidden: true,
}
return cmd
}

View File

@@ -1,10 +1,6 @@
package cmds
import (
"fmt"
"io"
"os"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -12,23 +8,27 @@ import (
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/daemon"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdLogs(f cmdutil.Factory) *cobra.Command {
req := &rpc.LogRequest{}
cmd := &cobra.Command{
Use: "logs",
Short: i18n.T("Log kubevpn daemon server"),
Long: templates.LongDesc(i18n.T(`Log kubevpn daemon server`)),
Short: i18n.T("Log kubevpn daemon grpc server"),
Long: templates.LongDesc(i18n.T(`
Print the logs of the kubevpn daemon grpc server. It shows logs of both the daemon and the sudo daemon grpc servers
`)),
Example: templates.Examples(i18n.T(`
# show log for kubevpn daemon server
kubevpn logs
# follow more log
kubevpn logs -f
`)),
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
util.InitLoggerForClient(false)
// startup daemon process and sudo process
return daemon.StartupDaemon(cmd.Context())
},
@@ -37,22 +37,17 @@ func CmdLogs(f cmdutil.Factory) *cobra.Command {
if err != nil {
return err
}
var resp *rpc.LogResponse
for {
resp, err = client.Recv()
if err == io.EOF {
break
} else if err == nil {
fmt.Fprintln(os.Stdout, resp.Message)
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
err = util.PrintGRPCStream[rpc.LogResponse](client)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
} else {
return err
}
return err
}
return nil
},
}
cmd.Flags().BoolVarP(&req.Follow, "follow", "f", false, "Specify if the logs should be streamed.")
cmd.Flags().Int32VarP(&req.Lines, "number", "N", 10, "Lines of recent log file to display.")
return cmd
}

View File

@@ -12,14 +12,17 @@ import (
var (
optionsExample = templates.Examples(i18n.T(`
# Print flags inherited by all commands
kubevpn options`))
kubevpn options
`))
)
func CmdOptions(cmdutil.Factory) *cobra.Command {
cmd := &cobra.Command{
Use: "options",
Short: i18n.T("Print the list of flags inherited by all commands"),
Long: i18n.T("Print the list of flags inherited by all commands"),
Use: "options",
Short: i18n.T("Print the list of flags inherited by all commands"),
Long: templates.LongDesc(i18n.T(`
Print the list of flags inherited by all commands
`)),
Example: optionsExample,
Run: func(cmd *cobra.Command, args []string) {
cmd.Usage()

View File

@@ -3,7 +3,6 @@ package cmds
import (
"context"
"fmt"
"io"
"os"
log "github.com/sirupsen/logrus"
@@ -15,21 +14,34 @@ import (
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/daemon"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
)
func CmdProxy(f cmdutil.Factory) *cobra.Command {
var connect = handler.ConnectOptions{}
var sshConf = &util.SshConfig{}
var extraRoute = &handler.ExtraRouteInfo{}
var sshConf = &pkgssh.SshConfig{}
var transferImage, foreground bool
var imagePullSecretName string
cmd := &cobra.Command{
Use: "proxy",
Short: i18n.T("Proxy kubernetes workloads inbound traffic into local PC"),
Long: templates.LongDesc(i18n.T(`Proxy kubernetes workloads inbound traffic into local PC`)),
Long: templates.LongDesc(i18n.T(`
Proxy kubernetes workloads inbound traffic into local PC
Proxy k8s workloads' inbound traffic into the local PC, with or without service mesh.
Without service mesh, it proxies all inbound traffic into the local PC, even layer-4 (transport layer) traffic.
With service mesh, it proxies only traffic carrying the special header to the local PC, supporting HTTP, gRPC, Thrift, WebSocket...
After proxying a resource, it also connects to the cluster network automatically, so just start up your app on the local PC
and wait for inbound traffic, which makes debugging much easier.
`)),
Example: templates.Examples(i18n.T(`
# Reverse proxy
- proxy deployment
@@ -43,30 +55,45 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
or
kubevpn proxy deployment authors productpage
# Reverse proxy with mesh, traffic with header a=1, will hit local PC, otherwise no effect
kubevpn proxy service/productpage --headers a=1
# Reverse proxy with mesh, traffic with header foo=bar, will hit local PC, otherwise no effect
kubevpn proxy service/productpage --headers foo=bar
# Reverse proxy with mesh, traffic with header foo=bar and env=dev, will hit local PC, otherwise no effect
kubevpn proxy service/productpage --headers foo=bar --headers env=dev
# Connect to an api-server behind a bastion host or ssh jump host and proxy kubernetes resource traffic into the local PC
kubevpn proxy deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem --headers a=1
kubevpn proxy deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem --headers foo=bar
# it also support ProxyJump, like
# It also supports ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn proxy service/productpage --ssh-alias <alias> --headers a=1
kubevpn proxy service/productpage --ssh-alias <alias> --headers foo=bar
# Support ssh auth GSSAPI
kubevpn proxy service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
kubevpn proxy service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
kubevpn proxy service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`)),
# Support port map, you can proxy container port to local port by command:
kubevpn proxy deployment/productpage --portmap 80:8080
# Proxy container port 9080 to local port 8080 of TCP protocol
kubevpn proxy deployment/productpage --portmap 9080:8080
# Proxy container port 9080 to local port 5000 of UDP protocol
kubevpn proxy deployment/productpage --portmap udp/9080:5000
# Auto proxy container port to same local port, and auto detect protocol
kubevpn proxy deployment/productpage
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
util.InitLoggerForClient(false)
if err = daemon.StartupDaemon(cmd.Context()); err != nil {
return err
}
// not supported temporarily
if connect.Engine == config.EngineGvisor {
return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
if transferImage {
err = regctl.TransferImageWithRegctl(cmd.Context(), config.OriginImage, config.Image)
}
return err
},
@@ -81,47 +108,50 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
return cmdutil.UsageErrorf(cmd, usageString)
}
bytes, ns, err := util.ConvertToKubeconfigBytes(f)
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
if err != nil {
return err
}
if !sshConf.IsEmpty() {
if ip := util.GetAPIServerFromKubeConfigBytes(bytes); ip != nil {
extraRoute.ExtraCIDR = append(extraRoute.ExtraCIDR, ip.String())
}
}
// TODO: wrap the doConnect method? Internally use the client to send to the daemon
cli := daemon.GetClient(false)
logLevel := log.InfoLevel
if config.Debug {
logLevel = log.DebugLevel
}
client, err := cli.Proxy(
cmd.Context(),
&rpc.ConnectRequest{
KubeconfigBytes: string(bytes),
Namespace: ns,
Headers: connect.Headers,
PortMap: connect.PortMap,
Workloads: args,
ExtraCIDR: connect.ExtraCIDR,
ExtraDomain: connect.ExtraDomain,
UseLocalDNS: connect.UseLocalDNS,
ExtraRoute: extraRoute.ToRPC(),
Engine: string(connect.Engine),
SshJump: sshConf.ToRPC(),
TransferImage: transferImage,
Image: config.Image,
Level: int32(log.DebugLevel),
OriginKubeconfigPath: util.GetKubeconfigPath(f),
ImagePullSecretName: imagePullSecretName,
Level: int32(logLevel),
OriginKubeconfigPath: util.GetKubeConfigPath(f),
},
)
if err != nil {
return err
}
var resp *rpc.ConnectResponse
for {
resp, err = client.Recv()
if err == io.EOF {
break
} else if err == nil {
fmt.Fprint(os.Stdout, resp.Message)
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
err = util.PrintGRPCStream[rpc.ConnectResponse](client)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
} else {
return err
}
return err
}
util.Print(os.Stdout, "Now you can access resources in the kubernetes cluster, enjoy it :)")
util.Print(os.Stdout, config.Slogan)
// hangup
if foreground {
// leave from cluster resources
@@ -130,47 +160,27 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
stream, err := cli.Leave(context.Background(), &rpc.LeaveRequest{
Workloads: args,
})
var resp *rpc.LeaveResponse
for {
resp, err = stream.Recv()
if err == io.EOF {
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.LeaveResponse](stream)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
return nil
} else if err != nil {
return err
}
fmt.Fprint(os.Stdout, resp.Message)
return err
}
}
return nil
},
}
cmd.Flags().StringToStringVarP(&connect.Headers, "headers", "H", map[string]string{}, "Traffic with special headers is reversed to the local PC; you should start your service after the workloads are reversed successfully. If not specified, all traffic is redirected to the local PC. Format is k=v, like: k1=v1,k2=v2")
cmd.Flags().BoolVar(&config.Debug, "debug", false, "Enable debug mode or not, true or false")
cmd.Flags().StringVar(&config.Image, "image", config.Image, "Use this image to startup container")
cmd.Flags().StringArrayVar(&connect.ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
cmd.Flags().StringArrayVar(&connect.ExtraDomain, "extra-domain", []string{}, "Extra domain string, the resolved ip will add to route table, eg: --extra-domain test.abc.com --extra-domain foo.test.com")
cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to the image specified by the flag `--image`, default: "+config.Image)
cmd.Flags().StringVar((*string)(&connect.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
cmd.Flags().StringToStringVarP(&connect.Headers, "headers", "H", map[string]string{}, "Traffic with special headers (all headers must match, AND semantics) is reversed to the local PC. If not specified, all traffic is redirected to the local PC. format: <KEY>=<VALUE> eg: --headers foo=bar --headers env=dev")
cmd.Flags().StringArrayVar(&connect.PortMap, "portmap", []string{}, "Port map, map container port to local port, format: [tcp/udp]/containerPort:localPort. If not specified, localPort defaults to containerPort. eg: tcp/80:8080 or udp/5000:5001 or 80 or 80:8080")
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName, &connect.Engine)
cmd.Flags().BoolVar(&foreground, "foreground", false, "Run in foreground and hang up")
addSshFlags(cmd, sshConf)
handler.AddExtraRoute(cmd.Flags(), extraRoute)
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
cmd.ValidArgsFunction = utilcomp.ResourceTypeAndNameCompletionFunc(f)
return cmd
}
func addSshFlags(cmd *cobra.Command, sshConf *util.SshConfig) {
// for ssh jumper host
cmd.Flags().StringVar(&sshConf.Addr, "ssh-addr", "", "Optional ssh jump server address to dial as <hostname>:<port>, eg: 127.0.0.1:22")
cmd.Flags().StringVar(&sshConf.User, "ssh-username", "", "Optional username for ssh jump server")
cmd.Flags().StringVar(&sshConf.Password, "ssh-password", "", "Optional password for ssh jump server")
cmd.Flags().StringVar(&sshConf.Keyfile, "ssh-keyfile", "", "Optional file with private key for SSH authentication")
cmd.Flags().StringVar(&sshConf.ConfigAlias, "ssh-alias", "", "Optional config alias with ~/.ssh/config for SSH authentication")
cmd.Flags().StringVar(&sshConf.GSSAPIPassword, "gssapi-password", "", "GSSAPI password")
cmd.Flags().StringVar(&sshConf.GSSAPIKeytabConf, "gssapi-keytab", "", "GSSAPI keytab file path")
cmd.Flags().StringVar(&sshConf.GSSAPICacheFile, "gssapi-cache", "", "GSSAPI cache file path, use command `kinit -c /path/to/cache USERNAME@RELAM` to generate")
cmd.Flags().StringVar(&sshConf.RemoteKubeconfig, "remote-kubeconfig", "", "Remote kubeconfig abstract path of ssh server, default is /home/$USERNAME/.kube/config")
lookup := cmd.Flags().Lookup("remote-kubeconfig")
lookup.NoOptDefVal = "~/.kube/config"
}
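
The `--portmap` flag registered above accepts `80`, `9080:8080`, and `udp/9080:5000`. A hypothetical parser for that grammar (the real parsing lives elsewhere in kubevpn and may differ):

```go
package cmds

import (
	"strconv"
	"strings"
)

// parsePortMap handles the --portmap grammar [tcp/udp/]containerPort[:localPort].
// The protocol defaults to tcp; the local port defaults to the container port.
func parsePortMap(s string) (proto string, containerPort, localPort int, err error) {
	proto = "tcp"
	if i := strings.IndexByte(s, '/'); i >= 0 {
		proto, s = s[:i], s[i+1:]
	}
	parts := strings.SplitN(s, ":", 2)
	if containerPort, err = strconv.Atoi(parts[0]); err != nil {
		return "", 0, 0, err
	}
	localPort = containerPort // bare "80" maps container port 80 to local port 80
	if len(parts) == 2 {
		if localPort, err = strconv.Atoi(parts[1]); err != nil {
			return "", 0, 0, err
		}
	}
	return proto, containerPort, localPort, nil
}
```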

View File

@@ -3,7 +3,6 @@ package cmds
import (
"context"
"fmt"
"io"
"os"
"github.com/spf13/cobra"
@@ -13,23 +12,27 @@ import (
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/daemon"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdQuit(f cmdutil.Factory) *cobra.Command {
cmd := &cobra.Command{
Use: "quit",
Short: i18n.T("Quit kubevpn daemon server"),
Long: templates.LongDesc(i18n.T(`Disconnect from cluster, leave proxy resources, and quit daemon`)),
Short: i18n.T("Quit kubevpn daemon grpc server"),
Long: templates.LongDesc(i18n.T(`
Disconnect from the cluster, leave proxied resources, quit the daemon grpc server, and clean up DNS/hosts
`)),
Example: templates.Examples(i18n.T(`
# before quitting, kubevpn will restore proxied resources to origin and disconnect from the cluster
kubevpn quit
`)),
`)),
RunE: func(cmd *cobra.Command, args []string) error {
_ = quit(cmd.Context(), true)
_ = quit(cmd.Context(), false)
fmt.Fprint(os.Stdout, "quit successfully")
_ = quit(cmd.Context(), true)
util.CleanExtensionLib()
_, _ = fmt.Fprint(os.Stdout, "Exited")
return nil
},
}
@@ -45,18 +48,12 @@ func quit(ctx context.Context, isSudo bool) error {
if err != nil {
return err
}
var resp *rpc.QuitResponse
for {
resp, err = client.Recv()
if err == io.EOF {
break
} else if err == nil {
fmt.Fprint(os.Stdout, resp.Message)
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
err = util.PrintGRPCStream[rpc.QuitResponse](client)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
} else {
return err
}
return err
}
return nil
}

View File

@@ -1,10 +1,6 @@
package cmds
import (
"fmt"
"io"
"os"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -12,19 +8,26 @@ import (
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/daemon"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdRemove(f cmdutil.Factory) *cobra.Command {
var cmd = &cobra.Command{
Use: "remove",
Short: "Remove cloned resource",
Long: `Remove cloned resource`,
Short: "Remove clone resource",
Long: templates.LongDesc(i18n.T(`
Remove clone resource
This command is design to remove clone resources, after use command 'kubevpn clone xxx',
it will generate and create a new resource in target k8s cluster with format [resource_name]_clone_xxxxx,
use this command to remove this created resources.
`)),
Example: templates.Examples(i18n.T(`
# remove the cloned resource and restore to origin
kubevpn remove deployment/authors
`)),
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
return daemon.StartupDaemon(cmd.Context())
},
@@ -35,17 +38,14 @@ func CmdRemove(f cmdutil.Factory) *cobra.Command {
if err != nil {
return err
}
for {
recv, err := leave.Recv()
if err == io.EOF {
err = util.PrintGRPCStream[rpc.RemoveResponse](leave)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
return nil
} else if err != nil {
return err
}
fmt.Fprint(os.Stdout, recv.GetMessage())
return err
}
return nil
},
}
return cmd

View File

@@ -1,68 +1,82 @@
package cmds
import (
"fmt"
"os"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdReset(factory cmdutil.Factory) *cobra.Command {
var connect = handler.ConnectOptions{}
var sshConf = &util.SshConfig{}
func CmdReset(f cmdutil.Factory) *cobra.Command {
var sshConf = &pkgssh.SshConfig{}
cmd := &cobra.Command{
Use: "reset",
Short: "Reset all changes made by KubeVPN",
Long: `Reset all changes made by KubeVPN`,
Short: "Reset workloads to origin status",
Long: templates.LongDesc(i18n.T(`
Reset workloads to origin status
Reset will remove the injected containers vpn and envoy-proxy, and restore the service mesh rules.
`)),
Example: templates.Examples(i18n.T(`
# Reset default namespace
kubevpn reset
# Reset default namespace workloads deployment/productpage
kubevpn reset deployment/productpage
# Reset another namespace test
kubevpn reset -n test
# Reset another namespace test workloads deployment/productpage
kubevpn reset deployment/productpage -n test
# Reset cluster whose api-server is behind a bastion host or ssh jump host
kubevpn reset --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# Reset workloads deployment/productpage whose api-server is behind a bastion host or ssh jump host
kubevpn reset deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# it also support ProxyJump, like
# It also supports ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn reset --ssh-alias <alias>
kubevpn reset deployment/productpage --ssh-alias <alias>
# Support ssh auth GSSAPI
kubevpn reset --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
kubevpn reset --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
kubevpn reset --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`)),
kubevpn reset deployment/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
kubevpn reset deployment/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
kubevpn reset deployment/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`)),
PreRunE: func(cmd *cobra.Command, args []string) error {
return handler.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false)
util.InitLoggerForClient(false)
return daemon.StartupDaemon(cmd.Context())
},
Run: func(cmd *cobra.Command, args []string) {
util.InitLogger(false)
if err := connect.InitClient(factory); err != nil {
log.Fatal(err)
}
err := connect.Reset(cmd.Context())
Args: cobra.MatchAll(cobra.ExactArgs(1)),
RunE: func(cmd *cobra.Command, args []string) error {
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
if err != nil {
log.Fatal(err)
return err
}
fmt.Fprint(os.Stdout, "done")
cli := daemon.GetClient(false)
req := &rpc.ResetRequest{
KubeconfigBytes: string(bytes),
Namespace: ns,
Workloads: args,
SshJump: sshConf.ToRPC(),
}
resp, err := cli.Reset(cmd.Context(), req)
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.ResetResponse](resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
}
return err
}
return nil
},
}
// for ssh jumper host
cmd.Flags().StringVar(&sshConf.Addr, "ssh-addr", "", "Optional ssh jump server address to dial as <hostname>:<port>, eg: 127.0.0.1:22")
cmd.Flags().StringVar(&sshConf.User, "ssh-username", "", "Optional username for ssh jump server")
cmd.Flags().StringVar(&sshConf.Password, "ssh-password", "", "Optional password for ssh jump server")
cmd.Flags().StringVar(&sshConf.Keyfile, "ssh-keyfile", "", "Optional file with private key for SSH authentication")
cmd.Flags().StringVar(&sshConf.ConfigAlias, "ssh-alias", "", "Optional config alias with ~/.ssh/config for SSH authentication")
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
return cmd
}

View File

@@ -2,26 +2,29 @@ package cmds
import (
"os"
"strings"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/rest"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/homedir"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"k8s.io/utils/ptr"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
)
func NewKubeVPNCommand() *cobra.Command {
var cmd = &cobra.Command{
Use: "kubevpn",
Short: i18n.T("kubevpn connect to Kubernetes cluster network"),
Short: i18n.T("KubeVPN offers a Cloud-Native Dev Environment that seamlessly connects to your Kubernetes cluster network."),
Long: templates.LongDesc(`
kubevpn connect to Kubernetes cluster network.
`),
KubeVPN offers a Cloud-Native Dev Environment that seamlessly connects to your Kubernetes cluster network.
`),
Run: func(cmd *cobra.Command, args []string) {
cmd.Help()
},
@@ -41,7 +44,7 @@ func NewKubeVPNCommand() *cobra.Command {
return c
}
configFlags.AddFlags(flags)
matchVersionFlags := cmdutil.NewMatchVersionFlags(configFlags)
matchVersionFlags := cmdutil.NewMatchVersionFlags(&warp{ConfigFlags: configFlags})
matchVersionFlags.AddFlags(flags)
factory := cmdutil.NewFactory(matchVersionFlags)
@@ -61,28 +64,32 @@ func NewKubeVPNCommand() *cobra.Command {
CmdServe(factory),
CmdDaemon(factory),
CmdWebhook(factory),
CmdSyncthing(factory),
},
},
{
Message: "Management commands",
Message: "Management commands:",
Commands: []*cobra.Command{
CmdStatus(factory),
CmdList(factory),
CmdAlias(factory),
CmdGet(factory),
CmdConfig(factory),
CmdCp(factory),
CmdSSH(factory),
CmdSSHDaemon(factory),
CmdImageCopy(factory),
CmdLogs(factory),
CmdCp(factory),
CmdReset(factory),
CmdUninstall(factory),
CmdQuit(factory),
},
},
{
Message: "Other commands",
Message: "Other commands:",
Commands: []*cobra.Command{
CmdStatus(factory),
CmdVersion(factory),
CmdUpgrade(factory),
CmdVersion(factory),
},
},
}
@@ -91,3 +98,15 @@ func NewKubeVPNCommand() *cobra.Command {
cmd.AddCommand(CmdOptions(factory))
return cmd
}
type warp struct {
*genericclioptions.ConfigFlags
}
func (f *warp) ToRawKubeConfigLoader() clientcmd.ClientConfig {
if strings.HasPrefix(ptr.Deref[string](f.KubeConfig, ""), "~") {
home := homedir.HomeDir()
f.KubeConfig = ptr.To(strings.Replace(*f.KubeConfig, "~", home, 1))
}
return f.ConfigFlags.ToRawKubeConfigLoader()
}
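
The `warp` wrapper above exists only to expand a leading `~` in `--kubeconfig` before client-go resolves the path. The same expansion in isolation, for illustration:

```go
package cmds

import (
	"strings"

	"k8s.io/client-go/util/homedir"
)

// expandTilde mirrors what warp.ToRawKubeConfigLoader does to --kubeconfig:
// with HOME=/home/alice, "~/.kube/config" becomes "/home/alice/.kube/config".
func expandTilde(path string) string {
	if strings.HasPrefix(path, "~") {
		return strings.Replace(path, "~", homedir.HomeDir(), 1)
	}
	return path
}
```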

View File

@@ -2,20 +2,22 @@ package cmds
import (
"math/rand"
"os"
"runtime"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"go.uber.org/automaxprocs/maxprocs"
glog "gvisor.dev/gvisor/pkg/log"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/core"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/core"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdServe(_ cmdutil.Factory) *cobra.Command {
@@ -24,35 +26,32 @@ func CmdServe(_ cmdutil.Factory) *cobra.Command {
Use: "serve",
Hidden: true,
Short: "Server side, startup traffic manager, forward inbound and outbound traffic",
Long: templates.LongDesc(`Server side, startup traffic manager, forward inbound and outbound traffic.`),
Long: templates.LongDesc(i18n.T(`
Server side, startup traffic manager, forward inbound and outbound traffic.
`)),
Example: templates.Examples(i18n.T(`
# serve node
kubevpn serve -L "tcp://:10800" -L "tun://127.0.0.1:8422?net=223.254.0.123/32"
`)),
`)),
PreRun: func(*cobra.Command, []string) {
util.InitLogger(config.Debug)
util.InitLoggerForServer(config.Debug)
runtime.GOMAXPROCS(0)
go util.StartupPProf(0)
go util.StartupPProfForServer(config.PProfPort)
glog.SetTarget(util.ServerEmitter{Writer: &glog.Writer{Next: os.Stderr}})
},
RunE: func(cmd *cobra.Command, args []string) error {
rand.Seed(time.Now().UnixNano())
_, _ = maxprocs.Set(maxprocs.Logger(nil))
err := handler.RentIPIfNeeded(route)
ctx := cmd.Context()
err := handler.Complete(ctx, route)
if err != nil {
return err
}
defer func() {
err := handler.ReleaseIPIfNeeded()
if err != nil {
log.Errorf("release ip failed: %v", err)
}
}()
servers, err := handler.Parse(*route)
if err != nil {
log.Errorf("parse server failed: %v", err)
log.Errorf("Parse server failed: %v", err)
return err
}
ctx := cmd.Context()
return handler.Run(ctx, servers)
},
}
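
The serve example above registers listeners as URLs, e.g. `tcp://:10800` and `tun://127.0.0.1:8422?net=223.254.0.123/32`. A toy decomposition of one such `-L` value with net/url, just to show the shape `handler.Parse` consumes (the actual parser is more involved):

```go
package cmds

import "net/url"

// parseListener splits a -L value into scheme, listen address and the
// optional net= CIDR, e.g. tun://127.0.0.1:8422?net=223.254.0.123/32
// yields ("tun", "127.0.0.1:8422", "223.254.0.123/32").
func parseListener(s string) (scheme, addr, cidr string, err error) {
	u, err := url.Parse(s)
	if err != nil {
		return "", "", "", err
	}
	return u.Scheme, u.Host, u.Query().Get("net"), nil
}
```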

View File

@@ -1,37 +1,48 @@
package cmds
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"strings"
"github.com/containerd/containerd/platforms"
"github.com/google/uuid"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/net/websocket"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"k8s.io/kubectl/pkg/util/term"
"github.com/wencaiwulue/kubevpn/pkg/daemon"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/handler"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
// CmdSSH
// Set the local IP to 223.254.0.1/32; remember, the mask must be /32
// so that other routes will not be directed here
// Remember to use network mask 32, because ssh uses the unique network CIDR 223.255.0.0/16
func CmdSSH(_ cmdutil.Factory) *cobra.Command {
var sshConf = &util.SshConfig{}
var ExtraCIDR []string
var sshConf = &pkgssh.SshConfig{}
var extraCIDR []string
var platform string
var lite bool
cmd := &cobra.Command{
Use: "ssh",
Short: "Ssh to jump server",
Long: `Ssh to jump server`,
Long: templates.LongDesc(i18n.T(`
Ssh to jump server
`)),
Example: templates.Examples(i18n.T(`
# Jump to a server behind a bastion host or ssh jump host
kubevpn ssh --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# It also support ProxyJump, like
# It also supports ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────┘
@@ -41,26 +52,17 @@ func CmdSSH(_ cmdutil.Factory) *cobra.Command {
kubevpn ssh --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
kubevpn ssh --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
kubevpn ssh --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`)),
`)),
PreRunE: func(cmd *cobra.Command, args []string) error {
util.InitLoggerForClient(false)
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
config, err := websocket.NewConfig("ws://test/ws", "http://test")
plat, err := platforms.Parse(platform)
if err != nil {
return err
}
config.Header.Set("ssh-addr", sshConf.Addr)
config.Header.Set("ssh-username", sshConf.User)
config.Header.Set("ssh-password", sshConf.Password)
config.Header.Set("ssh-keyfile", sshConf.Keyfile)
config.Header.Set("ssh-alias", sshConf.ConfigAlias)
config.Header.Set("extra-cidr", strings.Join(ExtraCIDR, ","))
config.Header.Set("gssapi-password", sshConf.GSSAPIPassword)
config.Header.Set("gssapi-keytab", sshConf.GSSAPIKeytabConf)
config.Header.Set("gssapi-cache", sshConf.GSSAPICacheFile)
client := daemon.GetTCPClient(true)
conn, err := websocket.NewClient(config, client)
config, err := websocket.NewConfig("ws://test/ws", "http://test")
if err != nil {
return err
}
@@ -68,17 +70,126 @@ func CmdSSH(_ cmdutil.Factory) *cobra.Command {
if !terminal.IsTerminal(fd) {
return fmt.Errorf("stdin is not a terminal")
}
state, err := terminal.MakeRaw(fd)
width, height, err := terminal.GetSize(fd)
if err != nil {
return fmt.Errorf("terminal make raw: %s", err)
return fmt.Errorf("terminal get size: %s", err)
}
sessionID := uuid.NewString()
ssh := handler.Ssh{
Config: *sshConf,
ExtraCIDR: extraCIDR,
Width: width,
Height: height,
Platform: platforms.Format(platforms.Normalize(plat)),
SessionID: sessionID,
Lite: lite,
}
marshal, err := json.Marshal(ssh)
if err != nil {
return err
}
config.Header.Set("ssh", string(marshal))
client := daemon.GetTCPClient(true)
if client == nil {
return fmt.Errorf("client is nil")
}
conn, err := websocket.NewClient(config, client)
if err != nil {
return err
}
defer conn.Close()
errChan := make(chan error, 3)
go func() {
errChan <- monitorSize(cmd.Context(), sessionID)
}()
readyCtx, cancelFunc := context.WithCancel(cmd.Context())
checker := func(log string) bool {
isReady := strings.Contains(log, fmt.Sprintf(handler.SshTerminalReadyFormat, sessionID))
if isReady {
cancelFunc()
}
return isReady
}
var state *terminal.State
go func() {
select {
case <-cmd.Context().Done():
return
case <-readyCtx.Done():
}
if state, err = terminal.MakeRaw(fd); err != nil {
log.Errorf("terminal make raw: %s", err)
}
}()
go func() {
_, err := io.Copy(conn, os.Stdin)
errChan <- err
}()
go func() {
_, err := io.Copy(io.MultiWriter(os.Stdout, util.NewWriter(checker)), conn)
errChan <- err
}()
defer func() {
if state != nil {
terminal.Restore(fd, state)
}
}()
select {
case err := <-errChan:
return err
case <-cmd.Context().Done():
return cmd.Context().Err()
}
defer terminal.Restore(fd, state)
go io.Copy(conn, os.Stdin)
_, err = io.Copy(os.Stdout, conn)
return err
},
}
addSshFlags(cmd, sshConf)
cmd.Flags().StringArrayVar(&ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
cmd.Flags().StringArrayVar(&extraCIDR, "extra-cidr", []string{}, "Extra network CIDR string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
cmd.Flags().StringVar(&platform, "platform", util.If(os.Getenv("KUBEVPN_DEFAULT_PLATFORM") != "", os.Getenv("KUBEVPN_DEFAULT_PLATFORM"), "linux/amd64"), "Set the ssh server platform if the kubevpn command needs to be installed")
cmd.Flags().BoolVar(&lite, "lite", false, "Connect to the ssh server in lite mode. Mode \"lite\": only connects to the ssh server. Mode \"full\": not only connects to the ssh server, but also creates a two-way tunnel to communicate with the inner ip")
return cmd
}
func monitorSize(ctx context.Context, sessionID string) error {
conn := daemon.GetTCPClient(true)
if conn == nil {
return fmt.Errorf("conn is nil")
}
var tt = term.TTY{
In: os.Stdin,
Out: os.Stdout,
Raw: false,
TryDev: false,
Parent: nil,
}
sizeQueue := tt.MonitorSize(tt.GetSize())
if sizeQueue == nil {
return fmt.Errorf("sizeQueue is nil")
}
//defer runtime.HandleCrash()
config, err := websocket.NewConfig("ws://test/resize", "http://test")
if err != nil {
return err
}
config.Header.Set("session-id", sessionID)
client, err := websocket.NewClient(config, conn)
if err != nil {
return err
}
encoder := json.NewEncoder(client)
for ctx.Err() == nil {
size := sizeQueue.Next()
if size == nil {
return nil
}
if err = encoder.Encode(&size); err != nil {
log.Errorf("Encode resize: %s", err)
return err
}
}
return nil
}
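
`monitorSize` above pushes terminal sizes to the daemon as back-to-back JSON objects over the `/resize` websocket. A hypothetical receiving loop on the daemon side (names assumed; the real handler is not part of this diff):

```go
package cmds

import (
	"encoding/json"
	"io"
)

// handleResize is a sketch of the daemon-side consumer of the /resize stream.
// Each message is one JSON object such as {"Width":120,"Height":40}, the
// encoding of the TerminalSize values produced by term.TTY.MonitorSize.
func handleResize(ws io.Reader, apply func(width, height uint16)) error {
	dec := json.NewDecoder(ws)
	for {
		var size struct{ Width, Height uint16 }
		if err := dec.Decode(&size); err != nil {
			if err == io.EOF {
				return nil // client closed the resize stream
			}
			return err
		}
		apply(size.Width, size.Height) // e.g. resize the server-side PTY
	}
}
```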

View File

@@ -9,24 +9,23 @@ import (
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/daemon"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
)
// CmdSSHDaemon
// Set the local IP to 223.254.0.1/32; remember, the mask must be /32,
// so that other routes will not be directed here
// set local tun ip 223.254.0.1/32, remember to use mask 32
func CmdSSHDaemon(_ cmdutil.Factory) *cobra.Command {
var clientIP string
cmd := &cobra.Command{
Use: "ssh-daemon",
Hidden: true,
Short: "Ssh daemon server",
Long: `Ssh daemon server`,
Long: templates.LongDesc(i18n.T(`Ssh daemon server`)),
Example: templates.Examples(i18n.T(`
# SSH daemon server
kubevpn ssh-daemon --client-ip 223.254.0.123/32
`)),
`)),
PreRunE: func(cmd *cobra.Command, args []string) error {
err := daemon.StartupDaemon(cmd.Context())
return err
@@ -41,8 +40,8 @@ func CmdSSHDaemon(_ cmdutil.Factory) *cobra.Command {
if err != nil {
return err
}
fmt.Fprint(os.Stdout, client.ServerIP)
return nil
_, err = fmt.Fprint(os.Stdout, client.ServerIP)
return err
},
}
cmd.Flags().StringVar(&clientIP, "client-ip", "", "Client cidr")

View File

@@ -1,41 +1,282 @@
package cmds
import (
"bytes"
"encoding/json"
"fmt"
"os"
"strings"
"github.com/liggitt/tabwriter"
"github.com/spf13/cobra"
flag "github.com/spf13/pflag"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/printers"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"sigs.k8s.io/yaml"
"github.com/wencaiwulue/kubevpn/pkg/daemon"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
const (
FormatJson = "json"
FormatYaml = "yaml"
FormatTable = "table"
)
func CmdStatus(f cmdutil.Factory) *cobra.Command {
var aliasName string
var localFile string
var remoteAddr string
var format string
cmd := &cobra.Command{
Use: "status",
Short: i18n.T("KubeVPN status"),
Long: templates.LongDesc(i18n.T(`KubeVPN status`)),
Short: i18n.T("Show connect status and list proxy/clone resource"),
Long: templates.LongDesc(i18n.T(`
Show connect status and list proxy/clone resource
Show connect status and list proxy or clone resources. You can check the connect status by the fields status and netif:
if netif is empty, the tun device is closed, so the connection is unhealthy. It also shows route info; if workloads are proxied,
not only your own proxy resources but also other users' route info will be displayed.
`)),
Example: templates.Examples(i18n.T(`
# show status for kubevpn status
# show connect status and list proxy/clone resources
kubevpn status
`)),
# query status by alias config name dev_new
kubevpn status --alias dev_new
# query status with output json format
kubevpn status -o json
# query status with output yaml format
kubevpn status -o yaml
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
util.InitLoggerForClient(false)
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
client, err := daemon.GetClient(false).Status(
var clusterIDs []string
if aliasName != "" {
configs, err := ParseAndGet(localFile, remoteAddr, aliasName)
if err != nil {
return err
}
for _, config := range configs {
clusterID, err := GetClusterIDByConfig(cmd, config)
if err != nil {
return err
}
clusterIDs = append(clusterIDs, clusterID)
}
}
resp, err := daemon.GetClient(false).Status(
cmd.Context(),
&rpc.StatusRequest{},
&rpc.StatusRequest{
ClusterIDs: clusterIDs,
},
)
if err != nil {
return err
}
fmt.Fprint(os.Stdout, client.GetMessage())
output, err := genOutput(resp, format)
if err != nil {
return err
}
_, _ = fmt.Fprint(os.Stdout, output)
return nil
},
}
cmd.Flags().StringVar(&aliasName, "alias", "", "Alias name, query connect status by alias config name")
cmd.Flags().StringVarP(&localFile, "file", "f", config.GetConfigFilePath(), "Config file location")
cmd.Flags().StringVarP(&remoteAddr, "remote", "r", "", "Remote config file, eg: https://raw.githubusercontent.com/kubenetworks/kubevpn/master/pkg/config/config.yaml")
cmd.Flags().StringVarP(&format, "output", "o", FormatTable, fmt.Sprintf("Output format. One of: (%s, %s, %s)", FormatJson, FormatYaml, FormatTable))
return cmd
}
func genOutput(status *rpc.StatusResponse, format string) (string, error) {
switch format {
case FormatJson:
if len(status.List) == 0 {
return "", nil
}
marshal, err := json.Marshal(status.List)
if err != nil {
return "", err
}
return string(marshal), nil
case FormatYaml:
if len(status.List) == 0 {
return "", nil
}
marshal, err := yaml.Marshal(status.List)
if err != nil {
return "", err
}
return string(marshal), nil
default:
var sb = new(bytes.Buffer)
w := printers.GetNewTabWriter(sb)
genConnectMsg(w, status.List)
genProxyMsg(w, status.List)
genCloneMsg(w, status.List)
_ = w.Flush()
return sb.String(), nil
}
}
func genConnectMsg(w *tabwriter.Writer, status []*rpc.Status) {
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", "ID", "Mode", "Cluster", "Kubeconfig", "Namespace", "Status", "Netif")
for _, c := range status {
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%s\t%s\n", c.ID, c.Mode, c.Cluster, c.Kubeconfig, c.Namespace, c.Status, c.Netif)
}
}
func genProxyMsg(w *tabwriter.Writer, list []*rpc.Status) {
var needsPrint bool
for _, status := range list {
if len(status.ProxyList) != 0 {
needsPrint = true
break
}
}
if !needsPrint {
return
}
_, _ = fmt.Fprintf(w, "\n")
w.SetRememberedWidths(nil)
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", "ID", "Name", "Headers", "IP", "PortMap", "CurrentPC")
for _, c := range list {
for _, proxy := range c.ProxyList {
for _, rule := range proxy.RuleList {
var headers []string
for k, v := range rule.Headers {
headers = append(headers, fmt.Sprintf("%s=%s", k, v))
}
if len(headers) == 0 {
headers = []string{"*"}
}
var portmap []string
for k, v := range rule.PortMap {
portmap = append(portmap, fmt.Sprintf("%d->%d", k, v))
}
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%v\n",
c.ID,
proxy.Workload,
strings.Join(headers, ","),
rule.LocalTunIPv4,
strings.Join(portmap, ","),
rule.CurrentDevice,
)
}
}
}
}
func genCloneMsg(w *tabwriter.Writer, list []*rpc.Status) {
var needsPrint bool
for _, status := range list {
if len(status.CloneList) != 0 {
needsPrint = true
break
}
}
if !needsPrint {
return
}
_, _ = fmt.Fprintf(w, "\n")
w.SetRememberedWidths(nil)
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", "ID", "Name", "Headers", "ToName", "ToKubeconfig", "ToNamespace", "SyncthingGUI")
for _, c := range list {
for _, clone := range c.CloneList {
//_, _ = fmt.Fprintf(w, "%s\n", clone.Workload)
for _, rule := range clone.RuleList {
var headers []string
for k, v := range rule.Headers {
headers = append(headers, fmt.Sprintf("%s=%s", k, v))
}
if len(headers) == 0 {
headers = []string{"*"}
}
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%s\t%s\n",
c.ID,
clone.Workload,
strings.Join(headers, ","),
rule.DstWorkload,
rule.DstKubeconfig,
rule.DstNamespace,
clone.SyncthingGUIAddr,
)
}
}
}
}
func GetClusterIDByConfig(cmd *cobra.Command, config Config) (string, error) {
flags := flag.NewFlagSet("", flag.ContinueOnError)
var sshConf = &pkgssh.SshConfig{}
pkgssh.AddSshFlags(flags, sshConf)
handler.AddExtraRoute(flags, &handler.ExtraRouteInfo{})
configFlags := genericclioptions.NewConfigFlags(false).WithDeprecatedPasswordFlag()
configFlags.AddFlags(flags)
matchVersionFlags := cmdutil.NewMatchVersionFlags(&warp{ConfigFlags: configFlags})
matchVersionFlags.AddFlags(flags)
factory := cmdutil.NewFactory(matchVersionFlags)
for _, command := range cmd.Parent().Commands() {
command.Flags().VisitAll(func(f *flag.Flag) {
if flags.Lookup(f.Name) == nil && flags.ShorthandLookup(f.Shorthand) == nil {
flags.AddFlag(f)
}
})
}
err := flags.ParseAll(config.Flags, func(flag *flag.Flag, value string) error {
_ = flags.Set(flag.Name, value)
return nil
})
bytes, ns, err := util.ConvertToKubeConfigBytes(factory)
if err != nil {
return "", err
}
file, err := util.ConvertToTempKubeconfigFile(bytes)
if err != nil {
return "", err
}
flags = flag.NewFlagSet("", flag.ContinueOnError)
flags.AddFlag(&flag.Flag{
Name: "kubeconfig",
DefValue: file,
})
flags.AddFlag(&flag.Flag{
Name: "namespace",
DefValue: ns,
})
var path string
path, err = pkgssh.SshJump(cmd.Context(), sshConf, flags, false)
if err != nil {
return "", err
}
var c = &handler.ConnectOptions{}
err = c.InitClient(util.InitFactoryByPath(path, ns))
if err != nil {
return "", err
}
err = c.InitDHCP(cmd.Context())
if err != nil {
return "", err
}
return c.GetClusterID(), nil
}
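
For orientation, with the sample data used in the tests that follow, the default table renderer produces output shaped roughly like this (widths illustrative):

```text
ID  Mode  Cluster                Kubeconfig                                              Namespace   Status     Netif
0   full  ccm6epn7qvcplhs3o8p00  /Users/bytedance/.kube/test-feiyan-config-private-new  vke-system  Connected  utun4
1   full  ccnepblsebp68ivej4a20  /Users/bytedance/.kube/dev_fy_config_new               vke-system  Connected  utun5
```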

View File

@@ -0,0 +1,218 @@
package cmds
import (
"fmt"
"testing"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
)
func TestPrintProxyAndClone(t *testing.T) {
var status = &rpc.StatusResponse{
List: []*rpc.Status{
{
ID: 0,
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Mode: "full",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Status: "Connected",
Netif: "utun4",
ProxyList: []*rpc.Proxy{
{
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Workload: "deployment.apps/authors",
RuleList: []*rpc.ProxyRule{
{
Headers: map[string]string{"user": "naison"},
LocalTunIPv4: "223.254.0.103",
LocalTunIPv6: "efff:ffff:ffff:ffff:ffff:ffff:ffff:999d",
CurrentDevice: false,
PortMap: map[int32]int32{8910: 8910},
},
},
},
},
CloneList: []*rpc.Clone{
{
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Workload: "deployment.apps/ratings",
RuleList: []*rpc.CloneRule{{
Headers: map[string]string{"user": "naison"},
DstClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
DstCluster: "ccm6epn7qvcplhs3o8p00",
DstKubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
DstNamespace: "vke-system",
DstWorkload: "deployment.apps/ratings-clone-5ngn6",
}},
},
},
},
{
ID: 1,
ClusterID: "c08cae70-0021-46c9-a1dc-38e6a2f11443",
Cluster: "ccnepblsebp68ivej4a20",
Mode: "full",
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
Namespace: "vke-system",
Status: "Connected",
Netif: "utun5",
ProxyList: []*rpc.Proxy{},
CloneList: []*rpc.Clone{},
},
},
}
output, err := genOutput(status, FormatTable)
if err != nil {
t.Fatal(err)
}
fmt.Println(output)
}
func TestPrintProxy(t *testing.T) {
var status = &rpc.StatusResponse{
List: []*rpc.Status{
{
ID: 0,
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Mode: "full",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Status: "Connected",
Netif: "utun4",
ProxyList: []*rpc.Proxy{
{
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Workload: "deployment.apps/authors",
RuleList: []*rpc.ProxyRule{
{
Headers: map[string]string{"user": "naison"},
LocalTunIPv4: "223.254.0.103",
LocalTunIPv6: "efff:ffff:ffff:ffff:ffff:ffff:ffff:999d",
CurrentDevice: false,
PortMap: map[int32]int32{8910: 8910},
},
},
},
},
CloneList: []*rpc.Clone{},
},
{
ID: 1,
ClusterID: "c08cae70-0021-46c9-a1dc-38e6a2f11443",
Cluster: "ccnepblsebp68ivej4a20",
Mode: "full",
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
Namespace: "vke-system",
Status: "Connected",
Netif: "utun5",
ProxyList: []*rpc.Proxy{},
CloneList: []*rpc.Clone{},
},
},
}
output, err := genOutput(status, FormatTable)
if err != nil {
t.Fatal(err)
}
fmt.Println(output)
}
func TestPrintClone(t *testing.T) {
var status = &rpc.StatusResponse{
List: []*rpc.Status{
{
ID: 0,
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Mode: "full",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Status: "Connected",
Netif: "utun4",
ProxyList: []*rpc.Proxy{},
CloneList: []*rpc.Clone{
{
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Workload: "deployment.apps/ratings",
RuleList: []*rpc.CloneRule{{
Headers: map[string]string{"user": "naison"},
DstClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
DstCluster: "ccm6epn7qvcplhs3o8p00",
DstKubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
DstNamespace: "vke-system",
DstWorkload: "deployment.apps/ratings-clone-5ngn6",
}},
},
},
},
{
ID: 1,
ClusterID: "c08cae70-0021-46c9-a1dc-38e6a2f11443",
Cluster: "ccnepblsebp68ivej4a20",
Mode: "full",
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
Namespace: "vke-system",
Status: "Connected",
Netif: "utun5",
ProxyList: []*rpc.Proxy{},
CloneList: []*rpc.Clone{},
},
},
}
output, err := genOutput(status, FormatTable)
if err != nil {
t.Fatal(err)
}
fmt.Println(output)
}
func TestPrint(t *testing.T) {
var status = &rpc.StatusResponse{
List: []*rpc.Status{
{
ID: 0,
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
Cluster: "ccm6epn7qvcplhs3o8p00",
Mode: "full",
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
Namespace: "vke-system",
Status: "Connected",
Netif: "utun4",
ProxyList: []*rpc.Proxy{},
CloneList: []*rpc.Clone{},
},
{
ID: 1,
ClusterID: "c08cae70-0021-46c9-a1dc-38e6a2f11443",
Cluster: "ccnepblsebp68ivej4a20",
Mode: "full",
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
Namespace: "vke-system",
Status: "Connected",
Netif: "utun5",
ProxyList: []*rpc.Proxy{},
CloneList: []*rpc.Clone{},
},
},
}
output, err := genOutput(status, FormatTable)
if err != nil {
t.Fatal(err)
}
fmt.Println(output)
}

View File

@@ -0,0 +1,30 @@
package cmds
import (
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/v2/pkg/syncthing"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdSyncthing(_ cmdutil.Factory) *cobra.Command {
var detach bool
var dir string
cmd := &cobra.Command{
Use: "syncthing",
Short: i18n.T("Syncthing"),
Long: templates.LongDesc(i18n.T(`Syncthing`)),
RunE: func(cmd *cobra.Command, args []string) (err error) {
go util.StartupPProfForServer(0)
return syncthing.StartServer(cmd.Context(), detach, dir)
},
Hidden: true,
DisableFlagsInUseLine: true,
}
cmd.Flags().StringVar(&dir, "dir", "", "dir")
cmd.Flags().BoolVarP(&detach, "detach", "d", false, "Run syncthing in background")
return cmd
}

View File

@@ -0,0 +1,95 @@
package cmds
import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"k8s.io/utils/ptr"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdUninstall(f cmdutil.Factory) *cobra.Command {
var sshConf = &pkgssh.SshConfig{}
cmd := &cobra.Command{
Use: "uninstall",
Short: "Uninstall all resource create by kubevpn in k8s cluster",
Long: templates.LongDesc(i18n.T(`
Uninstall all resource create by kubevpn in k8s cluster
Uninstall will delete all resources create by kubevpn in k8s cluster, like deployment, service, serviceAccount...
and it will also delete local develop docker containers, docker networks. delete hosts entry added by kubevpn,
cleanup DNS settings.
`)),
Example: templates.Examples(i18n.T(`
# Uninstall default namespace
kubevpn uninstall
# Uninstall another namespace test
kubevpn uninstall -n test
# Uninstall cluster whose api-server is behind a bastion host or ssh jump host
kubevpn uninstall --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# It also supports ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn uninstall --ssh-alias <alias>
# Support ssh auth GSSAPI
kubevpn uninstall --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
kubevpn uninstall --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
kubevpn uninstall --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`)),
PreRunE: func(cmd *cobra.Command, args []string) error {
util.InitLoggerForClient(false)
return daemon.StartupDaemon(cmd.Context())
},
RunE: func(cmd *cobra.Command, args []string) error {
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
if err != nil {
return err
}
cli := daemon.GetClient(false)
disconnect, err := cli.Disconnect(cmd.Context(), &rpc.DisconnectRequest{
KubeconfigBytes: ptr.To(string(bytes)),
Namespace: ptr.To(ns),
SshJump: sshConf.ToRPC(),
})
if err != nil {
log.Warnf("Failed to disconnect from cluter: %v", err)
} else {
_ = util.PrintGRPCStream[rpc.DisconnectResponse](disconnect)
}
req := &rpc.UninstallRequest{
KubeconfigBytes: string(bytes),
Namespace: ns,
SshJump: sshConf.ToRPC(),
}
resp, err := cli.Uninstall(cmd.Context(), req)
if err != nil {
return err
}
err = util.PrintGRPCStream[rpc.UninstallResponse](resp)
if err != nil {
if status.Code(err) == codes.Canceled {
return nil
}
return err
}
return nil
},
}
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
return cmd
}

View File

@@ -4,56 +4,55 @@ import (
"fmt"
"net/http"
"os"
"runtime"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/oauth2"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/daemon"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/pkg/upgrade"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/upgrade"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func CmdUpgrade(_ cmdutil.Factory) *cobra.Command {
cmd := &cobra.Command{
Use: "upgrade",
Short: "Upgrade KubeVPN version",
Long: `Upgrade KubeVPN version, automatically download latest KubeVPN from GitHub`,
Run: func(cmd *cobra.Command, args []string) {
Short: i18n.T("Upgrade kubevpn client to latest version"),
Long: templates.LongDesc(i18n.T(`
Upgrade the kubevpn client to the latest version: automatically download and install the latest kubevpn from GitHub,
disconnect from all k8s clusters, leave all proxied resources, remove all cloned resources, and then
upgrade the local daemon grpc server to the latest version.
`)),
RunE: func(cmd *cobra.Command, args []string) error {
const (
envLatestUrl = "KUBEVPN_LATEST_VERSION_URL"
)
util.InitLoggerForClient(false)
var client = http.DefaultClient
if config.GitHubOAuthToken != "" {
client = oauth2.NewClient(cmd.Context(), oauth2.StaticTokenSource(&oauth2.Token{AccessToken: config.GitHubOAuthToken, TokenType: "Bearer"}))
}
latestVersion, latestCommit, url, err := util.GetManifest(client, runtime.GOOS, runtime.GOARCH)
if err != nil {
log.Fatal(err)
}
err = upgrade.Main(cmd.Context(), client, latestVersion, latestCommit, url)
if err != nil {
log.Fatal(err)
}
fmt.Fprint(os.Stdout, "Upgrade daemon...")
for _, isSudo := range []bool{false, true} {
cli := daemon.GetClient(isSudo)
if cli != nil {
var response *rpc.UpgradeResponse
response, err = cli.Upgrade(cmd.Context(), &rpc.UpgradeRequest{
ClientVersion: latestVersion,
ClientCommitId: latestCommit,
})
if err == nil && !response.NeedUpgrade {
// do nothing
} else {
_ = quit(cmd.Context(), isSudo)
}
var url = os.Getenv(envLatestUrl)
if url == "" {
var latestVersion string
var needsUpgrade bool
var err error
url, latestVersion, needsUpgrade, err = upgrade.NeedsUpgrade(cmd.Context(), client, config.Version)
if err != nil {
return err
}
if !needsUpgrade {
_, _ = fmt.Fprintf(os.Stdout, "Already up to date, don't needs to upgrade, version: %s", latestVersion)
return nil
}
_, _ = fmt.Fprintf(os.Stdout, "Current version is: %s less than latest version: %s, needs to upgrade", config.Version, latestVersion)
_ = os.Setenv(envLatestUrl, url)
_ = quit(cmd.Context(), false)
_ = quit(cmd.Context(), true)
}
err = daemon.StartupDaemon(cmd.Context())
fmt.Fprint(os.Stdout, "done")
return upgrade.Main(cmd.Context(), client, url)
},
}
return cmd
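
`upgrade.NeedsUpgrade` above decides whether a download is required at all. A sketch of the version comparison such a check could perform with hashicorp/go-version, which is already in go.mod (the real function also resolves the release download URL; the helper name here is an assumption):

```go
package cmds

import (
	"strings"

	version "github.com/hashicorp/go-version"
)

// needsUpgrade reports whether current is older than latest, tolerating a
// leading "v" on either version string.
func needsUpgrade(current, latest string) (bool, error) {
	cv, err := version.NewVersion(strings.TrimPrefix(current, "v"))
	if err != nil {
		return false, err
	}
	lv, err := version.NewVersion(strings.TrimPrefix(latest, "v"))
	if err != nil {
		return false, err
	}
	return cv.LessThan(lv), nil
}
```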

View File

@@ -9,10 +9,12 @@ import (
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/daemon"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
)
// --ldflags -X
@@ -33,12 +35,14 @@ func reformatDate(buildTime string) string {
func CmdVersion(cmdutil.Factory) *cobra.Command {
cmd := &cobra.Command{
Use: "version",
Short: "Print the client version information",
Long: `Print the client version information`,
Short: i18n.T("Print the client version information"),
Long: templates.LongDesc(i18n.T(`
Print the client version information
`)),
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("KubeVPN: CLI\n")
fmt.Printf(" Version: %s\n", config.Version)
fmt.Printf(" DaemonVersion: %s\n", getDaemonVersion())
fmt.Printf(" Daemon: %s\n", getDaemonVersion())
fmt.Printf(" Image: %s\n", config.Image)
fmt.Printf(" Branch: %s\n", Branch)
fmt.Printf(" Git commit: %s\n", config.GitCommit)

View File

@@ -3,23 +3,27 @@ package cmds
import (
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/pkg/webhook"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/webhook"
)
func CmdWebhook(f cmdutil.Factory) *cobra.Command {
cmd := &cobra.Command{
Use: "webhook",
Hidden: true,
Short: "Starts a HTTP server, useful for creating MutatingAdmissionWebhook",
Long: `Starts a HTTP server, useful for creating MutatingAdmissionWebhook.
After deploying it to Kubernetes cluster, the Administrator needs to create a MutatingWebhookConfiguration
in the Kubernetes cluster to register remote webhook admission controllers.`,
Short: i18n.T("Starts a HTTP server, useful for creating MutatingAdmissionWebhook"),
Long: templates.LongDesc(i18n.T(`
Starts a HTTP server, useful for creating MutatingAdmissionWebhook.
After deploying it to Kubernetes cluster, the Administrator needs to create a MutatingWebhookConfiguration
in the Kubernetes cluster to register remote webhook admission controllers.
`)),
Args: cobra.MaximumNArgs(0),
PreRun: func(cmd *cobra.Command, args []string) {
util.InitLogger(true)
go util.StartupPProf(0)
util.InitLoggerForServer(true)
go util.StartupPProfForServer(0)
},
RunE: func(cmd *cobra.Command, args []string) error {
return webhook.Main(f)

View File

@@ -6,7 +6,7 @@ import (
_ "k8s.io/client-go/plugin/pkg/client/auth"
_ "net/http/pprof"
"github.com/wencaiwulue/kubevpn/cmd/kubevpn/cmds"
"github.com/wencaiwulue/kubevpn/v2/cmd/kubevpn/cmds"
)
func main() {

45
docs/en/Architecture.md Normal file
View File

@@ -0,0 +1,45 @@
## Architecture
### Connect mode
Create a tunnel with port-forward, add routes to a virtual interface (like tun0), and forward traffic through the tunnel
to the remote traffic manager.
![connect-mode](/docs/en/images/connect-mode.drawio.svg)
### Reverse mode
Based on connect mode: inject a sidecar container into the controller, use iptables to intercept all inbound traffic,
and forward it to the local machine through the tunnel.
```text
┌──────────┐ ┌─────────┌──────────┐ ┌──────────┐
│ ServiceA ├───►│ sidecar │ ServiceB │ ┌─►│ ServiceC │
└──────────┘ └────┌────┘──────────┘ │ └──────────┘
│ │
│ │ cloud
─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┘─ ─ ─ ─ ─ ─ ─ ─ ─┘ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─
│ │ local
┌───┘──────┐ │
│ ServiceB'├──────────┘
└──────────┘
```
### Mesh mode
Based on reverse mode, using Envoy as the proxy: if the headers contain the special key-value pair, traffic is routed
to the local machine; if not, the origin service is used.
```text
┌──────────┐ ┌─────────┌────────────┐ ┌──────────┐
│ ServiceA ├───►│ sidecar ├─► ServiceB │─►┌─►│ ServiceC │
└──────────┘ └────┌────┘────────────┘ │ └──────────┘
│ │ cloud
─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─┘─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┘ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─
│ │ local
header: foo=bar │
┌───┘──────┐ │
│ ServiceB'├─────────────┘
└──────────┘
```
![arch.svg](/docs/en/images/proxy-arch.svg)
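
A toy model of the mesh-mode decision: every configured header must match for traffic to be diverted to the local copy, otherwise the origin in-cluster service is used (the real matching is performed by the injected Envoy sidecar; this sketch only illustrates the AND semantics):

```go
// routeTarget picks the destination for a request in mesh mode. rule is the
// developer's --headers map; all pairs must match (AND semantics).
func routeTarget(reqHeaders, rule map[string]string) string {
	for k, v := range rule {
		if reqHeaders[k] != v {
			return "ServiceB (origin, in cluster)"
		}
	}
	return "ServiceB' (local, via tunnel)"
}
```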

(Two SVG image file diffs suppressed; images added: 106 KiB and 488 KiB.)

366
go.mod
View File

@@ -1,220 +1,314 @@
module github.com/wencaiwulue/kubevpn
module github.com/wencaiwulue/kubevpn/v2
go 1.20
go 1.23.2
require (
github.com/cilium/ipam v0.0.0-20220824141044-46ef3d556735
github.com/containerd/containerd v1.5.18
github.com/cilium/ipam v0.0.0-20230509084518-fd66eae7909b
github.com/containerd/containerd v1.7.14
github.com/containernetworking/cni v1.1.2
github.com/coredns/caddy v1.1.1
github.com/coredns/coredns v1.10.1
github.com/docker/cli v23.0.1+incompatible
github.com/docker/distribution v2.8.1+incompatible
github.com/docker/docker v23.0.1+incompatible
github.com/docker/go-connections v0.4.0
github.com/coredns/coredns v1.11.2
github.com/distribution/reference v0.6.0
github.com/docker/cli v27.5.1+incompatible
github.com/docker/docker v27.5.1+incompatible
github.com/docker/go-connections v0.5.0
github.com/docker/go-units v0.5.0
github.com/docker/libcontainer v2.2.1+incompatible
github.com/envoyproxy/go-control-plane v0.10.3
github.com/fsnotify/fsnotify v1.6.0
github.com/envoyproxy/go-control-plane v0.13.1
github.com/fsnotify/fsnotify v1.7.0
github.com/gliderlabs/ssh v0.3.8
github.com/google/gopacket v1.1.19
github.com/google/uuid v1.3.0
github.com/google/uuid v1.6.0
github.com/hashicorp/go-version v1.6.0
github.com/hpcloud/tail v1.0.0
github.com/jcmturner/gokrb5/v8 v8.4.3
github.com/jcmturner/gofork v1.7.6
github.com/jcmturner/gokrb5/v8 v8.4.4
github.com/kevinburke/ssh_config v1.2.0
github.com/libp2p/go-netroute v0.2.1
github.com/mattbaird/jsonpatch v0.0.0-20200820163806-098863c1fc24
github.com/miekg/dns v1.1.50
github.com/moby/term v0.0.0-20221205130635-1aeaba878587
github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de
github.com/mattbaird/jsonpatch v0.0.0-20240118010651-0ba75a80ca38
github.com/miekg/dns v1.1.58
github.com/moby/term v0.5.0
github.com/opencontainers/image-spec v1.1.0
github.com/pkg/errors v0.9.1
github.com/prometheus-community/pro-bing v0.1.0
github.com/schollz/progressbar/v3 v3.13.0
github.com/sirupsen/logrus v1.9.0
github.com/spf13/cobra v1.6.1
github.com/prometheus-community/pro-bing v0.4.0
github.com/regclient/regclient v0.8.0
github.com/schollz/progressbar/v3 v3.14.2
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
go.uber.org/automaxprocs v1.5.1
golang.org/x/crypto v0.2.0
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090
golang.org/x/net v0.8.0
golang.org/x/oauth2 v0.6.0
golang.org/x/sync v0.1.0
golang.org/x/sys v0.6.0
golang.org/x/text v0.8.0
golang.org/x/time v0.3.0
golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224
github.com/syncthing/syncthing v1.29.2
github.com/thejerf/suture/v4 v4.0.6
go.uber.org/automaxprocs v1.6.0
golang.org/x/crypto v0.33.0
golang.org/x/net v0.34.0
golang.org/x/oauth2 v0.24.0
golang.org/x/sync v0.11.0
golang.org/x/sys v0.30.0
golang.org/x/term v0.29.0
golang.org/x/text v0.22.0
golang.org/x/time v0.8.0
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
golang.zx2c4.com/wireguard v0.0.0-20220920152132-bb719d3a6e2c
golang.zx2c4.com/wireguard/windows v0.5.3
google.golang.org/grpc v1.53.0-dev.0.20230123225046-4075ef07c5d5
google.golang.org/protobuf v1.30.0
gopkg.in/natefinch/lumberjack.v2 v2.0.0
gvisor.dev/gvisor v0.0.0-20230603040744-5c9219dedd33
k8s.io/api v0.26.3
k8s.io/apimachinery v0.26.3
k8s.io/cli-runtime v0.26.1
k8s.io/client-go v0.26.3
k8s.io/kubectl v0.26.1
k8s.io/utils v0.0.0-20230313181309-38a27ef9d749
sigs.k8s.io/controller-runtime v0.14.5
sigs.k8s.io/kustomize/api v0.12.1
sigs.k8s.io/yaml v1.3.0
google.golang.org/grpc v1.69.4
google.golang.org/protobuf v1.36.3
gopkg.in/natefinch/lumberjack.v2 v2.2.1
gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987
k8s.io/api v0.31.0-alpha.0
k8s.io/apimachinery v0.31.0-alpha.0
k8s.io/cli-runtime v0.29.3
k8s.io/client-go v0.31.0-alpha.0
k8s.io/klog/v2 v2.130.1
k8s.io/kubectl v0.29.3
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
sigs.k8s.io/controller-runtime v0.18.4
sigs.k8s.io/kustomize/api v0.16.0
sigs.k8s.io/yaml v1.4.0
tailscale.com v1.74.1
)
require (
cloud.google.com/go/compute v1.15.1 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cel.dev/expr v0.16.2 // indirect
cloud.google.com/go/compute/metadata v0.5.2 // indirect
dario.cat/mergo v1.0.0 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.28 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
github.com/Azure/go-autorest/autorest v0.11.29 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/DataDog/datadog-agent/pkg/obfuscate v0.0.0-20211129110424-6491aa3bf583 // indirect
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0-rc.1 // indirect
github.com/DataDog/datadog-go v4.8.2+incompatible // indirect
github.com/DataDog/datadog-go/v5 v5.0.2 // indirect
github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork // indirect
github.com/DataDog/sketches-go v1.2.1 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/DataDog/appsec-internal-go v1.5.0 // indirect
github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.0 // indirect
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.0 // indirect
github.com/DataDog/datadog-go/v5 v5.5.0 // indirect
github.com/DataDog/go-libddwaf/v2 v2.4.2 // indirect
github.com/DataDog/go-sqllexer v0.0.11 // indirect
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/DataDog/sketches-go v1.4.4 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/antonmedv/expr v1.12.0 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Microsoft/hcsshim v0.12.2 // indirect
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect
github.com/antonmedv/expr v1.15.5 // indirect
github.com/apparentlymart/go-cidr v1.1.0 // indirect
github.com/aws/aws-sdk-go v1.44.194 // indirect
github.com/aws/aws-sdk-go v1.55.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect
github.com/calmh/incontainer v1.0.0 // indirect
github.com/calmh/xdr v1.2.0 // indirect
github.com/ccding/go-stun v0.1.5 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/cncf/xds/go v0.0.0-20230112175826-46e39c7b9b43 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dgraph-io/ristretto v0.1.0 // indirect
github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/dnstap/golang-dnstap v0.4.0 // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/cli-docs-tool v0.9.0 // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.1 // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/emicklei/go-restful/v3 v3.10.2 // indirect
github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/ebitengine/purego v0.8.2 // indirect
github.com/emicklei/go-restful/v3 v3.12.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect
github.com/evanphx/json-patch v5.9.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/farsightsec/golang-framestream v0.3.0 // indirect
github.com/fatih/camelcase v1.0.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
github.com/fvbommel/sortorder v1.0.2 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/fvbommel/sortorder v1.1.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gaissmai/bart v0.11.1 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
github.com/go-errors/errors v1.5.1 // indirect
github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 // indirect
github.com/go-ldap/ldap/v3 v3.4.10 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
github.com/golang/glog v1.0.0 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/gnostic v0.6.9 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 // indirect
github.com/google/pprof v0.0.0-20250202011525-fc3143867406 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect
github.com/googleapis/gax-go/v2 v2.7.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.3 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/greatroar/blobloom v0.8.0 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/imdario/mergo v0.3.14 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hdevalence/ed25519consensus v0.2.0 // indirect
github.com/illarion/gonotify/v2 v2.0.3 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/infobloxopen/go-trees v0.0.0-20200715205103-96a057b8dfb9 // indirect
github.com/infobloxopen/go-trees v0.0.0-20221216143356-66ceba885ebc // indirect
github.com/jackpal/gateway v1.0.16 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
github.com/jcmturner/gofork v1.7.6 // indirect
github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 // indirect
github.com/jsimonetti/rtnetlink v1.4.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.15.15 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mdlayher/netlink v1.7.2 // indirect
github.com/mdlayher/socket v0.5.0 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.4.2 // indirect
github.com/moby/buildkit v0.9.0-rc1 // indirect
github.com/moby/patternmatcher v0.5.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/sys/signal v0.7.0 // indirect
github.com/moby/sys/symlink v0.2.0 // indirect
github.com/moby/sys/user v0.1.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/onsi/ginkgo/v2 v2.22.2 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/runc v1.1.4 // indirect
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 // indirect
github.com/openzipkin/zipkin-go v0.4.1 // indirect
github.com/oschwald/geoip2-golang v1.8.0 // indirect
github.com/oschwald/maxminddb-golang v1.10.0 // indirect
github.com/openzipkin/zipkin-go v0.4.2 // indirect
github.com/oschwald/geoip2-golang v1.11.0 // indirect
github.com/oschwald/maxminddb-golang v1.13.1 // indirect
github.com/outcaste-io/ristretto v0.2.3 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/philhofer/fwd v1.1.1 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/philhofer/fwd v1.1.2 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/prometheus/client_golang v1.20.5 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.60.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/quic-go/quic-go v0.49.0 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
github.com/shirou/gopsutil/v4 v4.25.1 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/stretchr/testify v1.10.0 // indirect
github.com/syncthing/notify v0.0.0-20210616190510-c6b7342338d2 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 // indirect
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/tinylib/msgp v1.1.6 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/tinylib/msgp v1.1.9 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.9.0 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/veraison/go-cose v1.3.0 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xlab/treeprint v1.1.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.7 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect
go.etcd.io/etcd/client/v3 v3.5.7 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.etcd.io/etcd/api/v3 v3.5.13 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.13 // indirect
go.etcd.io/etcd/client/v3 v3.5.13 // indirect
go.opencensus.io v0.24.0 // indirect
go.starlark.net v0.0.0-20230112144946-fae38c8a6d89 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.24.0 // indirect
go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect
go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect
golang.org/x/mod v0.11.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/tools v0.6.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/api v0.109.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5 // indirect
gopkg.in/DataDog/dd-trace-go.v1 v1.47.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
go.opentelemetry.io/otel v1.34.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect
go.opentelemetry.io/otel/metric v1.34.0 // indirect
go.opentelemetry.io/otel/sdk v1.34.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect
go.opentelemetry.io/otel/trace v1.34.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.starlark.net v0.0.0-20240329153429-e6e8e7ce1b7a // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/mock v0.5.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect
golang.org/x/exp v0.0.0-20250207012021-f9890c6ad9f3 // indirect
golang.org/x/mod v0.23.0 // indirect
golang.org/x/tools v0.29.0 // indirect
golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/api v0.172.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect
gopkg.in/DataDog/dd-trace-go.v1 v1.62.0 // indirect
gopkg.in/evanphx/json-patch.v5 v5.9.0 // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
inet.af/netaddr v0.0.0-20220617031823-097006376321 // indirect
k8s.io/apiextensions-apiserver v0.26.3 // indirect
k8s.io/component-base v0.26.3 // indirect
k8s.io/klog/v2 v2.90.1 // indirect
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect
k8s.io/apiextensions-apiserver v0.31.0-alpha.0 // indirect
k8s.io/component-base v0.31.0-alpha.0 // indirect
k8s.io/kube-openapi v0.0.0-20240322212309-b815d8309940 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/kustomize/kyaml v0.16.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
)

go.sum: 1728 lines changed (file diff suppressed because it is too large)

@@ -19,7 +19,6 @@ const (
KeyDHCP6 = "DHCP6"
KeyEnvoy = "ENVOY_CONFIG"
KeyClusterIPv4POOLS = "IPv4_POOLS"
KeyRefCount = "REF_COUNT"
// secret keys
// TLSCertKey is the key for tls certificates in a TLS secret.
@@ -31,8 +30,10 @@ const (
ContainerSidecarEnvoyProxy = "envoy-proxy"
ContainerSidecarControlPlane = "control-plane"
ContainerSidecarVPN = "vpn"
ContainerSidecarSyncthing = "syncthing"
VolumeEnvoyConfig = "envoy-config"
VolumeSyncthing = "syncthing"
innerIPv4Pool = "223.254.0.100/16"
// Reason: in a Docker environment, the gateway and subnet configured for Docker must not conflict with the inner pool, nor with Docker's own 172.17 network
@@ -68,14 +69,8 @@ const (
EnvPodNamespace = "POD_NAMESPACE"
// header name
HeaderPodName = "POD_NAME"
HeaderPodNamespace = "POD_NAMESPACE"
HeaderIPv4 = "IPv4"
HeaderIPv6 = "IPv6"
// api
APIRentIP = "/rent/ip"
APIReleaseIP = "/release/ip"
HeaderIPv4 = "IPv4"
HeaderIPv6 = "IPv6"
KUBECONFIG = "kubeconfig"
@@ -83,14 +78,12 @@ const (
ManageBy = konfig.ManagedbyLabelKey
// pprof port
PProfPort = 32345
PProfPort = 32345
SudoPProfPort = 33345
PProfDir = "pprof"
// startup by KubeVPN
EnvStartSudoKubeVPNByKubeVPN = "DEPTH_SIGNED_BY_NAISON"
EnvSSHJump = "SSH_JUMP_BY_KUBEVPN"
EnvSSHJump = "SSH_JUMP_BY_KUBEVPN"
// transport mode
ConfigKubeVPNTransportEngine = "transport-engine"
// hosts entry key word
HostsKeyWord = "# Add by KubeVPN"
)
@@ -107,6 +100,8 @@ var (
OriginImage = "docker.io/naison/kubevpn:" + Version
DaemonPath string
HomePath string
PprofPath string
)
var (
@@ -126,14 +121,16 @@ func init() {
DockerRouterIP, DockerCIDR, _ = net.ParseCIDR(dockerInnerIPv4Pool)
dir, _ := os.UserHomeDir()
DaemonPath = filepath.Join(dir, HOME, Daemon)
HomePath = filepath.Join(dir, HOME)
PprofPath = filepath.Join(dir, HOME, Daemon, PProfDir)
}
var Debug bool
var (
SmallBufferSize = (1 << 13) - 1 // 8KB small buffer
MediumBufferSize = (1 << 15) - 1 // 32KB medium buffer
LargeBufferSize = (1 << 16) - 1 // 64KB large buffer
SmallBufferSize = 8 * 1024 // 8KB small buffer
MediumBufferSize = 32 * 1024 // 32KB medium buffer
LargeBufferSize = 64 * 1024 // 64KB large buffer
)
var (
@@ -153,23 +150,28 @@ var (
)
var (
LPool = &sync.Pool{
SPool = &sync.Pool{
New: func() interface{} {
return make([]byte, SmallBufferSize)
},
}
MPool = sync.Pool{
New: func() any {
return make([]byte, MediumBufferSize)
},
}
LPool = sync.Pool{
New: func() any {
return make([]byte, LargeBufferSize)
},
}
)
var SPool = sync.Pool{
New: func() any {
return make([]byte, 2)
},
}
type Engine string
const (
EngineGvisor Engine = "gvisor"
EngineMix Engine = "mix"
EngineRaw Engine = "raw"
EngineSystem Engine = "system"
)
const Slogan = "Now you can access resources in the kubernetes cluster !"
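For orientation, the three pools above are used throughout the tunnel code via the pattern `config.LPool.Get().([]byte)` followed by a deferred `Put`, as the later hunks in this diff show. A minimal sketch of that pattern:

```go
package main

import (
	"fmt"

	"github.com/wencaiwulue/kubevpn/v2/pkg/config"
)

func main() {
	// Borrow a large buffer; the type assertion matches the pool's
	// New func, which returns a []byte of LargeBufferSize.
	buf := config.LPool.Get().([]byte)
	// Return the buffer when done so it can be reused.
	defer config.LPool.Put(buf[:])

	n := copy(buf, []byte("payload"))
	fmt.Println("used", n, "of", len(buf), "bytes")
}
```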

pkg/config/config.yaml (new file, 20 lines)

@@ -0,0 +1,20 @@
# This is an example kubevpn config file; replace it with your own configuration.
# Three fields are supported: Name, Needs, Flags
# Exec command: kubevpn alias qa <===> kubevpn connect --kubeconfig=~/.kube/jumper_config --namespace=default
# Simple is Good ~
Name: dev
Needs: qa
Flags:
- connect
- --kubeconfig=~/.kube/config
- --namespace=default
- --lite
---
Name: qa
Flags:
- connect
- --kubeconfig=~/.kube/jumper_config
- --namespace=default
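The file is a multi-document YAML stream, so each alias can be decoded in a loop until EOF. A minimal, hypothetical sketch of reading it (the `Alias` struct is an assumption for illustration, not the project's actual type):

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"os"

	"gopkg.in/yaml.v3"
)

// Alias mirrors the three supported fields; the type name is hypothetical.
type Alias struct {
	Name  string   `yaml:"Name"`
	Needs string   `yaml:"Needs,omitempty"`
	Flags []string `yaml:"Flags"`
}

func main() {
	f, err := os.Open("config.yaml")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Decode every `---`-separated document in the stream.
	dec := yaml.NewDecoder(f)
	for {
		var a Alias
		if err := dec.Decode(&a); errors.Is(err, io.EOF) {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Printf("alias %q needs %q flags %v\n", a.Name, a.Needs, a.Flags)
	}
}
```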


@@ -1,6 +1,12 @@
package config
import "os"
import (
_ "embed"
"os"
"path/filepath"
"github.com/pkg/errors"
)
const (
HOME = ".kubevpn"
@@ -14,16 +20,64 @@ const (
LogFile = "daemon.log"
KubeVPNRestorePatchKey = "kubevpn-probe-restore-patch"
ConfigFile = "config.yaml"
)
//go:embed config.yaml
var config []byte
func init() {
err := os.MkdirAll(DaemonPath, os.ModePerm)
err := os.MkdirAll(DaemonPath, 0755)
if err != nil {
panic(err)
}
err = os.Chmod(DaemonPath, os.ModePerm)
err = os.Chmod(DaemonPath, 0755)
if err != nil {
panic(err)
}
err = os.MkdirAll(PprofPath, 0755)
if err != nil {
panic(err)
}
err = os.Chmod(PprofPath, 0755)
if err != nil {
panic(err)
}
path := filepath.Join(HomePath, ConfigFile)
_, err = os.Stat(path)
if errors.Is(err, os.ErrNotExist) {
err = os.WriteFile(path, config, 0644)
}
if err != nil {
panic(err)
}
}
func GetSockPath(isSudo bool) string {
name := SockPath
if isSudo {
name = SudoSockPath
}
return filepath.Join(DaemonPath, name)
}
func GetPidPath(isSudo bool) string {
name := PidPath
if isSudo {
name = SudoPidPath
}
return filepath.Join(DaemonPath, name)
}
func GetSyncthingPath() string {
return filepath.Join(DaemonPath, SyncthingDir)
}
func GetSyncthingGUIPath() string {
return filepath.Join(DaemonPath, SyncthingDir, SyncthingGUIDir)
}
func GetConfigFilePath() string {
return filepath.Join(HomePath, ConfigFile)
}

pkg/config/syncthing.go (new file, 97 lines)

@@ -0,0 +1,97 @@
package config
import (
"crypto/tls"
"github.com/syncthing/syncthing/lib/protocol"
)
const (
SyncthingDir = "syncthing"
SyncthingGUIDir = "gui"
DefaultRemoteDir = "/kubevpn-data"
// EnvDisableSyncthingLog disables the syncthing log: its logger cannot be given a custom output writer and only writes to os.Stdout or io.Discard
EnvDisableSyncthingLog = "LOGGER_DISCARD"
SyncthingAPIKey = "kubevpn"
)
var LocalCert tls.Certificate
var RemoteCert tls.Certificate
var LocalDeviceID protocol.DeviceID
var RemoteDeviceID protocol.DeviceID
const (
SyncthingLocalDeviceID = "BSNCBRY-ZI5HLYC-YH6544V-SQ3IDKT-4JQKING-ZGSW463-UKYEYCA-WO7ZHA3"
SyncthingLocalCert = `-----BEGIN CERTIFICATE-----
MIICHjCCAaSgAwIBAgIIHY0CWDFbXYEwCgYIKoZIzj0EAwIwSjESMBAGA1UEChMJ
U3luY3RoaW5nMSAwHgYDVQQLExdBdXRvbWF0aWNhbGx5IEdlbmVyYXRlZDESMBAG
A1UEAxMJc3luY3RoaW5nMCAXDTI0MDYxOTAwMDAwMFoYDzE4NTQwOTExMDA1MDUy
WjBKMRIwEAYDVQQKEwlTeW5jdGhpbmcxIDAeBgNVBAsTF0F1dG9tYXRpY2FsbHkg
R2VuZXJhdGVkMRIwEAYDVQQDEwlzeW5jdGhpbmcwdjAQBgcqhkjOPQIBBgUrgQQA
IgNiAAQj1ov1aM0902yssK+3LPiGM1e1pUcVRuQjxl0nDX0fpZp3kdeWeiBm9AlE
uwhAll/8QjoWBlNiEXyGFN9lOaIGf7ZIk7owPT6LiJXc1n3E6iqHWeSXcZ9dJL7M
+E4eleajVTBTMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYI
KwYBBQUHAwIwDAYDVR0TAQH/BAIwADAUBgNVHREEDTALgglzeW5jdGhpbmcwCgYI
KoZIzj0EAwIDaAAwZQIwJI4KA9JgFXWU4dWq6JnIr+lAuIJ5ON2lFPrX8JWi1Z3F
UXrvm80w+uR+1rLt6AdkAjEA3dpoBnS7tV21krEVmfX2vabtkzZidhXwuvP+1VJN
By4EwZnuTLX3TqQx2TERF9rV
-----END CERTIFICATE-----
`
SyncthingLocalKey = `-----BEGIN EC PRIVATE KEY-----
MIGkAgEBBDAltfhZ8YO4CrPsvFRpU6P8lOspm5VXFGvJghSaDr4D/ub66+4HpTk9
3TdgtbUSMSmgBwYFK4EEACKhZANiAAQj1ov1aM0902yssK+3LPiGM1e1pUcVRuQj
xl0nDX0fpZp3kdeWeiBm9AlEuwhAll/8QjoWBlNiEXyGFN9lOaIGf7ZIk7owPT6L
iJXc1n3E6iqHWeSXcZ9dJL7M+E4eleY=
-----END EC PRIVATE KEY-----
`
)
const (
SyncthingRemoteDeviceID = "OELB2JL-MIOW652-6JPBYPZ-POV3EBV-XEOW2Z2-I45QUGZ-QF5TT4P-Z2AH7AU"
SyncthingRemoteCert = `-----BEGIN CERTIFICATE-----
MIICHzCCAaWgAwIBAgIJAOGCLdtwnUShMAoGCCqGSM49BAMCMEoxEjAQBgNVBAoT
CVN5bmN0aGluZzEgMB4GA1UECxMXQXV0b21hdGljYWxseSBHZW5lcmF0ZWQxEjAQ
BgNVBAMTCXN5bmN0aGluZzAgFw0yNDA2MTkwMDAwMDBaGA8xODU0MDkxMTAwNTA1
MlowSjESMBAGA1UEChMJU3luY3RoaW5nMSAwHgYDVQQLExdBdXRvbWF0aWNhbGx5
IEdlbmVyYXRlZDESMBAGA1UEAxMJc3luY3RoaW5nMHYwEAYHKoZIzj0CAQYFK4EE
ACIDYgAETwaM3V92D499uMXWFgGxdTUAvtp1tN7ePuJxt8W+FO0izG1fa7oU29Hp
FU0Ohh3xwnQfEHIWzlKJllZ2ZbbXGOvcfr0Yfiir6ToKuN6185EA8RHkA+5HRtu5
nw5wyWL/o1UwUzAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEG
CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwFAYDVR0RBA0wC4IJc3luY3RoaW5nMAoG
CCqGSM49BAMCA2gAMGUCMGxR9q9vjzm4GynOkoRIC+BQJN0zpiNusYUD6iYJNGe1
wNH8jhOJEG+rjGracDZ6bgIxAIpyHv/rOAjEX7/wcafRqGTFhwXdRq0l3493aERd
RCwqD8rbzP0QStVOCAE7xYt/sQ==
-----END CERTIFICATE-----
`
SyncthingRemoteKey = `-----BEGIN EC PRIVATE KEY-----
MIGkAgEBBDAKabOokHf64xAsIQp5PA1zZ5vLjfcgKcuikx/D0CP6c2Cf48a6eADE
GWrY1Ng8UzOgBwYFK4EEACKhZANiAARPBozdX3YPj324xdYWAbF1NQC+2nW03t4+
4nG3xb4U7SLMbV9ruhTb0ekVTQ6GHfHCdB8QchbOUomWVnZlttcY69x+vRh+KKvp
Ogq43rXzkQDxEeQD7kdG27mfDnDJYv8=
-----END EC PRIVATE KEY-----
`
)
func init() {
var err error
LocalCert, err = tls.X509KeyPair([]byte(SyncthingLocalCert), []byte(SyncthingLocalKey))
if err != nil {
panic(err)
}
RemoteCert, err = tls.X509KeyPair([]byte(SyncthingRemoteCert), []byte(SyncthingRemoteKey))
if err != nil {
panic(err)
}
LocalDeviceID, err = protocol.DeviceIDFromString(SyncthingLocalDeviceID)
if err != nil {
panic(err)
}
RemoteDeviceID, err = protocol.DeviceIDFromString(SyncthingRemoteDeviceID)
if err != nil {
panic(err)
}
}
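A syncthing device ID is derived from the certificate itself, so the pinned IDs above can be cross-checked against the embedded key pairs. A small sketch of that sanity check, assuming `protocol.NewDeviceID` (which syncthing exposes for exactly this purpose):

```go
package main

import (
	"fmt"

	"github.com/syncthing/syncthing/lib/protocol"

	"github.com/wencaiwulue/kubevpn/v2/pkg/config"
)

func main() {
	// The device ID is a hash of the leaf certificate, so the pinned
	// constant must match what the embedded cert actually encodes.
	got := protocol.NewDeviceID(config.LocalCert.Certificate[0])
	if got != config.LocalDeviceID {
		panic(fmt.Sprintf("local device ID mismatch: %s != %s", got, config.LocalDeviceID))
	}
	fmt.Println("local device ID OK:", got)
}
```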


@@ -2,13 +2,18 @@ package controlplane
import (
"fmt"
"strconv"
"strings"
"time"
v31 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
accesslogfilev3 "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/file/v3"
corsv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/cors/v3"
grpcwebv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/grpc_web/v3"
routerv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3"
@@ -21,26 +26,71 @@ import (
"github.com/envoyproxy/go-control-plane/pkg/cache/types"
"github.com/envoyproxy/go-control-plane/pkg/resource/v3"
"github.com/envoyproxy/go-control-plane/pkg/wellknown"
"github.com/sirupsen/logrus"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/wrapperspb"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
type Virtual struct {
Uid string // group.resource.name
Ports []corev1.ContainerPort
Ports []ContainerPort
Rules []*Rule
}
type ContainerPort struct {
// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
// named port in a pod must have a unique name. Name for the port that can be
// referred to by services.
// +optional
Name string `json:"name,omitempty"`
// EnvoyListenerPort is the port the envoy listener binds to.
// If specified, this must be a valid port number, 0 < x < 65536.
// A non-zero value means Fargate mode.
// +optional
EnvoyListenerPort int32 `json:"envoyListenerPort,omitempty"`
// Number of port to expose on the pod's IP address.
// This must be a valid port number, 0 < x < 65536.
ContainerPort int32 `json:"containerPort"`
// Protocol for port. Must be UDP, TCP, or SCTP.
// Defaults to "TCP".
// +optional
// +default="TCP"
Protocol corev1.Protocol `json:"protocol,omitempty"`
}
func ConvertContainerPort(ports ...corev1.ContainerPort) []ContainerPort {
var result []ContainerPort
for _, port := range ports {
result = append(result, ContainerPort{
Name: port.Name,
EnvoyListenerPort: 0,
ContainerPort: port.ContainerPort,
Protocol: port.Protocol,
})
}
return result
}
type Rule struct {
Headers map[string]string
LocalTunIPv4 string
LocalTunIPv6 string
// For non-privileged mode (AWS Fargate mode) the pod has neither cap NET_ADMIN nor privileged: true, so an OSI layer-3 proxy cannot be used.
// containerPort -> envoyRulePort:localPort
// envoyRulePort: envoy forwards traffic to localhost:envoyRulePort
// localPort: the local PC listens on localhost:localPort
// Via an SSH reverse tunnel, the envoy rule endpoint localhost:envoyRulePort is forwarded to localhost:localPort on the local PC.
// localPort is required; envoyRulePort is optional.
PortMap map[int32]string
}
func (a *Virtual) To() (
func (a *Virtual) To(enableIPv6 bool) (
listeners []types.Resource,
clusters []types.Resource,
routes []types.Resource,
@@ -48,21 +98,57 @@ func (a *Virtual) To() (
) {
//clusters = append(clusters, OriginCluster())
for _, port := range a.Ports {
listenerName := fmt.Sprintf("%s_%v_%s", a.Uid, port.ContainerPort, port.Protocol)
isFargateMode := port.EnvoyListenerPort != 0
listenerName := fmt.Sprintf("%s_%v_%s", a.Uid, util.If(isFargateMode, port.EnvoyListenerPort, port.ContainerPort), port.Protocol)
routeName := listenerName
listeners = append(listeners, ToListener(listenerName, routeName, port.ContainerPort, port.Protocol))
listeners = append(listeners, ToListener(listenerName, routeName, util.If(isFargateMode, port.EnvoyListenerPort, port.ContainerPort), port.Protocol, isFargateMode))
var rr []*route.Route
for _, rule := range a.Rules {
for _, ip := range []string{rule.LocalTunIPv4, rule.LocalTunIPv6} {
clusterName := fmt.Sprintf("%s_%v", ip, port.ContainerPort)
var ips []string
if enableIPv6 {
ips = []string{rule.LocalTunIPv4, rule.LocalTunIPv6}
} else {
ips = []string{rule.LocalTunIPv4}
}
ports := rule.PortMap[port.ContainerPort]
if isFargateMode {
if strings.Index(ports, ":") > 0 {
ports = strings.Split(ports, ":")[0]
} else {
logrus.Errorf("fargate mode port mapping should have two parts: envoyRulePort:localPort")
}
}
envoyRulePort, _ := strconv.Atoi(ports)
for _, ip := range ips {
clusterName := fmt.Sprintf("%s_%v", ip, envoyRulePort)
clusters = append(clusters, ToCluster(clusterName))
endpoints = append(endpoints, ToEndPoint(clusterName, ip, port.ContainerPort))
endpoints = append(endpoints, ToEndPoint(clusterName, ip, int32(envoyRulePort)))
rr = append(rr, ToRoute(clusterName, rule.Headers))
}
}
rr = append(rr, DefaultRoute())
clusters = append(clusters, OriginCluster())
// if isFargateMode is true, we need to add a default route to the container port, because use_original_dst does not work
if isFargateMode {
// all IPs should be IPv4 127.0.0.1 and IPv6 ::1
var ips = sets.New[string]()
for _, rule := range a.Rules {
if enableIPv6 {
ips.Insert(rule.LocalTunIPv4, rule.LocalTunIPv6)
} else {
ips.Insert(rule.LocalTunIPv4)
}
}
for _, ip := range ips.UnsortedList() {
defaultClusterName := fmt.Sprintf("%s_%v", ip, port.ContainerPort)
clusters = append(clusters, ToCluster(defaultClusterName))
endpoints = append(endpoints, ToEndPoint(defaultClusterName, ip, port.ContainerPort))
rr = append(rr, DefaultRouteToCluster(defaultClusterName))
}
} else {
rr = append(rr, DefaultRoute())
clusters = append(clusters, OriginCluster())
}
routes = append(routes, &route.RouteConfiguration{
Name: routeName,
VirtualHosts: []*route.VirtualHost{
@@ -122,6 +208,9 @@ func ToCluster(clusterName string) *cluster.Cluster {
LbPolicy: cluster.Cluster_ROUND_ROBIN,
TypedExtensionProtocolOptions: map[string]*anypb.Any{
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": anyFunc(&httpv3.HttpProtocolOptions{
CommonHttpProtocolOptions: &corev3.HttpProtocolOptions{
IdleTimeout: durationpb.New(time.Second * 10),
},
UpstreamProtocolOptions: &httpv3.HttpProtocolOptions_UseDownstreamProtocolConfig{
UseDownstreamProtocolConfig: &httpv3.HttpProtocolOptions_UseDownstreamHttpConfig{},
},
@@ -214,7 +303,30 @@ func DefaultRoute() *route.Route {
}
}
func ToListener(listenerName string, routeName string, port int32, p corev1.Protocol) *listener.Listener {
func DefaultRouteToCluster(clusterName string) *route.Route {
return &route.Route{
Match: &route.RouteMatch{
PathSpecifier: &route.RouteMatch_Prefix{
Prefix: "/",
},
},
Action: &route.Route_Route{
Route: &route.RouteAction{
ClusterSpecifier: &route.RouteAction_Cluster{
Cluster: clusterName,
},
Timeout: durationpb.New(0),
IdleTimeout: durationpb.New(0),
MaxStreamDuration: &route.RouteAction_MaxStreamDuration{
MaxStreamDuration: durationpb.New(0),
GrpcTimeoutHeaderMax: durationpb.New(0),
},
},
},
}
}
func ToListener(listenerName string, routeName string, port int32, p corev1.Protocol, isFargateMode bool) *listener.Listener {
var protocol core.SocketAddress_Protocol
switch p {
case corev1.ProtocolTCP:
@@ -278,6 +390,14 @@ func ToListener(listenerName string, routeName string, port int32, p corev1.Prot
UpgradeConfigs: []*httpconnectionmanager.HttpConnectionManager_UpgradeConfig{{
UpgradeType: "websocket",
}},
AccessLog: []*v31.AccessLog{{
Name: wellknown.FileAccessLog,
ConfigType: &v31.AccessLog_TypedConfig{
TypedConfig: anyFunc(&accesslogfilev3.FileAccessLog{
Path: "/dev/stdout",
}),
},
}},
}
tcpConfig := &tcpproxy.TcpProxy{
@@ -290,7 +410,7 @@ func ToListener(listenerName string, routeName string, port int32, p corev1.Prot
return &listener.Listener{
Name: listenerName,
TrafficDirection: core.TrafficDirection_INBOUND,
BindToPort: &wrapperspb.BoolValue{Value: false},
BindToPort: &wrapperspb.BoolValue{Value: util.If(isFargateMode, true, false)},
UseOriginalDst: &wrapperspb.BoolValue{Value: true},
Address: &core.Address{
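The Fargate branch above splits the `envoyRulePort:localPort` pair by hand with strings.Index; here is a hedged sketch of the same parsing with strings.Cut, just to make the expected PortMap value format concrete (the helper is illustrative, not the project's actual code):

```go
package controlplane

import (
	"fmt"
	"strconv"
	"strings"
)

// splitPortPair parses a PortMap value of the form "envoyRulePort:localPort".
// localPort is required; envoyRulePort is optional ("9090", ":9090" or "8080:9090").
func splitPortPair(v string) (envoyRulePort, localPort int, err error) {
	left, right, found := strings.Cut(v, ":")
	if !found {
		// Only a local port was given.
		localPort, err = strconv.Atoi(left)
		return 0, localPort, err
	}
	if left != "" {
		if envoyRulePort, err = strconv.Atoi(left); err != nil {
			return 0, 0, fmt.Errorf("bad envoy rule port %q: %w", left, err)
		}
	}
	if localPort, err = strconv.Atoi(right); err != nil {
		return 0, 0, fmt.Errorf("bad local port %q: %w", right, err)
	}
	return envoyRulePort, localPort, nil
}
```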


@@ -10,14 +10,15 @@ import (
log "github.com/sirupsen/logrus"
)
func Main(filename string, port uint, logger *log.Logger) {
func Main(ctx context.Context, filename string, port uint, logger *log.Logger) error {
snapshotCache := cache.NewSnapshotCache(false, cache.IDHash{}, logger)
proc := NewProcessor(snapshotCache, logger)
errChan := make(chan error, 2)
go func() {
ctx := context.Background()
server := serverv3.NewServer(ctx, snapshotCache, nil)
RunServer(ctx, server, port)
errChan <- RunServer(ctx, server, port)
}()
notifyCh := make(chan NotifyMessage, 100)
@@ -29,20 +30,29 @@ func Main(filename string, port uint, logger *log.Logger) {
watcher, err := fsnotify.NewWatcher()
if err != nil {
log.Fatal(fmt.Errorf("failed to create file watcher, err: %v", err))
return fmt.Errorf("failed to create file watcher: %v", err)
}
defer watcher.Close()
if err = watcher.Add(filename); err != nil {
log.Fatal(fmt.Errorf("failed to add file: %s to wather, err: %v", filename, err))
err = watcher.Add(filename)
if err != nil {
return fmt.Errorf("failed to add file: %s to wather: %v", filename, err)
}
go func() {
log.Fatal(Watch(watcher, filename, notifyCh))
errChan <- Watch(watcher, filename, notifyCh)
}()
for {
select {
case msg := <-notifyCh:
proc.ProcessFile(msg)
err = proc.ProcessFile(msg)
if err != nil {
log.Errorf("Failed to process file: %v", err)
return err
}
case err = <-errChan:
return err
case <-ctx.Done():
return ctx.Err()
}
}
}
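With the new signature, the caller owns the lifetime: cancelling the context makes Main return through its ctx.Done branch and propagate the first error. A minimal, hypothetical invocation (paths and port here are placeholders, not project defaults):

```go
package main

import (
	"context"
	"os/signal"
	"syscall"

	log "github.com/sirupsen/logrus"

	"github.com/wencaiwulue/kubevpn/v2/pkg/controlplane"
)

func main() {
	// Make Main return cleanly on SIGINT/SIGTERM.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	if err := controlplane.Main(ctx, "envoy-config.yaml", 9002, log.New()); err != nil {
		log.Errorf("control plane exited: %v", err)
	}
}
```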


@@ -14,20 +14,22 @@ import (
"github.com/envoyproxy/go-control-plane/pkg/cache/types"
"github.com/envoyproxy/go-control-plane/pkg/cache/v3"
"github.com/envoyproxy/go-control-plane/pkg/resource/v3"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
utilcache "k8s.io/apimachinery/pkg/util/cache"
"k8s.io/apimachinery/pkg/util/yaml"
"sigs.k8s.io/yaml"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
type Processor struct {
cache cache.SnapshotCache
logger *logrus.Logger
logger *log.Logger
version int64
expireCache *utilcache.Expiring
}
func NewProcessor(cache cache.SnapshotCache, log *logrus.Logger) *Processor {
func NewProcessor(cache cache.SnapshotCache, log *log.Logger) *Processor {
return &Processor{
cache: cache,
logger: log,
@@ -44,12 +46,13 @@ func (p *Processor) newVersion() string {
return strconv.FormatInt(p.version, 10)
}
func (p *Processor) ProcessFile(file NotifyMessage) {
func (p *Processor) ProcessFile(file NotifyMessage) error {
configList, err := ParseYaml(file.FilePath)
if err != nil {
p.logger.Errorf("error parsing yaml file: %+v", err)
return
return err
}
enableIPv6, _ := util.DetectSupportIPv6()
for _, config := range configList {
if len(config.Uid) == 0 {
continue
@@ -62,7 +65,7 @@ func (p *Processor) ProcessFile(file NotifyMessage) {
}
p.logger.Debugf("update config, version %d, config %v", p.version, config)
listeners, clusters, routes, endpoints := config.To()
listeners, clusters, routes, endpoints := config.To(enableIPv6)
resources := map[resource.Type][]types.Resource{
resource.ListenerType: listeners, // listeners
resource.RouteType: routes, // routes
@@ -76,21 +79,22 @@ func (p *Processor) ProcessFile(file NotifyMessage) {
if err != nil {
p.logger.Errorf("snapshot inconsistency: %v, err: %v", snapshot, err)
return
return err
}
if err = snapshot.Consistent(); err != nil {
p.logger.Errorf("snapshot inconsistency: %v, err: %v", snapshot, err)
return
return err
}
p.logger.Debugf("will serve snapshot %+v, nodeID: %s", snapshot, config.Uid)
if err = p.cache.SetSnapshot(context.Background(), config.Uid, snapshot); err != nil {
p.logger.Errorf("snapshot error %q for %v", err, snapshot)
p.logger.Fatal(err)
return err
}
p.expireCache.Set(config.Uid, config, time.Minute*5)
}
return nil
}
func ParseYaml(file string) ([]*Virtual, error) {


@@ -21,13 +21,13 @@ const (
grpcMaxConcurrentStreams = 1000000
)
func RunServer(ctx context.Context, server serverv3.Server, port uint) {
func RunServer(ctx context.Context, server serverv3.Server, port uint) error {
grpcServer := grpc.NewServer(grpc.MaxConcurrentStreams(grpcMaxConcurrentStreams))
var lc net.ListenConfig
listener, err := lc.Listen(ctx, "tcp", fmt.Sprintf(":%d", port))
if err != nil {
log.Fatal(err)
return err
}
discoverygrpc.RegisterAggregatedDiscoveryServiceServer(grpcServer, server)
@@ -38,8 +38,6 @@ func RunServer(ctx context.Context, server serverv3.Server, port uint) {
secretservice.RegisterSecretDiscoveryServiceServer(grpcServer, server)
runtimeservice.RegisterRuntimeDiscoveryServiceServer(grpcServer, server)
log.Infof("management server listening on %d", port)
if err = grpcServer.Serve(listener); err != nil {
log.Fatal(err)
}
log.Infof("Management server listening on %d", port)
return grpcServer.Serve(listener)
}
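One design note on the new RunServer: `grpcServer.Serve` only returns on a listener error, so ctx cancellation makes Main return while the gRPC server keeps running. An illustrative variant (not in the diff) that ties the server's lifetime to the context:

```go
package controlplane

import (
	"context"
	"net"

	"google.golang.org/grpc"
)

// RunServerWithShutdown is a sketch: cancelling ctx drains in-flight
// RPCs via GracefulStop, which unblocks Serve so the error (or nil)
// propagates to the caller.
func RunServerWithShutdown(ctx context.Context, grpcServer *grpc.Server, listener net.Listener) error {
	go func() {
		<-ctx.Done()
		grpcServer.GracefulStop()
	}()
	return grpcServer.Serve(listener)
}
```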


@@ -13,12 +13,12 @@ var (
)
type Chain struct {
Retries int
retries int
node *Node
}
func NewChain(retry int, node *Node) *Chain {
return &Chain{Retries: retry, node: node}
return &Chain{retries: retry, node: node}
}
func (c *Chain) Node() *Node {
@@ -30,7 +30,7 @@ func (c *Chain) IsEmpty() bool {
}
func (c *Chain) DialContext(ctx context.Context) (conn net.Conn, err error) {
for i := 0; i < int(math.Max(float64(1), float64(c.Retries))); i++ {
for i := 0; i < int(math.Max(float64(1), float64(c.retries))); i++ {
conn, err = c.dial(ctx)
if err == nil {
break


@@ -0,0 +1,27 @@
package core
import (
"context"
log "github.com/sirupsen/logrus"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func ICMPForwarder(s *stack.Stack, ctx context.Context) func(stack.TransportEndpointID, *stack.PacketBuffer) bool {
return func(id stack.TransportEndpointID, buffer *stack.PacketBuffer) bool {
log.Debugf("[TUN-ICMP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
)
ctx1, cancelFunc := context.WithCancel(ctx)
defer cancelFunc()
ok, err := util.PingOnce(ctx1, id.RemoteAddress.String(), id.LocalAddress.String())
if err != nil {
log.Debugf("[TUN-ICMP] Failed to ping dst %s from src %s",
id.LocalAddress.String(), id.RemoteAddress.String(),
)
}
return ok
}
}
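util.PingOnce itself is not shown in this diff. Under the assumption that it is built on the pro-bing dependency listed in go.mod above, it could look roughly like the sketch below; every detail here (constructor, fields, timeout) is a guess, not the project's actual implementation:

```go
package util

import (
	"context"
	"time"

	probing "github.com/prometheus-community/pro-bing"
)

// PingOnce sends a single ICMP echo from src to dst and reports
// whether a reply came back. Sketch only; the real helper may differ.
func PingOnce(ctx context.Context, src, dst string) (bool, error) {
	pinger, err := probing.NewPinger(dst)
	if err != nil {
		return false, err
	}
	pinger.Source = src
	pinger.Count = 1
	pinger.Timeout = time.Second * 2
	pinger.SetPrivileged(true) // raw ICMP sockets need privileges
	if err = pinger.RunWithContext(ctx); err != nil {
		return false, err
	}
	return pinger.Statistics().PacketsRecv > 0, nil
}
```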


@@ -15,16 +15,8 @@ import (
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
)
var _ stack.UniqueID = (*id)(nil)
type id struct {
}
func (i id) UniqueID() uint64 {
return 1
}
func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
nicID := tcpip.NICID(1)
s := stack.New(stack.Options{
NetworkProtocols: []stack.NetworkProtocolFactory{
ipv4.NewProtocol,
@@ -40,29 +32,30 @@ func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
// Enable raw sockets for users with sufficient
// privileges.
RawFactory: raw.EndpointFactory{},
UniqueID: id{},
})
// set handler for TCP UDP ICMP
s.SetTransportProtocolHandler(tcp.ProtocolNumber, TCPForwarder(s))
s.SetTransportProtocolHandler(udp.ProtocolNumber, UDPForwarder(s))
s.SetTransportProtocolHandler(tcp.ProtocolNumber, TCPForwarder(s, ctx))
s.SetTransportProtocolHandler(udp.ProtocolNumber, UDPForwarder(s, ctx))
s.SetTransportProtocolHandler(header.ICMPv4ProtocolNumber, ICMPForwarder(s, ctx))
s.SetTransportProtocolHandler(header.ICMPv6ProtocolNumber, ICMPForwarder(s, ctx))
s.SetRouteTable([]tcpip.Route{
{
Destination: header.IPv4EmptySubnet,
NIC: 1,
NIC: nicID,
},
{
Destination: header.IPv6EmptySubnet,
NIC: 1,
NIC: nicID,
},
})
s.CreateNICWithOptions(1, packetsocket.New(tun), stack.NICOptions{
s.CreateNICWithOptions(nicID, packetsocket.New(tun), stack.NICOptions{
Disabled: false,
Context: ctx,
})
s.SetPromiscuousMode(1, true)
s.SetSpoofing(1, true)
s.SetPromiscuousMode(nicID, true)
s.SetSpoofing(nicID, true)
// Enable SACK Recovery.
{
@@ -93,17 +86,17 @@ func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
{
if err := s.SetForwardingDefaultAndAllNICs(ipv4.ProtocolNumber, true); err != nil {
log.Fatalf("set ipv4 forwarding: %v", err)
log.Fatalf("Set IPv4 forwarding: %v", err)
}
if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, true); err != nil {
log.Fatalf("set ipv6 forwarding: %v", err)
log.Fatalf("Set IPv6 forwarding: %v", err)
}
}
{
option := tcpip.TCPModerateReceiveBufferOption(true)
if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &option); err != nil {
log.Fatalf("set TCP moderate receive buffer: %v", err)
log.Fatalf("Set TCP moderate receive buffer: %v", err)
}
}
return s


@@ -4,10 +4,12 @@ import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"time"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
@@ -15,44 +17,32 @@ import (
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"gvisor.dev/gvisor/pkg/waiter"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
)
var GvisorTCPForwardAddr string
func TCPForwarder(s *stack.Stack) func(stack.TransportEndpointID, *stack.PacketBuffer) bool {
func TCPForwarder(s *stack.Stack, ctx context.Context) func(stack.TransportEndpointID, *stack.PacketBuffer) bool {
return tcp.NewForwarder(s, 0, 100000, func(request *tcp.ForwarderRequest) {
defer request.Complete(false)
id := request.ID()
log.Debugf("[TUN-TCP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
log.Debugf("[TUN-TCP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
)
node, err := ParseNode(GvisorTCPForwardAddr)
// 2, dial proxy
host := id.LocalAddress.String()
port := fmt.Sprintf("%d", id.LocalPort)
var remote net.Conn
var d = net.Dialer{Timeout: time.Second * 5}
remote, err := d.DialContext(ctx, "tcp", net.JoinHostPort(host, port))
if err != nil {
log.Debugf("[TUN-TCP] Error: can not parse gvisor tcp forward addr %s: %v", GvisorTCPForwardAddr, err)
return
}
node.Client = &Client{
Connector: GvisorTCPTunnelConnector(),
Transporter: TCPTransporter(),
}
forwardChain := NewChain(5, node)
remote, err := forwardChain.dial(context.Background())
if err != nil {
log.Debugf("[TUN-TCP] Error: failed to dial remote conn: %v", err)
return
}
if err = WriteProxyInfo(remote, id); err != nil {
log.Debugf("[TUN-TCP] Error: failed to write proxy info: %v", err)
log.Errorf("[TUN-TCP] Failed to connect addr %s: %v", net.JoinHostPort(host, port), err)
return
}
w := &waiter.Queue{}
endpoint, tErr := request.CreateEndpoint(w)
if tErr != nil {
log.Debugf("[TUN-TCP] Error: can not create endpoint: %v", tErr)
log.Debugf("[TUN-TCP] Failed to create endpoint: %v", tErr)
return
}
conn := gonet.NewTCPConn(w, endpoint)
@@ -61,30 +51,30 @@ func TCPForwarder(s *stack.Stack) func(stack.TransportEndpointID, *stack.PacketB
defer remote.Close()
errChan := make(chan error, 2)
go func() {
i := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(i[:])
written, err2 := io.CopyBuffer(remote, conn, i)
log.Debugf("[TUN-TCP] Debug: write length %d data to remote", written)
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
written, err2 := io.CopyBuffer(remote, conn, buf)
log.Debugf("[TUN-TCP] Write length %d data to remote", written)
errChan <- err2
}()
go func() {
i := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(i[:])
written, err2 := io.CopyBuffer(conn, remote, i)
log.Debugf("[TUN-TCP] Debug: read length %d data from remote", written)
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
written, err2 := io.CopyBuffer(conn, remote, buf)
log.Debugf("[TUN-TCP] Read length %d data from remote", written)
errChan <- err2
}()
err = <-errChan
if err != nil && !errors.Is(err, io.EOF) {
log.Debugf("[TUN-TCP] Error: dsiconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err)
log.Debugf("[TUN-TCP] Disconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err)
}
}).HandlePacket
}
func WriteProxyInfo(conn net.Conn, id stack.TransportEndpointID) error {
var b bytes.Buffer
i := config.SPool.Get().([]byte)[:]
defer config.SPool.Put(i[:])
i := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(i[:])
binary.BigEndian.PutUint16(i, id.LocalPort)
b.Write(i)
binary.BigEndian.PutUint16(i, id.RemotePort)
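The two copy goroutines above (pooled buffer, io.CopyBuffer, errChan) repeat throughout the tunnel code; factored out, the pattern is roughly the following sketch (an illustrative refactor, not code from the diff):

```go
package core

import (
	"io"

	log "github.com/sirupsen/logrus"

	"github.com/wencaiwulue/kubevpn/v2/pkg/config"
)

// pipe copies src into dst through a pooled buffer and sends the first
// error (possibly io.EOF) to errChan.
func pipe(dst io.Writer, src io.Reader, errChan chan<- error) {
	buf := config.LPool.Get().([]byte)
	defer config.LPool.Put(buf[:])
	written, err := io.CopyBuffer(dst, src, buf)
	log.Debugf("copied %d bytes", written)
	errChan <- err
}
```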


@@ -2,94 +2,65 @@ package core
import (
"context"
"errors"
"fmt"
"io"
"net"
"time"
"sync"
log "github.com/sirupsen/logrus"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
"gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
type gvisorTCPTunnelConnector struct {
type gvisorTCPHandler struct {
// map[srcIP]net.Conn
routeMapTCP *sync.Map
packetChan chan *datagramPacket
}
func GvisorTCPTunnelConnector() Connector {
return &gvisorTCPTunnelConnector{}
}
func (c *gvisorTCPTunnelConnector) ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error) {
switch con := conn.(type) {
case *net.TCPConn:
err := con.SetNoDelay(true)
if err != nil {
return nil, err
}
err = con.SetKeepAlive(true)
if err != nil {
return nil, err
}
err = con.SetKeepAlivePeriod(15 * time.Second)
if err != nil {
return nil, err
}
}
return conn, nil
}
type gvisorTCPHandler struct{}
func GvisorTCPHandler() Handler {
return &gvisorTCPHandler{}
return &gvisorTCPHandler{
routeMapTCP: RouteMapTCP,
packetChan: TCPPacketChan,
}
}
func (h *gvisorTCPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
defer tcpConn.Close()
log.Debugf("[TUN-TCP] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
// 1, get proxy info
endpointID, err := ParseProxyInfo(tcpConn)
if err != nil {
log.Debugf("[TUN-TCP] Error: failed to parse proxy info: %v", err)
return
}
log.Debugf("[TUN-TCP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
endpointID.LocalPort, endpointID.LocalAddress.String(), endpointID.RemotePort, endpointID.RemoteAddress.String(),
)
// 2, dial proxy
host := endpointID.LocalAddress.String()
port := fmt.Sprintf("%d", endpointID.LocalPort)
var remote net.Conn
remote, err = net.DialTimeout("tcp", net.JoinHostPort(host, port), time.Second*5)
if err != nil {
log.Debugf("[TUN-TCP] Error: failed to connect addr %s: %v", net.JoinHostPort(host, port), err)
return
}
cancel, cancelFunc := context.WithCancel(ctx)
defer cancelFunc()
log.Debugf("[TCP] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
h.handle(cancel, tcpConn)
}
func (h *gvisorTCPHandler) handle(ctx context.Context, tcpConn net.Conn) {
endpoint := channel.New(tcp.DefaultReceiveBufferSize, uint32(config.DefaultMTU), tcpip.GetRandMacAddr())
errChan := make(chan error, 2)
go func() {
i := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(i[:])
written, err2 := io.CopyBuffer(remote, tcpConn, i)
log.Debugf("[TUN-TCP] Debug: write length %d data to remote", written)
errChan <- err2
defer util.HandleCrash()
h.readFromTCPConnWriteToEndpoint(ctx, tcpConn, endpoint)
util.SafeClose(errChan)
}()
go func() {
i := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(i[:])
written, err2 := io.CopyBuffer(tcpConn, remote, i)
log.Debugf("[TUN-TCP] Debug: read length %d data from remote", written)
errChan <- err2
defer util.HandleCrash()
h.readFromEndpointWriteToTCPConn(ctx, tcpConn, endpoint)
util.SafeClose(errChan)
}()
err = <-errChan
if err != nil && !errors.Is(err, io.EOF) {
log.Debugf("[TUN-TCP] Error: dsiconnect: %s >-<: %s: %v", tcpConn.LocalAddr(), remote.RemoteAddr(), err)
stack := NewStack(ctx, sniffer.NewWithPrefix(endpoint, "[gVISOR] "))
defer stack.Destroy()
select {
case <-errChan:
return
case <-ctx.Done():
return
}
}
func GvisorTCPListener(addr string) (net.Listener, error) {
log.Debug("gvisor tcp listen addr", addr)
log.Debugf("Gvisor TCP listening addr: %s", addr)
laddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
return nil, err

pkg/core/gvisortunendpoint.go (new executable file, 132 lines)

@@ -0,0 +1,132 @@
package core
import (
"context"
"net"
"github.com/google/gopacket/layers"
log "github.com/sirupsen/logrus"
"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
"gvisor.dev/gvisor/pkg/buffer"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
"gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func (h *gvisorTCPHandler) readFromEndpointWriteToTCPConn(ctx context.Context, conn net.Conn, endpoint *channel.Endpoint) {
tcpConn, _ := newGvisorFakeUDPTunnelConnOverTCP(ctx, conn)
for {
select {
case <-ctx.Done():
return
default:
}
pktBuffer := endpoint.ReadContext(ctx)
if pktBuffer != nil {
sniffer.LogPacket("[gVISOR] ", sniffer.DirectionSend, pktBuffer.NetworkProtocolNumber, pktBuffer)
buf := pktBuffer.ToView().AsSlice()
_, err := tcpConn.Write(buf)
if err != nil {
log.Errorf("[TUN] Failed to write data to tun device: %v", err)
}
}
}
}
// tun --> dispatcher
func (h *gvisorTCPHandler) readFromTCPConnWriteToEndpoint(ctx context.Context, conn net.Conn, endpoint *channel.Endpoint) {
tcpConn, _ := newGvisorFakeUDPTunnelConnOverTCP(ctx, conn)
for {
select {
case <-ctx.Done():
return
default:
}
buf := config.LPool.Get().([]byte)[:]
read, err := tcpConn.Read(buf[:])
if err != nil {
log.Errorf("[TUN] Failed to read from tcp conn: %v", err)
config.LPool.Put(buf[:])
return
}
if read == 0 {
log.Warnf("[TUN] Read from tcp conn length is %d", read)
config.LPool.Put(buf[:])
continue
}
// Try to determine network protocol number, default zero.
var protocol tcpip.NetworkProtocolNumber
var ipProtocol int
var src, dst net.IP
// The TUN interface runs with IFF_NO_PI enabled, so we need to
// determine the protocol from the IP version field
if util.IsIPv4(buf) {
protocol = header.IPv4ProtocolNumber
ipHeader, err := ipv4.ParseHeader(buf[:read])
if err != nil {
log.Errorf("Failed to parse IPv4 header: %v", err)
config.LPool.Put(buf[:])
continue
}
ipProtocol = ipHeader.Protocol
src = ipHeader.Src
dst = ipHeader.Dst
} else if util.IsIPv6(buf) {
protocol = header.IPv6ProtocolNumber
ipHeader, err := ipv6.ParseHeader(buf[:read])
if err != nil {
log.Errorf("Failed to parse IPv6 header: %s", err.Error())
config.LPool.Put(buf[:])
continue
}
ipProtocol = ipHeader.NextHeader
src = ipHeader.Src
dst = ipHeader.Dst
} else {
log.Debugf("[TUN-GVISOR] Unknown packet")
config.LPool.Put(buf[:])
continue
}
h.addRoute(src, conn)
// inner IPs like 223.254.0.100/102/103 connect to each other
if config.CIDR.Contains(dst) || config.CIDR6.Contains(dst) {
log.Tracef("[TUN-RAW] Forward to TUN device, SRC: %s, DST: %s, Length: %d", src.String(), dst.String(), read)
util.SafeWrite(h.packetChan, &datagramPacket{
DataLength: uint16(read),
Data: buf[:],
})
continue
}
pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
ReserveHeaderBytes: 0,
Payload: buffer.MakeWithData(buf[:read]),
})
config.LPool.Put(buf[:])
sniffer.LogPacket("[gVISOR] ", sniffer.DirectionRecv, protocol, pkt)
endpoint.InjectInbound(protocol, pkt)
pkt.DecRef()
log.Tracef("[TUN-%s] Write to Gvisor IP-Protocol: %s, SRC: %s, DST: %s, Length: %d", layers.IPProtocol(ipProtocol).String(), layers.IPProtocol(ipProtocol).String(), src.String(), dst, read)
}
}
func (h *gvisorTCPHandler) addRoute(src net.IP, tcpConn net.Conn) {
value, loaded := h.routeMapTCP.LoadOrStore(src.String(), tcpConn)
if loaded {
if tcpConn != value.(net.Conn) {
h.routeMapTCP.Store(src.String(), tcpConn)
log.Debugf("[TCP] Replace route map TCP: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
}
} else {
log.Debugf("[TCP] Add new route map TCP: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
}
}
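util.IsIPv4/util.IsIPv6 only need the version nibble of the first byte, since IFF_NO_PI strips any packet-info header from TUN frames. A plausible sketch (the real helpers may differ):

```go
package util

// IsIPv4 reports whether the buffer starts with an IPv4 header.
// With IFF_NO_PI, the first byte of a TUN frame carries the IP version nibble.
func IsIPv4(b []byte) bool {
	return len(b) > 0 && b[0]>>4 == 4
}

// IsIPv6 reports whether the buffer starts with an IPv6 header.
func IsIPv6(b []byte) bool {
	return len(b) > 0 && b[0]>>4 == 6
}
```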


@@ -2,81 +2,118 @@ package core
import (
"context"
"errors"
"io"
"net"
"time"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
"gvisor.dev/gvisor/pkg/waiter"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
var GvisorUDPForwardAddr string
func UDPForwarder(s *stack.Stack) func(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool {
func UDPForwarder(s *stack.Stack, ctx context.Context) func(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool {
return udp.NewForwarder(s, func(request *udp.ForwarderRequest) {
endpointID := request.ID()
log.Debugf("[TUN-UDP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
endpointID.LocalPort, endpointID.LocalAddress.String(), endpointID.RemotePort, endpointID.RemoteAddress.String(),
id := request.ID()
log.Debugf("[TUN-UDP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
)
src := &net.UDPAddr{
IP: id.RemoteAddress.AsSlice(),
Port: int(id.RemotePort),
}
dst := &net.UDPAddr{
IP: id.LocalAddress.AsSlice(),
Port: int(id.LocalPort),
}
w := &waiter.Queue{}
endpoint, tErr := request.CreateEndpoint(w)
if tErr != nil {
log.Debugf("[TUN-UDP] Error: can not create endpoint: %v", tErr)
log.Debugf("[TUN-UDP] Failed to create endpoint to dst: %s: %v", dst.String(), tErr)
return
}
node, err := ParseNode(GvisorUDPForwardAddr)
if err != nil {
log.Debugf("[TUN-UDP] Error: parse gviosr udp forward addr %s: %v", GvisorUDPForwardAddr, err)
// dial dst
remote, err1 := net.DialUDP("udp", nil, dst)
if err1 != nil {
log.Errorf("[TUN-UDP] Failed to connect dst: %s: %v", dst.String(), err1)
return
}
node.Client = &Client{
Connector: GvisorUDPOverTCPTunnelConnector(endpointID),
Transporter: TCPTransporter(),
}
forwardChain := NewChain(5, node)
ctx := context.Background()
c, err := forwardChain.getConn(ctx)
if err != nil {
log.Debugf("[TUN-UDP] Error: can not get conn: %v", err)
return
}
if err = WriteProxyInfo(c, endpointID); err != nil {
log.Debugf("[TUN-UDP] Error: can not write proxy info: %v", err)
return
}
remote, err := node.Client.ConnectContext(ctx, c)
if err != nil {
log.Debugf("[TUN-UDP] Error: can not connect: %v", err)
return
}
conn := gonet.NewUDPConn(s, w, endpoint)
conn := gonet.NewUDPConn(w, endpoint)
go func() {
defer conn.Close()
defer remote.Close()
errChan := make(chan error, 2)
go func() {
i := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(i[:])
written, err2 := io.CopyBuffer(remote, conn, i)
log.Debugf("[TUN-UDP] Debug: write length %d data to remote", written)
errChan <- err2
defer util.HandleCrash()
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
var written int
var err error
for {
err = conn.SetReadDeadline(time.Now().Add(time.Second * 120))
if err != nil {
break
}
var read int
read, _, err = conn.ReadFrom(buf[:])
if err != nil {
break
}
written += read
err = remote.SetWriteDeadline(time.Now().Add(time.Second * 120))
if err != nil {
break
}
_, err = remote.Write(buf[:read])
if err != nil {
break
}
}
log.Debugf("[TUN-UDP] Write length %d data from src: %s -> dst: %s", written, src.String(), dst.String())
errChan <- err
}()
go func() {
i := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(i[:])
written, err2 := io.CopyBuffer(conn, remote, i)
log.Debugf("[TUN-UDP] Debug: read length %d data from remote", written)
errChan <- err2
defer util.HandleCrash()
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
var err error
var written int
for {
err = remote.SetReadDeadline(time.Now().Add(time.Second * 120))
if err != nil {
break
}
var n int
n, _, err = remote.ReadFromUDP(buf[:])
if err != nil {
break
}
written += n
err = conn.SetWriteDeadline(time.Now().Add(time.Second * 120))
if err != nil {
break
}
_, err = conn.Write(buf[:n])
if err != nil {
break
}
}
log.Debugf("[TUN-UDP] Read length %d data from dst: %s -> src: %s", written, dst.String(), src.String())
errChan <- err
}()
err = <-errChan
if err != nil && !errors.Is(err, io.EOF) {
log.Debugf("[TUN-UDP] Error: dsiconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err)
err1 = <-errChan
if err1 != nil && !errors.Is(err1, io.EOF) {
log.Debugf("[TUN-UDP] Disconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err1)
}
}()
}).HandlePacket
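Aside: the forwarder above is only half the gVISOR wiring; it also has to be installed as the stack's UDP protocol handler. A minimal sketch of that registration, assuming the usual gvisor.dev APIs (stack and NIC construction elided, and the gonet relay reduced to a comment):

package main

import (
    "gvisor.dev/gvisor/pkg/tcpip/stack"
    "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
    "gvisor.dev/gvisor/pkg/waiter"
)

// registerUDPForwarder hands every UDP flow the stack sees to the
// forwarder callback, mirroring how the UDPForwarder above gets used.
func registerUDPForwarder(s *stack.Stack) {
    fwd := udp.NewForwarder(s, func(req *udp.ForwarderRequest) {
        wq := &waiter.Queue{}
        ep, tcpipErr := req.CreateEndpoint(wq)
        if tcpipErr != nil {
            return // could not materialize an endpoint for this flow
        }
        // real code wraps ep in gonet.NewUDPConn and relays to the dialed
        // destination, as the diff above does; the sketch just closes it
        ep.Close()
    })
    s.SetTransportProtocolHandler(udp.ProtocolNumber, fwd.HandlePacket)
}

func main() {}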

View File

@@ -7,40 +7,10 @@ import (
"time"
log "github.com/sirupsen/logrus"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
type gvisorUDPOverTCPTunnelConnector struct {
Id stack.TransportEndpointID
}
func GvisorUDPOverTCPTunnelConnector(endpointID stack.TransportEndpointID) Connector {
return &gvisorUDPOverTCPTunnelConnector{
Id: endpointID,
}
}
func (c *gvisorUDPOverTCPTunnelConnector) ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error) {
switch con := conn.(type) {
case *net.TCPConn:
err := con.SetNoDelay(true)
if err != nil {
return nil, err
}
err = con.SetKeepAlive(true)
if err != nil {
return nil, err
}
err = con.SetKeepAlivePeriod(15 * time.Second)
if err != nil {
return nil, err
}
}
return newGvisorFakeUDPTunnelConnOverTCP(ctx, conn)
}
type gvisorUDPHandler struct{}
func GvisorUDPHandler() Handler {
@@ -53,10 +23,10 @@ func (h *gvisorUDPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
// 1, get proxy info
endpointID, err := ParseProxyInfo(tcpConn)
if err != nil {
log.Warningf("[TUN-UDP] Error: Failed to parse proxy info: %v", err)
log.Errorf("[TUN-UDP] Failed to parse proxy info: %v", err)
return
}
log.Debugf("[TUN-UDP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
log.Debugf("[TUN-UDP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
endpointID.LocalPort, endpointID.LocalAddress.String(), endpointID.RemotePort, endpointID.RemoteAddress.String(),
)
// 2, dial proxy
@@ -67,7 +37,7 @@ func (h *gvisorUDPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
var remote *net.UDPConn
remote, err = net.DialUDP("udp", nil, addr)
if err != nil {
log.Debugf("[TUN-UDP] Error: failed to connect addr %s: %v", addr.String(), err)
log.Errorf("[TUN-UDP] Failed to connect addr %s: %v", addr.String(), err)
return
}
handle(ctx, tcpConn, remote)
@@ -116,7 +86,7 @@ func (c *gvisorFakeUDPTunnelConn) Close() error {
}
func GvisorUDPListener(addr string) (net.Listener, error) {
log.Debug("gvisor UDP over TCP listen addr", addr)
log.Debugf("Gvisor UDP over TCP listening addr: %s", addr)
laddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
return nil, err
@@ -125,16 +95,17 @@ func GvisorUDPListener(addr string) (net.Listener, error) {
if err != nil {
return nil, err
}
return &tcpKeepAliveListener{ln}, nil
return &tcpKeepAliveListener{TCPListener: ln}, nil
}
func handle(ctx context.Context, tcpConn net.Conn, udpConn *net.UDPConn) {
defer udpConn.Close()
log.Debugf("[TUN-UDP] Debug: %s <-> %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
log.Debugf("[TUN-UDP] %s <-> %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
errChan := make(chan error, 2)
go func() {
b := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(b[:])
defer util.HandleCrash()
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
for {
select {
@@ -143,30 +114,43 @@ func handle(ctx context.Context, tcpConn net.Conn, udpConn *net.UDPConn) {
default:
}
dgram, err := readDatagramPacket(tcpConn, b[:])
err := tcpConn.SetReadDeadline(time.Now().Add(time.Second * 30))
if err != nil {
log.Debugf("[TUN-UDP] Debug: %s -> 0 : %v", tcpConn.RemoteAddr(), err)
log.Errorf("[TUN-UDP] Failed to set read deadline: %v", err)
errChan <- err
return
}
dgram, err := readDatagramPacket(tcpConn, buf[:])
if err != nil {
log.Errorf("[TUN-UDP] %s -> %s: %v", tcpConn.RemoteAddr(), udpConn.LocalAddr(), err)
errChan <- err
return
}
if dgram.DataLength == 0 {
log.Debugf("[TUN-UDP] Error: length is zero")
log.Errorf("[TUN-UDP] Length is zero")
errChan <- fmt.Errorf("length of read packet is zero")
return
}
if _, err = udpConn.Write(dgram.Data); err != nil {
log.Debugf("[TUN-UDP] Error: %s -> %s : %s", tcpConn.RemoteAddr(), "localhost:8422", err)
err = udpConn.SetWriteDeadline(time.Now().Add(time.Second * 30))
if err != nil {
log.Errorf("[TUN-UDP] Failed to set write deadline: %v", err)
errChan <- err
return
}
log.Debugf("[TUN-UDP] Debug: %s >>> %s length: %d", tcpConn.RemoteAddr(), "localhost:8422", dgram.DataLength)
if _, err = udpConn.Write(dgram.Data); err != nil {
log.Errorf("[TUN-UDP] %s -> %s : %s", tcpConn.RemoteAddr(), "localhost:8422", err)
errChan <- err
return
}
log.Debugf("[TUN-UDP] %s >>> %s length: %d", tcpConn.RemoteAddr(), "localhost:8422", dgram.DataLength)
}
}()
go func() {
b := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(b[:])
defer util.HandleCrash()
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
for {
select {
@@ -175,32 +159,44 @@ func handle(ctx context.Context, tcpConn net.Conn, udpConn *net.UDPConn) {
default:
}
n, _, err := udpConn.ReadFrom(b[:])
err := udpConn.SetReadDeadline(time.Now().Add(time.Second * 30))
if err != nil {
log.Debugf("[TUN-UDP] Error: %s : %s", tcpConn.RemoteAddr(), err)
log.Errorf("[TUN-UDP] Failed to set read deadline failed: %v", err)
errChan <- err
return
}
n, _, err := udpConn.ReadFrom(buf[:])
if err != nil {
log.Errorf("[TUN-UDP] %s : %s", tcpConn.RemoteAddr(), err)
errChan <- err
return
}
if n == 0 {
log.Debugf("[TUN-UDP] Error: length is zero")
log.Errorf("[TUN-UDP] Length is zero")
errChan <- fmt.Errorf("length of read packet is zero")
return
}
// pipe from peer to tunnel
dgram := newDatagramPacket(b[:n])
if err = dgram.Write(tcpConn); err != nil {
log.Debugf("[TUN-UDP] Error: %s <- %s : %s", tcpConn.RemoteAddr(), dgram.Addr(), err)
err = tcpConn.SetWriteDeadline(time.Now().Add(time.Second * 30))
if err != nil {
log.Errorf("[TUN-UDP] Error: set write deadline failed: %v", err)
errChan <- err
return
}
log.Debugf("[TUN-UDP] Debug: %s <<< %s length: %d", tcpConn.RemoteAddr(), dgram.Addr(), len(dgram.Data))
dgram := newDatagramPacket(buf[:n])
if err = dgram.Write(tcpConn); err != nil {
log.Errorf("[TUN-UDP] Error: %s <- %s : %s", tcpConn.RemoteAddr(), dgram.Addr(), err)
errChan <- err
return
}
log.Debugf("[TUN-UDP] %s <<< %s length: %d", tcpConn.RemoteAddr(), dgram.Addr(), len(dgram.Data))
}
}()
err := <-errChan
if err != nil {
log.Debugf("[TUN-UDP] Error: %v", err)
log.Errorf("[TUN-UDP] %v", err)
}
log.Debugf("[TUN-UDP] Debug: %s >-< %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
log.Debugf("[TUN-UDP] %s >-< %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
return
}
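The loop bodies above all share one relay shape: arm a deadline, read, arm a deadline, write, repeat, so a stalled peer can never pin the goroutine. A dependency-free sketch of that pattern (the buffer size and timeout below are illustrative, not the repo's values):

package main

import (
    "fmt"
    "net"
    "time"
)

// relay copies src to dst, re-arming deadlines before every read and
// write so neither side can block the loop indefinitely.
func relay(dst, src net.Conn, timeout time.Duration) error {
    buf := make([]byte, 64<<10)
    for {
        if err := src.SetReadDeadline(time.Now().Add(timeout)); err != nil {
            return err
        }
        n, err := src.Read(buf)
        if err != nil {
            return err
        }
        if err = dst.SetWriteDeadline(time.Now().Add(timeout)); err != nil {
            return err
        }
        if _, err = dst.Write(buf[:n]); err != nil {
            return err
        }
    }
}

func main() {
    clientSide, relayIn := net.Pipe() // client -> relay
    relayOut, backend := net.Pipe()   // relay -> backend
    go relay(relayOut, relayIn, time.Second)
    go clientSide.Write([]byte("ping"))
    buf := make([]byte, 4)
    backend.Read(buf)
    fmt.Printf("backend got %q\n", buf) // backend got "ping"
}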

View File

@@ -7,9 +7,7 @@ import (
"strings"
)
var (
ErrorInvalidNode = errors.New("invalid node")
)
var ErrorInvalidNode = errors.New("invalid node")
type Node struct {
Addr string
@@ -29,12 +27,13 @@ func ParseNode(s string) (*Node, error) {
if err != nil {
return nil, err
}
return &Node{
node := &Node{
Addr: u.Host,
Remote: strings.Trim(u.EscapedPath(), "/"),
Values: u.Query(),
Protocol: u.Scheme,
}, nil
}
return node, nil
}
// Get returns node parameter specified by key.
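For orientation: ParseNode treats a node string as an ordinary URL — the scheme is the protocol, the host the listen address, the path the remote, and the query string carries options such as net and gw that GenerateServers reads below. A standard-library sketch (the example address and parameters are invented):

package main

import (
    "fmt"
    "net/url"
    "strings"
)

func main() {
    u, err := url.Parse("tun://127.0.0.1:8422/10.0.0.1:8422?net=10.233.0.0/16")
    if err != nil {
        panic(err)
    }
    fmt.Println("protocol:", u.Scheme)                         // tun
    fmt.Println("addr:", u.Host)                               // 127.0.0.1:8422
    fmt.Println("remote:", strings.Trim(u.EscapedPath(), "/")) // 10.0.0.1:8422
    fmt.Println("net:", u.Query().Get("net"))                  // 10.233.0.0/16
}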

View File

@@ -11,17 +11,15 @@ import (
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/tun"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/tun"
)
var (
// RouteNAT Globe route table for inner ip
RouteNAT = NewNAT()
// RouteConnNAT map[srcIP]net.Conn
RouteConnNAT = &sync.Map{}
// Chan tcp connects
Chan = make(chan *datagramPacket, MaxSize)
// RouteMapTCP map[srcIP]net.Conn Globe route table for inner ip
RouteMapTCP = &sync.Map{}
// TCPPacketChan tcp connects
TCPPacketChan = make(chan *datagramPacket, MaxSize)
)
type TCPUDPacket struct {
@@ -39,7 +37,6 @@ type Route struct {
}
func (r *Route) parseChain() (*Chain, error) {
// parse the base nodes
node, err := parseChainNode(r.ChainNode)
if err != nil {
return nil, err
@@ -50,7 +47,6 @@ func (r *Route) parseChain() (*Chain, error) {
func parseChainNode(ns string) (*Node, error) {
node, err := ParseNode(ns)
if err != nil {
log.Errorf("parse node error: %v", err)
return nil, err
}
node.Client = &Client{
@@ -63,7 +59,7 @@ func parseChainNode(ns string) (*Node, error) {
func (r *Route) GenerateServers() ([]Server, error) {
chain, err := r.parseChain()
if err != nil && !errors.Is(err, ErrorInvalidNode) {
log.Errorf("parse chain error: %v", err)
log.Errorf("Failed to parse chain: %v", err)
return nil, err
}
@@ -72,7 +68,7 @@ func (r *Route) GenerateServers() ([]Server, error) {
var node *Node
node, err = ParseNode(serveNode)
if err != nil {
log.Errorf("parse node %s error: %v", serveNode, err)
log.Errorf("Failed to parse node %s: %v", serveNode, err)
return nil, err
}
@@ -91,32 +87,39 @@ func (r *Route) GenerateServers() ([]Server, error) {
Gateway: node.Get("gw"),
})
if err != nil {
log.Errorf("create tun listener error: %v", err)
log.Errorf("Failed to create tun listener: %v", err)
return nil, err
}
case "tcp":
handler = TCPHandler()
ln, err = TCPListener(node.Addr)
if err != nil {
log.Errorf("create tcp listener error: %v", err)
log.Errorf("Failed to create tcp listener: %v", err)
return nil, err
}
case "gtcp":
handler = GvisorTCPHandler()
ln, err = GvisorTCPListener(node.Addr)
if err != nil {
log.Errorf("create gvisor tcp listener error: %v", err)
log.Errorf("Failed to create gvisor tcp listener: %v", err)
return nil, err
}
case "gudp":
handler = GvisorUDPHandler()
ln, err = GvisorUDPListener(node.Addr)
if err != nil {
log.Errorf("create gvisor udp listener error: %v", err)
log.Errorf("Failed to create gvisor udp listener: %v", err)
return nil, err
}
case "ssh":
handler = SSHHandler()
ln, err = SSHListener(node.Addr)
if err != nil {
log.Errorf("Failed to create ssh listener: %v", err)
return nil, err
}
default:
log.Errorf("not support protocol %s", node.Protocol)
log.Errorf("Not support protocol %s", node.Protocol)
return nil, fmt.Errorf("not support protocol %s", node.Protocol)
}
servers = append(servers, Server{Listener: ln, Handler: handler})

pkg/core/ssh.go (new file, 65 lines)
View File

@@ -0,0 +1,65 @@
package core
import (
"context"
"crypto/rand"
"crypto/rsa"
"io"
"net"
"github.com/gliderlabs/ssh"
log "github.com/sirupsen/logrus"
gossh "golang.org/x/crypto/ssh"
)
func SSHListener(addr string) (net.Listener, error) {
ln, err := net.Listen("tcp", addr)
if err != nil {
return nil, err
}
log.Debugf("starting ssh server on port %s...", addr)
return ln, err
}
func SSHHandler() Handler {
return &sshHandler{}
}
type sshHandler struct {
}
func (s *sshHandler) Handle(ctx context.Context, conn net.Conn) {
forwardHandler := &ssh.ForwardedTCPHandler{}
server := ssh.Server{
LocalPortForwardingCallback: ssh.LocalPortForwardingCallback(func(ctx ssh.Context, dhost string, dport uint32) bool {
log.Println("Accepted forward", dhost, dport)
return true
}),
Handler: ssh.Handler(func(s ssh.Session) {
io.WriteString(s, "Remote forwarding available...\n")
select {}
}),
ReversePortForwardingCallback: ssh.ReversePortForwardingCallback(func(ctx ssh.Context, host string, port uint32) bool {
log.Println("attempt to bind", host, port, "granted")
return true
}),
RequestHandlers: map[string]ssh.RequestHandler{
"tcpip-forward": forwardHandler.HandleSSHRequest,
"cancel-tcpip-forward": forwardHandler.HandleSSHRequest,
},
SubsystemHandlers: ssh.DefaultSubsystemHandlers,
ChannelHandlers: ssh.DefaultChannelHandlers,
HostSigners: func() []ssh.Signer {
key, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return nil
}
fromKey, err := gossh.NewSignerFromKey(key)
if err != nil {
return nil
}
return []ssh.Signer{fromKey}
}(),
}
server.HandleConn(conn)
}
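A hedged client-side counterpart: with no auth handler configured, gliderlabs/ssh should accept unauthenticated sessions, and because the host key above is regenerated in memory per process, a client cannot pin it. The address, user, and forward target below are invented for the sketch:

package main

import (
    "fmt"

    gossh "golang.org/x/crypto/ssh"
)

func main() {
    cfg := &gossh.ClientConfig{
        User:            "anyone",
        HostKeyCallback: gossh.InsecureIgnoreHostKey(), // key changes every restart above
    }
    client, err := gossh.Dial("tcp", "127.0.0.1:2222", cfg)
    if err != nil {
        panic(err)
    }
    defer client.Close()
    // a "direct-tcpip" channel, i.e. the ssh -L style local forward that
    // the LocalPortForwardingCallback above permits
    remote, err := client.Dial("tcp", "10.0.0.5:80")
    if err != nil {
        panic(err)
    }
    defer remote.Close()
    fmt.Println("forwarded connection open to", remote.RemoteAddr())
}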

View File

@@ -4,7 +4,7 @@ import (
"context"
"net"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
)
type tcpTransporter struct{}
@@ -27,7 +27,7 @@ func TCPListener(addr string) (net.Listener, error) {
if err != nil {
return nil, err
}
return &tcpKeepAliveListener{ln}, nil
return &tcpKeepAliveListener{TCPListener: ln}, nil
}
type tcpKeepAliveListener struct {

View File

@@ -3,13 +3,14 @@ package core
import (
"context"
"net"
"strings"
"sync"
"time"
log "github.com/sirupsen/logrus"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
type fakeUDPTunnelConnector struct {
@@ -41,33 +42,33 @@ func (c *fakeUDPTunnelConnector) ConnectContext(ctx context.Context, conn net.Co
type fakeUdpHandler struct {
// map[srcIP]net.Conn
connNAT *sync.Map
ch chan *datagramPacket
routeMapTCP *sync.Map
packetChan chan *datagramPacket
}
func TCPHandler() Handler {
return &fakeUdpHandler{
connNAT: RouteConnNAT,
ch: Chan,
routeMapTCP: RouteMapTCP,
packetChan: TCPPacketChan,
}
}
func (h *fakeUdpHandler) Handle(ctx context.Context, tcpConn net.Conn) {
defer tcpConn.Close()
log.Debugf("[tcpserver] %s -> %s\n", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
log.Debugf("[TCP] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
defer func(addr net.Addr) {
var keys []string
h.connNAT.Range(func(key, value any) bool {
h.routeMapTCP.Range(func(key, value any) bool {
if value.(net.Conn) == tcpConn {
keys = append(keys, key.(string))
}
return true
})
for _, key := range keys {
h.connNAT.Delete(key)
h.routeMapTCP.Delete(key)
}
log.Debugf("[tcpserver] delete conn %s from globle routeConnNAT, deleted count %d", addr, len(keys))
log.Debugf("[TCP] To %s by conn %s from globle route map TCP", strings.Join(keys, " "), addr)
}(tcpConn.LocalAddr())
for {
@@ -77,34 +78,31 @@ func (h *fakeUdpHandler) Handle(ctx context.Context, tcpConn net.Conn) {
default:
}
b := config.LPool.Get().([]byte)[:]
dgram, err := readDatagramPacketServer(tcpConn, b[:])
buf := config.LPool.Get().([]byte)[:]
dgram, err := readDatagramPacketServer(tcpConn, buf[:])
if err != nil {
log.Debugf("[tcpserver] %s -> 0 : %v", tcpConn.RemoteAddr(), err)
log.Errorf("[TCP] %s -> %s : %v", tcpConn.RemoteAddr(), tcpConn.LocalAddr(), err)
config.LPool.Put(buf[:])
return
}
var src net.IP
bb := dgram.Data[:dgram.DataLength]
if util.IsIPv4(bb) {
src = net.IPv4(bb[12], bb[13], bb[14], bb[15])
} else if util.IsIPv6(bb) {
src = bb[8:24]
} else {
log.Errorf("[tcpserver] unknown packet")
src, _, err = util.ParseIP(dgram.Data[:dgram.DataLength])
if err != nil {
log.Errorf("[TCP] Unknown packet")
config.LPool.Put(buf[:])
continue
}
value, loaded := h.connNAT.LoadOrStore(src.String(), tcpConn)
value, loaded := h.routeMapTCP.LoadOrStore(src.String(), tcpConn)
if loaded {
if tcpConn != value.(net.Conn) {
h.connNAT.Store(src.String(), tcpConn)
log.Debugf("[tcpserver] replace routeConnNAT: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
h.routeMapTCP.Store(src.String(), tcpConn)
log.Debugf("[TCP] Replace route map TCP: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
}
log.Debugf("[tcpserver] find routeConnNAT: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
} else {
log.Debugf("[tcpserver] new routeConnNAT: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
log.Debugf("[TCP] Add new route map TCP: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
}
h.ch <- dgram
util.SafeWrite(h.packetChan, dgram)
}
}
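The LoadOrStore-then-replace dance above is the whole concurrency story of the TCP route map; isolated, it looks like this (a self-contained sketch over sync.Map):

package main

import (
    "fmt"
    "net"
    "sync"
)

var routeMapTCP sync.Map // map[srcIP string]net.Conn

// addRoute keeps the newest conn for a source IP: LoadOrStore wins the
// common case without a write, and a reconnecting peer replaces the
// stale entry.
func addRoute(src net.IP, conn net.Conn) {
    value, loaded := routeMapTCP.LoadOrStore(src.String(), conn)
    if loaded && value.(net.Conn) != conn {
        routeMapTCP.Store(src.String(), conn)
    }
}

func main() {
    a, b := net.Pipe()
    defer a.Close()
    defer b.Close()
    addRoute(net.ParseIP("10.233.0.7"), a)
    addRoute(net.ParseIP("10.233.0.7"), b) // same src reconnected: replaces a
    v, _ := routeMapTCP.Load("10.233.0.7")
    fmt.Println(v.(net.Conn) == b) // true
}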

View File

@@ -1,221 +0,0 @@
package core
import (
"context"
"net"
"sync"
"github.com/google/gopacket/layers"
log "github.com/sirupsen/logrus"
"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
"gvisor.dev/gvisor/pkg/buffer"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"github.com/wencaiwulue/kubevpn/pkg/config"
)
var _ stack.LinkEndpoint = (*tunEndpoint)(nil)
// tunEndpoint /Users/naison/go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20220422052705-39790bd3a15a/pkg/tcpip/link/tun/device.go:122
type tunEndpoint struct {
ctx context.Context
tun net.Conn
once sync.Once
endpoint *channel.Endpoint
engine config.Engine
in chan<- *DataElem
out chan *DataElem
}
// WritePackets writes packets. Must not be called with an empty list of
// packet buffers.
//
// WritePackets may modify the packet buffers, and takes ownership of the PacketBufferList.
// it is not safe to use the PacketBufferList after a call to WritePackets.
func (e *tunEndpoint) WritePackets(p stack.PacketBufferList) (int, tcpip.Error) {
return e.endpoint.WritePackets(p)
}
// MTU is the maximum transmission unit for this endpoint. This is
// usually dictated by the backing physical network; when such a
// physical network doesn't exist, the limit is generally 64k, which
// includes the maximum size of an IP packet.
func (e *tunEndpoint) MTU() uint32 {
return uint32(config.DefaultMTU)
}
// MaxHeaderLength returns the maximum size the data link (and
// lower level layers combined) headers can have. Higher levels use this
// information to reserve space in the front of the packets they're
// building.
func (e *tunEndpoint) MaxHeaderLength() uint16 {
return 0
}
// LinkAddress returns the link address (typically a MAC) of the
// endpoint.
func (e *tunEndpoint) LinkAddress() tcpip.LinkAddress {
return e.endpoint.LinkAddress()
}
// Capabilities returns the set of capabilities supported by the
// endpoint.
func (e *tunEndpoint) Capabilities() stack.LinkEndpointCapabilities {
return e.endpoint.LinkEPCapabilities
}
// Attach attaches the data link layer endpoint to the network-layer
// dispatcher of the stack.
//
// Attach is called with a nil dispatcher when the endpoint's NIC is being
// removed.
func (e *tunEndpoint) Attach(dispatcher stack.NetworkDispatcher) {
e.endpoint.Attach(dispatcher)
// queue --> tun
e.once.Do(func() {
go func() {
for {
select {
case <-e.ctx.Done():
return
default:
}
read := e.endpoint.ReadContext(e.ctx)
if !read.IsNil() {
bb := read.ToView().AsSlice()
i := config.LPool.Get().([]byte)[:]
n := copy(i, bb)
bb = nil
e.out <- NewDataElem(i[:], n, nil, nil)
}
}
}()
// tun --> dispatcher
go func() {
// full(all use gvisor), mix(cluster network use gvisor), raw(not use gvisor)
for {
bytes := config.LPool.Get().([]byte)[:]
read, err := e.tun.Read(bytes[:])
if err != nil {
// if context is still going
if e.ctx.Err() == nil {
log.Fatalf("[TUN]: read from tun failed: %v", err)
} else {
log.Info("tun device closed")
}
return
}
if read == 0 {
log.Warnf("[TUN]: read from tun length is %d", read)
continue
}
// Try to determine network protocol number, default zero.
var protocol tcpip.NetworkProtocolNumber
var ipProtocol int
var src, dst net.IP
// TUN interface with IFF_NO_PI enabled, thus
// we need to determine protocol from version field
version := bytes[0] >> 4
if version == 4 {
protocol = header.IPv4ProtocolNumber
ipHeader, err := ipv4.ParseHeader(bytes[:read])
if err != nil {
log.Errorf("parse ipv4 header failed: %s", err.Error())
continue
}
ipProtocol = ipHeader.Protocol
src = ipHeader.Src
dst = ipHeader.Dst
} else if version == 6 {
protocol = header.IPv6ProtocolNumber
ipHeader, err := ipv6.ParseHeader(bytes[:read])
if err != nil {
log.Errorf("parse ipv6 header failed: %s", err.Error())
continue
}
ipProtocol = ipHeader.NextHeader
src = ipHeader.Src
dst = ipHeader.Dst
} else {
log.Debugf("[TUN-gvisor] unknown packet version %d", version)
continue
}
// only tcp and udp needs to distinguish transport engine
// gvisor: all network use gvisor
// mix: cluster network use gvisor, diy network use raw
// raw: all network use raw
if (ipProtocol == int(layers.IPProtocolUDP) || ipProtocol == int(layers.IPProtocolUDPLite) || ipProtocol == int(layers.IPProtocolTCP)) &&
(e.engine == config.EngineGvisor || (e.engine == config.EngineMix && (!config.CIDR.Contains(dst) && !config.CIDR6.Contains(dst)))) {
pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
ReserveHeaderBytes: 0,
Payload: buffer.MakeWithData(bytes[:read]),
})
//defer pkt.DecRef()
config.LPool.Put(bytes[:])
e.endpoint.InjectInbound(protocol, pkt)
log.Debugf("[TUN-%s] IP-Protocol: %s, SRC: %s, DST: %s, Length: %d", layers.IPProtocol(ipProtocol).String(), layers.IPProtocol(ipProtocol).String(), src.String(), dst, read)
} else {
log.Debugf("[TUN-RAW] IP-Protocol: %s, SRC: %s, DST: %s, Length: %d", layers.IPProtocol(ipProtocol).String(), src.String(), dst, read)
e.in <- NewDataElem(bytes[:], read, src, dst)
}
}
}()
go func() {
for elem := range e.out {
_, err := e.tun.Write(elem.Data()[:elem.Length()])
config.LPool.Put(elem.Data()[:])
if err != nil {
log.Fatalf("[TUN] Fatal: failed to write data to tun device: %v", err)
}
}
}()
})
}
// IsAttached returns whether a NetworkDispatcher is attached to the
// endpoint.
func (e *tunEndpoint) IsAttached() bool {
return e.endpoint.IsAttached()
}
// Wait waits for any worker goroutines owned by the endpoint to stop.
//
// For now, requesting that an endpoint's worker goroutine(s) stop is
// implementation specific.
//
// Wait will not block if the endpoint hasn't started any goroutines
// yet, even if it might later.
func (e *tunEndpoint) Wait() {
return
}
// ARPHardwareType returns the ARPHRD_TYPE of the link endpoint.
//
// See:
// https://github.com/torvalds/linux/blob/aa0c9086b40c17a7ad94425b3b70dd1fdd7497bf/include/uapi/linux/if_arp.h#L30
func (e *tunEndpoint) ARPHardwareType() header.ARPHardwareType {
return header.ARPHardwareNone
}
// AddHeader adds a link layer header to the packet if required.
func (e *tunEndpoint) AddHeader(ptr stack.PacketBufferPtr) {
return
}
func NewTunEndpoint(ctx context.Context, tun net.Conn, mtu uint32, engine config.Engine, in chan<- *DataElem, out chan *DataElem) stack.LinkEndpoint {
addr, _ := tcpip.ParseMACAddress("02:03:03:04:05:06")
return &tunEndpoint{
ctx: ctx,
tun: tun,
endpoint: channel.New(tcp.DefaultReceiveBufferSize, mtu, addr),
engine: engine,
in: in,
out: out,
}
}
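Both the deleted endpoint above and its util.ParseIP replacement lean on the same trick: a TUN device opened with IFF_NO_PI delivers bare IP packets, so the version nibble of the first byte decides how to slice out the addresses. A dependency-free sketch of what ParseIP presumably does (its real signature lives in pkg/util):

package main

import (
    "fmt"
    "net"
)

// parseSrcDst reads the IP version nibble, then slices the source and
// destination addresses out of the fixed header offsets.
func parseSrcDst(packet []byte) (src, dst net.IP, err error) {
    if len(packet) == 0 {
        return nil, nil, fmt.Errorf("empty packet")
    }
    switch packet[0] >> 4 {
    case 4:
        if len(packet) < 20 {
            return nil, nil, fmt.Errorf("short ipv4 packet")
        }
        return net.IP(packet[12:16]), net.IP(packet[16:20]), nil
    case 6:
        if len(packet) < 40 {
            return nil, nil, fmt.Errorf("short ipv6 packet")
        }
        return net.IP(packet[8:24]), net.IP(packet[24:40]), nil
    default:
        return nil, nil, fmt.Errorf("unknown IP version %d", packet[0]>>4)
    }
}

func main() {
    pkt := make([]byte, 20)
    pkt[0] = 0x45 // IPv4, IHL=5
    copy(pkt[12:16], net.IPv4(10, 233, 0, 7).To4())
    copy(pkt[16:20], net.IPv4(10, 233, 0, 8).To4())
    src, dst, _ := parseSrcDst(pkt)
    fmt.Println(src, dst) // 10.233.0.7 10.233.0.8
}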

View File

@@ -2,135 +2,75 @@ package core
import (
"context"
"fmt"
"math/rand"
"net"
"strings"
"sync"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
log "github.com/sirupsen/logrus"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
const (
MaxSize = 1000
MaxThread = 10
MaxConn = 1
MaxSize = 1000
)
type tunHandler struct {
chain *Chain
node *Node
routeNAT *NAT
chain *Chain
node *Node
routeMapUDP *RouteMap
// map[srcIP]net.Conn
routeConnNAT *sync.Map
chExit chan error
routeMapTCP *sync.Map
chExit chan error
}
type NAT struct {
type RouteMap struct {
lock *sync.RWMutex
routes map[string][]net.Addr
routes map[string]net.Addr
}
func NewNAT() *NAT {
return &NAT{
func NewRouteMap() *RouteMap {
return &RouteMap{
lock: &sync.RWMutex{},
routes: map[string][]net.Addr{},
routes: map[string]net.Addr{},
}
}
func (n *NAT) RemoveAddr(addr net.Addr) (count int) {
n.lock.Lock()
defer n.lock.Unlock()
for k, v := range n.routes {
for i := 0; i < len(v); i++ {
if v[i].String() == addr.String() {
v = append(v[:i], v[i+1:]...)
i--
count++
}
}
n.routes[k] = v
}
return
}
func (n *NAT) LoadOrStore(to net.IP, addr net.Addr) (result net.Addr, load bool) {
func (n *RouteMap) LoadOrStore(to net.IP, addr net.Addr) (net.Addr, bool) {
n.lock.RLock()
addrList := n.routes[to.String()]
route, load := n.routes[to.String()]
n.lock.RUnlock()
for _, add := range addrList {
if add.String() == addr.String() {
load = true
result = addr
return
}
if load {
return route, true
}
n.lock.Lock()
defer n.lock.Unlock()
if addrList == nil {
n.routes[to.String()] = []net.Addr{addr}
result = addr
return
} else {
n.routes[to.String()] = append(n.routes[to.String()], addr)
result = addr
return
}
n.routes[to.String()] = addr
return addr, false
}
func (n *NAT) RouteTo(ip net.IP) net.Addr {
n.lock.RLock()
defer n.lock.RUnlock()
addrList := n.routes[ip.String()]
if len(addrList) == 0 {
return nil
}
// for load balance
index := rand.Intn(len(n.routes[ip.String()]))
return addrList[index]
}
func (n *NAT) Remove(ip net.IP, addr net.Addr) {
func (n *RouteMap) Store(to net.IP, addr net.Addr) {
n.lock.Lock()
defer n.lock.Unlock()
addrList, ok := n.routes[ip.String()]
if !ok {
return
}
for i := 0; i < len(addrList); i++ {
if addrList[i].String() == addr.String() {
addrList = append(addrList[:i], addrList[i+1:]...)
i--
}
}
n.routes[ip.String()] = addrList
return
n.routes[to.String()] = addr
}
func (n *NAT) Range(f func(key string, v []net.Addr)) {
func (n *RouteMap) RouteTo(ip net.IP) net.Addr {
n.lock.RLock()
defer n.lock.RUnlock()
for k, v := range n.routes {
f(k, v)
}
return n.routes[ip.String()]
}
// TunHandler creates a handler for tun tunnel.
func TunHandler(chain *Chain, node *Node) Handler {
return &tunHandler{
chain: chain,
node: node,
routeNAT: RouteNAT,
routeConnNAT: RouteConnNAT,
chExit: make(chan error, 1),
chain: chain,
node: node,
routeMapUDP: NewRouteMap(),
routeMapTCP: RouteMapTCP,
chExit: make(chan error, 1),
}
}
@@ -142,37 +82,11 @@ func (h *tunHandler) Handle(ctx context.Context, tun net.Conn) {
}
}
func (h tunHandler) printRoute() {
for {
select {
case <-time.Tick(time.Second * 5):
var i int
var sb strings.Builder
h.routeNAT.Range(func(key string, value []net.Addr) {
i++
var s []string
for _, addr := range value {
if addr != nil {
s = append(s, addr.String())
}
}
if len(s) != 0 {
sb.WriteString(fmt.Sprintf("to: %s, route: %s\n", key, strings.Join(s, " ")))
}
})
log.Debug(sb.String())
log.Debug(i)
}
}
}
type Device struct {
tun net.Conn
thread int
tun net.Conn
tunInboundRaw chan *DataElem
tunInbound chan *DataElem
tunOutbound chan *DataElem
tunInbound chan *DataElem
tunOutbound chan *DataElem
// your main logic
tunInboundHandler func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem)
@@ -181,184 +95,99 @@ type Device struct {
}
func (d *Device) readFromTun() {
defer util.HandleCrash()
for {
b := config.LPool.Get().([]byte)[:]
n, err := d.tun.Read(b[:])
buf := config.LPool.Get().([]byte)[:]
n, err := d.tun.Read(buf[:])
if err != nil {
select {
case d.chExit <- err:
default:
}
config.LPool.Put(buf[:])
log.Errorf("[TUN] Failed to read from tun: %v", err)
util.SafeWrite(d.chExit, err)
return
}
d.tunInboundRaw <- &DataElem{
data: b[:],
length: n,
if n == 0 {
log.Errorf("[TUN] Read packet length 0")
config.LPool.Put(buf[:])
continue
}
src, dst, err := util.ParseIP(buf[:n])
if err != nil {
log.Errorf("[TUN] Unknown packet")
config.LPool.Put(buf[:])
continue
}
log.Debugf("[TUN] SRC: %s --> DST: %s, length: %d", src, dst, n)
util.SafeWrite(d.tunInbound, &DataElem{
data: buf[:],
length: n,
src: src,
dst: dst,
})
}
}
func (d *Device) writeToTun() {
defer util.HandleCrash()
for e := range d.tunOutbound {
_, err := d.tun.Write(e.data[:e.length])
config.LPool.Put(e.data[:])
if err != nil {
select {
case d.chExit <- err:
default:
}
util.SafeWrite(d.chExit, err)
return
}
}
}
func (d *Device) parseIPHeader() {
for e := range d.tunInboundRaw {
if util.IsIPv4(e.data[:e.length]) {
// ipv4.ParseHeader
b := e.data[:e.length]
e.src = net.IPv4(b[12], b[13], b[14], b[15])
e.dst = net.IPv4(b[16], b[17], b[18], b[19])
} else if util.IsIPv6(e.data[:e.length]) {
// ipv6.ParseHeader
e.src = e.data[:e.length][8:24]
e.dst = e.data[:e.length][24:40]
} else {
log.Errorf("[tun-packet] unknown packet")
continue
}
log.Debugf("[tun] %s --> %s, length: %d", e.src, e.dst, e.length)
d.tunInbound <- e
}
}
func (d *Device) Close() {
d.tun.Close()
util.SafeClose(d.tunInbound)
util.SafeClose(d.tunOutbound)
util.SafeClose(TCPPacketChan)
}
func heartbeats(tun net.Conn, in chan<- *DataElem) {
conn, err := util.GetTunDeviceByConn(tun)
func heartbeats(ctx context.Context, tun net.Conn) {
tunIfi, err := util.GetTunDeviceByConn(tun)
if err != nil {
log.Errorf("get tun device error: %s", err.Error())
log.Errorf("Failed to get tun device: %s", err.Error())
return
}
srcIPv4, srcIPv6, err := util.GetLocalTunIP(conn.Name)
srcIPv4, srcIPv6, dockerSrcIPv4, err := util.GetTunDeviceIP(tunIfi.Name)
if err != nil {
return
}
if config.RouterIP.To4().Equal(srcIPv4) {
return
}
if config.RouterIP6.To4().Equal(srcIPv6) {
return
}
var bytes []byte
var bytes6 []byte
ticker := time.NewTicker(time.Second * 5)
defer ticker.Stop()
for ; true; <-ticker.C {
for i := 0; i < 4; i++ {
if bytes == nil {
bytes, err = genICMPPacket(srcIPv4, config.RouterIP)
if err != nil {
log.Errorf("generate ipv4 packet error: %s", err.Error())
continue
}
}
if bytes6 == nil {
bytes6, err = genICMPPacketIPv6(srcIPv6, config.RouterIP6)
if err != nil {
log.Errorf("generate ipv6 packet error: %s", err.Error())
continue
}
}
for index, i2 := range [][]byte{bytes, bytes6} {
data := config.LPool.Get().([]byte)[:]
length := copy(data, i2)
var src, dst net.IP
if index == 0 {
src, dst = srcIPv4, config.RouterIP
} else {
src, dst = srcIPv6, config.RouterIP6
}
in <- &DataElem{
data: data[:],
length: length,
src: src,
dst: dst,
}
}
time.Sleep(time.Second)
select {
case <-ctx.Done():
return
default:
}
if srcIPv4 != nil {
go util.Ping(ctx, srcIPv4.String(), config.RouterIP.String())
}
if srcIPv6 != nil {
go util.Ping(ctx, srcIPv6.String(), config.RouterIP6.String())
}
if dockerSrcIPv4 != nil {
go util.Ping(ctx, dockerSrcIPv4.String(), config.DockerRouterIP.String())
}
}
}
func genICMPPacket(src net.IP, dst net.IP) ([]byte, error) {
buf := gopacket.NewSerializeBuffer()
icmpLayer := layers.ICMPv4{
TypeCode: layers.CreateICMPv4TypeCode(layers.ICMPv4TypeEchoRequest, 0),
Id: 3842,
Seq: 1,
}
ipLayer := layers.IPv4{
Version: 4,
SrcIP: src,
DstIP: dst,
Protocol: layers.IPProtocolICMPv4,
Flags: layers.IPv4DontFragment,
TTL: 64,
IHL: 5,
Id: 55664,
}
opts := gopacket.SerializeOptions{
FixLengths: true,
ComputeChecksums: true,
}
err := gopacket.SerializeLayers(buf, opts, &ipLayer, &icmpLayer)
if err != nil {
return nil, fmt.Errorf("failed to serialize icmp packet, err: %v", err)
}
return buf.Bytes(), nil
}
func genICMPPacketIPv6(src net.IP, dst net.IP) ([]byte, error) {
buf := gopacket.NewSerializeBuffer()
icmpLayer := layers.ICMPv6{
TypeCode: layers.CreateICMPv6TypeCode(layers.ICMPv6TypeEchoRequest, 0),
}
ipLayer := layers.IPv6{
Version: 6,
SrcIP: src,
DstIP: dst,
NextHeader: layers.IPProtocolICMPv6,
HopLimit: 255,
}
opts := gopacket.SerializeOptions{
FixLengths: true,
}
err := gopacket.SerializeLayers(buf, opts, &ipLayer, &icmpLayer)
if err != nil {
return nil, fmt.Errorf("failed to serialize icmp6 packet, err: %v", err)
}
return buf.Bytes(), nil
}
func (d *Device) Start(ctx context.Context) {
go d.readFromTun()
for i := 0; i < d.thread; i++ {
go d.parseIPHeader()
}
go d.tunInboundHandler(d.tunInbound, d.tunOutbound)
go d.writeToTun()
go heartbeats(d.tun, d.tunInbound)
select {
case err := <-d.chExit:
log.Errorf("device exit: %s", err.Error())
log.Errorf("Device exit: %v", err)
return
case <-ctx.Done():
return
@@ -370,26 +199,22 @@ func (d *Device) SetTunInboundHandler(handler func(tunInbound <-chan *DataElem,
}
func (h *tunHandler) HandleServer(ctx context.Context, tun net.Conn) {
go h.printRoute()
device := &Device{
tun: tun,
thread: MaxThread,
tunInboundRaw: make(chan *DataElem, MaxSize),
tunInbound: make(chan *DataElem, MaxSize),
tunOutbound: make(chan *DataElem, MaxSize),
chExit: h.chExit,
tun: tun,
tunInbound: make(chan *DataElem, MaxSize),
tunOutbound: make(chan *DataElem, MaxSize),
chExit: h.chExit,
}
device.SetTunInboundHandler(func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem) {
for {
for ctx.Err() == nil {
packetConn, err := (&net.ListenConfig{}).ListenPacket(ctx, "udp", h.node.Addr)
if err != nil {
log.Debugf("[udp] can not listen %s, err: %v", h.node.Addr, err)
log.Errorf("[UDP] Failed to listen %s: %v", h.node.Addr, err)
return
}
err = transportTun(ctx, tunInbound, tunOutbound, packetConn, h.routeNAT, h.routeConnNAT)
err = transportTunServer(ctx, tunInbound, tunOutbound, packetConn, h.routeMapUDP, h.routeMapTCP)
if err != nil {
log.Debugf("[tun] %s: %v", tun.LocalAddr(), err)
log.Errorf("[TUN] %s: %v", tun.LocalAddr(), err)
}
}
})
@@ -431,19 +256,17 @@ type udpElem struct {
}
type Peer struct {
conn net.PacketConn
thread int
conn net.PacketConn
connInbound chan *udpElem
parsedConnInfo chan *udpElem
connInbound chan *udpElem
tunInbound <-chan *DataElem
tunOutbound chan<- *DataElem
routeNAT *NAT
// map[srcIP]net.Conn
// routeConnNAT sync.Map
routeConnNAT *sync.Map
// map[srcIP.String()]net.Addr for udp
routeMapUDP *RouteMap
// map[srcIP.String()]net.Conn for tcp
routeMapTCP *sync.Map
errChan chan error
}
@@ -456,97 +279,84 @@ func (p *Peer) sendErr(err error) {
}
func (p *Peer) readFromConn() {
defer util.HandleCrash()
for {
b := config.LPool.Get().([]byte)[:]
n, srcAddr, err := p.conn.ReadFrom(b[:])
buf := config.LPool.Get().([]byte)[:]
n, from, err := p.conn.ReadFrom(buf[:])
if err != nil {
config.LPool.Put(buf[:])
p.sendErr(err)
return
}
src, dst, err := util.ParseIP(buf[:n])
if err != nil {
config.LPool.Put(buf[:])
log.Errorf("[TUN] Unknown packet: %v", err)
continue
}
if addr, loaded := p.routeMapUDP.LoadOrStore(src, from); loaded {
if addr.String() != from.String() {
p.routeMapUDP.Store(src, from)
log.Debugf("[TUN] Replace route map UDP: %s -> %s", src, from)
}
} else {
log.Debugf("[TUN] Add new route map UDP: %s -> %s", src, from)
}
p.connInbound <- &udpElem{
from: srcAddr,
data: b[:],
from: from,
data: buf[:],
length: n,
src: src,
dst: dst,
}
}
}
func (p *Peer) readFromTCPConn() {
for packet := range Chan {
defer util.HandleCrash()
for packet := range TCPPacketChan {
src, dst, err := util.ParseIP(packet.Data)
if err != nil {
log.Errorf("[TUN] Unknown packet")
config.LPool.Put(packet.Data[:])
continue
}
u := &udpElem{
data: packet.Data[:],
length: int(packet.DataLength),
src: src,
dst: dst,
}
b := packet.Data
if util.IsIPv4(packet.Data) {
// ipv4.ParseHeader
u.src = net.IPv4(b[12], b[13], b[14], b[15])
u.dst = net.IPv4(b[16], b[17], b[18], b[19])
} else if util.IsIPv6(packet.Data) {
// ipv6.ParseHeader
u.src = b[8:24]
u.dst = b[24:40]
} else {
log.Errorf("[tun-conn] unknown packet")
continue
}
log.Debugf("[tcpserver] udp-tun %s >>> %s length: %d", u.src, u.dst, u.length)
p.parsedConnInfo <- u
}
}
func (p *Peer) parseHeader() {
var firstIPv4, firstIPv6 = true, true
for e := range p.connInbound {
b := e.data[:e.length]
if util.IsIPv4(e.data[:e.length]) {
// ipv4.ParseHeader
e.src = net.IPv4(b[12], b[13], b[14], b[15])
e.dst = net.IPv4(b[16], b[17], b[18], b[19])
} else if util.IsIPv6(e.data[:e.length]) {
// ipv6.ParseHeader
e.src = b[:e.length][8:24]
e.dst = b[:e.length][24:40]
} else {
log.Errorf("[tun] unknown packet")
continue
}
if firstIPv4 || firstIPv6 {
if util.IsIPv4(e.data[:e.length]) {
firstIPv4 = false
} else {
firstIPv6 = false
}
if _, loaded := p.routeNAT.LoadOrStore(e.src, e.from); loaded {
log.Debugf("[tun] find route: %s -> %s", e.src, e.from)
} else {
log.Debugf("[tun] new route: %s -> %s", e.src, e.from)
}
}
p.parsedConnInfo <- e
log.Debugf("[TCP] udp-tun %s >>> %s length: %d", u.src, u.dst, u.length)
p.connInbound <- u
}
}
func (p *Peer) routePeer() {
for e := range p.parsedConnInfo {
if routeToAddr := p.routeNAT.RouteTo(e.dst); routeToAddr != nil {
log.Debugf("[tun] find route: %s -> %s", e.dst, routeToAddr)
defer util.HandleCrash()
for e := range p.connInbound {
if routeToAddr := p.routeMapUDP.RouteTo(e.dst); routeToAddr != nil {
log.Debugf("[UDP] Find UDP route to dst: %s -> %s", e.dst, routeToAddr)
_, err := p.conn.WriteTo(e.data[:e.length], routeToAddr)
config.LPool.Put(e.data[:])
if err != nil {
p.sendErr(err)
return
}
} else if conn, ok := p.routeConnNAT.Load(e.dst.String()); ok {
} else if conn, ok := p.routeMapTCP.Load(e.dst.String()); ok {
log.Debugf("[TCP] Find TCP route to dst: %s -> %s", e.dst.String(), conn.(net.Conn).RemoteAddr())
dgram := newDatagramPacket(e.data[:e.length])
if err := dgram.Write(conn.(net.Conn)); err != nil {
log.Debugf("[tcpserver] udp-tun %s <- %s : %s", conn.(net.Conn).RemoteAddr(), dgram.Addr(), err)
err := dgram.Write(conn.(net.Conn))
config.LPool.Put(e.data[:])
if err != nil {
log.Errorf("[TCP] udp-tun %s <- %s : %s", conn.(net.Conn).RemoteAddr(), dgram.Addr(), err)
p.sendErr(err)
return
}
config.LPool.Put(e.data[:])
} else {
log.Debugf("[TUN] Not found route to dst: %s, write to TUN device", e.dst.String())
p.tunOutbound <- &DataElem{
data: e.data,
length: e.length,
@@ -558,28 +368,30 @@ func (p *Peer) routePeer() {
}
func (p *Peer) routeTUN() {
defer util.HandleCrash()
for e := range p.tunInbound {
if addr := p.routeNAT.RouteTo(e.dst); addr != nil {
log.Debugf("[tun] find route: %s -> %s", e.dst, addr)
if addr := p.routeMapUDP.RouteTo(e.dst); addr != nil {
log.Debugf("[TUN] Find UDP route to dst: %s -> %s", e.dst, addr)
_, err := p.conn.WriteTo(e.data[:e.length], addr)
config.LPool.Put(e.data[:])
if err != nil {
log.Debugf("[tun] can not route: %s -> %s", e.dst, addr)
log.Debugf("[TUN] Failed wirte to route dst: %s -> %s", e.dst, addr)
p.sendErr(err)
return
}
} else if conn, ok := p.routeConnNAT.Load(e.dst.String()); ok {
} else if conn, ok := p.routeMapTCP.Load(e.dst.String()); ok {
log.Debugf("[TUN] Find TCP route to dst: %s -> %s", e.dst.String(), conn.(net.Conn).RemoteAddr())
dgram := newDatagramPacket(e.data[:e.length])
err := dgram.Write(conn.(net.Conn))
config.LPool.Put(e.data[:])
if err != nil {
log.Debugf("[tcpserver] udp-tun %s <- %s : %s", conn.(net.Conn).RemoteAddr(), dgram.Addr(), err)
log.Errorf("[TUN] Failed to write TCP %s <- %s : %s", conn.(net.Conn).RemoteAddr(), dgram.Addr(), err)
p.sendErr(err)
return
}
} else {
log.Errorf("[TUN] No route for src: %s -> dst: %s, drop it", e.src, e.dst)
config.LPool.Put(e.data[:])
log.Debug(fmt.Errorf("[tun] no route for %s -> %s", e.src, e.dst))
}
}
}
@@ -587,9 +399,6 @@ func (p *Peer) routeTUN() {
func (p *Peer) Start() {
go p.readFromConn()
go p.readFromTCPConn()
for i := 0; i < p.thread; i++ {
go p.parseHeader()
}
go p.routePeer()
go p.routeTUN()
}
@@ -598,17 +407,15 @@ func (p *Peer) Close() {
p.conn.Close()
}
func transportTun(ctx context.Context, tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem, packetConn net.PacketConn, nat *NAT, connNAT *sync.Map) error {
func transportTunServer(ctx context.Context, tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem, packetConn net.PacketConn, routeMapUDP *RouteMap, routeMapTCP *sync.Map) error {
p := &Peer{
conn: packetConn,
thread: MaxThread,
connInbound: make(chan *udpElem, MaxSize),
parsedConnInfo: make(chan *udpElem, MaxSize),
tunInbound: tunInbound,
tunOutbound: tunOutbound,
routeNAT: nat,
routeConnNAT: connNAT,
errChan: make(chan error, 2),
conn: packetConn,
connInbound: make(chan *udpElem, MaxSize),
tunInbound: tunInbound,
tunOutbound: tunOutbound,
routeMapUDP: routeMapUDP,
routeMapTCP: routeMapTCP,
errChan: make(chan error, 2),
}
defer p.Close()
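One Go idiom in the heartbeats rewrite above deserves a note: `for ; true; <-ticker.C` runs the body immediately and then once per tick, unlike `for range ticker.C`, which sleeps a full period before the first probe. In isolation (the diff uses a 5-second ticker; the stand-in ping below is invented):

package main

import (
    "context"
    "fmt"
    "time"
)

// pingAll stands in for the util.Ping fan-out in heartbeats above.
func pingAll() { fmt.Println("ping", time.Now().Format("15:04:05")) }

func heartbeats(ctx context.Context, period time.Duration) {
    ticker := time.NewTicker(period)
    defer ticker.Stop()
    // first iteration fires without waiting for a tick
    for ; true; <-ticker.C {
        select {
        case <-ctx.Done():
            return
        default:
        }
        pingAll()
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 3500*time.Millisecond)
    defer cancel()
    heartbeats(ctx, time.Second) // prints at ~0s, 1s, 2s, 3s, then stops
}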

View File

@@ -2,28 +2,28 @@ package core
import (
"context"
"errors"
"fmt"
"net"
"time"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func (h *tunHandler) HandleClient(ctx context.Context, tun net.Conn) {
defer tun.Close()
remoteAddr, err := net.ResolveUDPAddr("udp", h.node.Remote)
if err != nil {
log.Errorf("[tun] %s: remote addr: %v", tun.LocalAddr(), err)
log.Errorf("[TUN-CLIENT] Failed to resolve udp addr %s: %v", h.node.Remote, err)
return
}
in := make(chan *DataElem, MaxSize)
out := make(chan *DataElem, MaxSize)
engine := h.node.Get(config.ConfigKubeVPNTransportEngine)
endpoint := NewTunEndpoint(ctx, tun, uint32(config.DefaultMTU), config.Engine(engine), in, out)
stack := NewStack(ctx, endpoint)
go stack.Wait()
defer util.SafeClose(in)
defer util.SafeClose(out)
d := &ClientDevice{
tun: tun,
@@ -32,22 +32,16 @@ func (h *tunHandler) HandleClient(ctx context.Context, tun net.Conn) {
chExit: h.chExit,
}
d.SetTunInboundHandler(func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem) {
for {
select {
case <-ctx.Done():
return
default:
}
for ctx.Err() == nil {
packetConn, err := getRemotePacketConn(ctx, h.chain)
if err != nil {
log.Debugf("[tun-client] %s - %s: %s", tun.LocalAddr(), remoteAddr, err)
time.Sleep(time.Second * 2)
log.Debugf("[TUN-CLIENT] Failed to get remote conn from %s -> %s: %s", tun.LocalAddr(), remoteAddr, err)
time.Sleep(time.Millisecond * 200)
continue
}
err = transportTunClient(ctx, tunInbound, tunOutbound, packetConn, remoteAddr)
if err != nil {
log.Debugf("[tun-client] %s: %v", tun.LocalAddr(), err)
log.Debugf("[TUN-CLIENT] %s: %v", tun.LocalAddr(), err)
}
}
})
@@ -87,29 +81,32 @@ func transportTunClient(ctx context.Context, tunInbound <-chan *DataElem, tunOut
defer packetConn.Close()
go func() {
defer util.HandleCrash()
for e := range tunInbound {
if e.src.Equal(e.dst) {
tunOutbound <- e
util.SafeWrite(tunOutbound, e)
continue
}
_, err := packetConn.WriteTo(e.data[:e.length], remoteAddr)
config.LPool.Put(e.data[:])
if err != nil {
errChan <- err
util.SafeWrite(errChan, errors.Wrap(err, fmt.Sprintf("failed to write packet to remote %s", remoteAddr)))
return
}
}
}()
go func() {
defer util.HandleCrash()
for {
b := config.LPool.Get().([]byte)[:]
n, _, err := packetConn.ReadFrom(b[:])
buf := config.LPool.Get().([]byte)[:]
n, _, err := packetConn.ReadFrom(buf[:])
if err != nil {
errChan <- err
config.LPool.Put(buf[:])
util.SafeWrite(errChan, errors.Wrap(err, fmt.Sprintf("failed to read packet from remote %s", remoteAddr)))
return
}
tunOutbound <- &DataElem{data: b[:], length: n}
util.SafeWrite(tunOutbound, &DataElem{data: buf[:], length: n})
}
}()
@@ -132,11 +129,13 @@ type ClientDevice struct {
func (d *ClientDevice) Start(ctx context.Context) {
go d.tunInboundHandler(d.tunInbound, d.tunOutbound)
go heartbeats(d.tun, d.tunInbound)
go heartbeats(ctx, d.tun)
go d.readFromTun()
go d.writeToTun()
select {
case err := <-d.chExit:
log.Errorf("[tun-client]: %v", err)
log.Errorf("[TUN-CLIENT]: %v", err)
return
case <-ctx.Done():
return
@@ -146,3 +145,43 @@ func (d *ClientDevice) Start(ctx context.Context) {
func (d *ClientDevice) SetTunInboundHandler(handler func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem)) {
d.tunInboundHandler = handler
}
func (d *ClientDevice) readFromTun() {
defer util.HandleCrash()
for {
buf := config.LPool.Get().([]byte)[:]
n, err := d.tun.Read(buf[:])
if err != nil {
util.SafeWrite(d.chExit, err)
config.LPool.Put(buf[:])
return
}
if n == 0 {
config.LPool.Put(buf[:])
continue
}
// Try to determine network protocol number, default zero.
var src, dst net.IP
src, dst, err = util.ParseIP(buf[:n])
if err != nil {
log.Debugf("[TUN-GVISOR] Unknown packet: %v", err)
config.LPool.Put(buf[:])
continue
}
log.Tracef("[TUN-RAW] SRC: %s, DST: %s, Length: %d", src.String(), dst, n)
util.SafeWrite(d.tunInbound, NewDataElem(buf[:], n, src, dst))
}
}
func (d *ClientDevice) writeToTun() {
defer util.HandleCrash()
for e := range d.tunOutbound {
_, err := d.tun.Write(e.data[:e.length])
config.LPool.Put(e.data[:])
if err != nil {
util.SafeWrite(d.chExit, err)
return
}
}
}
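config.LPool, used on every hot path above, is — as assumed here — a sync.Pool of large byte slices; the discipline is exactly one Put per Get on every exit path. The diffs Put manually rather than defer because a successful read hands the buffer to another goroutine; the sketch below keeps the defer form for the simple own-the-buffer case (pool size is an assumption):

package main

import (
    "fmt"
    "sync"
)

// LPool mirrors the repo's config.LPool as assumed: a pool of large,
// fixed-size byte slices (the real size is defined in pkg/config).
var LPool = sync.Pool{New: func() any { return make([]byte, 64<<10) }}

// handleOnce: one Get, one Put, no matter which branch exits.
func handleOnce(read func([]byte) (int, error)) error {
    buf := LPool.Get().([]byte)
    defer LPool.Put(buf) // safe here because buf never escapes
    n, err := read(buf)
    if err != nil {
        return err
    }
    if n == 0 {
        return nil
    }
    fmt.Printf("handled %d bytes\n", n)
    return nil
}

func main() {
    _ = handleOnce(func(b []byte) (int, error) { return copy(b, "payload"), nil })
}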

View File

@@ -6,7 +6,7 @@ import (
"io"
"net"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
)
type datagramPacket struct {
@@ -61,10 +61,10 @@ func readDatagramPacketServer(r io.Reader, b []byte) (*datagramPacket, error) {
}
func (addr *datagramPacket) Write(w io.Writer) error {
b := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(b[:])
binary.BigEndian.PutUint16(b[:2], uint16(len(addr.Data)))
n := copy(b[2:], addr.Data)
_, err := w.Write(b[:n+2])
buf := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(buf[:])
binary.BigEndian.PutUint16(buf[:2], uint16(len(addr.Data)))
n := copy(buf[2:], addr.Data)
_, err := w.Write(buf[:n+2])
return err
}
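The framing above is a plain 2-byte big-endian length prefix. A self-contained round-trip sketch — note that the header and the bytes actually written must agree, or the stream desynchronizes (names here are illustrative):

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "io"
)

// writeDatagram frames payload as [uint16 big-endian length][payload].
func writeDatagram(w io.Writer, payload []byte) error {
    buf := make([]byte, 2+len(payload))
    binary.BigEndian.PutUint16(buf[:2], uint16(len(payload)))
    copy(buf[2:], payload)
    _, err := w.Write(buf)
    return err
}

// readDatagram reverses the framing: read the 2-byte header, then the body.
func readDatagram(r io.Reader) ([]byte, error) {
    var header [2]byte
    if _, err := io.ReadFull(r, header[:]); err != nil {
        return nil, err
    }
    body := make([]byte, binary.BigEndian.Uint16(header[:]))
    if _, err := io.ReadFull(r, body); err != nil {
        return nil, err
    }
    return body, nil
}

func main() {
    var stream bytes.Buffer
    _ = writeDatagram(&stream, []byte("hello"))
    body, _ := readDatagram(&stream)
    fmt.Printf("%s\n", body) // hello
}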

View File

@@ -12,7 +12,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/genericiooptions"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubectl/pkg/cmd/exec"
@@ -32,11 +32,11 @@ type CopyOptions struct {
args []string
genericclioptions.IOStreams
genericiooptions.IOStreams
}
// NewCopyOptions creates the options for copy
func NewCopyOptions(ioStreams genericclioptions.IOStreams) *CopyOptions {
func NewCopyOptions(ioStreams genericiooptions.IOStreams) *CopyOptions {
return &CopyOptions{
IOStreams: ioStreams,
}
@@ -149,7 +149,7 @@ func (o *CopyOptions) Run() error {
func (o *CopyOptions) checkDestinationIsDir(dest fileSpec) error {
options := &exec.ExecOptions{
StreamOptions: exec.StreamOptions{
IOStreams: genericclioptions.IOStreams{
IOStreams: genericiooptions.IOStreams{
Out: bytes.NewBuffer([]byte{}),
ErrOut: bytes.NewBuffer([]byte{}),
},
@@ -199,7 +199,7 @@ func (o *CopyOptions) copyToPod(src, dest fileSpec, options *exec.ExecOptions) e
}
options.StreamOptions = exec.StreamOptions{
IOStreams: genericclioptions.IOStreams{
IOStreams: genericiooptions.IOStreams{
In: reader,
Out: o.Out,
ErrOut: o.ErrOut,
@@ -246,7 +246,7 @@ func (t *TarPipe) initReadFrom(n uint64) {
t.reader, t.outStream = io.Pipe()
options := &exec.ExecOptions{
StreamOptions: exec.StreamOptions{
IOStreams: genericclioptions.IOStreams{
IOStreams: genericiooptions.IOStreams{
In: nil,
Out: t.outStream,
ErrOut: t.o.Out,
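The genericclioptions-to-genericiooptions churn above is mechanical: the IOStreams struct moved packages with the same field shape. Minimal usage:

package main

import (
    "os"

    "k8s.io/cli-runtime/pkg/genericiooptions"
)

func main() {
    // same fields as the old genericclioptions.IOStreams; only the
    // import path changed
    streams := genericiooptions.IOStreams{
        In:     os.Stdin,
        Out:    os.Stdout,
        ErrOut: os.Stderr,
    }
    streams.Out.Write([]byte("kubectl-style output goes through streams.Out\n"))
}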

View File

@@ -1,39 +1,39 @@
package action
import (
"fmt"
"context"
"io"
log "github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
"github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func (svr *Server) Clone(req *rpc.CloneRequest, resp rpc.Daemon_CloneServer) error {
func (svr *Server) Clone(req *rpc.CloneRequest, resp rpc.Daemon_CloneServer) (err error) {
defer func() {
util.InitLoggerForServer(true)
log.SetOutput(svr.LogFile)
log.SetLevel(log.DebugLevel)
config.Debug = false
}()
config.Debug = req.Level == int32(log.DebugLevel)
out := io.MultiWriter(newCloneWarp(resp), svr.LogFile)
util.InitLoggerForClient(config.Debug)
log.SetOutput(out)
log.SetLevel(log.InfoLevel)
var sshConf = util.ParseSshFromRPC(req.SshJump)
var sshConf = ssh.ParseSshFromRPC(req.SshJump)
connReq := &rpc.ConnectRequest{
KubeconfigBytes: req.KubeconfigBytes,
Namespace: req.Namespace,
ExtraCIDR: req.ExtraCIDR,
ExtraDomain: req.ExtraDomain,
UseLocalDNS: req.UseLocalDNS,
ExtraRoute: req.ExtraRoute,
Engine: req.Engine,
SshJump: req.SshJump,
TransferImage: req.TransferImage,
Image: req.Image,
ImagePullSecretName: req.ImagePullSecretName,
Level: req.Level,
OriginKubeconfigPath: req.OriginKubeconfigPath,
}
@@ -42,29 +42,20 @@ func (svr *Server) Clone(req *rpc.CloneRequest, resp rpc.Daemon_CloneServer) err
if err != nil {
return err
}
var msg *rpc.ConnectResponse
for {
msg, err = connResp.Recv()
if err == io.EOF {
break
} else if err == nil {
fmt.Fprint(out, msg.Message)
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
return nil
} else {
return err
}
err = util.PrintGRPCStream[rpc.ConnectResponse](connResp, out)
if err != nil {
return err
}
util.InitLoggerForClient(config.Debug)
log.SetOutput(out)
options := &handler.CloneOptions{
Namespace: req.Namespace,
Headers: req.Headers,
Workloads: req.Workloads,
ExtraCIDR: req.ExtraCIDR,
ExtraDomain: req.ExtraDomain,
UseLocalDNS: req.UseLocalDNS,
Engine: config.Engine(req.Engine),
Namespace: req.Namespace,
Headers: req.Headers,
Workloads: req.Workloads,
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
Engine: config.Engine(req.Engine),
OriginKubeconfigPath: req.OriginKubeconfigPath,
TargetKubeconfig: req.TargetKubeconfig,
TargetNamespace: req.TargetNamespace,
@@ -72,6 +63,9 @@ func (svr *Server) Clone(req *rpc.CloneRequest, resp rpc.Daemon_CloneServer) err
TargetImage: req.TargetImage,
TargetRegistry: req.TargetRegistry,
IsChangeTargetRegistry: req.IsChangeTargetRegistry,
TargetWorkloadNames: map[string]string{},
LocalDir: req.LocalDir,
RemoteDir: req.RemoteDir,
}
file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
if err != nil {
@@ -82,23 +76,34 @@ func (svr *Server) Clone(req *rpc.CloneRequest, resp rpc.Daemon_CloneServer) err
Name: "kubeconfig",
DefValue: file,
})
sshCtx, sshFunc := context.WithCancel(context.Background())
defer func() {
if err != nil {
_ = options.Cleanup()
sshFunc()
}
}()
options.AddRollbackFunc(func() error {
sshFunc()
return nil
})
var path string
path, err = handler.SshJump(resp.Context(), sshConf, flags, false)
path, err = ssh.SshJump(sshCtx, sshConf, flags, false)
if err != nil {
return err
}
f := InitFactoryByPath(path, req.Namespace)
f := util.InitFactoryByPath(path, req.Namespace)
err = options.InitClient(f)
if err != nil {
log.Errorf("init client failed: %v", err)
log.Errorf("Failed to init client: %v", err)
return err
}
config.Image = req.Image
log.Infof("clone workloads...")
err = options.DoClone(resp.Context())
log.Infof("Clone workloads...")
options.SetContext(sshCtx)
err = options.DoClone(resp.Context(), []byte(req.KubeconfigBytes))
if err != nil {
log.Errorf("clone workloads failed: %v", err)
_ = options.Cleanup()
log.Errorf("Clone workloads failed: %v", err)
return err
}
svr.clone = options
@@ -110,10 +115,10 @@ type cloneWarp struct {
}
func (r *cloneWarp) Write(p []byte) (n int, err error) {
err = r.server.Send(&rpc.CloneResponse{
_ = r.server.Send(&rpc.CloneResponse{
Message: string(p),
})
return len(p), err
return len(p), nil
}
func newCloneWarp(server rpc.Daemon_CloneServer) io.Writer {

View File

@@ -4,17 +4,17 @@ import (
"context"
"github.com/spf13/pflag"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
var CancelFunc = make(map[string]context.CancelFunc)
func (svr *Server) ConfigAdd(ctx context.Context, req *rpc.ConfigAddRequest) (*rpc.ConfigAddResponse, error) {
var sshConf = util.ParseSshFromRPC(req.SshJump)
file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
func (svr *Server) ConfigAdd(ctx context.Context, req *rpc.ConfigAddRequest) (resp *rpc.ConfigAddResponse, err error) {
var file string
file, err = util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
if err != nil {
return nil, err
}
@@ -24,13 +24,19 @@ func (svr *Server) ConfigAdd(ctx context.Context, req *rpc.ConfigAddRequest) (*r
DefValue: file,
})
sshCtx, sshCancel := context.WithCancel(context.Background())
defer func() {
if err != nil {
sshCancel()
}
}()
var path string
path, err = handler.SshJump(sshCtx, sshConf, flags, true)
CancelFunc[path] = sshCancel
var sshConf = ssh.ParseSshFromRPC(req.SshJump)
path, err = ssh.SshJump(sshCtx, sshConf, flags, true)
if err != nil {
return nil, err
}
CancelFunc[path] = sshCancel
return &rpc.ConfigAddResponse{ClusterID: path}, nil
}
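The deferred cancel-only-on-error pattern above is easy to misread, so here it is in isolation: the context dies with any error return, but on success its cancel func is parked in a registry — as ConfigAdd does with CancelFunc[path] — for a later teardown call. The sshJump stand-in and path value are invented:

package main

import (
    "context"
    "fmt"
)

var cancelFunc = map[string]context.CancelFunc{}

// sshJump stands in for ssh.SshJump: it would start the jump and return
// a kubeconfig path whose lifetime is tied to ctx.
func sshJump(ctx context.Context) (string, error) { return "/tmp/kubeconfig-sketch", nil }

func configAdd() (path string, err error) {
    sshCtx, sshCancel := context.WithCancel(context.Background())
    defer func() {
        if err != nil {
            sshCancel() // tear down only on failure
        }
    }()
    path, err = sshJump(sshCtx)
    if err != nil {
        return "", err
    }
    cancelFunc[path] = sshCancel // success: caller owns the teardown
    return path, nil
}

func main() {
    path, err := configAdd()
    fmt.Println(path, err)
    cancelFunc[path]() // eventual ConfigRemove-style teardown
}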

View File

@@ -8,22 +8,24 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"k8s.io/utils/pointer"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
"github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func (svr *Server) ConnectFork(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectForkServer) error {
func (svr *Server) ConnectFork(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectForkServer) (err error) {
defer func() {
util.InitLoggerForServer(true)
log.SetOutput(svr.LogFile)
log.SetLevel(log.DebugLevel)
config.Debug = false
}()
config.Debug = req.Level == int32(log.DebugLevel)
out := io.MultiWriter(newConnectForkWarp(resp), svr.LogFile)
util.InitLoggerForClient(config.Debug)
log.SetOutput(out)
log.SetLevel(log.InfoLevel)
if !svr.IsSudo {
return svr.redirectConnectForkToSudoDaemon(req, resp)
}
@@ -33,23 +35,13 @@ func (svr *Server) ConnectFork(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectF
Namespace: req.Namespace,
Headers: req.Headers,
Workloads: req.Workloads,
ExtraCIDR: req.ExtraCIDR,
ExtraDomain: req.ExtraDomain,
UseLocalDNS: req.UseLocalDNS,
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
Engine: config.Engine(req.Engine),
OriginKubeconfigPath: req.OriginKubeconfigPath,
Lock: &svr.Lock,
ImagePullSecretName: req.ImagePullSecretName,
}
var sshConf = util.ParseSshFromRPC(req.SshJump)
var transferImage = req.TransferImage
go util.StartupPProf(config.PProfPort)
defaultlog.Default().SetOutput(io.Discard)
if transferImage {
err := util.TransferImage(ctx, sshConf, config.OriginImage, req.Image, out)
if err != nil {
return err
}
}
file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
if err != nil {
return err
@@ -65,20 +57,23 @@ func (svr *Server) ConnectFork(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectF
sshCancel()
return nil
})
defer func() {
if err != nil {
connect.Cleanup()
sshCancel()
}
}()
var path string
path, err = handler.SshJump(sshCtx, sshConf, flags, false)
path, err = ssh.SshJump(sshCtx, ssh.ParseSshFromRPC(req.SshJump), flags, false)
if err != nil {
return err
}
err = connect.InitClient(InitFactoryByPath(path, req.Namespace))
err = connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
if err != nil {
return err
}
err = connect.PreCheckResource()
if err != nil {
return err
}
_, err = connect.RentInnerIP(ctx)
err = connect.GetIPFromContext(ctx)
if err != nil {
return err
}
@@ -86,16 +81,18 @@ func (svr *Server) ConnectFork(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectF
config.Image = req.Image
err = connect.DoConnect(sshCtx, true)
if err != nil {
log.Errorf("do connect error: %v", err)
connect.Cleanup()
log.Errorf("Do connect error: %v", err)
return err
}
svr.secondaryConnect = append(svr.secondaryConnect, connect)
if resp.Context().Err() != nil {
return resp.Context().Err()
}
svr.secondaryConnect = append(svr.secondaryConnect, connect)
return nil
}
func (svr *Server) redirectConnectForkToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServer) error {
func (svr *Server) redirectConnectForkToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServer) (err error) {
cli := svr.GetClient(true)
if cli == nil {
return fmt.Errorf("sudo daemon not start")
@@ -104,13 +101,11 @@ func (svr *Server) redirectConnectForkToSudoDaemon(req *rpc.ConnectRequest, resp
Namespace: req.Namespace,
Headers: req.Headers,
Workloads: req.Workloads,
ExtraCIDR: req.ExtraCIDR,
ExtraDomain: req.ExtraDomain,
UseLocalDNS: req.UseLocalDNS,
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
Engine: config.Engine(req.Engine),
OriginKubeconfigPath: req.OriginKubeconfigPath,
}
var sshConf = util.ParseSshFromRPC(req.SshJump)
var sshConf = ssh.ParseSshFromRPC(req.SshJump)
file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
if err != nil {
return err
@@ -125,34 +120,35 @@ func (svr *Server) redirectConnectForkToSudoDaemon(req *rpc.ConnectRequest, resp
sshCancel()
return nil
})
defer func() {
if err != nil {
sshCancel()
}
}()
var path string
path, err = handler.SshJump(sshCtx, sshConf, flags, true)
path, err = ssh.SshJump(sshCtx, sshConf, flags, true)
if err != nil {
return err
}
err = connect.InitClient(InitFactoryByPath(path, req.Namespace))
if err != nil {
return err
}
err = connect.PreCheckResource()
err = connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
if err != nil {
return err
}
for _, options := range svr.secondaryConnect {
var isSameCluster bool
isSameCluster, err = util.IsSameCluster(
isSameCluster, _ := util.IsSameCluster(
sshCtx,
options.GetClientset().CoreV1().ConfigMaps(options.Namespace), options.Namespace,
connect.GetClientset().CoreV1().ConfigMaps(connect.Namespace), connect.Namespace,
)
if err == nil && isSameCluster && options.Equal(connect) {
if isSameCluster {
// same cluster, do nothing
log.Infof("already connect to cluster")
log.Infof("Connected with cluster")
return nil
}
}
ctx, err := connect.RentInnerIP(resp.Context())
ctx, err := connect.RentIP(resp.Context())
if err != nil {
return err
}
@@ -161,50 +157,15 @@ func (svr *Server) redirectConnectForkToSudoDaemon(req *rpc.ConnectRequest, resp
if err != nil {
return err
}
for {
recv, err := connResp.Recv()
if err == io.EOF {
break
} else if err != nil {
return err
}
err = resp.Send(recv)
if err != nil {
return err
}
err = util.CopyGRPCStream[rpc.ConnectResponse](connResp, resp)
if err != nil {
return err
}
if resp.Context().Err() != nil {
return resp.Context().Err()
}
svr.secondaryConnect = append(svr.secondaryConnect, connect)
if req.Foreground {
<-resp.Context().Done()
for i := 0; i < len(svr.secondaryConnect); i++ {
if svr.secondaryConnect[i] == connect {
cli := svr.GetClient(false)
if cli == nil {
return fmt.Errorf("sudo daemon not start")
}
disconnect, err := cli.Disconnect(context.Background(), &rpc.DisconnectRequest{
ID: pointer.Int32(int32(i)),
})
if err != nil {
log.Errorf("disconnect error: %v", err)
return err
}
for {
recv, err := disconnect.Recv()
if err == io.EOF {
break
} else if err != nil {
return err
}
log.Info(recv.Message)
}
break
}
}
}
return nil
}
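Both redirect paths replace the hand-rolled Recv/EOF/Send loop with util.CopyGRPCStream. The helper's definition is not shown in this diff; below is a sketch of what such a generic forwarder plausibly looks like, demoed against in-memory fakes rather than real gRPC streams:

package main

import (
	"fmt"
	"io"
)

// receiver and sender model the two halves being bridged: a gRPC
// client stream to Recv from and a server stream to Send to.
type receiver[T any] interface{ Recv() (*T, error) }
type sender[T any] interface{ Send(*T) error }

// copyGRPCStream forwards every message until the upstream reports
// io.EOF, returning the first real error from either side.
func copyGRPCStream[T any](in receiver[T], out sender[T]) error {
	for {
		msg, err := in.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		if err = out.Send(msg); err != nil {
			return err
		}
	}
}

type fakeIn struct{ msgs []string }

func (f *fakeIn) Recv() (*string, error) {
	if len(f.msgs) == 0 {
		return nil, io.EOF
	}
	m := f.msgs[0]
	f.msgs = f.msgs[1:]
	return &m, nil
}

type fakeOut struct{}

func (fakeOut) Send(m *string) error { fmt.Println(*m); return nil }

func main() {
	_ = copyGRPCStream[string](&fakeIn{msgs: []string{"hello", "world"}}, fakeOut{})
}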
@@ -213,10 +174,10 @@ type connectForkWarp struct {
}
func (r *connectForkWarp) Write(p []byte) (n int, err error) {
err = r.server.Send(&rpc.ConnectResponse{
_ = r.server.Send(&rpc.ConnectResponse{
Message: string(p),
})
return len(p), err
return len(p), nil
}
func newConnectForkWarp(server rpc.Daemon_ConnectForkServer) io.Writer {


@@ -4,67 +4,64 @@ import (
"context"
"fmt"
"io"
defaultlog "log"
"os"
golog "log"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/rest"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/utils/pointer"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
"github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func (svr *Server) Connect(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServer) error {
func (svr *Server) Connect(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServer) (e error) {
defer func() {
util.InitLoggerForServer(true)
log.SetOutput(svr.LogFile)
log.SetLevel(log.DebugLevel)
config.Debug = false
}()
config.Debug = req.Level == int32(log.DebugLevel)
out := io.MultiWriter(newWarp(resp), svr.LogFile)
util.InitLoggerForClient(config.Debug)
log.SetOutput(out)
log.SetLevel(log.InfoLevel)
if !svr.IsSudo {
return svr.redirectToSudoDaemon(req, resp)
}
ctx := resp.Context()
if !svr.t.IsZero() {
log.Debugf("already connect to another cluster, you can disconnect this connect by command `kubevpn disconnect`")
s := "Already connected to cluster in full mode, you can use options `--lite` to connect to another cluster"
log.Debugf(s)
// TODO: define a dedicated "already connected" error?
return status.Error(codes.AlreadyExists, "already connect to another cluster, you can disconnect this connect by command `kubevpn disconnect`")
return status.Error(codes.AlreadyExists, s)
}
defer func() {
if e != nil || ctx.Err() != nil {
if svr.connect != nil {
svr.connect.Cleanup()
svr.connect = nil
}
svr.t = time.Time{}
}
}()
svr.t = time.Now()
svr.connect = &handler.ConnectOptions{
Namespace: req.Namespace,
Headers: req.Headers,
PortMap: req.PortMap,
Workloads: req.Workloads,
ExtraCIDR: req.ExtraCIDR,
ExtraDomain: req.ExtraDomain,
UseLocalDNS: req.UseLocalDNS,
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
Engine: config.Engine(req.Engine),
OriginKubeconfigPath: req.OriginKubeconfigPath,
Lock: &svr.Lock,
ImagePullSecretName: req.ImagePullSecretName,
}
var sshConf = util.ParseSshFromRPC(req.SshJump)
var transferImage = req.TransferImage
go util.StartupPProf(config.PProfPort)
defaultlog.Default().SetOutput(io.Discard)
if transferImage {
err := util.TransferImage(ctx, sshConf, config.OriginImage, req.Image, out)
if err != nil {
return err
}
}
golog.Default().SetOutput(io.Discard)
file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
if err != nil {
return err
@@ -80,20 +77,21 @@ func (svr *Server) Connect(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServe
sshCancel()
return nil
})
defer func() {
if e != nil {
sshCancel()
}
}()
var path string
path, err = handler.SshJump(sshCtx, sshConf, flags, false)
path, err = ssh.SshJump(sshCtx, ssh.ParseSshFromRPC(req.SshJump), flags, false)
if err != nil {
return err
}
err = svr.connect.InitClient(InitFactoryByPath(path, req.Namespace))
err = svr.connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
if err != nil {
return err
}
err = svr.connect.PreCheckResource()
if err != nil {
return err
}
_, err = svr.connect.RentInnerIP(ctx)
err = svr.connect.GetIPFromContext(ctx)
if err != nil {
return err
}
@@ -101,7 +99,7 @@ func (svr *Server) Connect(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServe
config.Image = req.Image
err = svr.connect.DoConnect(sshCtx, false)
if err != nil {
log.Errorf("do connect error: %v", err)
log.Errorf("Failed to connect: %v", err)
svr.connect.Cleanup()
svr.connect = nil
svr.t = time.Time{}
@@ -110,7 +108,7 @@ func (svr *Server) Connect(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServe
return nil
}
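The AlreadyExists guard above returns a typed gRPC status instead of a bare error, which lets callers branch on the code rather than match message text. A small self-contained sketch of producing and inspecting such an error:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// guard mimics the connect check: a typed status signals "already
// connected" in a machine-readable way.
func guard(alreadyConnected bool) error {
	if alreadyConnected {
		return status.Error(codes.AlreadyExists, "already connected to a cluster in full mode")
	}
	return nil
}

func main() {
	err := guard(true)
	if s, ok := status.FromError(err); ok && s.Code() == codes.AlreadyExists {
		fmt.Println("client can suggest --lite instead of retrying:", s.Message())
	}
}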
func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServer) error {
func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServer) (e error) {
cli := svr.GetClient(true)
if cli == nil {
return fmt.Errorf("sudo daemon not start")
@@ -118,14 +116,13 @@ func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon
connect := &handler.ConnectOptions{
Namespace: req.Namespace,
Headers: req.Headers,
PortMap: req.PortMap,
Workloads: req.Workloads,
ExtraCIDR: req.ExtraCIDR,
ExtraDomain: req.ExtraDomain,
UseLocalDNS: req.UseLocalDNS,
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
Engine: config.Engine(req.Engine),
OriginKubeconfigPath: req.OriginKubeconfigPath,
}
var sshConf = util.ParseSshFromRPC(req.SshJump)
var sshConf = ssh.ParseSshFromRPC(req.SshJump)
file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
if err != nil {
return err
@@ -140,34 +137,35 @@ func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon
sshCancel()
return nil
})
defer func() {
if e != nil {
sshCancel()
}
}()
var path string
path, err = handler.SshJump(sshCtx, sshConf, flags, true)
path, err = ssh.SshJump(sshCtx, sshConf, flags, true)
if err != nil {
return err
}
err = connect.InitClient(InitFactoryByPath(path, req.Namespace))
if err != nil {
return err
}
err = connect.PreCheckResource()
err = connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
if err != nil {
return err
}
if svr.connect != nil {
var isSameCluster bool
isSameCluster, err = util.IsSameCluster(
isSameCluster, _ := util.IsSameCluster(
sshCtx,
svr.connect.GetClientset().CoreV1().ConfigMaps(svr.connect.Namespace), svr.connect.Namespace,
connect.GetClientset().CoreV1().ConfigMaps(connect.Namespace), connect.Namespace,
)
if err == nil && isSameCluster && svr.connect.Equal(connect) {
if isSameCluster {
// same cluster, do nothing
log.Infof("already connect to cluster")
log.Infof("Connected to cluster")
return nil
}
}
ctx, err := connect.RentInnerIP(resp.Context())
ctx, err := connect.RentIP(resp.Context())
if err != nil {
return err
}
@@ -176,49 +174,17 @@ func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon
if err != nil {
return err
}
for {
recv, err := connResp.Recv()
if err == io.EOF {
break
} else if err != nil {
return err
}
err = resp.Send(recv)
if err != nil {
return err
}
err = util.CopyGRPCStream[rpc.ConnectResponse](connResp, resp)
if err != nil {
return err
}
if resp.Context().Err() != nil {
return resp.Context().Err()
}
svr.t = time.Now()
svr.connect = connect
// hangup
if req.Foreground {
<-resp.Context().Done()
client := svr.GetClient(false)
if client == nil {
return fmt.Errorf("daemon not start")
}
disconnect, err := client.Disconnect(context.Background(), &rpc.DisconnectRequest{
ID: pointer.Int32(int32(0)),
})
if err != nil {
log.Errorf("disconnect error: %v", err)
return err
}
for {
recv, err := disconnect.Recv()
if err == io.EOF {
break
} else if err != nil {
log.Error(err)
return err
}
log.Info(recv.Message)
}
}
return nil
}
@@ -227,52 +193,12 @@ type warp struct {
}
func (r *warp) Write(p []byte) (n int, err error) {
err = r.server.Send(&rpc.ConnectResponse{
_ = r.server.Send(&rpc.ConnectResponse{
Message: string(p),
})
return len(p), err
return len(p), nil
}
func newWarp(server rpc.Daemon_ConnectServer) io.Writer {
return &warp{server: server}
}
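newWarp (like newConnectForkWarp and newDisconnectWarp) adapts a gRPC server stream into an io.Writer so logrus output can be tee'd through io.MultiWriter to both the caller and the daemon log file; the rewritten Write methods deliberately discard Send errors so a dropped stream can never abort the RPC mid-flight. A self-contained sketch of the same adapter idea, with a hypothetical Sender standing in for the generated stream type:

package main

import (
	"fmt"
	"io"
)

type Sender interface{ Send(msg string) error }

// streamWriter forwards each Write to the stream. Send failures are
// swallowed on purpose: logging must never fail the surrounding RPC.
type streamWriter struct{ s Sender }

func (w *streamWriter) Write(p []byte) (int, error) {
	_ = w.s.Send(string(p))
	return len(p), nil
}

type stdoutSender struct{}

func (stdoutSender) Send(msg string) error { _, err := fmt.Print(msg); return err }

func main() {
	out := io.MultiWriter(&streamWriter{s: stdoutSender{}}, io.Discard)
	fmt.Fprintln(out, "tee'd to the client stream and the daemon log")
}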
func InitFactory(kubeconfigBytes string, ns string) cmdutil.Factory {
configFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
configFlags.WrapConfigFn = func(c *rest.Config) *rest.Config {
if path, ok := os.LookupEnv(config.EnvSSHJump); ok {
bytes, err := os.ReadFile(path)
cmdutil.CheckErr(err)
var conf *restclient.Config
conf, err = clientcmd.RESTConfigFromKubeConfig(bytes)
cmdutil.CheckErr(err)
return conf
}
return c
}
// todo optimize here
temp, err := os.CreateTemp("", "*.json")
if err != nil {
return nil
}
err = temp.Close()
if err != nil {
return nil
}
err = os.WriteFile(temp.Name(), []byte(kubeconfigBytes), os.ModePerm)
if err != nil {
return nil
}
configFlags.KubeConfig = pointer.String(temp.Name())
configFlags.Namespace = pointer.String(ns)
matchVersionFlags := cmdutil.NewMatchVersionFlags(configFlags)
return cmdutil.NewFactory(matchVersionFlags)
}
func InitFactoryByPath(kubeconfig string, ns string) cmdutil.Factory {
configFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
configFlags.KubeConfig = pointer.String(kubeconfig)
configFlags.Namespace = pointer.String(ns)
matchVersionFlags := cmdutil.NewMatchVersionFlags(configFlags)
return cmdutil.NewFactory(matchVersionFlags)
}
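A hypothetical usage sketch for the factory InitFactoryByPath returns: cmdutil.Factory doubles as a RESTClientGetter, so a typed clientset is one ToRESTConfig call away (the kubeconfig path and namespace below are placeholders):

package main

import (
	"fmt"

	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/client-go/kubernetes"
	cmdutil "k8s.io/kubectl/pkg/cmd/util"
	"k8s.io/utils/pointer"
)

func main() {
	// Mirrors InitFactoryByPath with placeholder inputs.
	configFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
	configFlags.KubeConfig = pointer.String("/tmp/kubeconfig.yaml")
	configFlags.Namespace = pointer.String("default")
	f := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(configFlags))

	cfg, err := f.ToRESTConfig()
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("clientset ready: %T\n", clientset)
}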


@@ -1,17 +1,114 @@
package action
import (
"context"
"fmt"
"io"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/pkg/dns"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/dns"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
"github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
func (svr *Server) Disconnect(req *rpc.DisconnectRequest, resp rpc.Daemon_DisconnectServer) error {
defer func() {
util.InitLoggerForServer(true)
log.SetOutput(svr.LogFile)
config.Debug = false
}()
out := io.MultiWriter(newDisconnectWarp(resp), svr.LogFile)
util.InitLoggerForClient(config.Debug)
log.SetOutput(out)
switch {
case req.GetAll():
if svr.clone != nil {
_ = svr.clone.Cleanup()
}
svr.clone = nil
connects := handler.Connects(svr.secondaryConnect).Append(svr.connect)
for _, connect := range connects.Sort() {
if connect != nil {
connect.Cleanup()
}
}
svr.secondaryConnect = nil
svr.connect = nil
svr.t = time.Time{}
case req.ID != nil && req.GetID() == 0:
if svr.connect != nil {
svr.connect.Cleanup()
}
svr.connect = nil
svr.t = time.Time{}
if svr.clone != nil {
_ = svr.clone.Cleanup()
}
svr.clone = nil
case req.ID != nil:
index := req.GetID() - 1
if index < int32(len(svr.secondaryConnect)) {
svr.secondaryConnect[index].Cleanup()
svr.secondaryConnect = append(svr.secondaryConnect[:index], svr.secondaryConnect[index+1:]...)
} else {
log.Errorf("Index %d out of range", req.GetID())
}
case req.KubeconfigBytes != nil && req.Namespace != nil:
err := disconnectByKubeConfig(
resp.Context(),
svr,
req.GetKubeconfigBytes(),
req.GetNamespace(),
req.GetSshJump(),
)
if err != nil {
return err
}
case len(req.ClusterIDs) != 0:
s := sets.New(req.ClusterIDs...)
var connects = *new(handler.Connects)
var foundModeFull bool
if s.Has(svr.connect.GetClusterID()) {
connects = connects.Append(svr.connect)
foundModeFull = true
}
for i := 0; i < len(svr.secondaryConnect); i++ {
if s.Has(svr.secondaryConnect[i].GetClusterID()) {
connects = connects.Append(svr.secondaryConnect[i])
svr.secondaryConnect = append(svr.secondaryConnect[:i], svr.secondaryConnect[i+1:]...)
i--
}
}
for _, connect := range connects.Sort() {
if connect != nil {
connect.Cleanup()
}
}
if foundModeFull {
svr.connect = nil
svr.t = time.Time{}
if svr.clone != nil {
_ = svr.clone.Cleanup()
}
svr.clone = nil
}
}
if svr.connect == nil && len(svr.secondaryConnect) == 0 {
if svr.IsSudo {
_ = dns.CleanupHosts()
}
}
if !svr.IsSudo {
cli := svr.GetClient(true)
if cli == nil {
@@ -21,79 +118,86 @@ func (svr *Server) Disconnect(req *rpc.DisconnectRequest, resp rpc.Daemon_Discon
if err != nil {
return err
}
var recv *rpc.DisconnectResponse
for {
recv, err = connResp.Recv()
if err == io.EOF {
break
} else if err != nil {
return err
}
err = resp.Send(recv)
if err != nil {
return err
}
err = util.CopyGRPCStream[rpc.DisconnectResponse](connResp, resp)
if err != nil {
return err
}
}
defer func() {
log.SetOutput(svr.LogFile)
log.SetLevel(log.DebugLevel)
}()
out := io.MultiWriter(newDisconnectWarp(resp), svr.LogFile)
log.SetOutput(out)
log.SetLevel(log.InfoLevel)
if req.GetAll() {
if svr.connect != nil {
svr.connect.Cleanup()
}
if svr.clone != nil {
_ = svr.clone.Cleanup()
}
svr.t = time.Time{}
svr.connect = nil
svr.clone = nil
for _, options := range svr.secondaryConnect {
options.Cleanup()
}
svr.secondaryConnect = nil
} else if req.ID != nil && req.GetID() == 0 {
if svr.connect != nil {
svr.connect.Cleanup()
}
if svr.clone != nil {
_ = svr.clone.Cleanup()
}
svr.t = time.Time{}
svr.connect = nil
svr.clone = nil
} else if req.ID != nil {
index := req.GetID() - 1
if index < int32(len(svr.secondaryConnect)) {
svr.secondaryConnect[index].Cleanup()
svr.secondaryConnect = append(svr.secondaryConnect[:index], svr.secondaryConnect[index+1:]...)
} else {
log.Errorf("index %d out of range", req.GetID())
}
}
if svr.connect == nil && len(svr.secondaryConnect) == 0 {
dns.CleanupHosts()
}
return nil
}
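The ClusterIDs branch above deletes matching entries from svr.secondaryConnect while iterating, stepping the index back after each removal. A tiny sketch of that slice idiom in isolation:

package main

import "fmt"

func main() {
	conns := []string{"cluster-a", "cluster-b", "cluster-a"}
	for i := 0; i < len(conns); i++ {
		if conns[i] == "cluster-a" {
			// Remove element i in place, then revisit slot i, which now
			// holds the element shifted down by the append.
			conns = append(conns[:i], conns[i+1:]...)
			i--
		}
	}
	fmt.Println(conns) // [cluster-b]
}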
func disconnectByKubeConfig(ctx context.Context, svr *Server, kubeconfigBytes string, ns string, jump *rpc.SshJump) error {
file, err := util.ConvertToTempKubeconfigFile([]byte(kubeconfigBytes))
if err != nil {
return err
}
flags := pflag.NewFlagSet("", pflag.ContinueOnError)
flags.AddFlag(&pflag.Flag{
Name: "kubeconfig",
DefValue: file,
})
var sshConf = ssh.ParseSshFromRPC(jump)
var path string
path, err = ssh.SshJump(ctx, sshConf, flags, false)
if err != nil {
return err
}
connect := &handler.ConnectOptions{
Namespace: ns,
}
err = connect.InitClient(util.InitFactoryByPath(path, ns))
if err != nil {
return err
}
disconnect(ctx, svr, connect)
return nil
}
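disconnectByKubeConfig reuses a pflag.FlagSet as a small bag of named values: the temp kubeconfig path rides in the flag's DefValue, presumably read back inside ssh.SshJump. A minimal sketch of that carrier trick (the path is a placeholder):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.AddFlag(&pflag.Flag{
		Name:     "kubeconfig",
		DefValue: "/tmp/kubeconfig.yaml", // value carried directly, never parsed from argv
	})

	if f := flags.Lookup("kubeconfig"); f != nil {
		fmt.Println("carried path:", f.DefValue)
	}
}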
func disconnect(ctx context.Context, svr *Server, connect *handler.ConnectOptions) {
client := svr.GetClient(false)
if client == nil {
return
}
if svr.connect != nil {
isSameCluster, _ := util.IsSameCluster(
ctx,
svr.connect.GetClientset().CoreV1().ConfigMaps(svr.connect.Namespace), svr.connect.Namespace,
connect.GetClientset().CoreV1().ConfigMaps(connect.Namespace), connect.Namespace,
)
if isSameCluster {
log.Infof("Disconnecting from the cluster...")
svr.connect.Cleanup()
svr.connect = nil
svr.t = time.Time{}
}
}
for i := 0; i < len(svr.secondaryConnect); i++ {
options := svr.secondaryConnect[i]
isSameCluster, _ := util.IsSameCluster(
ctx,
options.GetClientset().CoreV1().ConfigMaps(options.Namespace), options.Namespace,
connect.GetClientset().CoreV1().ConfigMaps(connect.Namespace), connect.Namespace,
)
if isSameCluster {
log.Infof("Disconnecting from the cluster...")
options.Cleanup()
svr.secondaryConnect = append(svr.secondaryConnect[:i], svr.secondaryConnect[i+1:]...)
i--
}
}
}
type disconnectWarp struct {
server rpc.Daemon_DisconnectServer
}
func (r *disconnectWarp) Write(p []byte) (n int, err error) {
err = r.server.Send(&rpc.DisconnectResponse{
_ = r.server.Send(&rpc.DisconnectResponse{
Message: string(p),
})
return len(p), err
return len(p), nil
}
func newDisconnectWarp(server rpc.Daemon_DisconnectServer) io.Writer {


@@ -2,35 +2,40 @@ package action
import (
"context"
"encoding/json"
"errors"
"time"
"k8s.io/apimachinery/pkg/api/meta"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/discovery"
"k8s.io/client-go/informers"
"k8s.io/client-go/metadata"
"k8s.io/client-go/metadata/metadatainformer"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/utils/ptr"
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
)
func (svr *Server) Get(ctx context.Context, req *rpc.GetRequest) (*rpc.GetResponse, error) {
if svr.connect == nil {
if svr.connect == nil || svr.connect.Context() == nil {
return nil, errors.New("not connected")
}
if svr.gr == nil {
if svr.resourceLists == nil {
restConfig, err := svr.connect.GetFactory().ToRESTConfig()
if err != nil {
return nil, err
}
restConfig.WarningHandler = rest.NoWarnings{}
config, err := discovery.NewDiscoveryClientForConfig(restConfig)
if err != nil {
return nil, err
}
svr.gr, err = restmapper.GetAPIGroupResources(config)
svr.resourceLists, err = discovery.ServerPreferredResources(config)
if err != nil {
return nil, err
}
@@ -42,73 +47,77 @@ func (svr *Server) Get(ctx context.Context, req *rpc.GetRequest) (*rpc.GetRespon
if err != nil {
return nil, err
}
svr.informer = metadatainformer.NewSharedInformerFactory(forConfig, time.Second*5)
for _, resources := range svr.gr {
for _, apiResources := range resources.VersionedResources {
for _, resource := range apiResources {
have := sets.New[string](resource.Kind, resource.Name, resource.SingularName).Insert(resource.ShortNames...).Has(req.Resource)
if have {
resourcesFor, err := mapper.RESTMapping(schema.GroupKind{
Group: resource.Group,
Kind: resource.Kind,
}, resource.Version)
if err != nil {
return nil, err
}
svr.informer.ForResource(resourcesFor.Resource)
}
for _, resourceList := range svr.resourceLists {
for _, resource := range resourceList.APIResources {
var groupVersion schema.GroupVersion
groupVersion, err = schema.ParseGroupVersion(resourceList.GroupVersion)
if err != nil {
continue
}
var mapping schema.GroupVersionResource
mapping, err = mapper.ResourceFor(groupVersion.WithResource(resource.Name))
if err != nil {
if meta.IsNoMatchError(err) {
continue
}
return nil, err
}
_ = svr.informer.ForResource(mapping).Informer().SetWatchErrorHandler(func(r *cache.Reflector, err error) {
_, _ = svr.LogFile.Write([]byte(err.Error()))
})
}
}
go svr.informer.Start(svr.connect.Context().Done())
go svr.informer.WaitForCacheSync(make(chan struct{}))
svr.informer.Start(svr.connect.Context().Done())
svr.informer.WaitForCacheSync(ctx.Done())
}
informer, err := svr.getInformer(req)
informer, gvk, err := svr.getInformer(req)
if err != nil {
return nil, err
}
var result []*rpc.Metadata
for _, m := range informer.Informer().GetIndexer().List() {
object, err := meta.Accessor(m)
if err != nil {
return nil, err
var result []string
for _, m := range informer.Informer().GetStore().List() {
objectMetadata, ok := m.(*v1.PartialObjectMetadata)
if ok {
deepCopy := objectMetadata.DeepCopy()
deepCopy.SetGroupVersionKind(*gvk)
deepCopy.ManagedFields = nil
marshal, err := json.Marshal(deepCopy)
if err != nil {
continue
}
result = append(result, string(marshal))
}
result = append(result, &rpc.Metadata{
Name: object.GetName(),
Namespace: object.GetNamespace(),
})
}
return &rpc.GetResponse{Metadata: result}, nil
}
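The rewritten Get caches only object metadata: the metadatainformer factory stores *metav1.PartialObjectMetadata, which is cheap to watch and trivially JSON-marshals once ManagedFields is stripped. A standalone sketch of that flow against pods (the kubeconfig path is a placeholder):

package main

import (
	"encoding/json"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/metadata"
	"k8s.io/client-go/metadata/metadatainformer"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig.yaml")
	if err != nil {
		panic(err)
	}
	client, err := metadata.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	factory := metadatainformer.NewSharedInformerFactory(client, 5*time.Second)
	informer := factory.ForResource(schema.GroupVersionResource{Version: "v1", Resource: "pods"})

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	for _, obj := range informer.Informer().GetStore().List() {
		if m, ok := obj.(*metav1.PartialObjectMetadata); ok {
			m = m.DeepCopy()
			m.ManagedFields = nil // drop noisy server-side-apply bookkeeping
			b, _ := json.Marshal(m)
			fmt.Println(string(b))
		}
	}
}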
func (svr *Server) getInformer(req *rpc.GetRequest) (informers.GenericInformer, error) {
func (svr *Server) getInformer(req *rpc.GetRequest) (informers.GenericInformer, *schema.GroupVersionKind, error) {
mapper, err := svr.connect.GetFactory().ToRESTMapper()
if err != nil {
return nil, err
return nil, nil, err
}
var resourcesFor *meta.RESTMapping
out:
for _, resources := range svr.gr {
for _, apiResources := range resources.VersionedResources {
for _, resource := range apiResources {
have := sets.New[string](resource.Kind, resource.Name, resource.SingularName).Insert(resource.ShortNames...).Has(req.Resource)
if have {
resourcesFor, err = mapper.RESTMapping(schema.GroupKind{
Group: resource.Group,
Kind: resource.Kind,
}, resource.Version)
if err != nil {
return nil, err
}
break out
for _, resources := range svr.resourceLists {
for _, resource := range resources.APIResources {
have := sets.New[string](resource.Kind, resource.Name, resource.SingularName).Insert(resource.ShortNames...).Has(req.Resource)
if have {
var groupVersion schema.GroupVersion
groupVersion, err = schema.ParseGroupVersion(resources.GroupVersion)
if err != nil {
continue
}
var mapping schema.GroupVersionResource
mapping, err = mapper.ResourceFor(groupVersion.WithResource(resource.Name))
if err != nil {
if meta.IsNoMatchError(err) {
continue
}
return nil, nil, err
}
return svr.informer.ForResource(mapping), ptr.To(groupVersion.WithKind(resource.Kind)), nil
}
}
}
if resourcesFor == nil {
return nil, errors.New("ErrResourceNotFound")
}
return svr.informer.ForResource(resourcesFor.Resource), nil
return nil, nil, errors.New("ErrResourceNotFound")
}
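getInformer accepts any spelling of a resource (Kind, plural, singular, or short name) by folding them into one set before the lookup. The matching step in isolation:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// matches mirrors the lookup above: the query hits if it equals the
// Kind, plural name, singular name, or any short name.
func matches(query, kind, plural, singular string, short ...string) bool {
	return sets.New[string](kind, plural, singular).Insert(short...).Has(query)
}

func main() {
	fmt.Println(matches("po", "Pod", "pods", "pod", "po"))  // true
	fmt.Println(matches("svc", "Pod", "pods", "pod", "po")) // false
}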


@@ -0,0 +1,11 @@
package action
import (
"context"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
)
func (svr *Server) Identify(ctx context.Context, req *rpc.IdentifyRequest) (*rpc.IdentifyResponse, error) {
return &rpc.IdentifyResponse{ID: svr.ID}, nil
}
