Compare commits

...

244 Commits

Author SHA1 Message Date
fengcaiwen
453afc5d49 fix: fix no avaliable buffer bug 2023-08-23 22:58:24 +08:00
wencaiwulue
172033a227 feat: update krew index version to refs/tags/v1.1.36 2023-08-20 17:10:55 +08:00
fengcaiwen
09b02bc2d7 feat: remove gvisor mode temporally 2023-08-20 16:10:20 +08:00
fengcaiwen
92bf36bd3d feat: complete gvisor 2023-08-20 15:17:29 +08:00
fengcaiwen
d94db893db feat: update github action go version 2023-08-20 15:17:29 +08:00
fengcaiwen
b0e2e0e2b9 feat: single connection broken will not redo port-forward 2023-08-20 15:17:29 +08:00
fengcaiwen
ff2fcf939f feat: complete gvisor 2023-08-20 15:17:29 +08:00
fengcaiwen
30bf4838c2 feat: fix route conn bugs 2023-08-20 15:17:29 +08:00
fengcaiwen
c07879e78a feat: use gvisor to optimize udp performance 2023-08-20 15:17:29 +08:00
fengcaiwen
bf47c6f4e1 feat: use gvisor to optimize performance 2023-08-20 15:17:29 +08:00
fengcaiwen
27482158e7 hotfix: github action setup docker on macOS failed issue 2023-08-19 23:40:26 +08:00
fengcaiwen
3265d26b27 feat: optimize dns query while special parameter extra-domain 2023-08-19 17:56:58 +08:00
wencaiwulue
a3651cf370 feat: update krew index version to refs/tags/v1.1.35 2023-08-16 23:11:07 +08:00
naison
68ff79ca98 hotfix: rollback mtu 2023-08-16 10:54:03 +00:00
wencaiwulue
414ac0e79d feat: update krew index version to refs/tags/v1.1.35 2023-08-16 10:58:38 +08:00
naison
22d34fe362 hotfix(proxy): fix proxy without header not work bug 2023-08-16 02:04:34 +00:00
wencaiwulue
37c901c633 feat: update dockerfile 2023-08-08 22:28:38 +08:00
fengcaiwen
6fb80496e6 feat: nat use rlock while route 2023-08-06 12:02:44 +08:00
fengcaiwen
5f08427105 feat: optimize code 2023-08-04 17:11:15 +08:00
fengcaiwen
97042d6ed0 fix: judge list pod permission to add route 2023-08-02 10:45:45 +08:00
fengcaiwen
3c854cb1c7 fix: add heartbeats 2023-08-01 15:47:57 +08:00
wencaiwulue
7555311599 chore(doc): update readme 2023-07-30 15:21:15 +08:00
wencaiwulue
f550cc9a05 feat: update krew index version to refs/tags/v1.1.35 2023-07-29 20:20:23 +08:00
fengcaiwen
ca67d1144a fix: fix connect-mode is container and no permission to set ipv6 address 2023-07-29 19:02:12 +08:00
wencaiwulue
9f6304d4f8 feat: update krew index version to refs/tags/v1.1.35 2023-07-28 16:49:17 +08:00
fengcaiwen
f934c33952 fix: fix bug 2023-07-28 16:09:21 +08:00
fengcaiwen
913f8648e3 feat: enable build image 2023-07-26 13:07:19 +08:00
fengcaiwen
44e4dcc678 feat: adjust resource 2023-07-26 13:04:23 +08:00
fengcaiwen
3b3165c88b fix: keep route ip 2023-07-26 12:59:41 +08:00
fengcaiwen
6511d58dc2 perf: use chan to communicate between tcpserver and tun 2023-07-26 12:50:32 +08:00
fengcaiwen
2ba7b2027f perf: use chan to communicate between tcpserver and tun 2023-07-26 12:50:32 +08:00
fengcaiwen
d87363d2cd perf: use chan to communicate between tcpserver and tun 2023-07-26 12:50:32 +08:00
fengcaiwen
fdf75b0f0f fix: keep router ip 2023-07-22 18:12:31 +08:00
fengcaiwen
dc07514a1c chore: update comment 2023-07-18 15:20:53 +08:00
fengcaiwen
b90f9c3674 fix: update krew index repo name 2023-07-17 17:37:00 +08:00
wencaiwulue
e63229afde feat: update krew index version to refs/tags/v1.1.34 2023-07-17 17:34:27 +08:00
fengcaiwen
b0ed57794f fix: fix cidr-domain not works bug 2023-07-17 16:25:11 +08:00
wencaiwulue
6a4c787006 feat: update krew index version to refs/tags/v1.1.34 2023-07-15 19:14:27 +08:00
fengcaiwen
32886a5a5d Merge remote-tracking branch 'origin/master' 2023-07-15 18:35:45 +08:00
fengcaiwen
67786f82fd fix: re-rent ip if pod restart 2023-07-15 18:35:26 +08:00
fengcaiwen
caac77c7e5 fix: ping each other on Windows 2023-07-15 18:35:07 +08:00
fengcaiwen
0872c39b63 fix: auto determine protocol 2023-07-15 18:34:46 +08:00
wencaiwulue
c5cdafd389 feat: update krew index version to refs/tags/v1.1.34 2023-07-13 19:56:30 +08:00
Weijie
c65d4be05e fix typo 2023-07-13 15:52:22 +08:00
fengcaiwen
cc8eb9d939 feat: support client-go multiple auth 2023-06-26 19:24:48 +08:00
naison
2a7e522861 Update README_ZH.md 2023-06-20 17:47:43 +08:00
naison
e85aed59ec Update README.md 2023-06-20 17:47:28 +08:00
fengcaiwen
964ee73eb3 feat: enable test build on windows 2023-06-15 14:29:57 +08:00
fengcaiwen
75e2929f4a feat: migrate to organization 2023-06-09 17:57:36 +08:00
fengcaiwen
fd061499c1 feat: update README.md 2023-06-08 17:30:16 +08:00
fengcaiwen
45c08641d4 Merge remote-tracking branch 'origin/master' 2023-06-02 21:37:48 +08:00
fengcaiwen
bb29be937e feat: update README.md 2023-06-02 21:37:24 +08:00
wencaiwulue
57ff87bee8 feat: update krew index version to refs/tags/v1.1.33 2023-06-01 11:55:18 +08:00
naison
f3568f5b13 fix: fix grpc origin cluster protocol error 2023-06-01 02:48:21 +00:00
Pengfei Jiang
5fe1bf3910 Merge pull request #42 from joyme123/feat-localdns
feat(dns): support localdns to forward dns query
2023-05-31 17:53:25 +08:00
江鹏飞
1ab038d153 chore(build): build for mac and windows 2023-05-31 17:41:12 +08:00
江鹏飞
75c1b81786 feat(dns): support localdns to forward dns query 2023-05-31 17:31:16 +08:00
naison
19bcb290bb feat: update test.yml 2023-05-31 07:17:33 +00:00
naison
eff162f22d feat: update test.yaml 2023-05-31 06:05:15 +00:00
fengcaiwen
bd17575b87 feat: ssh support use remote kubeconfig 2023-05-27 10:20:04 +08:00
wencaiwulue
5a637b5efe feat: update krew index version to refs/tags/v1.1.32 2023-05-20 13:11:08 +08:00
fengcaiwen
b8e183ca82 feat: support more docker options 2023-05-18 14:48:00 +08:00
naison
f20bf21e6b Revert "feat: update krew index version to refs/tags/v1.1.32"
This reverts commit c21088ee1e.
2023-05-18 14:09:30 +08:00
wencaiwulue
c21088ee1e feat: update krew index version to refs/tags/v1.1.32 2023-05-17 21:36:08 +08:00
wencaiwulue
b05a565304 feat: use errgroup to run server 2023-05-17 20:53:53 +08:00
fengcaiwen
f9c0a674be feat: support more docker options 2023-05-17 14:27:52 +08:00
wencaiwulue
f77a0170d7 Merge remote-tracking branch 'origin/master' 2023-05-07 22:48:31 +08:00
fengcaiwen
41049d46f7 feat: use errgroup to run server 2023-05-06 12:38:08 +08:00
wencaiwulue
1824da1760 feat: update krew index version to refs/tags/v1.1.31 2023-05-05 10:23:29 +08:00
wencaiwulue
c26f9495e0 fix: add route table except api server address 2023-05-04 18:32:35 +08:00
fengcaiwen
1ff2df43c2 fix: fix platform not works bug 2023-04-28 17:33:07 +08:00
wencaiwulue
fbc156fc16 feat: update krew index version to refs/tags/v1.1.30 2023-04-13 00:37:48 +08:00
wencaiwulue
25dc7c5786 feat: fix npe 2023-04-12 20:38:25 +08:00
fengcaiwen
f1fe93ab25 feat: update resource quota 2023-04-10 20:58:46 +08:00
fengcaiwen
8f6c987778 fix: fix bug 2023-04-10 17:57:05 +08:00
fengcaiwen
f2de9a8b1d feat: dns server add rate limit 2023-04-10 14:56:31 +08:00
fengcaiwen
71ed7e6bdb feat: support transfer image to remote 2023-04-10 12:51:26 +08:00
fengcaiwen
b2a6596405 feat: set cleanup timeout to 5 seconds 2023-04-09 17:07:54 +08:00
fengcaiwen
2b97dd3038 feat: ssh keyfile support "~" and support include syntax 2023-04-08 12:48:36 +08:00
fengcaiwen
f7c0d3c0ce feat: extra-domain support ipv6 fix ssh-jump not use --kubeconfig options bug 2023-04-08 12:05:24 +08:00
fengcaiwen
1fed5cc266 feat: pprof use random port 2023-04-03 21:24:57 +08:00
wencaiwulue
2227a82125 feat: support ipv6 2023-04-02 22:11:43 +08:00
wencaiwulue
16eb86290f feat: update krew index version to refs/tags/v1.1.29 2023-03-27 10:16:17 +08:00
wencaiwulue
aafee9ca5d feat: optimize code 2023-03-27 09:34:44 +08:00
fengcaiwen
4d01468e1d feat: use netlink 2023-03-24 14:59:23 +08:00
fengcaiwen
7435d2c75b feat: set image 2023-03-24 11:52:11 +08:00
wencaiwulue
6fccccc3bf feat: header match ignore case 2023-03-23 23:59:49 +08:00
wencaiwulue
5c9928ad9a feat: auto update image version 2023-03-23 23:59:19 +08:00
fengcaiwen
a323bae035 feat: delete deployment only reset 2023-03-23 14:22:23 +08:00
fengcaiwen
5ecebf2958 feat: remove docker network if container length is zero 2023-03-23 10:48:08 +08:00
wencaiwulue
100c60a90a feat: kill container with signal SIGTERM 2023-03-23 09:37:40 +08:00
fengcaiwen
593f42aeca feat: add mode connect-mode 2023-03-22 23:50:09 +08:00
wencaiwulue
2ccf5776a8 feat: not create network if already exist 2023-03-22 09:38:41 +08:00
wencaiwulue
2b41cfa11f Merge branch 'master' of github.com:wencaiwulue/kubevpn 2023-03-21 23:18:59 +08:00
wencaiwulue
3cef2861f0 feat: add tools conntrack 2023-03-21 23:15:16 +08:00
wencaiwulue
a9c7f8dcb0 feat: use created network to startup container 2023-03-21 23:14:47 +08:00
fengcaiwen
3f7a8f07ee feat: use one clientset 2023-03-21 11:41:55 +08:00
wencaiwulue
6bbc1c66d9 feat: update krew version 2023-03-21 09:17:02 +08:00
fengcaiwen
feabc95ee8 feat: add dlv for container-local 2023-03-20 22:29:15 +08:00
fengcaiwen
bde2ee42e0 feat: add options extra-domain 2023-03-20 19:43:42 +08:00
fengcaiwen
37dec0506f feat: use user root as container user 2023-03-20 19:25:07 +08:00
fengcaiwen
661032c012 feat: free tun library 2023-03-20 19:24:26 +08:00
fengcaiwen
d074bd0e62 fix: fix dev port-mapping not work 2023-03-20 19:24:03 +08:00
fengcaiwen
396bcdf11f feat: pull image use authconfig 2023-03-20 19:23:32 +08:00
fengcaiwen
cb10a537b3 feat: add timezone info 2023-03-20 19:22:45 +08:00
fengcaiwen
66cb70e1c2 feat: show options as prompt 2023-03-20 09:35:32 +08:00
fengcaiwen
68b43d1a73 feat: dns enable singleInFlight 2023-03-20 08:46:13 +08:00
fengcaiwen
c99f14bb8d feat: add cache for control-plane 2023-03-20 08:33:18 +08:00
fengcaiwen
b120dc4b0d feat: update krew version 2023-03-17 22:36:38 +08:00
fengcaiwen
fbae06a5ae feat: fix dind network bug 2023-03-17 21:48:46 +08:00
fengcaiwen
e2dd6e8f99 feat: call uninstall 2023-03-17 21:48:46 +08:00
fengcaiwen
5fe3560d93 feat: add heartbeats to each other 2023-03-17 21:48:46 +08:00
fengcaiwen
4a5c1374fb feat: optimize code 2023-03-17 21:48:46 +08:00
fengcaiwen
1970f30f9d feat: optimize code 2023-03-17 21:48:46 +08:00
fengcaiwen
a545f3a958 feat: use 32345 as pprof port 2023-03-17 21:48:46 +08:00
fengcaiwen
ac3c7c218f feat: use network instead of parent-container 2023-03-17 21:48:46 +08:00
wencaiwulue
321a63ab96 feat: disable update official krew index 2023-03-15 21:35:04 +08:00
fengcaiwen
4f4a545ecb feat: add options target-registry for duplicate mode 2023-03-15 21:17:29 +08:00
fengcaiwen
ad3faed1e6 feat: add option extra-cidr 2023-03-15 21:17:29 +08:00
fengcaiwen
198f8a0ced feat: update usage 2023-03-15 21:17:29 +08:00
fengcaiwen
7ea21f4aeb feat: set volume and env for duplicate mode 2023-03-15 21:17:29 +08:00
wencaiwulue
09528748b0 feat: optimize code 2023-03-15 21:17:29 +08:00
wencaiwulue
4f9d1f7db8 feat: add mode duplicate 2023-03-15 21:17:29 +08:00
wencaiwulue
42d5f3c8cf feat: set nameserver amount 2 2023-03-14 21:24:48 +08:00
wencaiwulue
c76ea03dfe feat: add command cp 2023-03-14 21:20:46 +08:00
wencaiwulue
30a82e25eb feat: pre-check oom in mode kubevpn dev 2023-03-14 09:38:14 +08:00
wencaiwulue
49229e70fe feat: heartbeats four times 2023-03-11 09:13:05 +08:00
fengcaiwen
caeaab9ba2 feat: split connect into connect and proxy 2023-03-10 20:49:22 +08:00
fengcaiwen
0d7f78f8ae feat: update comment 2023-03-10 18:41:52 +08:00
fengcaiwen
eb1eeb698e feat: call powershell and netsh add option no-window 2023-03-10 18:24:05 +08:00
fengcaiwen
75af5c2b14 feat: send heartbeats with gopacket 2023-03-10 17:35:59 +08:00
wencaiwulue
9a8e18f06f feat: fix build image 2023-03-09 21:39:29 +08:00
fengcaiwen
f7413ce0b9 feat: delete extension lib before exit 2023-03-09 18:02:12 +08:00
fengcaiwen
40de53fced feat: add FAQ 2023-03-09 16:00:43 +08:00
fengcaiwen
1f7678af66 feat: fix bugs 2023-03-09 12:06:11 +08:00
fengcaiwen
1b7794aa92 feat: optimize get cidr logic 2023-03-08 21:43:09 +08:00
fengcaiwen
9ab86c3baf feat: remove useless testcase 2023-03-08 09:22:23 +08:00
fengcaiwen
5d622c19d3 feat: support linux/arm64 2023-03-07 15:50:28 +08:00
fengcaiwen
a3d78da25c feat: use another way to startup container in dind 2023-03-06 21:28:02 +08:00
wencaiwulue
c1ef13ff87 feat: use json envoy config 2023-03-05 20:08:50 +08:00
wencaiwulue
226c7034d5 feat: update krew index version 2023-03-04 14:37:21 +08:00
fengcaiwen
8af9a9e6fa feat: support ssh ProxyJump 2023-03-04 13:51:04 +08:00
fengcaiwen
ac4c254cec feat: support ssh jumper 2023-03-03 18:12:59 +08:00
fengcaiwen
0ba0659ce3 feat: print version 2023-03-02 10:56:05 +08:00
wencaiwulue
4c9b1075ba feat: update krew index version 2023-03-01 23:36:34 +08:00
wencaiwulue
9469469689 feat: update envoy log level from debug to error 2023-03-01 22:40:14 +08:00
fengcaiwen
edac2dde39 feat: optimize 2023-02-28 19:52:28 +08:00
wencaiwulue
f2c663f7fb feat: optimize redo port-forward logic 2023-02-27 21:10:26 +08:00
wencaiwulue
f30a5dad19 feat: return err if setup dns failed 2023-02-27 08:52:39 +08:00
wencaiwulue
98358e0d2b feat: update krew registry version 2023-02-26 19:39:08 +08:00
wencaiwulue
5f814f6d02 feat: optimize performance 2023-02-26 18:25:50 +08:00
wencaiwulue
f490801f99 feat: update krew plugin version 2023-02-23 23:21:48 +08:00
fengcaiwen
c883398f37 feat: support dev docker in docker (dind) 2023-02-23 21:15:11 +08:00
fengcaiwen
c56e0c0baf feat: listen service add route 2023-02-23 09:06:38 +08:00
wencaiwulue
6326114bb1 feat: update krew plugin version 2023-02-22 23:37:22 +08:00
fengcaiwen
8b4b38d6c2 feat: use api to get and release ip 2023-02-22 17:11:13 +08:00
wencaiwulue
48c34d8512 feat: rent and release ip use api 2023-02-22 09:01:33 +08:00
wencaiwulue
1211a76700 feat: change sa if permission can't curd secrets and configmaps 2023-02-22 00:22:53 +08:00
wencaiwulue
faec23a854 feat: webhook not use klog 2023-02-21 23:02:36 +08:00
wencaiwulue
9c73aabcce feat: add cap for supporting dlv debug 2023-02-20 21:38:33 +08:00
wencaiwulue
85405c1a0f performance: change tun lib to wg 2023-02-19 12:14:47 +08:00
wencaiwulue
840695182c feat: fix testcase 2023-02-18 21:03:29 +08:00
wencaiwulue
9765b78ca4 feat: fix testcase 2023-02-18 20:31:33 +08:00
wencaiwulue
0e0533b307 feat: update custom krew index 2023-02-18 16:25:11 +08:00
wencaiwulue
bb124102e0 feat: update readme 2023-02-18 15:52:56 +08:00
wencaiwulue
acf7e67d59 feat: add default upgrade logic 2023-02-18 15:15:33 +08:00
wencaiwulue
35bf309fbd feat: add route dynamic 2023-02-17 21:36:39 +08:00
wencaiwulue
74194c3d76 feat: add hostname 2023-02-17 21:36:20 +08:00
wencaiwulue
f5084c04c6 feat: print port are mapping to host 2023-02-16 22:17:02 +08:00
wencaiwulue
c71cac977a feat: remove useless rollback 2023-02-16 22:04:02 +08:00
fengcaiwen
589f57afb0 feat: restore sa 2023-02-16 21:01:55 +08:00
fengcaiwen
674d4aeefe feat: add dev mode in docker container 2023-02-16 21:01:42 +08:00
wencaiwulue
cd41ebf2d8 feat: use download api to get current namespace 2023-02-14 22:15:19 +08:00
wencaiwulue
4b188ba6e9 feat: update krew plugin sha256 2023-02-12 16:55:45 +08:00
wencaiwulue
16702a5f3b feat: clean mesh envoy rule after exit and disable add route dynamic 2023-02-12 15:42:25 +08:00
wencaiwulue
46c2f01053 feat: add sa to mesh pod 2023-02-12 12:23:15 +08:00
wencaiwulue
826def9a4e feat: update customized krew index version from v1.1.16 --> v1.1.17 2023-02-11 21:04:49 +08:00
wencaiwulue
972cd55314 feat: optimize DNS experience on macOS 2023-02-11 20:33:05 +08:00
wencaiwulue
3c14dc4617 fix: update readme 2023-02-11 16:15:10 +08:00
wencaiwulue
84267a0491 fix: remove useless offset 2023-02-11 15:14:17 +08:00
wencaiwulue
b875c3ec5d feat: add current namespace service to hosts file 2023-02-11 14:48:43 +08:00
wencaiwulue
ec0e00e5cf feat: try to solve issue [Failed to set DNS configuration: Unit dbus-org.freedesktop.resolve1.service not found] 2023-02-10 23:04:41 +08:00
wencaiwulue
0c6b25ac1f feat: set namespace to mesh container 2023-02-10 21:27:35 +08:00
wencaiwulue
64e4070166 feat: set namespace to mesh container 2023-02-10 21:20:05 +08:00
fengcaiwen
93151f03f0 fix: ignore ping error 2023-02-10 12:03:51 +08:00
fengcaiwen
e9b99a1c18 fix: fix use cpu too high 2023-02-10 11:57:05 +08:00
fengcaiwen
db7286abec feat: feat: optimize code and remove detect conflict interface 2023-02-08 16:26:25 +08:00
wencaiwulue
2adfe3c525 feat: add custom krew index 2023-02-05 11:23:01 +08:00
wencaiwulue
7bc36352ff feat: fix upgrade unzip logic 2023-02-04 17:28:42 +08:00
wencaiwulue
ef980ad66e feat(release): zip release and add to krew 2023-02-04 12:21:21 +08:00
wencaiwulue
521dd43527 feat: ignore create webhook configuration permission deny 2023-02-04 12:09:42 +08:00
fengcaiwen
992f1e439d fix: final ways to keep get inbound tun ip 2023-02-02 11:05:53 +08:00
fengcaiwen
9cf8611144 feat: mesh mode support GRPC using envoy 2023-01-29 20:31:57 +08:00
fengcaiwen
68ec4440cb feat: upgrade envoy version from v1.21.1 --> v1.25.0 2023-01-29 20:30:50 +08:00
fengcaiwen
6c97c98bea feat: optimize code 2023-01-29 20:30:13 +08:00
wencaiwulue
b9e73eb105 feat: still try to get cidr from resources 2023-01-28 21:27:17 +08:00
wencaiwulue
24c9441f6c feat: still try to get cidr from resources 2023-01-28 20:55:55 +08:00
fengcaiwen
203e336341 feat: add pod listener to add route dynamically 2023-01-28 17:34:15 +08:00
wencaiwulue
c73f6c5ab2 feat: use factory to init clientset instead of use in cluster config 2023-01-27 11:10:26 +08:00
wencaiwulue
f128f5d58e feat: only retry to update ref-count on conflict 2023-01-26 21:57:15 +08:00
wencaiwulue
95f81df658 feat: remove useless code 2023-01-20 21:58:02 +08:00
wencaiwulue
eab3fde83a feat: upgrade dockerfile go version to 1.19 2023-01-20 11:07:14 +08:00
wencaiwulue
1ae99e42f5 feat: optimize chinese display issue on Windows 2023-01-19 20:47:56 +08:00
fengcaiwen
17bc64559b feat: update go mod to 1.19 2023-01-19 19:40:06 +08:00
fengcaiwen
fd7c81a104 feat(upgrade): check permission of current kubevpn folder and pretty download progress bar 2023-01-19 16:48:13 +08:00
fengcaiwen
ac30ed7956 feat(doc): update readme usage 2023-01-19 14:54:13 +08:00
fengcaiwen
74beaceb9f feat: update all go dependency to latest if available 2023-01-17 14:30:02 +08:00
wencaiwulue
87dac42dad feat: warning more human-readable if using sudo to exec kubevpn 2023-01-15 12:09:07 +08:00
wencaiwulue
121ebe07ed feat: just warning instead of exit if using sudo to exec kubevpn 2023-01-14 23:38:23 +08:00
fengcaiwen
74c08e391a feat: refactor code 2023-01-14 21:00:50 +08:00
fengcaiwen
710904b350 feat: add sub-command options to print support global flags 2023-01-14 19:10:18 +08:00
fengcaiwen
21f79e03d8 feat: reflector use context as function first parameter 2023-01-14 17:22:59 +08:00
fengcaiwen
438509ffef feat: support env KUBECONFIG finally 2023-01-14 17:10:34 +08:00
fengcaiwen
d71e51588d Merge remote-tracking branch 'origin/master' 2023-01-14 16:43:56 +08:00
fengcaiwen
faebedad0a feat: use kubectl style new cmd 2023-01-14 16:42:49 +08:00
fengcaiwen
7e5aa5e944 feat: rename 2023-01-12 16:49:36 +08:00
fengcaiwen
8df6c9c0f8 feat: support env KUBECONFIG 2023-01-12 16:48:27 +08:00
fengcaiwen
68c580d636 feat: delete useless test file 2023-01-12 12:39:57 +08:00
fengcaiwen
6f30e2361d feat: close temp file before rename it 2023-01-12 12:37:29 +08:00
wencaiwulue
dfed43c6e9 feat: add sub-command upgrade 2023-01-11 23:11:30 +08:00
wencaiwulue
1786a7965e feat: add sub-command upgrade 2023-01-11 21:37:42 +08:00
wencaiwulue
ecf9fe7353 feat: webhook ignore error and wait all container of pod kubevpn-traffic-manager are ready 2023-01-11 21:34:51 +08:00
fengcaiwen
de8dcd8668 feat: add service ip to dns server list 2023-01-11 20:53:51 +08:00
fengcaiwen
7c85d9a628 feat: support websocket 2023-01-11 20:52:33 +08:00
wencaiwulue
a44f323f23 feat(test): add cache for testcase 2023-01-08 17:28:51 +08:00
fengcaiwen
f6471ef948 feat: use cm data to store ref-count and move control-plane into a subcommand of kubevpn 2023-01-06 19:29:57 +08:00
wencaiwulue
6ee83d6e65 optimize comment 2023-01-05 23:02:48 +08:00
fengcaiwen
45b4c9c98d feat: add sub-command reset 2023-01-05 14:06:09 +08:00
wencaiwulue
1b85450ef4 feat: add usage 2022-12-28 14:56:52 +08:00
wencaiwulue
200637f717 feat: compromise 2022-12-27 15:18:45 +08:00
wencaiwulue
f826357bae feat: optimize check pod status logic for redo port-forward 2022-12-27 11:52:25 +08:00
wencaiwulue
6816c02933 feat: optimize get cidr from pod logic 2022-12-25 16:52:27 +08:00
wencaiwulue
3452a71126 feat: optimize get cidr from pod logic 2022-12-25 16:14:37 +08:00
wencaiwulue
33a1b0add3 feat: delete get cidr pod anyway 2022-12-17 18:33:24 +08:00
fengcaiwen
a07b5cae2e feat: retry dns query 2022-12-14 21:57:31 +08:00
wencaiwulue
dac083e7b3 feat: optimize webhook 2022-12-13 21:56:41 +08:00
fengcaiwen
5a562ee0a1 feat: use webhook to manage ip allocate 2022-12-13 21:35:41 +08:00
wencaiwulue
5d5c6c4717 fix(test): link resolvectl to systemd-resolve 2022-12-11 15:16:18 +08:00
wencaiwulue
b4394ebce3 fix(test): link resolvectl to systemd-resolve 2022-12-11 13:50:15 +08:00
wencaiwulue
182905c8e2 feat: try to collect any cidr we could find 2022-12-11 12:51:05 +08:00
wencaiwulue
e181ea417d feat: add vendor to gitignore 2022-12-11 10:12:53 +08:00
wencaiwulue
8a0290bbd2 feat: docker build with platform 2022-12-11 10:11:43 +08:00
fengcaiwen
8356ff68d2 feat: disable envoy stream timeout 2022-12-09 22:03:42 +08:00
wencaiwulue
1004db36b9 feat: use random port instead of 10800 2022-11-28 20:04:39 +08:00
150 changed files with 16547 additions and 3945 deletions

103
.github/krew.yaml vendored Normal file
View File

@@ -0,0 +1,103 @@
apiVersion: krew.googlecontainertools.github.com/v1alpha2
kind: Plugin
metadata:
name: kubevpn
spec:
version: {{ .TagName }}
homepage: https://github.com/KubeNetworks/kubevpn
shortDescription: "A vpn tunnel tools which can connect to kubernetes cluster network"
description: |
KubeVPN is Cloud Native Dev Environment, connect to kubernetes cluster network, you can access remote kubernetes
cluster network, remote
kubernetes cluster service can also access your local service. and more, you can run your kubernetes pod on local Docker
container with same environment、volume、and network. you can develop your application on local PC totally.
platforms:
- selector:
matchLabels:
os: windows
arch: amd64
{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_amd64.zip" .TagName }}
files:
- from: ./bin/kubevpn.exe
to: .
- from: LICENSE
to: .
bin: kubevpn.exe
- selector:
matchLabels:
os: windows
arch: arm64
{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_arm64.zip" .TagName }}
files:
- from: ./bin/kubevpn.exe
to: .
- from: LICENSE
to: .
bin: kubevpn.exe
- selector:
matchLabels:
os: windows
arch: 386
{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_386.zip" .TagName }}
files:
- from: ./bin/kubevpn.exe
to: .
- from: LICENSE
to: .
bin: kubevpn.exe
- selector:
matchLabels:
os: linux
arch: amd64
{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_amd64.zip" .TagName }}
files:
- from: ./bin/kubevpn
to: .
- from: LICENSE
to: .
bin: kubevpn
- selector:
matchLabels:
os: linux
arch: arm64
{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_arm64.zip" .TagName }}
files:
- from: ./bin/kubevpn
to: .
- from: LICENSE
to: .
bin: kubevpn
- selector:
matchLabels:
os: linux
arch: 386
{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_386.zip" .TagName }}
files:
- from: ./bin/kubevpn
to: .
- from: LICENSE
to: .
bin: kubevpn
- selector:
matchLabels:
os: darwin
arch: amd64
{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_darwin_amd64.zip" .TagName }}
files:
- from: ./bin/kubevpn
to: .
- from: LICENSE
to: .
bin: kubevpn
- selector:
matchLabels:
os: darwin
arch: arm64
{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_darwin_arm64.zip" .TagName }}
files:
- from: ./bin/kubevpn
to: .
- from: LICENSE
to: .
bin: kubevpn

View File

@@ -8,9 +8,10 @@ CHANGELOG=$(git log --no-merges --date=short --pretty=format:'- %h %an %ad %s' "
cat <<EOF
## ${RELEASE}
KubeVPN ${RELEASE} is available now !
KubeVPN ${RELEASE} is available now ! 🎉
- fix known bugs 🛠
## Installation and Upgrading
You can download binary file to use it directly
wget -LO "https://github.com/KubeNetworks/kubevpn/releases/download/$(curl -L -s https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/plugins/stable.txt)/kubevpn_$(curl -L -s https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/plugins/stable.txt)_darwin_amd64.zip"
## Changelog
${CHANGELOG}
EOF

View File

@@ -13,7 +13,8 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.18
go-version: '1.20'
check-latest: true
- name: Checkout code
uses: actions/checkout@v2
with:
@@ -58,11 +59,48 @@ jobs:
- name: Push image to docker hub
run: |
echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USER }} --password-stdin
docker buildx create --use
make container
- name: Repository Dispatch
uses: peter-evans/repository-dispatch@v1
uses: aurelien-baudet/workflow-dispatch@v2
with:
workflow: Upload_release
token: ${{ secrets.REPOSITORYDISPATCH }}
event-type: release-event
client-payload: '{"url": "${{ steps.create_release.outputs.upload_url }}", "tag": "${{ github.ref }}"}'
inputs: '{"url": "${{ steps.create_release.outputs.upload_url }}", "tag": "${{ github.ref_name }}"}'
- name: Make changes to pull request
run: make version && echo ${GITHUB_REF#refs/*/} > plugins/stable.txt
- name: Create Pull Request
id: cpr
uses: peter-evans/create-pull-request@v4
with:
add-paths: |
*.yaml
plugins/stable.txt
token: ${{ secrets.REPOSITORYDISPATCH }}
commit-message: "feat: update krew index version to ${{ github.ref }}"
committer: GitHub <noreply@github.com>
author: ${{ github.actor }} <${{ github.actor }}@users.noreply.github.com>
signoff: false
branch: feat/update-krew-index-version
base: master
delete-branch: true
title: 'feat: update krew index version to ${{ github.ref }}'
body: |
update report
- update with *today's* date
- update krew index version to ${{ github.ref }}
labels: |
report
automated pr
# team-reviewers: |
# owners
# maintainers
draft: false
# - name: Update new version in krew-index
# uses: rajatjindal/krew-release-bot@v0.0.43
# with:
# krew_template_file: .github/krew.yaml
# debug: true

View File

@@ -14,10 +14,12 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.18
go-version: '1.20'
check-latest: true
- name: Push image to docker hub
run: |
echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USER }} --password-stdin
docker buildx create --use
export VERSION=test
make container
linux:
@@ -29,11 +31,14 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.18
go-version: '1.20'
check-latest: true
- name: Setup Minikube
id: minikube
timeout-minutes: 30
uses: medyagh/setup-minikube@master
with:
cache: true
- name: Kubernetes info
run: |
@@ -41,28 +46,43 @@ jobs:
cat ~/.kube/config
kubectl get pods -n kube-system -o wide
- name: Install demo bookinfo
run: kubectl apply -f https://raw.githubusercontent.com/wencaiwulue/kubevpn/master/samples/bookinfo.yaml
run: |
minikube image load --remote istio/examples-bookinfo-details-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-ratings-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-reviews-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-productpage-v1:1.16.2
minikube image load --remote naison/authors:latest
minikube image load --remote nginx:latest
minikube image load --remote naison/kubevpn:test
minikube image ls
eval $(minikube docker-env)
kubectl apply -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
- name: Build
run: |
export VERSION=test
make kubevpn-linux-amd64
chmod +x ./bin/kubevpn-linux-amd64
cp ./bin/kubevpn-linux-amd64 /usr/local/bin/kubevpn
chmod +x ./bin/kubevpn
cp ./bin/kubevpn /usr/local/bin/kubevpn
kubevpn version
- name: Wait for pods reviews to be ready
run: |
kubectl wait pods -l app=reviews --for=condition=Ready --timeout=3600s
kubectl wait pods -l app=productpage --for=condition=Ready --timeout=3600s
kubectl get svc -A -o wide
kubectl get pod -A -o wide
kubectl get all -o wide
kubectl get nodes -o yaml
ifconfig
route -n
sudo ln /usr/bin/resolvectl /usr/bin/systemd-resolve
- name: Test
run: go test -v ./pkg/test/function_test.go
run: go test -v -failfast ./... -timeout=60m
macos:
runs-on: macos-10.15
runs-on: macos-latest
needs: [ "image" ]
steps:
- uses: actions/checkout@v2
@@ -70,30 +90,19 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.18
- uses: docker-practice/actions-setup-docker@master
- name: Pull image in advance
run: |
rm '/usr/local/bin/kubectl'
set -x
docker version
tag=`echo ${{ github.ref }} | sed 's/refs\/tags\///' | sed 's/\(.*\)-.*/\1/' | sed 's/-[0-9]*$//' || true`
docker pull naison/kubevpn:test || true
docker pull naison/kubevpn:latest || true
docker pull naison/kubevpn:${tag} || true
docker pull naison/kubevpn-mesh:test || true
docker pull naison/kubevpn-mesh:latest || true
docker pull naison/kubevpn-mesh:${tag} || true
docker pull naison/envoy-xds-server:test || true
docker pull naison/envoy-xds-server:latest || true
docker pull naison/envoy-xds-server:${tag} || true
go-version: '1.20'
check-latest: true
- name: Set up Docker
uses: crazy-max/ghaction-setup-docker@v1
- name: Install minikube
run: |
set -x
docker version
brew install minikube
minikube start --driver=docker
kubectl get po -A
minikube kubectl -- get po -A
kubectl get pod -A -o wide
minikube kubectl -- get pod -A -o wide
- name: Kubernetes info
run: |
@@ -102,62 +111,63 @@ jobs:
kubectl get pods -n kube-system -o wide
- name: Install demo bookinfo
run: kubectl apply -f https://raw.githubusercontent.com/wencaiwulue/kubevpn/master/samples/bookinfo.yaml
run: |
minikube image load --remote istio/examples-bookinfo-details-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-ratings-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-reviews-v1:1.16.2
minikube image load --remote istio/examples-bookinfo-productpage-v1:1.16.2
minikube image load --remote naison/authors:latest
minikube image load --remote nginx:latest
minikube image load --remote naison/kubevpn:test
minikube image ls
eval $(minikube docker-env)
kubectl apply -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
- name: Build
run: |
export VERSION=test
make kubevpn-darwin-amd64
chmod +x ./bin/kubevpn-darwin-amd64
cp ./bin/kubevpn-darwin-amd64 /usr/local/bin/kubevpn
chmod +x ./bin/kubevpn
cp ./bin/kubevpn /usr/local/bin/kubevpn
kubevpn version
- name: Wait for pods reviews to be ready
run: |
kubectl wait pods -l app=reviews --for=condition=Ready --timeout=3600s
kubectl get all -o wide
kubectl get nodes -o yaml
kubectl wait pods -l app=productpage --for=condition=Ready --timeout=3600s
kubectl get svc -A -o wide || true
kubectl get pod -A -o wide || true
kubectl get all -o wide || true
kubectl get nodes -o yaml || true
ifconfig
netstat -anr
- name: Test
run: go test -v ./pkg/test/function_test.go
run: go test -v -failfast ./... -timeout=60m
# windows:
# runs-on: windows-latest
# steps:
# - uses: actions/checkout@v2
#
# - name: Set up Go
# uses: actions/setup-go@v2
# with:
# go-version: 1.18
# # - run: |
# # choco install docker-desktop
# # docker version
# # docker run --rm hello-world
# - run: |
# choco install virtualbox
# choco install minikube
# minikube start --driver=virtualbox
# minikube kubectl -- get po -A
# choco install make
# - name: Kubernetes info
# run: |
# kubectl cluster-info dump
# kubectl get pods -n kube-system -o wide
# - name: Install demo bookinfo
# run: kubectl apply -f https://raw.githubusercontent.com/wencaiwulue/kubevpn/master/samples/bookinfo.yaml
#
# - name: Build
# run: make kubevpn-windows
#
# - name: Wait for pods reviews to be ready
# run: |
# kubectl wait pods -l app=reviews --for=condition=Ready --timeout=600s
# kubectl get all -o wide
# kubectl get nodes -o yaml
# ipconfig
#
# - name: Test
# run: go test -v ./test/
windows:
runs-on: windows-latest
needs: [ "image" ]
steps:
- uses: actions/checkout@v2
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: '1.20'
- name: Set up Docker
uses: crazy-max/ghaction-setup-docker@v1
- run: |
dir C:\hostedtoolcache\windows\docker-stable\24.0.5\x64
C:\hostedtoolcache\windows\docker-stable\24.0.5\x64\docker-proxy.exe --help
C:\hostedtoolcache\windows\docker-stable\24.0.5\x64\dockerd.exe --help
C:\hostedtoolcache\windows\docker-stable\24.0.5\x64\docker.exe --help
docker info --format '{{.OSType}}'
- run: |
choco install minikube
minikube start --driver=docker
choco install make
- name: Build
run: make kubevpn-windows-amd64

View File

@@ -1,8 +1,14 @@
name: Upload release
name: Upload_release
on:
repository_dispatch:
types: [ release-event ]
workflow_dispatch:
inputs:
url:
description: 'github release url'
required: true
tag:
description: 'latest tag'
required: true
jobs:
build:
@@ -10,35 +16,40 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
os-arch: [
kubevpn-darwin-amd64,
kubevpn-darwin-arm64,
kubevpn-windows-amd64.exe,
kubevpn-windows-arm64.exe,
kubevpn-windows-386.exe,
kubevpn-linux-amd64,
kubevpn-linux-arm64,
kubevpn-linux-386,
]
os: [ darwin, windows, linux ]
arch: [ amd64, arm64, 386 ]
exclude:
- os: darwin
arch: 386
steps:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.18
go-version: '1.20'
check-latest: true
- name: Checkout code
uses: actions/checkout@v2
- name: Build kubevpn-all-arch
- name: Build kubevpn
run: |
git tag `echo ${{ github.event.client_payload.tag }} | sed 's/refs\/tags\///' | sed 's/\(.*\)-.*/\1/' | sed 's/-[0-9]*$//' || true` || true
make all-kubevpn
git tag ${{ github.event.inputs.tag }} || true
export GitHubOAuthToken=${{ secrets.KUBEVPN_UPGRADE_OAUTH }}
make kubevpn-${{ matrix.os }}-${{ matrix.arch }}
SUFFIX=""
if [ "${{ matrix.os }}" = "windows" ]; then
SUFFIX=".exe"
fi
shasum -a 256 ./bin/kubevpn${SUFFIX} | awk '{print $1}' > checksums.txt
zip -r kubevpn_${{ github.event.inputs.tag }}_${{ matrix.os }}_${{ matrix.arch }}.zip ./bin/kubevpn${SUFFIX} LICENSE README.md README_ZH.md checksums.txt
- name: Upload Release Asset
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ github.event.client_payload.url }}
asset_path: ./bin/${{ matrix.os-arch }}
asset_name: ${{ matrix.os-arch }}
asset_content_type: application/octet-stream
upload_url: ${{ github.event.inputs.url }}
asset_path: ./kubevpn_${{ github.event.inputs.tag }}_${{ matrix.os }}_${{ matrix.arch }}.zip
asset_name: kubevpn_${{ github.event.inputs.tag }}_${{ matrix.os }}_${{ matrix.arch }}.zip
asset_content_type: application/zip

3
.gitignore vendored
View File

@@ -20,3 +20,6 @@
# Build artifacts
bin
vendor/
*.DS_Store

View File

@@ -21,21 +21,22 @@ IMAGE_DEFAULT = docker.io/naison/kubevpn:latest
# Setup the -ldflags option for go build here, interpolate the variable values
LDFLAGS=--ldflags "\
-X ${BASE}/pkg/config.Image=${IMAGE} \
-X ${FOLDER}/cmds.Version=${VERSION} \
-X ${BASE}/pkg/config.Version=${VERSION} \
-X ${FOLDER}/cmds.BuildTime=${BUILD_TIME} \
-X ${FOLDER}/cmds.GitCommit=${GIT_COMMIT} \
-X ${FOLDER}/cmds.Branch=${BRANCH} \
-X ${FOLDER}/cmds.OsArch=${OS_ARCH} \
-X ${FOLDER}/cmds.GitHubOAuthToken=${GitHubOAuthToken} \
"
GO111MODULE=on
GOPROXY=https://goproxy.cn,direct
.PHONY: all
all: all-kubevpn container
all: kubevpn-all container
.PHONY: all-kubevpn
all-kubevpn: kubevpn-darwin-amd64 kubevpn-darwin-arm64 \
.PHONY: kubevpn-all
kubevpn-all: kubevpn-darwin-amd64 kubevpn-darwin-arm64 \
kubevpn-windows-amd64 kubevpn-windows-386 kubevpn-windows-arm64 \
kubevpn-linux-amd64 kubevpn-linux-386 kubevpn-linux-arm64
@@ -46,54 +47,54 @@ kubevpn:
# ---------darwin-----------
.PHONY: kubevpn-darwin-amd64
kubevpn-darwin-amd64:
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build ${LDFLAGS} -o $(OUTPUT_DIR)/kubevpn-darwin-amd64 ${FOLDER}
chmod +x $(OUTPUT_DIR)/kubevpn-darwin-amd64
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build ${LDFLAGS} -o $(OUTPUT_DIR)/kubevpn ${FOLDER}
chmod +x $(OUTPUT_DIR)/kubevpn
.PHONY: kubevpn-darwin-arm64
kubevpn-darwin-arm64:
CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build ${LDFLAGS} -o $(OUTPUT_DIR)/kubevpn-darwin-arm64 ${FOLDER}
chmod +x $(OUTPUT_DIR)/kubevpn-darwin-arm64
CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build ${LDFLAGS} -o $(OUTPUT_DIR)/kubevpn ${FOLDER}
chmod +x $(OUTPUT_DIR)/kubevpn
# ---------darwin-----------
# ---------windows-----------
.PHONY: kubevpn-windows-amd64
kubevpn-windows-amd64:
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build ${LDFLAGS} -o $(OUTPUT_DIR)/kubevpn-windows-amd64.exe ${FOLDER}
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build ${LDFLAGS} -o $(OUTPUT_DIR)/kubevpn.exe ${FOLDER}
.PHONY: kubevpn-windows-arm64
kubevpn-windows-arm64:
CGO_ENABLED=0 GOOS=windows GOARCH=arm64 go build ${LDFLAGS} -o $(OUTPUT_DIR)/kubevpn-windows-arm64.exe ${FOLDER}
CGO_ENABLED=0 GOOS=windows GOARCH=arm64 go build ${LDFLAGS} -o $(OUTPUT_DIR)/kubevpn.exe ${FOLDER}
.PHONY: kubevpn-windows-386
kubevpn-windows-386:
CGO_ENABLED=0 GOOS=windows GOARCH=386 go build ${LDFLAGS} -o $(OUTPUT_DIR)/kubevpn-windows-386.exe ${FOLDER}
CGO_ENABLED=0 GOOS=windows GOARCH=386 go build ${LDFLAGS} -o $(OUTPUT_DIR)/kubevpn.exe ${FOLDER}
# ---------windows-----------
# ---------linux-----------
.PHONY: kubevpn-linux-amd64
kubevpn-linux-amd64:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build ${LDFLAGS} -o $(OUTPUT_DIR)/kubevpn-linux-amd64 ${FOLDER}
chmod +x $(OUTPUT_DIR)/kubevpn-linux-amd64
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build ${LDFLAGS} -o $(OUTPUT_DIR)/kubevpn ${FOLDER}
chmod +x $(OUTPUT_DIR)/kubevpn
.PHONY: kubevpn-linux-arm64
kubevpn-linux-arm64:
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build ${LDFLAGS} -o $(OUTPUT_DIR)/kubevpn-linux-arm64 ${FOLDER}
chmod +x $(OUTPUT_DIR)/kubevpn-linux-arm64
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build ${LDFLAGS} -o $(OUTPUT_DIR)/kubevpn ${FOLDER}
chmod +x $(OUTPUT_DIR)/kubevpn
.PHONY: kubevpn-linux-386
kubevpn-linux-386:
CGO_ENABLED=0 GOOS=linux GOARCH=386 go build ${LDFLAGS} -o $(OUTPUT_DIR)/kubevpn-linux-386 ${FOLDER}
chmod +x $(OUTPUT_DIR)/kubevpn-linux-386
CGO_ENABLED=0 GOOS=linux GOARCH=386 go build ${LDFLAGS} -o $(OUTPUT_DIR)/kubevpn ${FOLDER}
chmod +x $(OUTPUT_DIR)/kubevpn
# ---------linux-----------
.PHONY: container
container:
docker build -t ${IMAGE} -f $(BUILD_DIR)/Dockerfile .
docker push ${IMAGE}
docker tag ${IMAGE} ${IMAGE_DEFAULT}
docker push ${IMAGE_DEFAULT}
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE} -t ${IMAGE_DEFAULT} -f $(BUILD_DIR)/Dockerfile --push .
############################ build local
.PHONY: container-local
container-local: kubevpn-linux-amd64
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./bin/envoy-xds-server ./cmd/mesh
docker build -t ${IMAGE} -f $(BUILD_DIR)/local.Dockerfile .
docker push ${IMAGE}
docker tag ${IMAGE} ${IMAGE_DEFAULT}
docker push ${IMAGE_DEFAULT}
docker buildx build --platform linux/amd64,linux/arm64 -t docker.io/naison/kubevpn:latest -f $(BUILD_DIR)/local.Dockerfile --push .
.PHONY: container-test
container-test: kubevpn-linux-amd64
docker buildx build --platform linux/amd64,linux/arm64 -t docker.io/naison/kubevpn:test -f $(BUILD_DIR)/test.Dockerfile --push .
.PHONY: version
version:
go run github.com/wencaiwulue/kubevpn/pkg/util/krew

552
README.md
View File

@@ -1,36 +1,65 @@
![kubevpn](samples/flat_log.png)
[![GitHub Workflow][1]](https://github.com/KubeNetworks/kubevpn/actions)
[![Go Version][2]](https://github.com/KubeNetworks/kubevpn/blob/master/go.mod)
[![Go Report][3]](https://goreportcard.com/badge/github.com/KubeNetworks/kubevpn)
[![Maintainability][4]](https://codeclimate.com/github/KubeNetworks/kubevpn/maintainability)
[![GitHub License][5]](https://github.com/KubeNetworks/kubevpn/blob/main/LICENSE)
[![Docker Pulls][6]](https://hub.docker.com/r/naison/kubevpn)
[![Releases][7]](https://github.com/KubeNetworks/kubevpn/releases)
[1]: https://img.shields.io/github/actions/workflow/status/KubeNetworks/kubevpn/release.yml?logo=github
[2]: https://img.shields.io/github/go-mod/go-version/KubeNetworks/kubevpn?logo=go
[3]: https://img.shields.io/badge/go%20report-A+-brightgreen.svg?style=flat
[4]: https://api.codeclimate.com/v1/badges/b5b30239174fc6603aca/maintainability
[5]: https://img.shields.io/github/license/KubeNetworks/kubevpn
[6]: https://img.shields.io/docker/pulls/naison/kubevpn?logo=docker
[7]: https://img.shields.io/github/v/release/KubeNetworks/kubevpn?logo=smartthings
# KubeVPN
[中文](README_ZH.md) | [English](README.md) | [Wiki](https://github.com/wencaiwulue/kubevpn/wiki/Architecture)
[中文](README_ZH.md) | [English](README.md) | [Wiki](https://github.com/KubeNetworks/kubevpn/wiki/Architecture)
A tool which can connect to a kubernetes cluster network; you can access the remote kubernetes cluster network, and remote
kubernetes cluster services can also access your local service
KubeVPN is a Cloud Native Dev Environment. It connects to the kubernetes cluster network, so you can access the remote kubernetes
cluster network, while remote
kubernetes cluster services can also access your local service. Moreover, you can run your kubernetes pod in a local Docker
container with the same environment, volumes, and network. You can develop your application entirely on your local PC.
## QuickStart
#### Install from GitHub release
[LINK](https://github.com/KubeNetworks/kubevpn/releases/latest)
#### Install from custom krew index
```shell
git clone https://github.com/wencaiwulue/kubevpn.git
cd kubevpn
make kubevpn-linux-amd64
make kubevpn-darwin-amd64
make kubevpn-windows-amd64
(
kubectl krew index add kubevpn https://github.com/KubeNetworks/kubevpn.git && \
kubectl krew install kubevpn/kubevpn && kubectl kubevpn
)
```
if you are using windows, you can build by this command:
#### Install from build it manually
```shell
go build github.com/wencaiwulue/kubevpn/cmd/kubevpn -o kubevpn.exe
```
(
git clone https://github.com/KubeNetworks/kubevpn.git && \
cd kubevpn && make kubevpn && ./bin/kubevpn
)
if you have installed Go 1.16+, you can install it directly with this command:
```shell
go install github.com/wencaiwulue/kubevpn/cmd/kubevpn@latest
```
### Install bookinfo as demo application
```shell
kubectl apply -f https://raw.githubusercontent.com/wencaiwulue/kubevpn/master/samples/bookinfo.yaml
kubectl apply -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
```
## Functions
@@ -39,28 +68,49 @@ kubectl apply -f https://raw.githubusercontent.com/wencaiwulue/kubevpn/master/sa
```shell
➜ ~ kubevpn connect
INFO[0000] [sudo kubevpn connect]
Password:
2022/02/05 12:09:22 connect.go:303: kubeconfig path: /Users/naison/.kube/config, namespace: default, services: []
2022/02/05 12:09:28 remote.go:47: traffic manager not exist, try to create it...
2022/02/05 12:09:28 remote.go:121: pod kubevpn.traffic.manager status is Pending
2022/02/05 12:09:29 remote.go:121: pod kubevpn.traffic.manager status is Running
Forwarding from 0.0.0.0:10800 -> 10800
2022/02/05 12:09:31 connect.go:171: port forward ready
2022/02/05 12:09:31 connect.go:193: your ip is 223.254.254.176
2022/02/05 12:09:31 connect.go:197: tunnel connected
Handling connection for 10800
2022/02/05 12:09:31 connect.go:211: dns service ok
get cidr from cluster info...
get cidr from cluster info ok
get cidr from cni...
get cidr from svc...
get cidr from svc ok
traffic manager not exist, try to create it...
pod [kubevpn-traffic-manager] status is Pending
Container Reason Message
pod [kubevpn-traffic-manager] status is Pending
Container Reason Message
control-plane ContainerCreating
vpn ContainerCreating
webhook ContainerCreating
pod [kubevpn-traffic-manager] status is Running
Container Reason Message
control-plane ContainerRunning
vpn ContainerRunning
webhook ContainerRunning
update ref count successfully
port forward ready
your ip is 223.254.0.101
tunnel connected
dns service ok
---------------------------------------------------------------------------
Now you can access resources in the kubernetes cluster, enjoy it :)
---------------------------------------------------------------------------
```
**after you see this prompt, then leave this terminal alone, open a new terminal, continue operation**
```shell
➜ ~ kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
details-7db5668668-mq9qr 1/1 Running 0 7m 172.27.0.199 172.30.0.14 <none> <none>
kubevpn.traffic.manager 1/1 Running 0 74s 172.27.0.207 172.30.0.14 <none> <none>
productpage-8f9d86644-z8snh 1/1 Running 0 6m59s 172.27.0.206 172.30.0.14 <none> <none>
ratings-859b96848d-68d7n 1/1 Running 0 6m59s 172.27.0.201 172.30.0.14 <none> <none>
reviews-dcf754f9d-46l4j 1/1 Running 0 6m59s 172.27.0.202 172.30.0.14 <none> <none>
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
details-7db5668668-mq9qr 1/1 Running 0 7m 172.27.0.199 172.30.0.14 <none> <none>
kubevpn-traffic-manager-99f8c8d77-x9xjt 1/1 Running 0 74s 172.27.0.207 172.30.0.14 <none> <none>
productpage-8f9d86644-z8snh 1/1 Running 0 6m59s 172.27.0.206 172.30.0.14 <none> <none>
ratings-859b96848d-68d7n 1/1 Running 0 6m59s 172.27.0.201 172.30.0.14 <none> <none>
reviews-dcf754f9d-46l4j 1/1 Running 0 6m59s 172.27.0.202 172.30.0.14 <none> <none>
```
```shell
@@ -126,22 +176,31 @@ reviews ClusterIP 172.27.255.155 <none> 9080/TCP 9m6s app=
### Reverse proxy
```shell
➜ ~ kubevpn connect --workloads=service/productpage
INFO[0000] [sudo kubevpn connect --workloads=service/productpage]
Password:
2022/02/05 12:18:22 connect.go:303: kubeconfig path: /Users/naison/.kube/config, namespace: default, services: [service/productpage]
2022/02/05 12:18:28 remote.go:47: traffic manager not exist, try to create it...
2022/02/05 12:18:28 remote.go:121: pod kubevpn.traffic.manager status is Pending
2022/02/05 12:18:29 remote.go:121: pod kubevpn.traffic.manager status is Running
➜ ~ kubevpn proxy deployment/productpage
got cidr from cache
traffic manager not exist, try to create it...
pod [kubevpn-traffic-manager] status is Running
Container Reason Message
control-plane ContainerRunning
vpn ContainerRunning
webhook ContainerRunning
update ref count successfully
Waiting for deployment "productpage" rollout to finish: 1 out of 2 new replicas have been updated...
Waiting for deployment "productpage" rollout to finish: 1 out of 2 new replicas have been updated...
Waiting for deployment "productpage" rollout to finish: 1 out of 2 new replicas have been updated...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
deployment "productpage" successfully rolled out
Forwarding from 0.0.0.0:10800 -> 10800
2022/02/05 12:18:34 connect.go:171: port forward ready
2022/02/05 12:18:34 connect.go:193: your ip is 223.254.254.176
2022/02/05 12:18:34 connect.go:197: tunnel connected
Handling connection for 10800
2022/02/05 12:18:35 connect.go:211: dns service ok
port forward ready
your ip is 223.254.0.101
tunnel connected
dns service ok
---------------------------------------------------------------------------
Now you can access resources in the kubernetes cluster, enjoy it :)
---------------------------------------------------------------------------
```
```go
@@ -169,24 +228,34 @@ Hello world!%
### Reverse proxy with mesh
Only support HTTP and GRPC, with specific header `"a: 1"` will route to your local machine
Support HTTP, GRPC and WebSocket etc. with specific header `"a: 1"` will route to your local machine
```shell
➜ ~ kubevpn connect --workloads=service/productpage --headers a=1
INFO[0000] [sudo kubevpn connect --workloads=service/productpage --headers a=1]
2022/02/05 12:22:28 connect.go:303: kubeconfig path: /Users/naison/.kube/config, namespace: default, services: [service/productpage]
2022/02/05 12:22:34 remote.go:47: traffic manager not exist, try to create it...
2022/02/05 12:22:34 remote.go:121: pod kubevpn.traffic.manager status is Pending
2022/02/05 12:22:36 remote.go:121: pod kubevpn.traffic.manager status is Running
➜ ~ kubevpn proxy deployment/productpage --headers a=1
got cidr from cache
traffic manager not exist, try to create it...
pod [kubevpn-traffic-manager] status is Running
Container Reason Message
control-plane ContainerRunning
vpn ContainerRunning
webhook ContainerRunning
update ref count successfully
Waiting for deployment "productpage" rollout to finish: 1 out of 2 new replicas have been updated...
Waiting for deployment "productpage" rollout to finish: 1 out of 2 new replicas have been updated...
Waiting for deployment "productpage" rollout to finish: 1 out of 2 new replicas have been updated...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
deployment "productpage" successfully rolled out
Forwarding from 0.0.0.0:10800 -> 10800
2022/02/05 12:22:43 connect.go:171: port forward ready
2022/02/05 12:22:43 connect.go:193: your ip is 223.254.254.176
2022/02/05 12:22:43 connect.go:197: tunnel connected
Handling connection for 10800
2022/02/05 12:22:43 connect.go:211: dns service ok
port forward ready
your ip is 223.254.0.101
tunnel connected
dns service ok
---------------------------------------------------------------------------
Now you can access resources in the kubernetes cluster, enjoy it :)
---------------------------------------------------------------------------
```
```shell
@@ -206,12 +275,212 @@ Handling connection for 10800
Hello world!%
```
### Dev mode in local 🐳
Run the Kubernetes pod in the local Docker container, and cooperate with the service mesh to intercept the traffic with
the specified header to the local, or all the traffic to the local.
```shell
➜ ~ kubevpn -n kube-system --headers a=1 -p 9080:9080 -p 80:80 dev deployment/authors
got cidr from cache
update ref count successfully
traffic manager already exist, reuse it
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
deployment "authors" successfully rolled out
port forward ready
tunnel connected
dns service ok
tar: removing leading '/' from member names
/var/folders/4_/wt19r8113kq_mfws8sb_w1z00000gn/T/3264799524258261475:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading '/' from member names
tar: Removing leading '/' from hard link targets
/var/folders/4_/wt19r8113kq_mfws8sb_w1z00000gn/T/4472770436329940969:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading '/' from member names
tar: Removing leading '/' from hard link targets
/var/folders/4_/wt19r8113kq_mfws8sb_w1z00000gn/T/359584695576599326:/var/run/secrets/kubernetes.io/serviceaccount
Created container: authors_kube-system_kubevpn_a7d82
Wait container authors_kube-system_kubevpn_a7d82 to be running...
Container authors_kube-system_kubevpn_a7d82 is running on port 9080/tcp:32771 now
Created container: nginx_kube-system_kubevpn_a7d82
Wait container nginx_kube-system_kubevpn_a7d82 to be running...
Container nginx_kube-system_kubevpn_a7d82 is running now
/opt/microservices # ls
app
/opt/microservices # ps -ef
PID USER TIME COMMAND
1 root 0:00 ./app
10 root 0:00 nginx: master process nginx -g daemon off;
32 root 0:00 /bin/sh
44 101 0:00 nginx: worker process
45 101 0:00 nginx: worker process
46 101 0:00 nginx: worker process
47 101 0:00 nginx: worker process
49 root 0:00 ps -ef
/opt/microservices # apk add curl
fetch https://dl-cdn.alpinelinux.org/alpine/v3.14/main/x86_64/APKINDEX.tar.gz
fetch https://dl-cdn.alpinelinux.org/alpine/v3.14/community/x86_64/APKINDEX.tar.gz
(1/4) Installing brotli-libs (1.0.9-r5)
(2/4) Installing nghttp2-libs (1.43.0-r0)
(3/4) Installing libcurl (7.79.1-r5)
(4/4) Installing curl (7.79.1-r5)
Executing busybox-1.33.1-r3.trigger
OK: 8 MiB in 19 packages
/opt/microservices # curl localhost:9080
404 page not found
/opt/microservices # curl localhost:9080/health
{"status":"Authors is healthy"}/opt/microservices # exit
prepare to exit, cleaning up
update ref count successfully
clean up successful
```
You can see that it will start up two containers with docker, mapped to the pod's two containers, sharing ports on the same
network; you can use `localhost:port`
to access the other container. Moreover, all the environment, volumes and network are the same as in the remote kubernetes pod; it is
truly consistent with the kubernetes runtime. This makes developing on a local PC come true.
```shell
➜ ~ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
de9e2f8ab57d nginx:latest "/docker-entrypoint.…" 5 seconds ago Up 5 seconds nginx_kube-system_kubevpn_e21d8
28aa30e8929e naison/authors:latest "./app" 6 seconds ago Up 5 seconds 0.0.0.0:80->80/tcp, 0.0.0.0:9080->9080/tcp authors_kube-system_kubevpn_e21d8
➜ ~
```
If you want to specify the image used to start the container locally, you can use the parameter `--docker-image`. When the
image does not exist locally, it will be pulled from the corresponding image registry. If you want to specify startup
parameters, you can use the `--entrypoint` parameter, replacing it with the command you want to execute, such
as `--entrypoint /bin/bash`; for more parameters, see `kubevpn dev --help`.
### DinD ( Docker in Docker ) use kubevpn in Docker
If you want to start the development mode locally using Docker in Docker (DinD), then because the program will read and
write the `/tmp` directory, you need to manually add the parameter `-v /tmp:/tmp` (outer docker); you also need to
specify the parameter `--network` (inner docker) for sharing the network and pid
Example:
```shell
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v /Users/naison/.kube/config:/root/.kube/config naison/kubevpn:v1.1.35
```
```shell
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -c authors -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v /Users/naison/.kube/vke:/root/.kube/config -v /Users/naison/Desktop/kubevpn/bin:/app naison/kubevpn:v1.1.35
root@4d0c3c4eae2b:/# hostname
4d0c3c4eae2b
root@4d0c3c4eae2b:/# kubevpn -n kube-system --image naison/kubevpn:v1.1.35 --headers user=naison --network container:4d0c3c4eae2b --entrypoint /bin/bash dev deployment/authors
----------------------------------------------------------------------------------
Warn: Use sudo to execute command kubevpn can not use user env KUBECONFIG.
Because of sudo user env and user env are different.
Current env KUBECONFIG value:
----------------------------------------------------------------------------------
got cidr from cache
traffic manager not exist, try to create it...
pod [kubevpn-traffic-manager] status is Pending
Container Reason Message
pod [kubevpn-traffic-manager] status is Pending
Container Reason Message
control-plane ContainerCreating
vpn ContainerCreating
webhook ContainerCreating
pod [kubevpn-traffic-manager] status is Running
Container Reason Message
control-plane ContainerRunning
vpn ContainerRunning
webhook ContainerRunning
update ref count successfully
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
deployment "authors" successfully rolled out
port forward ready
tunnel connected
dns service ok
tar: removing leading '/' from member names
/tmp/3122262358661539581:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading '/' from member names
tar: Removing leading '/' from hard link targets
/tmp/7677066538742627822:/var/run/secrets/kubernetes.io/serviceaccount
latest: Pulling from naison/authors
Digest: sha256:2e7b2d6a4c6143cde888fcdb70ba091d533e11de70e13e151adff7510a5d52d4
Status: Downloaded newer image for naison/authors:latest
Created container: authors_kube-system_kubevpn_c68e4
Wait container authors_kube-system_kubevpn_c68e4 to be running...
Container authors_kube-system_kubevpn_c68e4 is running now
Created container: nginx_kube-system_kubevpn_c68e7
Wait container nginx_kube-system_kubevpn_c68e7 to be running...
Container nginx_kube-system_kubevpn_c68e7 is running now
/opt/microservices # ps -ef
PID USER TIME COMMAND
1 root 0:00 {bash} /usr/bin/qemu-x86_64 /bin/bash /bin/bash
60 root 0:07 {kubevpn} /usr/bin/qemu-x86_64 kubevpn kubevpn dev deployment/authors -n kube-system --image naison/kubevpn:v1.1.35 --headers user=naison --parent
73 root 0:00 {tail} /usr/bin/qemu-x86_64 /usr/bin/tail tail -f /dev/null
80 root 0:00 {nginx} /usr/bin/qemu-x86_64 /usr/sbin/nginx nginx -g daemon off;
92 root 0:00 {sh} /usr/bin/qemu-x86_64 /bin/sh /bin/sh
156 101 0:00 {nginx} /usr/bin/qemu-x86_64 /usr/sbin/nginx nginx -g daemon off;
158 101 0:00 {nginx} /usr/bin/qemu-x86_64 /usr/sbin/nginx nginx -g daemon off;
160 101 0:00 {nginx} /usr/bin/qemu-x86_64 /usr/sbin/nginx nginx -g daemon off;
162 101 0:00 {nginx} /usr/bin/qemu-x86_64 /usr/sbin/nginx nginx -g daemon off;
164 root 0:00 ps -ef
/opt/microservices # ls
app
/opt/microservices # apk add curl
fetch https://dl-cdn.alpinelinux.org/alpine/v3.14/main/x86_64/APKINDEX.tar.gz
fetch https://dl-cdn.alpinelinux.org/alpine/v3.14/community/x86_64/APKINDEX.tar.gz
(1/4) Installing brotli-libs (1.0.9-r5)
(2/4) Installing nghttp2-libs (1.43.0-r0)
(3/4) Installing libcurl (7.79.1-r5)
(4/4) Installing curl (7.79.1-r5)
Executing busybox-1.33.1-r3.trigger
OK: 8 MiB in 19 packages
/opt/microservices # curl localhost:80
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
/opt/microservices # ls
app
/opt/microservices # exit
prepare to exit, cleaning up
update ref count successfully
ref-count is zero, prepare to clean up resource
clean up successful
root@4d0c3c4eae2b:/# exit
exit
```
### Multiple Protocol
- TCP
- UDP
- HTTP
- ICMP
- GRPC
- WebSocket
- HTTP
- ...
### Cross-platform
@@ -223,3 +492,164 @@ Hello world!%
on Windows platform, you need to
install [PowerShell](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.2)
in advance
## FAQ
### 1, What should I do if the dependent image cannot be pulled, or the inner environment cannot access docker.io?
Answer: here are two solution to solve this problem
- Solution 1: In a network that can access docker.io, transfer the image shown by the command `kubevpn version` to your own
private image registry, and then add the option `--image` to specify that image when starting the command.
Example:
``` shell
➜ ~ kubevpn version
KubeVPN: CLI
Version: v1.1.35
Image: docker.io/naison/kubevpn:v1.1.35
Branch: master
Git commit: 87dac42dad3d8f472a9dcdfc2c6cd801551f23d1
Built time: 2023-01-15 04:19:45
Built OS/Arch: linux/amd64
Built Go version: go1.18.10
➜ ~
```
Image is `docker.io/naison/kubevpn:v1.1.35`, transfer this image to private docker registry
```text
docker pull docker.io/naison/kubevpn:v1.1.35
docker tag docker.io/naison/kubevpn:v1.1.35 [docker registry]/[namespace]/[repo]:[tag]
docker push [docker registry]/[namespace]/[repo]:[tag]
```
Then you can use this image, as follows:
```text
➜ ~ kubevpn connect --image [docker registry]/[namespace]/[repo]:[tag]
got cidr from cache
traffic manager not exist, try to create it...
pod [kubevpn-traffic-manager] status is Running
...
```
- Solution 2: Use the option `--transfer-image`; enabling this flag will automatically transfer the image from the default registry to the
address specified by `--image`.
Example
```shell
➜ ~ kubevpn connect --transfer-image --image nocalhost-team-docker.pkg.coding.net/nocalhost/public/kubevpn:v1.1.33
Password:
v1.1.33: Pulling from naison/kubevpn
Digest: sha256:970c0c82a2d9cbac1595edb56a31e8fc84e02712c00a7211762efee5f66ea70c
Status: Image is up to date for naison/kubevpn:v1.1.33
The push refers to repository [nocalhost-team-docker.pkg.coding.net/nocalhost/public/kubevpn]
9d72fec6b077: Pushed
12a6a77eb79e: Pushed
c7d0f62ec57f: Pushed
5605cea4b7c8: Pushed
4231fec7b258: Pushed
babe72b5fcae: Pushed
6caa74b4bcf0: Pushed
b8a36d10656a: Pushed
v1.1.33: digest: sha256:1bc5e589bec6dc279418009b5e82ce0fd29a2c0e8b9266988964035ad7fbeba5 size: 2000
got cidr from cache
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
dns service ok
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
```
### 2, When use `kubevpn dev`, but got error code 137, how to resolve ?
```text
dns service ok
tar: Removing leading `/' from member names
tar: Removing leading `/' from hard link targets
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/7375606548554947868:/var/run/secrets/kubernetes.io/serviceaccount
Created container: server_vke-system_kubevpn_0db84
Wait container server_vke-system_kubevpn_0db84 to be running...
Container server_vke-system_kubevpn_0db84 is running on port 8888/tcp: 6789/tcp:6789 now
$ Status: , Code: 137
prepare to exit, cleaning up
port-forward occurs error, err: lost connection to pod, retrying
update ref count successfully
ref-count is zero, prepare to clean up resource
clean up successful
```
This is because the resources allocated to your docker-desktop are less than the resources the pod requests to run, so it was OOM killed.
You can allocate more resources in your docker-desktop settings: `Preferences --> Resources --> Memory`
### 3, Using WSL( Windows Sub Linux ) Docker, when use mode `kubevpn dev`, can not connect to cluster network, how to solve this problem?
Answer:
this is because Docker on WSL uses Windows' network, so even if you start a container in WSL, the container will not use the WSL
network, but the Windows network instead
Solution:
- 1): install docker in WSL, not use Windows Docker-desktop
- 2): use command `kubevpn connect` on Windows, and then startup `kubevpn dev` in WSL
- 3): startup a container using command `kubevpn connect` on Windows, and then
startup `kubevpn dev --network container:$CONTAINER_ID` in WSL
### 4, After using the command `kubevpn dev` to enter develop mode, you can't access the kubernetes api-server and the error `172.17.0.1:443 connect refused` occurs — how to solve this problem?
Answer:
The k8s network subnet may conflict with the docker subnet
Solution:
- Use option `--connect-mode container` to startup command `kubevpn dev`
- Modify `~/.docker/daemon.json`, add not conflict subnet, eg: `"bip": "172.15.0.1/24"`.
```shell
➜ ~ cat ~/.docker/daemon.json
{
"builder": {
"gc": {
"defaultKeepStorage": "20GB",
"enabled": true
}
},
"experimental": false,
"features": {
"buildkit": true
},
"insecure-registries": [
],
}
```
add subnet not conflict, eg: 172.15.0.1/24
```shell
➜ ~ cat ~/.docker/daemon.json
{
"builder": {
"gc": {
"defaultKeepStorage": "20GB",
"enabled": true
}
},
"experimental": false,
"features": {
"buildkit": true
},
"insecure-registries": [
],
"bip": "172.15.0.1/24"
}
```
restart docker and retry

View File

@@ -1,35 +1,63 @@
![kubevpn](samples/flat_log.png)
[![GitHub Workflow][1]](https://github.com/KubeNetworks/kubevpn/actions)
[![Go Version][2]](https://github.com/KubeNetworks/kubevpn/blob/master/go.mod)
[![Go Report][3]](https://goreportcard.com/badge/github.com/KubeNetworks/kubevpn)
[![Maintainability][4]](https://codeclimate.com/github/KubeNetworks/kubevpn/maintainability)
[![GitHub License][5]](https://github.com/KubeNetworks/kubevpn/blob/main/LICENSE)
[![Docker Pulls][6]](https://hub.docker.com/r/naison/kubevpn)
[![Releases][7]](https://github.com/KubeNetworks/kubevpn/releases)
[1]: https://img.shields.io/github/actions/workflow/status/KubeNetworks/kubevpn/release.yml?logo=github
[2]: https://img.shields.io/github/go-mod/go-version/KubeNetworks/kubevpn?logo=go
[3]: https://img.shields.io/badge/go%20report-A+-brightgreen.svg?style=flat
[4]: https://api.codeclimate.com/v1/badges/b5b30239174fc6603aca/maintainability
[5]: https://img.shields.io/github/license/KubeNetworks/kubevpn
[6]: https://img.shields.io/docker/pulls/naison/kubevpn?logo=docker
[7]: https://img.shields.io/github/v/release/KubeNetworks/kubevpn?logo=smartthings
# KubeVPN
[English](README.md) | [中文](README_ZH.md) | [维基](https://github.com/wencaiwulue/kubevpn/wiki/%E6%9E%B6%E6%9E%84)
[English](README.md) | [中文](README_ZH.md) | [维基](https://github.com/KubeNetworks/kubevpn/wiki/%E6%9E%B6%E6%9E%84)
一个本地连接云端 kubernetes 网络的工具,可以在本地直接访问远端集群的服务。也可以在远端集群访问到本地服务,便于调试及开发
KubeVPN 是一个云原生开发工具, 可以在本地连接云端 kubernetes 网络的工具,可以在本地直接访问远端集群的服务。也可以在远端集群访问到本地服务,便于调试及开发。同时还可以使用开发模式,直接在本地使用 Docker
将远程容器运行在本地。
## 快速开始
#### 从 Github release 下载编译好的二进制文件
[链接](https://github.com/KubeNetworks/kubevpn/releases/latest)
#### 从 自定义 Krew 仓库安装
```shell
git clone https://github.com/wencaiwulue/kubevpn.git
cd kubevpn
make kubevpn-linux-amd64
make kubevpn-darwin-amd64
make kubevpn-windows-amd64
(
kubectl krew index add kubevpn https://github.com/KubeNetworks/kubevpn.git && \
kubectl krew install kubevpn/kubevpn && kubectl kubevpn
)
```
如果你在使用 Windows 系统,可以使用下面这条命令构建:
#### 自己构建二进制文件
```shell
go build github.com/wencaiwulue/kubevpn/cmd/kubevpn -o kubevpn.exe
(
git clone https://github.com/KubeNetworks/kubevpn.git && \
cd kubevpn && make kubevpn && ./bin/kubevpn
)
```
如果安装了 Go 1.16 及以上版本,可以使用如下命令安装:
#### 安装 bookinfo 作为 demo 应用
```shell
go install github.com/wencaiwulue/kubevpn/cmd/kubevpn@latest
```
### 安装 bookinfo 作为 demo 应用
```shell
kubectl apply -f https://raw.githubusercontent.com/wencaiwulue/kubevpn/master/samples/bookinfo.yaml
kubectl apply -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
```
## 功能
@@ -38,28 +66,49 @@ kubectl apply -f https://raw.githubusercontent.com/wencaiwulue/kubevpn/master/sa
```shell
➜ ~ kubevpn connect
INFO[0000] [sudo kubevpn connect]
Password:
2022/02/05 12:09:22 connect.go:303: kubeconfig path: /Users/naison/.kube/config, namespace: default, services: []
2022/02/05 12:09:28 remote.go:47: traffic manager not exist, try to create it...
2022/02/05 12:09:28 remote.go:121: pod kubevpn.traffic.manager status is Pending
2022/02/05 12:09:29 remote.go:121: pod kubevpn.traffic.manager status is Running
Forwarding from 0.0.0.0:10800 -> 10800
2022/02/05 12:09:31 connect.go:171: port forward ready
2022/02/05 12:09:31 connect.go:193: your ip is 223.254.254.176
2022/02/05 12:09:31 connect.go:197: tunnel connected
Handling connection for 10800
2022/02/05 12:09:31 connect.go:211: dns service ok
get cidr from cluster info...
get cidr from cluster info ok
get cidr from cni...
get cidr from svc...
get cidr from svc ok
traffic manager not exist, try to create it...
pod [kubevpn-traffic-manager] status is Pending
Container Reason Message
pod [kubevpn-traffic-manager] status is Pending
Container Reason Message
control-plane ContainerCreating
vpn ContainerCreating
webhook ContainerCreating
pod [kubevpn-traffic-manager] status is Running
Container Reason Message
control-plane ContainerRunning
vpn ContainerRunning
webhook ContainerRunning
update ref count successfully
port forward ready
your ip is 223.254.0.101
tunnel connected
dns service ok
---------------------------------------------------------------------------
Now you can access resources in the kubernetes cluster, enjoy it :)
---------------------------------------------------------------------------
```
**有这个提示出来后, 当前 terminal 不要关闭,新打开一个 terminal, 执行新的操作**
```shell
➜ ~ kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
details-7db5668668-mq9qr 1/1 Running 0 7m 172.27.0.199 172.30.0.14 <none> <none>
kubevpn.traffic.manager 1/1 Running 0 74s 172.27.0.207 172.30.0.14 <none> <none>
productpage-8f9d86644-z8snh 1/1 Running 0 6m59s 172.27.0.206 172.30.0.14 <none> <none>
ratings-859b96848d-68d7n 1/1 Running 0 6m59s 172.27.0.201 172.30.0.14 <none> <none>
reviews-dcf754f9d-46l4j 1/1 Running 0 6m59s 172.27.0.202 172.30.0.14 <none> <none>
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
details-7db5668668-mq9qr 1/1 Running 0 7m 172.27.0.199 172.30.0.14 <none> <none>
kubevpn-traffic-manager-99f8c8d77-x9xjt 1/1 Running 0 74s 172.27.0.207 172.30.0.14 <none> <none>
productpage-8f9d86644-z8snh 1/1 Running 0 6m59s 172.27.0.206 172.30.0.14 <none> <none>
ratings-859b96848d-68d7n 1/1 Running 0 6m59s 172.27.0.201 172.30.0.14 <none> <none>
reviews-dcf754f9d-46l4j 1/1 Running 0 6m59s 172.27.0.202 172.30.0.14 <none> <none>
```
```shell
@@ -125,22 +174,31 @@ reviews ClusterIP 172.27.255.155 <none> 9080/TCP 9m6s app=
### 反向代理
```shell
➜ ~ kubevpn connect --workloads=service/productpage
INFO[0000] [sudo kubevpn connect --workloads=service/productpage]
Password:
2022/02/05 12:18:22 connect.go:303: kubeconfig path: /Users/naison/.kube/config, namespace: default, services: [service/productpage]
2022/02/05 12:18:28 remote.go:47: traffic manager not exist, try to create it...
2022/02/05 12:18:28 remote.go:121: pod kubevpn.traffic.manager status is Pending
2022/02/05 12:18:29 remote.go:121: pod kubevpn.traffic.manager status is Running
➜ ~ kubevpn proxy deployment/productpage
got cidr from cache
traffic manager not exist, try to create it...
pod [kubevpn-traffic-manager] status is Running
Container Reason Message
control-plane ContainerRunning
vpn ContainerRunning
webhook ContainerRunning
update ref count successfully
Waiting for deployment "productpage" rollout to finish: 1 out of 2 new replicas have been updated...
Waiting for deployment "productpage" rollout to finish: 1 out of 2 new replicas have been updated...
Waiting for deployment "productpage" rollout to finish: 1 out of 2 new replicas have been updated...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
deployment "productpage" successfully rolled out
Forwarding from 0.0.0.0:10800 -> 10800
2022/02/05 12:18:34 connect.go:171: port forward ready
2022/02/05 12:18:34 connect.go:193: your ip is 223.254.254.176
2022/02/05 12:18:34 connect.go:197: tunnel connected
Handling connection for 10800
2022/02/05 12:18:35 connect.go:211: dns service ok
port forward ready
your ip is 223.254.0.101
tunnel connected
dns service ok
---------------------------------------------------------------------------
Now you can access resources in the kubernetes cluster, enjoy it :)
---------------------------------------------------------------------------
```
```go
@@ -168,24 +226,34 @@ Hello world!%
### 反向代理支持 service mesh
支持 HTTP GRPC, 携带了指定 header `"a: 1"` 的流量,将会路由到本地
支持 HTTP, GRPC 和 WebSocket 等, 携带了指定 header `"a: 1"` 的流量,将会路由到本地
```shell
➜ ~ kubevpn connect --workloads=service/productpage --headers a=1
INFO[0000] [sudo kubevpn connect --workloads=service/productpage --headers a=1]
2022/02/05 12:22:28 connect.go:303: kubeconfig path: /Users/naison/.kube/config, namespace: default, services: [service/productpage]
2022/02/05 12:22:34 remote.go:47: traffic manager not exist, try to create it...
2022/02/05 12:22:34 remote.go:121: pod kubevpn.traffic.manager status is Pending
2022/02/05 12:22:36 remote.go:121: pod kubevpn.traffic.manager status is Running
➜ ~ kubevpn proxy deployment/productpage --headers a=1
got cidr from cache
traffic manager not exist, try to create it...
pod [kubevpn-traffic-manager] status is Running
Container Reason Message
control-plane ContainerRunning
vpn ContainerRunning
webhook ContainerRunning
update ref count successfully
Waiting for deployment "productpage" rollout to finish: 1 out of 2 new replicas have been updated...
Waiting for deployment "productpage" rollout to finish: 1 out of 2 new replicas have been updated...
Waiting for deployment "productpage" rollout to finish: 1 out of 2 new replicas have been updated...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
deployment "productpage" successfully rolled out
Forwarding from 0.0.0.0:10800 -> 10800
2022/02/05 12:22:43 connect.go:171: port forward ready
2022/02/05 12:22:43 connect.go:193: your ip is 223.254.254.176
2022/02/05 12:22:43 connect.go:197: tunnel connected
Handling connection for 10800
2022/02/05 12:22:43 connect.go:211: dns service ok
port forward ready
your ip is 223.254.0.101
tunnel connected
dns service ok
---------------------------------------------------------------------------
Now you can access resources in the kubernetes cluster, enjoy it :)
---------------------------------------------------------------------------
```
```shell
@@ -205,12 +273,206 @@ Handling connection for 10800
Hello world!%
```
### 本地进入开发模式 🐳
将 Kubernetes pod 运行在本地的 Docker 容器中,同时配合 service mesh, 拦截带有指定 header 的流量到本地,或者所有的流量到本地。这个开发模式依赖于本地 Docker。
```shell
➜ ~ kubevpn -n kube-system --headers a=1 -p 9080:9080 -p 80:80 dev deployment/authors
got cidr from cache
update ref count successfully
traffic manager already exist, reuse it
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
deployment "authors" successfully rolled out
port forward ready
tunnel connected
dns service ok
tar: removing leading '/' from member names
/var/folders/4_/wt19r8113kq_mfws8sb_w1z00000gn/T/3264799524258261475:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading '/' from member names
tar: Removing leading '/' from hard link targets
/var/folders/4_/wt19r8113kq_mfws8sb_w1z00000gn/T/4472770436329940969:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading '/' from member names
tar: Removing leading '/' from hard link targets
/var/folders/4_/wt19r8113kq_mfws8sb_w1z00000gn/T/359584695576599326:/var/run/secrets/kubernetes.io/serviceaccount
Created container: authors_kube-system_kubevpn_a7d82
Wait container authors_kube-system_kubevpn_a7d82 to be running...
Container authors_kube-system_kubevpn_a7d82 is running on port 9080/tcp:32771 now
Created container: nginx_kube-system_kubevpn_a7d82
Wait container nginx_kube-system_kubevpn_a7d82 to be running...
Container nginx_kube-system_kubevpn_a7d82 is running now
/opt/microservices # ls
app
/opt/microservices # ps -ef
PID USER TIME COMMAND
1 root 0:00 ./app
10 root 0:00 nginx: master process nginx -g daemon off;
32 root 0:00 /bin/sh
44 101 0:00 nginx: worker process
45 101 0:00 nginx: worker process
46 101 0:00 nginx: worker process
47 101 0:00 nginx: worker process
49 root 0:00 ps -ef
/opt/microservices # apk add curl
fetch https://dl-cdn.alpinelinux.org/alpine/v3.14/main/x86_64/APKINDEX.tar.gz
fetch https://dl-cdn.alpinelinux.org/alpine/v3.14/community/x86_64/APKINDEX.tar.gz
(1/4) Installing brotli-libs (1.0.9-r5)
(2/4) Installing nghttp2-libs (1.43.0-r0)
(3/4) Installing libcurl (7.79.1-r5)
(4/4) Installing curl (7.79.1-r5)
Executing busybox-1.33.1-r3.trigger
OK: 8 MiB in 19 packages
/opt/microservices # curl localhost:9080
404 page not found
/opt/microservices # curl localhost:9080/health
{"status":"Authors is healthy"}/opt/microservices # exit
prepare to exit, cleaning up
update ref count successfully
clean up successful
```
此时本地会启动两个 container, 对应 pod 容器中的两个 container, 并且共享端口, 可以直接使用 localhost:port 的形式直接访问另一个 container,
并且, 所有的环境变量、挂载卷、网络条件都和 pod 一样, 真正做到与 kubernetes 运行环境一致。
```shell
➜ ~ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
de9e2f8ab57d nginx:latest "/docker-entrypoint.…" 5 seconds ago Up 5 seconds nginx_kube-system_kubevpn_e21d8
28aa30e8929e naison/authors:latest "./app" 6 seconds ago Up 5 seconds 0.0.0.0:80->80/tcp, 0.0.0.0:9080->9080/tcp authors_kube-system_kubevpn_e21d8
➜ ~
```
如果你想指定在本地启动容器的镜像, 可以使用参数 `--docker-image`, 当本地不存在该镜像时, 会从对应的镜像仓库拉取。如果你想指定启动参数,可以使用 `--entrypoint`
参数,替换为你想要执行的命令,比如 `--entrypoint /bin/bash`, 更多使用参数,请参见 `kubevpn dev --help`.
### DinD ( Docker in Docker ) 在 Docker 中使用 kubevpn
如果你想在本地使用 Docker in Docker (DinD) 的方式启动开发模式, 由于程序会读写 `/tmp` 目录,您需要手动添加参数 `-v /tmp:/tmp`, 还有一点需要注意, 如果使用 DinD
模式,为了共享容器网络和 pid, 还需要指定参数 `--network`
例如:
```shell
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v /Users/naison/.kube/config:/root/.kube/config naison/kubevpn:v1.1.35
```
```shell
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -c authors -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v /Users/naison/.kube/config:/root/.kube/config naison/kubevpn:v1.1.35
root@4d0c3c4eae2b:/# hostname
4d0c3c4eae2b
root@4d0c3c4eae2b:/# kubevpn -n kube-system --image naison/kubevpn:v1.1.35 --headers user=naison --network container:4d0c3c4eae2b --entrypoint /bin/bash dev deployment/authors
----------------------------------------------------------------------------------
Warn: Use sudo to execute command kubevpn can not use user env KUBECONFIG.
Because of sudo user env and user env are different.
Current env KUBECONFIG value:
----------------------------------------------------------------------------------
got cidr from cache
traffic manager not exist, try to create it...
pod [kubevpn-traffic-manager] status is Pending
Container Reason Message
pod [kubevpn-traffic-manager] status is Pending
Container Reason Message
control-plane ContainerCreating
vpn ContainerCreating
webhook ContainerCreating
pod [kubevpn-traffic-manager] status is Running
Container Reason Message
control-plane ContainerRunning
vpn ContainerRunning
webhook ContainerRunning
update ref count successfully
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
deployment "authors" successfully rolled out
port forward ready
tunnel connected
dns service ok
tar: removing leading '/' from member names
/tmp/3122262358661539581:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading '/' from member names
tar: Removing leading '/' from hard link targets
/tmp/7677066538742627822:/var/run/secrets/kubernetes.io/serviceaccount
latest: Pulling from naison/authors
Digest: sha256:2e7b2d6a4c6143cde888fcdb70ba091d533e11de70e13e151adff7510a5d52d4
Status: Downloaded newer image for naison/authors:latest
Created container: authors_kube-system_kubevpn_c68e4
Wait container authors_kube-system_kubevpn_c68e4 to be running...
Container authors_kube-system_kubevpn_c68e4 is running now
Created container: nginx_kube-system_kubevpn_c68e7
Wait container nginx_kube-system_kubevpn_c68e7 to be running...
Container nginx_kube-system_kubevpn_c68e7 is running now
/opt/microservices # ps -ef
PID USER TIME COMMAND
1 root 0:00 {bash} /usr/bin/qemu-x86_64 /bin/bash /bin/bash
60 root 0:07 {kubevpn} /usr/bin/qemu-x86_64 kubevpn kubevpn dev deployment/authors -n kube-system --image naison/kubevpn:v1.1.35 --headers user=naison --parent
73 root 0:00 {tail} /usr/bin/qemu-x86_64 /usr/bin/tail tail -f /dev/null
80 root 0:00 {nginx} /usr/bin/qemu-x86_64 /usr/sbin/nginx nginx -g daemon off;
92 root 0:00 {sh} /usr/bin/qemu-x86_64 /bin/sh /bin/sh
156 101 0:00 {nginx} /usr/bin/qemu-x86_64 /usr/sbin/nginx nginx -g daemon off;
158 101 0:00 {nginx} /usr/bin/qemu-x86_64 /usr/sbin/nginx nginx -g daemon off;
160 101 0:00 {nginx} /usr/bin/qemu-x86_64 /usr/sbin/nginx nginx -g daemon off;
162 101 0:00 {nginx} /usr/bin/qemu-x86_64 /usr/sbin/nginx nginx -g daemon off;
164 root 0:00 ps -ef
/opt/microservices # ls
app
/opt/microservices # apk add curl
fetch https://dl-cdn.alpinelinux.org/alpine/v3.14/main/x86_64/APKINDEX.tar.gz
fetch https://dl-cdn.alpinelinux.org/alpine/v3.14/community/x86_64/APKINDEX.tar.gz
(1/4) Installing brotli-libs (1.0.9-r5)
(2/4) Installing nghttp2-libs (1.43.0-r0)
(3/4) Installing libcurl (7.79.1-r5)
(4/4) Installing curl (7.79.1-r5)
Executing busybox-1.33.1-r3.trigger
OK: 8 MiB in 19 packages
/opt/microservices # curl localhost:80
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
/opt/microservices # ls
app
/opt/microservices # exit
prepare to exit, cleaning up
update ref count successfully
ref-count is zero, prepare to clean up resource
clean up successful
root@4d0c3c4eae2b:/# exit
exit
```
### 支持多种协议
- TCP
- UDP
- HTTP
- ICMP
- GRPC
- WebSocket
- HTTP
- ...
### 支持三大平台
@@ -220,4 +482,157 @@ Hello world!%
- Windows
Windows
下需要安装 [PowerShell](https://docs.microsoft.com/zh-cn/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.2)
下需要安装 [PowerShell](https://docs.microsoft.com/zh-cn/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.2)
## 问答
### 1、依赖的镜像拉不下来,或者内网环境无法访问 docker.io 怎么办?
答:有两种方法可以解决
- 第一种,在可以访问 docker.io 的网络中,将命令 `kubevpn version` 中的 image 镜像, 转存到自己的私有镜像仓库,然后启动命令的时候,加上 `--image 新镜像` 即可。
例如:
``` shell
➜ ~ kubevpn version
KubeVPN: CLI
Version: v1.1.35
Image: docker.io/naison/kubevpn:v1.1.35
Branch: master
Git commit: 87dac42dad3d8f472a9dcdfc2c6cd801551f23d1
Built time: 2023-01-15 04:19:45
Built OS/Arch: linux/amd64
Built Go version: go1.18.10
➜ ~
```
镜像是 `docker.io/naison/kubevpn:v1.1.35`,将此镜像转存到自己的镜像仓库。
```text
docker pull docker.io/naison/kubevpn:v1.1.35
docker tag docker.io/naison/kubevpn:v1.1.35 [镜像仓库地址]/[命名空间]/[镜像仓库]:[镜像版本号]
docker push [镜像仓库地址]/[命名空间]/[镜像仓库]:[镜像版本号]
```
然后就可以使用这个镜像了,如下:
```text
➜ ~ kubevpn connect --image docker.io/naison/kubevpn:v1.1.35
got cidr from cache
traffic manager not exist, try to create it...
pod [kubevpn-traffic-manager] status is Running
...
```
- 第二种,使用选项 `--transfer-image`, 这个选项将会自动转存镜像到选项 `--image` 指定的地址。
例如:
```shell
➜ ~ kubevpn connect --transfer-image --image nocalhost-team-docker.pkg.coding.net/nocalhost/public/kubevpn:v1.1.33
Password:
v1.1.33: Pulling from naison/kubevpn
Digest: sha256:970c0c82a2d9cbac1595edb56a31e8fc84e02712c00a7211762efee5f66ea70c
Status: Image is up to date for naison/kubevpn:v1.1.33
The push refers to repository [nocalhost-team-docker.pkg.coding.net/nocalhost/public/kubevpn]
9d72fec6b077: Pushed
12a6a77eb79e: Pushed
c7d0f62ec57f: Pushed
5605cea4b7c8: Pushed
4231fec7b258: Pushed
babe72b5fcae: Pushed
6caa74b4bcf0: Pushed
b8a36d10656a: Pushed
v1.1.33: digest: sha256:1bc5e589bec6dc279418009b5e82ce0fd29a2c0e8b9266988964035ad7fbeba5 size: 2000
got cidr from cache
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
dns service ok
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :) |
+---------------------------------------------------------------------------+
```
### 2、在使用 `kubevpn dev` 进入开发模式的时候,有出现报错 137, 该怎么解决 ?
```text
dns service ok
tar: Removing leading `/' from member names
tar: Removing leading `/' from hard link targets
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/7375606548554947868:/var/run/secrets/kubernetes.io/serviceaccount
Created container: server_vke-system_kubevpn_0db84
Wait container server_vke-system_kubevpn_0db84 to be running...
Container server_vke-system_kubevpn_0db84 is running on port 8888/tcp: 6789/tcp:6789 now
$ Status: , Code: 137
prepare to exit, cleaning up
port-forward occurs error, err: lost connection to pod, retrying
update ref count successfully
ref-count is zero, prepare to clean up resource
clean up successful
```
这是因为你的 `Docker-desktop` 声明的资源, 小于 container 容器启动时所需要的资源, 因此被 OOM 杀掉了, 你可以增加 `Docker-desktop` 对于 resources
的设置, 目录是:`Preferences --> Resources --> Memory`
### 3、使用 WSL( Windows Subsystem for Linux ) Docker, 用命令 `kubevpn dev` 进入开发模式的时候, 在 terminal 中无法连接集群网络, 这是为什么, 如何解决?
答案: 这是因为 WSL 的 Docker 使用的是 主机 Windows 的网络, 所以即便在 WSL 中启动 container, 这个 container 不会使用 WSL 的网络,而是使用 Windows 的网络。
解决方案:
- 1): 在 WSL 中安装 Docker, 不要使用 Windows 版本的 Docker-desktop
- 2): 在主机 Windows 使用命令 `kubevpn connect`, 然后在 WSL 中使用 `kubevpn dev` 进入开发模式
- 3): 在主机 Windows 上启动一个 container,在 container 中使用命令 `kubevpn connect`, 然后在 WSL
  中使用 `kubevpn dev --network container:$CONTAINER_ID` 进入开发模式
### 4、在使用 `kubevpn dev` 进入开发模式后,无法访问容器网络,出现错误 `172.17.0.1:443 connect refused`,该如何解决?
答案:大概率是因为 k8s 容器网络和 docker 网络网段冲突了。
解决方案:
- 使用参数 `--connect-mode container` 在容器中链接,也可以解决此问题
- 可以修改文件 `~/.docker/daemon.json` 增加不冲突的网络,例如 `"bip": "172.15.0.1/24"`.
```shell
➜ ~ cat ~/.docker/daemon.json
{
"builder": {
"gc": {
"defaultKeepStorage": "20GB",
"enabled": true
}
},
"experimental": false,
"features": {
"buildkit": true
},
"insecure-registries": [
]
}
```
增加不冲突的网段
```shell
➜ ~ cat ~/.docker/daemon.json
{
"builder": {
"gc": {
"defaultKeepStorage": "20GB",
"enabled": true
}
},
"experimental": false,
"features": {
"buildkit": true
},
"insecure-registries": [
],
"bip": "172.15.0.1/24"
}
```
重启 docker,重新操作即可

View File

@@ -19,6 +19,6 @@
- [x] 自己写个 control plane
- [x] 考虑是否将 control plane 和服务分开
- [x] 写单元测试,优化 GitHub action
- [ ] Linux 和 macOS 也改用 WireGuard library
- [x] Linux 和 macOS 也改用 WireGuard library
- [x] 探测是否有重复路由的 utun设备禁用 `sudo ifconfig utun1 down`

View File

@@ -1,26 +1,34 @@
FROM envoyproxy/envoy:v1.25.0 AS envoy
FROM golang:1.20 AS builder
ARG BASE=github.com/wencaiwulue/kubevpn
FROM envoyproxy/envoy:v1.21.1 AS envoy
FROM golang:1.18 AS builder
COPY . /go/src/$BASE
WORKDIR /go/src/$BASE
RUN go env -w GO111MODULE=on && go env -w GOPROXY=https://goproxy.cn,direct
RUN make kubevpn-linux-amd64
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o bin/envoy-xds-server /go/src/$BASE/cmd/mesh
RUN make kubevpn
RUN go install github.com/go-delve/delve/cmd/dlv@latest
FROM ubuntu:latest
ARG BASE=github.com/wencaiwulue/kubevpn
RUN sed -i s@/security.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list \
&& sed -i s@/archive.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list
RUN apt-get clean && apt-get update && apt-get install -y wget dnsutils vim curl \
net-tools iptables iputils-ping lsof iproute2 tcpdump
net-tools iptables iputils-ping lsof iproute2 tcpdump binutils traceroute conntrack socat iperf3
ENV TZ=Asia/Shanghai \
DEBIAN_FRONTEND=noninteractive
RUN apt update \
&& apt install -y tzdata \
&& ln -fs /usr/share/zoneinfo/${TZ} /etc/localtime \
&& echo ${TZ} > /etc/timezone \
&& dpkg-reconfigure --frontend noninteractive tzdata \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY --from=builder /go/src/$BASE/bin/kubevpn-linux-amd64 /usr/local/bin/kubevpn
COPY --from=builder /go/src/$BASE/bin/envoy-xds-server /bin/envoy-xds-server
COPY --from=envoy /usr/local/bin/envoy /usr/local/bin/envoy
COPY --from=builder /go/src/$BASE/bin/kubevpn /usr/local/bin/kubevpn
COPY --from=builder /go/bin/dlv /usr/local/bin/dlv
COPY --from=envoy /usr/local/bin/envoy /usr/local/bin/envoy

6
build/dlv.Dockerfile Normal file
View File

@@ -0,0 +1,6 @@
FROM golang:1.20 as delve
RUN curl --location --output delve-1.20.1.tar.gz https://github.com/go-delve/delve/archive/v1.20.1.tar.gz \
&& tar xzf delve-1.20.1.tar.gz
RUN cd delve-1.20.1 && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o /go/dlv -ldflags '-extldflags "-static"' ./cmd/dlv/
FROM busybox
COPY --from=delve /go/dlv /bin/dlv

View File

@@ -1,13 +1,26 @@
FROM envoyproxy/envoy:v1.21.1 AS envoy
FROM golang:1.20 AS builder
RUN go env -w GO111MODULE=on && go env -w GOPROXY=https://goproxy.cn,direct
RUN go install github.com/go-delve/delve/cmd/dlv@latest
FROM envoyproxy/envoy:v1.25.0 AS envoy
FROM ubuntu:latest
RUN sed -i s@/security.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list \
&& sed -i s@/archive.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list
RUN apt-get clean && apt-get update && apt-get install -y wget dnsutils vim curl \
net-tools iptables iputils-ping lsof iproute2 tcpdump
net-tools iptables iputils-ping lsof iproute2 tcpdump binutils traceroute conntrack socat iperf3
ENV TZ=Asia/Shanghai \
DEBIAN_FRONTEND=noninteractive
RUN apt update \
&& apt install -y tzdata \
&& ln -fs /usr/share/zoneinfo/${TZ} /etc/localtime \
&& echo ${TZ} > /etc/timezone \
&& dpkg-reconfigure --frontend noninteractive tzdata \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY bin/kubevpn-linux-amd64 /usr/local/bin/kubevpn
COPY bin/envoy-xds-server /bin/envoy-xds-server
COPY --from=envoy /usr/local/bin/envoy /usr/local/bin/envoy
COPY bin/kubevpn /usr/local/bin/kubevpn
COPY --from=envoy /usr/local/bin/envoy /usr/local/bin/envoy
COPY --from=builder /go/bin/dlv /usr/local/bin/dlv

5
build/test.Dockerfile Normal file
View File

@@ -0,0 +1,5 @@
FROM naison/kubevpn:latest
WORKDIR /app
COPY bin/kubevpn /usr/local/bin/kubevpn

View File

@@ -2,79 +2,83 @@ package cmds
import (
"fmt"
"net/http"
"io"
defaultlog "log"
"os"
"path/filepath"
"runtime"
"syscall"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/util/retry"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/driver"
"github.com/wencaiwulue/kubevpn/pkg/dev"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
var connect = handler.ConnectOptions{}
func CmdConnect(f cmdutil.Factory) *cobra.Command {
var connect = &handler.ConnectOptions{}
var sshConf = &util.SshConfig{}
var transferImage bool
cmd := &cobra.Command{
Use: "connect",
Short: i18n.T("Connect to kubernetes cluster network"),
Long: templates.LongDesc(i18n.T(`Connect to kubernetes cluster network`)),
Example: templates.Examples(i18n.T(`
# Connect to k8s cluster network
kubevpn connect
func init() {
connectCmd.Flags().StringVar(&connect.KubeconfigPath, "kubeconfig", "", "kubeconfig")
connectCmd.Flags().StringVarP(&connect.Namespace, "namespace", "n", "", "namespace")
connectCmd.PersistentFlags().StringArrayVar(&connect.Workloads, "workloads", []string{}, "workloads, like: pods/tomcat, deployment/nginx, replicaset/tomcat...")
connectCmd.Flags().StringToStringVarP(&connect.Headers, "headers", "H", map[string]string{}, "headers, format is k=v, like: k1=v1,k2=v2")
connectCmd.Flags().BoolVar(&config.Debug, "debug", false, "true/false")
connectCmd.Flags().StringVar(&config.Image, "image", config.Image, "use this image to startup container")
RootCmd.AddCommand(connectCmd)
}
# Connect to api-server behind of bastion host or ssh jump host
kubevpn connect --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile /Users/naison/.ssh/ssh.pem
var connectCmd = &cobra.Command{
Use: "connect",
Short: "connect",
Long: `connect`,
PersistentPreRun: func(cmd *cobra.Command, args []string) {
if !util.IsAdmin() {
util.RunWithElevated()
os.Exit(0)
} else {
go func() { log.Info(http.ListenAndServe("localhost:6060", nil)) }()
}
},
PreRun: func(*cobra.Command, []string) {
util.InitLogger(config.Debug)
if util.IsWindows() {
driver.InstallWireGuardTunDriver()
}
},
Run: func(cmd *cobra.Command, args []string) {
if err := connect.InitClient(); err != nil {
log.Fatal(err)
}
connect.PreCheckResource()
if err := connect.DoConnect(); err != nil {
log.Errorln(err)
handler.Cleanup(syscall.SIGQUIT)
return
}
fmt.Println(`---------------------------------------------------------------------------`)
fmt.Println(` Now you can access resources in the kubernetes cluster, enjoy it :) `)
fmt.Println(`---------------------------------------------------------------------------`)
select {}
},
PostRun: func(_ *cobra.Command, _ []string) {
if util.IsWindows() {
if err := retry.OnError(retry.DefaultRetry, func(err error) bool {
return err != nil
}, func() error {
return driver.UninstallWireGuardTunDriver()
}); err != nil {
wd, _ := os.Getwd()
filename := filepath.Join(wd, "wintun.dll")
if err = os.Rename(filename, filepath.Join(os.TempDir(), "wintun.dll")); err != nil {
log.Debugln(err)
# it also support ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn connect --ssh-alias <alias>
`)),
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
if !util.IsAdmin() {
util.RunWithElevated()
os.Exit(0)
}
go util.StartupPProf(config.PProfPort)
util.InitLogger(config.Debug)
defaultlog.Default().SetOutput(io.Discard)
if transferImage {
if err = dev.TransferImage(cmd.Context(), sshConf); err != nil {
return err
}
}
}
},
return handler.SshJump(sshConf, cmd.Flags())
},
RunE: func(cmd *cobra.Command, args []string) error {
runtime.GOMAXPROCS(0)
if err := connect.InitClient(f); err != nil {
return err
}
if err := connect.DoConnect(); err != nil {
log.Errorln(err)
handler.Cleanup(syscall.SIGQUIT)
} else {
util.Print(os.Stdout, "Now you can access resources in the kubernetes cluster, enjoy it :)")
}
select {}
},
}
cmd.Flags().BoolVar(&config.Debug, "debug", false, "enable debug mode or not, true or false")
cmd.Flags().StringVar(&config.Image, "image", config.Image, "use this image to startup container")
cmd.Flags().StringArrayVar(&connect.ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
cmd.Flags().StringArrayVar(&connect.ExtraDomain, "extra-domain", []string{}, "Extra domain string, the resolved ip will add to route table, eg: --extra-domain test.abc.com --extra-domain foo.test.com")
cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
cmd.Flags().BoolVar(&connect.UseLocalDNS, "use-localdns", false, "if use-lcoaldns is true, kubevpn will start coredns listen at 53 to forward your dns queries. only support on linux now")
cmd.Flags().StringVar((*string)(&connect.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
addSshFlags(cmd, sshConf)
return cmd
}

View File

@@ -0,0 +1,32 @@
package cmds
import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/controlplane"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
// CmdControlPlane returns the hidden "control-plane" sub-command: an envoy
// xDS server that watches a config file and distributes envoy route
// configuration (see the command's Long description). The cmdutil.Factory
// argument is unused; it is accepted only so all Cmd* constructors share the
// same signature.
func CmdControlPlane(_ cmdutil.Factory) *cobra.Command {
	var (
		// Full path of the envoy config file to watch for changes.
		watchDirectoryFilename string
		// Listen port of the xDS server.
		port uint = 9002
	)
	cmd := &cobra.Command{
		Use:    "control-plane",
		Hidden: true,
		Short:  "Control-plane is an envoy xds server",
		Long:   `Control-plane is an envoy xds server, distribute envoy route configuration`,
		Run: func(cmd *cobra.Command, args []string) {
			util.InitLogger(config.Debug)
			// Start the pprof endpoint in the background for debugging
			// (port semantics of 0 are defined by util.StartupPProf).
			go util.StartupPProf(0)
			// Main blocks, serving xDS config derived from the watched file.
			controlplane.Main(watchDirectoryFilename, port, log.StandardLogger())
		},
	}
	cmd.Flags().StringVarP(&watchDirectoryFilename, "watchDirectoryFilename", "w", "/etc/envoy/envoy-config.yaml", "full path to directory to watch for files")
	// Keep the --debug help text consistent with the other sub-commands
	// (e.g. connect uses "enable debug mode or not, true or false").
	cmd.Flags().BoolVar(&config.Debug, "debug", false, "enable debug mode or not, true or false")
	return cmd
}

140
cmd/kubevpn/cmds/cp.go Normal file
View File

@@ -0,0 +1,140 @@
package cmds
import (
"fmt"
"os"
"strings"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/completion"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/cp"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
// cpExample is the help/example text rendered for "kubevpn cp"; the literal
// below is user-visible output and must be kept verbatim.
var cpExample = templates.Examples(i18n.T(`
# !!!Important Note!!!
# Requires that the 'tar' binary is present in your container
# image. If 'tar' is not present, 'kubectl cp' will fail.
#
# For advanced use cases, such as symlinks, wildcard expansion or
# file mode preservation, consider using 'kubectl exec'.
# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace <some-namespace>
tar cf - /tmp/foo | kubectl exec -i -n <some-namespace> <some-pod> -- tar xf - -C /tmp/bar
# Copy /tmp/foo from a remote pod to /tmp/bar locally
kubectl exec -n <some-namespace> <some-pod> -- tar cf - /tmp/foo | tar xf - -C /tmp/bar
# Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in the default namespace
kubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir
# Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific container
kubectl cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container>
# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace <some-namespace>
kubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar
# Copy /tmp/foo from a remote pod to /tmp/bar locally
kubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar
# copy reverse proxy api-server behind of bastion host or ssh jump host
kubevpn cp deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile /Users/naison/.ssh/ssh.pem
# it also support ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn cp deployment/productpage --ssh-alias <alias>
`,
))
// CmdCp builds the "cp" sub-command, which copies files and directories
// between the local machine and containers, optionally reaching the
// api-server through an ssh jump host. Unlike `kubectl cp` it de-references
// symbolic links (per the Long description below).
func CmdCp(f cmdutil.Factory) *cobra.Command {
	o := cp.NewCopyOptions(genericclioptions.IOStreams{
		In:     os.Stdin,
		Out:    os.Stdout,
		ErrOut: os.Stderr,
	})
	var sshConf = &util.SshConfig{}
	cmd := &cobra.Command{
		Use:                   "cp <file-spec-src> <file-spec-dest>",
		DisableFlagsInUseLine: true,
		Short:                 i18n.T("Copy files and directories to and from containers"),
		Long:                  i18n.T("Copy files and directories to and from containers. Different between kubectl cp is it will de-reference symbol link."),
		Example:               cpExample,
		// Shell completion for the first argument: completes
		// <namespace>/<pod>:<path> specs as well as local paths
		// (logic adapted from kubectl's cp completion).
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			// establish the ssh tunnel first so completion queries can reach the cluster
			cmdutil.CheckErr(handler.SshJump(sshConf, cmd.Flags()))
			var comps []string
			if len(args) == 0 {
				if strings.IndexAny(toComplete, "/.~") == 0 {
					// Looks like a path, do nothing
				} else if strings.Contains(toComplete, ":") {
					// TODO: complete remote files in the pod
				} else if idx := strings.Index(toComplete, "/"); idx > 0 {
					// complete <namespace>/<pod>
					namespace := toComplete[:idx]
					template := "{{ range .items }}{{ .metadata.namespace }}/{{ .metadata.name }}: {{ end }}"
					comps = completion.CompGetFromTemplate(&template, f, namespace, cmd, []string{"pod"}, toComplete)
				} else {
					// Complete namespaces followed by a /
					for _, ns := range completion.CompGetResource(f, cmd, "namespace", toComplete) {
						comps = append(comps, fmt.Sprintf("%s/", ns))
					}
					// Complete pod names followed by a :
					for _, pod := range completion.CompGetResource(f, cmd, "pod", toComplete) {
						comps = append(comps, fmt.Sprintf("%s:", pod))
					}
					// Finally, provide file completion if we need to.
					// We only do this if:
					// 1- There are other completions found (if there are no completions,
					//    the shell will do file completion itself)
					// 2- If there is some input from the user (or else we will end up
					//    listing the entire content of the current directory which could
					//    be too many choices for the user)
					if len(comps) > 0 && len(toComplete) > 0 {
						if files, err := os.ReadDir("."); err == nil {
							for _, file := range files {
								filename := file.Name()
								if strings.HasPrefix(filename, toComplete) {
									if file.IsDir() {
										filename = fmt.Sprintf("%s/", filename)
									}
									// We are completing a file prefix
									comps = append(comps, filename)
								}
							}
						}
					} else if len(toComplete) == 0 {
						// If the user didn't provide any input to complete,
						// we provide a hint that a path can also be used
						comps = append(comps, "./", "/")
					}
				}
			}
			return comps, cobra.ShellCompDirectiveNoSpace
		},
		Run: func(cmd *cobra.Command, args []string) {
			// CheckErr exits the process on any error, matching kubectl behavior
			cmdutil.CheckErr(o.Complete(f, cmd, args))
			cmdutil.CheckErr(o.Validate())
			cmdutil.CheckErr(o.Run())
		},
	}
	cmdutil.AddContainerVarFlags(cmd, &o.Container, o.Container)
	cmd.Flags().BoolVarP(&o.NoPreserve, "no-preserve", "", false, "The copied file/directory's ownership and permissions will not be preserved in the container")
	cmd.Flags().IntVarP(&o.MaxTries, "retries", "", 0, "Set number of retries to complete a copy operation from a container. Specify 0 to disable or any negative value for infinite retrying. The default is 0 (no retry).")
	// for ssh jumper host
	cmd.Flags().StringVar(&sshConf.Addr, "ssh-addr", "", "Optional ssh jump server address to dial as <hostname>:<port>, eg: 127.0.0.1:22")
	cmd.Flags().StringVar(&sshConf.User, "ssh-username", "", "Optional username for ssh jump server")
	cmd.Flags().StringVar(&sshConf.Password, "ssh-password", "", "Optional password for ssh jump server")
	cmd.Flags().StringVar(&sshConf.Keyfile, "ssh-keyfile", "", "Optional file with private key for SSH authentication")
	cmd.Flags().StringVar(&sshConf.ConfigAlias, "ssh-alias", "", "Optional config alias with ~/.ssh/config for SSH authentication")
	return cmd
}

149
cmd/kubevpn/cmds/dev.go Normal file
View File

@@ -0,0 +1,149 @@
package cmds
import (
"fmt"
"os"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
dockercomp "github.com/docker/cli/cli/command/completion"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/completion"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/dev"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
// CmdDev builds the "dev" sub-command: it starts the selected kubernetes
// workload locally in a Docker container, wiring up the same volumes,
// environment variables and (optionally) cluster network as the remote pod.
func CmdDev(f cmdutil.Factory) *cobra.Command {
	var devOptions = &dev.Options{
		Factory:   f,
		NoProxy:   false,
		ExtraCIDR: []string{},
	}
	// NOTE(review): a GetClient failure panics during CLI construction rather
	// than being reported through cobra's error path — confirm this is intended.
	_, dockerCli, err := dev.GetClient()
	if err != nil {
		panic(err)
	}
	var sshConf = &util.SshConfig{}
	var transferImage bool
	cmd := &cobra.Command{
		Use:   "dev [OPTIONS] RESOURCE [COMMAND] [ARG...]",
		Short: i18n.T("Startup your kubernetes workloads in local Docker container with same volume、env、and network"),
		Long: templates.LongDesc(i18n.T(`
Startup your kubernetes workloads in local Docker container with same volume、env、and network
## What did i do:
- Download volume which MountPath point to, mount to docker container
- Connect to cluster network, set network to docker container
- Get all environment with command (env), set env to docker container
`)),
		Example: templates.Examples(i18n.T(`
# Develop workloads
- develop deployment
kubevpn dev deployment/productpage
- develop service
kubevpn dev service/productpage
# Develop workloads with mesh, traffic with header a=1, will hit local PC, otherwise no effect
kubevpn dev --headers a=1 service/productpage
# Develop workloads without proxy traffic
kubevpn dev --no-proxy service/productpage
# Develop workloads which api-server behind of bastion host or ssh jump host
kubevpn dev --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile /Users/naison/.ssh/ssh.pem deployment/productpage
# it also support ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn dev --ssh-alias <alias> deployment/productpage
`)),
		Args:                  cli.RequiresMinArgs(1),
		DisableFlagsInUseLine: true,
		// PreRunE: elevate privileges, optionally push the image, then open
		// the ssh tunnel before the workload is started locally.
		PreRunE: func(cmd *cobra.Command, args []string) error {
			if !util.IsAdmin() {
				// re-exec with elevated privileges; the unprivileged parent exits
				util.RunWithElevated()
				os.Exit(0)
			}
			go util.StartupPProf(config.PProfPort)
			util.InitLogger(config.Debug)
			if transferImage {
				if err := dev.TransferImage(cmd.Context(), sshConf); err != nil {
					return err
				}
			}
			// not support temporally
			if devOptions.Engine == config.EngineGvisor {
				return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
			}
			return handler.SshJump(sshConf, cmd.Flags())
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			// args[0] is the workload spec; anything after it becomes the
			// command run inside the local container
			devOptions.Workload = args[0]
			if len(args) > 1 {
				devOptions.Copts.Args = args[1:]
			}
			return dev.DoDev(devOptions, cmd.Flags(), f)
		},
	}
	cmd.Flags().SortFlags = false
	cmd.Flags().StringToStringVarP(&devOptions.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to local PC, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to local PC, format is k=v, like: k1=v1,k2=v2")
	cmd.Flags().BoolVar(&config.Debug, "debug", false, "enable debug mode or not, true or false")
	cmd.Flags().StringVar(&config.Image, "image", config.Image, "use this image to startup container")
	cmd.Flags().BoolVar(&devOptions.NoProxy, "no-proxy", false, "Whether proxy remote workloads traffic into local or not, true: just startup container on local without inject containers to intercept traffic, false: intercept traffic and forward to local")
	cmdutil.AddContainerVarFlags(cmd, &devOptions.ContainerName, devOptions.ContainerName)
	cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc("container", completion.ContainerCompletionFunc(f)))
	cmd.Flags().StringArrayVar(&devOptions.ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
	cmd.Flags().StringArrayVar(&devOptions.ExtraDomain, "extra-domain", []string{}, "Extra domain string, the resolved ip will add to route table, eg: --extra-domain test.abc.com --extra-domain foo.test.com")
	cmd.Flags().StringVar((*string)(&devOptions.ConnectMode), "connect-mode", string(dev.ConnectModeHost), "Connect to kubernetes network in container or in host, eg: ["+string(dev.ConnectModeContainer)+"|"+string(dev.ConnectModeHost)+"]")
	cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
	cmd.Flags().StringVar((*string)(&devOptions.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
	// diy docker options
	cmd.Flags().StringVar(&devOptions.DockerImage, "docker-image", "", "Overwrite the default K8s pod of the image")
	// origin docker options
	flags := cmd.Flags()
	flags.SetInterspersed(false)
	// These are flags not stored in Config/HostConfig
	flags.BoolVarP(&devOptions.Options.Detach, "detach", "d", false, "Run container in background and print container ID")
	flags.StringVar(&devOptions.Options.Name, "name", "", "Assign a name to the container")
	flags.StringVar(&devOptions.Options.Pull, "pull", dev.PullImageMissing, `Pull image before running ("`+dev.PullImageAlways+`"|"`+dev.PullImageMissing+`"|"`+dev.PullImageNever+`")`)
	flags.BoolVarP(&devOptions.Options.Quiet, "quiet", "q", false, "Suppress the pull output")
	// Add an explicit help that doesn't have a `-h` to prevent the conflict
	// with hostname
	flags.Bool("help", false, "Print usage")
	command.AddPlatformFlag(flags, &devOptions.Options.Platform)
	command.AddTrustVerificationFlags(flags, &devOptions.Options.Untrusted, dockerCli.ContentTrustEnabled())
	devOptions.Copts = dev.AddFlags(flags)
	// shell completion for a few docker-style flags
	_ = cmd.RegisterFlagCompletionFunc(
		"env",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return os.Environ(), cobra.ShellCompDirectiveNoFileComp
		},
	)
	_ = cmd.RegisterFlagCompletionFunc(
		"env-file",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return nil, cobra.ShellCompDirectiveDefault
		},
	)
	_ = cmd.RegisterFlagCompletionFunc(
		"network",
		dockercomp.NetworkNames(nil),
	)
	addSshFlags(cmd, sshConf)
	return cmd
}

View File

@@ -0,0 +1,144 @@
package cmds
import (
"context"
"fmt"
"io"
defaultlog "log"
"os"
"syscall"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
utilcomp "k8s.io/kubectl/pkg/util/completion"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/dev"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
// CmdDuplicate builds the "duplicate" sub-command: it clones workloads into
// the same or another cluster (via --target-kubeconfig) with the same
// volumes, environment and network, optionally routing header-matched
// traffic to the duplicates.
// Implementation note kept from the original author:
// kubectl exec POD_NAME -c CONTAINER_NAME /sbin/killall5 or ephemeralcontainers
func CmdDuplicate(f cmdutil.Factory) *cobra.Command {
	var duplicateOptions = handler.DuplicateOptions{}
	var sshConf = &util.SshConfig{}
	var transferImage bool
	cmd := &cobra.Command{
		Use:   "duplicate",
		Short: i18n.T("Duplicate workloads to target-kubeconfig cluster with same volume、env、and network"),
		Long:  templates.LongDesc(i18n.T(`Duplicate workloads to target-kubeconfig cluster with same volume、env、and network`)),
		Example: templates.Examples(i18n.T(`
# duplicate
- duplicate deployment in current cluster and current namespace
kubevpn duplicate deployment/productpage
- duplicate deployment in current cluster with different namespace
kubevpn duplicate deployment/productpage -n test
- duplicate deployment to another cluster
kubevpn duplicate deployment/productpage --target-kubeconfig ~/.kube/other-kubeconfig
- duplicate multiple workloads
kubevpn duplicate deployment/authors deployment/productpage
or
kubevpn duplicate deployment authors productpage
# duplicate with mesh, traffic with header a=1, will hit duplicate workloads, otherwise hit origin workloads
kubevpn duplicate deployment/productpage --headers a=1
# duplicate workloads which api-server behind of bastion host or ssh jump host
kubevpn duplicate deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile /Users/naison/.ssh/ssh.pem --headers a=1
# it also support ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn duplicate service/productpage --ssh-alias <alias> --headers a=1
`)),
		// PreRunE: elevate privileges, optionally transfer the image, and open
		// the ssh tunnel before any cluster operation runs.
		PreRunE: func(cmd *cobra.Command, args []string) (err error) {
			if !util.IsAdmin() {
				// re-exec with elevated privileges; the unprivileged parent exits
				util.RunWithElevated()
				os.Exit(0)
			}
			go util.StartupPProf(config.PProfPort)
			util.InitLogger(config.Debug)
			// silence the stdlib logger used by some dependencies
			defaultlog.Default().SetOutput(io.Discard)
			if transferImage {
				if err = dev.TransferImage(cmd.Context(), sshConf); err != nil {
					return err
				}
			}
			// not support temporally
			if duplicateOptions.Engine == config.EngineGvisor {
				return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
			}
			return handler.SshJump(sshConf, cmd.Flags())
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) == 0 {
				_, _ = fmt.Fprintf(os.Stdout, "You must specify the type of resource to proxy. %s\n\n", cmdutil.SuggestAPIResources("kubevpn"))
				fullCmdName := cmd.Parent().CommandPath()
				usageString := "Required resource not specified."
				if len(fullCmdName) > 0 && cmdutil.IsSiblingCommandExists(cmd, "explain") {
					usageString = fmt.Sprintf("%s\nUse \"%s explain <resource>\" for a detailed description of that resource (e.g. %[2]s explain pods).", usageString, fullCmdName)
				}
				return cmdutil.UsageErrorf(cmd, usageString)
			}
			// special empty string, eg: --target-registry ""
			duplicateOptions.IsChangeTargetRegistry = cmd.Flags().Changed("target-registry")
			// first connect to the source cluster network, then run the duplication
			connectOptions := handler.ConnectOptions{
				Namespace: duplicateOptions.Namespace,
				Workloads: args,
				ExtraCIDR: duplicateOptions.ExtraCIDR,
				Engine:    duplicateOptions.Engine,
			}
			if err := connectOptions.InitClient(f); err != nil {
				return err
			}
			err := connectOptions.PreCheckResource()
			if err != nil {
				return err
			}
			// hand the (pre-checked) workload list over to the duplicate step;
			// the connect step itself proxies no workloads
			duplicateOptions.Workloads = connectOptions.Workloads
			connectOptions.Workloads = []string{}
			if err = connectOptions.DoConnect(); err != nil {
				log.Errorln(err)
				// NOTE(review): if Cleanup does not terminate the process, control
				// falls through to the select{} below and hangs — confirm Cleanup exits.
				handler.Cleanup(syscall.SIGQUIT)
			} else {
				err = duplicateOptions.InitClient(f)
				if err != nil {
					return err
				}
				err = duplicateOptions.DoDuplicate(context.Background())
				if err != nil {
					return err
				}
				util.Print(os.Stdout, "Now duplicate workloads running successfully on other cluster, enjoy it :)")
			}
			// block forever: the connection must stay up until the process is signalled
			select {}
		},
	}
	cmd.Flags().StringToStringVarP(&duplicateOptions.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to duplicate workloads, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to duplicate workloads, format is k=v, like: k1=v1,k2=v2")
	cmd.Flags().BoolVar(&config.Debug, "debug", false, "Enable debug mode or not, true or false")
	cmd.Flags().StringVar(&config.Image, "image", config.Image, "Use this image to startup container")
	cmd.Flags().StringArrayVar(&duplicateOptions.ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
	cmd.Flags().StringArrayVar(&duplicateOptions.ExtraDomain, "extra-domain", []string{}, "Extra domain string, the resolved ip will add to route table, eg: --extra-domain test.abc.com --extra-domain foo.test.com")
	cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
	cmd.Flags().StringVar((*string)(&duplicateOptions.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
	cmd.Flags().StringVar(&duplicateOptions.TargetImage, "target-image", "", "Duplicate container use this image to startup container, if not special, use origin origin image")
	cmd.Flags().StringVar(&duplicateOptions.TargetContainer, "target-container", "", "Duplicate container use special image to startup this container, if not special, use origin origin image")
	cmd.Flags().StringVar(&duplicateOptions.TargetNamespace, "target-namespace", "", "Duplicate workloads in this namespace, if not special, use origin namespace")
	cmd.Flags().StringVar(&duplicateOptions.TargetKubeconfig, "target-kubeconfig", "", "Duplicate workloads will create in this cluster, if not special, use origin cluster")
	cmd.Flags().StringVar(&duplicateOptions.TargetRegistry, "target-registry", "", "Duplicate workloads will create this registry domain to replace origin registry, if not special, use origin registry")
	addSshFlags(cmd, sshConf)
	cmd.ValidArgsFunction = utilcomp.ResourceTypeAndNameCompletionFunc(f)
	return cmd
}

View File

@@ -0,0 +1,33 @@
package cmds
import (
"os"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
)
var (
	// optionsExample is the help example rendered for "kubevpn options".
	optionsExample = templates.Examples(i18n.T(`
# Print flags inherited by all commands
kubevpn options`))
)
// CmdOptions builds the "options" sub-command, which prints the global
// flags inherited by every kubevpn command.
func CmdOptions(cmdutil.Factory) *cobra.Command {
	optionsCmd := &cobra.Command{
		Use:     "options",
		Short:   i18n.T("Print the list of flags inherited by all commands"),
		Long:    i18n.T("Print the list of flags inherited by all commands"),
		Example: optionsExample,
		Run: func(cmd *cobra.Command, _ []string) {
			// the command's only job is to render its own usage text
			_ = cmd.Usage()
		},
	}
	optionsCmd.SetOut(os.Stdout)
	templates.UseOptionsTemplates(optionsCmd)
	return optionsCmd
}

126
cmd/kubevpn/cmds/proxy.go Normal file
View File

@@ -0,0 +1,126 @@
package cmds
import (
"fmt"
"io"
defaultlog "log"
"os"
"syscall"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
utilcomp "k8s.io/kubectl/pkg/util/completion"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/dev"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
// CmdProxy builds the "proxy" sub-command: it connects to the cluster
// network and reverses inbound traffic of the given workloads to the local
// machine, optionally filtered by request headers (mesh mode).
func CmdProxy(f cmdutil.Factory) *cobra.Command {
	var connect = handler.ConnectOptions{}
	var sshConf = &util.SshConfig{}
	var transferImage bool
	cmd := &cobra.Command{
		Use:   "proxy",
		Short: i18n.T("Proxy kubernetes workloads inbound traffic into local PC"),
		Long:  templates.LongDesc(i18n.T(`Proxy kubernetes workloads inbound traffic into local PC`)),
		Example: templates.Examples(i18n.T(`
# Reverse proxy
- proxy deployment
kubevpn proxy deployment/productpage
- proxy service
kubevpn proxy service/productpage
- proxy multiple workloads
kubevpn proxy deployment/authors deployment/productpage
or
kubevpn proxy deployment authors productpage
# Reverse proxy with mesh, traffic with header a=1, will hit local PC, otherwise no effect
kubevpn proxy service/productpage --headers a=1
# Connect to api-server behind of bastion host or ssh jump host and proxy kubernetes resource traffic into local PC
kubevpn proxy deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile /Users/naison/.ssh/ssh.pem --headers a=1
# it also support ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn proxy service/productpage --ssh-alias <alias> --headers a=1
`)),
		// PreRunE: elevate privileges, optionally transfer the image, and open
		// the ssh tunnel before proxying starts.
		PreRunE: func(cmd *cobra.Command, args []string) (err error) {
			if !util.IsAdmin() {
				// re-exec with elevated privileges; the unprivileged parent exits
				util.RunWithElevated()
				os.Exit(0)
			}
			go util.StartupPProf(config.PProfPort)
			util.InitLogger(config.Debug)
			// silence the stdlib logger used by some dependencies
			defaultlog.Default().SetOutput(io.Discard)
			if transferImage {
				if err = dev.TransferImage(cmd.Context(), sshConf); err != nil {
					return err
				}
			}
			// not support temporally
			if connect.Engine == config.EngineGvisor {
				return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
			}
			return handler.SshJump(sshConf, cmd.Flags())
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			if err := connect.InitClient(f); err != nil {
				return err
			}
			if len(args) == 0 {
				fmt.Fprintf(os.Stdout, "You must specify the type of resource to proxy. %s\n\n", cmdutil.SuggestAPIResources("kubevpn"))
				fullCmdName := cmd.Parent().CommandPath()
				usageString := "Required resource not specified."
				if len(fullCmdName) > 0 && cmdutil.IsSiblingCommandExists(cmd, "explain") {
					usageString = fmt.Sprintf("%s\nUse \"%s explain <resource>\" for a detailed description of that resource (e.g. %[2]s explain pods).", usageString, fullCmdName)
				}
				return cmdutil.UsageErrorf(cmd, usageString)
			}
			connect.Workloads = args
			err := connect.PreCheckResource()
			if err != nil {
				return err
			}
			if err = connect.DoConnect(); err != nil {
				log.Errorln(err)
				// NOTE(review): if Cleanup does not terminate the process, control
				// falls through to the select{} below and hangs — confirm Cleanup exits.
				handler.Cleanup(syscall.SIGQUIT)
			} else {
				util.Print(os.Stdout, "Now you can access resources in the kubernetes cluster, enjoy it :)")
			}
			// block forever: the proxy must stay up until the process is signalled
			select {}
		},
	}
	cmd.Flags().StringToStringVarP(&connect.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to local PC, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to local PC, format is k=v, like: k1=v1,k2=v2")
	cmd.Flags().BoolVar(&config.Debug, "debug", false, "Enable debug mode or not, true or false")
	cmd.Flags().StringVar(&config.Image, "image", config.Image, "Use this image to startup container")
	cmd.Flags().StringArrayVar(&connect.ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
	cmd.Flags().StringArrayVar(&connect.ExtraDomain, "extra-domain", []string{}, "Extra domain string, the resolved ip will add to route table, eg: --extra-domain test.abc.com --extra-domain foo.test.com")
	cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
	cmd.Flags().StringVar((*string)(&connect.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
	addSshFlags(cmd, sshConf)
	cmd.ValidArgsFunction = utilcomp.ResourceTypeAndNameCompletionFunc(f)
	return cmd
}
// addSshFlags registers the ssh jump-host flags shared by several
// sub-commands onto cmd, binding their values into sshConf.
func addSshFlags(cmd *cobra.Command, sshConf *util.SshConfig) {
	// for ssh jumper host
	flags := cmd.Flags()
	flags.StringVar(&sshConf.Addr, "ssh-addr", "", "Optional ssh jump server address to dial as <hostname>:<port>, eg: 127.0.0.1:22")
	flags.StringVar(&sshConf.User, "ssh-username", "", "Optional username for ssh jump server")
	flags.StringVar(&sshConf.Password, "ssh-password", "", "Optional password for ssh jump server")
	flags.StringVar(&sshConf.Keyfile, "ssh-keyfile", "", "Optional file with private key for SSH authentication")
	flags.StringVar(&sshConf.ConfigAlias, "ssh-alias", "", "Optional config alias with ~/.ssh/config for SSH authentication")
	flags.StringVar(&sshConf.RemoteKubeconfig, "remote-kubeconfig", "", "Remote kubeconfig abstract path of ssh server, default is /$ssh-user/.kube/config")
	// allow --remote-kubeconfig to be given without a value
	flags.Lookup("remote-kubeconfig").NoOptDefVal = "~/.kube/config"
}

63
cmd/kubevpn/cmds/reset.go Normal file
View File

@@ -0,0 +1,63 @@
package cmds
import (
"fmt"
"os"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
// CmdReset builds the "reset" sub-command, which reverts every change
// KubeVPN made to the target namespace/cluster.
func CmdReset(factory cmdutil.Factory) *cobra.Command {
	var options = handler.ConnectOptions{}
	var sshConf = &util.SshConfig{}
	cmd := &cobra.Command{
		Use:   "reset",
		Short: "Reset all changes made by KubeVPN",
		Long:  `Reset all changes made by KubeVPN`,
		Example: templates.Examples(i18n.T(`
# Reset default namespace
kubevpn reset
# Reset another namespace test
kubevpn reset -n test
# Reset cluster api-server behind of bastion host or ssh jump host
kubevpn reset --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile /Users/naison/.ssh/ssh.pem
# it also support ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
kubevpn reset --ssh-alias <alias>
`)),
		PreRunE: func(cmd *cobra.Command, args []string) error {
			// open the ssh tunnel (if configured) before touching the cluster
			return handler.SshJump(sshConf, cmd.Flags())
		},
		Run: func(cmd *cobra.Command, args []string) {
			if err := options.InitClient(factory); err != nil {
				log.Fatal(err)
			}
			if err := options.Reset(cmd.Context()); err != nil {
				log.Fatal(err)
			}
			fmt.Fprint(os.Stdout, "Done")
		},
	}
	// for ssh jumper host
	flags := cmd.Flags()
	flags.StringVar(&sshConf.Addr, "ssh-addr", "", "Optional ssh jump server address to dial as <hostname>:<port>, eg: 127.0.0.1:22")
	flags.StringVar(&sshConf.User, "ssh-username", "", "Optional username for ssh jump server")
	flags.StringVar(&sshConf.Password, "ssh-password", "", "Optional password for ssh jump server")
	flags.StringVar(&sshConf.Keyfile, "ssh-keyfile", "", "Optional file with private key for SSH authentication")
	flags.StringVar(&sshConf.ConfigAlias, "ssh-alias", "", "Optional config alias with ~/.ssh/config for SSH authentication")
	return cmd
}

View File

@@ -1,9 +1,71 @@
package cmds
import "github.com/spf13/cobra"
import (
"os"
var RootCmd = &cobra.Command{
Use: "kubevpn",
Short: "kubevpn",
Long: `kubevpn`,
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/rest"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
"github.com/wencaiwulue/kubevpn/pkg/config"
)
// NewKubeVPNCommand assembles the kubevpn root command: it wires the shared
// kubeconfig flags, builds the kubectl factory, and registers every
// sub-command under a "Client Commands" group.
func NewKubeVPNCommand() *cobra.Command {
	rootCmd := &cobra.Command{
		Use:   "kubevpn",
		Short: i18n.T("kubevpn connect to Kubernetes cluster network"),
		Long: templates.LongDesc(`
kubevpn connect to Kubernetes cluster network.
`),
		Run: func(cmd *cobra.Command, args []string) {
			// the bare root command only prints its help text
			_ = cmd.Help()
		},
	}
	persistent := rootCmd.PersistentFlags()
	configFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
	// When an ssh jump is active, a temporary kubeconfig path is exported via
	// EnvSSHJump; prefer it over whatever the flags resolved to.
	configFlags.WrapConfigFn = func(c *rest.Config) *rest.Config {
		path, ok := os.LookupEnv(config.EnvSSHJump)
		if !ok {
			return c
		}
		kubeconfigBytes, err := os.ReadFile(path)
		cmdutil.CheckErr(err)
		var conf *restclient.Config
		conf, err = clientcmd.RESTConfigFromKubeConfig(kubeconfigBytes)
		cmdutil.CheckErr(err)
		return conf
	}
	configFlags.AddFlags(persistent)
	matchVersion := cmdutil.NewMatchVersionFlags(configFlags)
	matchVersion.AddFlags(persistent)
	factory := cmdutil.NewFactory(matchVersion)
	groups := templates.CommandGroups{
		{
			Message: "Client Commands:",
			Commands: []*cobra.Command{
				CmdConnect(factory),
				CmdProxy(factory),
				CmdDev(factory),
				CmdDuplicate(factory),
				CmdCp(factory),
				CmdUpgrade(factory),
				CmdReset(factory),
				CmdVersion(factory),
				// Hidden, Server Commands (DO NOT USE IT !!!)
				CmdControlPlane(factory),
				CmdServe(factory),
				CmdWebhook(factory),
			},
		},
	}
	groups.Add(rootCmd)
	templates.ActsAsRootCommand(rootCmd, []string{"options"}, groups...)
	rootCmd.AddCommand(CmdOptions(factory))
	return rootCmd
}

View File

@@ -1,39 +1,49 @@
package cmds
import (
"context"
"net/http"
"math/rand"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"go.uber.org/automaxprocs/maxprocs"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/core"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
var route handler.Route
func init() {
ServerCmd.Flags().StringArrayVarP(&route.ServeNodes, "nodeCommand", "L", []string{}, "command needs to be executed")
ServerCmd.Flags().StringVarP(&route.ChainNode, "chainCommand", "F", "", "command needs to be executed")
ServerCmd.Flags().BoolVar(&config.Debug, "debug", false, "true/false")
RootCmd.AddCommand(ServerCmd)
}
var ServerCmd = &cobra.Command{
Use: "serve",
Short: "serve",
Long: `serve`,
PreRun: func(*cobra.Command, []string) {
util.InitLogger(config.Debug)
go func() { log.Info(http.ListenAndServe("localhost:6060", nil)) }()
},
Run: func(cmd *cobra.Command, args []string) {
err := handler.Start(context.TODO(), route)
if err != nil {
log.Fatal(err)
}
select {}
},
// CmdServe builds the hidden "serve" sub-command: the server-side traffic
// manager that forwards inbound and outbound traffic between nodes.
// The factory argument is unused.
func CmdServe(_ cmdutil.Factory) *cobra.Command {
	var route = &core.Route{}
	cmd := &cobra.Command{
		Use:    "serve",
		Hidden: true,
		Short:  "Server side, startup traffic manager, forward inbound and outbound traffic",
		Long:   `Server side, startup traffic manager, forward inbound and outbound traffic.`,
		PreRun: func(*cobra.Command, []string) {
			util.InitLogger(config.Debug)
			go util.StartupPProf(0)
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			// NOTE(review): rand.Seed is deprecated since Go 1.20 (the global
			// source is auto-seeded); consider removing once the toolchain allows.
			rand.Seed(time.Now().UnixNano())
			// size GOMAXPROCS from the container CPU quota; error intentionally ignored
			_, _ = maxprocs.Set(maxprocs.Logger(nil))
			err := handler.Complete(route)
			if err != nil {
				return err
			}
			// run final cleanup no matter how Parse/Run exit below
			defer handler.Final()
			servers, err := handler.Parse(*route)
			if err != nil {
				return err
			}
			// ctx is cancelled on SIGINT/SIGTERM, stopping the servers
			ctx := ctrl.SetupSignalHandler()
			return handler.Run(ctx, servers)
		},
	}
	cmd.Flags().StringArrayVarP(&route.ServeNodes, "node", "L", []string{}, "Startup node server. eg: tcp://localhost:1080")
	cmd.Flags().StringVarP(&route.ChainNode, "chain", "F", "", "Forward chain. eg: tcp://192.168.1.100:2345")
	cmd.Flags().BoolVar(&config.Debug, "debug", false, "Enable debug log or not")
	return cmd
}

View File

@@ -0,0 +1,39 @@
package cmds
import (
"net/http"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/oauth2"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/upgrade"
)
// GitHubOAuthToken is an optional GitHub OAuth token injected at build time
// via --ldflags -X; when set, upgrade requests to the GitHub API are
// authenticated, which raises the rate limit for release lookups.
var (
GitHubOAuthToken = ""
)
// CmdUpgrade builds the "upgrade" sub-command, which downloads and installs
// the latest KubeVPN release from GitHub.
func CmdUpgrade(_ cmdutil.Factory) *cobra.Command {
	upgradeCmd := &cobra.Command{
		Use:   "upgrade",
		Short: "Upgrade KubeVPN version",
		Long:  `Upgrade KubeVPN version, automatically download latest KubeVPN from GitHub`,
		Run: func(cmd *cobra.Command, args []string) {
			httpClient := http.DefaultClient
			// A build-time OAuth token authenticates GitHub API calls,
			// avoiding the anonymous rate limit.
			if GitHubOAuthToken != "" {
				source := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: GitHubOAuthToken, TokenType: "Bearer"})
				httpClient = oauth2.NewClient(cmd.Context(), source)
			}
			if err := upgrade.Main(config.Version, httpClient); err != nil {
				log.Fatal(err)
			}
			println("Done")
		},
	}
	return upgradeCmd
}

View File

@@ -7,6 +7,7 @@ import (
"time"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/wencaiwulue/kubevpn/pkg/config"
)
@@ -17,7 +18,6 @@ var (
GitCommit = ""
BuildTime = ""
Branch = ""
Version = "latest"
)
func reformatDate(buildTime string) string {
@@ -28,28 +28,30 @@ func reformatDate(buildTime string) string {
return buildTime
}
var versionCmd = &cobra.Command{
Use: "version",
Short: "Print the version number of KubeVPN",
Long: `This is the version of KubeVPN`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("KubeVPN: CLI\n")
fmt.Printf(" Version: %s\n", Version)
fmt.Printf(" Image: %s\n", config.Image)
fmt.Printf(" Branch: %s\n", Branch)
fmt.Printf(" Git commit: %s\n", GitCommit)
fmt.Printf(" Built time: %s\n", reformatDate(BuildTime))
fmt.Printf(" Built OS/Arch: %s\n", OsArch)
fmt.Printf(" Built Go version: %s\n", runtime.Version())
},
// CmdVersion builds the "version" sub-command, which prints client build
// metadata: version, image, branch, commit, build time, OS/arch and the Go
// toolchain version.
func CmdVersion(cmdutil.Factory) *cobra.Command {
	versionCmd := &cobra.Command{
		Use:   "version",
		Short: "Print the client version information",
		Long:  `Print the client version information`,
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Printf("KubeVPN: CLI\n")
			// Values are injected at build time via --ldflags -X or derived
			// from the running binary.
			for _, field := range [][2]string{
				{"Version", config.Version},
				{"Image", config.Image},
				{"Branch", Branch},
				{"Git commit", GitCommit},
				{"Built time", reformatDate(BuildTime)},
				{"Built OS/Arch", OsArch},
				{"Built Go version", runtime.Version()},
			} {
				fmt.Printf("  %s: %s\n", field[0], field[1])
			}
		},
	}
	return versionCmd
}
func init() {
RootCmd.AddCommand(versionCmd)
// Prefer version number inserted at build using --ldflags
if Version == "" {
if config.Version == "" {
if i, ok := debug.ReadBuildInfo(); ok {
Version = i.Main.Version
config.Version = i.Main.Version
}
}
}

View File

@@ -0,0 +1,29 @@
package cmds
import (
"github.com/spf13/cobra"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/wencaiwulue/kubevpn/pkg/util"
"github.com/wencaiwulue/kubevpn/pkg/webhook"
)
// CmdWebhook builds the hidden "webhook" sub-command: an HTTP server intended
// to be registered in the cluster as a MutatingAdmissionWebhook backend.
func CmdWebhook(f cmdutil.Factory) *cobra.Command {
	webhookCmd := &cobra.Command{
		Use:    "webhook",
		Hidden: true,
		Short:  "Starts a HTTP server, useful for creating MutatingAdmissionWebhook",
		Long: `Starts a HTTP server, useful for creating MutatingAdmissionWebhook.
After deploying it to Kubernetes cluster, the Administrator needs to create a MutatingWebhookConfiguration
in the Kubernetes cluster to register remote webhook admission controllers.`,
		Args: cobra.MaximumNArgs(0),
		PreRun: func(cmd *cobra.Command, args []string) {
			// The webhook server always logs at debug level.
			util.InitLogger(true)
			go util.StartupPProf(0)
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			return webhook.Main(f)
		},
	}
	return webhookCmd
}

View File

@@ -1,11 +1,12 @@
package main
import (
"github.com/wencaiwulue/kubevpn/cmd/kubevpn/cmds"
_ "k8s.io/client-go/plugin/pkg/client/auth"
_ "net/http/pprof"
"github.com/wencaiwulue/kubevpn/cmd/kubevpn/cmds"
)
func main() {
_ = cmds.RootCmd.Execute()
_ = cmds.NewKubeVPNCommand().Execute()
}

View File

@@ -1,56 +0,0 @@
package main
import (
"context"
"flag"
"github.com/envoyproxy/go-control-plane/pkg/cache/v3"
serverv3 "github.com/envoyproxy/go-control-plane/pkg/server/v3"
log "github.com/sirupsen/logrus"
"github.com/wencaiwulue/kubevpn/pkg/controlplane"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
var (
logger *log.Logger
watchDirectoryFileName string
port uint = 9002
)
// init configures the global logrus logger (debug level, caller reporting,
// custom formatter) and registers/parses the -watchDirectoryFileName flag.
// NOTE(review): calling flag.Parse from init runs before main and before any
// other package can register flags — confirm this ordering is intentional.
func init() {
logger = log.New()
log.SetLevel(log.DebugLevel)
log.SetReportCaller(true)
log.SetFormatter(&util.Format{})
flag.StringVar(&watchDirectoryFileName, "watchDirectoryFileName", "/etc/envoy/envoy-config.yaml", "full path to directory to watch for files")
flag.Parse()
}
// main starts the envoy xDS control plane: it serves snapshots from an
// in-memory cache over gRPC and rebuilds the snapshot whenever the watched
// config file changes.
func main() {
	snapshotCache := cache.NewSnapshotCache(false, cache.IDHash{}, logger)
	proc := controlplane.NewProcessor(snapshotCache, logger)
	go func() {
		ctx := context.Background()
		server := serverv3.NewServer(ctx, snapshotCache, nil)
		controlplane.RunServer(ctx, server, port)
	}()
	notifyCh := make(chan controlplane.NotifyMessage, 100)
	// Seed one Create event so the initial file content is processed before
	// any filesystem event arrives.
	notifyCh <- controlplane.NotifyMessage{
		Operation: controlplane.Create,
		FilePath:  watchDirectoryFileName,
	}
	go controlplane.Watch(watchDirectoryFileName, notifyCh)
	// A single-case select inside an endless for loop is just a blocking
	// receive (staticcheck S1000); ranging over the channel is equivalent.
	for msg := range notifyCh {
		log.Infof("path: %s, event: %v", msg.FilePath, msg.Operation)
		proc.ProcessFile(msg)
	}
}

254
go.mod
View File

@@ -1,106 +1,212 @@
module github.com/wencaiwulue/kubevpn
go 1.18
go 1.20
require (
github.com/cilium/ipam v0.0.0-20211026130907-54a76012817c
github.com/cilium/ipam v0.0.0-20220824141044-46ef3d556735
github.com/docker/cli v23.0.1+incompatible
github.com/docker/docker v23.0.1+incompatible
github.com/docker/go-connections v0.4.0
github.com/docker/libcontainer v2.2.1+incompatible
github.com/envoyproxy/go-control-plane v0.10.1
github.com/fsnotify/fsnotify v1.5.1
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/miekg/dns v1.0.14
github.com/milosgajdos/tenus v0.0.3
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
github.com/envoyproxy/go-control-plane v0.10.3
github.com/fsnotify/fsnotify v1.6.0
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/miekg/dns v1.1.50
github.com/moby/term v0.0.0-20221205130635-1aeaba878587
github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1
github.com/pkg/errors v0.9.1
github.com/shadowsocks/go-shadowsocks2 v0.1.5
github.com/sirupsen/logrus v1.8.1
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8
github.com/spf13/cobra v1.4.0
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd
golang.org/x/sys v0.0.0-20220209214540-3681064d5158
golang.zx2c4.com/wireguard v0.0.0-20211209221555-9c9e7e272434
golang.zx2c4.com/wireguard/windows v0.4.10
github.com/sirupsen/logrus v1.9.0
github.com/spf13/cobra v1.6.1
golang.org/x/net v0.8.0
golang.org/x/sys v0.6.0
golang.zx2c4.com/wireguard v0.0.0-20220920152132-bb719d3a6e2c
golang.zx2c4.com/wireguard/windows v0.5.3
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/grpc v1.40.0
google.golang.org/protobuf v1.27.1
google.golang.org/grpc v1.53.0-dev.0.20230123225046-4075ef07c5d5
google.golang.org/protobuf v1.30.0
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/api v0.24.2
k8s.io/apimachinery v0.24.2
k8s.io/cli-runtime v0.24.2
k8s.io/client-go v0.24.2
k8s.io/klog/v2 v2.60.1 // indirect
k8s.io/kubectl v0.24.2
k8s.io/api v0.26.3
k8s.io/apimachinery v0.26.3
k8s.io/cli-runtime v0.26.1
k8s.io/client-go v0.26.3
k8s.io/klog/v2 v2.90.1 // indirect
k8s.io/kubectl v0.26.1
)
require (
github.com/containerd/containerd v1.5.18
github.com/containernetworking/cni v1.1.2
github.com/golang/protobuf v1.5.2
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
github.com/coredns/caddy v1.1.1
github.com/coredns/coredns v1.10.1
github.com/docker/distribution v2.8.1+incompatible
github.com/google/gopacket v1.1.19
github.com/google/uuid v1.3.0
github.com/hashicorp/go-version v1.6.0
github.com/kevinburke/ssh_config v1.2.0
github.com/libp2p/go-netroute v0.2.1
github.com/mattbaird/jsonpatch v0.0.0-20200820163806-098863c1fc24
github.com/prometheus-community/pro-bing v0.1.0
github.com/schollz/progressbar/v3 v3.13.0
github.com/spf13/pflag v1.0.5
go.uber.org/automaxprocs v1.5.1
golang.org/x/crypto v0.2.0
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090
golang.org/x/oauth2 v0.6.0
golang.org/x/sync v0.1.0
golang.org/x/text v0.8.0
golang.org/x/time v0.3.0
golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224
gvisor.dev/gvisor v0.0.0-20230603040744-5c9219dedd33
k8s.io/utils v0.0.0-20230313181309-38a27ef9d749
sigs.k8s.io/controller-runtime v0.14.5
sigs.k8s.io/kustomize/api v0.12.1
sigs.k8s.io/yaml v1.3.0
)
require (
cloud.google.com/go/compute v1.15.1 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/census-instrumentation/opencensus-proto v0.2.1 // indirect
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.28 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/DataDog/datadog-agent/pkg/obfuscate v0.0.0-20211129110424-6491aa3bf583 // indirect
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0-rc.1 // indirect
github.com/DataDog/datadog-go v4.8.2+incompatible // indirect
github.com/DataDog/datadog-go/v5 v5.0.2 // indirect
github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork // indirect
github.com/DataDog/sketches-go v1.2.1 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/antonmedv/expr v1.12.0 // indirect
github.com/apparentlymart/go-cidr v1.1.0 // indirect
github.com/aws/aws-sdk-go v1.44.194 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/cncf/xds/go v0.0.0-20230112175826-46e39c7b9b43 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
github.com/dgraph-io/ristretto v0.1.0 // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/dnstap/golang-dnstap v0.4.0 // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/emicklei/go-restful/v3 v3.10.2 // indirect
github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/farsightsec/golang-framestream v0.3.0 // indirect
github.com/fatih/camelcase v1.0.0 // indirect
github.com/fvbommel/sortorder v1.0.1 // indirect
github.com/go-errors/errors v1.0.1 // indirect
github.com/go-logr/logr v1.2.0 // indirect
github.com/go-openapi/jsonreference v0.19.5 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
github.com/fvbommel/sortorder v1.0.2 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-cmp v0.5.5 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
github.com/golang/glog v1.0.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/gnostic v0.6.9 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.2.0 // indirect
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect
github.com/googleapis/gax-go/v2 v2.7.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
github.com/imdario/mergo v0.3.14 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/infobloxopen/go-trees v0.0.0-20200715205103-96a057b8dfb9 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.15.15 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mitchellh/go-wordwrap v1.0.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.4.2 // indirect
github.com/moby/buildkit v0.9.0-rc1 // indirect
github.com/moby/patternmatcher v0.5.0 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/sys/signal v0.7.0 // indirect
github.com/moby/sys/symlink v0.2.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/onsi/gomega v1.18.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/runc v1.1.4 // indirect
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 // indirect
github.com/openzipkin/zipkin-go v0.4.1 // indirect
github.com/oschwald/geoip2-golang v1.8.0 // indirect
github.com/oschwald/maxminddb-golang v1.10.0 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/riobard/go-bloom v0.0.0-20200614022211-cdc8013cb5b3 // indirect
github.com/russross/blackfriday v1.5.2 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/testify v1.7.0 // indirect
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224 // indirect
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect
github.com/philhofer/fwd v1.1.1 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/tinylib/msgp v1.1.6 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xlab/treeprint v1.1.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.7 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect
go.etcd.io/etcd/client/v3 v3.5.7 // indirect
go.opencensus.io v0.24.0 // indirect
go.starlark.net v0.0.0-20230112144946-fae38c8a6d89 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.24.0 // indirect
go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect
go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect
golang.org/x/mod v0.11.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/tools v0.6.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/api v0.109.0 // indirect
google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5 // indirect
gopkg.in/DataDog/dd-trace-go.v1 v1.47.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/component-base v0.24.2 // indirect
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/kustomize/api v0.11.4 // indirect
sigs.k8s.io/kustomize/kyaml v0.13.6 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
inet.af/netaddr v0.0.0-20220617031823-097006376321 // indirect
k8s.io/apiextensions-apiserver v0.26.3 // indirect
k8s.io/component-base v0.26.3 // indirect
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
)

1297
go.sum

File diff suppressed because it is too large Load Diff

View File

@@ -2,7 +2,10 @@ package config
import (
"net"
"sync"
"time"
"sigs.k8s.io/kustomize/api/konfig"
)
const (
@@ -11,8 +14,16 @@ const (
// config map keys
KeyDHCP = "DHCP"
KeyDHCP6 = "DHCP6"
KeyEnvoy = "ENVOY_CONFIG"
KeyClusterIPv4POOLS = "IPv4_POOLS"
KeyRefCount = "REF_COUNT"
// secret keys
// TLSCertKey is the key for tls certificates in a TLS secret.
TLSCertKey = "tls_crt"
// TLSPrivateKeyKey is the key for the private key field in a TLS secret.
TLSPrivateKeyKey = "tls_key"
// container name
ContainerSidecarEnvoyProxy = "envoy-proxy"
@@ -21,32 +32,97 @@ const (
VolumeEnvoyConfig = "envoy-config"
innerIPv4Pool = "223.254.254.100/24"
innerIPv4Pool = "223.254.0.100/16"
// Reason: in a Docker environment, the gateway and subnet of the Docker
// network must conflict neither with the inner IP pool nor with Docker's
// default 172.17 range; otherwise requests will not get through.
// Problem solved: in Kubernetes, the service named "kubernetes" may have an
// IP inside Docker's default bridge subnet, e.g.:
// ➜ ~ kubectl get service kubernetes
//NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
//kubernetes ClusterIP 172.17.0.1 <none> 443/TCP 190d
//
// ➜ ~ docker network inspect bridge | jq '.[0].IPAM.Config'
//[
// {
// "Subnet": "172.17.0.0/16",
// "Gateway": "172.17.0.1"
// }
//]
// Without creating a dedicated Docker network, that "kubernetes" service
// would be unreachable from inside containers.
dockerInnerIPv4Pool = "223.255.0.100/16"
//The IPv6 address prefixes FE80::/10 and FF02::/16 are not routable
innerIPv6Pool = "efff:ffff:ffff:ffff:ffff:ffff:ffff:9999/64"
DefaultNetDir = "/etc/cni/net.d"
Proc = "/proc"
CniNetName = "cni-net-dir-kubevpn"
// env name
EnvTunNameOrLUID = "TunNameOrLUID"
EnvInboundPodTunIPv4 = "TunIPv4"
EnvInboundPodTunIPv6 = "TunIPv6"
EnvPodName = "POD_NAME"
EnvPodNamespace = "POD_NAMESPACE"
// header name
HeaderPodName = "POD_NAME"
HeaderPodNamespace = "POD_NAMESPACE"
HeaderIPv4 = "IPv4"
HeaderIPv6 = "IPv6"
// api
APIRentIP = "/rent/ip"
APIReleaseIP = "/release/ip"
KUBECONFIG = "kubeconfig"
// labels
ManageBy = konfig.ManagedbyLabelKey
// pprof port
PProfPort = 32345
// startup by KubeVPN
EnvStartSudoKubeVPNByKubeVPN = "DEPTH_SIGNED_BY_NAISON"
EnvSSHJump = "SSH_JUMP_BY_KUBEVPN"
// transport mode
EnvKubeVPNTransportEngine = "EnvKubeVPNTransportEngine"
)
var (
// Image inject --ldflags -X
Image = "docker.io/naison/kubevpn:latest"
Image = "docker.io/naison/kubevpn:latest"
Version = "latest"
OriginImage = "docker.io/naison/kubevpn:" + Version
)
var CIDR *net.IPNet
var (
CIDR *net.IPNet
CIDR6 *net.IPNet
RouterIP net.IP
RouterIP6 net.IP
var RouterIP net.IP
// for creating docker network
DockerCIDR *net.IPNet
DockerRouterIP net.IP
)
func init() {
RouterIP, CIDR, _ = net.ParseCIDR(innerIPv4Pool)
RouterIP6, CIDR6, _ = net.ParseCIDR(innerIPv6Pool)
DockerRouterIP, DockerCIDR, _ = net.ParseCIDR(dockerInnerIPv4Pool)
}
var Debug bool
var (
SmallBufferSize = 2 * 1024 // 2KB small buffer
MediumBufferSize = 8 * 1024 // 8KB medium buffer
LargeBufferSize = 32 * 1024 // 32KB large buffer
SmallBufferSize = (1 << 13) - 1 // 8KB small buffer
MediumBufferSize = (1 << 15) - 1 // 32KB medium buffer
LargeBufferSize = (1 << 16) - 1 // 64KB large buffer
)
var (
@@ -64,3 +140,25 @@ var (
// UDP over TCP header needs 22 bytes
DefaultMTU = 1500 - 20 - 8 - 21
)
var (
LPool = &sync.Pool{
New: func() interface{} {
return make([]byte, LargeBufferSize)
},
}
)
var SPool = sync.Pool{
New: func() any {
return make([]byte, 2)
},
}
type Engine string
const (
EngineGvisor Engine = "gvisor"
EngineMix Engine = "mix"
EngineRaw Engine = "raw"
)

View File

@@ -1,17 +0,0 @@
-----BEGIN CERTIFICATE-----
MIICnjCCAYYCCQCZjx/vIRKxhjANBgkqhkiG9w0BAQsFADAQMQ4wDAYDVQQDDAVj
aGluYTAgFw0yMTEwMzAxNDA2NDRaGA8yMTIxMTAwNjE0MDY0NFowEDEOMAwGA1UE
AwwFY2hpbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9Zpg8u+dl
gNbPeyWGKuG+yCIE4eh8s9OEuiBjBFV/+p40sQJuxjTtdQWG0LzrWse8IjR3xan4
fyACriP7YJYvzOA1Zi3+G4312NgEvpHZqzp1H6AbKa2voOrxws32RP4vHcCPsWO8
2hnCz0Q2NE4alVqBllTjIM5jESvAGko6C5XNSo9qOZUR8A1sMoufkZTx13A1gpeG
iboqgoCY0vagYB9lRqjBgyxj/bD++Kv5hUC9G5RY2i/l4ZYJx0AYgrLoy4lUtxWP
d5gyAuUAsi+38ziZzPVcGv8g/a/9ga24/QQu2iWdmSzu2h/sxd1pcj6jGCxhyylO
GxJZ1RAhNHalAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAJQzOiUS+3kufHpfvq2i
+07m4t+jo0x4zg1zsVe2Tdv9MCluPuC8fWnDzSPYICTFrVCdhMq4y8Y7HsY/JjG3
+tbArfguWlmxAPVA6f3yNoRZ3oihMUDNjq00Ho28UVomaywoDpcJ8fjWvcpH+xTH
h6Oh1rxQN+n3r7amfGdMVLdeCs7Wylmj0oCYdnkwla7OPEULLn/JPG7O+S9zqEi0
b4x9ij75erZp8mgRQs84C9vzuUgygtYw0b5zycKFP9Rp42Lm1xqV2lX8f3uVO38L
25S9mUtCe63zS1V2MXXldvQtBonO5I8UfV3IZoyAw9pA8s3MfJv4Fi5gaFjV+gZq
W90=
-----END CERTIFICATE-----

View File

@@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC9Zpg8u+dlgNbP
eyWGKuG+yCIE4eh8s9OEuiBjBFV/+p40sQJuxjTtdQWG0LzrWse8IjR3xan4fyAC
riP7YJYvzOA1Zi3+G4312NgEvpHZqzp1H6AbKa2voOrxws32RP4vHcCPsWO82hnC
z0Q2NE4alVqBllTjIM5jESvAGko6C5XNSo9qOZUR8A1sMoufkZTx13A1gpeGiboq
goCY0vagYB9lRqjBgyxj/bD++Kv5hUC9G5RY2i/l4ZYJx0AYgrLoy4lUtxWPd5gy
AuUAsi+38ziZzPVcGv8g/a/9ga24/QQu2iWdmSzu2h/sxd1pcj6jGCxhyylOGxJZ
1RAhNHalAgMBAAECggEBALFNpMBeSz1rQiyTJMqXxCKcKbSWYtSyZxV+RClNeljH
HWlIN3XJ2OxeOyE8sU5F+mr1PlbNVNOK9kVsDcUaYx42VcHHeNDDrL50E61FVTYG
pD/WrkQfXTfnlWljKvobFjS3Tnd14V9+cNU8wKdZibA7FrHvMGI7aNm2zlUUh14T
sGi1J8vqp69VX4VUtIs9nXMe8DWIs8bVcwbcOnitj/G7i441JCSOY5E/gMRN0JHg
UPNSw5erWQw6rlTZ7fqDhVx/xR5rGCfWsuszIy/kZkp9JMDDve3GyodljepZGA2n
4t0vW1s0OYtwbCjhecSXJpR7WFnZY/ll5sOru/pDIqECgYEA+4jUdOX+IWREHWb3
vNmREWuLmMct+KVaCYJ9GGMQfIwSIJQlJq+NuIcIzt1iN8I0l3EI52byxGyGtHkY
mU6w5eQRpMO/Dv7vKyL6S3aiqESxuy7HWd6mg7bEu/JGuQcBJcRWLLrGsj551K3f
RArltqcxjfw3CytrF/JPW43wMB0CgYEAwMNhXgW42yQNG60zhc4zYnPp8bVwJp/A
yL59d5QXjkyOEO+UlwoOwWGOg56+6cEZNfxc4zjE9nbyz7U5ypFICP82pE00AHXO
q6IUZxqGm/JxQhL61J7jT+wG3e/vwfsteduMlit7vOx/KC9xQW3eCCif0R6Uu1oD
j6Bhub24KikCgYEA9SPqYy9PGD3+wGT8ppmnb0HP4AqmfACymjgJML5DcA6XEBcx
id1oEmHQLMPHmC32UW3BTryfdt2J/tNSLP9rGfHHXDvFtZixgOnq46bwaWvhh1rU
wHpk8FMsszswv9zaunL5xUgWo9qNo7/8qvSv4e5aNlWLU6ByE/l8a+8OGeUCgYA/
KsdpbC6bgUDaZPORUXT9Okbbcj2CKq+eGO48lUby1ptnaVsj86PKMxHkh8zABQsh
6cT2oM/KhEglUJnTi7AzYo6hYLs9u10yWTaeYs7ho50Brf6MVlTfB9VoPQwwYQMR
/6QeQbmWu1kf7gwLCNnNiqJ0gLT0gBbSphfgKg+DoQKBgQCJutEP9+9M8IGtckUz
VWeyEMJKQ2gCn4zGCxEqJ6UsZpIUaw69SdRIaeLMqInAacuhvg7phswQUAZCEvrS
9xLcgBMF0l8DaQg2h+6nbmKn/jEdjkuvZ7yGkg8bx54TK51DAnMdn0V15b3SRbrB
G3rfc1Y6M1U3AjTdvktgivU1DQ==
-----END PRIVATE KEY-----

View File

@@ -1 +0,0 @@
openssl req -x509 -nodes -days 36500 -newkey rsa:2048 -subj "/CN=china" -keyout server.key -out server.crt

View File

@@ -1,34 +0,0 @@
package config
import (
"crypto/tls"
"embed"
log "github.com/sirupsen/logrus"
)
//go:embed server.crt
var crt embed.FS
//go:embed server.key
var key embed.FS
var TlsConfigServer *tls.Config
var TlsConfigClient *tls.Config
// init loads the embedded self-signed certificate/key pair and builds the
// shared server and client TLS configurations. A broken embedded pair is a
// build defect, so failure is fatal.
func init() {
// Read errors are ignored: the files are compiled in via go:embed, and an
// empty pair fails X509KeyPair below anyway.
crtBytes, _ := crt.ReadFile("server.crt")
keyBytes, _ := key.ReadFile("server.key")
pair, err := tls.X509KeyPair(crtBytes, keyBytes)
if err != nil {
log.Fatal(err)
}
TlsConfigServer = &tls.Config{
Certificates: []tls.Certificate{pair},
}
// InsecureSkipVerify: the client trusts any server certificate. This is
// intentional for the self-signed pair above, but means no server
// authentication — do not reuse this config for external endpoints.
TlsConfigClient = &tls.Config{
Certificates: []tls.Certificate{pair},
InsecureSkipVerify: true,
}
}

View File

@@ -1,52 +0,0 @@
package config
import (
"crypto/tls"
"fmt"
"io"
"net"
"testing"
log "github.com/sirupsen/logrus"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
// init enables debug logging for every test in this package.
func init() {
util.InitLogger(true)
}
// TestName round-trips a message over a TLS-wrapped TCP loopback connection:
// a goroutine accepts connections with TlsConfigServer and echoes a reply,
// while the test dials with TlsConfigClient and reads the reply back.
//
// Fixes over the original: the net.Listen error was discarded (a failed
// listen made tls.NewListener operate on nil), an Accept error still used the
// possibly-nil conn, a failed Read leaked the conn (Close was deferred only
// after the read), and the client Write error was ignored. The listener and
// client are now closed when the test ends.
func TestName(t *testing.T) {
	listen, err := net.Listen("tcp", ":9090")
	if err != nil {
		t.Fatal(err)
	}
	defer listen.Close()
	listener := tls.NewListener(listen, TlsConfigServer)
	go func() {
		for {
			conn, err := listener.Accept()
			if err != nil {
				// Listener closed or broken: stop accepting instead of
				// spinning with a nil conn.
				log.Errorln(err)
				return
			}
			go func(conn net.Conn) {
				defer conn.Close()
				bytes := make([]byte, 1024)
				all, err2 := conn.Read(bytes)
				if err2 != nil {
					log.Errorln(err2)
					return
				}
				fmt.Println(string(bytes[:all]))
				// Closing the conn (deferred above) signals EOF to the
				// client's io.ReadAll.
				_, _ = io.WriteString(conn, "hello client")
			}(conn)
		}
	}()
	dial, err := net.Dial("tcp", ":9090")
	if err != nil {
		t.Fatal(err)
	}
	client := tls.Client(dial, TlsConfigClient)
	defer client.Close()
	if _, err = client.Write([]byte("hi server")); err != nil {
		t.Fatal(err)
	}
	all, err := io.ReadAll(client)
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(string(all))
}

View File

@@ -9,17 +9,22 @@ import (
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
corsv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/cors/v3"
grpcwebv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/grpc_web/v3"
routerv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3"
httpinspector "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/http_inspector/v3"
dstv3inspector "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/original_dst/v3"
httpconnectionmanager "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
tcpproxy "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3"
httpv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/v3"
matcher "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
"github.com/envoyproxy/go-control-plane/pkg/cache/types"
"github.com/envoyproxy/go-control-plane/pkg/resource/v3"
"github.com/envoyproxy/go-control-plane/pkg/wellknown"
"github.com/golang/protobuf/ptypes/wrappers"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/wrapperspb"
corev1 "k8s.io/api/core/v1"
)
@@ -30,8 +35,9 @@ type Virtual struct {
}
type Rule struct {
Headers map[string]string
LocalTunIP string
Headers map[string]string
LocalTunIPv4 string
LocalTunIPv6 string
}
func (a *Virtual) To() (
@@ -48,12 +54,15 @@ func (a *Virtual) To() (
var rr []*route.Route
for _, rule := range a.Rules {
clusterName := fmt.Sprintf("%s_%v", rule.LocalTunIP, port.ContainerPort)
clusters = append(clusters, ToCluster(clusterName))
endpoints = append(endpoints, ToEndPoint(clusterName, rule.LocalTunIP, port.ContainerPort))
rr = append(rr, ToRoute(clusterName, rule.Headers))
for _, ip := range []string{rule.LocalTunIPv4, rule.LocalTunIPv6} {
clusterName := fmt.Sprintf("%s_%v", ip, port.ContainerPort)
clusters = append(clusters, ToCluster(clusterName))
endpoints = append(endpoints, ToEndPoint(clusterName, ip, port.ContainerPort))
rr = append(rr, ToRoute(clusterName, rule.Headers))
}
}
rr = append(rr, DefaultRoute())
clusters = append(clusters, OriginCluster())
routes = append(routes, &route.RouteConfiguration{
Name: routeName,
VirtualHosts: []*route.VirtualHost{
@@ -63,6 +72,7 @@ func (a *Virtual) To() (
Routes: rr,
},
},
MaxDirectResponseBodySizeBytes: nil,
})
}
return
@@ -93,12 +103,13 @@ func ToEndPoint(clusterName string, localTunIP string, port int32) *endpoint.Clu
}
func ToCluster(clusterName string) *cluster.Cluster {
anyFunc := func(m proto.Message) *anypb.Any {
pbst, _ := anypb.New(m)
return pbst
}
return &cluster.Cluster{
Name: clusterName,
ConnectTimeout: durationpb.New(5 * time.Second),
ClusterDiscoveryType: &cluster.Cluster_Type{Type: cluster.Cluster_EDS},
LbPolicy: cluster.Cluster_ROUND_ROBIN,
DnsLookupFamily: cluster.Cluster_V4_ONLY,
EdsClusterConfig: &cluster.Cluster_EdsClusterConfig{
EdsConfig: &core.ConfigSource{
ResourceApiVersion: resource.DefaultAPIVersion,
@@ -107,10 +118,24 @@ func ToCluster(clusterName string) *cluster.Cluster {
},
},
},
ConnectTimeout: durationpb.New(5 * time.Second),
LbPolicy: cluster.Cluster_ROUND_ROBIN,
TypedExtensionProtocolOptions: map[string]*anypb.Any{
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": anyFunc(&httpv3.HttpProtocolOptions{
UpstreamProtocolOptions: &httpv3.HttpProtocolOptions_UseDownstreamProtocolConfig{
UseDownstreamProtocolConfig: &httpv3.HttpProtocolOptions_UseDownstreamHttpConfig{},
},
}),
},
DnsLookupFamily: cluster.Cluster_ALL,
}
}
func OriginCluster() *cluster.Cluster {
anyFunc := func(m proto.Message) *anypb.Any {
pbst, _ := anypb.New(m)
return pbst
}
return &cluster.Cluster{
Name: "origin_cluster",
ConnectTimeout: durationpb.New(time.Second * 5),
@@ -118,6 +143,13 @@ func OriginCluster() *cluster.Cluster {
ClusterDiscoveryType: &cluster.Cluster_Type{
Type: cluster.Cluster_ORIGINAL_DST,
},
TypedExtensionProtocolOptions: map[string]*anypb.Any{
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": anyFunc(&httpv3.HttpProtocolOptions{
UpstreamProtocolOptions: &httpv3.HttpProtocolOptions_UseDownstreamProtocolConfig{
UseDownstreamProtocolConfig: &httpv3.HttpProtocolOptions_UseDownstreamHttpConfig{},
},
}),
},
}
}
@@ -131,6 +163,7 @@ func ToRoute(clusterName string, headers map[string]string) *route.Route {
MatchPattern: &matcher.StringMatcher_Exact{
Exact: v,
},
IgnoreCase: true,
},
},
})
@@ -147,6 +180,12 @@ func ToRoute(clusterName string, headers map[string]string) *route.Route {
ClusterSpecifier: &route.RouteAction_Cluster{
Cluster: clusterName,
},
Timeout: durationpb.New(0),
IdleTimeout: durationpb.New(0),
MaxStreamDuration: &route.RouteAction_MaxStreamDuration{
MaxStreamDuration: durationpb.New(0),
GrpcTimeoutHeaderMax: durationpb.New(0),
},
},
},
}
@@ -164,6 +203,12 @@ func DefaultRoute() *route.Route {
ClusterSpecifier: &route.RouteAction_Cluster{
Cluster: "origin_cluster",
},
Timeout: durationpb.New(0),
IdleTimeout: durationpb.New(0),
MaxStreamDuration: &route.RouteAction_MaxStreamDuration{
MaxStreamDuration: durationpb.New(0),
GrpcTimeoutHeaderMax: durationpb.New(0),
},
},
},
}
@@ -180,7 +225,7 @@ func ToListener(listenerName string, routeName string, port int32, p corev1.Prot
protocol = core.SocketAddress_TCP
}
any := func(m proto.Message) *anypb.Any {
anyFunc := func(m proto.Message) *anypb.Any {
pbst, _ := anypb.New(m)
return pbst
}
@@ -188,9 +233,6 @@ func ToListener(listenerName string, routeName string, port int32, p corev1.Prot
httpManager := &httpconnectionmanager.HttpConnectionManager{
CodecType: httpconnectionmanager.HttpConnectionManager_AUTO,
StatPrefix: "http",
HttpFilters: []*httpconnectionmanager.HttpFilter{{
Name: wellknown.Router,
}},
RouteSpecifier: &httpconnectionmanager.HttpConnectionManager_Rds{
Rds: &httpconnectionmanager.Rds{
ConfigSource: &core.ConfigSource{
@@ -211,6 +253,31 @@ func ToListener(listenerName string, routeName string, port int32, p corev1.Prot
RouteConfigName: routeName,
},
},
// "details": "Error: terminal filter named envoy.filters.http.router of type envoy.filters.http.router must be the last filter in a http filter chain."
HttpFilters: []*httpconnectionmanager.HttpFilter{
{
Name: wellknown.GRPCWeb,
ConfigType: &httpconnectionmanager.HttpFilter_TypedConfig{
TypedConfig: anyFunc(&grpcwebv3.GrpcWeb{}),
},
},
{
Name: wellknown.CORS,
ConfigType: &httpconnectionmanager.HttpFilter_TypedConfig{
TypedConfig: anyFunc(&corsv3.Cors{}),
},
},
{
Name: wellknown.Router,
ConfigType: &httpconnectionmanager.HttpFilter_TypedConfig{
TypedConfig: anyFunc(&routerv3.Router{}),
},
},
},
StreamIdleTimeout: durationpb.New(0),
UpgradeConfigs: []*httpconnectionmanager.HttpConnectionManager_UpgradeConfig{{
UpgradeType: "websocket",
}},
}
tcpConfig := &tcpproxy.TcpProxy{
@@ -223,8 +290,8 @@ func ToListener(listenerName string, routeName string, port int32, p corev1.Prot
return &listener.Listener{
Name: listenerName,
TrafficDirection: core.TrafficDirection_INBOUND,
BindToPort: &wrappers.BoolValue{Value: false},
UseOriginalDst: &wrappers.BoolValue{Value: true},
BindToPort: &wrapperspb.BoolValue{Value: false},
UseOriginalDst: &wrapperspb.BoolValue{Value: true},
Address: &core.Address{
Address: &core.Address_SocketAddress{
@@ -246,7 +313,7 @@ func ToListener(listenerName string, routeName string, port int32, p corev1.Prot
{
Name: wellknown.HTTPConnectionManager,
ConfigType: &listener.Filter_TypedConfig{
TypedConfig: any(httpManager),
TypedConfig: anyFunc(httpManager),
},
},
},
@@ -256,7 +323,7 @@ func ToListener(listenerName string, routeName string, port int32, p corev1.Prot
{
Name: wellknown.TCPProxy,
ConfigType: &listener.Filter_TypedConfig{
TypedConfig: any(tcpConfig),
TypedConfig: anyFunc(tcpConfig),
},
},
},
@@ -266,7 +333,13 @@ func ToListener(listenerName string, routeName string, port int32, p corev1.Prot
{
Name: wellknown.HttpInspector,
ConfigType: &listener.ListenerFilter_TypedConfig{
TypedConfig: any(&httpinspector.HttpInspector{}),
TypedConfig: anyFunc(&httpinspector.HttpInspector{}),
},
},
{
Name: wellknown.OriginalDestination,
ConfigType: &listener.ListenerFilter_TypedConfig{
TypedConfig: anyFunc(&dstv3inspector.OriginalDst{}),
},
},
},

48
pkg/controlplane/main.go Normal file
View File

@@ -0,0 +1,48 @@
package controlplane
import (
"context"
"fmt"
"github.com/envoyproxy/go-control-plane/pkg/cache/v3"
serverv3 "github.com/envoyproxy/go-control-plane/pkg/server/v3"
"github.com/fsnotify/fsnotify"
log "github.com/sirupsen/logrus"
)
// Main starts the xDS control plane: it serves the snapshot cache over
// gRPC on the given port and keeps the cache in sync with the YAML
// config file at filename, reprocessing it whenever fsnotify reports a
// change. Blocks forever consuming change notifications.
func Main(filename string, port uint, logger *log.Logger) {
	snapshotCache := cache.NewSnapshotCache(false, cache.IDHash{}, logger)
	proc := NewProcessor(snapshotCache, logger)
	go func() {
		ctx := context.Background()
		server := serverv3.NewServer(ctx, snapshotCache, nil)
		RunServer(ctx, server, port)
	}()
	// Buffered so the initial Create event below cannot block before the
	// consumer loop at the bottom starts.
	notifyCh := make(chan NotifyMessage, 100)
	notifyCh <- NotifyMessage{
		Operation: Create,
		FilePath:  filename,
	}
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(fmt.Errorf("failed to create file watcher, err: %v", err))
	}
	defer watcher.Close()
	if err = watcher.Add(filename); err != nil {
		// fixed typo in the error message: "wather" -> "watcher"
		log.Fatal(fmt.Errorf("failed to add file: %s to watcher, err: %v", filename, err))
	}
	go func() {
		log.Fatal(Watch(watcher, filename, notifyCh))
	}()
	// Apply every change notification as it arrives. (A single-case
	// select loop was simplified to a plain range over the channel.)
	for msg := range notifyCh {
		proc.ProcessFile(msg)
	}
}

View File

@@ -2,16 +2,20 @@ package controlplane
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"math"
"math/rand"
"os"
"reflect"
"strconv"
"time"
"github.com/envoyproxy/go-control-plane/pkg/cache/types"
"github.com/envoyproxy/go-control-plane/pkg/cache/v3"
"github.com/envoyproxy/go-control-plane/pkg/resource/v3"
"github.com/sirupsen/logrus"
utilcache "k8s.io/apimachinery/pkg/util/cache"
"k8s.io/apimachinery/pkg/util/yaml"
)
@@ -19,13 +23,16 @@ type Processor struct {
cache cache.SnapshotCache
logger *logrus.Logger
version int64
expireCache *utilcache.Expiring
}
func NewProcessor(cache cache.SnapshotCache, log *logrus.Logger) *Processor {
return &Processor{
cache: cache,
logger: log,
version: rand.Int63n(1000),
cache: cache,
logger: log,
version: rand.Int63n(1000),
expireCache: utilcache.NewExpiring(),
}
}
@@ -47,6 +54,14 @@ func (p *Processor) ProcessFile(file NotifyMessage) {
if len(config.Uid) == 0 {
continue
}
lastConfig, ok := p.expireCache.Get(config.Uid)
if ok && reflect.DeepEqual(lastConfig.(*Virtual), config) {
marshal, _ := json.Marshal(config)
p.logger.Debugf("config are same, not needs to update, config: %s", string(marshal))
continue
}
p.logger.Debugf("update config, version %d, config %v", p.version, config)
listeners, clusters, routes, endpoints := config.To()
resources := map[resource.Type][]types.Resource{
resource.ListenerType: listeners, // listeners
@@ -56,7 +71,8 @@ func (p *Processor) ProcessFile(file NotifyMessage) {
resource.RuntimeType: {}, // runtimes
resource.SecretType: {}, // secrets
}
snapshot, err := cache.NewSnapshot(p.newVersion(), resources)
var snapshot *cache.Snapshot
snapshot, err = cache.NewSnapshot(p.newVersion(), resources)
if err != nil {
p.logger.Errorf("snapshot inconsistency: %v, err: %v", snapshot, err)
@@ -68,17 +84,19 @@ func (p *Processor) ProcessFile(file NotifyMessage) {
return
}
p.logger.Debugf("will serve snapshot %+v, nodeID: %s", snapshot, config.Uid)
if err = p.cache.SetSnapshot(context.TODO(), config.Uid, snapshot); err != nil {
if err = p.cache.SetSnapshot(context.Background(), config.Uid, snapshot); err != nil {
p.logger.Errorf("snapshot error %q for %v", err, snapshot)
p.logger.Fatal(err)
}
p.expireCache.Set(config.Uid, config, time.Minute*5)
}
}
func ParseYaml(file string) ([]*Virtual, error) {
var virtualList = make([]*Virtual, 0)
yamlFile, err := ioutil.ReadFile(file)
yamlFile, err := os.ReadFile(file)
if err != nil {
return nil, fmt.Errorf("Error reading YAML file: %s\n", err)
}

View File

@@ -3,7 +3,6 @@ package controlplane
import (
"context"
"fmt"
"log"
"net"
clusterservice "github.com/envoyproxy/go-control-plane/envoy/service/cluster/v3"
@@ -14,6 +13,7 @@ import (
runtimeservice "github.com/envoyproxy/go-control-plane/envoy/service/runtime/v3"
secretservice "github.com/envoyproxy/go-control-plane/envoy/service/secret/v3"
serverv3 "github.com/envoyproxy/go-control-plane/pkg/server/v3"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
)
@@ -38,7 +38,7 @@ func RunServer(ctx context.Context, server serverv3.Server, port uint) {
secretservice.RegisterSecretDiscoveryServiceServer(grpcServer, server)
runtimeservice.RegisterRuntimeDiscoveryServiceServer(grpcServer, server)
log.Printf("management server listening on %d\n", port)
log.Infof("management server listening on %d", port)
if err = grpcServer.Serve(listener); err != nil {
log.Fatal(err)
}

View File

@@ -1,7 +1,7 @@
package controlplane
import (
"log"
"fmt"
"time"
"github.com/fsnotify/fsnotify"
@@ -20,22 +20,14 @@ type NotifyMessage struct {
FilePath string
}
func Watch(directory string, notifyCh chan<- NotifyMessage) {
watcher, err := fsnotify.NewWatcher()
if err != nil {
log.Fatal(err)
}
defer watcher.Close()
err = watcher.Add(directory)
if err != nil {
log.Fatal(err)
}
func Watch(watcher *fsnotify.Watcher, filename string, notifyCh chan<- NotifyMessage) error {
ticker := time.NewTicker(time.Second * 5)
defer ticker.Stop()
for {
select {
case event, ok := <-watcher.Events:
if !ok {
return
return fmt.Errorf("watcher has closed")
}
if event.Op&fsnotify.Write == fsnotify.Write {
notifyCh <- NotifyMessage{
@@ -56,14 +48,14 @@ func Watch(directory string, notifyCh chan<- NotifyMessage) {
case err, ok := <-watcher.Errors:
if !ok {
return
return fmt.Errorf("watcher error closed")
}
log.Println("error:", err)
return err
case <-time.Tick(time.Second * 3):
case <-ticker.C:
notifyCh <- NotifyMessage{
Operation: Remove,
FilePath: directory,
Operation: Modify,
FilePath: filename,
}
}
}

View File

@@ -3,7 +3,6 @@ package core
import (
"context"
"errors"
"fmt"
"math"
"net"
)
@@ -61,15 +60,37 @@ func (c *Chain) dial(ctx context.Context) (net.Conn, error) {
func (*Chain) resolve(addr string) string {
if host, port, err := net.SplitHostPort(addr); err == nil {
if ips, err := net.LookupIP(host); err == nil && len(ips) > 0 {
return fmt.Sprintf("%s:%s", ips[0].String(), port)
return net.JoinHostPort(ips[0].String(), port)
}
}
return addr
}
func (c *Chain) getConn(_ context.Context) (net.Conn, error) {
func (c *Chain) getConn(ctx context.Context) (net.Conn, error) {
if c.IsEmpty() {
return nil, ErrorEmptyChain
}
return c.Node().Client.Dial(c.Node().Addr)
return c.Node().Client.Dial(ctx, c.resolve(c.Node().Addr))
}
// Handler processes a single accepted connection; implementations own
// the connection's lifecycle for the duration of the call.
type Handler interface {
	Handle(ctx context.Context, conn net.Conn)
}

// Client pairs a Transporter (which dials the raw connection) with a
// Connector (which upgrades/tunes that connection for the tunnel).
type Client struct {
	Connector
	Transporter
}

// Connector upgrades an already-dialed connection (e.g. sets TCP
// options, wraps it with datagram framing) and returns the conn to use.
type Connector interface {
	ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error)
}

// Transporter dials the underlying transport connection to addr.
type Transporter interface {
	Dial(ctx context.Context, addr string) (net.Conn, error)
}

// Server couples a listener with the Handler that serves its connections.
type Server struct {
	Listener net.Listener
	Handler  Handler
}

View File

@@ -1,19 +0,0 @@
package core
import (
"context"
"net"
)
type Client struct {
Connector
Transporter
}
type Connector interface {
ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error)
}
type Transporter interface {
Dial(addr string) (net.Conn, error)
}

111
pkg/core/gvisorstack.go Executable file
View File

@@ -0,0 +1,111 @@
package core
import (
"context"
"fmt"
log "github.com/sirupsen/logrus"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/link/packetsocket"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/tcpip/transport/raw"
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
)
// Compile-time assertion that *id satisfies stack.UniqueID.
var _ stack.UniqueID = (*id)(nil)

// id is a trivial stack.UniqueID implementation for the single netstack
// instance created by NewStack.
type id struct {
}

// UniqueID always returns 1; only one stack is ever created here.
func (i id) UniqueID() uint64 {
	return 1
}
// NewStack builds a gvisor userspace network stack bound to the given
// TUN link endpoint: IPv4/IPv6 with TCP and UDP handled by TCPForwarder
// and UDPForwarder, promiscuous mode and spoofing enabled so the single
// NIC accepts traffic for any address, and IP forwarding turned on for
// both protocols. Option-setting failures are fatal.
func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
	s := stack.New(stack.Options{
		NetworkProtocols: []stack.NetworkProtocolFactory{
			ipv4.NewProtocol,
			ipv6.NewProtocol,
		},
		TransportProtocols: []stack.TransportProtocolFactory{
			tcp.NewProtocol,
			udp.NewProtocol,
		},
		Clock:                    tcpip.NewStdClock(),
		AllowPacketEndpointWrite: true,
		HandleLocal:              false, // if set to true, ping local ip will fail
		// Enable raw sockets for users with sufficient
		// privileges.
		RawFactory: raw.EndpointFactory{},
		UniqueID:   id{},
	})
	// set handler for TCP UDP ICMP
	s.SetTransportProtocolHandler(tcp.ProtocolNumber, TCPForwarder(s))
	s.SetTransportProtocolHandler(udp.ProtocolNumber, UDPForwarder(s))
	// Route all v4 and v6 traffic through NIC 1 (the TUN device).
	s.SetRouteTable([]tcpip.Route{
		{
			Destination: header.IPv4EmptySubnet,
			NIC:         1,
		},
		{
			Destination: header.IPv6EmptySubnet,
			NIC:         1,
		},
	})
	// NOTE(review): errors from NIC creation and the mode toggles below
	// are ignored; confirm whether they should be fatal like the option
	// setters further down.
	s.CreateNICWithOptions(1, packetsocket.New(tun), stack.NICOptions{
		Disabled: false,
		Context:  ctx,
	})
	s.SetPromiscuousMode(1, true)
	s.SetSpoofing(1, true)
	// Enable SACK Recovery.
	{
		opt := tcpip.TCPSACKEnabled(true)
		if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
			log.Fatal(fmt.Errorf("SetTransportProtocolOption(%d, &%T(%t)): %s", tcp.ProtocolNumber, opt, opt, err))
		}
	}
	// Set default TTLs as required by socket/netstack.
	{
		opt := tcpip.DefaultTTLOption(64)
		if err := s.SetNetworkProtocolOption(ipv4.ProtocolNumber, &opt); err != nil {
			log.Fatal(fmt.Errorf("SetNetworkProtocolOption(%d, &%T(%d)): %s", ipv4.ProtocolNumber, opt, opt, err))
		}
		if err := s.SetNetworkProtocolOption(ipv6.ProtocolNumber, &opt); err != nil {
			log.Fatal(fmt.Errorf("SetNetworkProtocolOption(%d, &%T(%d)): %s", ipv6.ProtocolNumber, opt, opt, err))
		}
	}
	// Enable Receive Buffer Auto-Tuning. (This option was previously set
	// twice in a row; the redundant duplicate block has been removed.)
	{
		opt := tcpip.TCPModerateReceiveBufferOption(true)
		if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
			log.Fatal(fmt.Errorf("SetTransportProtocolOption(%d, &%T(%t)): %s", tcp.ProtocolNumber, opt, opt, err))
		}
	}
	// Forward packets between NICs / proxied destinations.
	{
		if err := s.SetForwardingDefaultAndAllNICs(ipv4.ProtocolNumber, true); err != nil {
			log.Fatal(fmt.Errorf("set ipv4 forwarding: %s", err))
		}
		if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, true); err != nil {
			log.Fatal(fmt.Errorf("set ipv6 forwarding: %s", err))
		}
	}
	return s
}

View File

@@ -0,0 +1,137 @@
package core
import (
"bytes"
"context"
"encoding/binary"
"errors"
"io"
"net"
log "github.com/sirupsen/logrus"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"gvisor.dev/gvisor/pkg/waiter"
"github.com/wencaiwulue/kubevpn/pkg/config"
)
// GvisorTCPForwardAddr is the node URL of the remote endpoint that all
// TCP flows intercepted by the gvisor stack are tunnelled to.
var GvisorTCPForwardAddr string

// TCPForwarder returns the gvisor TCP transport handler: every TCP
// connection terminated by the netstack is re-dialed through the tunnel
// chain at GvisorTCPForwardAddr, the original 4-tuple is sent first via
// WriteProxyInfo, then the two connections are piped both ways until
// either side fails or closes.
func TCPForwarder(s *stack.Stack) func(stack.TransportEndpointID, *stack.PacketBuffer) bool {
	return tcp.NewForwarder(s, 0, 100000, func(request *tcp.ForwarderRequest) {
		defer request.Complete(false)
		id := request.ID()
		log.Debugf("[TUN-TCP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
			id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
		)
		node, err := ParseNode(GvisorTCPForwardAddr)
		if err != nil {
			log.Debugf("[TUN-TCP] Error: can not parse gvisor tcp forward addr %s: %v", GvisorTCPForwardAddr, err)
			return
		}
		node.Client = &Client{
			Connector:   GvisorTCPTunnelConnector(),
			Transporter: TCPTransporter(),
		}
		forwardChain := NewChain(5, node)
		// Dial the remote tunnel endpoint before creating the local
		// endpoint, so a dead tunnel aborts the connection early.
		remote, err := forwardChain.dial(context.Background())
		if err != nil {
			log.Debugf("[TUN-TCP] Error: failed to dial remote conn: %v", err)
			return
		}
		if err = WriteProxyInfo(remote, id); err != nil {
			log.Debugf("[TUN-TCP] Error: failed to write proxy info: %v", err)
			return
		}
		w := &waiter.Queue{}
		endpoint, tErr := request.CreateEndpoint(w)
		if tErr != nil {
			log.Debugf("[TUN-TCP] Error: can not create endpoint: %v", tErr)
			return
		}
		conn := gonet.NewTCPConn(w, endpoint)
		defer conn.Close()
		defer remote.Close()
		// Bidirectional copy; the first direction to fail (or EOF) ends
		// the session, and the deferred Closes unblock the other copy.
		errChan := make(chan error, 2)
		go func() {
			i := config.LPool.Get().([]byte)[:]
			defer config.LPool.Put(i[:])
			written, err2 := io.CopyBuffer(remote, conn, i)
			log.Debugf("[TUN-TCP] Debug: write length %d data to remote", written)
			errChan <- err2
		}()
		go func() {
			i := config.LPool.Get().([]byte)[:]
			defer config.LPool.Put(i[:])
			written, err2 := io.CopyBuffer(conn, remote, i)
			log.Debugf("[TUN-TCP] Debug: read length %d data from remote", written)
			errChan <- err2
		}()
		err = <-errChan
		if err != nil && !errors.Is(err, io.EOF) {
			// fixed typo in the log message: "dsiconnect" -> "disconnect"
			log.Debugf("[TUN-TCP] Error: disconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err)
		}
	}).HandlePacket
}
// WriteProxyInfo serializes the connection 4-tuple onto conn in the wire
// format consumed by ParseProxyInfo: local port (2 bytes, big-endian),
// remote port (2 bytes, big-endian), then each address as a 1-byte
// length followed by the raw address bytes.
//
// Fix: the previous implementation wrote the entire pooled SPool buffer
// with b.Write(i) instead of just the two port bytes, which only matched
// ParseProxyInfo (which reads exactly 2 bytes per port) if the pool
// buffer happened to be 2 bytes long. A local 2-byte array makes the
// framing explicit and drops the pool dependency.
func WriteProxyInfo(conn net.Conn, id stack.TransportEndpointID) error {
	var b bytes.Buffer
	var port [2]byte
	binary.BigEndian.PutUint16(port[:], id.LocalPort)
	b.Write(port[:])
	binary.BigEndian.PutUint16(port[:], id.RemotePort)
	b.Write(port[:])
	b.WriteByte(byte(id.LocalAddress.Len()))
	b.Write(id.LocalAddress.AsSlice())
	b.WriteByte(byte(id.RemoteAddress.Len()))
	b.Write(id.RemoteAddress.AsSlice())
	_, err := b.WriteTo(conn)
	return err
}
// ParseProxyInfo reads the connection 4-tuple previously written by
// WriteProxyInfo: two big-endian 2-byte ports, then each address as a
// 1-byte length followed by that many raw bytes. On any read error the
// partially-filled id and the error are returned.
func ParseProxyInfo(conn net.Conn) (id stack.TransportEndpointID, err error) {
	buf := make([]byte, 2)
	// local port
	if _, err = io.ReadFull(conn, buf); err != nil {
		return
	}
	id.LocalPort = binary.BigEndian.Uint16(buf)
	// remote port
	if _, err = io.ReadFull(conn, buf); err != nil {
		return
	}
	id.RemotePort = binary.BigEndian.Uint16(buf)
	// local address: length prefix, then the raw bytes
	if _, err = io.ReadFull(conn, buf[:1]); err != nil {
		return
	}
	local := make([]byte, buf[0])
	if _, err = io.ReadFull(conn, local); err != nil {
		return
	}
	id.LocalAddress = tcpip.AddrFromSlice(local)
	// remote address: length prefix, then the raw bytes
	if _, err = io.ReadFull(conn, buf[:1]); err != nil {
		return
	}
	remote := make([]byte, buf[0])
	if _, err = io.ReadFull(conn, remote); err != nil {
		return
	}
	id.RemoteAddress = tcpip.AddrFromSlice(remote)
	return
}

View File

@@ -0,0 +1,102 @@
package core
import (
"context"
"errors"
"fmt"
"io"
"net"
"time"
log "github.com/sirupsen/logrus"
"github.com/wencaiwulue/kubevpn/pkg/config"
)
// gvisorTCPTunnelConnector tunes TCP-level socket options on the dialed
// tunnel connection; it performs no protocol handshake of its own.
type gvisorTCPTunnelConnector struct {
}

// GvisorTCPTunnelConnector returns the Connector used for gvisor TCP
// tunnel clients.
func GvisorTCPTunnelConnector() Connector {
	return &gvisorTCPTunnelConnector{}
}

// ConnectContext enables no-delay and a 15s keep-alive on TCP
// connections and returns the connection unchanged; non-TCP connections
// pass through untouched.
func (c *gvisorTCPTunnelConnector) ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error) {
	tcpConn, ok := conn.(*net.TCPConn)
	if !ok {
		return conn, nil
	}
	if err := tcpConn.SetNoDelay(true); err != nil {
		return nil, err
	}
	if err := tcpConn.SetKeepAlive(true); err != nil {
		return nil, err
	}
	if err := tcpConn.SetKeepAlivePeriod(15 * time.Second); err != nil {
		return nil, err
	}
	return conn, nil
}
// gvisorTCPHandler serves the remote side of the gvisor TCP tunnel: it
// accepts tunnelled connections and proxies them to their original
// destination.
type gvisorTCPHandler struct{}

// GvisorTCPHandler returns the server-side Handler for gvisor TCP traffic.
func GvisorTCPHandler() Handler {
	return &gvisorTCPHandler{}
}
// Handle implements Handler for the gvisor TCP tunnel server side: it
// reads the original connection 4-tuple from the tunnelled stream
// (ParseProxyInfo), dials the real destination, and pipes data both
// ways until one direction fails or reaches EOF.
func (h *gvisorTCPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
	defer tcpConn.Close()
	log.Debugf("[TUN-TCP] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
	// 1, get proxy info
	endpointID, err := ParseProxyInfo(tcpConn)
	if err != nil {
		log.Debugf("[TUN-TCP] Error: failed to parse proxy info: %v", err)
		return
	}
	log.Debugf("[TUN-TCP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
		endpointID.LocalPort, endpointID.LocalAddress.String(), endpointID.RemotePort, endpointID.RemoteAddress.String(),
	)
	// 2, dial the original destination with a bounded timeout
	host := endpointID.LocalAddress.String()
	port := fmt.Sprintf("%d", endpointID.LocalPort)
	var remote net.Conn
	remote, err = net.DialTimeout("tcp", net.JoinHostPort(host, port), time.Second*5)
	if err != nil {
		log.Debugf("[TUN-TCP] Error: failed to connect addr %s: %v", net.JoinHostPort(host, port), err)
		return
	}
	// fix: the outbound connection was never closed (leak on every
	// proxied session); closing it also unblocks the copy goroutines.
	defer remote.Close()
	errChan := make(chan error, 2)
	go func() {
		i := config.LPool.Get().([]byte)[:]
		defer config.LPool.Put(i[:])
		written, err2 := io.CopyBuffer(remote, tcpConn, i)
		log.Debugf("[TUN-TCP] Debug: write length %d data to remote", written)
		errChan <- err2
	}()
	go func() {
		i := config.LPool.Get().([]byte)[:]
		defer config.LPool.Put(i[:])
		written, err2 := io.CopyBuffer(tcpConn, remote, i)
		log.Debugf("[TUN-TCP] Debug: read length %d data from remote", written)
		errChan <- err2
	}()
	// First error/EOF tears the session down.
	err = <-errChan
	if err != nil && !errors.Is(err, io.EOF) {
		// fixed typo in the log message: "dsiconnect" -> "disconnect"
		log.Debugf("[TUN-TCP] Error: disconnect: %s >-<: %s: %v", tcpConn.LocalAddr(), remote.RemoteAddr(), err)
	}
}
// GvisorTCPListener opens the TCP listener that carries gvisor TCP
// tunnel traffic; accepted connections get keep-alive/no-delay tuning
// via tcpKeepAliveListener.
func GvisorTCPListener(addr string) (net.Listener, error) {
	log.Debug("gvisor tcp listen addr", addr)
	tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
	if err != nil {
		return nil, err
	}
	tcpLn, err := net.ListenTCP("tcp", tcpAddr)
	if err != nil {
		return nil, err
	}
	return &tcpKeepAliveListener{TCPListener: tcpLn}, nil
}

View File

@@ -0,0 +1,83 @@
package core
import (
"context"
"errors"
"io"
log "github.com/sirupsen/logrus"
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
"gvisor.dev/gvisor/pkg/waiter"
"github.com/wencaiwulue/kubevpn/pkg/config"
)
// GvisorUDPForwardAddr is the node URL of the remote endpoint that all
// UDP flows intercepted by the gvisor stack are tunnelled to.
var GvisorUDPForwardAddr string

// UDPForwarder returns the gvisor UDP transport handler. Each
// intercepted UDP flow is tunnelled over a TCP connection to
// GvisorUDPForwardAddr: the original 4-tuple is written first
// (WriteProxyInfo), then datagrams are relayed in both directions until
// either side errors or reaches EOF.
func UDPForwarder(s *stack.Stack) func(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool {
	return udp.NewForwarder(s, func(request *udp.ForwarderRequest) {
		endpointID := request.ID()
		log.Debugf("[TUN-UDP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
			endpointID.LocalPort, endpointID.LocalAddress.String(), endpointID.RemotePort, endpointID.RemoteAddress.String(),
		)
		w := &waiter.Queue{}
		endpoint, tErr := request.CreateEndpoint(w)
		if tErr != nil {
			log.Debugf("[TUN-UDP] Error: can not create endpoint: %v", tErr)
			return
		}
		// NOTE(review): "gviosr" in the log text below is a typo ("gvisor");
		// left unchanged here because this edit touches comments only.
		node, err := ParseNode(GvisorUDPForwardAddr)
		if err != nil {
			log.Debugf("[TUN-UDP] Error: parse gviosr udp forward addr %s: %v", GvisorUDPForwardAddr, err)
			return
		}
		// The connector wraps the raw TCP stream with datagram framing
		// bound to this flow's endpoint id.
		node.Client = &Client{
			Connector:   GvisorUDPOverTCPTunnelConnector(endpointID),
			Transporter: TCPTransporter(),
		}
		forwardChain := NewChain(5, node)
		ctx := context.Background()
		c, err := forwardChain.getConn(ctx)
		if err != nil {
			log.Debugf("[TUN-UDP] Error: can not get conn: %v", err)
			return
		}
		// Send the original 4-tuple on the raw stream before the datagram
		// framing takes over.
		if err = WriteProxyInfo(c, endpointID); err != nil {
			log.Debugf("[TUN-UDP] Error: can not write proxy info: %v", err)
			return
		}
		remote, err := node.Client.ConnectContext(ctx, c)
		if err != nil {
			log.Debugf("[TUN-UDP] Error: can not connect: %v", err)
			return
		}
		conn := gonet.NewUDPConn(s, w, endpoint)
		// Relay asynchronously so the forwarder callback returns promptly.
		go func() {
			defer conn.Close()
			defer remote.Close()
			errChan := make(chan error, 2)
			go func() {
				i := config.LPool.Get().([]byte)[:]
				defer config.LPool.Put(i[:])
				written, err2 := io.CopyBuffer(remote, conn, i)
				log.Debugf("[TUN-UDP] Debug: write length %d data to remote", written)
				errChan <- err2
			}()
			go func() {
				i := config.LPool.Get().([]byte)[:]
				defer config.LPool.Put(i[:])
				written, err2 := io.CopyBuffer(conn, remote, i)
				log.Debugf("[TUN-UDP] Debug: read length %d data from remote", written)
				errChan <- err2
			}()
			// First error/EOF wins; the deferred Closes unblock the peer copy.
			err = <-errChan
			if err != nil && !errors.Is(err, io.EOF) {
				// NOTE(review): "dsiconnect" is a typo ("disconnect"), kept as-is.
				log.Debugf("[TUN-UDP] Error: dsiconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err)
			}
		}()
	}).HandlePacket
}

View File

@@ -0,0 +1,217 @@
package core
import (
"context"
"fmt"
"io"
"net"
"time"
log "github.com/sirupsen/logrus"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"github.com/wencaiwulue/kubevpn/pkg/config"
)
// gvisorUDPOverTCPTunnelConnector wraps a dialed TCP stream so it can
// carry UDP datagrams for the flow identified by Id.
type gvisorUDPOverTCPTunnelConnector struct {
	// Id is the original UDP flow's endpoint 4-tuple.
	Id stack.TransportEndpointID
}

// GvisorUDPOverTCPTunnelConnector returns the Connector used for gvisor
// UDP-over-TCP tunnel clients, bound to the given flow id.
func GvisorUDPOverTCPTunnelConnector(endpointID stack.TransportEndpointID) Connector {
	return &gvisorUDPOverTCPTunnelConnector{
		Id: endpointID,
	}
}
// ConnectContext enables no-delay and a 15s keep-alive when conn is a
// TCP connection, then wraps it with datagram framing so UDP payloads
// can be carried over the stream.
func (c *gvisorUDPOverTCPTunnelConnector) ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error) {
	if tcpConn, ok := conn.(*net.TCPConn); ok {
		if err := tcpConn.SetNoDelay(true); err != nil {
			return nil, err
		}
		if err := tcpConn.SetKeepAlive(true); err != nil {
			return nil, err
		}
		if err := tcpConn.SetKeepAlivePeriod(15 * time.Second); err != nil {
			return nil, err
		}
	}
	return newGvisorFakeUDPTunnelConnOverTCP(ctx, conn)
}
// gvisorUDPHandler serves the remote side of the gvisor UDP-over-TCP
// tunnel: it accepts tunnelled streams and relays datagrams to the
// original destination.
type gvisorUDPHandler struct{}

// GvisorUDPHandler returns the server-side Handler for gvisor UDP traffic.
func GvisorUDPHandler() Handler {
	return &gvisorUDPHandler{}
}

// Handle reads the original flow 4-tuple from the tunnelled stream,
// dials the real UDP destination, and delegates the datagram relay loop
// to handle (which closes the UDP socket when done).
func (h *gvisorUDPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
	defer tcpConn.Close()
	log.Debugf("[TUN-UDP] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
	// 1, get proxy info
	endpointID, err := ParseProxyInfo(tcpConn)
	if err != nil {
		log.Warningf("[TUN-UDP] Error: Failed to parse proxy info: %v", err)
		return
	}
	log.Debugf("[TUN-UDP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
		endpointID.LocalPort, endpointID.LocalAddress.String(), endpointID.RemotePort, endpointID.RemoteAddress.String(),
	)
	// 2, dial proxy
	addr := &net.UDPAddr{
		IP:   endpointID.LocalAddress.AsSlice(),
		Port: int(endpointID.LocalPort),
	}
	var remote *net.UDPConn
	remote, err = net.DialUDP("udp", nil, addr)
	if err != nil {
		log.Debugf("[TUN-UDP] Error: failed to connect addr %s: %v", addr.String(), err)
		return
	}
	handle(ctx, tcpConn, remote)
}
// gvisorFakeUDPTunnelConn presents a datagram-style net.Conn on top of a
// stream (TCP) connection: every Write is framed as one datagram and
// every Read yields exactly one datagram, using readDatagramPacket /
// newDatagramPacket for the framing.
type gvisorFakeUDPTunnelConn struct {
	// the underlying tcp connection
	net.Conn
	ctx context.Context
}

// newGvisorFakeUDPTunnelConnOverTCP wraps conn with datagram framing
// tied to ctx's lifetime.
func newGvisorFakeUDPTunnelConnOverTCP(ctx context.Context, conn net.Conn) (net.Conn, error) {
	return &gvisorFakeUDPTunnelConn{ctx: ctx, Conn: conn}, nil
}

// Read returns the payload of the next framed datagram, or the context
// error once ctx is done.
func (c *gvisorFakeUDPTunnelConn) Read(b []byte) (int, error) {
	if err := c.ctx.Err(); err != nil {
		return 0, err
	}
	dgram, err := readDatagramPacket(c.Conn, b)
	if err != nil {
		return 0, err
	}
	return int(dgram.DataLength), nil
}

// Write frames b as a single datagram and writes it to the stream,
// reporting len(b) on success.
func (c *gvisorFakeUDPTunnelConn) Write(b []byte) (int, error) {
	if err := newDatagramPacket(b).Write(c.Conn); err != nil {
		return 0, err
	}
	return len(b), nil
}

// Close half-closes each direction when the underlying conn supports it,
// then closes the connection.
func (c *gvisorFakeUDPTunnelConn) Close() error {
	if rc, ok := c.Conn.(interface{ CloseRead() error }); ok {
		_ = rc.CloseRead()
	}
	if wc, ok := c.Conn.(interface{ CloseWrite() error }); ok {
		_ = wc.CloseWrite()
	}
	return c.Conn.Close()
}
// GvisorUDPListener opens the TCP listener that carries gvisor
// UDP-over-TCP tunnel traffic; accepted connections get keep-alive
// tuning via tcpKeepAliveListener.
func GvisorUDPListener(addr string) (net.Listener, error) {
	log.Debug("gvisor UDP over TCP listen addr", addr)
	tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
	if err != nil {
		return nil, err
	}
	tcpLn, err := net.ListenTCP("tcp", tcpAddr)
	if err != nil {
		return nil, err
	}
	return &tcpKeepAliveListener{TCPListener: tcpLn}, nil
}
// copyPacketData pumps packets from src to dst (addressed to "to") until
// a read times out or src reaches EOF (both treated as normal
// termination) or a real error occurs. Every ReadFrom is bounded by the
// given timeout.
func copyPacketData(dst, src net.PacketConn, to net.Addr, timeout time.Duration) error {
	buf := config.LPool.Get().([]byte)[:]
	defer config.LPool.Put(buf[:])
	for {
		src.SetReadDeadline(time.Now().Add(timeout))
		n, _, err := src.ReadFrom(buf)
		if err != nil {
			if ne, ok := err.(net.Error); ok && ne.Timeout() {
				return nil // ignore I/O timeout
			}
			if err == io.EOF {
				return nil // ignore EOF
			}
			return err
		}
		if _, err = dst.WriteTo(buf[:n], to); err != nil {
			return err
		}
		// NOTE(review): this sets a *read* deadline on dst right after a
		// write — SetWriteDeadline may have been intended; preserved as-is.
		dst.SetReadDeadline(time.Now().Add(timeout))
	}
}
// handle relays datagrams between the tunnelled TCP stream (carrying
// length-framed datagrams) and the real UDP socket until either
// direction reports an error; both directions run concurrently and the
// first failure ends the session. Closes udpConn on return; the caller
// owns tcpConn.
// NOTE(review): the log lines below mention Server8422 although data is
// written to udpConn — the addresses in those messages may be misleading.
func handle(ctx context.Context, tcpConn net.Conn, udpConn *net.UDPConn) {
	defer udpConn.Close()
	log.Debugf("[TUN-UDP] Debug: %s <-> %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
	errChan := make(chan error, 2)
	// tunnel stream -> UDP destination
	go func() {
		b := config.LPool.Get().([]byte)[:]
		defer config.LPool.Put(b[:])
		for {
			dgram, err := readDatagramPacket(tcpConn, b[:])
			if err != nil {
				log.Debugf("[TUN-UDP] Debug: %s -> 0 : %v", tcpConn.RemoteAddr(), err)
				errChan <- err
				return
			}
			// A zero-length frame is treated as a protocol error.
			if dgram.DataLength == 0 {
				log.Debugf("[TUN-UDP] Error: length is zero")
				errChan <- fmt.Errorf("length of read packet is zero")
				return
			}
			if _, err = udpConn.Write(dgram.Data); err != nil {
				log.Debugf("[TUN-UDP] Error: %s -> %s : %s", tcpConn.RemoteAddr(), Server8422, err)
				errChan <- err
				return
			}
			log.Debugf("[TUN-UDP] Debug: %s >>> %s length: %d", tcpConn.RemoteAddr(), Server8422, dgram.DataLength)
		}
	}()
	// UDP destination -> tunnel stream
	go func() {
		b := config.LPool.Get().([]byte)[:]
		defer config.LPool.Put(b[:])
		for {
			n, _, err := udpConn.ReadFrom(b[:])
			if err != nil {
				log.Debugf("[TUN-UDP] Error: %s : %s", tcpConn.RemoteAddr(), err)
				errChan <- err
				return
			}
			if n == 0 {
				log.Debugf("[TUN-UDP] Error: length is zero")
				errChan <- fmt.Errorf("length of read packet is zero")
				return
			}
			// pipe from peer to tunnel
			dgram := newDatagramPacket(b[:n])
			if err = dgram.Write(tcpConn); err != nil {
				log.Debugf("[TUN-UDP] Error: %s <- %s : %s", tcpConn.RemoteAddr(), dgram.Addr(), err)
				errChan <- err
				return
			}
			log.Debugf("[TUN-UDP] Debug: %s <<< %s length: %d", tcpConn.RemoteAddr(), dgram.Addr(), len(dgram.Data))
		}
	}()
	// Block until one direction fails; the other goroutine exits once its
	// connection is torn down by the caller/deferred close.
	err := <-errChan
	if err != nil {
		log.Debugf("[TUN-UDP] Error: %v", err)
	}
	log.Debugf("[TUN-UDP] Debug: %s >-< %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
	return
}

View File

@@ -1,10 +0,0 @@
package core
import (
"context"
"net"
)
type Handler interface {
Handle(ctx context.Context, conn net.Conn)
}

View File

@@ -39,6 +39,12 @@ func ParseNode(s string) (*Node, error) {
// Get returns node parameter specified by key.
// It prefers the first non-empty value among all values registered for
// key; if none is non-empty it falls back to node.Values.Get, which
// returns the first value (possibly "") or "" when the key is absent.
func (node *Node) Get(key string) string {
	values := node.Values[key]
	for _, value := range values {
		if value != "" {
			return value
		}
	}
	return node.Values.Get(key)
}

View File

@@ -1,25 +0,0 @@
package core
import (
"sync"
"github.com/wencaiwulue/kubevpn/pkg/config"
)
var (
SPool = &sync.Pool{
New: func() interface{} {
return make([]byte, config.SmallBufferSize)
},
}
MPool = &sync.Pool{
New: func() interface{} {
return make([]byte, config.MediumBufferSize)
},
}
LPool = &sync.Pool{
New: func() interface{} {
return make([]byte, config.LargeBufferSize)
},
}
)

130
pkg/core/route.go Normal file
View File

@@ -0,0 +1,130 @@
package core
import (
"fmt"
"net"
"os"
"strings"
"sync"
"github.com/containernetworking/cni/pkg/types"
"github.com/pkg/errors"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/tun"
)
var (
	// RouteNAT Globe route table for inner ip
	RouteNAT = NewNAT()
	// RouteConnNAT map[srcIP]net.Conn
	RouteConnNAT = &sync.Map{}
	// Chan tcp connects
	Chan = make(chan *datagramPacket, MaxSize)
)

// TCPUDPacket wraps a single framed packet; presumably used to pass
// datagrams through Chan — TODO confirm against callers.
type TCPUDPacket struct {
	data *datagramPacket
}
// Route describes the local listeners to create (-L) and the optional
// upstream chain node (-F) traffic is forwarded through.
//
// Route example:
// -L "tcp://:10800" -L "tun://:8422?net=223.254.0.100/16"
// -L "tun:/10.233.24.133:8422?net=223.254.0.102/16&route=223.254.0.0/16"
// -L "tun:/127.0.0.1:8422?net=223.254.0.102/16&route=223.254.0.0/16,10.233.0.0/16" -F "tcp://127.0.0.1:10800"
type Route struct {
	ServeNodes []string // -L tun : node URLs to listen on (tun/tcp/gtcp/gudp)
	ChainNode  string   // -F tcp : upstream node URL for the forwarding chain
	Retries    int      // retry count passed to NewChain
}
// parseChain builds the forwarding chain from the single -F chain node.
// Callers (see GenerateServers) tolerate ErrorInvalidNode as "no chain
// configured".
func (r *Route) parseChain() (*Chain, error) {
	// parse the base nodes
	node, err := parseChainNode(r.ChainNode)
	if err != nil {
		return nil, err
	}
	return NewChain(r.Retries, node), nil
}
// parseChainNode parses ns into a Node and attaches the default tunnel
// client: the UDP-over-TCP connector on top of the TCP transporter.
func parseChainNode(ns string) (*Node, error) {
	n, err := ParseNode(ns)
	if err != nil {
		return nil, err
	}
	n.Client = &Client{
		Connector:   UDPOverTCPTunnelConnector(),
		Transporter: TCPTransporter(),
	}
	return n, nil
}
// GenerateServers builds one Server (listener + handler) per entry in
// r.ServeNodes. Supported protocols: "tun" (TUN device), "tcp"
// (UDP-over-TCP tunnel server), "gtcp"/"gudp" (gvisor TCP/UDP tunnel
// servers). An invalid chain node (-F) is tolerated — ErrorInvalidNode
// is ignored and the (nil) chain is only handed to the tun handler.
func (r *Route) GenerateServers() ([]Server, error) {
	chain, err := r.parseChain()
	if err != nil && !errors.Is(err, ErrorInvalidNode) {
		return nil, err
	}
	servers := make([]Server, 0, len(r.ServeNodes))
	for _, serveNode := range r.ServeNodes {
		var node *Node
		node, err = ParseNode(serveNode)
		if err != nil {
			return nil, err
		}
		var ln net.Listener
		var handler Handler
		switch node.Protocol {
		case "tun":
			// TUN device: config comes from the node URL's query params,
			// the IPv6 address from the environment.
			handler = TunHandler(chain, node)
			ln, err = tun.Listener(tun.Config{
				Name:    node.Get("name"),
				Addr:    node.Get("net"),
				Addr6:   os.Getenv(config.EnvInboundPodTunIPv6),
				MTU:     node.GetInt("mtu"),
				Routes:  parseIPRoutes(node.Get("route")),
				Gateway: node.Get("gw"),
			})
			if err != nil {
				return nil, err
			}
		case "tcp":
			handler = TCPHandler()
			ln, err = TCPListener(node.Addr)
			if err != nil {
				return nil, err
			}
		case "gtcp":
			handler = GvisorTCPHandler()
			ln, err = GvisorTCPListener(node.Addr)
			if err != nil {
				return nil, err
			}
		case "gudp":
			handler = GvisorUDPHandler()
			ln, err = GvisorUDPListener(node.Addr)
			if err != nil {
				return nil, err
			}
		default:
			return nil, fmt.Errorf("not support protocol %s", node.Protocol)
		}
		servers = append(servers, Server{Listener: ln, Handler: handler})
	}
	return servers, nil
}
// parseIPRoutes converts a comma-separated list of CIDRs into routes;
// entries that fail to parse are silently skipped.
func parseIPRoutes(routeStringList string) (routes []types.Route) {
	if routeStringList == "" {
		return
	}
	for _, r := range strings.Split(routeStringList, ",") {
		_, ipNet, _ := net.ParseCIDR(strings.TrimSpace(r))
		if ipNet != nil {
			routes = append(routes, types.Route{Dst: *ipNet})
		}
	}
	return
}

View File

@@ -1,51 +0,0 @@
package core
import (
"context"
"net"
"time"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/util/retry"
)
type Server struct {
Listener net.Listener
Handler Handler
}
// Serve serves as a proxy server.
func (s *Server) Serve(ctx context.Context) error {
l := s.Listener
defer l.Close()
var tempDelay time.Duration
go func() {
<-ctx.Done()
if err := retry.OnError(retry.DefaultBackoff, func(err error) bool { return err != nil }, l.Close); err != nil {
log.Warnf("error while close listener, err: %v", err)
}
}()
for ctx.Err() == nil {
conn, e := l.Accept()
if e != nil {
if ne, ok := e.(net.Error); ok && ne.Temporary() {
if tempDelay == 0 {
tempDelay = 5 * time.Millisecond
} else {
tempDelay *= 2
}
if max := 1 * time.Second; tempDelay > max {
tempDelay = max
}
log.Warnf("server: Accept error: %v; retrying in %v", e, tempDelay)
time.Sleep(tempDelay)
continue
}
return e
}
tempDelay = 0
go s.Handler.Handle(ctx, conn)
}
return nil
}

View File

@@ -1,7 +1,7 @@
package core
import (
"crypto/tls"
"context"
"net"
"github.com/wencaiwulue/kubevpn/pkg/config"
@@ -13,9 +13,9 @@ func TCPTransporter() Transporter {
return &tcpTransporter{}
}
func (tr *tcpTransporter) Dial(addr string) (net.Conn, error) {
func (tr *tcpTransporter) Dial(ctx context.Context, addr string) (net.Conn, error) {
dialer := &net.Dialer{Timeout: config.DialTimeout}
return tls.DialWithDialer(dialer, "tcp", addr, config.TlsConfigClient)
return dialer.DialContext(ctx, "tcp", addr)
}
func TCPListener(addr string) (net.Listener, error) {
@@ -39,7 +39,17 @@ func (ln *tcpKeepAliveListener) Accept() (c net.Conn, err error) {
if err != nil {
return
}
_ = conn.SetKeepAlive(true)
_ = conn.SetKeepAlivePeriod(config.KeepAliveTime)
err = conn.SetKeepAlive(true)
if err != nil {
return nil, err
}
err = conn.SetKeepAlivePeriod(config.KeepAliveTime)
if err != nil {
return nil, err
}
err = conn.SetNoDelay(true)
if err != nil {
return nil, err
}
return conn, nil
}

View File

@@ -2,13 +2,14 @@ package core
import (
"context"
"errors"
"net"
"sync"
"time"
log "github.com/sirupsen/logrus"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
type fakeUDPTunnelConnector struct {
@@ -19,91 +20,88 @@ func UDPOverTCPTunnelConnector() Connector {
}
func (c *fakeUDPTunnelConnector) ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error) {
defer conn.SetDeadline(time.Time{})
//defer conn.SetDeadline(time.Time{})
switch con := conn.(type) {
case *net.TCPConn:
err := con.SetNoDelay(true)
if err != nil {
return nil, err
}
err = con.SetKeepAlive(true)
if err != nil {
return nil, err
}
err = con.SetKeepAlivePeriod(15 * time.Second)
if err != nil {
return nil, err
}
}
return newFakeUDPTunnelConnOverTCP(ctx, conn)
}
type fakeUdpHandler struct {
// map[srcIP]net.Conn
connNAT *sync.Map
ch chan *datagramPacket
}
// TCPHandler creates a server Handler
func TCPHandler() Handler {
return &fakeUdpHandler{}
return &fakeUdpHandler{
connNAT: RouteConnNAT,
ch: Chan,
}
}
var Server8422, _ = net.ResolveUDPAddr("udp", "localhost:8422")
func (h *fakeUdpHandler) Handle(ctx context.Context, tcpConn net.Conn) {
defer tcpConn.Close()
if config.Debug {
log.Debugf("[tcpserver] %s -> %s\n", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
}
// serve tunnel udp, tunnel <-> remote, handle tunnel udp request
udpConn, err := net.DialUDP("udp", nil, Server8422)
if err != nil {
log.Debugf("[tcpserver] udp-tun %s -> %s : %s", tcpConn.RemoteAddr(), udpConn.LocalAddr(), err)
return
}
defer udpConn.Close()
if config.Debug {
log.Debugf("[tcpserver] udp-tun %s <- %s\n", tcpConn.RemoteAddr(), udpConn.LocalAddr())
}
log.Debugf("[tcpserver] udp-tun %s <-> %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
_ = h.tunnelServerUDP(tcpConn, udpConn)
log.Debugf("[tcpserver] udp-tun %s >-< %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
return
}
log.Debugf("[tcpserver] %s -> %s\n", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
var Server8422, _ = net.ResolveUDPAddr("udp", "127.0.0.1:8422")
func (h *fakeUdpHandler) tunnelServerUDP(tcpConn net.Conn, udpConn *net.UDPConn) (err error) {
errChan := make(chan error, 2)
go func() {
b := LPool.Get().([]byte)
defer LPool.Put(b)
for {
dgram, err := readDatagramPacket(tcpConn, b[:])
if err != nil {
log.Debugf("[udp-tun] %s -> 0 : %v", tcpConn.RemoteAddr(), err)
errChan <- err
return
}
if _, err = udpConn.Write(dgram.Data); err != nil {
log.Debugf("[tcpserver] udp-tun %s -> %s : %s", tcpConn.RemoteAddr(), Server8422, err)
errChan <- err
return
}
if config.Debug {
log.Debugf("[tcpserver] udp-tun %s >>> %s length: %d", tcpConn.RemoteAddr(), Server8422, len(dgram.Data))
defer func(addr net.Addr) {
var keys []string
h.connNAT.Range(func(key, value any) bool {
if value.(net.Conn) == tcpConn {
keys = append(keys, key.(string))
}
return true
})
for _, key := range keys {
h.connNAT.Delete(key)
}
}()
log.Debugf("[tcpserver] delete conn %s from globle routeConnNAT, deleted count %d", addr, len(keys))
}(tcpConn.LocalAddr())
go func() {
b := MPool.Get().([]byte)
defer MPool.Put(b)
for {
n, err := udpConn.Read(b[:])
if err != nil {
log.Debugf("[udp-tun] %s : %s", tcpConn.RemoteAddr(), err)
errChan <- err
return
}
// pipe from peer to tunnel
dgram := newDatagramPacket(b[:n])
if err = dgram.Write(tcpConn); err != nil {
log.Debugf("[tcpserver] udp-tun %s <- %s : %s", tcpConn.RemoteAddr(), dgram.Addr(), err)
errChan <- err
return
}
if config.Debug {
log.Debugf("[tcpserver] udp-tun %s <<< %s length: %d", tcpConn.RemoteAddr(), dgram.Addr(), len(dgram.Data))
}
for {
b := config.LPool.Get().([]byte)[:]
dgram, err := readDatagramPacketServer(tcpConn, b[:])
if err != nil {
log.Debugf("[tcpserver] %s -> 0 : %v", tcpConn.RemoteAddr(), err)
return
}
}()
return <-errChan
var src net.IP
bb := dgram.Data[:dgram.DataLength]
if util.IsIPv4(bb) {
src = net.IPv4(bb[12], bb[13], bb[14], bb[15])
} else if util.IsIPv6(bb) {
src = bb[8:24]
} else {
log.Errorf("[tcpserver] unknown packet")
continue
}
value, loaded := h.connNAT.LoadOrStore(src.String(), tcpConn)
if loaded {
if tcpConn != value.(net.Conn) {
h.connNAT.Store(src.String(), tcpConn)
log.Debugf("[tcpserver] replace routeConnNAT: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
}
log.Debugf("[tcpserver] find routeConnNAT: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
} else {
log.Debugf("[tcpserver] new routeConnNAT: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
}
h.ch <- dgram
}
}
// fake udp connect over tcp
@@ -120,7 +118,7 @@ func newFakeUDPTunnelConnOverTCP(ctx context.Context, conn net.Conn) (net.Conn,
func (c *fakeUDPTunnelConn) ReadFrom(b []byte) (int, net.Addr, error) {
select {
case <-c.ctx.Done():
return 0, nil, errors.New("closed connection")
return 0, nil, c.ctx.Err()
default:
dgram, err := readDatagramPacket(c.Conn, b)
if err != nil {
@@ -139,19 +137,11 @@ func (c *fakeUDPTunnelConn) WriteTo(b []byte, _ net.Addr) (int, error) {
}
func (c *fakeUDPTunnelConn) Close() error {
if cc, ok := c.Conn.(interface{ CloseRead() error }); ok {
_ = cc.CloseRead()
}
if cc, ok := c.Conn.(interface{ CloseWrite() error }); ok {
_ = cc.CloseWrite()
}
return c.Conn.Close()
}
func (c *fakeUDPTunnelConn) CloseWrite() error {
if cc, ok := c.Conn.(interface{ CloseWrite() error }); ok {
return cc.CloseWrite()
}
return nil
}
func (c *fakeUDPTunnelConn) CloseRead() error {
if cc, ok := c.Conn.(interface{ CloseRead() error }); ok {
return cc.CloseRead()
}
return nil
}

212
pkg/core/tunendpoint.go Executable file
View File

@@ -0,0 +1,212 @@
package core
import (
"context"
"net"
"os"
"sync"
"github.com/google/gopacket/layers"
log "github.com/sirupsen/logrus"
"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
"gvisor.dev/gvisor/pkg/buffer"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"github.com/wencaiwulue/kubevpn/pkg/config"
)
var _ stack.LinkEndpoint = (*tunEndpoint)(nil)
// tunEndpoint /Users/naison/go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20220422052705-39790bd3a15a/pkg/tcpip/link/tun/device.go:122
type tunEndpoint struct {
	ctx context.Context
	// tun is the local tun device this endpoint bridges to the netstack.
	tun net.Conn
	// once guards the one-time start of the pump goroutines in Attach.
	once sync.Once
	// endpoint is the in-memory channel endpoint packets are injected into.
	endpoint *channel.Endpoint
	// in receives raw packets that bypass gvisor (raw/mix engine modes).
	in chan<- *DataElem
	// out carries packets produced by the netstack, destined for the tun device.
	out chan *DataElem
}
// WritePackets writes packets. Must not be called with an empty list of
// packet buffers.
//
// WritePackets may modify the packet buffers, and takes ownership of the PacketBufferList.
// it is not safe to use the PacketBufferList after a call to WritePackets.
func (e *tunEndpoint) WritePackets(p stack.PacketBufferList) (int, tcpip.Error) {
return e.endpoint.WritePackets(p)
}
// MTU is the maximum transmission unit for this endpoint. This is
// usually dictated by the backing physical network; when such a
// physical network doesn't exist, the limit is generally 64k, which
// includes the maximum size of an IP packet.
func (e *tunEndpoint) MTU() uint32 {
return uint32(config.DefaultMTU)
}
// MaxHeaderLength returns the maximum size the data link (and
// lower level layers combined) headers can have. Higher levels use this
// information to reserve space in the front of the packets they're
// building.
func (e *tunEndpoint) MaxHeaderLength() uint16 {
return 0
}
// LinkAddress returns the link address (typically a MAC) of the
// endpoint.
func (e *tunEndpoint) LinkAddress() tcpip.LinkAddress {
return e.endpoint.LinkAddress()
}
// Capabilities returns the set of capabilities supported by the
// endpoint.
func (e *tunEndpoint) Capabilities() stack.LinkEndpointCapabilities {
return e.endpoint.LinkEPCapabilities
}
// Attach attaches the data link layer endpoint to the network-layer
// dispatcher of the stack.
//
// Attach is called with a nil dispatcher when the endpoint's NIC is being
// removed. The pump goroutines bridging the tun device and the channel
// endpoint are started exactly once, on the first Attach.
func (e *tunEndpoint) Attach(dispatcher stack.NetworkDispatcher) {
	e.endpoint.Attach(dispatcher)
	// queue --> tun
	e.once.Do(func() {
		go func() {
			for {
				read := e.endpoint.ReadContext(e.ctx)
				if read.IsNil() {
					// ReadContext returns a nil buffer once e.ctx is done;
					// exit instead of spinning forever on a dead context
					// (the original busy-looped here after cancellation).
					if e.ctx.Err() != nil {
						return
					}
					continue
				}
				bb := read.ToView().AsSlice()
				i := config.LPool.Get().([]byte)[:]
				// NOTE(review): assumes pooled buffers are at least one MTU
				// long; copy silently truncates otherwise — confirm LPool
				// buffer size.
				n := copy(i, bb)
				e.out <- NewDataElem(i[:], n, nil, nil)
			}
		}()
		// tun --> dispatcher
		go func() {
			// full(all use gvisor), mix(cluster network use gvisor), raw(not use gvisor)
			mode := config.Engine(os.Getenv(config.EnvKubeVPNTransportEngine))
			for {
				bytes := config.LPool.Get().([]byte)[:]
				read, err := e.tun.Read(bytes[:])
				if err != nil {
					// A broken tun device is unrecoverable for this endpoint.
					// (Removed the unreachable `return` that followed panic.)
					log.Warningln(err)
					panic(err)
				}
				if read == 0 {
					log.Warnf("[TUN]: read from tun length is %d", read)
					continue
				}
				// Try to determine network protocol number, default zero.
				var protocol tcpip.NetworkProtocolNumber
				var ipProtocol int
				var src, dst net.IP
				// TUN interface with IFF_NO_PI enabled, thus
				// we need to determine protocol from version field
				version := bytes[0] >> 4
				if version == 4 {
					protocol = header.IPv4ProtocolNumber
					ipHeader, err := ipv4.ParseHeader(bytes[:read])
					if err != nil {
						log.Error(err)
						continue
					}
					ipProtocol = ipHeader.Protocol
					src = ipHeader.Src
					dst = ipHeader.Dst
				} else if version == 6 {
					protocol = header.IPv6ProtocolNumber
					ipHeader, err := ipv6.ParseHeader(bytes[:read])
					if err != nil {
						log.Error(err)
						continue
					}
					ipProtocol = ipHeader.NextHeader
					src = ipHeader.Src
					dst = ipHeader.Dst
				} else {
					log.Debugf("[TUN-gvisor] unknown packet version %d", version)
					continue
				}
				// only tcp and udp needs to distinguish transport engine
				// gvisor: all network use gvisor
				// mix: cluster network use gvisor, diy network use raw
				// raw: all network use raw
				if (ipProtocol == int(layers.IPProtocolUDP) || ipProtocol == int(layers.IPProtocolUDPLite) || ipProtocol == int(layers.IPProtocolTCP)) &&
					(mode == config.EngineGvisor || (mode == config.EngineMix && (!config.CIDR.Contains(dst) && !config.CIDR6.Contains(dst)))) {
					pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
						ReserveHeaderBytes: 0,
						Payload:            buffer.MakeWithData(bytes[:read]),
					})
					//defer pkt.DecRef()
					// NOTE(review): buffer.MakeWithData takes ownership of the
					// slice — returning it to LPool here may hand the same
					// backing array to two owners; verify the pool discipline.
					config.LPool.Put(bytes[:])
					e.endpoint.InjectInbound(protocol, pkt)
					log.Debugf("[TUN-%s] IP-Protocol: %s, SRC: %s, DST: %s, Length: %d", layers.IPProtocol(ipProtocol).String(), layers.IPProtocol(ipProtocol).String(), src.String(), dst, read)
				} else {
					log.Debugf("[TUN-RAW] IP-Protocol: %s, SRC: %s, DST: %s, Length: %d", layers.IPProtocol(ipProtocol).String(), src.String(), dst, read)
					e.in <- NewDataElem(bytes[:], read, src, dst)
				}
			}
		}()
		go func() {
			for elem := range e.out {
				_, err := e.tun.Write(elem.Data()[:elem.Length()])
				config.LPool.Put(elem.Data()[:])
				if err != nil {
					log.Fatalf("[TUN] Fatal: failed to write data to tun device: %v", err)
				}
			}
		}()
	})
}
// IsAttached returns whether a NetworkDispatcher is attached to the
// endpoint.
func (e *tunEndpoint) IsAttached() bool {
return e.endpoint.IsAttached()
}
// Wait waits for any worker goroutines owned by the endpoint to stop.
//
// For now, requesting that an endpoint's worker goroutine(s) stop is
// implementation specific.
//
// Wait will not block if the endpoint hasn't started any goroutines
// yet, even if it might later.
func (e *tunEndpoint) Wait() {
return
}
// ARPHardwareType returns the ARPHRD_TYPE of the link endpoint.
//
// See:
// https://github.com/torvalds/linux/blob/aa0c9086b40c17a7ad94425b3b70dd1fdd7497bf/include/uapi/linux/if_arp.h#L30
func (e *tunEndpoint) ARPHardwareType() header.ARPHardwareType {
return header.ARPHardwareNone
}
// AddHeader adds a link layer header to the packet if required.
func (e *tunEndpoint) AddHeader(ptr stack.PacketBufferPtr) {
return
}
func NewTunEndpoint(ctx context.Context, tun net.Conn, mtu uint32, in chan<- *DataElem, out chan *DataElem) stack.LinkEndpoint {
addr, _ := tcpip.ParseMACAddress("02:03:03:04:05:06")
return &tunEndpoint{
ctx: ctx,
tun: tun,
endpoint: channel.New(tcp.DefaultReceiveBufferSize, mtu, addr),
in: in,
out: out,
}
}

View File

@@ -2,281 +2,636 @@ package core
import (
"context"
"errors"
"fmt"
"math/rand"
"net"
"strings"
"sync"
"time"
"github.com/shadowsocks/go-shadowsocks2/shadowaead"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
log "github.com/sirupsen/logrus"
"github.com/songgao/water/waterutil"
"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/tun"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
func ipToTunRouteKey(ip net.IP) string {
return ip.To16().String()
}
const (
MaxSize = 1000
MaxThread = 10
MaxConn = 1
)
type tunHandler struct {
chain *Chain
node *Node
routes *sync.Map
chExit chan struct{}
chain *Chain
node *Node
routeNAT *NAT
// map[srcIP]net.Conn
routeConnNAT *sync.Map
chExit chan error
}
type NAT struct {
lock *sync.RWMutex
routes map[string][]net.Addr
}
func NewNAT() *NAT {
return &NAT{
lock: &sync.RWMutex{},
routes: map[string][]net.Addr{},
}
}
func (n *NAT) RemoveAddr(addr net.Addr) (count int) {
n.lock.Lock()
defer n.lock.Unlock()
for k, v := range n.routes {
for i := 0; i < len(v); i++ {
if v[i].String() == addr.String() {
v = append(v[:i], v[i+1:]...)
i--
count++
}
}
n.routes[k] = v
}
return
}
func (n *NAT) LoadOrStore(to net.IP, addr net.Addr) (result net.Addr, load bool) {
n.lock.RLock()
addrList := n.routes[to.String()]
n.lock.RUnlock()
for _, add := range addrList {
if add.String() == addr.String() {
load = true
result = addr
return
}
}
n.lock.Lock()
defer n.lock.Unlock()
if addrList == nil {
n.routes[to.String()] = []net.Addr{addr}
result = addr
return
} else {
n.routes[to.String()] = append(n.routes[to.String()], addr)
result = addr
return
}
}
func (n *NAT) RouteTo(ip net.IP) net.Addr {
n.lock.RLock()
defer n.lock.RUnlock()
addrList := n.routes[ip.String()]
if len(addrList) == 0 {
return nil
}
// for load balance
index := rand.Intn(len(n.routes[ip.String()]))
return addrList[index]
}
func (n *NAT) Remove(ip net.IP, addr net.Addr) {
n.lock.Lock()
defer n.lock.Unlock()
addrList, ok := n.routes[ip.String()]
if !ok {
return
}
for i := 0; i < len(addrList); i++ {
if addrList[i].String() == addr.String() {
addrList = append(addrList[:i], addrList[i+1:]...)
i--
}
}
n.routes[ip.String()] = addrList
return
}
func (n *NAT) Range(f func(key string, v []net.Addr)) {
n.lock.RLock()
defer n.lock.RUnlock()
for k, v := range n.routes {
f(k, v)
}
}
// TunHandler creates a handler for tun tunnel.
func TunHandler(chain *Chain, node *Node) Handler {
return &tunHandler{
chain: chain,
node: node,
routes: &sync.Map{},
chExit: make(chan struct{}, 1),
chain: chain,
node: node,
routeNAT: RouteNAT,
routeConnNAT: RouteConnNAT,
chExit: make(chan error, 1),
}
}
func (h *tunHandler) Handle(ctx context.Context, conn net.Conn) {
defer conn.Close()
var err error
var raddr net.Addr
if addr := h.node.Remote; addr != "" {
raddr, err = net.ResolveUDPAddr("udp", addr)
func (h *tunHandler) Handle(ctx context.Context, tun net.Conn) {
if h.node.Remote != "" {
h.HandleClient(ctx, tun)
} else {
h.HandleServer(ctx, tun)
}
}
func (h tunHandler) printRoute() {
for {
select {
case <-time.Tick(time.Second * 5):
var i int
var sb strings.Builder
h.routeNAT.Range(func(key string, value []net.Addr) {
i++
var s []string
for _, addr := range value {
if addr != nil {
s = append(s, addr.String())
}
}
if len(s) != 0 {
sb.WriteString(fmt.Sprintf("to: %s, route: %s\n", key, strings.Join(s, " ")))
}
})
fmt.Println(sb.String())
fmt.Println(i)
}
}
}
type Device struct {
tun net.Conn
thread int
tunInboundRaw chan *DataElem
tunInbound chan *DataElem
tunOutbound chan *DataElem
// your main logic
tunInboundHandler func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem)
chExit chan error
}
func (d *Device) readFromTun() {
for {
b := config.LPool.Get().([]byte)[:]
n, err := d.tun.Read(b[:])
if err != nil {
log.Debugf("[tun] %s: remote addr: %v", conn.LocalAddr(), err)
select {
case d.chExit <- err:
default:
}
return
}
d.tunInboundRaw <- &DataElem{
data: b[:],
length: n,
}
}
}
func (d *Device) writeToTun() {
for e := range d.tunOutbound {
_, err := d.tun.Write(e.data[:e.length])
config.LPool.Put(e.data[:])
if err != nil {
select {
case d.chExit <- err:
default:
}
return
}
}
}
var tempDelay time.Duration
for ctx.Err() == nil {
err = func() error {
var err error
var pc net.PacketConn
if raddr != nil && !h.chain.IsEmpty() {
cc, err := h.chain.DialContext(ctx)
if err != nil {
return err
}
var ok bool
pc, ok = cc.(net.PacketConn)
if !ok {
err = errors.New("not a packet connection")
log.Debugf("[tun] %s - %s: %s", conn.LocalAddr(), raddr, err)
return err
}
} else {
laddr, _ := net.ResolveUDPAddr("udp", h.node.Addr)
pc, err = net.ListenUDP("udp", laddr)
}
if err != nil {
return err
}
return h.transportTun(ctx, conn, pc, raddr)
}()
if err != nil {
log.Debugf("[tun] %s: %v", conn.LocalAddr(), err)
}
select {
case <-h.chExit:
return
case <-ctx.Done():
h.chExit <- struct{}{}
default:
log.Warnf("next loop, err: %v", err)
}
if err != nil {
if tempDelay == 0 {
tempDelay = 1000 * time.Millisecond
} else {
tempDelay *= 2
}
if max := 6 * time.Second; tempDelay > max {
tempDelay = max
}
time.Sleep(tempDelay)
func (d *Device) parseIPHeader() {
for e := range d.tunInboundRaw {
if util.IsIPv4(e.data[:e.length]) {
// ipv4.ParseHeader
b := e.data[:e.length]
e.src = net.IPv4(b[12], b[13], b[14], b[15])
e.dst = net.IPv4(b[16], b[17], b[18], b[19])
} else if util.IsIPv6(e.data[:e.length]) {
// ipv6.ParseHeader
e.src = e.data[:e.length][8:24]
e.dst = e.data[:e.length][24:40]
} else {
log.Errorf("[tun-packet] unknown packet")
continue
}
tempDelay = 0
log.Debugf("[tun] %s --> %s, length: %d", e.src, e.dst, e.length)
d.tunInbound <- e
}
}
func (h *tunHandler) findRouteFor(dst net.IP) net.Addr {
if v, ok := h.routes.Load(ipToTunRouteKey(dst)); ok {
return v.(net.Addr)
}
//for _, route := range h.options.IPRoutes {
// if route.Dest.Contains(dst) && route.Gateway != nil {
// if v, ok := h.routes.Load(ipToTunRouteKey(route.Gateway)); ok {
// return v.(net.Addr)
// }
// }
//}
return nil
func (d *Device) Close() {
d.tun.Close()
}
func (h *tunHandler) transportTun(ctx context.Context, tun net.Conn, conn net.PacketConn, raddr net.Addr) error {
errChan := make(chan error, 2)
defer func() {
if c, ok := conn.(interface{ CloseRead() error }); ok {
_ = c.CloseRead()
func heartbeats(in chan<- *DataElem) {
tunIface, err := tun.GetInterface()
if err != nil {
return
}
addrs, err := tunIface.Addrs()
if err != nil {
return
}
var srcIPv4, srcIPv6 net.IP
for _, addr := range addrs {
ip, cidr, err := net.ParseCIDR(addr.String())
if err != nil {
continue
}
if c, ok := conn.(interface{ CloseWrite() error }); ok {
_ = c.CloseWrite()
if cidr.Contains(config.RouterIP) {
srcIPv4 = ip
}
_ = conn.Close()
}()
go func() {
b := SPool.Get().([]byte)
defer SPool.Put(b)
if cidr.Contains(config.RouterIP6) {
srcIPv6 = ip
}
}
if srcIPv4 == nil || srcIPv6 == nil {
return
}
if config.RouterIP.To4().Equal(srcIPv4) {
return
}
if config.RouterIP6.To4().Equal(srcIPv6) {
return
}
for ctx.Err() == nil {
err := func() error {
n, err := tun.Read(b[:])
var bytes []byte
var bytes6 []byte
ticker := time.NewTicker(time.Second * 5)
defer ticker.Stop()
for ; true; <-ticker.C {
for i := 0; i < 4; i++ {
if bytes == nil {
bytes, err = genICMPPacket(srcIPv4, config.RouterIP)
if err != nil {
select {
case h.chExit <- struct{}{}:
default:
}
return err
log.Error(err)
continue
}
// client side, deliver packet directly.
if raddr != nil {
_, err = conn.WriteTo(b[:n], raddr)
return err
}
var src, dst net.IP
if waterutil.IsIPv4(b[:n]) {
header, err := ipv4.ParseHeader(b[:n])
if err != nil {
log.Debugf("[tun] %s: %v", tun.LocalAddr(), err)
return nil
}
if config.Debug {
log.Debugf("[tun] %s", header.String())
}
src, dst = header.Src, header.Dst
} else if waterutil.IsIPv6(b[:n]) {
header, err := ipv6.ParseHeader(b[:n])
if err != nil {
log.Debugf("[tun] %s: %v", tun.LocalAddr(), err)
return nil
}
if config.Debug {
log.Debugf("[tun] %s", header.String())
}
src, dst = header.Src, header.Dst
} else {
log.Debugf("[tun] unknown packet")
return nil
}
addr := h.findRouteFor(dst)
if addr == nil {
log.Debugf("[tun] no route for %s -> %s", src, dst)
return nil
}
if config.Debug {
log.Debugf("[tun] find route: %s -> %s", dst, addr)
}
_, err = conn.WriteTo(b[:n], addr)
return err
}()
if err != nil {
errChan <- err
return
}
}
}()
go func() {
b := LPool.Get().([]byte)
defer LPool.Put(b)
for ctx.Err() == nil {
err := func() error {
n, addr, err := conn.ReadFrom(b[:])
if err != nil && err != shadowaead.ErrShortPacket {
return err
if bytes6 == nil {
bytes6, err = genICMPPacketIPv6(srcIPv6, config.RouterIP6)
if err != nil {
log.Error(err)
continue
}
// client side, deliver packet to tun device.
if raddr != nil {
_, err = tun.Write(b[:n])
return err
}
var src, dst net.IP
if waterutil.IsIPv4(b[:n]) {
header, err := ipv4.ParseHeader(b[:n])
if err != nil {
log.Debugf("[tun] %s: %v", tun.LocalAddr(), err)
return nil
}
if config.Debug {
log.Debugf("[tun] %s", header.String())
}
src, dst = header.Src, header.Dst
} else if waterutil.IsIPv6(b[:n]) {
header, err := ipv6.ParseHeader(b[:n])
if err != nil {
log.Debugf("[tun] %s: %v", tun.LocalAddr(), err)
return nil
}
if config.Debug {
log.Debugf("[tun] %s", header.String())
}
src, dst = header.Src, header.Dst
} else {
log.Debugf("[tun] unknown packet")
return nil
}
routeKey := ipToTunRouteKey(src)
if actual, loaded := h.routes.LoadOrStore(routeKey, addr); loaded {
if actual.(net.Addr).String() != addr.String() {
log.Debugf("[tun] update route: %s -> %s (old %s)", src, addr, actual.(net.Addr))
h.routes.Store(routeKey, addr)
}
} else {
log.Debugf("[tun] new route: %s -> %s", src, addr)
}
if routeToAddr := h.findRouteFor(dst); routeToAddr != nil {
if config.Debug {
log.Debugf("[tun] find route: %s -> %s", dst, routeToAddr)
}
_, err = conn.WriteTo(b[:n], routeToAddr)
return err
}
if _, err = tun.Write(b[:n]); err != nil {
select {
case h.chExit <- struct{}{}:
default:
}
return err
}
return nil
}()
if err != nil {
errChan <- err
return
}
for index, i2 := range [][]byte{bytes, bytes6} {
data := config.LPool.Get().([]byte)[:]
length := copy(data, i2)
var src, dst net.IP
if index == 0 {
src, dst = srcIPv4, config.RouterIP
} else {
src, dst = srcIPv6, config.RouterIP6
}
in <- &DataElem{
data: data[:],
length: length,
src: src,
dst: dst,
}
}
time.Sleep(time.Second)
}
}()
}
}
// genICMPPacket serializes an ICMPv4 echo-request (ping) from src to dst and
// returns the raw IPv4 packet bytes, used as a heartbeat injected into the
// tun inbound queue.
func genICMPPacket(src net.IP, dst net.IP) ([]byte, error) {
	buf := gopacket.NewSerializeBuffer()
	icmpLayer := layers.ICMPv4{
		TypeCode: layers.CreateICMPv4TypeCode(layers.ICMPv4TypeEchoRequest, 0),
		Id:       3842,
		Seq:      1,
	}
	ipLayer := layers.IPv4{
		Version:  4,
		SrcIP:    src,
		DstIP:    dst,
		Protocol: layers.IPProtocolICMPv4,
		Flags:    layers.IPv4DontFragment,
		TTL:      64,
		IHL:      5,
		Id:       55664,
	}
	// FixLengths/ComputeChecksums fill in the IPv4 and ICMPv4 length and
	// checksum fields during serialization.
	opts := gopacket.SerializeOptions{
		FixLengths:       true,
		ComputeChecksums: true,
	}
	err := gopacket.SerializeLayers(buf, opts, &ipLayer, &icmpLayer)
	if err != nil {
		return nil, fmt.Errorf("failed to serialize icmp packet, err: %v", err)
	}
	return buf.Bytes(), nil
}
// genICMPPacketIPv6 serializes an ICMPv6 echo-request from src to dst and
// returns the raw IPv6 packet bytes, used as a heartbeat injected into the
// tun inbound queue.
func genICMPPacketIPv6(src net.IP, dst net.IP) ([]byte, error) {
	buf := gopacket.NewSerializeBuffer()
	icmpLayer := layers.ICMPv6{
		TypeCode: layers.CreateICMPv6TypeCode(layers.ICMPv6TypeEchoRequest, 0),
	}
	ipLayer := layers.IPv6{
		Version:    6,
		SrcIP:      src,
		DstIP:      dst,
		NextHeader: layers.IPProtocolICMPv6,
		HopLimit:   255,
	}
	// The ICMPv6 checksum covers an IPv6 pseudo-header, so the ICMP layer
	// must be bound to its network layer and checksums must be computed;
	// the original omitted both, leaving the checksum zero so peers would
	// drop the heartbeat.
	if err := icmpLayer.SetNetworkLayerForChecksum(&ipLayer); err != nil {
		return nil, fmt.Errorf("failed to bind icmp6 layer to ipv6 layer, err: %v", err)
	}
	opts := gopacket.SerializeOptions{
		FixLengths:       true,
		ComputeChecksums: true,
	}
	err := gopacket.SerializeLayers(buf, opts, &ipLayer, &icmpLayer)
	if err != nil {
		return nil, fmt.Errorf("failed to serialize icmp6 packet, err: %v", err)
	}
	return buf.Bytes(), nil
}
func (d *Device) Start(ctx context.Context) {
go d.readFromTun()
for i := 0; i < d.thread; i++ {
go d.parseIPHeader()
}
go d.tunInboundHandler(d.tunInbound, d.tunOutbound)
go d.writeToTun()
go heartbeats(d.tunInbound)
select {
case err := <-errChan:
case err := <-d.chExit:
log.Error(err)
return
case <-ctx.Done():
return
}
}
func (d *Device) SetTunInboundHandler(handler func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem)) {
d.tunInboundHandler = handler
}
func (h *tunHandler) HandleServer(ctx context.Context, tun net.Conn) {
go h.printRoute()
device := &Device{
tun: tun,
thread: MaxThread,
tunInboundRaw: make(chan *DataElem, MaxSize),
tunInbound: make(chan *DataElem, MaxSize),
tunOutbound: make(chan *DataElem, MaxSize),
chExit: h.chExit,
}
device.SetTunInboundHandler(func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem) {
for {
packetConn, err := (&net.ListenConfig{}).ListenPacket(ctx, "udp", h.node.Addr)
if err != nil {
log.Debugf("[udp] can not listen %s, err: %v", h.node.Addr, err)
return
}
err = transportTun(ctx, tunInbound, tunOutbound, packetConn, h.routeNAT, h.routeConnNAT)
if err != nil {
log.Debugf("[tun] %s: %v", tun.LocalAddr(), err)
}
}
})
defer device.Close()
device.Start(ctx)
}
type DataElem struct {
data []byte
length int
src net.IP
dst net.IP
}
func NewDataElem(data []byte, length int, src net.IP, dst net.IP) *DataElem {
return &DataElem{
data: data,
length: length,
src: src,
dst: dst,
}
}
func (d *DataElem) Data() []byte {
return d.data
}
func (d *DataElem) Length() int {
return d.length
}
type udpElem struct {
from net.Addr
data []byte
length int
src net.IP
dst net.IP
}
type Peer struct {
conn net.PacketConn
thread int
connInbound chan *udpElem
parsedConnInfo chan *udpElem
tunInbound <-chan *DataElem
tunOutbound chan<- *DataElem
routeNAT *NAT
// map[srcIP]net.Conn
// routeConnNAT sync.Map
routeConnNAT *sync.Map
errChan chan error
}
func (p *Peer) sendErr(err error) {
select {
case p.errChan <- err:
default:
}
}
func (p *Peer) readFromConn() {
for {
b := config.LPool.Get().([]byte)[:]
n, srcAddr, err := p.conn.ReadFrom(b[:])
if err != nil {
p.sendErr(err)
return
}
p.connInbound <- &udpElem{
from: srcAddr,
data: b[:],
length: n,
}
}
}
func (p *Peer) readFromTCPConn() {
for packet := range Chan {
u := &udpElem{
data: packet.Data[:],
length: int(packet.DataLength),
}
b := packet.Data
if util.IsIPv4(packet.Data) {
// ipv4.ParseHeader
u.src = net.IPv4(b[12], b[13], b[14], b[15])
u.dst = net.IPv4(b[16], b[17], b[18], b[19])
} else if util.IsIPv6(packet.Data) {
// ipv6.ParseHeader
u.src = b[8:24]
u.dst = b[24:40]
} else {
log.Errorf("[tun-conn] unknown packet")
continue
}
log.Debugf("[tcpserver] udp-tun %s >>> %s length: %d", u.src, u.dst, u.length)
p.parsedConnInfo <- u
}
}
func (p *Peer) parseHeader() {
var firstIPv4, firstIPv6 = true, true
for e := range p.connInbound {
b := e.data[:e.length]
if util.IsIPv4(e.data[:e.length]) {
// ipv4.ParseHeader
e.src = net.IPv4(b[12], b[13], b[14], b[15])
e.dst = net.IPv4(b[16], b[17], b[18], b[19])
} else if util.IsIPv6(e.data[:e.length]) {
// ipv6.ParseHeader
e.src = b[:e.length][8:24]
e.dst = b[:e.length][24:40]
} else {
log.Errorf("[tun] unknown packet")
continue
}
if firstIPv4 || firstIPv6 {
if util.IsIPv4(e.data[:e.length]) {
firstIPv4 = false
} else {
firstIPv6 = false
}
if _, loaded := p.routeNAT.LoadOrStore(e.src, e.from); loaded {
log.Debugf("[tun] find route: %s -> %s", e.src, e.from)
} else {
log.Debugf("[tun] new route: %s -> %s", e.src, e.from)
}
}
p.parsedConnInfo <- e
}
}
// routePeer drains parsed inbound UDP packets and forwards each one to its
// destination: a known UDP peer (routeNAT), a known TCP tunnel
// (routeConnNAT), or the local tun device as a fallback.
func (p *Peer) routePeer() {
	for e := range p.parsedConnInfo {
		if routeToAddr := p.routeNAT.RouteTo(e.dst); routeToAddr != nil {
			log.Debugf("[tun] find route: %s -> %s", e.dst, routeToAddr)
			_, err := p.conn.WriteTo(e.data[:e.length], routeToAddr)
			config.LPool.Put(e.data[:])
			if err != nil {
				p.sendErr(err)
				return
			}
		} else if conn, ok := p.routeConnNAT.Load(e.dst.String()); ok {
			dgram := newDatagramPacket(e.data[:e.length])
			err := dgram.Write(conn.(net.Conn))
			// Return the buffer before the error check so the error path does
			// not leak it from the pool (the original skipped Put on error;
			// the UDP branch above and routeTUN both Put unconditionally).
			config.LPool.Put(e.data[:])
			if err != nil {
				log.Debugf("[tcpserver] udp-tun %s <- %s : %s", conn.(net.Conn).RemoteAddr(), dgram.Addr(), err)
				p.sendErr(err)
				return
			}
		} else {
			// No route known: hand the packet to the tun device; buffer
			// ownership moves with the DataElem.
			p.tunOutbound <- &DataElem{
				data:   e.data,
				length: e.length,
				src:    e.src,
				dst:    e.dst,
			}
		}
	}
}
func (p *Peer) routeTUN() {
for e := range p.tunInbound {
if addr := p.routeNAT.RouteTo(e.dst); addr != nil {
log.Debugf("[tun] find route: %s -> %s", e.dst, addr)
_, err := p.conn.WriteTo(e.data[:e.length], addr)
config.LPool.Put(e.data[:])
if err != nil {
log.Debugf("[tun] can not route: %s -> %s", e.dst, addr)
p.sendErr(err)
return
}
} else if conn, ok := p.routeConnNAT.Load(e.dst.String()); ok {
dgram := newDatagramPacket(e.data[:e.length])
err := dgram.Write(conn.(net.Conn))
config.LPool.Put(e.data[:])
if err != nil {
log.Debugf("[tcpserver] udp-tun %s <- %s : %s", conn.(net.Conn).RemoteAddr(), dgram.Addr(), err)
p.sendErr(err)
return
}
} else {
config.LPool.Put(e.data[:])
log.Debug(fmt.Errorf("[tun] no route for %s -> %s", e.src, e.dst))
}
}
}
func (p *Peer) Start() {
go p.readFromConn()
go p.readFromTCPConn()
for i := 0; i < p.thread; i++ {
go p.parseHeader()
}
go p.routePeer()
go p.routeTUN()
}
func (p *Peer) Close() {
p.conn.Close()
}
func transportTun(ctx context.Context, tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem, packetConn net.PacketConn, nat *NAT, connNAT *sync.Map) error {
p := &Peer{
conn: packetConn,
thread: MaxThread,
connInbound: make(chan *udpElem, MaxSize),
parsedConnInfo: make(chan *udpElem, MaxSize),
tunInbound: tunInbound,
tunOutbound: tunOutbound,
routeNAT: nat,
routeConnNAT: connNAT,
errChan: make(chan error, 2),
}
defer p.Close()
p.Start()
select {
case err := <-p.errChan:
log.Errorf(err.Error())
return err
case <-ctx.Done():
return nil

View File

@@ -0,0 +1,133 @@
package core
import (
"context"
"errors"
"net"
"time"
log "github.com/sirupsen/logrus"
"github.com/wencaiwulue/kubevpn/pkg/config"
)
// HandleClient runs the client side of the tun tunnel: it builds a gvisor
// netstack endpoint on top of the local tun device and keeps a packet
// connection to the remote node alive, re-dialing on failure.
func (h *tunHandler) HandleClient(ctx context.Context, tun net.Conn) {
	remoteAddr, err := net.ResolveUDPAddr("udp", h.node.Remote)
	if err != nil {
		log.Errorf("[tun] %s: remote addr: %v", tun.LocalAddr(), err)
		return
	}
	in := make(chan *DataElem, MaxSize)
	out := make(chan *DataElem, MaxSize)
	endpoint := NewTunEndpoint(ctx, tun, uint32(config.DefaultMTU), in, out)
	stack := NewStack(ctx, endpoint)
	go stack.Wait()
	d := &ClientDevice{
		tunInbound:  in,
		tunOutbound: out,
		chExit:      h.chExit,
	}
	d.SetTunInboundHandler(func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem) {
		// Re-dial loop: keep tunnelling until ctx is cancelled. The original
		// looped unconditionally, leaking this goroutine after shutdown.
		for ctx.Err() == nil {
			packetConn, err := getRemotePacketConn(ctx, h.chain)
			if err != nil {
				log.Debugf("[tun-client] %s - %s: %s", tun.LocalAddr(), remoteAddr, err)
				time.Sleep(time.Second * 2)
				continue
			}
			err = transportTunClient(ctx, tunInbound, tunOutbound, packetConn, remoteAddr)
			if err != nil {
				log.Debugf("[tun-client] %s: %v", tun.LocalAddr(), err)
			}
		}
	})
	d.Start(ctx)
}
// getRemotePacketConn obtains the packet connection used to reach the remote
// node: through the proxy chain when one is configured, otherwise a plain
// local UDP socket bound to an ephemeral port.
func getRemotePacketConn(ctx context.Context, chain *Chain) (net.PacketConn, error) {
	if chain.IsEmpty() {
		var lc net.ListenConfig
		return lc.ListenPacket(ctx, "udp", "")
	}
	cc, err := chain.DialContext(ctx)
	if err != nil {
		return nil, err
	}
	packetConn, ok := cc.(net.PacketConn)
	if !ok {
		return nil, errors.New("not a packet connection")
	}
	return packetConn, nil
}
// transportTunClient pumps packets between the tun channels and the remote
// packet connection until either direction fails or ctx is cancelled.
// It owns packetConn and closes it on return, which also unblocks the
// reader goroutine below.
func transportTunClient(ctx context.Context, tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem, packetConn net.PacketConn, remoteAddr net.Addr) error {
	// buffered so both goroutines can report an error and exit without a receiver
	errChan := make(chan error, 2)
	defer packetConn.Close()
	// uplink: tun device -> remote
	go func() {
		for e := range tunInbound {
			if e.src.Equal(e.dst) {
				// packet addressed to ourselves: loop it straight back to the tun device
				tunOutbound <- e
				continue
			}
			_, err := packetConn.WriteTo(e.data[:e.length], remoteAddr)
			// buffer ownership ends here on the uplink path, so return it to the pool
			config.LPool.Put(e.data[:])
			if err != nil {
				errChan <- err
				return
			}
		}
	}()
	// downlink: remote -> tun device
	go func() {
		for {
			b := config.LPool.Get().([]byte)[:]
			n, _, err := packetConn.ReadFrom(b[:])
			if err != nil {
				errChan <- err
				return
			}
			// ownership of b passes to the consumer of tunOutbound
			tunOutbound <- &DataElem{data: b[:], length: n}
		}
	}()
	select {
	case err := <-errChan:
		return err
	case <-ctx.Done():
		return nil
	}
}
// ClientDevice glues a local tun device to a pluggable transport handler.
type ClientDevice struct {
	tunInbound  chan *DataElem // packets read from the tun device
	tunOutbound chan *DataElem // packets to be written back to the tun device
	// your main logic
	tunInboundHandler func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem)
	chExit chan error // fatal-error channel shared with the owning tun handler
}
// Start launches the registered transport handler and the heartbeat sender,
// then blocks until a fatal error arrives on chExit or ctx is cancelled.
func (d *ClientDevice) Start(ctx context.Context) {
	go d.tunInboundHandler(d.tunInbound, d.tunOutbound)
	go heartbeats(d.tunInbound)
	select {
	case err := <-d.chExit:
		log.Error(err)
	case <-ctx.Done():
	}
}
// SetTunInboundHandler registers the function that moves packets between the
// tun channels and the remote side; it is launched by Start.
func (d *ClientDevice) SetTunInboundHandler(handler func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem)) {
	d.tunInboundHandler = handler
}

View File

@@ -5,6 +5,8 @@ import (
"fmt"
"io"
"net"
"github.com/wencaiwulue/kubevpn/pkg/config"
)
type datagramPacket struct {
@@ -37,15 +39,29 @@ func readDatagramPacket(r io.Reader, b []byte) (*datagramPacket, error) {
}
dataLength := binary.BigEndian.Uint16(b[:2])
_, err = io.ReadFull(r, b[:dataLength])
if err != nil && (err != io.ErrUnexpectedEOF || err != io.EOF) {
if err != nil /*&& (err != io.ErrUnexpectedEOF || err != io.EOF)*/ {
return nil, err
}
return &datagramPacket{DataLength: dataLength, Data: b[:dataLength]}, nil
}
// this method will return all byte array in the way: b[:]
// readDatagramPacketServer reads one length-prefixed datagram from r into b:
// a 2-byte big-endian length followed by that many payload bytes. Unlike the
// client-side reader it hands back the entire backing buffer (Data: b[:]) so
// the caller can recycle it through the pool; DataLength records the actual
// payload size.
func readDatagramPacketServer(r io.Reader, b []byte) (*datagramPacket, error) {
	_, err := io.ReadFull(r, b[:2])
	if err != nil {
		return nil, err
	}
	dataLength := binary.BigEndian.Uint16(b[:2])
	_, err = io.ReadFull(r, b[:dataLength])
	// NOTE(review): the old EOF-tolerant condition was deliberately disabled;
	// a short read is now always treated as an error.
	if err != nil /*&& (err != io.ErrUnexpectedEOF || err != io.EOF)*/ {
		return nil, err
	}
	return &datagramPacket{DataLength: dataLength, Data: b[:]}, nil
}
func (addr *datagramPacket) Write(w io.Writer) error {
b := LPool.Get().([]byte)
defer LPool.Put(b)
b := config.LPool.Get().([]byte)[:]
defer config.LPool.Put(b[:])
binary.BigEndian.PutUint16(b[:2], uint16(len(addr.Data)))
n := copy(b[2:], addr.Data)
_, err := w.Write(b[:n+2])

201
pkg/cp/LICENSE Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

465
pkg/cp/cp.go Normal file
View File

@@ -0,0 +1,465 @@
package cp
import (
"archive/tar"
"bytes"
"errors"
"fmt"
"io"
"os"
"runtime"
"strings"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubectl/pkg/cmd/exec"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
)
// CopyOptions have the data required to perform the copy operation
type CopyOptions struct {
	Container  string // container name inside the pod; empty selects the default container
	Namespace  string // fallback namespace resolved from the kubeconfig in Complete
	NoPreserve bool   // when true, extract with --no-same-permissions/--no-same-owner
	MaxTries   int    // max resume retries when copying from a pod; negative means unlimited
	ClientConfig *restclient.Config
	Clientset    kubernetes.Interface
	// ExecParentCmdName is the parent command path, recorded for help/error text.
	ExecParentCmdName string
	args []string // the two positional [src, dst] file specs
	genericclioptions.IOStreams
}
// NewCopyOptions creates the options for copy
func NewCopyOptions(ioStreams genericclioptions.IOStreams) *CopyOptions {
	return &CopyOptions{
		IOStreams: ioStreams,
	}
}
var (
	// errFileSpecDoesntMatchFormat is returned when a src/dest argument cannot
	// be parsed as [[namespace/]pod:]file/path.
	errFileSpecDoesntMatchFormat = errors.New("filespec must match the canonical format: [[namespace/]pod:]file/path")
)
// extractFileSpec parses one copy argument of the form
// [[namespace/]pod:]file/path into a fileSpec. An argument without a colon
// (or, on windows, one whose colon follows a single drive letter) is a local
// path.
func extractFileSpec(arg string) (fileSpec, error) {
	i := strings.Index(arg, ":")
	// filespec starting with a semicolon is invalid
	if i == 0 {
		return fileSpec{}, errFileSpecDoesntMatchFormat
	}
	// C:\Users\ADMINI~1\AppData\Local\Temp\849198392506502457
	// A single letter before the colon on windows is a drive name, not a pod
	// name. Compare exactly one ASCII letter of either case: the previous
	// substring check (strings.Contains("ABC…Z", arg[:i])) missed lowercase
	// drives like c:\ and misclassified multi-letter pod names that happen to
	// be substrings of the alphabet (e.g. "AB:file").
	if i == -1 || (runtime.GOOS == "windows" && i == 1 && isASCIILetter(arg[0])) {
		return fileSpec{
			File: newLocalPath(arg),
		}, nil
	}
	pod, file := arg[:i], arg[i+1:]
	pieces := strings.Split(pod, "/")
	switch len(pieces) {
	case 1:
		return fileSpec{
			PodName: pieces[0],
			File:    newRemotePath(file),
		}, nil
	case 2:
		return fileSpec{
			PodNamespace: pieces[0],
			PodName:      pieces[1],
			File:         newRemotePath(file),
		}, nil
	default:
		return fileSpec{}, errFileSpecDoesntMatchFormat
	}
}

// isASCIILetter reports whether b is in [A-Za-z]; used for drive detection.
func isASCIILetter(b byte) bool {
	return ('A' <= b && b <= 'Z') || ('a' <= b && b <= 'z')
}
// Complete completes all the required options
func (o *CopyOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error {
	// remember the parent command path for help/error text
	if cmd.Parent() != nil {
		o.ExecParentCmdName = cmd.Parent().CommandPath()
	}
	var err error
	// default namespace comes from the current kubeconfig context
	o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace()
	if err != nil {
		return err
	}
	o.Clientset, err = f.KubernetesClientSet()
	if err != nil {
		return err
	}
	o.ClientConfig, err = f.ToRESTConfig()
	if err != nil {
		return err
	}
	o.args = args
	return nil
}
// Validate makes sure provided values for CopyOptions are valid
func (o *CopyOptions) Validate() error {
	if len(o.args) == 2 {
		return nil
	}
	return fmt.Errorf("source and destination are required")
}
// Run performs the execution
func (o *CopyOptions) Run() error {
	srcSpec, err := extractFileSpec(o.args[0])
	if err != nil {
		return err
	}
	destSpec, err := extractFileSpec(o.args[1])
	if err != nil {
		return err
	}
	// exactly one side must name a pod: pod-to-pod copies are not supported
	if len(srcSpec.PodName) != 0 && len(destSpec.PodName) != 0 {
		return fmt.Errorf("one of src or dest must be a local file specification")
	}
	if len(srcSpec.File.String()) == 0 || len(destSpec.File.String()) == 0 {
		return errors.New("filepath can not be empty")
	}
	if len(srcSpec.PodName) != 0 {
		return o.copyFromPod(srcSpec, destSpec)
	}
	if len(destSpec.PodName) != 0 {
		return o.copyToPod(srcSpec, destSpec, &exec.ExecOptions{})
	}
	// neither side named a pod
	return fmt.Errorf("one of src or dest must be a remote file specification")
}
// checkDestinationIsDir receives a destination fileSpec and
// determines if the provided destination path exists on the
// pod. If the destination path does not exist or is _not_ a
// directory, an error is returned with the exit code received.
func (o *CopyOptions) checkDestinationIsDir(dest fileSpec) error {
	// runs `test -d <path>` inside the pod, discarding all output;
	// a nil error therefore means the path exists and is a directory
	options := &exec.ExecOptions{
		StreamOptions: exec.StreamOptions{
			IOStreams: genericclioptions.IOStreams{
				Out:    bytes.NewBuffer([]byte{}),
				ErrOut: bytes.NewBuffer([]byte{}),
			},
			Namespace: dest.PodNamespace,
			PodName:   dest.PodName,
		},
		Command:  []string{"test", "-d", dest.File.String()},
		Executor: &exec.DefaultRemoteExecutor{},
	}
	return o.execute(options)
}
// copyToPod streams src as a tar archive into a `tar -x` process executed
// inside the destination pod.
func (o *CopyOptions) copyToPod(src, dest fileSpec, options *exec.ExecOptions) error {
	if _, err := os.Stat(src.File.String()); err != nil {
		return fmt.Errorf("%s doesn't exist in local filesystem", src.File)
	}
	reader, writer := io.Pipe()
	srcFile := src.File.(localPath)
	destFile := dest.File.(remotePath)
	if err := o.checkDestinationIsDir(dest); err == nil {
		// If no error, dest.File was found to be a directory.
		// Copy specified src into it
		destFile = destFile.Join(srcFile.Base())
	}
	// producer: tar the local tree into the pipe while the exec below consumes it
	go func(src localPath, dest remotePath, writer io.WriteCloser) {
		defer writer.Close()
		if err := makeTar(src, dest, writer); err != nil {
			log.Error(err)
		}
	}(srcFile, destFile, writer)
	var cmdArr []string
	if o.NoPreserve {
		cmdArr = []string{"tar", "--no-same-permissions", "--no-same-owner", "-xmfh", "-"}
	} else {
		cmdArr = []string{"tar", "-xmfh", "-"}
	}
	destFileDir := destFile.Dir().String()
	if len(destFileDir) > 0 {
		cmdArr = append(cmdArr, "-C", destFileDir)
	}
	options.StreamOptions = exec.StreamOptions{
		IOStreams: genericclioptions.IOStreams{
			In:     reader,
			Out:    o.Out,
			ErrOut: o.ErrOut,
		},
		Stdin:     true,
		Namespace: dest.PodNamespace,
		PodName:   dest.PodName,
	}
	options.Command = cmdArr
	options.Executor = &exec.DefaultRemoteExecutor{}
	return o.execute(options)
}
// copyFromPod streams a tar archive produced inside the pod and unpacks it
// under the local destination path.
func (o *CopyOptions) copyFromPod(src, dest fileSpec) error {
	reader := newTarPipe(src, o)
	srcFile := src.File.(remotePath)
	destFile := dest.File.(localPath)
	// remove extraneous path shortcuts - these could occur if a path contained extra "../"
	// and attempted to navigate beyond "/" in a remote filesystem
	prefix := stripPathShortcuts(srcFile.StripSlashes().Clean().String())
	return o.untarAll(prefix, destFile, reader)
}
// TarPipe is an io.Reader over `tar cfh` output streamed from the pod that
// transparently restarts the remote tar — resuming at the current byte
// offset — when the stream breaks, up to o.MaxTries attempts.
type TarPipe struct {
	src       fileSpec
	o         *CopyOptions
	reader    *io.PipeReader
	outStream *io.PipeWriter
	bytesRead uint64 // total bytes successfully delivered so far
	retries   int    // restarts performed
}
// newTarPipe starts the remote tar stream from offset 0 and returns the
// resumable reader wrapping it.
func newTarPipe(src fileSpec, o *CopyOptions) *TarPipe {
	t := new(TarPipe)
	t.src = src
	t.o = o
	t.initReadFrom(0)
	return t
}
// initReadFrom (re)starts the remote tar command, skipping the first n bytes
// of its output so a restarted stream resumes where the previous one broke.
func (t *TarPipe) initReadFrom(n uint64) {
	t.reader, t.outStream = io.Pipe()
	options := &exec.ExecOptions{
		StreamOptions: exec.StreamOptions{
			IOStreams: genericclioptions.IOStreams{
				In:     nil,
				Out:    t.outStream,
				ErrOut: t.o.Out,
			},
			Namespace: t.src.PodNamespace,
			PodName:   t.src.PodName,
		},
		Command:  []string{"tar", "cfh", "-", t.src.File.String()},
		Executor: &exec.DefaultRemoteExecutor{},
	}
	if t.o.MaxTries != 0 {
		// resumable mode: `tail -c+N` skips the already-received prefix (1-indexed)
		options.Command = []string{"sh", "-c", fmt.Sprintf("tar cfh - %s | tail -c+%d", t.src.File, n)}
	}
	// run the exec asynchronously; closing outStream signals EOF (or error) to Read
	go func() {
		defer t.outStream.Close()
		if err := t.o.execute(options); err != nil {
			log.Error(err)
		}
	}()
}
// Read implements io.Reader. On a stream error it restarts the remote tar at
// the next unread byte (bytesRead+1, since tail -c+N is 1-indexed) until the
// retry budget is exhausted; MaxTries < 0 retries without limit.
func (t *TarPipe) Read(p []byte) (n int, err error) {
	n, err = t.reader.Read(p)
	if err != nil {
		if t.o.MaxTries < 0 || t.retries < t.o.MaxTries {
			t.retries++
			fmt.Printf("Resuming copy at %d bytes, retry %d/%d\n", t.bytesRead, t.retries, t.o.MaxTries)
			// swallow the error: the caller sees a seamless, resumed stream
			t.initReadFrom(t.bytesRead + 1)
			err = nil
		} else {
			fmt.Printf("Dropping out copy after %d retries\n", t.retries)
		}
	} else {
		t.bytesRead += uint64(n)
	}
	return
}
// makeTar writes src (recursively) to writer as a tar archive whose entry
// names are rooted at dest.
func makeTar(src localPath, dest remotePath, writer io.Writer) error {
	// TODO: use compression here?
	tarWriter := tar.NewWriter(writer)
	defer tarWriter.Close()
	srcPath := src.Clean()
	destPath := dest.Clean()
	return recursiveTar(srcPath.Dir(), srcPath.Base(), destPath.Dir(), destPath.Base(), tarWriter)
}
// recursiveTar walks srcDir/srcFile (which may be a glob pattern) and writes
// matching files, directories, and symlinks to tw, naming each entry under
// destDir/destFile.
func recursiveTar(srcDir, srcFile localPath, destDir, destFile remotePath, tw *tar.Writer) error {
	matchedPaths, err := srcDir.Join(srcFile).Glob()
	if err != nil {
		return err
	}
	for _, fpath := range matchedPaths {
		// Lstat (not Stat) so symlinks are archived as links, not targets
		stat, err := os.Lstat(fpath)
		if err != nil {
			return err
		}
		if stat.IsDir() {
			files, err := os.ReadDir(fpath)
			if err != nil {
				return err
			}
			if len(files) == 0 {
				//case empty directory
				hdr, _ := tar.FileInfoHeader(stat, fpath)
				hdr.Name = destFile.String()
				if err := tw.WriteHeader(hdr); err != nil {
					return err
				}
			}
			for _, f := range files {
				if err := recursiveTar(srcDir, srcFile.Join(newLocalPath(f.Name())),
					destDir, destFile.Join(newRemotePath(f.Name())), tw); err != nil {
					return err
				}
			}
			// NOTE(review): returns after the first directory match, skipping
			// any remaining glob matches — this mirrors upstream kubectl cp.
			return nil
		} else if stat.Mode()&os.ModeSymlink != 0 {
			//case soft link
			hdr, _ := tar.FileInfoHeader(stat, fpath)
			target, err := os.Readlink(fpath)
			if err != nil {
				return err
			}
			hdr.Linkname = target
			hdr.Name = destFile.String()
			if err := tw.WriteHeader(hdr); err != nil {
				return err
			}
		} else {
			//case regular file or other file type like pipe
			hdr, err := tar.FileInfoHeader(stat, fpath)
			if err != nil {
				return err
			}
			hdr.Name = destFile.String()
			if err := tw.WriteHeader(hdr); err != nil {
				return err
			}
			f, err := os.Open(fpath)
			if err != nil {
				return err
			}
			// deferred close is a safety net; the explicit Close below surfaces
			// errors (the second close's error is intentionally ignored)
			defer f.Close()
			if _, err := io.Copy(tw, f); err != nil {
				return err
			}
			return f.Close()
		}
	}
	return nil
}
// untarAll extracts every entry of the tar stream in reader under dest,
// stripping prefix from entry names and refusing entries that would escape
// dest. Link entries are materialized as regular files in a second pass.
func (o *CopyOptions) untarAll(prefix string, dest localPath, reader io.Reader) error {
	// TODO: use compression here?
	tarReader := tar.NewReader(reader)
	// link entries collected for the second pass, after their targets exist
	var linkList []tar.Header
	var genDstFilename = func(headerName string) localPath {
		return dest.Join(newRemotePath(headerName[len(prefix):]))
	}
	for {
		header, err := tarReader.Next()
		if err != nil {
			if err != io.EOF {
				return err
			}
			break
		}
		// All the files will start with the prefix, which is the directory where
		// they were located on the pod, we need to strip down that prefix, but
		// if the prefix is missing it means the tar was tempered with.
		// For the case where prefix is empty we need to ensure that the path
		// is not absolute, which also indicates the tar file was tempered with.
		if !strings.HasPrefix(header.Name, prefix) {
			return fmt.Errorf("tar contents corrupted")
		}
		// header.Name is a name of the REMOTE file, so we need to create
		// a remotePath so that it goes through appropriate processing related
		// with cleaning remote paths
		destFileName := genDstFilename(header.Name)
		if !isRelative(dest, destFileName) {
			fmt.Fprintf(o.IOStreams.ErrOut, "warning: file %q is outside target destination, skipping\n", destFileName)
			continue
		}
		if err := os.MkdirAll(destFileName.Dir().String(), 0755); err != nil {
			return err
		}
		if header.FileInfo().IsDir() {
			if err := os.MkdirAll(destFileName.String(), 0755); err != nil {
				return err
			}
			continue
		}
		outFile, err := os.Create(destFileName.String())
		if err != nil {
			return err
		}
		// deferred close is a safety net for error paths; the explicit Close
		// below is the one whose error is checked
		defer outFile.Close()
		if _, err := io.Copy(outFile, tarReader); err != nil {
			return err
		}
		if err := outFile.Close(); err != nil {
			return err
		}
		// all file became into normal file, this means linkList to another file, do it later
		if header.Linkname != "" {
			linkList = append(linkList, *header)
		}
	}
	// handle linked file
	for _, f := range linkList {
		err := copyFromLink(linkList, f, genDstFilename)
		if err != nil {
			return err
		}
	}
	return nil
}
// execute fills in the namespace, container, client config, and pod client
// defaults from the CopyOptions, then validates and runs the remote exec.
func (o *CopyOptions) execute(options *exec.ExecOptions) error {
	if len(options.Namespace) == 0 {
		options.Namespace = o.Namespace
	}
	if len(o.Container) > 0 {
		options.ContainerName = o.Container
	}
	options.Config = o.ClientConfig
	options.PodClient = o.Clientset.CoreV1()
	if err := options.Validate(); err != nil {
		return err
	}
	return options.Run()
}

145
pkg/cp/filespec.go Normal file
View File

@@ -0,0 +1,145 @@
package cp
import (
"path"
"path/filepath"
"strings"
)
// fileSpec is one parsed side of a copy operation: an optional pod (with
// optional namespace) plus the file path on that side.
type fileSpec struct {
	PodName      string   // empty for a purely local path
	PodNamespace string   // empty means "use the default namespace"
	File         pathSpec // localPath or remotePath
}
// pathSpec is the common interface of localPath and remotePath.
type pathSpec interface {
	String() string
}
// localPath represents a client-native path, which will differ based
// on the client OS, its methods will use path/filepath package which
// is OS dependant
type localPath struct {
	file string
}

// newLocalPath builds a localPath with any trailing slash removed.
func newLocalPath(fileName string) localPath {
	file := stripTrailingSlash(fileName)
	return localPath{file: file}
}
func (p localPath) String() string {
	return p.file
}

// Dir returns all but the last element of the path (filepath.Dir semantics).
func (p localPath) Dir() localPath {
	return newLocalPath(filepath.Dir(p.file))
}

// Base returns the last element of the path (filepath.Base semantics).
func (p localPath) Base() localPath {
	return newLocalPath(filepath.Base(p.file))
}

// Clean lexically normalizes the path (filepath.Clean semantics).
func (p localPath) Clean() localPath {
	return newLocalPath(filepath.Clean(p.file))
}

// Join appends elem to the path using the OS path separator.
func (p localPath) Join(elem pathSpec) localPath {
	return newLocalPath(filepath.Join(p.file, elem.String()))
}

// Glob expands the path as a shell-style pattern (filepath.Glob semantics).
func (p localPath) Glob() (matches []string, err error) {
	return filepath.Glob(p.file)
}

// StripSlashes removes all leading '/' and '\' characters.
func (p localPath) StripSlashes() localPath {
	return newLocalPath(stripLeadingSlash(p.file))
}
// isRelative reports whether target stays inside base, i.e. the relative path
// from base to target contains no leading "../" escapes.
func isRelative(base, target localPath) bool {
	relative, err := filepath.Rel(base.String(), target.String())
	if err != nil {
		return false
	}
	// unchanged by stripPathShortcuts means no "../" prefix was present
	return relative == "." || relative == stripPathShortcuts(relative)
}
// remotePath represents always UNIX path, its methods will use path
// package which is always using `/`
type remotePath struct {
	file string
}

// newRemotePath normalizes fileName to slash-separated form with any
// trailing slash removed.
func newRemotePath(fileName string) remotePath {
	// we assume remote file is a linux container but we need to convert
	// windows path separators to unix style for consistent processing
	file := strings.ReplaceAll(stripTrailingSlash(fileName), `\`, "/")
	return remotePath{file: file}
}
func (p remotePath) String() string {
	return p.file
}

// Dir returns all but the last element of the path (path.Dir semantics).
func (p remotePath) Dir() remotePath {
	return newRemotePath(path.Dir(p.file))
}

// Base returns the last element of the path (path.Base semantics).
func (p remotePath) Base() remotePath {
	return newRemotePath(path.Base(p.file))
}

// Clean lexically normalizes the path (path.Clean semantics).
func (p remotePath) Clean() remotePath {
	return newRemotePath(path.Clean(p.file))
}

// Join appends elem to the path with '/' separators.
func (p remotePath) Join(elem pathSpec) remotePath {
	return newRemotePath(path.Join(p.file, elem.String()))
}

// StripShortcuts cleans the path and removes leading "../" segments.
func (p remotePath) StripShortcuts() remotePath {
	p = p.Clean()
	return newRemotePath(stripPathShortcuts(p.file))
}

// StripSlashes removes all leading '/' and '\' characters.
func (p remotePath) StripSlashes() remotePath {
	return newRemotePath(stripLeadingSlash(p.file))
}
// stripTrailingSlash strips a single trailing separator (if any) both unix
// and windows style, while leaving a bare root ("/" or `\`) intact.
// The previous version only handled '/': its HasSuffix check against the last
// byte could never match '\', contradicting its own comment.
func stripTrailingSlash(file string) string {
	if len(file) <= 1 {
		return file
	}
	if last := file[len(file)-1]; last == '/' || last == '\\' {
		return file[:len(file)-1]
	}
	return file
}
// stripLeadingSlash drops every leading '/' and '\' character from file.
func stripLeadingSlash(file string) string {
	// tar strips the leading '/' and '\' if it's there, so we will too
	start := 0
	for start < len(file) && (file[start] == '/' || file[start] == '\\') {
		start++
	}
	return file[start:]
}
// stripPathShortcuts removes any leading or trailing "../" from a given path
func stripPathShortcuts(p string) string {
	cleaned := p
	for strings.HasPrefix(cleaned, "../") {
		cleaned = cleaned[len("../"):]
	}
	// a path that was nothing but shortcuts collapses to the empty string
	if cleaned == "." || cleaned == ".." {
		return ""
	}
	// drop a single leading slash so the result is always relative
	if strings.HasPrefix(cleaned, "/") {
		return cleaned[1:]
	}
	return cleaned
}

36
pkg/cp/untar.go Normal file
View File

@@ -0,0 +1,36 @@
package cp
import (
"archive/tar"
"io"
"os"
)
// copy from another real file
// copyFromLink materializes a tar link entry as a regular file by copying the
// bytes of its (possibly transitively linked) target, which untarAll has
// already extracted to disk.
// NOTE(review): a self-referencing or cyclic link chain would recurse without
// bound — presumed absent in archives produced by `tar cfh`; confirm upstream.
func copyFromLink(fileHeaderList []tar.Header, currFile tar.Header, genDstFilename func(headerName string) localPath) error {
	for _, t := range fileHeaderList {
		if t.Name == currFile.Linkname {
			// handle it recursive if linkA --> linkB --> originFile
			return copyFromLink(fileHeaderList, t, genDstFilename)
		}
	}
	// read from origin file
	r, err := os.OpenFile(genDstFilename(currFile.Linkname).String(), os.O_RDONLY, 0644)
	if err != nil {
		return err
	}
	// the source descriptor was previously never closed, leaking one fd per
	// link entry; close it when done
	defer r.Close()
	// write to current file
	w, err := os.OpenFile(genDstFilename(currFile.Name).String(), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	_, err = io.Copy(w, r)
	if closeErr := w.Close(); closeErr != nil && err == nil {
		err = closeErr
	}
	return err
}

191
pkg/dev/LICENSE Normal file
View File

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2013-2017 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

251
pkg/dev/convert.go Normal file
View File

@@ -0,0 +1,251 @@
package dev
import (
"context"
"fmt"
"math/rand"
"os"
"path/filepath"
"strconv"
"strings"
"unsafe"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/go-connections/nat"
"github.com/google/uuid"
miekgdns "github.com/miekg/dns"
"github.com/moby/term"
v12 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/spf13/cobra"
"k8s.io/api/core/v1"
v13 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/kubectl/pkg/cmd/util"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/cp"
"github.com/wencaiwulue/kubevpn/pkg/dns"
"github.com/wencaiwulue/kubevpn/pkg/handler"
)
// RunConfig bundles everything needed to create and start one local docker
// container that stands in for a single k8s container of a pod template.
type RunConfig struct {
	containerName    string // generated docker name: <k8s name>_<namespace>_kubevpn_<suffix>
	k8sContainerName string // the container's original name inside the pod spec
	config           *container.Config           // docker container config derived from the k8s spec
	hostConfig       *container.HostConfig       // host-level settings (mounts, DNS, capabilities, ports)
	networkingConfig *network.NetworkingConfig   // endpoint settings for the container's networks
	platform         *v12.Platform               // target platform; nil means daemon default
	Options          RunOptions
	Copts            *ContainerOptions
}
// ConvertKubeResourceToContainer translates each container of a k8s pod
// template into a docker RunConfig so the pod can be emulated locally.
//
// namespace is used in the generated container name; envMap maps k8s
// container name -> "KEY=VALUE" env entries; mountVolume maps k8s container
// name -> docker bind mounts; dnsConfig supplies the DNS servers, search
// domains and ndots the containers should use.
func ConvertKubeResourceToContainer(namespace string, temp v1.PodTemplateSpec, envMap map[string][]string, mountVolume map[string][]mount.Mount, dnsConfig *miekgdns.ClientConfig) (runConfigList ConfigList) {
	spec := temp.Spec
	for _, c := range spec.Containers {
		var r RunConfig
		tmpConfig := &container.Config{
			// prefer the pod-level hostname; otherwise fall back to the
			// HOSTNAME entry injected into this container's env
			Hostname: func() string {
				var hostname = spec.Hostname
				if hostname == "" {
					for _, envEntry := range envMap[c.Name] {
						env := strings.Split(envEntry, "=")
						if len(env) == 2 && env[0] == "HOSTNAME" {
							hostname = env[1]
							break
						}
					}
				}
				return hostname
			}(),
			Domainname:      spec.Subdomain,
			User:            "root",
			AttachStdin:     false,
			AttachStdout:    false,
			AttachStderr:    false,
			ExposedPorts:    nil, // filled in below from c.Ports
			Tty:             c.TTY,
			OpenStdin:       c.Stdin,
			StdinOnce:       false,
			Env:             envMap[c.Name],
			Cmd:             c.Args,    // k8s args -> docker cmd
			Healthcheck:     nil,
			ArgsEscaped:     false,
			Image:           c.Image,
			Volumes:         nil,
			WorkingDir:      c.WorkingDir,
			Entrypoint:      c.Command, // k8s command -> docker entrypoint
			NetworkDisabled: false,
			MacAddress:      "",
			OnBuild:         nil,
			Labels:          temp.Labels,
			StopSignal:      "",
			StopTimeout:     nil,
			Shell:           nil,
		}
		if temp.DeletionGracePeriodSeconds != nil {
			// NOTE(review): reinterprets *int64 as *int via unsafe.Pointer;
			// only layout-safe where int is 64-bit — TODO confirm
			tmpConfig.StopTimeout = (*int)(unsafe.Pointer(temp.DeletionGracePeriodSeconds))
		}
		hostConfig := &container.HostConfig{
			Binds:           []string{},
			ContainerIDFile: "",
			LogConfig:       container.LogConfig{},
			//NetworkMode: "",
			PortBindings:  nil, // filled in below from c.Ports
			RestartPolicy: container.RestartPolicy{},
			AutoRemove:    false,
			VolumeDriver:  "",
			VolumesFrom:   nil,
			ConsoleSize:   [2]uint{},
			CapAdd:        strslice.StrSlice{"SYS_PTRACE", "SYS_ADMIN"}, // for dlv
			CgroupnsMode:  "",
			DNS:           dnsConfig.Servers,
			DNSOptions:    []string{fmt.Sprintf("ndots=%d", dnsConfig.Ndots)},
			DNSSearch:     dnsConfig.Search,
			ExtraHosts:    nil,
			GroupAdd:      nil,
			IpcMode:       "",
			Cgroup:        "",
			Links:         nil,
			OomScoreAdj:   0,
			PidMode:       "",
			// privileged + relaxed security profiles so debuggers work
			Privileged:      true,
			PublishAllPorts: false,
			ReadonlyRootfs:  false,
			SecurityOpt:     []string{"apparmor=unconfined", "seccomp=unconfined"},
			StorageOpt:      nil,
			Tmpfs:           nil,
			UTSMode:         "",
			UsernsMode:      "",
			ShmSize:         0,
			Sysctls:         nil,
			Runtime:         "",
			Isolation:       "",
			Resources:       container.Resources{},
			Mounts:          mountVolume[c.Name],
			MaskedPaths:     nil,
			ReadonlyPaths:   nil,
			Init:            nil,
		}
		// translate k8s container ports into docker exposed ports plus host
		// bindings for any explicit hostPort
		var portmap = nat.PortMap{}
		var portset = nat.PortSet{}
		for _, port := range c.Ports {
			port1 := nat.Port(fmt.Sprintf("%d/%s", port.ContainerPort, strings.ToLower(string(port.Protocol))))
			if port.HostPort != 0 {
				binding := []nat.PortBinding{{HostPort: strconv.FormatInt(int64(port.HostPort), 10)}}
				portmap[port1] = binding
			}
			portset[port1] = struct{}{}
		}
		hostConfig.PortBindings = portmap
		tmpConfig.ExposedPorts = portset
		if c.SecurityContext != nil && c.SecurityContext.Capabilities != nil {
			// NOTE(review): unsafe cast relies on []v1.Capability and
			// strslice.StrSlice having identical memory layout — confirm
			hostConfig.CapAdd = append(hostConfig.CapAdd, *(*strslice.StrSlice)(unsafe.Pointer(&c.SecurityContext.Capabilities.Add))...)
			hostConfig.CapDrop = *(*strslice.StrSlice)(unsafe.Pointer(&c.SecurityContext.Capabilities.Drop))
		}
		// random suffix keeps repeated runs from colliding on the name;
		// on uuid failure the suffix is simply empty
		var suffix string
		newUUID, err := uuid.NewUUID()
		if err == nil {
			suffix = strings.ReplaceAll(newUUID.String(), "-", "")[:5]
		}
		r.containerName = fmt.Sprintf("%s_%s_%s_%s", c.Name, namespace, "kubevpn", suffix)
		r.k8sContainerName = c.Name
		r.config = tmpConfig
		r.hostConfig = hostConfig
		r.networkingConfig = &network.NetworkingConfig{EndpointsConfig: make(map[string]*network.EndpointSettings)}
		r.platform = nil
		runConfigList = append(runConfigList, &r)
	}
	return runConfigList
}
// GetDNS resolves the DNS client configuration (servers, search domains,
// ndots) as observed from inside the given pod.
func GetDNS(ctx context.Context, f util.Factory, ns, pod string) (*miekgdns.ClientConfig, error) {
	clientSet, err := f.KubernetesClientSet()
	if err != nil {
		return nil, err
	}
	// make sure the pod actually exists before doing any further work
	if _, err = clientSet.CoreV1().Pods(ns).Get(ctx, pod, v13.GetOptions{}); err != nil {
		return nil, err
	}
	restConfig, err := f.ToRESTConfig()
	if err != nil {
		return nil, err
	}
	restClient, err := f.RESTClient()
	if err != nil {
		return nil, err
	}
	return dns.GetDNSServiceIPFromPod(clientSet, restClient, restConfig, pod, ns)
}
// GetVolume downloads the volume mounts of every non-sidecar container of
// the pod into fresh local temp directories (via kubectl-cp semantics) and
// returns docker bind mounts that map those local copies back onto the
// original mount paths. The result map is keyed by container name.
// (The previous comment claimed "[container name]-[volume mount name]" keys;
// the code keys by container name only.)
func GetVolume(ctx context.Context, f util.Factory, ns, pod string) (map[string][]mount.Mount, error) {
	clientSet, err := f.KubernetesClientSet()
	if err != nil {
		return nil, err
	}
	var get *v1.Pod
	get, err = clientSet.CoreV1().Pods(ns).Get(ctx, pod, v13.GetOptions{})
	if err != nil {
		return nil, err
	}
	result := map[string][]mount.Mount{}
	for _, c := range get.Spec.Containers {
		// if container name is vpn or envoy-proxy, not need to download volume
		if c.Name == config.ContainerSidecarVPN || c.Name == config.ContainerSidecarEnvoyProxy {
			continue
		}
		var m []mount.Mount
		for _, volumeMount := range c.VolumeMounts {
			// /tmp is scratch space; nothing worth copying
			if volumeMount.MountPath == "/tmp" {
				continue
			}
			// fresh random directory under the OS temp dir for this mount
			join := filepath.Join(os.TempDir(), strconv.Itoa(rand.Int()))
			err = os.MkdirAll(join, 0755)
			if err != nil {
				return nil, err
			}
			if volumeMount.SubPath != "" {
				join = filepath.Join(join, volumeMount.SubPath)
			}
			// NOTE(review): when SubPath is set, rollback removes only the
			// subpath-joined dir, so the random parent dir may linger — confirm
			handler.RollbackFuncList = append(handler.RollbackFuncList, func() {
				_ = os.RemoveAll(join)
			})
			// pod-namespace/pod-name:path
			remotePath := fmt.Sprintf("%s/%s:%s", ns, pod, volumeMount.MountPath)
			stdIn, stdOut, stdErr := term.StdStreams()
			copyOptions := cp.NewCopyOptions(genericclioptions.IOStreams{In: stdIn, Out: stdOut, ErrOut: stdErr})
			copyOptions.Container = c.Name
			copyOptions.MaxTries = 10
			err = copyOptions.Complete(f, &cobra.Command{}, []string{remotePath, join})
			if err != nil {
				return nil, err
			}
			// a failed download is tolerated: warn and skip this mount
			// rather than aborting the whole operation
			err = copyOptions.Run()
			if err != nil {
				_, _ = fmt.Fprintf(os.Stderr, "failed to download volume %s path %s to %s, err: %v, ignore...\n", volumeMount.Name, remotePath, join, err)
				continue
			}
			m = append(m, mount.Mount{
				Type:   mount.TypeBind,
				Source: join,
				Target: volumeMount.MountPath,
			})
			fmt.Printf("%s:%s\n", join, volumeMount.MountPath)
		}
		result[c.Name] = m
	}
	return result, nil
}

271
pkg/dev/dockercreate.go Normal file
View File

@@ -0,0 +1,271 @@
package dev
import (
"context"
"fmt"
"io"
"os"
"regexp"
"github.com/containerd/containerd/platforms"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/image"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/versions"
apiclient "github.com/docker/docker/client"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/registry"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// Pull constants: the accepted values of the --pull policy.
const (
	PullImageAlways  = "always"  // always pull before creating the container
	PullImageMissing = "missing" // Default (matches previous behavior): pull only when the image is absent locally
	PullImageNever   = "never"   // never pull; fail if the image is not present locally
)
// createOptions mirrors the subset of `docker create` flags used here.
type createOptions struct {
	Name      string // container name (--name)
	Platform  string // target platform (--platform), e.g. linux/amd64
	Untrusted bool   // when true, skip content-trust resolution of the tag
	Pull      string // always, missing, never
	Quiet     bool   // suppress pull progress output
}
// pullImage fetches the given image (optionally for a specific platform)
// through the docker daemon, streaming progress messages to out.
func pullImage(ctx context.Context, dockerCli command.Cli, image string, platform string, out io.Writer) error {
	named, err := reference.ParseNormalizedNamed(image)
	if err != nil {
		return err
	}
	// Resolve the Repository name from fqn to RepositoryInfo
	repoInfo, err := registry.ParseRepositoryInfo(named)
	if err != nil {
		return err
	}
	auth := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index)
	encodedAuth, err := command.EncodeAuthToBase64(auth)
	if err != nil {
		return err
	}
	responseBody, err := dockerCli.Client().ImageCreate(ctx, image, types.ImageCreateOptions{
		RegistryAuth: encodedAuth,
		Platform:     platform,
	})
	if err != nil {
		return err
	}
	defer responseBody.Close()
	// relay the daemon's JSON progress stream to the caller's writer
	return jsonmessage.DisplayJSONMessagesStream(
		responseBody,
		out,
		dockerCli.Out().FD(),
		dockerCli.Out().IsTerminal(),
		nil)
}
// cidFile tracks the file that records the created container's ID
// (the --cidfile option).
type cidFile struct {
	path    string   // destination path of the CID file
	file    *os.File // open handle; nil when no CID file was requested
	written bool     // true once the container ID has been persisted
}
// Close closes the underlying file handle and, when no container ID was ever
// written, removes the now-useless CID file from disk. A nil file (no CID
// file requested) is a no-op.
func (cid *cidFile) Close() error {
	if cid.file == nil {
		return nil
	}
	// the close error is deliberately ignored: the file is either kept
	// (an ID was written) or removed just below
	cid.file.Close()
	if cid.written {
		return nil
	}
	if err := os.Remove(cid.path); err != nil {
		return errors.Wrapf(err, "failed to remove the CID file '%s'", cid.path)
	}
	return nil
}
// Write records the created container's ID into the CID file and marks it as
// written so Close keeps the file. A nil file handle is a no-op.
func (cid *cidFile) Write(id string) error {
	if cid.file == nil {
		return nil
	}
	_, err := cid.file.Write([]byte(id))
	if err != nil {
		return errors.Wrap(err, "failed to write the container ID to the file")
	}
	cid.written = true
	return nil
}
// newCIDFile creates the container-ID file at path, refusing to clobber an
// existing one. An empty path yields an inert cidFile (all methods no-op).
func newCIDFile(path string) (*cidFile, error) {
	if path == "" {
		return &cidFile{}, nil
	}
	// an existing file likely belongs to another running container
	_, statErr := os.Stat(path)
	if statErr == nil {
		return nil, errors.Errorf("container ID file found, make sure the other container isn't running or delete %s", path)
	}
	f, err := os.Create(path)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create the container ID file")
	}
	return &cidFile{path: path, file: f}, nil
}
//nolint:gocyclo
// createContainer creates a docker container from the given config, honoring
// the pull policy (always/missing/never), content trust, the --platform flag
// (Engine API >= 1.41) and the --cidfile option. It returns the daemon's
// create response.
func createContainer(ctx context.Context, dockerCli command.Cli, containerConfig *containerConfig, opts *createOptions) (*container.CreateResponse, error) {
	config := containerConfig.Config
	hostConfig := containerConfig.HostConfig
	networkingConfig := containerConfig.NetworkingConfig
	stderr := dockerCli.Err()
	warnOnOomKillDisable(*hostConfig, stderr)
	warnOnLocalhostDNS(*hostConfig, stderr)
	var (
		trustedRef reference.Canonical
		namedRef   reference.Named
	)
	containerIDFile, err := newCIDFile(hostConfig.ContainerIDFile)
	if err != nil {
		return nil, err
	}
	defer containerIDFile.Close()
	ref, err := reference.ParseAnyReference(config.Image)
	if err != nil {
		return nil, err
	}
	if named, ok := ref.(reference.Named); ok {
		namedRef = reference.TagNameOnly(named)
		// with content trust enabled, resolve the tag to a trusted digest
		// and create from that digest instead
		if taggedRef, ok := namedRef.(reference.NamedTagged); ok && !opts.Untrusted {
			var err error
			trustedRef, err = image.TrustedReference(ctx, dockerCli, taggedRef, nil)
			if err != nil {
				return nil, err
			}
			config.Image = reference.FamiliarString(trustedRef)
		}
	}
	// pullAndTagImage pulls config.Image and, under content trust, re-tags
	// the trusted digest with the familiar tag
	pullAndTagImage := func() error {
		pullOut := stderr
		if opts.Quiet {
			pullOut = io.Discard
		}
		if err := pullImage(ctx, dockerCli, config.Image, opts.Platform, pullOut); err != nil {
			return err
		}
		if taggedRef, ok := namedRef.(reference.NamedTagged); ok && trustedRef != nil {
			return image.TagTrusted(ctx, dockerCli, trustedRef, taggedRef)
		}
		return nil
	}
	var platform *specs.Platform
	// Engine API version 1.41 first introduced the option to specify platform on
	// create. It will produce an error if you try to set a platform on older API
	// versions, so check the API version here to maintain backwards
	// compatibility for CLI users.
	if opts.Platform != "" && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.41") {
		p, err := platforms.Parse(opts.Platform)
		if err != nil {
			return nil, errors.Wrap(err, "error parsing specified platform")
		}
		platform = &p
	}
	if opts.Pull == PullImageAlways {
		if err := pullAndTagImage(); err != nil {
			return nil, err
		}
	}
	hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize()
	response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, opts.Name)
	if err != nil {
		// Pull image if it does not exist locally and we have the PullImageMissing option. Default behavior.
		if apiclient.IsErrNotFound(err) && namedRef != nil && opts.Pull == PullImageMissing {
			if !opts.Quiet {
				// we don't want to write to stdout anything apart from container.ID
				fmt.Fprintf(stderr, "Unable to find image '%s' locally\n", reference.FamiliarString(namedRef))
			}
			if err := pullAndTagImage(); err != nil {
				return nil, err
			}
			// retry the create once with the freshly pulled image
			var retryErr error
			response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, opts.Name)
			if retryErr != nil {
				return nil, retryErr
			}
		} else {
			return nil, err
		}
	}
	// surface any daemon warnings without failing the create
	for _, warning := range response.Warnings {
		fmt.Fprintf(stderr, "WARNING: %s\n", warning)
	}
	err = containerIDFile.Write(response.ID)
	return &response, err
}
// warnOnOomKillDisable prints a warning when the OOM killer is disabled but
// no memory limit has been set, which can endanger the host.
func warnOnOomKillDisable(hostConfig container.HostConfig, stderr io.Writer) {
	oomDisabled := hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable
	if oomDisabled && hostConfig.Memory == 0 {
		fmt.Fprintln(stderr, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.")
	}
}
// check the DNS settings passed via --dns against localhost regexp to warn if
// they are trying to set a DNS to a localhost address
func warnOnLocalhostDNS(hostConfig container.HostConfig, stderr io.Writer) {
	for _, dnsIP := range hostConfig.DNS {
		if !isLocalhost(dnsIP) {
			continue
		}
		// one warning is enough; stop at the first localhost entry
		fmt.Fprintf(stderr, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP)
		return
	}
}
// IPLocalhost is a regex pattern for IPv4 or IPv6 loopback range.
const ipLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)`

var localhostIPRegexp = regexp.MustCompile(ipLocalhost)

// isLocalhost returns true if ip matches the localhost IP regular expression.
// Used for determining if nameserver settings are being passed which are
// localhost addresses.
func isLocalhost(ip string) bool {
	matched := localhostIPRegexp.MatchString(ip)
	return matched
}
// validatePullOpt checks that val is one of the supported --pull policies
// (always, missing, never) or empty.
func validatePullOpt(val string) error {
	switch val {
	case "", PullImageAlways, PullImageMissing, PullImageNever:
		// valid option, but nothing to do yet
		return nil
	}
	return fmt.Errorf(
		"invalid pull option: '%s': must be one of %q, %q or %q",
		val,
		PullImageAlways,
		PullImageMissing,
		PullImageNever,
	)
}

1076
pkg/dev/dockeropts.go Normal file

File diff suppressed because it is too large Load Diff

108
pkg/dev/dockerrun.go Normal file
View File

@@ -0,0 +1,108 @@
package dev
import (
"context"
"fmt"
"io"
"strings"
"syscall"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
)
// RunOptions mirrors the subset of `docker run` flags supported by dev mode.
type RunOptions struct {
	createOptions
	Detach     bool   // run the container in the background
	sigProxy   bool   // forward received signals to the container
	detachKeys string // custom detach key sequence; "" means the default
}
// attachContainer attaches the CLI's stdio to the given container and starts
// streaming in a background goroutine. The streaming result is delivered on
// a channel stored into *errCh; the returned func closes the hijacked
// connection. (A dead, empty `if in != nil && out != nil && cerr != nil {}`
// statement from the original has been removed.)
func attachContainer(ctx context.Context, dockerCli command.Cli, errCh *chan error, config *container.Config, containerID string) (func(), error) {
	options := types.ContainerAttachOptions{
		Stream:     true,
		Stdin:      config.AttachStdin,
		Stdout:     config.AttachStdout,
		Stderr:     config.AttachStderr,
		DetachKeys: dockerCli.ConfigFile().DetachKeys,
	}
	resp, errAttach := dockerCli.Client().ContainerAttach(ctx, containerID, options)
	if errAttach != nil {
		return nil, fmt.Errorf("failed to attach to container: %s, err: %v", containerID, errAttach)
	}
	// wire up only the streams the container config asks for; the rest
	// stay nil and are ignored by the streamer
	var (
		out, cerr io.Writer
		in        io.ReadCloser
	)
	if config.AttachStdin {
		in = dockerCli.In()
	}
	if config.AttachStdout {
		out = dockerCli.Out()
	}
	if config.AttachStderr {
		// with a TTY, stdout and stderr are multiplexed onto one stream
		if config.Tty {
			cerr = dockerCli.Out()
		} else {
			cerr = dockerCli.Err()
		}
	}
	ch := make(chan error, 1)
	*errCh = ch
	go func() {
		ch <- func() error {
			streamer := hijackedIOStreamer{
				streams:      dockerCli,
				inputStream:  in,
				outputStream: out,
				errorStream:  cerr,
				resp:         resp,
				tty:          config.Tty,
				detachKeys:   options.DetachKeys,
			}
			// a streaming error takes precedence; otherwise report the
			// (necessarily nil at this point) attach error
			if errHijack := streamer.stream(ctx); errHijack != nil {
				return errHijack
			}
			return errAttach
		}()
	}()
	return resp.Close, nil
}
// reportError is a utility method that prints a user-friendly message
// containing the error that occurred during parsing and a suggestion to get help
func reportError(stderr io.Writer, name string, str string, withHelp bool) {
str = strings.TrimSuffix(str, ".") + "."
if withHelp {
str += "\nSee 'docker " + name + " --help'."
}
_, _ = fmt.Fprintln(stderr, "docker:", str)
}
// if container start fails with 'not found'/'no such' error, return 127
// if container start fails with 'permission denied' error, return 126
// return 125 for generic docker daemon failures
func runStartContainerErr(err error) error {
	msg := strings.TrimPrefix(err.Error(), "Error response from daemon: ")
	code := 125
	switch {
	case strings.Contains(msg, "executable file not found"),
		strings.Contains(msg, "no such file or directory"),
		strings.Contains(msg, "system cannot find the file specified"):
		code = 127
	case strings.Contains(msg, syscall.EACCES.Error()):
		code = 126
	}
	return cli.StatusError{StatusCode: code}
}

207
pkg/dev/hijack.go Normal file
View File

@@ -0,0 +1,207 @@
package dev
import (
"context"
"fmt"
"io"
"runtime"
"sync"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/stdcopy"
"github.com/moby/term"
"github.com/sirupsen/logrus"
)
// The default escape key sequence: ctrl-p, ctrl-q (ASCII DLE=16, DC1=17).
// TODO: This could be moved to `pkg/term`.
var defaultEscapeKeys = []byte{16, 17}
// A hijackedIOStreamer handles copying input to and output from streams to the
// connection.
type hijackedIOStreamer struct {
	streams      command.Streams // terminal-aware CLI stream set
	inputStream  io.ReadCloser   // local stdin; nil when stdin is not attached
	outputStream io.Writer       // local stdout; nil when stdout is not attached
	errorStream  io.Writer       // local stderr; nil when stderr is not attached

	resp types.HijackedResponse // the raw hijacked connection to the daemon

	tty        bool   // remote side is a TTY: single multiplexed output stream
	detachKeys string // custom detach key sequence; "" uses defaultEscapeKeys
}
// stream handles setting up the IO and then begins streaming stdin/stdout
// to/from the hijacked connection, blocking until it is either done reading
// output, the user inputs the detach key sequence when in TTY mode, or when
// the given context is cancelled.
func (h *hijackedIOStreamer) stream(ctx context.Context) error {
	restoreInput, err := h.setupInput()
	if err != nil {
		return fmt.Errorf("unable to setup input stream: %s", err)
	}
	// restoreInput is idempotent (guarded by sync.Once in setupInput), so
	// calling it here and inside the copy goroutines is safe
	defer restoreInput()
	outputDone := h.beginOutputStream(restoreInput)
	inputDone, detached := h.beginInputStream(restoreInput)
	select {
	case err := <-outputDone:
		// output copying finished (or failed); this is the primary result
		return err
	case <-inputDone:
		// Input stream has closed.
		if h.outputStream != nil || h.errorStream != nil {
			// Wait for output to complete streaming.
			select {
			case err := <-outputDone:
				return err
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		return nil
	case err := <-detached:
		// Got a detach key sequence.
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}
// setupInput puts the terminal into raw mode when stdin is attached to a
// TTY, wraps the input stream with the detach-escape proxy, and returns an
// idempotent restore function that undoes the terminal changes.
func (h *hijackedIOStreamer) setupInput() (restore func(), err error) {
	if h.inputStream == nil || !h.tty {
		// No need to setup input TTY.
		// The restore func is a nop.
		return func() {}, nil
	}
	if err := setRawTerminal(h.streams); err != nil {
		return nil, fmt.Errorf("unable to set IO streams as raw terminal: %s", err)
	}
	// Use sync.Once so we may call restore multiple times but ensure we
	// only restore the terminal once.
	var restoreOnce sync.Once
	restore = func() {
		restoreOnce.Do(func() {
			restoreTerminal(h.streams, h.inputStream)
		})
	}
	// Wrap the input to detect detach escape sequence.
	// Use default escape keys if an invalid sequence is given.
	escapeKeys := defaultEscapeKeys
	if h.detachKeys != "" {
		customEscapeKeys, err := term.ToBytes(h.detachKeys)
		if err != nil {
			logrus.Warnf("invalid detach escape keys, using default: %s", err)
		} else {
			escapeKeys = customEscapeKeys
		}
	}
	h.inputStream = ioutils.NewReadCloserWrapper(term.NewEscapeProxy(h.inputStream, escapeKeys), h.inputStream.Close)
	return restore, nil
}
// beginOutputStream starts copying container output to the local
// stdout/stderr writers in a goroutine and returns a channel that receives
// the copy result. It returns nil when there is nothing to copy.
func (h *hijackedIOStreamer) beginOutputStream(restoreInput func()) <-chan error {
	if h.outputStream == nil && h.errorStream == nil {
		// There is no need to copy output.
		return nil
	}
	outputDone := make(chan error)
	go func() {
		var err error
		// When TTY is ON, use regular copy
		if h.outputStream != nil && h.tty {
			_, err = io.Copy(h.outputStream, h.resp.Reader)
			// We should restore the terminal as soon as possible
			// once the connection ends so any following print
			// messages will be in normal type.
			restoreInput()
		} else {
			// non-TTY output is multiplexed; demux stdout/stderr frames
			_, err = stdcopy.StdCopy(h.outputStream, h.errorStream, h.resp.Reader)
		}
		logrus.Debug("[hijack] End of stdout")
		if err != nil {
			logrus.Debugf("Error receiveStdout: %s", err)
		}
		outputDone <- err
	}()
	return outputDone
}
// beginInputStream starts copying local stdin to the hijacked connection in
// a goroutine. doneC is closed when input ends (and write-side EOF has been
// sent); detachedC receives the error when the user typed the detach escape
// sequence.
func (h *hijackedIOStreamer) beginInputStream(restoreInput func()) (doneC <-chan struct{}, detachedC <-chan error) {
	inputDone := make(chan struct{})
	detached := make(chan error)
	go func() {
		if h.inputStream != nil {
			_, err := io.Copy(h.resp.Conn, h.inputStream)
			// We should restore the terminal as soon as possible
			// once the connection ends so any following print
			// messages will be in normal type.
			restoreInput()
			logrus.Debug("[hijack] End of stdin")
			// the escape proxy signals detach via a term.EscapeError
			if _, ok := err.(term.EscapeError); ok {
				detached <- err
				return
			}
			if err != nil {
				// This error will also occur on the receive
				// side (from stdout) where it will be
				// propagated back to the caller.
				logrus.Debugf("Error sendStdin: %s", err)
			}
		}
		if err := h.resp.CloseWrite(); err != nil {
			logrus.Debugf("Couldn't send EOF: %s", err)
		}
		close(inputDone)
	}()
	return inputDone, detached
}
// setRawTerminal switches both the input and output streams into raw
// terminal mode, stopping at the first failure.
func setRawTerminal(streams command.Streams) error {
	err := streams.In().SetRawTerminal()
	if err == nil {
		err = streams.Out().SetRawTerminal()
	}
	return err
}
// restoreTerminal returns the CLI's stdin/stdout to their original (cooked)
// modes and closes the input stream — except on darwin and windows, where
// closing is skipped for the reasons documented below.
func restoreTerminal(streams command.Streams, in io.Closer) error {
	streams.In().RestoreTerminal()
	streams.Out().RestoreTerminal()
	// WARNING: DO NOT REMOVE THE OS CHECKS !!!
	// For some reason this Close call blocks on darwin..
	// As the client exits right after, simply discard the close
	// until we find a better solution.
	//
	// This can also cause the client on Windows to get stuck in Win32 CloseHandle()
	// in some cases. See https://github.com/docker/docker/issues/28267#issuecomment-288237442
	// Tracked internally at Microsoft by VSO #11352156. In the
	// Windows case, you hit this if you are using the native/v2 console,
	// not the "legacy" console, and you start the client in a new window. eg
	// `start docker run --rm -it microsoft/nanoserver cmd /s /c echo foobar`
	// will hang. Remove start, and it won't repro.
	if in != nil && runtime.GOOS != "darwin" && runtime.GOOS != "windows" {
		return in.Close()
	}
	return nil
}

755
pkg/dev/main.go Normal file
View File

@@ -0,0 +1,755 @@
package dev
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"math/rand"
"os"
"path/filepath"
"reflect"
"sort"
"strconv"
"strings"
"syscall"
"time"
"github.com/containerd/containerd/platforms"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/flags"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/go-connections/nat"
"github.com/google/uuid"
specs "github.com/opencontainers/image-spec/specs-go/v1"
pkgerr "github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/pflag"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/polymorphichelpers"
"k8s.io/kubectl/pkg/util/interrupt"
"k8s.io/kubectl/pkg/util/podutils"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/handler"
"github.com/wencaiwulue/kubevpn/pkg/mesh"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
// ConnectMode selects where the cluster connection is established from.
type ConnectMode string
const (
	// ConnectModeContainer establishes the connection from inside a helper docker container.
	ConnectModeContainer ConnectMode = "container"
	// ConnectModeHost establishes the connection directly on the local host.
	ConnectModeHost ConnectMode = "host"
)
// Options carries the user-facing settings of the dev command: which
// workload to clone locally, how to connect to the cluster, and the
// docker run flags applied to the developed container.
type Options struct {
	Headers       map[string]string // header selectors forwarded to "kubevpn proxy --headers"
	Namespace     string
	Workload      string // resource to develop, e.g. deployment/foo
	Factory       cmdutil.Factory
	ContainerName string // k8s container to substitute; empty selects the first one
	NoProxy       bool   // if true, only connect; do not intercept workload traffic
	ExtraCIDR     []string
	ExtraDomain   []string
	ConnectMode   ConnectMode
	Engine        config.Engine
	// docker options
	DockerImage string // overrides the image of the developed container
	Options     RunOptions
	Copts       *ContainerOptions
}
// Main clones the target workload's pod locally: it resolves the first
// active pod of the workload, harvests its env/volume/DNS settings,
// converts each k8s container into a docker RunConfig, merges the
// user-supplied docker flags, wires the containers into one shared
// network namespace, starts them, and finally drops the user into a
// terminal of the primary container.
func (d *Options) Main(ctx context.Context, cli *client.Client, dockerCli *command.DockerCli, tempContainerConfig *containerConfig) error {
	// NOTE(review): rand.Seed is deprecated since Go 1.20 and nothing in
	// this function visibly consumes the global source — confirm whether
	// this seeding is still needed.
	rand.Seed(time.Now().UnixNano())
	object, err := util.GetUnstructuredObject(d.Factory, d.Namespace, d.Workload)
	if err != nil {
		return err
	}
	u := object.Object.(*unstructured.Unstructured)
	var templateSpec *v1.PodTemplateSpec
	//var path []string
	templateSpec, _, err = util.GetPodTemplateSpecPath(u)
	if err != nil {
		return err
	}
	set, err := d.Factory.KubernetesClientSet()
	if err != nil {
		return err
	}
	// drop pods already being deleted, then prefer the most active pod
	sortBy := func(pods []*v1.Pod) sort.Interface {
		for i := 0; i < len(pods); i++ {
			if pods[i].DeletionTimestamp != nil {
				pods = append(pods[:i], pods[i+1:]...)
				i--
			}
		}
		return sort.Reverse(podutils.ActivePods(pods))
	}
	lab := labels.SelectorFromSet(templateSpec.Labels).String()
	firstPod, _, err := polymorphichelpers.GetFirstPod(set.CoreV1(), d.Namespace, lab, time.Second*60, sortBy)
	if err != nil {
		return err
	}
	pod := firstPod.Name
	// harvest runtime settings from the live pod
	env, err := util.GetEnv(ctx, d.Factory, d.Namespace, pod)
	if err != nil {
		return err
	}
	volume, err := GetVolume(ctx, d.Factory, d.Namespace, pod)
	if err != nil {
		return err
	}
	dns, err := GetDNS(ctx, d.Factory, d.Namespace, pod)
	if err != nil {
		return fmt.Errorf("can not get dns conf from pod: %s, err: %v", pod, err)
	}
	// strip injected sidecar containers before converting to docker configs
	mesh.RemoveContainers(templateSpec)
	runConfigList := ConvertKubeResourceToContainer(d.Namespace, *templateSpec, env, volume, dns)
	err = mergeDockerOptions(runConfigList, d, tempContainerConfig)
	if err != nil {
		return fmt.Errorf("can not fill docker options, err: %v", err)
	}
	// check resource
	var outOfMemory bool
	outOfMemory, _ = checkOutOfMemory(templateSpec, cli)
	if outOfMemory {
		return fmt.Errorf("your pod resource request is bigger than docker-desktop resource, please adjust your docker-desktop resource")
	}
	mode := container.NetworkMode(d.Copts.netMode.NetworkMode())
	if len(d.Copts.netMode.Value()) != 0 {
		// the user picked an explicit network mode: every container joins
		// it and loses its own ports/DNS/hostname settings
		for _, runConfig := range runConfigList[:] {
			// remove expose port
			runConfig.config.ExposedPorts = nil
			runConfig.hostConfig.NetworkMode = mode
			if mode.IsContainer() {
				runConfig.hostConfig.PidMode = containertypes.PidMode(d.Copts.netMode.NetworkMode())
			}
			runConfig.hostConfig.PortBindings = nil
			// remove dns
			runConfig.hostConfig.DNS = nil
			runConfig.hostConfig.DNSOptions = nil
			runConfig.hostConfig.DNSSearch = nil
			runConfig.hostConfig.PublishAllPorts = false
			runConfig.config.Hostname = ""
		}
	} else {
		// default: the last container owns the kubevpn bridge network and
		// aggregates every port mapping; the others share its namespace
		var networkID string
		networkID, err = createKubevpnNetwork(ctx, cli)
		if err != nil {
			return err
		}
		runConfigList[len(runConfigList)-1].networkingConfig.EndpointsConfig[runConfigList[len(runConfigList)-1].containerName] = &network.EndpointSettings{
			NetworkID: networkID,
		}
		// collect all port bindings/exposed ports onto the network owner
		var portmap = nat.PortMap{}
		var portset = nat.PortSet{}
		for _, runConfig := range runConfigList {
			for k, v := range runConfig.hostConfig.PortBindings {
				if oldValue, ok := portmap[k]; ok {
					portmap[k] = append(oldValue, v...)
				} else {
					portmap[k] = v
				}
			}
			for k, v := range runConfig.config.ExposedPorts {
				portset[k] = v
			}
		}
		runConfigList[len(runConfigList)-1].hostConfig.PortBindings = portmap
		runConfigList[len(runConfigList)-1].config.ExposedPorts = portset
		// skip last, use last container network
		for _, runConfig := range runConfigList[:len(runConfigList)-1] {
			// remove expose port
			runConfig.config.ExposedPorts = nil
			runConfig.hostConfig.NetworkMode = containertypes.NetworkMode("container:" + runConfigList[len(runConfigList)-1].containerName)
			runConfig.hostConfig.PidMode = containertypes.PidMode("container:" + runConfigList[len(runConfigList)-1].containerName)
			runConfig.hostConfig.PortBindings = nil
			// remove dns
			runConfig.hostConfig.DNS = nil
			runConfig.hostConfig.DNSOptions = nil
			runConfig.hostConfig.DNSSearch = nil
			runConfig.hostConfig.PublishAllPorts = false
			runConfig.config.Hostname = ""
		}
	}
	// make sure the containers are removed again on rollback/exit
	handler.RollbackFuncList = append(handler.RollbackFuncList, func() {
		_ = runConfigList.Remove(ctx, cli)
	})
	err = runConfigList.Run(ctx, volume, cli, dockerCli)
	if err != nil {
		return err
	}
	// attach an interactive shell to the primary (developed) container
	return terminal(runConfigList[0].containerName, dockerCli)
}
// ConfigList is the ordered set of docker containers that together
// mirror one pod; the last entry owns the shared network namespace.
type ConfigList []*RunConfig
// Remove force-removes every container in the list (disconnecting it
// from its like-named network first) and deletes the kubevpn network
// once no container uses it any more. Per-container failures are only
// logged at debug level so cleanup is best-effort.
func (l ConfigList) Remove(ctx context.Context, cli *client.Client) error {
	for _, runConfig := range l {
		// best effort: the network/container may already be gone
		err := cli.NetworkDisconnect(ctx, runConfig.containerName, runConfig.containerName, true)
		if err != nil {
			log.Debug(err)
		}
		err = cli.ContainerRemove(ctx, runConfig.containerName, types.ContainerRemoveOptions{Force: true})
		if err != nil {
			log.Debug(err)
		}
	}
	i, err := cli.NetworkInspect(ctx, config.ConfigMapPodTrafficManager, types.NetworkInspectOptions{})
	if err != nil {
		return err
	}
	// tear the shared network down once it is empty
	if len(i.Containers) == 0 {
		return cli.NetworkRemove(ctx, config.ConfigMapPodTrafficManager)
	}
	return nil
}
// GetClient builds a docker API client from the environment (honoring
// DOCKER_HOST etc. with API version negotiation) plus a DockerCli
// wrapper initialized with the default client options.
func GetClient() (*client.Client, *command.DockerCli, error) {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		return nil, nil, fmt.Errorf("can not create docker client from env, err: %v", err)
	}
	dockerCli, err := command.NewDockerCli(command.WithAPIClient(cli))
	if err != nil {
		return nil, nil, fmt.Errorf("can not create docker client from env, err: %v", err)
	}
	if err = dockerCli.Initialize(flags.NewClientOptions()); err != nil {
		return nil, nil, fmt.Errorf("can not init docker client, err: %v", err)
	}
	return cli, dockerCli, nil
}
// Run starts the containers back-to-front so the network owner (last
// entry) is up before the others join its namespace. The first entry —
// the developed container — is started in docker-run style via runFirst.
// For the rest, if the initial start with bind mounts fails, it retries
// without mounts and copies the volume contents in manually.
func (l ConfigList) Run(ctx context.Context, volume map[string][]mount.Mount, cli *client.Client, dockerCli *command.DockerCli) error {
	for index := len(l) - 1; index >= 0; index-- {
		runConfig := l[index]
		if index == 0 {
			_, err := runFirst(ctx, runConfig, cli, dockerCli)
			if err != nil {
				return err
			}
		} else {
			id, err := run(ctx, runConfig, cli, dockerCli)
			if err != nil {
				// try another way to startup container
				log.Infof("occur err: %v, try another way to startup container...", err)
				runConfig.hostConfig.Mounts = nil
				id, err = run(ctx, runConfig, cli, dockerCli)
				if err != nil {
					return err
				}
				// replicate the would-be mounts by copying files in
				err = l.copyToContainer(ctx, volume[runConfig.k8sContainerName], cli, id)
				if err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// copyToContainer replicates the pod's volume mounts into container id:
// each source path is tar-ed up locally and streamed into the container
// at the mount target. Copy failures for individual mounts are logged
// and skipped (best effort); only archive-preparation errors abort.
func (l ConfigList) copyToContainer(ctx context.Context, volume []mount.Mount, cli *client.Client, id string) error {
	// copy volume into container
	for _, v := range volume {
		// make sure the destination directory exists before copying
		target, err := createFolder(ctx, cli, id, v.Source, v.Target)
		if err != nil {
			// fix: log message typo ("previoully faied")
			log.Debugf("create folder %s previously failed, err: %v", target, err)
		}
		log.Debugf("from %s to %s", v.Source, v.Target)
		srcInfo, err := archive.CopyInfoSourcePath(v.Source, true)
		if err != nil {
			return fmt.Errorf("copy info source path, err: %v", err)
		}
		srcArchive, err := archive.TarResource(srcInfo)
		if err != nil {
			return fmt.Errorf("tar resource failed, err: %v", err)
		}
		dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, archive.CopyInfo{Path: v.Target})
		if err != nil {
			return fmt.Errorf("can not prepare archive copy, err: %v", err)
		}
		err = cli.CopyToContainer(ctx, id, dstDir, preparedArchive, types.CopyToContainerOptions{
			AllowOverwriteDirWithFile: true,
			CopyUIDGID:                true,
		})
		if err != nil {
			// best effort: log and continue with the next mount
			log.Info(fmt.Errorf("can not copy %s to container %s:%s, err: %v", v.Source, id, v.Target, err))
		}
	}
	return nil
}
// createFolder ensures the copy destination exists inside container id
// by exec-ing "mkdir -p" there. If src is not a directory, the parent
// directory of target is created instead. It returns the directory it
// attempted to create.
// NOTE(review): the poll loop only stops once the exec is observed as
// finished; if ContainerExecInspect keeps failing (e.g. ctx cancelled)
// the loop never terminates — confirm.
func createFolder(ctx context.Context, cli *client.Client, id string, src string, target string) (string, error) {
	lstat, err := os.Lstat(src)
	if err != nil {
		return "", err
	}
	// a plain file is copied into its parent directory
	if !lstat.IsDir() {
		target = filepath.Dir(target)
	}
	var create types.IDResponse
	create, err = cli.ContainerExecCreate(ctx, id, types.ExecConfig{
		AttachStdin:  true,
		AttachStderr: true,
		AttachStdout: true,
		Cmd:          []string{"mkdir", "-p", target},
	})
	if err != nil {
		return "", err
	}
	err = cli.ContainerExecStart(ctx, create.ID, types.ExecStartCheck{})
	if err != nil {
		return "", err
	}
	// poll once per second until the mkdir exec has exited
	chanStop := make(chan struct{})
	wait.Until(func() {
		inspect, err := cli.ContainerExecInspect(ctx, create.ID)
		if err != nil {
			return
		}
		if !inspect.Running {
			close(chanStop)
		}
	}, time.Second, chanStop)
	return target, nil
}
// checkOutOfMemory reports whether the summed memory requests of the pod
// template's containers exceed the total memory available to the docker
// daemon. err is non-nil only when querying the daemon info fails.
func checkOutOfMemory(spec *v1.PodTemplateSpec, cli *client.Client) (outOfMemory bool, err error) {
	var info types.Info
	info, err = cli.Info(context.Background())
	if err != nil {
		return
	}
	var req int64
	// loop variable renamed from "container" to avoid shadowing the
	// imported docker "container" package
	for _, c := range spec.Spec.Containers {
		if memory := c.Resources.Requests.Memory(); memory != nil {
			req += memory.Value()
		}
	}
	outOfMemory = req > info.MemTotal
	return
}
// DoDev is the entry point of the dev command. It establishes the
// cluster connection (either from the host or from a helper container),
// validates the requested workload, parses the docker run flags into a
// container config, and hands over to Options.Main which starts the
// local containers.
func DoDev(devOptions *Options, flags *pflag.FlagSet, f cmdutil.Factory) error {
	connect := handler.ConnectOptions{
		Headers:     devOptions.Headers,
		Workloads:   []string{devOptions.Workload},
		ExtraCIDR:   devOptions.ExtraCIDR,
		ExtraDomain: devOptions.ExtraDomain,
		Engine:      devOptions.Engine,
	}
	cli, dockerCli, err := GetClient()
	if err != nil {
		return err
	}
	// when joining another container's network namespace, that container
	// must exist and be running
	mode := container.NetworkMode(devOptions.Copts.netMode.NetworkMode())
	if mode.IsContainer() {
		var inspect types.ContainerJSON
		inspect, err = cli.ContainerInspect(context.Background(), mode.ConnectedContainer())
		if err != nil {
			return err
		}
		if inspect.State == nil {
			return fmt.Errorf("can not get container status, please make contianer name is valid")
		}
		if !inspect.State.Running {
			return fmt.Errorf("container %s status is %s, expect is running, please make sure your outer docker name is correct", mode.ConnectedContainer(), inspect.State.Status)
		}
	}
	if err = connect.InitClient(f); err != nil {
		return err
	}
	if err = connect.PreCheckResource(); err != nil {
		return err
	}
	// exactly one workload is supported per dev session
	if len(connect.Workloads) > 1 {
		return fmt.Errorf("can only dev one workloads at same time, workloads: %v", connect.Workloads)
	}
	if len(connect.Workloads) < 1 {
		return fmt.Errorf("you must provide resource to dev, workloads : %v is invaild", connect.Workloads)
	}
	var platform *specs.Platform
	if devOptions.Options.Platform != "" {
		p, err := platforms.Parse(devOptions.Options.Platform)
		if err != nil {
			return pkgerr.Wrap(err, "error parsing specified platform")
		}
		platform = &p
	}
	devOptions.Workload = connect.Workloads[0]
	// if no-proxy is true, not needs to intercept traffic
	if devOptions.NoProxy {
		if len(connect.Headers) != 0 {
			return fmt.Errorf("not needs to provide headers if is no-proxy mode")
		}
		connect.Workloads = []string{}
	}
	path, err := connect.GetKubeconfigPath()
	if err != nil {
		return err
	}
	switch devOptions.ConnectMode {
	case ConnectModeHost:
		// connect from the host; the deferred empty select deliberately
		// blocks forever after cleanup to keep the process alive
		defer func() {
			handler.Cleanup(syscall.SIGQUIT)
			select {}
		}()
		if err = connect.DoConnect(); err != nil {
			log.Errorln(err)
			return err
		}
	case ConnectModeContainer:
		// connect via a privileged helper container; wait for its logs to
		// report readiness, then join its network namespace
		var connectContainer *RunConfig
		connectContainer, err = createConnectContainer(*devOptions, connect, path, err, cli, platform)
		if err != nil {
			return err
		}
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		var id string
		if id, err = run(ctx, connectContainer, cli, dockerCli); err != nil {
			return err
		}
		// on interrupt: stop the helper container and dump its last logs
		h := interrupt.New(func(signal os.Signal) {
			os.Exit(0)
		}, func() {
			cancel()
			_ = cli.ContainerKill(context.Background(), id, "SIGTERM")
			_ = runLogsSinceNow(dockerCli, id)
		})
		go h.Run(func() error { select {} })
		defer h.Close()
		if err = runLogsWaitRunning(ctx, dockerCli, id); err != nil {
			// interrupt by signal KILL
			if ctx.Err() == context.Canceled {
				return nil
			}
			return err
		}
		// all subsequent containers share the helper's network namespace
		if err = devOptions.Copts.netMode.Set("container:" + id); err != nil {
			return err
		}
	default:
		return fmt.Errorf("unsupport connect mode: %s", devOptions.ConnectMode)
	}
	var tempContainerConfig *containerConfig
	{
		// translate the raw docker run flags into a container config,
		// folding in proxy env vars from the docker config file
		if err := validatePullOpt(devOptions.Options.Pull); err != nil {
			return err
		}
		proxyConfig := dockerCli.ConfigFile().ParseProxyConfig(dockerCli.Client().DaemonHost(), opts.ConvertKVStringsToMapWithNil(devOptions.Copts.env.GetAll()))
		newEnv := []string{}
		for k, v := range proxyConfig {
			if v == nil {
				newEnv = append(newEnv, k)
			} else {
				newEnv = append(newEnv, fmt.Sprintf("%s=%s", k, *v))
			}
		}
		devOptions.Copts.env = *opts.NewListOptsRef(&newEnv, nil)
		tempContainerConfig, err = parse(flags, devOptions.Copts, dockerCli.ServerInfo().OSType)
		// just in case the parse does not exit
		if err != nil {
			return err
		}
		if err = validateAPIVersion(tempContainerConfig, dockerCli.Client().ClientVersion()); err != nil {
			return err
		}
	}
	devOptions.Namespace = connect.Namespace
	err = devOptions.Main(context.Background(), cli, dockerCli, tempContainerConfig)
	if err != nil {
		log.Errorln(err)
	}
	return err
}
// createConnectContainer builds the RunConfig for the helper container
// that establishes the tunnel: "kubevpn connect" in no-proxy mode,
// otherwise "kubevpn proxy <workload>" with the header selectors. The
// container is privileged, auto-removed, attached to the shared kubevpn
// bridge network, and gets the local kubeconfig bind-mounted at
// /root/.kube/config.
//
// The err parameter is kept for signature compatibility with the caller;
// it is overwritten before it is ever read.
func createConnectContainer(devOptions Options, connect handler.ConnectOptions, path string, err error, cli *client.Client, platform *specs.Platform) (*RunConfig, error) {
	var entrypoint []string
	if devOptions.NoProxy {
		entrypoint = []string{"kubevpn", "connect", "-n", connect.Namespace, "--kubeconfig", "/root/.kube/config", "--image", config.Image}
	} else {
		entrypoint = []string{"kubevpn", "proxy", connect.Workloads[0], "-n", connect.Namespace, "--kubeconfig", "/root/.kube/config", "--image", config.Image}
		for k, v := range connect.Headers {
			entrypoint = append(entrypoint, "--headers", fmt.Sprintf("%s=%s", k, v))
		}
	}
	// extra CIDRs/domains are forwarded identically in both modes
	// (previously duplicated in each branch)
	for _, v := range connect.ExtraCIDR {
		entrypoint = append(entrypoint, "--extra-cidr", v)
	}
	for _, v := range connect.ExtraDomain {
		entrypoint = append(entrypoint, "--extra-domain", v)
	}
	runConfig := &container.Config{
		User:            "root",
		AttachStdin:     false,
		AttachStdout:    false,
		AttachStderr:    false,
		ExposedPorts:    nil,
		StdinOnce:       false,
		Env:             []string{fmt.Sprintf("%s=1", config.EnvStartSudoKubeVPNByKubeVPN)},
		Cmd:             []string{},
		Healthcheck:     nil,
		ArgsEscaped:     false,
		Image:           config.Image,
		Volumes:         nil,
		Entrypoint:      entrypoint,
		NetworkDisabled: false,
		MacAddress:      "",
		OnBuild:         nil,
		StopSignal:      "",
		StopTimeout:     nil,
		Shell:           nil,
	}
	hostConfig := &container.HostConfig{
		Binds:           []string{fmt.Sprintf("%s:%s", path, "/root/.kube/config")},
		LogConfig:       container.LogConfig{},
		PortBindings:    nil,
		RestartPolicy:   container.RestartPolicy{},
		AutoRemove:      true,
		VolumeDriver:    "",
		VolumesFrom:     nil,
		ConsoleSize:     [2]uint{},
		CapAdd:          strslice.StrSlice{"SYS_PTRACE", "SYS_ADMIN"}, // for dlv
		CgroupnsMode:    "",
		ExtraHosts:      nil,
		GroupAdd:        nil,
		IpcMode:         "",
		Cgroup:          "",
		Links:           nil,
		OomScoreAdj:     0,
		PidMode:         "",
		Privileged:      true,
		PublishAllPorts: false,
		ReadonlyRootfs:  false,
		SecurityOpt:     []string{"apparmor=unconfined", "seccomp=unconfined"},
		StorageOpt:      nil,
		Tmpfs:           nil,
		UTSMode:         "",
		UsernsMode:      "",
		ShmSize:         0,
		Sysctls:         map[string]string{"net.ipv6.conf.all.disable_ipv6": strconv.Itoa(0)},
		Runtime:         "",
		Isolation:       "",
		Resources:       container.Resources{},
		MaskedPaths:     nil,
		ReadonlyPaths:   nil,
		Init:            nil,
	}
	var suffix string
	// NOTE(review): on uuid failure the suffix stays empty, yielding a
	// non-unique container name — presumably acceptable; confirm.
	if newUUID, err := uuid.NewUUID(); err == nil {
		suffix = strings.ReplaceAll(newUUID.String(), "-", "")[:5]
	}
	var kubevpnNetwork string
	kubevpnNetwork, err = createKubevpnNetwork(context.Background(), cli)
	if err != nil {
		return nil, err
	}
	name := fmt.Sprintf("%s_%s_%s", "kubevpn", "local", suffix)
	c := &RunConfig{
		config:     runConfig,
		hostConfig: hostConfig,
		networkingConfig: &network.NetworkingConfig{
			EndpointsConfig: map[string]*network.EndpointSettings{name: {
				NetworkID: kubevpnNetwork,
			}},
		},
		platform:         platform,
		containerName:    name,
		k8sContainerName: name,
	}
	return c, nil
}
// runLogsWaitRunning streams the container's logs to stdout until the
// marker text "enjoy it" appears (the kubevpn ready banner), then
// returns nil; if the log copy fails first, that error is returned.
//
// NOTE(review): buf is written by the copy goroutine (via MultiWriter)
// and read by the ticker goroutine with no synchronization — this looks
// like a data race on bytes.Buffer; confirm and guard with a mutex.
// Also, when the error path wins the select, the ticker goroutine keeps
// polling forever (goroutine leak).
func runLogsWaitRunning(ctx context.Context, dockerCli command.Cli, container string) error {
	c, err := dockerCli.Client().ContainerInspect(ctx, container)
	if err != nil {
		return err
	}
	options := types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Follow:     true,
	}
	responseBody, err := dockerCli.Client().ContainerLogs(ctx, c.ID, options)
	if err != nil {
		return err
	}
	defer responseBody.Close()
	// tee the log stream into buf (for the marker scan) and stdout
	buf := bytes.NewBuffer(nil)
	writer := io.MultiWriter(buf, dockerCli.Out())
	var errChan = make(chan error)
	var stopChan = make(chan struct{})
	// poll once per second for the ready marker
	go func() {
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()
		for range ticker.C {
			if strings.Contains(buf.String(), "enjoy it") {
				close(stopChan)
				return
			}
		}
	}()
	go func() {
		var err error
		if c.Config.Tty {
			_, err = io.Copy(writer, responseBody)
		} else {
			// non-TTY logs are stdcopy-multiplexed
			_, err = stdcopy.StdCopy(writer, dockerCli.Err(), responseBody)
		}
		if err != nil {
			errChan <- err
		}
	}()
	select {
	case err = <-errChan:
		return err
	case <-stopChan:
		return nil
	}
}
// runLogsSinceNow follows the most recent logs (since "0m") of the given
// container, writing them to the CLI's stdout/stderr until the stream
// ends.
func runLogsSinceNow(dockerCli command.Cli, container string) error {
	ctx := context.Background()
	c, err := dockerCli.Client().ContainerInspect(ctx, container)
	if err != nil {
		return err
	}
	responseBody, err := dockerCli.Client().ContainerLogs(ctx, c.ID, types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Since:      "0m",
		Follow:     true,
	})
	if err != nil {
		return err
	}
	defer responseBody.Close()
	// non-TTY logs arrive stdcopy-multiplexed; TTY logs are a raw stream
	if !c.Config.Tty {
		_, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody)
		return err
	}
	_, err = io.Copy(dockerCli.Out(), responseBody)
	return err
}
// runKill sends SIGTERM to each named container in parallel, printing
// the name of every container signalled successfully. Failures are
// collected and returned as a single newline-joined error.
func runKill(dockerCli command.Cli, containers ...string) error {
	ctx := context.Background()
	errChan := parallelOperation(ctx, append([]string{}, containers...), func(ctx context.Context, container string) error {
		return dockerCli.Client().ContainerKill(ctx, container, "SIGTERM")
	})
	var errs []string
	for _, name := range containers {
		err := <-errChan
		if err == nil {
			fmt.Fprintln(dockerCli.Out(), name)
			continue
		}
		errs = append(errs, err.Error())
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.New(strings.Join(errs, "\n"))
}
// createKubevpnNetwork returns the ID of the kubevpn docker bridge
// network, creating it (with the configured CIDR/gateway and an owner
// label) when it does not exist yet. If creation fails with "forbidden"
// — e.g. a concurrent creator won the race — it looks the network up
// again before giving up.
func createKubevpnNetwork(ctx context.Context, cli *client.Client) (string, error) {
	by := map[string]string{"owner": config.ConfigMapPodTrafficManager}
	// reuse an existing network carrying our owner label
	list, _ := cli.NetworkList(ctx, types.NetworkListOptions{})
	for _, resource := range list {
		if reflect.DeepEqual(resource.Labels, by) {
			return resource.ID, nil
		}
	}
	create, err := cli.NetworkCreate(ctx, config.ConfigMapPodTrafficManager, types.NetworkCreate{
		Driver: "bridge",
		Scope:  "local",
		IPAM: &network.IPAM{
			Driver:  "",
			Options: nil,
			Config: []network.IPAMConfig{
				{
					Subnet:  config.DockerCIDR.String(),
					Gateway: config.DockerRouterIP.String(),
				},
			},
		},
		//Options: map[string]string{"--icc": "", "--ip-masq": ""},
		Labels: by,
	})
	if err != nil {
		// lost a creation race: someone else made it first, find it
		if errdefs.IsForbidden(err) {
			list, _ = cli.NetworkList(ctx, types.NetworkListOptions{})
			for _, resource := range list {
				if reflect.DeepEqual(resource.Labels, by) {
					return resource.ID, nil
				}
			}
		}
		return "", err
	}
	return create.ID, nil
}

97
pkg/dev/option.go Normal file
View File

@@ -0,0 +1,97 @@
package dev
import (
"github.com/containerd/containerd/platforms"
"github.com/docker/docker/api/types/network"
"github.com/pkg/errors"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
// mergeDockerOptions locates the container selected by the user (moving
// it to the front of the list) and merges the command-line docker
// options in tempContainerConfig into its run configuration. The
// command-line values take precedence; the declarations that originate
// from the deployment are kept/appended as defaults.
// (Translated from the original Chinese comment.)
func mergeDockerOptions(r ConfigList, copts *Options, tempContainerConfig *containerConfig) error {
	// move the k8s container named by --container to index 0
	if copts.ContainerName != "" {
		var index = -1
		for i, config := range r {
			if config.k8sContainerName == copts.ContainerName {
				index = i
				break
			}
		}
		if index != -1 {
			r[0], r[index] = r[index], r[0]
		}
	}
	config := r[0]
	config.Options = copts.Options
	config.Copts = copts.Copts
	if copts.DockerImage != "" {
		config.config.Image = copts.DockerImage
	}
	if copts.Options.Name != "" {
		config.containerName = copts.Options.Name
	} else {
		config.Options.Name = config.containerName
	}
	if copts.Options.Platform != "" {
		p, err := platforms.Parse(copts.Options.Platform)
		if err != nil {
			return errors.Wrap(err, "error parsing specified platform")
		}
		config.platform = &p
	}
	// host config: the command-line lists are extended with the pod's values
	tempContainerConfig.HostConfig.CapAdd = append(tempContainerConfig.HostConfig.CapAdd, config.hostConfig.CapAdd...)
	tempContainerConfig.HostConfig.SecurityOpt = append(tempContainerConfig.HostConfig.SecurityOpt, config.hostConfig.SecurityOpt...)
	tempContainerConfig.HostConfig.VolumesFrom = append(tempContainerConfig.HostConfig.VolumesFrom, config.hostConfig.VolumesFrom...)
	tempContainerConfig.HostConfig.DNS = append(tempContainerConfig.HostConfig.DNS, config.hostConfig.DNS...)
	tempContainerConfig.HostConfig.DNSOptions = append(tempContainerConfig.HostConfig.DNSOptions, config.hostConfig.DNSOptions...)
	tempContainerConfig.HostConfig.DNSSearch = append(tempContainerConfig.HostConfig.DNSSearch, config.hostConfig.DNSSearch...)
	tempContainerConfig.HostConfig.Mounts = append(tempContainerConfig.HostConfig.Mounts, config.hostConfig.Mounts...)
	// port bindings are merged per port
	for port, bindings := range config.hostConfig.PortBindings {
		if v, ok := tempContainerConfig.HostConfig.PortBindings[port]; ok {
			tempContainerConfig.HostConfig.PortBindings[port] = append(v, bindings...)
		} else {
			tempContainerConfig.HostConfig.PortBindings[port] = bindings
		}
	}
	config.hostConfig = tempContainerConfig.HostConfig
	config.networkingConfig.EndpointsConfig = util.Merge[string, *network.EndpointSettings](tempContainerConfig.NetworkingConfig.EndpointsConfig, config.networkingConfig.EndpointsConfig)
	c := tempContainerConfig.Config
	var entrypoint = config.config.Entrypoint
	var args = config.config.Cmd
	// if special --entrypoint, then use it
	if len(c.Entrypoint) != 0 {
		entrypoint = c.Entrypoint
		args = c.Cmd
	}
	if len(c.Cmd) != 0 {
		args = c.Cmd
	}
	c.Entrypoint = entrypoint
	c.Cmd = args
	// pod env is prepended so the command-line values come after it
	c.Env = append(config.config.Env, c.Env...)
	c.Image = config.config.Image
	if c.User == "" {
		c.User = config.config.User
	}
	c.Labels = util.Merge[string, string](config.config.Labels, c.Labels)
	c.Volumes = util.Merge[string, struct{}](c.Volumes, config.config.Volumes)
	if c.WorkingDir == "" {
		c.WorkingDir = config.config.WorkingDir
	}
	// keep pod-exposed ports not already exposed on the command line
	for k, v := range config.config.ExposedPorts {
		if _, found := c.ExposedPorts[k]; !found {
			c.ExposedPorts[k] = v
		}
	}
	config.config = c
	return nil
}

451
pkg/dev/run.go Normal file
View File

@@ -0,0 +1,451 @@
package dev
import (
"context"
"errors"
"fmt"
"io"
"math/rand"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/container"
"github.com/docker/cli/cli/command/image"
"github.com/docker/cli/cli/streams"
"github.com/docker/cli/cli/trust"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
typescommand "github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
apiclient "github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/moby/term"
dockerterm "github.com/moby/term"
v12 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
// run creates and starts a single container described by runConfig,
// pulling the image first when it is missing locally or was built for a
// different os/arch than requested. It then polls until the container is
// running (or has died/disappeared) and logs the host port mappings.
// Any failure after creation force-removes the half-created container.
func run(ctx context.Context, runConfig *RunConfig, cli *client.Client, c *command.DockerCli) (id string, err error) {
	// fix: removed dead `rand.New(rand.NewSource(...))` whose result was
	// discarded — it neither seeds the global source nor is used here.
	var config = runConfig.config
	var hostConfig = runConfig.hostConfig
	var platform = runConfig.platform
	var networkConfig = runConfig.networkingConfig
	var name = runConfig.containerName
	// pull if the image is absent or does not match the requested platform
	var needPull bool
	var img types.ImageInspect
	img, _, err = cli.ImageInspectWithRaw(ctx, config.Image)
	if err != nil {
		needPull = true
		err = nil
	}
	if platform != nil && platform.Architecture != "" && platform.OS != "" {
		if img.Os != platform.OS || img.Architecture != platform.Architecture {
			needPull = true
		}
	}
	if needPull {
		if err = PullImage(ctx, runConfig.platform, cli, c, config.Image); err != nil {
			return
		}
	}
	var create typescommand.CreateResponse
	create, err = cli.ContainerCreate(ctx, config, hostConfig, networkConfig, platform, name)
	if err != nil {
		err = fmt.Errorf("failed to create container %s, err: %s", name, err)
		return
	}
	id = create.ID
	log.Infof("Created container: %s", name)
	// do not leave a dead container behind when any later step fails
	defer func() {
		if err != nil {
			_ = cli.ContainerRemove(ctx, id, types.ContainerRemoveOptions{Force: true})
		}
	}()
	err = cli.ContainerStart(ctx, create.ID, types.ContainerStartOptions{})
	if err != nil {
		err = fmt.Errorf("failed to startup container %s: %v", name, err)
		return
	}
	log.Infof("Wait container %s to be running...", name)
	// poll once per second until running, dead, or not found
	chanStop := make(chan struct{})
	var inspect types.ContainerJSON
	var once = &sync.Once{}
	wait.Until(func() {
		inspect, err = cli.ContainerInspect(ctx, create.ID)
		if err != nil && errdefs.IsNotFound(err) {
			once.Do(func() { close(chanStop) })
			return
		}
		if err != nil {
			return
		}
		if inspect.State != nil && (inspect.State.Status == "exited" || inspect.State.Status == "dead" || inspect.State.Dead) {
			once.Do(func() { close(chanStop) })
			// fix: fmt.Errorf instead of errors.New(fmt.Sprintf(...)) (S1028)
			err = fmt.Errorf("container status: %s", inspect.State.Status)
			return
		}
		if inspect.State != nil && inspect.State.Running {
			once.Do(func() { close(chanStop) })
			return
		}
	}, time.Second, chanStop)
	if err != nil {
		err = fmt.Errorf("failed to wait container to be ready: %v", err)
		return
	}
	// print port mapping to host
	var empty = true
	var str string
	if inspect.NetworkSettings != nil && inspect.NetworkSettings.Ports != nil {
		var list []string
		for port, bindings := range inspect.NetworkSettings.Ports {
			var p []string
			for _, binding := range bindings {
				if binding.HostPort != "" {
					p = append(p, binding.HostPort)
					empty = false
				}
			}
			list = append(list, fmt.Sprintf("%s:%s", port, strings.Join(p, ",")))
		}
		str = fmt.Sprintf("Container %s is running on port %s now", name, strings.Join(list, " "))
	}
	if !empty {
		log.Infoln(str)
	} else {
		log.Infof("Container %s is running now", name)
	}
	return
}
// runFirst creates and starts the primary (developed) container the way
// `docker run` does: it honors detach/attach flags, hijacks the standard
// streams when attached, monitors the TTY size, and waits for either the
// detach escape sequence or the container's exit status. After a clean
// start it polls the container state and logs the host port mappings,
// mirroring run().
func runFirst(ctx context.Context, runConfig *RunConfig, cli *apiclient.Client, dockerCli *command.DockerCli) (id string, err error) {
	// NOTE(review): the result of rand.New is discarded here — appears to
	// be dead code; confirm.
	rand.New(rand.NewSource(time.Now().UnixNano()))
	// remove the container again when anything below fails
	defer func() {
		if err != nil {
			_ = cli.ContainerRemove(ctx, id, types.ContainerRemoveOptions{Force: true})
		}
	}()
	stdout, stderr := dockerCli.Out(), dockerCli.Err()
	client := dockerCli.Client()
	runConfig.config.ArgsEscaped = false
	if err := dockerCli.In().CheckTty(runConfig.config.AttachStdin, runConfig.config.Tty); err != nil {
		return id, err
	}
	if !runConfig.Options.Detach {
		if err := dockerCli.In().CheckTty(runConfig.config.AttachStdin, runConfig.config.Tty); err != nil {
			return id, err
		}
	} else {
		// detached containers can not also be attached to
		if runConfig.Copts.attach.Len() != 0 {
			return id, errors.New("Conflicting options: -a and -d")
		}
		runConfig.config.AttachStdin = false
		runConfig.config.AttachStdout = false
		runConfig.config.AttachStderr = false
		runConfig.config.StdinOnce = false
	}
	ctx, cancelFun := context.WithCancel(context.Background())
	defer cancelFun()
	createResponse, err := createContainer(ctx, dockerCli, &containerConfig{
		Config:           runConfig.config,
		HostConfig:       runConfig.hostConfig,
		NetworkingConfig: runConfig.networkingConfig,
	}, &runConfig.Options.createOptions)
	if err != nil {
		return "", err
	}
	log.Infof("Created container: %s", runConfig.containerName)
	var (
		waitDisplayID chan struct{}
		errCh         chan error
	)
	if !runConfig.config.AttachStdout && !runConfig.config.AttachStderr {
		// Make this asynchronous to allow the client to write to stdin before having to read the ID
		waitDisplayID = make(chan struct{})
		go func() {
			defer close(waitDisplayID)
			fmt.Fprintln(stdout, createResponse.ID)
		}()
	}
	attach := runConfig.config.AttachStdin || runConfig.config.AttachStdout || runConfig.config.AttachStderr
	if attach {
		// hijack the streams; the returned func detaches them again
		close, err := attachContainer(ctx, dockerCli, &errCh, runConfig.config, createResponse.ID)
		if err != nil {
			return id, err
		}
		defer close()
	}
	statusChan := waitExitOrRemoved(ctx, dockerCli, createResponse.ID, runConfig.Copts.autoRemove)
	// start the container
	if err := client.ContainerStart(ctx, createResponse.ID, types.ContainerStartOptions{}); err != nil {
		// If we have hijackedIOStreamer, we should notify
		// hijackedIOStreamer we are going to exit and wait
		// to avoid the terminal are not restored.
		if attach {
			cancelFun()
			<-errCh
		}
		reportError(stderr, "run", err.Error(), false)
		if runConfig.Copts.autoRemove {
			// wait container to be removed
			<-statusChan
		}
		return id, runStartContainerErr(err)
	}
	if (runConfig.config.AttachStdin || runConfig.config.AttachStdout || runConfig.config.AttachStderr) && runConfig.config.Tty && dockerCli.Out().IsTerminal() {
		if err := container.MonitorTtySize(ctx, dockerCli, createResponse.ID, false); err != nil {
			fmt.Fprintln(stderr, "Error monitoring TTY size:", err)
		}
	}
	if errCh != nil {
		if err := <-errCh; err != nil {
			if _, ok := err.(term.EscapeError); ok {
				// The user entered the detach escape sequence.
				return id, nil
			}
			logrus.Debugf("Error hijack: %s", err)
			return id, err
		}
	}
	// Detached mode: wait for the id to be displayed and return.
	if !runConfig.config.AttachStdout && !runConfig.config.AttachStderr {
		// Detached mode
		<-waitDisplayID
		return id, nil
	}
	status := <-statusChan
	if status != 0 {
		return id, errors.New(strconv.Itoa(status))
	}
	log.Infof("Wait container %s to be running...", runConfig.containerName)
	// poll once per second until running, dead, or not found
	chanStop := make(chan struct{})
	var inspect types.ContainerJSON
	var once = &sync.Once{}
	wait.Until(func() {
		inspect, err = cli.ContainerInspect(ctx, createResponse.ID)
		if err != nil && errdefs.IsNotFound(err) {
			once.Do(func() { close(chanStop) })
			return
		}
		if err != nil {
			return
		}
		if inspect.State != nil && (inspect.State.Status == "exited" || inspect.State.Status == "dead" || inspect.State.Dead) {
			once.Do(func() { close(chanStop) })
			err = errors.New(fmt.Sprintf("container status: %s", inspect.State.Status))
			return
		}
		if inspect.State != nil && inspect.State.Running {
			once.Do(func() { close(chanStop) })
			return
		}
	}, time.Second, chanStop)
	if err != nil {
		err = fmt.Errorf("failed to wait container to be ready: %v", err)
		return
	}
	// print port mapping to host
	var empty = true
	var str string
	if inspect.NetworkSettings != nil && inspect.NetworkSettings.Ports != nil {
		var list []string
		for port, bindings := range inspect.NetworkSettings.Ports {
			var p []string
			for _, binding := range bindings {
				if binding.HostPort != "" {
					p = append(p, binding.HostPort)
					empty = false
				}
			}
			list = append(list, fmt.Sprintf("%s:%s", port, strings.Join(p, ",")))
		}
		str = fmt.Sprintf("Container %s is running on port %s now", runConfig.containerName, strings.Join(list, " "))
	}
	if !empty {
		log.Infoln(str)
	} else {
		log.Infof("Container %s is running now", runConfig.containerName)
	}
	return
}
// PullImage pulls img through the docker API, resolving registry
// credentials the same way the docker CLI does, and renders the pull
// progress as JSON messages on the local stdout. When platform has both
// OS and Architecture set, the pull is narrowed to that os/arch.
func PullImage(ctx context.Context, platform *v12.Platform, cli *client.Client, c *command.DockerCli, img string) error {
	var readCloser io.ReadCloser
	var plat string
	if platform != nil && platform.Architecture != "" && platform.OS != "" {
		plat = fmt.Sprintf("%s/%s", platform.OS, platform.Architecture)
	}
	distributionRef, err := reference.ParseNormalizedNamed(img)
	if err != nil {
		return fmt.Errorf("can not parse image name %s: %v", img, err)
	}
	// resolve credentials for the image's registry
	var imgRefAndAuth trust.ImageRefAndAuth
	imgRefAndAuth, err = trust.GetImageReferencesAndAuth(ctx, nil, image.AuthResolver(c), distributionRef.String())
	if err != nil {
		return fmt.Errorf("can not get image auth: %v", err)
	}
	var encodedAuth string
	encodedAuth, err = command.EncodeAuthToBase64(*imgRefAndAuth.AuthConfig())
	if err != nil {
		return fmt.Errorf("can not encode auth config to base64:%v", err)
	}
	requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(c, imgRefAndAuth.RepoInfo().Index, "pull")
	readCloser, err = cli.ImagePull(ctx, img, types.ImagePullOptions{
		All:           false,
		RegistryAuth:  encodedAuth,
		PrivilegeFunc: requestPrivilege,
		Platform:      plat,
	})
	if err != nil {
		err = fmt.Errorf("can not pull image %s, err: %s, please make sure image is exist and can be pulled from local", img, err)
		return err
	}
	defer readCloser.Close()
	// stream the pull progress to the terminal
	_, stdout, _ := dockerterm.StdStreams()
	out := streams.NewOut(stdout)
	err = jsonmessage.DisplayJSONMessagesToStream(readCloser, out, nil)
	if err != nil {
		err = fmt.Errorf("can not display message, err: %v", err)
		return err
	}
	return nil
}
// terminal attaches an interactive TTY exec session to container c,
// preferring bash and falling back to sh when bash is absent.
func terminal(c string, cli *command.DockerCli) error {
	opts := container.NewExecOptions()
	opts.Interactive = true
	opts.TTY = true
	opts.Container = c
	opts.Command = []string{"sh", "-c", `command -v bash >/dev/null && exec bash || exec sh`}
	return container.RunExec(cli, opts)
}
// TransferImage makes config.Image available to the cluster:
//  1. without an ssh config: pull config.OriginImage, tag it as config.Image
//     and push the tag straight to the registry
//  2. with an ssh config: pull and tag, save the image to a local tarball,
//     scp it to the remote host, then load and push it from there
func TransferImage(ctx context.Context, conf *util.SshConfig) error {
	cli, c, err := GetClient()
	if err != nil {
		return fmt.Errorf("failed to get docker client: %v", err)
	}
	// todo add flags? or detect k8s node runtime ?
	err = PullImage(ctx, &v12.Platform{
		Architecture: "amd64",
		OS:           "linux",
	}, cli, c, config.OriginImage)
	if err != nil {
		return fmt.Errorf("failed to pull image: %v", err)
	}
	err = cli.ImageTag(ctx, config.OriginImage, config.Image)
	if err != nil {
		return fmt.Errorf("failed to tag image %s to %s: %v", config.OriginImage, config.Image, err)
	}
	// no ssh config specified: push the image directly to the registry
	// (the original comment claimed the opposite condition)
	if conf.ConfigAlias == "" && conf.Addr == "" {
		var distributionRef reference.Named
		distributionRef, err = reference.ParseNormalizedNamed(config.Image)
		if err != nil {
			return fmt.Errorf("can not parse image name %s: %v", config.Image, err)
		}
		var imgRefAndAuth trust.ImageRefAndAuth
		imgRefAndAuth, err = trust.GetImageReferencesAndAuth(ctx, nil, image.AuthResolver(c), distributionRef.String())
		if err != nil {
			return fmt.Errorf("can not get image auth: %v", err)
		}
		var encodedAuth string
		encodedAuth, err = command.EncodeAuthToBase64(*imgRefAndAuth.AuthConfig())
		if err != nil {
			return fmt.Errorf("can not encode auth config to base64: %v", err)
		}
		requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(c, imgRefAndAuth.RepoInfo().Index, "push")
		var readCloser io.ReadCloser
		readCloser, err = cli.ImagePush(ctx, config.Image, types.ImagePushOptions{
			RegistryAuth:  encodedAuth,
			PrivilegeFunc: requestPrivilege,
		})
		if err != nil {
			err = fmt.Errorf("can not push image %s, err: %v", config.Image, err)
			return err
		}
		defer readCloser.Close()
		_, stdout, _ := dockerterm.StdStreams()
		out := streams.NewOut(stdout)
		err = jsonmessage.DisplayJSONMessagesToStream(readCloser, out, nil)
		if err != nil {
			err = fmt.Errorf("can not display message, err: %v", err)
			return err
		}
		return nil
	}
	// transfer image to remote
	var responseReader io.ReadCloser
	responseReader, err = cli.ImageSave(ctx, []string{config.Image})
	if err != nil {
		return err
	}
	defer responseReader.Close()
	file, err := os.CreateTemp("", "*.tar")
	if err != nil {
		return err
	}
	// register cleanup immediately: the original only removed the tarball
	// after a successful copy, leaking the temp file on error paths
	defer os.Remove(file.Name())
	log.Infof("saving image %s to temp file %s", config.Image, file.Name())
	if _, err = io.Copy(file, responseReader); err != nil {
		_ = file.Close() // best-effort: the copy error is the one worth reporting
		return err
	}
	if err = file.Close(); err != nil {
		return err
	}
	log.Infof("Transfering image %s", config.Image)
	// load+push on the remote via docker, falling back to nerdctl
	err = util.SCP(conf, file.Name(), []string{
		fmt.Sprintf(
			"(docker load image -i kubevpndir/%s && docker push %s) || (nerdctl image load -i kubevpndir/%s && nerdctl image push %s)",
			filepath.Base(file.Name()), config.Image,
			filepath.Base(file.Name()), config.Image,
		),
	}...)
	if err != nil {
		return err
	}
	log.Infof("Loaded image: %s", config.Image)
	return nil
}

162
pkg/dev/utils.go Normal file
View File

@@ -0,0 +1,162 @@
package dev
import (
"context"
"strconv"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/versions"
"github.com/sirupsen/logrus"
)
// waitExitOrRemoved returns a channel that delivers the container's exit code
// once it exits (or, when waitRemove is set, once it has been removed).
// A daemon-side wait error is reported as exit code 125.
func waitExitOrRemoved(ctx context.Context, dockerCli command.Cli, containerID string, waitRemove bool) <-chan int {
	if containerID == "" {
		// containerID can never be empty
		panic("Internal Error: waitExitOrRemoved needs a containerID as parameter")
	}
	// Older versions used the Events API, and even older versions did not
	// support server-side removal. This legacyWaitExitOrRemoved method
	// preserves that old behavior and any issues it may have.
	if versions.LessThan(dockerCli.Client().ClientVersion(), "1.30") {
		return legacyWaitExitOrRemoved(ctx, dockerCli, containerID, waitRemove)
	}
	cond := container.WaitConditionNextExit
	if waitRemove {
		cond = container.WaitConditionRemoved
	}
	resultC, errC := dockerCli.Client().ContainerWait(ctx, containerID, cond)
	exitCodeC := make(chan int)
	go func() {
		select {
		case res := <-resultC:
			if res.Error != nil {
				logrus.Errorf("Error waiting for container: %v", res.Error.Message)
				exitCodeC <- 125
				return
			}
			exitCodeC <- int(res.StatusCode)
		case err := <-errC:
			logrus.Errorf("error waiting for container: %v", err)
			exitCodeC <- 125
		}
	}()
	return exitCodeC
}
// legacyWaitExitOrRemoved reproduces the pre-API-1.30 wait behavior: it
// watches the daemon's event stream for this container and reports the exit
// code on the returned channel. When waitRemove is set it waits until the
// container is destroyed, falling back to client-side removal for daemons
// older than API 1.25. 125 is reported when no usable exit code was observed.
func legacyWaitExitOrRemoved(ctx context.Context, dockerCli command.Cli, containerID string, waitRemove bool) <-chan int {
	var removeErr error
	statusChan := make(chan int)
	exitCode := 125 // default until a "die" event with a parsable exitCode arrives
	// Get events via Events API
	f := filters.NewArgs()
	f.Add("type", "container")
	f.Add("container", containerID)
	options := types.EventsOptions{
		Filters: f,
	}
	eventCtx, cancel := context.WithCancel(ctx)
	eventq, errq := dockerCli.Client().Events(eventCtx, options)
	// eventProcessor returns true once the terminal event for the requested
	// condition ("die" for plain exit, "destroy" for removal) has been seen.
	eventProcessor := func(e events.Message) bool {
		stopProcessing := false
		switch e.Status {
		case "die":
			if v, ok := e.Actor.Attributes["exitCode"]; ok {
				code, cerr := strconv.Atoi(v)
				if cerr != nil {
					logrus.Errorf("failed to convert exitcode '%q' to int: %v", v, cerr)
				} else {
					exitCode = code
				}
			}
			if !waitRemove {
				stopProcessing = true
			} else {
				// If we are talking to an older daemon, `AutoRemove` is not supported.
				// We need to fall back to the old behavior, which is client-side removal
				if versions.LessThan(dockerCli.Client().ClientVersion(), "1.25") {
					go func() {
						removeErr = dockerCli.Client().ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{RemoveVolumes: true})
						if removeErr != nil {
							logrus.Errorf("error removing container: %v", removeErr)
							cancel() // cancel the event Q
						}
					}()
				}
			}
		case "detach":
			exitCode = 0
			stopProcessing = true
		case "destroy":
			stopProcessing = true
		}
		return stopProcessing
	}
	go func() {
		defer func() {
			statusChan <- exitCode // must always send an exit code or the caller will block
			cancel()
		}()
		for {
			select {
			case <-eventCtx.Done():
				// only exit here when the failed client-side removal above
				// canceled the event stream; otherwise keep draining events
				if removeErr != nil {
					return
				}
			case evt := <-eventq:
				if eventProcessor(evt) {
					return
				}
			case err := <-errq:
				logrus.Errorf("error getting events from daemon: %v", err)
				return
			}
		}
	}()
	return statusChan
}
func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, container string) error) chan error {
if len(containers) == 0 {
return nil
}
const defaultParallel int = 50
sem := make(chan struct{}, defaultParallel)
errChan := make(chan error)
// make sure result is printed in correct order
output := map[string]chan error{}
for _, c := range containers {
output[c] = make(chan error, 1)
}
go func() {
for _, c := range containers {
err := <-output[c]
errChan <- err
}
}()
go func() {
for _, c := range containers {
sem <- struct{}{} // Wait for active queue sem to drain.
go func(container string) {
output[container] <- op(ctx, container)
<-sem
}(c)
}
}()
return errChan
}

60
pkg/dns/coredns.go Normal file
View File

@@ -0,0 +1,60 @@
package dns
import (
"bytes"
"text/template"
)
// CoreFile is an in-memory Corefile implementing the caddy input contract
// for the embedded coredns server.
type CoreFile struct {
	Content []byte
}

// Body returns the raw Corefile contents.
func (c *CoreFile) Body() []byte {
	return c.Content
}

// Path returns a synthetic origin path for this in-memory input.
func (c *CoreFile) Path() string {
	return "CoreFile"
}

// ServerType reports the server type this input is intended for.
func (c *CoreFile) ServerType() string {
	return "dns"
}

// CoreFileTmpl carries the values substituted into the Corefile template:
// the cluster DNS upstream and the pre-existing local nameservers.
type CoreFileTmpl struct {
	UpstreamDNS string
	Nameservers string
}

// BuildCoreFile renders the Corefile template with corefileTmpl and wraps
// the result in a CoreFile: cluster.local queries go to the upstream DNS,
// everything else is tried against the local nameservers sequentially.
func BuildCoreFile(corefileTmpl CoreFileTmpl) (*CoreFile, error) {
	const tplText = `
.:53 {
    bind 127.0.0.1
    forward cluster.local {{ .UpstreamDNS }}
    forward . {{ .Nameservers }} {{ .UpstreamDNS }} {
        policy sequential
        max_concurrent 1
    }
    cache 30
    log
    errors
    reload
}`
	tpl, err := template.New("corefile").Parse(tplText)
	if err != nil {
		return nil, err
	}
	var rendered bytes.Buffer
	if err := tpl.Execute(&rendered, corefileTmpl); err != nil {
		return nil, err
	}
	return &CoreFile{Content: rendered.Bytes()}, nil
}

View File

@@ -3,24 +3,34 @@ package dns
import (
"bytes"
"context"
"fmt"
"net"
"os"
"sort"
"strings"
"text/tabwriter"
"time"
miekgdns "github.com/miekg/dns"
"github.com/pkg/errors"
v12 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
v13 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/utils/pointer"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
func GetDNSServiceIPFromPod(clientset *kubernetes.Clientset, restclient *rest.RESTClient, config *rest.Config, podName, namespace string) (*miekgdns.ClientConfig, error) {
var ipp []string
if ips, err := getDNSIPFromDnsPod(clientset); err == nil {
ipp = ips
}
resolvConfStr, err := util.Shell(clientset, restclient, config, podName, namespace, "cat /etc/resolv.conf")
resolvConfStr, err := util.Shell(clientset, restclient, config, podName, "", namespace, []string{"cat", "/etc/resolv.conf"})
if err != nil {
return nil, err
}
@@ -28,30 +38,185 @@ func GetDNSServiceIPFromPod(clientset *kubernetes.Clientset, restclient *rest.RE
if err != nil {
return nil, err
}
if len(ipp) != 0 {
resolvConf.Servers = append(resolvConf.Servers, make([]string, len(ipp))...)
copy(resolvConf.Servers[len(ipp):], resolvConf.Servers[:len(resolvConf.Servers)-len(ipp)])
for i := range ipp {
resolvConf.Servers[i] = ipp[i]
}
if ips, err := GetDNSIPFromDnsPod(clientset); err == nil && len(ips) != 0 {
resolvConf.Servers = ips
}
// linux nameserver only support amount is 3, so if namespace too much, just use two, left one to system
if len(resolvConf.Servers) > 2 {
resolvConf.Servers = resolvConf.Servers[:2]
}
return resolvConf, nil
}
func getDNSIPFromDnsPod(clientset *kubernetes.Clientset) (ips []string, err error) {
serviceList, err := clientset.CoreV1().Pods(v1.NamespaceSystem).List(context.Background(), v1.ListOptions{
func GetDNSIPFromDnsPod(clientset *kubernetes.Clientset) (ips []string, err error) {
var serviceList *v12.ServiceList
serviceList, err = clientset.CoreV1().Services(v1.NamespaceSystem).List(context.Background(), v1.ListOptions{
LabelSelector: fields.OneTermEqualSelector("k8s-app", "kube-dns").String(),
})
if err != nil {
return
}
for _, pod := range serviceList.Items {
if pod.Status.Phase == v12.PodRunning && pod.DeletionTimestamp == nil {
ips = append(ips, pod.Status.PodIP)
for _, item := range serviceList.Items {
if len(item.Spec.ClusterIP) != 0 {
ips = append(ips, item.Spec.ClusterIP)
}
}
var podList *v12.PodList
podList, err = clientset.CoreV1().Pods(v1.NamespaceSystem).List(context.Background(), v1.ListOptions{
LabelSelector: fields.OneTermEqualSelector("k8s-app", "kube-dns").String(),
})
if err == nil {
for _, pod := range podList.Items {
if pod.Status.Phase == v12.PodRunning && pod.DeletionTimestamp == nil {
ips = append(ips, pod.Status.PodIP)
}
}
}
if len(ips) == 0 {
return nil, errors.New("")
err = errors.New("can not found any dns service ip")
return
}
return ips, nil
err = nil
return
}
// AddServiceNameToHosts keeps the local hosts file in sync with the cluster
// services: it writes one "ip domain" entry per service (plus the extra
// entries in hosts) and regenerates them whenever the service list changes,
// until ctx is canceled. Rewrites are rate-limited (token bucket, 0.2 qps).
func AddServiceNameToHosts(ctx context.Context, serviceInterface v13.ServiceInterface, hosts ...Entry) {
	rateLimiter := flowcontrol.NewTokenBucketRateLimiter(0.2, 1)
	defer rateLimiter.Stop()
	var last string
	var resourceVersion string
	serviceList, err := serviceInterface.List(ctx, v1.ListOptions{})
	// FIX: the original dereferenced serviceList unconditionally when starting
	// the watch below, panicking whenever this initial List failed.
	if err == nil {
		resourceVersion = serviceList.ResourceVersion
		if len(serviceList.Items) != 0 {
			entry := generateHostsEntry(serviceList.Items, hosts)
			if err = updateHosts(entry); err == nil {
				last = entry
			}
		}
	}
	for {
		select {
		case <-ctx.Done():
			return
		default:
			func() {
				w, err := serviceInterface.Watch(ctx, v1.ListOptions{
					Watch: true, TimeoutSeconds: pointer.Int64(30), ResourceVersion: resourceVersion,
				})
				if err != nil {
					// back off when the apiserver is unreachable or throttling
					if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) {
						time.Sleep(time.Second * 5)
					}
					return
				}
				defer w.Stop()
				for {
					select {
					case c, ok := <-w.ResultChan():
						if !ok {
							return // watch closed: restart it from the outer loop
						}
						if watch.Error == c.Type || watch.Bookmark == c.Type {
							continue
						}
						if !rateLimiter.TryAccept() {
							return
						}
						// re-list instead of patching incrementally: simpler and
						// the rate limiter keeps the apiserver load bounded
						list, err := serviceInterface.List(ctx, v1.ListOptions{})
						if err != nil {
							return
						}
						entry := generateHostsEntry(list.Items, hosts)
						if entry == last {
							continue
						}
						if err = updateHosts(entry); err != nil {
							return
						}
						last = entry
					}
				}
			}()
		}
	}
}
// updateHosts rewrites the hosts file: every line previously added by
// KubeVPN (any line containing "KubeVPN") is dropped, then str (if
// non-empty) is appended, and trailing blank lines are trimmed before the
// file is written back with mode 0644.
func updateHosts(str string) error {
	path := GetHostFile()
	content, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	// keep only the lines that were not added by KubeVPN
	var kept []string
	for _, line := range strings.Split(string(content), "\n") {
		if !strings.Contains(line, "KubeVPN") {
			kept = append(kept, line)
		}
	}
	out := strings.Join(kept, "\n")
	if str != "" {
		out += "\n" + str
	}
	// remove trailing empty lines
	out = strings.TrimRight(out, "\n")
	return os.WriteFile(path, []byte(out), 0644)
}
// Entry is a single hosts-file record mapping an IP address to a domain name.
type Entry struct {
	// IP is the address written in the first hosts-file column.
	IP string
	// Domain is the hostname mapped to IP.
	Domain string
}
// generateHostsEntry renders tab-aligned hosts-file lines (each tagged
// "# Add by KubeVPN") for every service in list plus the extra entries in
// hosts. Service entries are sorted descending by domain, then by IP; the
// "kubernetes" apiserver service itself is skipped.
func generateHostsEntry(list []v12.Service, hosts []Entry) string {
	const ServiceKubernetes = "kubernetes"
	var entries []Entry
	for _, svc := range list {
		if strings.EqualFold(svc.Name, ServiceKubernetes) {
			continue
		}
		// dedupe addresses and names via sets before pairing them up
		addrs := sets.New[string](svc.Spec.ClusterIPs...).Insert(svc.Spec.ExternalIPs...).UnsortedList()
		names := sets.New[string](svc.Name).Insert(svc.Spec.ExternalName).UnsortedList()
		for _, addr := range addrs {
			if net.ParseIP(addr) == nil {
				continue // skip non-IP values (e.g. "None" for headless services)
			}
			for _, name := range names {
				if name == "" {
					continue
				}
				entries = append(entries, Entry{IP: addr, Domain: name})
			}
		}
	}
	sort.SliceStable(entries, func(i, j int) bool {
		if entries[i].Domain == entries[j].Domain {
			return entries[i].IP > entries[j].IP
		}
		return entries[i].Domain > entries[j].Domain
	})
	entries = append(entries, hosts...)
	buf := new(bytes.Buffer)
	w := tabwriter.NewWriter(buf, 1, 1, 1, ' ', 0)
	for _, e := range entries {
		_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", e.IP, e.Domain, "", "# Add by KubeVPN")
	}
	_ = w.Flush()
	return buf.String()
}

View File

@@ -4,35 +4,139 @@
package dns
import (
"bytes"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/docker/docker/libnetwork/resolvconf"
miekgdns "github.com/miekg/dns"
log "github.com/sirupsen/logrus"
"github.com/coredns/caddy"
_ "github.com/coredns/coredns/core/dnsserver"
_ "github.com/coredns/coredns/core/plugin"
"github.com/wencaiwulue/kubevpn/pkg/config"
)
// systemd-resolve --status, systemd-resolve --flush-caches
func SetupDNS(config *miekgdns.ClientConfig) error {
tunName := os.Getenv("tunName")
func SetupDNS(clientConfig *miekgdns.ClientConfig, _ []string, useLocalDNS bool) error {
existNameservers := make([]string, 0)
existSearches := make([]string, 0)
filename := filepath.Join("/", "etc", "resolv.conf")
readFile, err := os.ReadFile(filename)
if err == nil {
resolvConf, err := miekgdns.ClientConfigFromReader(bytes.NewBufferString(string(readFile)))
if err == nil {
if len(resolvConf.Servers) != 0 {
existNameservers = append(existNameservers, resolvConf.Servers...)
}
if len(resolvConf.Search) != 0 {
existSearches = append(existSearches, resolvConf.Search...)
}
}
}
if useLocalDNS {
if err := SetupLocalDNS(clientConfig, existNameservers); err != nil {
return err
}
clientConfig.Servers[0] = "127.0.0.1"
}
tunName := os.Getenv(config.EnvTunNameOrLUID)
if len(tunName) == 0 {
tunName = "tun0"
}
// TODO consider use https://wiki.debian.org/NetworkManager and nmcli to config DNS
// try to solve:
// sudo systemd-resolve --set-dns 172.28.64.10 --interface tun0 --set-domain=vke-system.svc.cluster.local --set-domain=svc.cluster.local --set-domain=cluster.local
//Failed to set DNS configuration: Unit dbus-org.freedesktop.resolve1.service not found.
// ref: https://superuser.com/questions/1427311/activation-via-systemd-failed-for-unit-dbus-org-freedesktop-resolve1-service
// systemctl enable systemd-resolved.service
_ = exec.Command("systemctl", "enable", "systemd-resolved.service").Run()
// systemctl start systemd-resolved.service
_ = exec.Command("systemctl", "start", "systemd-resolved.service").Run()
//systemctl status systemd-resolved.service
_ = exec.Command("systemctl", "status", "systemd-resolved.service").Run()
cmd := exec.Command("systemd-resolve", []string{
"--set-dns",
config.Servers[0],
clientConfig.Servers[0],
"--interface",
tunName,
"--set-domain=" + config.Search[0],
"--set-domain=" + config.Search[1],
"--set-domain=" + config.Search[2],
"--set-domain=" + clientConfig.Search[0],
"--set-domain=" + clientConfig.Search[1],
"--set-domain=" + clientConfig.Search[2],
}...)
output, err := cmd.CombinedOutput()
if err != nil {
log.Warnf("cmd: %s, output: %s, error: %v\n", cmd.Args, string(output), err)
log.Debugf("failed to exec cmd: %s, message: %s, ignore", strings.Join(cmd.Args, " "), string(output))
}
if len(existNameservers) != 0 {
clientConfig.Servers = append(clientConfig.Servers, existNameservers...)
}
if len(existSearches) != 0 {
clientConfig.Search = append(clientConfig.Search, existSearches...)
}
return WriteResolvConf(*clientConfig)
}
// SetupLocalDNS starts an embedded coredns instance from a generated
// Corefile (bound to 127.0.0.1:53 per BuildCoreFile's template):
// cluster.local queries are forwarded to clientConfig's first server, other
// queries to the pre-existing nameservers. The server keeps running in a
// background goroutine; only startup errors are returned.
// NOTE: indexes clientConfig.Servers[0] — panics if the server list is empty.
func SetupLocalDNS(clientConfig *miekgdns.ClientConfig, existNameservers []string) error {
	corefile, err := BuildCoreFile(CoreFileTmpl{
		UpstreamDNS: clientConfig.Servers[0],
		Nameservers: strings.Join(existNameservers, " "),
	})
	if err != nil {
		return err
	}
	log.Debugf("corefile content: %s", string(corefile.Body()))
	// Start your engines
	instance, err := caddy.Start(corefile)
	if err != nil {
		return err
	}
	// Twiddle your thumbs
	go instance.Wait()
	return nil
}
// CancelDNS undoes SetupDNS: it strips the KubeVPN entries from the hosts
// file and restores the original /etc/resolv.conf from its backup copy.
// Errors are deliberately ignored — this is best-effort cleanup.
func CancelDNS() {
	updateHosts("")
	filename := filepath.Join("/", "etc", "resolv.conf")
	_ = os.Rename(getBackupFilename(filename), filename)
}
// GetHostFile returns the hosts-file path on this platform (/etc/hosts).
func GetHostFile() string {
	return "/etc/hosts"
}
// WriteResolvConf moves the current /etc/resolv.conf aside (so CancelDNS
// can restore it later) and writes a fresh one from config: nameservers,
// search domains and the non-zero ndots/attempts/timeout options.
func WriteResolvConf(config miekgdns.ClientConfig) error {
	var opts []string
	if config.Ndots != 0 {
		opts = append(opts, fmt.Sprintf("ndots:%d", config.Ndots))
	}
	if config.Attempts != 0 {
		opts = append(opts, fmt.Sprintf("attempts:%d", config.Attempts))
	}
	if config.Timeout != 0 {
		opts = append(opts, fmt.Sprintf("timeout:%d", config.Timeout))
	}
	resolvPath := filepath.Join("/", "etc", "resolv.conf")
	// best-effort backup; a missing original is not fatal
	_ = os.Rename(resolvPath, getBackupFilename(resolvPath))
	_, err := resolvconf.Build(resolvPath, config.Servers, config.Search, opts)
	return err
}
// getBackupFilename returns the path under which the original copy of
// filename is preserved (a ".kubevpn_backup" suffix is appended).
func getBackupFilename(filename string) string {
	return filename + ".kubevpn_backup"
}

View File

@@ -2,75 +2,164 @@ package dns
import (
"context"
"encoding/json"
"errors"
"math"
"math/rand"
"net"
"os"
"strings"
"sync"
"sync/atomic"
"time"
miekgdns "github.com/miekg/dns"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/semaphore"
"golang.org/x/time/rate"
"k8s.io/apimachinery/pkg/util/cache"
"k8s.io/apimachinery/pkg/util/sets"
)
var (
maxConcurrent int64 = 1024
logInterval = 2 * time.Second
)
// github.com/docker/docker@v23.0.1+incompatible/libnetwork/network_windows.go:53
type server struct {
// todo using cache to speed up dns resolve process
dnsCache *cache.LRUExpireCache
forwardDNS *miekgdns.ClientConfig
client *miekgdns.Client
fwdSem *semaphore.Weighted // Limit the number of concurrent external DNS requests in-flight
logInverval rate.Sometimes // Rate-limit logging about hitting the fwdSem limit
}
func NewDNSServer(network, address string, forwardDNS *miekgdns.ClientConfig) error {
return miekgdns.ListenAndServe(address, network, &server{
dnsCache: cache.NewLRUExpireCache(1000),
forwardDNS: forwardDNS,
dnsCache: cache.NewLRUExpireCache(1000),
forwardDNS: forwardDNS,
client: &miekgdns.Client{Net: "udp", SingleInflight: true, Timeout: time.Second * 30},
fwdSem: semaphore.NewWeighted(maxConcurrent),
logInverval: rate.Sometimes{Interval: logInterval},
})
}
// ServeDNS consider using a cache
// eg: nslookup -port=56571 code.byted.org 127.0.0.1
func (s *server) ServeDNS(w miekgdns.ResponseWriter, r *miekgdns.Msg) {
//defer w.Close()
ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*3)
defer cancelFunc()
defer w.Close()
if len(r.Question) == 0 {
r.Response = true
_ = w.WriteMsg(r)
return
}
for _, dnsAddr := range s.forwardDNS.Servers {
var msg = new(miekgdns.Msg)
*msg = *r
go func(r miekgdns.Msg, dnsAddr string) {
var q = r.Question[0]
var originName = q.Name
q.Name = fix(originName, s.forwardDNS.Search[0])
r.Question = []miekgdns.Question{q}
answer, err := miekgdns.Exchange(&r, dnsAddr+":53")
if err == nil && len(answer.Answer) != 0 {
if len(answer.Answer) != 0 {
answer.Answer[0].Header().Name = originName
ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*5)
defer cancelFunc()
// limits the number of outstanding concurrent queries
err := s.fwdSem.Acquire(ctx, 1)
if err != nil {
s.logInverval.Do(func() {
log.Errorf("dns-server more than %v concurrent queries", maxConcurrent)
})
r.SetRcode(r, miekgdns.RcodeRefused)
return
}
defer s.fwdSem.Release(1)
var wg = &sync.WaitGroup{}
var done = &atomic.Value{}
done.Store(false)
var q = r.Question[0]
var originName = q.Name
searchList := fix(originName, s.forwardDNS.Search)
if v, ok := s.dnsCache.Get(originName); ok {
searchList = []string{v.(string)}
}
for _, name := range searchList {
// only should have dot [5,6]
// productpage.default.svc.cluster.local.
// mongo-headless.mongodb.default.svc.cluster.local.
count := strings.Count(name, ".")
if count < 5 || count > 6 {
continue
}
for _, dnsAddr := range s.forwardDNS.Servers {
wg.Add(1)
go func(name, dnsAddr string) {
defer wg.Done()
var msg miekgdns.Msg
marshal, _ := json.Marshal(r)
_ = json.Unmarshal(marshal, &msg)
for i := 0; i < len(msg.Question); i++ {
msg.Question[i].Name = name
}
if len(answer.Question) != 0 {
answer.Question[0].Name = originName
}
if ctx.Err() == nil {
defer cancelFunc()
err = w.WriteMsg(answer)
if err != nil {
log.Debugf(err.Error())
msg.Ns = nil
msg.Extra = nil
msg.Id = uint16(rand.Intn(math.MaxUint16 + 1))
answer, _, err := s.client.ExchangeContext(context.Background(), &msg, net.JoinHostPort(dnsAddr, s.forwardDNS.Port))
if err == nil && len(answer.Answer) != 0 {
s.dnsCache.Add(originName, name, time.Hour*24*365*100) // never expire
for i := 0; i < len(answer.Answer); i++ {
answer.Answer[i].Header().Name = originName
}
for i := 0; i < len(answer.Question); i++ {
answer.Question[i].Name = originName
}
r.Answer = answer.Answer
r.Response = answer.Response
r.Authoritative = answer.Authoritative
r.AuthenticatedData = answer.AuthenticatedData
r.CheckingDisabled = answer.CheckingDisabled
r.Rcode = answer.Rcode
r.Truncated = answer.Truncated
r.RecursionDesired = answer.RecursionDesired
r.RecursionAvailable = answer.RecursionAvailable
r.Opcode = answer.Opcode
r.Zero = answer.Zero
select {
case <-ctx.Done():
return
default:
done.Store(true)
err = w.WriteMsg(r)
cancelFunc()
return
}
}
return
}
if err != nil {
log.Debugf(err.Error())
}
}(*msg, dnsAddr)
if err != nil && !errors.Is(err, os.ErrDeadlineExceeded) {
log.Debugf(err.Error())
}
}(name, dnsAddr)
}
}
<-ctx.Done()
if ctx.Err() != context.Canceled {
go func() {
wg.Wait()
cancelFunc()
}()
select {
case <-ctx.Done():
}
if !done.Load().(bool) {
r.Response = true
_ = w.WriteMsg(r)
}
}
func fix(domain, suffix string) string {
namespace := strings.Split(suffix, ".")[0]
if sets.NewString(strings.Split(domain, ".")...).Has(namespace) {
domain = domain[0:strings.LastIndex(domain, namespace)]
func fix(domain string, suffix []string) (result []string) {
result = []string{domain}
for _, s := range suffix {
result = append(result, strings.TrimSuffix(domain, ".")+"."+s+".")
}
return strings.TrimSuffix(domain, ".") + "." + suffix + "."
return
}

View File

@@ -1,140 +0,0 @@
package dns
import (
"fmt"
miekgdns "github.com/miekg/dns"
"io/fs"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"testing"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
func TestSetupDnsServer(t *testing.T) {
port := util.GetAvailableUDPPortOrDie()
clientConfig := &miekgdns.ClientConfig{
Servers: []string{"10.233.93.190"},
Search: []string{"vke-system.svc.cluster.local", "svc.cluster.local", "cluster.local"},
Port: "53",
Ndots: 0,
}
go func() {
err := NewDNSServer("udp", "127.0.0.1:"+strconv.Itoa(port), clientConfig)
if err != nil {
t.FailNow()
}
}()
config := miekgdns.ClientConfig{
Servers: []string{"127.0.0.1"},
Search: clientConfig.Search,
Port: strconv.Itoa(port),
Ndots: clientConfig.Ndots,
Timeout: 1,
}
_ = os.RemoveAll(filepath.Join("/", "etc", "resolver"))
if err := os.MkdirAll(filepath.Join("/", "etc", "resolver"), fs.ModePerm); err != nil {
panic(err)
}
for _, s := range strings.Split(clientConfig.Search[0], ".") {
filename := filepath.Join("/", "etc", "resolver", s)
err := ioutil.WriteFile(filename, []byte(toString(config)), 0644)
if err != nil {
panic(err)
}
}
fmt.Println(port)
select {}
}
func TestFull(t *testing.T) {
type Question struct {
Q string
}
type person struct {
Name string
age *int
Question []Question
}
age := 22
p := &person{"Bob", &age, []Question{{"haha"}}}
fmt.Println(p)
p2 := new(person)
*p2 = *p
fmt.Println(p2)
p.Name = " zhangsan"
p.Question = append(p.Question, Question{"asdf"})
fmt.Println(p.Question)
fmt.Println(p2.Question)
}
func TestName(t *testing.T) {
type name struct {
input string
output string
}
var data = []name{
{
input: "ry-server",
output: "ry-server.vke-system.svc.cluster.local",
},
{
input: "ry-server.",
output: "ry-server.vke-system.svc.cluster.local",
},
{
input: "ry-server.vke-system",
output: "ry-server.vke-system.svc.cluster.local",
}, {
input: "ry-server.vke-system.",
output: "ry-server.vke-system.svc.cluster.local",
},
{
input: "ry-server.vke-system.svc",
output: "ry-server.vke-system.svc.cluster.local",
},
{
input: "ry-server.vke-system.svc.",
output: "ry-server.vke-system.svc.cluster.local",
},
{
input: "ry-server.vke-system.svc.cluster",
output: "ry-server.vke-system.svc.cluster.local",
},
{
input: "mongodb-1.mongodb-headless",
output: "mongodb-1.mongodb-headless.vke-system.svc.cluster.local",
}, {
input: "mongodb-1.mongodb-headless.",
output: "mongodb-1.mongodb-headless.vke-system.svc.cluster.local",
},
{
input: "mongodb-1.mongodb-headless.vke-system",
output: "mongodb-1.mongodb-headless.vke-system.svc.cluster.local",
},
{
input: "mongodb-1.mongodb-headless.vke-system.",
output: "mongodb-1.mongodb-headless.vke-system.svc.cluster.local",
},
{
input: "mongodb-1.mongodb-headless.vke-system.svc",
output: "mongodb-1.mongodb-headless.vke-system.svc.cluster.local",
},
{
input: "mongodb-1.mongodb-headless.vke-system.svc.cluster",
output: "mongodb-1.mongodb-headless.vke-system.svc.cluster.local",
},
}
for _, datum := range data {
if o := fix(datum.input, "vke-system.svc.cluster.local"); o != datum.output {
t.Logf("output: %s, expected: %s", o, datum.output)
t.FailNow()
}
}
}

View File

@@ -7,7 +7,6 @@ import (
"context"
"fmt"
"io/fs"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -32,15 +31,15 @@ var resolv = "/etc/resolv.conf"
// service.namespace.svc:port
// service.namespace.svc.cluster:port
// service.namespace.svc.cluster.local:port
func SetupDNS(config *miekgdns.ClientConfig) error {
usingResolver(config)
func SetupDNS(config *miekgdns.ClientConfig, ns []string, _ bool) error {
usingResolver(config, ns)
_ = exec.Command("killall", "mDNSResponderHelper").Run()
_ = exec.Command("killall", "-HUP", "mDNSResponder").Run()
_ = exec.Command("dscacheutil", "-flushcache").Run()
return nil
}
func usingResolver(clientConfig *miekgdns.ClientConfig) {
func usingResolver(clientConfig *miekgdns.ClientConfig, ns []string) {
var err error
_ = os.RemoveAll(filepath.Join("/", "etc", "resolver"))
if err = os.MkdirAll(filepath.Join("/", "etc", "resolver"), fs.ModePerm); err != nil {
@@ -50,11 +49,11 @@ func usingResolver(clientConfig *miekgdns.ClientConfig) {
Servers: clientConfig.Servers,
Search: clientConfig.Search,
Ndots: 5,
Timeout: 1,
Timeout: 2,
}
// for support like: service:port, service.namespace.svc.cluster.local:port
filename := filepath.Join("/", "etc", "resolver", "local")
_ = ioutil.WriteFile(filename, []byte(toString(config)), 0644)
_ = os.WriteFile(filename, []byte(toString(config)), 0644)
// for support like: service.namespace:port, service.namespace.svc:port, service.namespace.svc.cluster:port
port := util.GetAvailableUDPPortOrDie()
@@ -68,18 +67,18 @@ func usingResolver(clientConfig *miekgdns.ClientConfig) {
Search: clientConfig.Search,
Port: strconv.Itoa(port),
Ndots: clientConfig.Ndots,
Timeout: 1,
Timeout: 2,
}
for _, s := range strings.Split(clientConfig.Search[0], ".") {
for _, s := range sets.New[string](strings.Split(clientConfig.Search[0], ".")...).Insert(ns...).UnsortedList() {
filename = filepath.Join("/", "etc", "resolver", s)
_ = ioutil.WriteFile(filename, []byte(toString(config)), 0644)
_ = os.WriteFile(filename, []byte(toString(config)), 0644)
}
}
func usingNetworkSetup(ip string, namespace string) {
networkSetup(ip, namespace)
var ctx context.Context
ctx, cancel = context.WithCancel(context.TODO())
ctx, cancel = context.WithCancel(context.Background())
go func() {
ticker := time.NewTicker(time.Second * 10)
newWatcher, _ := fsnotify.NewWatcher()
@@ -98,7 +97,7 @@ func usingNetworkSetup(ip string, namespace string) {
//}
case <-c:
if rc, err := miekgdns.ClientConfigFromFile(resolv); err == nil && rc.Timeout != 1 {
if !sets.NewString(rc.Servers...).Has(ip) {
if !sets.New[string](rc.Servers...).Has(ip) {
rc.Servers = append(rc.Servers, ip)
for _, s := range []string{namespace + ".svc.cluster.local", "svc.cluster.local", "cluster.local"} {
rc.Search = append(rc.Search, s)
@@ -107,7 +106,7 @@ func usingNetworkSetup(ip string, namespace string) {
}
//rc.Attempts = 1
rc.Timeout = 1
_ = ioutil.WriteFile(resolv, []byte(toString(*rc)), 0644)
_ = os.WriteFile(resolv, []byte(toString(*rc)), 0644)
}
case <-ctx.Done():
return
@@ -155,6 +154,7 @@ func CancelDNS() {
}
_ = os.RemoveAll(filepath.Join("/", "etc", "resolver"))
//networkCancel()
updateHosts("")
}
/*
@@ -257,3 +257,7 @@ func networkCancel() {
}
}
}
func GetHostFile() string {
return "/etc/hosts"
}

View File

@@ -4,9 +4,8 @@
package dns
import (
"context"
"fmt"
"net"
"net/netip"
"os"
"os/exec"
"strconv"
@@ -15,29 +14,45 @@ import (
log "github.com/sirupsen/logrus"
"golang.org/x/sys/windows"
"golang.zx2c4.com/wireguard/windows/tunnel/winipcfg"
"github.com/wencaiwulue/kubevpn/pkg/config"
)
func SetupDNS(config *miekgdns.ClientConfig) error {
getenv := os.Getenv("luid")
parseUint, err := strconv.ParseUint(getenv, 10, 64)
func SetupDNS(clientConfig *miekgdns.ClientConfig, _ []string, _ bool) error {
env := os.Getenv(config.EnvTunNameOrLUID)
parseUint, err := strconv.ParseUint(env, 10, 64)
if err != nil {
log.Warningln(err)
return err
}
luid := winipcfg.LUID(parseUint)
err = luid.SetDNS(windows.AF_INET, []net.IP{net.ParseIP(config.Servers[0])}, config.Search)
_ = exec.CommandContext(context.Background(), "ipconfig", "/flushdns").Run()
var servers []netip.Addr
for _, s := range clientConfig.Servers {
var addr netip.Addr
addr, err = netip.ParseAddr(s)
if err != nil {
log.Warningln(err)
return err
}
servers = append(servers, addr)
}
err = luid.SetDNS(windows.AF_INET, servers, clientConfig.Search)
if err != nil {
log.Warningln(err)
return err
}
if err != nil {
log.Warningln(err)
return err
}
//_ = updateNicMetric(tunName)
_ = addNicSuffixSearchList(config.Search)
_ = addNicSuffixSearchList(clientConfig.Search)
return nil
}
func CancelDNS() {
getenv := os.Getenv("luid")
updateHosts("")
getenv := os.Getenv(config.EnvTunNameOrLUID)
parseUint, err := strconv.ParseUint(getenv, 10, 64)
if err != nil {
log.Warningln(err)
@@ -45,6 +60,7 @@ func CancelDNS() {
}
luid := winipcfg.LUID(parseUint)
_ = luid.FlushDNS(windows.AF_INET)
_ = luid.FlushRoutes(windows.AF_INET)
}
func updateNicMetric(name string) error {
@@ -70,9 +86,14 @@ func addNicSuffixSearchList(search []string) error {
fmt.Sprintf("@(\"%s\", \"%s\", \"%s\")", search[0], search[1], search[2]),
}...)
output, err := cmd.CombinedOutput()
log.Info(cmd.Args)
log.Debugln(cmd.Args)
if err != nil {
log.Warnf("error while set dns suffix search list, err: %v, output: %s, command: %v", err, string(output), cmd.Args)
}
return err
}
// GetHostFile returns the absolute path of the hosts file on Windows.
func GetHostFile() string {
	// Lowercase forward-slash variant kept for reference:
	//return "/windows/system32/drivers/etc/hosts"
	return `C:\Windows\System32\drivers\etc\hosts`
}

View File

@@ -33,11 +33,11 @@ func InstallWireGuardTunDriver() {
}
// UninstallWireGuardTunDriver removes the wintun.dll driver file that was
// copied next to the running executable (see copyDriver), returning any
// error from resolving the executable path or deleting the file.
//
// The file is looked up relative to the executable's directory — not the
// current working directory — because that is where the driver is installed.
func UninstallWireGuardTunDriver() error {
	executable, err := os.Executable()
	if err != nil {
		return err
	}
	filename := filepath.Join(filepath.Dir(executable), "wintun.dll")
	return os.Remove(filename)
}

View File

@@ -2,7 +2,6 @@ package wintun
import (
"bytes"
"io/ioutil"
"os"
"path/filepath"
)
@@ -15,7 +14,7 @@ func copyDriver(b []byte) error {
}
filename := filepath.Join(filepath.Dir(executable), "wintun.dll")
var content []byte
content, err = ioutil.ReadFile(filename)
content, err = os.ReadFile(filename)
if err == nil {
// already exists and content are same, not need to copy this file
if bytes.Compare(b, content) == 0 {
@@ -23,6 +22,6 @@ func copyDriver(b []byte) error {
}
_ = os.Remove(filename)
}
err = ioutil.WriteFile(filename, b, 644)
err = os.WriteFile(filename, b, 644)
return err
}

View File

@@ -3,6 +3,7 @@ package exchange
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/utils/pointer"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/util"
@@ -20,41 +21,69 @@ func RemoveContainer(spec *corev1.PodSpec) {
func AddContainer(spec *corev1.PodSpec, c util.PodRouteConfig) {
// remove vpn container if already exist
RemoveContainer(spec)
t := true
zero := int64(0)
spec.Containers = append(spec.Containers, corev1.Container{
Name: config.ContainerSidecarVPN,
Image: config.Image,
EnvFrom: []corev1.EnvFromSource{{
SecretRef: &corev1.SecretEnvSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: config.ConfigMapPodTrafficManager,
},
},
}},
Env: []corev1.EnvVar{
{
Name: "LocalTunIP",
Value: c.LocalTunIP,
Name: "LocalTunIPv4",
Value: c.LocalTunIPv4,
},
{
Name: "TrafficManagerRealIP",
Value: c.TrafficManagerRealIP,
Name: "LocalTunIPv6",
Value: c.LocalTunIPv6,
},
{
Name: "InboundPodTunIP",
Value: c.InboundPodTunIP,
Name: config.EnvInboundPodTunIPv4,
Value: "",
},
{
Name: "Route",
Value: c.Route,
Name: config.EnvInboundPodTunIPv6,
Value: "",
},
{
Name: "CIDR4",
Value: config.CIDR.String(),
},
{
Name: "CIDR6",
Value: config.CIDR6.String(),
},
{
Name: "TrafficManagerService",
Value: config.ConfigMapPodTrafficManager,
},
},
Command: []string{"/bin/sh", "-c"},
// https://www.netfilter.org/documentation/HOWTO/NAT-HOWTO-6.html#ss6.2
// for curl -g -6 [efff:ffff:ffff:ffff:ffff:ffff:ffff:999a]:9080/health or curl 127.0.0.1:9080/health hit local PC
// output chain
Args: []string{`
sysctl net.ipv4.ip_forward=1
sysctl -w net.ipv4.ip_forward=1
sysctl -w net.ipv6.conf.all.disable_ipv6=0
sysctl -w net.ipv6.conf.all.forwarding=1
sysctl -w net.ipv4.conf.all.route_localnet=1
update-alternatives --set iptables /usr/sbin/iptables-legacy
iptables -F
ip6tables -F
iptables -P INPUT ACCEPT
ip6tables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -t nat -A PREROUTING ! -p icmp -j DNAT --to ${LocalTunIP}
ip6tables -P FORWARD ACCEPT
iptables -t nat -A PREROUTING ! -p icmp -j DNAT --to ${LocalTunIPv4}
ip6tables -t nat -A PREROUTING ! -p icmp -j DNAT --to ${LocalTunIPv6}
iptables -t nat -A POSTROUTING ! -p icmp -j MASQUERADE
iptables -t nat -A OUTPUT -o lo ! -p icmp -j DNAT --to-destination ${LocalTunIP}
kubevpn serve -L "tun://0.0.0.0:8421/${TrafficManagerRealIP}:8422?net=${InboundPodTunIP}&route=${Route}" --debug=true`,
ip6tables -t nat -A POSTROUTING ! -p icmp -j MASQUERADE
iptables -t nat -A OUTPUT -o lo ! -p icmp -j DNAT --to-destination ${LocalTunIPv4}
ip6tables -t nat -A OUTPUT -o lo ! -p icmp -j DNAT --to-destination ${LocalTunIPv6}
kubevpn serve -L "tun:/127.0.0.1:8422?net=${TunIPv4}&route=${CIDR4}" -F "tcp://${TrafficManagerService}:10800"`,
},
SecurityContext: &corev1.SecurityContext{
Capabilities: &corev1.Capabilities{
@@ -63,8 +92,8 @@ kubevpn serve -L "tun://0.0.0.0:8421/${TrafficManagerRealIP}:8422?net=${InboundP
//"SYS_MODULE",
},
},
RunAsUser: &zero,
Privileged: &t,
RunAsUser: pointer.Int64(0),
Privileged: pointer.Bool(true),
},
Resources: corev1.ResourceRequirements{
Requests: map[corev1.ResourceName]resource.Quantity{

View File

@@ -2,46 +2,73 @@ package handler
import (
"context"
"encoding/json"
"fmt"
"net"
"os"
"os/signal"
"strconv"
"syscall"
"time"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
v12 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/util/retry"
"k8s.io/utils/pointer"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/dns"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
var stopChan = make(chan os.Signal)
var RollbackFuncList = make([]func(), 2)
var ctx, cancel = context.WithCancel(context.Background())
func (c *ConnectOptions) addCleanUpResourceHandler(clientset *kubernetes.Clientset, namespace string) {
signal.Notify(stopChan, os.Interrupt, os.Kill, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGKILL /*, syscall.SIGSTOP*/)
func (c *ConnectOptions) addCleanUpResourceHandler() {
signal.Notify(stopChan, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGKILL)
go func() {
<-stopChan
log.Info("prepare to exit, cleaning up")
dns.CancelDNS()
err := c.dhcp.ReleaseIpToDHCP(c.usedIPs...)
cleanupCtx, cancelFunc := context.WithTimeout(context.Background(), time.Second*5)
defer cancelFunc()
var ips []net.IP
if c.localTunIPv4 != nil && c.localTunIPv4.IP != nil {
ips = append(ips, c.localTunIPv4.IP)
}
if c.localTunIPv6 != nil && c.localTunIPv6.IP != nil {
ips = append(ips, c.localTunIPv6.IP)
}
err := c.dhcp.ReleaseIP(cleanupCtx, ips...)
if err != nil {
log.Errorf("failed to release ip to dhcp, err: %v", err)
}
cancel()
for _, function := range RollbackFuncList {
if function != nil {
function()
}
}
cleanUpTrafficManagerIfRefCountIsZero(clientset, namespace)
_ = c.clientset.CoreV1().Pods(c.Namespace).Delete(cleanupCtx, config.CniNetName, v1.DeleteOptions{GracePeriodSeconds: pointer.Int64(0)})
var count int
count, err = updateRefCount(cleanupCtx, c.clientset.CoreV1().ConfigMaps(c.Namespace), config.ConfigMapPodTrafficManager, -1)
// only if ref is zero and deployment is not ready, needs to clean up
if err == nil && count <= 0 {
deployment, errs := c.clientset.AppsV1().Deployments(c.Namespace).Get(cleanupCtx, config.ConfigMapPodTrafficManager, v1.GetOptions{})
if errs == nil && deployment.Status.UnavailableReplicas != 0 {
cleanup(cleanupCtx, c.clientset, c.Namespace, config.ConfigMapPodTrafficManager, true)
}
}
if err != nil {
log.Errorf("can not update ref-count: %v", err)
}
dns.CancelDNS()
cancel()
log.Info("clean up successful")
util.CleanExtensionLib()
os.Exit(0)
}()
}
@@ -54,57 +81,82 @@ func Cleanup(s os.Signal) {
}
// vendor/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go:99
func updateServiceRefCount(serviceInterface v12.ServiceInterface, name string, increment int) {
err := retry.OnError(
func updateRefCount(ctx context.Context, configMapInterface v12.ConfigMapInterface, name string, increment int) (current int, err error) {
err = retry.OnError(
retry.DefaultRetry,
func(err error) bool { return !k8serrors.IsNotFound(err) },
func() error {
service, err := serviceInterface.Get(context.TODO(), name, v1.GetOptions{})
func(err error) bool {
notFound := k8serrors.IsNotFound(err)
if notFound {
return false
}
conflict := k8serrors.IsConflict(err)
if conflict {
return true
}
return false
},
func() (err error) {
var cm *corev1.ConfigMap
cm, err = configMapInterface.Get(ctx, name, v1.GetOptions{})
if err != nil {
log.Errorf("update ref-count failed, increment: %d, error: %v", increment, err)
return err
if k8serrors.IsNotFound(err) {
return err
}
err = fmt.Errorf("update ref-count failed, increment: %d, error: %v", increment, err)
return
}
curCount := 0
if ref := service.GetAnnotations()["ref-count"]; len(ref) > 0 {
curCount, err = strconv.Atoi(ref)
curCount, _ := strconv.Atoi(cm.Data[config.KeyRefCount])
var newVal = curCount + increment
if newVal < 0 {
newVal = 0
}
p, _ := json.Marshal([]interface{}{
map[string]interface{}{
"op": "replace",
"path": "/metadata/annotations/ref-count",
"value": strconv.Itoa(curCount + increment),
},
})
_, err = serviceInterface.Patch(context.TODO(), config.ConfigMapPodTrafficManager, types.JSONPatchType, p, v1.PatchOptions{})
return err
p := []byte(fmt.Sprintf(`{"data":{"%s":"%s"}}`, config.KeyRefCount, strconv.Itoa(newVal)))
_, err = configMapInterface.Patch(ctx, name, types.MergePatchType, p, v1.PatchOptions{})
if err != nil {
if k8serrors.IsNotFound(err) {
return err
}
err = fmt.Errorf("update ref count error, error: %v", err)
return
}
return
})
if err != nil {
log.Errorf("update ref count error, error: %v", err)
} else {
log.Info("update ref count successfully")
return
}
log.Info("update ref count successfully")
var cm *corev1.ConfigMap
cm, err = configMapInterface.Get(ctx, name, v1.GetOptions{})
if err != nil {
err = fmt.Errorf("failed to get cm: %s, err: %v", name, err)
return
}
current, err = strconv.Atoi(cm.Data[config.KeyRefCount])
if err != nil {
err = fmt.Errorf("failed to get ref-count, err: %v", err)
}
return
}
func cleanUpTrafficManagerIfRefCountIsZero(clientset *kubernetes.Clientset, namespace string) {
updateServiceRefCount(clientset.CoreV1().Services(namespace), config.ConfigMapPodTrafficManager, -1)
pod, err := clientset.CoreV1().Services(namespace).Get(context.TODO(), config.ConfigMapPodTrafficManager, v1.GetOptions{})
if err != nil {
log.Error(err)
return
}
refCount, err := strconv.Atoi(pod.GetAnnotations()["ref-count"])
if err != nil {
log.Error(err)
return
}
// if refcount is less than zero or equals to zero, means nobody is using this traffic pod, so clean it
if refCount <= 0 {
zero := int64(0)
log.Info("refCount is zero, prepare to clean up resource")
deleteOptions := v1.DeleteOptions{GracePeriodSeconds: &zero}
func cleanup(ctx context.Context, clientset *kubernetes.Clientset, namespace, name string, keepCIDR bool) {
options := v1.DeleteOptions{GracePeriodSeconds: pointer.Int64(0)}
if keepCIDR {
// keep configmap
//_ = clientset.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), config.ConfigMapPodTrafficManager, deleteOptions)
_ = clientset.CoreV1().Services(namespace).Delete(context.TODO(), config.ConfigMapPodTrafficManager, deleteOptions)
_ = clientset.AppsV1().Deployments(namespace).Delete(context.TODO(), config.ConfigMapPodTrafficManager, deleteOptions)
p := []byte(fmt.Sprintf(`[{"op": "remove", "path": "/data/%s"},{"op": "remove", "path": "/data/%s"}]`, config.KeyDHCP, config.KeyDHCP6))
_, _ = clientset.CoreV1().ConfigMaps(namespace).Patch(ctx, name, types.JSONPatchType, p, v1.PatchOptions{})
p = []byte(fmt.Sprintf(`{"data":{"%s":"%s"}}`, config.KeyRefCount, strconv.Itoa(0)))
_, _ = clientset.CoreV1().ConfigMaps(namespace).Patch(ctx, name, types.MergePatchType, p, v1.PatchOptions{})
} else {
_ = clientset.CoreV1().ConfigMaps(namespace).Delete(ctx, name, options)
}
_ = clientset.CoreV1().Pods(namespace).Delete(ctx, config.CniNetName, options)
_ = clientset.CoreV1().Secrets(namespace).Delete(ctx, name, options)
_ = clientset.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(ctx, name+"."+namespace, options)
_ = clientset.RbacV1().RoleBindings(namespace).Delete(ctx, name, options)
_ = clientset.CoreV1().ServiceAccounts(namespace).Delete(ctx, name, options)
_ = clientset.RbacV1().Roles(namespace).Delete(ctx, name, options)
_ = clientset.CoreV1().Services(namespace).Delete(ctx, name, options)
_ = clientset.AppsV1().Deployments(namespace).Delete(ctx, name, options)
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,146 +0,0 @@
package handler
import (
"context"
"crypto/md5"
"fmt"
"net"
"os/exec"
"testing"
"time"
log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
net2 "k8s.io/apimachinery/pkg/util/net"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
// Package-level test fixtures built from the local kubeconfig
// (~/.kube/config). Errors are deliberately discarded: when no valid
// kubeconfig is present these stay nil/zero and the tests using them
// will no-op or fail at call time.
var (
	clientConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: clientcmd.RecommendedHomeFile}, nil,
	)
	// clientconfig is the resolved REST config for clientConfig.
	clientconfig, _ = clientConfig.ClientConfig()
	// clientsets is a typed Kubernetes clientset built from clientconfig.
	clientsets, _ = kubernetes.NewForConfig(clientconfig)
	// namespaces is the default namespace from the kubeconfig context.
	namespaces, _, _ = clientConfig.Namespace()
)
// TestGetCIDR prints every CIDR discovered from cluster resources via
// util.GetCIDRFromResourceUgly. It makes no assertions — it is a manual
// smoke test; on error it silently does nothing.
func TestGetCIDR(t *testing.T) {
	cidr, err := util.GetCIDRFromResourceUgly(clientsets, namespaces)
	if err == nil {
		for _, ipNet := range cidr {
			fmt.Println(ipNet)
		}
	}
}
// TestPingUsingCommand pings the cluster IP of every Service in the test
// namespace using the system `ping` binary. Errors (including List
// failures) are ignored: this is best-effort connectivity probing, not an
// assertion-based test.
func TestPingUsingCommand(t *testing.T) {
	list, _ := clientsets.CoreV1().Services(namespaces).List(context.Background(), metav1.ListOptions{})
	for _, service := range list.Items {
		for _, clusterIP := range service.Spec.ClusterIPs {
			// -c 4: send four echo requests, then exit.
			_ = exec.Command("ping", clusterIP, "-c", "4").Run()
		}
	}
}
// TestGetMacAddress locates the network interface that owns the chosen
// host IP, hashes its MAC address with MD5 and prints the integer form of
// the digest (via util.BytesToInt). No assertions are made.
func TestGetMacAddress(t *testing.T) {
	interfaces, _ := net.Interfaces()
	hostInterface, _ := net2.ChooseHostInterface()
	for _, i := range interfaces {
		//fmt.Printf("%s -> %s\n", i.Name, i.HardwareAddr.String())
		addrs, _ := i.Addrs()
		for _, addr := range addrs {
			// NOTE(review): assumes every addr is a *net.IPNet; a
			// *net.IPAddr here would panic the test — confirm acceptable.
			if hostInterface.Equal(addr.(*net.IPNet).IP) {
				hash := md5.New()
				hash.Write([]byte(i.HardwareAddr.String()))
				sum := hash.Sum(nil)
				toInt := util.BytesToInt(sum)
				fmt.Println(toInt)
			}
		}
	}
}
// TestPingUsingCode hand-builds an ICMP echo request (type 8, code 0,
// identifier 13, sequence 37), sends it over a raw ip4:icmp socket and
// prints whether the reply echoes the same identifier/sequence bytes.
// It is a manual network probe rather than an assertion-based test; on
// any error it logs and returns.
func TestPingUsingCode(t *testing.T) {
	conn, err := net.DialTimeout("ip4:icmp", "www.baidu.com", time.Second*5)
	if err != nil {
		log.Print(err)
		return
	}
	// Fix: the raw socket was previously leaked; always release it.
	defer conn.Close()
	var msg [512]byte
	msg[0] = 8  // type: echo request
	msg[1] = 0  // code
	msg[2] = 0  // checksum high byte (zeroed before computing)
	msg[3] = 0  // checksum low byte
	msg[4] = 0  // identifier, high byte
	msg[5] = 13 // identifier, low byte
	msg[6] = 0  // sequence, high byte
	msg[7] = 37 // sequence, low byte
	length := 8
	check := checkSum(msg[0:length])
	msg[2] = byte(check >> 8)
	msg[3] = byte(check & 255)
	_, err = conn.Write(msg[0:length])
	if err != nil {
		log.Print(err)
		return
	}
	// Fix: the read error was previously discarded silently.
	if _, err = conn.Read(msg[0:]); err != nil {
		log.Print(err)
		return
	}
	log.Println(msg[5] == 13)
	log.Println(msg[7] == 37)
}
// checkSum computes the 16-bit one's-complement Internet checksum
// (RFC 1071) over msg, as used in ICMP headers: consecutive big-endian
// 16-bit words are summed, carries are folded back into the low 16 bits,
// and the result is complemented.
//
// Fix: the previous loop started at offset 1, summing words at odd
// offsets and skipping msg[0] entirely; the checksum must start at
// offset 0, and an odd trailing byte is zero-padded on the right.
func checkSum(msg []byte) uint16 {
	sum := 0
	for n := 0; n+1 < len(msg); n += 2 {
		sum += int(msg[n])<<8 + int(msg[n+1])
	}
	// Odd-length input: pad the final byte with a zero low byte.
	if len(msg)%2 == 1 {
		sum += int(msg[len(msg)-1]) << 8
	}
	// Fold any carries out of the top 16 bits back in.
	sum = (sum >> 16) + (sum & 0xffff)
	sum += sum >> 16
	return uint16(^sum)
}
// TestPatchAnnotation resolves deployments/reviews in the default
// namespace via a kubectl-style resource builder, then JSON-patches the
// "dev.nocalhost" annotation with a full deployment manifest and prints
// the resulting annotations. Any failure panics — this is a manual
// experiment, not a real unit test.
func TestPatchAnnotation(t *testing.T) {
	// Build a cmdutil.Factory backed by the local kubeconfig.
	configFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
	configFlags.KubeConfig = &clientcmd.RecommendedHomeFile
	factory := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(configFlags))
	// Resolve the single target object (unstructured, latest version).
	do := factory.NewBuilder().
		Unstructured().
		NamespaceParam("default").DefaultNamespace().AllNamespaces(false).
		ResourceTypeOrNameArgs(true, "deployments/reviews").
		ContinueOnError().
		Latest().
		Flatten().
		TransformRequests(func(req *rest.Request) { req.Param("includeObject", "Object") }).
		Do()
	err := do.Err()
	if err != nil {
		panic(err)
	}
	infos, err := do.Infos()
	if err != nil {
		panic(err)
	}
	// Only the first resolved object is patched.
	info := infos[0]
	helper := resource.NewHelper(info.Client, info.Mapping)
	// Replace the dev.nocalhost annotation with an embedded manifest.
	object, err := helper.Patch(
		info.Namespace,
		info.Name,
		types.JSONPatchType,
		[]byte(`[{"op":"replace","path":"/metadata/annotations/dev.nocalhost","value":{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1","dev.nocalhost/application-name":"bookinfo","dev.nocalhost/application-namespace":"default"},"labels":{"app":"reviews","app.kubernetes.io/managed-by":"nocalhost"},"name":"reviews","namespace":"default","selfLink":"/apis/apps/v1/namespaces/default/deployments/reviews"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"reviews"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"reviews"}},"spec":{"containers":[{"env":[{"name":"LOG_DIR","value":"/tmp/logs"}],"image":"nocalhost-docker.pkg.coding.net/nocalhost/bookinfo/reviews:latest","imagePullPolicy":"Always","name":"reviews","ports":[{"containerPort":9080,"protocol":"TCP"}],"readinessProbe":{"failureThreshold":3,"initialDelaySeconds":5,"periodSeconds":10,"successThreshold":1,"tcpSocket":{"port":9080},"timeoutSeconds":1},"resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"10m","memory":"32Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","volumeMounts":[{"mountPath":"/tmp","name":"tmp"},{"mountPath":"/opt/ibm/wlp/output","name":"wlp-output"}]}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30,"volumes":[{"emptyDir":{},"name":"wlp-output"},{"emptyDir":{},"name":"tmp"}]}}}}}]`),
		&metav1.PatchOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(object.(*unstructured.Unstructured).GetAnnotations())
}

View File

@@ -2,6 +2,7 @@ package handler
import (
"context"
"encoding/base64"
"fmt"
"net"
@@ -9,6 +10,7 @@ import (
"github.com/cilium/ipam/service/ipallocator"
log "github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -19,86 +21,103 @@ import (
type DHCPManager struct {
client corev1.ConfigMapInterface
cidr *net.IPNet
cidr6 *net.IPNet
namespace string
}
func NewDHCPManager(client corev1.ConfigMapInterface, namespace string, cidr *net.IPNet) *DHCPManager {
func NewDHCPManager(client corev1.ConfigMapInterface, namespace string) *DHCPManager {
return &DHCPManager{
client: client,
namespace: namespace,
cidr: cidr,
cidr: &net.IPNet{IP: config.RouterIP, Mask: config.CIDR.Mask},
cidr6: &net.IPNet{IP: config.RouterIP6, Mask: config.CIDR6.Mask},
}
}
// todo optimize dhcp, using mac address, ip and deadline as unit
func (d *DHCPManager) InitDHCP() error {
configMap, err := d.client.Get(context.Background(), config.ConfigMapPodTrafficManager, metav1.GetOptions{})
// initDHCP
// TODO optimize dhcp, using mac address, ip and deadline as unit
func (d *DHCPManager) initDHCP(ctx context.Context) error {
cm, err := d.client.Get(ctx, config.ConfigMapPodTrafficManager, metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return fmt.Errorf("failed to get configmap %s, err: %v", config.ConfigMapPodTrafficManager, err)
}
if err == nil {
if _, found := configMap.Data[config.KeyEnvoy]; !found {
// add key envoy in case of mount not exist content
if _, found := cm.Data[config.KeyEnvoy]; !found {
_, err = d.client.Patch(
context.Background(),
configMap.Name,
ctx,
cm.Name,
types.MergePatchType,
[]byte(fmt.Sprintf(`{"data":{"%s":"%s"}}`, config.KeyEnvoy, "")),
metav1.PatchOptions{},
)
return err
return fmt.Errorf("failed to patch configmap %s, err: %v", config.ConfigMapPodTrafficManager, err)
}
return nil
}
result := &v1.ConfigMap{
cm = &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: config.ConfigMapPodTrafficManager,
Namespace: d.namespace,
Labels: map[string]string{},
},
Data: map[string]string{config.KeyEnvoy: ""},
Data: map[string]string{
config.KeyEnvoy: "",
config.KeyRefCount: "0",
},
}
_, err = d.client.Create(context.Background(), result, metav1.CreateOptions{})
_, err = d.client.Create(ctx, cm, metav1.CreateOptions{})
if err != nil {
log.Errorf("create dhcp error, err: %v", err)
return err
return fmt.Errorf("create dhcp error, err: %v", err)
}
return nil
}
func (d *DHCPManager) RentIPBaseNICAddress() (*net.IPNet, error) {
ips := make(chan net.IP, 1)
err := d.updateDHCPConfigMap(func(allocator *ipallocator.Range) error {
ip, err := allocator.AllocateNext()
if err != nil {
func (d *DHCPManager) RentIPBaseNICAddress(ctx context.Context) (*net.IPNet, *net.IPNet, error) {
var v4, v6 net.IP
err := d.updateDHCPConfigMap(ctx, func(ipv4 *ipallocator.Range, ipv6 *ipallocator.Range) (err error) {
if v4, err = ipv4.AllocateNext(); err != nil {
return err
}
ips <- ip
return nil
})
if err != nil {
return nil, err
}
return &net.IPNet{IP: <-ips, Mask: d.cidr.Mask}, nil
}
func (d *DHCPManager) RentIPRandom() (*net.IPNet, error) {
var ipC = make(chan net.IP, 1)
err := d.updateDHCPConfigMap(func(dhcp *ipallocator.Range) error {
ip, err := dhcp.AllocateNext()
if err != nil {
if v6, err = ipv6.AllocateNext(); err != nil {
return err
}
ipC <- ip
return nil
return
})
if err != nil {
log.Errorf("update dhcp error after get ip, need to put ip back, err: %v", err)
return nil, err
return nil, nil, err
}
return &net.IPNet{IP: <-ipC, Mask: d.cidr.Mask}, nil
return &net.IPNet{IP: v4, Mask: d.cidr.Mask}, &net.IPNet{IP: v6, Mask: d.cidr6.Mask}, nil
}
func (d *DHCPManager) ReleaseIpToDHCP(ips ...*net.IPNet) error {
return d.updateDHCPConfigMap(func(r *ipallocator.Range) error {
func (d *DHCPManager) RentIPRandom(ctx context.Context) (*net.IPNet, *net.IPNet, error) {
var v4, v6 net.IP
err := d.updateDHCPConfigMap(ctx, func(ipv4 *ipallocator.Range, ipv6 *ipallocator.Range) (err error) {
if v4, err = ipv4.AllocateNext(); err != nil {
return err
}
if v6, err = ipv6.AllocateNext(); err != nil {
return err
}
return
})
if err != nil {
log.Errorf("failed to rent ip from DHCP server, err: %v", err)
return nil, nil, err
}
return &net.IPNet{IP: v4, Mask: d.cidr.Mask}, &net.IPNet{IP: v6, Mask: d.cidr6.Mask}, nil
}
func (d *DHCPManager) ReleaseIP(ctx context.Context, ips ...net.IP) error {
return d.updateDHCPConfigMap(ctx, func(ipv4 *ipallocator.Range, ipv6 *ipallocator.Range) error {
for _, ip := range ips {
if err := r.Release(ip.IP); err != nil {
var use *ipallocator.Range
if ip.To4() != nil {
use = ipv4
} else {
use = ipv6
}
if err := use.Release(ip); err != nil {
return err
}
}
@@ -106,37 +125,64 @@ func (d *DHCPManager) ReleaseIpToDHCP(ips ...*net.IPNet) error {
})
}
func (d *DHCPManager) updateDHCPConfigMap(f func(*ipallocator.Range) error) error {
cm, err := d.client.Get(context.Background(), config.ConfigMapPodTrafficManager, metav1.GetOptions{})
func (d *DHCPManager) updateDHCPConfigMap(ctx context.Context, f func(ipv4 *ipallocator.Range, ipv6 *ipallocator.Range) error) error {
cm, err := d.client.Get(ctx, config.ConfigMapPodTrafficManager, metav1.GetOptions{})
if err != nil {
log.Errorf("failed to get dhcp, err: %v", err)
return err
return fmt.Errorf("failed to get cm DHCP server, err: %v", err)
}
if cm.Data == nil {
cm.Data = make(map[string]string)
}
dhcp, err := ipallocator.NewAllocatorCIDRRange(d.cidr, func(max int, rangeSpec string) (allocator.Interface, error) {
var dhcp *ipallocator.Range
dhcp, err = ipallocator.NewAllocatorCIDRRange(d.cidr, func(max int, rangeSpec string) (allocator.Interface, error) {
return allocator.NewContiguousAllocationMap(max, rangeSpec), nil
})
if err != nil {
return err
}
err = dhcp.Restore(d.cidr, []byte(cm.Data[config.KeyDHCP]))
var str []byte
str, err = base64.StdEncoding.DecodeString(cm.Data[config.KeyDHCP])
if err == nil {
err = dhcp.Restore(d.cidr, str)
if err != nil {
return err
}
}
var dhcp6 *ipallocator.Range
dhcp6, err = ipallocator.NewAllocatorCIDRRange(d.cidr6, func(max int, rangeSpec string) (allocator.Interface, error) {
return allocator.NewContiguousAllocationMap(max, rangeSpec), nil
})
if err != nil {
return err
}
if err = f(dhcp); err != nil {
str, err = base64.StdEncoding.DecodeString(cm.Data[config.KeyDHCP6])
if err == nil {
err = dhcp6.Restore(d.cidr6, str)
if err != nil {
return err
}
}
if err = f(dhcp, dhcp6); err != nil {
return err
}
_, bytes, err := dhcp.Snapshot()
if err != nil {
return err
for index, i := range []*ipallocator.Range{dhcp, dhcp6} {
var bytes []byte
if _, bytes, err = i.Snapshot(); err != nil {
return err
}
var key string
if index == 0 {
key = config.KeyDHCP
} else {
key = config.KeyDHCP6
}
cm.Data[key] = base64.StdEncoding.EncodeToString(bytes)
}
cm.Data[config.KeyDHCP] = string(bytes)
_, err = d.client.Update(context.Background(), cm, metav1.UpdateOptions{})
_, err = d.client.Update(ctx, cm, metav1.UpdateOptions{})
if err != nil {
log.Errorf("update dhcp failed, err: %v", err)
return err
return fmt.Errorf("update dhcp failed, err: %v", err)
}
return nil
}
@@ -159,8 +205,8 @@ func (d *DHCPManager) Set(key, value string) error {
return nil
}
func (d *DHCPManager) Get(key string) (string, error) {
cm, err := d.client.Get(context.Background(), config.ConfigMapPodTrafficManager, metav1.GetOptions{})
func (d *DHCPManager) Get(ctx2 context.Context, key string) (string, error) {
cm, err := d.client.Get(ctx2, config.ConfigMapPodTrafficManager, metav1.GetOptions{})
if err != nil {
return "", err
}
@@ -171,3 +217,30 @@ func (d *DHCPManager) Get(key string) (string, error) {
}
return "", fmt.Errorf("can not get data")
}
// ForEach rebuilds the IPv4 DHCP allocator from the traffic-manager
// ConfigMap and invokes fn once for every currently allocated IP.
//
// Allocator state is stored base64-encoded under config.KeyDHCP in the
// ConfigMap's data. Only the IPv4 range (d.cidr) is walked here; the
// IPv6 pool (config.KeyDHCP6) is not visited.
func (d *DHCPManager) ForEach(fn func(net.IP)) error {
	cm, err := d.client.Get(context.Background(), config.ConfigMapPodTrafficManager, metav1.GetOptions{})
	if err != nil {
		log.Errorf("failed to get cm DHCP server, err: %v", err)
		return err
	}
	// Guard against a ConfigMap created without any data entries.
	if cm.Data == nil {
		cm.Data = make(map[string]string)
	}
	// Build an empty contiguous allocator covering the IPv4 CIDR...
	dhcp, err := ipallocator.NewAllocatorCIDRRange(d.cidr, func(max int, rangeSpec string) (allocator.Interface, error) {
		return allocator.NewContiguousAllocationMap(max, rangeSpec), nil
	})
	if err != nil {
		return err
	}
	// ...then restore the persisted (base64-encoded) allocation snapshot.
	str, err := base64.StdEncoding.DecodeString(cm.Data[config.KeyDHCP])
	if err != nil {
		return err
	}
	err = dhcp.Restore(d.cidr, str)
	if err != nil {
		return err
	}
	dhcp.ForEach(fn)
	return nil
}

705
pkg/handler/duplicate.go Normal file
View File

@@ -0,0 +1,705 @@
package handler
import (
"context"
"encoding/json"
"fmt"
"path/filepath"
"sort"
"strings"
"time"
"github.com/docker/distribution/reference"
"github.com/google/uuid"
log "github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/cli-runtime/pkg/genericclioptions"
runtimeresource "k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/tools/clientcmd/api/latest"
clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest"
"k8s.io/client-go/util/retry"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/polymorphichelpers"
"k8s.io/kubectl/pkg/util/podutils"
"k8s.io/utils/pointer"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/mesh"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
type DuplicateOptions struct {
Namespace string
Headers map[string]string
Workloads []string
ExtraCIDR []string
ExtraDomain []string
Engine config.Engine
TargetKubeconfig string
TargetNamespace string
TargetContainer string
TargetImage string
TargetRegistry string
IsChangeTargetRegistry bool
isSame bool
targetClientset *kubernetes.Clientset
targetRestclient *rest.RESTClient
targetConfig *rest.Config
targetFactory cmdutil.Factory
clientset *kubernetes.Clientset
restclient *rest.RESTClient
config *rest.Config
factory cmdutil.Factory
}
func (d *DuplicateOptions) InitClient(f cmdutil.Factory) (err error) {
d.factory = f
if d.config, err = d.factory.ToRESTConfig(); err != nil {
return
}
if d.restclient, err = d.factory.RESTClient(); err != nil {
return
}
if d.clientset, err = d.factory.KubernetesClientSet(); err != nil {
return
}
if d.Namespace, _, err = d.factory.ToRawKubeConfigLoader().Namespace(); err != nil {
return
}
// init target info
if len(d.TargetKubeconfig) == 0 {
d.targetFactory = d.factory
d.targetClientset = d.clientset
d.targetConfig = d.config
d.targetRestclient = d.restclient
if len(d.TargetNamespace) == 0 {
d.TargetNamespace = d.Namespace
d.isSame = true
}
return
}
configFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
configFlags.KubeConfig = pointer.String(d.TargetKubeconfig)
configFlags.Namespace = pointer.String(d.TargetNamespace)
matchVersionFlags := cmdutil.NewMatchVersionFlags(configFlags)
d.targetFactory = cmdutil.NewFactory(matchVersionFlags)
loader := d.targetFactory.ToRawKubeConfigLoader()
var found bool
d.TargetNamespace, found, err = loader.Namespace()
if err != nil || !found {
d.TargetNamespace = d.Namespace
}
d.targetClientset, err = d.targetFactory.KubernetesClientSet()
return
}
// DoDuplicate
/*
* 1) download mount path use empty-dir but skip empty-dir in init-containers
* 2) get env from containers
* 3) create serviceAccount as needed
* 4) modify podTempSpec inject kubevpn container
*/
func (d *DuplicateOptions) DoDuplicate(ctx context.Context) error {
rawConfig, err := d.targetFactory.ToRawKubeConfigLoader().RawConfig()
if err != nil {
return err
}
err = api.FlattenConfig(&rawConfig)
if err != nil {
return err
}
rawConfig.SetGroupVersionKind(schema.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"})
var convertedObj runtime.Object
convertedObj, err = latest.Scheme.ConvertToVersion(&rawConfig, latest.ExternalVersion)
if err != nil {
return err
}
var kubeconfigJsonBytes []byte
kubeconfigJsonBytes, err = json.Marshal(convertedObj)
if err != nil {
return err
}
for _, workload := range d.Workloads {
var object *runtimeresource.Info
object, err = util.GetUnstructuredObject(d.factory, d.Namespace, workload)
if err != nil {
return err
}
u := object.Object.(*unstructured.Unstructured)
u.SetNamespace(d.TargetNamespace)
RemoveUselessInfo(u)
var newUUID uuid.UUID
newUUID, err = uuid.NewUUID()
if err != nil {
return err
}
u.SetName(fmt.Sprintf("%s-dup-%s", u.GetName(), newUUID.String()[:5]))
// if is another cluster, needs to set volume and set env
if !d.isSame {
if err = d.setVolume(u); err != nil {
return err
}
if err = d.setEnv(u); err != nil {
return err
}
}
labelsMap := map[string]string{
config.ManageBy: config.ConfigMapPodTrafficManager,
"owner-ref": u.GetName(),
}
var path []string
_, path, err = util.GetPodTemplateSpecPath(u)
if err != nil {
return err
}
err = unstructured.SetNestedStringMap(u.Object, labelsMap, "spec", "selector", "matchLabels")
if err != nil {
return err
}
var client dynamic.Interface
client, err = d.targetFactory.DynamicClient()
if err != nil {
return err
}
RollbackFuncList = append(RollbackFuncList, func() {
_ = client.Resource(object.Mapping.Resource).Namespace(d.TargetNamespace).Delete(context.Background(), u.GetName(), metav1.DeleteOptions{})
})
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
var volumesPath = append(path, "spec", "volumes")
var containersPath = append(path, "spec", "containers")
var annotationPath = append(path, "metadata", "annotations")
var labelsPath = append(path, "metadata", "labels")
// (1) add annotation KUBECONFIG
stringMap, found, err := unstructured.NestedStringMap(u.Object, annotationPath...)
if err != nil {
return err
}
if !found {
stringMap = map[string]string{}
}
stringMap[config.KUBECONFIG] = string(kubeconfigJsonBytes)
if err = unstructured.SetNestedStringMap(u.Object, stringMap, annotationPath...); err != nil {
return err
}
// (2) modify labels
if err = unstructured.SetNestedStringMap(u.Object, labelsMap, labelsPath...); err != nil {
return err
}
// (3) add volumes KUBECONFIG
volumes, found, err := unstructured.NestedSlice(u.Object, volumesPath...)
if err != nil {
return err
}
if !found {
volumes = []interface{}{}
}
volume := &v1.Volume{
Name: config.KUBECONFIG,
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{
Items: []v1.DownwardAPIVolumeFile{{
Path: config.KUBECONFIG,
FieldRef: &v1.ObjectFieldSelector{
FieldPath: fmt.Sprintf("metadata.annotations['%s']", config.KUBECONFIG),
},
}},
},
},
}
marshal, err := json.Marshal(volume)
v := unstructured.Unstructured{}
err = v.UnmarshalJSON(marshal)
if err = unstructured.SetNestedSlice(u.Object, append(volumes, v.Object), volumesPath...); err != nil {
return err
}
// (4) add kubevpn containers
containers, found, err := unstructured.NestedSlice(u.Object, containersPath...)
if err != nil || !found || containers == nil {
return fmt.Errorf("deployment containers not found or error in spec: %v", err)
}
if d.TargetImage != "" {
var index = -1
if d.TargetContainer != "" {
for i, container := range containers {
nestedString, _, err := unstructured.NestedString(container.(map[string]interface{}), "name")
if err == nil && nestedString == d.TargetContainer {
index = i
break
}
}
} else {
index = 0
}
if index < 0 {
return fmt.Errorf("can not found container %s in pod template", d.TargetContainer)
}
// update container[index] image
if err = unstructured.SetNestedField(containers[index].(map[string]interface{}), d.TargetImage, "image"); err != nil {
return err
}
}
container := &v1.Container{
Name: config.ContainerSidecarVPN,
Image: config.Image,
Command: []string{
"kubevpn",
"proxy",
workload,
"--kubeconfig", "/tmp/.kube/" + config.KUBECONFIG,
"--namespace", d.Namespace,
"--headers", labels.Set(d.Headers).String(),
"--image", config.Image,
},
Args: nil,
Resources: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("1000m"),
v1.ResourceMemory: resource.MustParse("1024Mi"),
},
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("2048Mi"),
},
},
VolumeMounts: []v1.VolumeMount{{
Name: config.KUBECONFIG,
ReadOnly: false,
MountPath: "/tmp/.kube",
}},
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: &v1.SecurityContext{
Capabilities: &v1.Capabilities{
Add: []v1.Capability{
"NET_ADMIN",
},
},
RunAsUser: pointer.Int64(0),
Privileged: pointer.Bool(true),
},
}
marshal, err = json.Marshal(container)
v = unstructured.Unstructured{}
err = v.UnmarshalJSON(marshal)
if err = unstructured.SetNestedField(u.Object, append(containers, v.Object), containersPath...); err != nil {
return err
}
if err = d.replaceRegistry(u); err != nil {
return err
}
_, createErr := client.Resource(object.Mapping.Resource).Namespace(d.TargetNamespace).Create(context.Background(), u, metav1.CreateOptions{})
//_, createErr := runtimeresource.NewHelper(object.Client, object.Mapping).Create(d.TargetNamespace, true, u)
return createErr
})
if retryErr != nil {
return fmt.Errorf("create duplidate for resource %s failed: %v", workload, retryErr)
}
err = util.WaitPodToBeReady(ctx, d.targetClientset.CoreV1().Pods(d.TargetNamespace), metav1.LabelSelector{MatchLabels: labelsMap})
if err != nil {
return err
}
_ = util.RolloutStatus(ctx, d.factory, d.Namespace, workload, time.Minute*60)
}
return nil
}
// RemoveUselessInfo strips server-populated and cluster-specific metadata
// (status, managedFields, resourceVersion, creationTimestamp, uid, generation,
// and the kubectl last-applied-configuration annotation) from u so the object
// can be re-created in another namespace or cluster. It is a no-op for nil input.
func RemoveUselessInfo(u *unstructured.Unstructured) {
	if u == nil {
		return
	}
	// RemoveNestedField deletes the key entirely. The previous
	// delete(u.Object, "status") followed by SetNestedField(u.Object, nil,
	// "status") re-added the key with a nil value, which serialized as
	// `"status": null` instead of omitting the field.
	unstructured.RemoveNestedField(u.Object, "status")
	u.SetManagedFields(nil)
	u.SetResourceVersion("")
	u.SetCreationTimestamp(metav1.NewTime(time.Time{}))
	u.SetUID("")
	u.SetGeneration(0)
	a := u.GetAnnotations()
	if len(a) == 0 {
		a = map[string]string{}
	}
	delete(a, "kubectl.kubernetes.io/last-applied-configuration")
	u.SetAnnotations(a)
}
// setVolume rewrites the pod template's volumes so the duplicated workload can
// run in another cluster:
//  1. every non-emptyDir volume is replaced by an emptyDir,
//  2. one init container per volume downloads the original content from a
//     running source pod (via `kubevpn cp`) into that emptyDir,
//  3. the serviceaccount token mount is propagated to every container so the
//     original token content keeps being available at the conventional path.
func (d *DuplicateOptions) setVolume(u *unstructured.Unstructured) error {
	const TokenVolumeMountPath = "/var/run/secrets/kubernetes.io/serviceaccount"
	type VolumeMountContainerPair struct {
		container   v1.Container
		volumeMount v1.VolumeMount
	}
	temp, path, err := util.GetPodTemplateSpecPath(u)
	if err != nil {
		return err
	}
	// prefer the newest active pod; pods that are being deleted are filtered out
	sortBy := func(pods []*v1.Pod) sort.Interface {
		for i := 0; i < len(pods); i++ {
			if pods[i].DeletionTimestamp != nil {
				pods = append(pods[:i], pods[i+1:]...)
				i--
			}
		}
		return sort.Reverse(podutils.ActivePods(pods))
	}
	lab := labels.SelectorFromSet(temp.Labels).String()
	pod, _, err := polymorphichelpers.GetFirstPod(d.clientset.CoreV1(), d.Namespace, lab, time.Second*60, sortBy)
	if err != nil {
		return err
	}
	// remove serviceAccount info: the account does not exist in the target cluster
	temp.Spec.ServiceAccountName = ""
	temp.Spec.DeprecatedServiceAccount = ""
	temp.Spec.AutomountServiceAccountToken = pointer.Bool(false)
	var volumeMap = make(map[string]v1.Volume)
	var volumeList []v1.Volume
	// pod's volumes may be more than the spec defines (e.g. injected token volumes)
	for _, volume := range pod.Spec.Volumes {
		volumeMap[volume.Name] = volume
		if volume.EmptyDir != nil {
			// keep emptyDir volumes as-is
			volumeList = append(volumeList, volume)
		} else {
			// every other volume type becomes an emptyDir that an init
			// container fills with the content copied from the source pod
			volumeList = append(volumeList, v1.Volume{
				Name: volume.Name,
				VolumeSource: v1.VolumeSource{
					EmptyDir: &v1.EmptyDirVolumeSource{},
				},
			})
		}
	}
	var tokenVolume string
	var volumeM = make(map[string][]VolumeMountContainerPair)
	for _, container := range pod.Spec.Containers {
		// group by volume name: to restore a volume's full content we need
		// every (container, mountPath, subPath) that mounts it
		for _, volumeMount := range container.VolumeMounts {
			if volumeMap[volumeMount.Name].EmptyDir != nil {
				continue
			}
			if volumeMount.MountPath == TokenVolumeMountPath {
				tokenVolume = volumeMount.Name
			}
			// append to a nil slice is fine; the previous explicit
			// empty-slice initialization was redundant
			volumeM[volumeMount.Name] = append(volumeM[volumeMount.Name], VolumeMountContainerPair{
				container:   container,
				volumeMount: volumeMount,
			})
		}
	}
	var initContainer []v1.Container
	for _, volume := range pod.Spec.Volumes {
		mountPoint := "/tmp/" + volume.Name
		var args []string
		for _, pair := range volumeM[volume.Name] {
			remote := filepath.Join(pair.volumeMount.MountPath, pair.volumeMount.SubPath)
			local := filepath.Join(mountPoint, pair.volumeMount.SubPath)
			// kubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar
			args = append(args,
				fmt.Sprintf("kubevpn cp %s/%s:%s %s -c %s", pod.Namespace, pod.Name, remote, local, pair.container.Name),
			)
		}
		// no mounts found: the volume may only be used by initContainers,
		// which are removed because their work cannot be replayed remotely
		if len(args) == 0 {
			for i := 0; i < len(temp.Spec.InitContainers); i++ {
				for _, mount := range temp.Spec.InitContainers[i].VolumeMounts {
					if mount.Name == volume.Name {
						// remove useless initContainer
						temp.Spec.InitContainers = append(temp.Spec.InitContainers[:i], temp.Spec.InitContainers[i+1:]...)
						i--
						break
					}
				}
			}
			continue
		}
		newContainer := v1.Container{
			// fix: was fmt.Sprintf("download-" + volume.Name) — a
			// non-constant format string with no arguments (go vet printf);
			// plain concatenation is what was intended
			Name:       "download-" + volume.Name,
			Image:      config.Image,
			Command:    []string{"sh", "-c"},
			Args:       []string{strings.Join(args, "&&")},
			WorkingDir: "/tmp",
			Env: []v1.EnvVar{
				{
					Name:  clientcmd.RecommendedConfigPathEnvVar,
					Value: "/tmp/.kube/kubeconfig",
				},
			},
			Resources: v1.ResourceRequirements{},
			VolumeMounts: []v1.VolumeMount{
				{
					Name:      volume.Name,
					MountPath: mountPoint,
				},
				{
					Name:      config.KUBECONFIG,
					ReadOnly:  false,
					MountPath: "/tmp/.kube",
				},
			},
			ImagePullPolicy: v1.PullIfNotPresent,
		}
		initContainer = append(initContainer, newContainer)
	}
	// put download containers in front so volumes are populated before the
	// workload's own init containers run
	temp.Spec.InitContainers = append(initContainer, temp.Spec.InitContainers...)
	// replace the old volume list with the emptyDir-based one
	temp.Spec.Volumes = volumeList
	// remove containers vpn and envoy-proxy
	mesh.RemoveContainers(temp)
	// mount the copied serviceaccount token into every container that does
	// not already have something mounted at the conventional token path
	if tokenVolume != "" {
		for i := 0; i < len(temp.Spec.Containers); i++ {
			var found bool
			for _, mount := range temp.Spec.Containers[i].VolumeMounts {
				if mount.MountPath == TokenVolumeMountPath {
					found = true
					break
				}
			}
			if !found {
				temp.Spec.Containers[i].VolumeMounts = append(temp.Spec.Containers[i].VolumeMounts, v1.VolumeMount{
					Name:      tokenVolume,
					MountPath: TokenVolumeMountPath,
				})
			}
		}
	}
	// write the modified PodSpec back into the unstructured object
	var marshal []byte
	if marshal, err = json.Marshal(temp.Spec); err != nil {
		return err
	}
	var content map[string]interface{}
	if err = json.Unmarshal(marshal, &content); err != nil {
		return err
	}
	if err = unstructured.SetNestedField(u.Object, content, append(path, "spec")...); err != nil {
		return err
	}
	return nil
}
// setEnv inlines every env value that references a ConfigMap or Secret from
// the source namespace directly into the container specs (resolving both
// envFrom sources and valueFrom key refs), so the duplicated workload does not
// depend on those objects existing in the target cluster. Lookups are
// best-effort: unresolvable references are left untouched.
func (d *DuplicateOptions) setEnv(u *unstructured.Unstructured) error {
	temp, path, err := util.GetPodTemplateSpecPath(u)
	if err != nil {
		return err
	}
	var secretMap = make(map[string]*v1.Secret)
	var configmapMap = make(map[string]*v1.ConfigMap)
	// fetch-once helpers; a failed Get is deliberately ignored so that a
	// missing object simply leaves the original reference in place
	var howToGetCm = func(name string) {
		if configmapMap[name] == nil {
			cm, err := d.clientset.CoreV1().ConfigMaps(d.Namespace).Get(context.Background(), name, metav1.GetOptions{})
			if err == nil {
				configmapMap[name] = cm
			}
		}
	}
	var howToGetSecret = func(name string) {
		// fix: previously this checked configmapMap[name], so the secret
		// cache was never consulted and every reference re-fetched the secret
		if secretMap[name] == nil {
			secret, err := d.clientset.CoreV1().Secrets(d.Namespace).Get(context.Background(), name, metav1.GetOptions{})
			if err == nil {
				secretMap[name] = secret
			}
		}
	}
	// get all referenced configmaps and secrets
	for _, container := range temp.Spec.Containers {
		for _, envVar := range container.Env {
			if envVar.ValueFrom != nil {
				if ref := envVar.ValueFrom.ConfigMapKeyRef; ref != nil {
					howToGetCm(ref.Name)
				}
				if ref := envVar.ValueFrom.SecretKeyRef; ref != nil {
					howToGetSecret(ref.Name)
				}
			}
		}
		for _, source := range container.EnvFrom {
			if ref := source.ConfigMapRef; ref != nil {
				// the helper itself is idempotent, no outer cache check needed
				howToGetCm(ref.Name)
			}
			if ref := source.SecretRef; ref != nil {
				howToGetSecret(ref.Name)
			}
		}
	}
	// resolve real values from the fetched secrets and configmaps
	for i := 0; i < len(temp.Spec.Containers); i++ {
		container := temp.Spec.Containers[i]
		var envVars []v1.EnvVar
		for _, envFromSource := range container.EnvFrom {
			if ref := envFromSource.ConfigMapRef; ref != nil && configmapMap[ref.Name] != nil {
				cm := configmapMap[ref.Name]
				for k, v := range cm.Data {
					if strings.HasPrefix(k, envFromSource.Prefix) {
						envVars = append(envVars, v1.EnvVar{
							Name:  k,
							Value: v,
						})
					}
				}
			}
			if ref := envFromSource.SecretRef; ref != nil && secretMap[ref.Name] != nil {
				secret := secretMap[ref.Name]
				for k, v := range secret.Data {
					if strings.HasPrefix(k, envFromSource.Prefix) {
						envVars = append(envVars, v1.EnvVar{
							Name:  k,
							Value: string(v),
						})
					}
				}
			}
		}
		// envFrom has been materialized into plain Env entries
		temp.Spec.Containers[i].EnvFrom = nil
		temp.Spec.Containers[i].Env = append(temp.Spec.Containers[i].Env, envVars...)
		for j, envVar := range container.Env {
			if envVar.ValueFrom != nil {
				if ref := envVar.ValueFrom.ConfigMapKeyRef; ref != nil {
					if configMap := configmapMap[ref.Name]; configMap != nil {
						temp.Spec.Containers[i].Env[j].Value = configMap.Data[ref.Key]
						temp.Spec.Containers[i].Env[j].ValueFrom = nil
					}
				}
				if ref := envVar.ValueFrom.SecretKeyRef; ref != nil {
					if secret := secretMap[ref.Name]; secret != nil {
						temp.Spec.Containers[i].Env[j].Value = string(secret.Data[ref.Key])
						temp.Spec.Containers[i].Env[j].ValueFrom = nil
					}
				}
			}
		}
	}
	// write the modified PodSpec back into the unstructured object
	var marshal []byte
	if marshal, err = json.Marshal(temp.Spec); err != nil {
		return err
	}
	var content map[string]interface{}
	if err = json.Unmarshal(marshal, &content); err != nil {
		return err
	}
	if err = unstructured.SetNestedField(u.Object, content, append(path, "spec")...); err != nil {
		return err
	}
	return nil
}
// replaceRegistry rewrites the registry (domain) part of every init container
// and container image in the pod template to d.TargetRegistry, so images can
// be pulled in the target cluster. It is a no-op unless the change-registry
// option was passed.
func (d *DuplicateOptions) replaceRegistry(u *unstructured.Unstructured) error {
	// not pass this option, do nothing
	if !d.IsChangeTargetRegistry {
		return nil
	}
	temp, path, err := util.GetPodTemplateSpecPath(u)
	if err != nil {
		return err
	}
	// swapRegistry replaces the registry of image with d.TargetRegistry,
	// keeping the repository path, tag and digest intact.
	swapRegistry := func(image string) (string, error) {
		named, err := reference.ParseNormalizedNamed(image)
		if err != nil {
			return "", err
		}
		domain := reference.Domain(named)
		// fix: operate on the fully-normalized name so images without an
		// explicit registry (e.g. "nginx:latest", whose normalized domain
		// "docker.io" never appears in the raw string) are rewritten too,
		// and replace only the first (leading) occurrence of the domain so
		// a coincidental match later in the path is never touched.
		return strings.TrimPrefix(strings.Replace(named.String(), domain, d.TargetRegistry, 1), "/"), nil
	}
	for i, container := range temp.Spec.InitContainers {
		newImage, err := swapRegistry(container.Image)
		if err != nil {
			return err
		}
		temp.Spec.InitContainers[i].Image = newImage
		log.Debugf("update init container: %s image: %s --> %s", container.Name, container.Image, newImage)
	}
	for i, container := range temp.Spec.Containers {
		newImage, err := swapRegistry(container.Image)
		if err != nil {
			return err
		}
		temp.Spec.Containers[i].Image = newImage
		log.Debugf("update container: %s image: %s --> %s", container.Name, container.Image, newImage)
	}
	// write the modified PodSpec back into the unstructured object
	var marshal []byte
	if marshal, err = json.Marshal(temp.Spec); err != nil {
		return err
	}
	var content map[string]interface{}
	if err = json.Unmarshal(marshal, &content); err != nil {
		return err
	}
	if err = unstructured.SetNestedField(u.Object, content, append(path, "spec")...); err != nil {
		return err
	}
	return nil
}

View File

@@ -16,6 +16,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
pkgresource "k8s.io/cli-runtime/pkg/resource"
runtimeresource "k8s.io/cli-runtime/pkg/resource"
v12 "k8s.io/client-go/kubernetes/typed/core/v1"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"sigs.k8s.io/yaml"
@@ -28,22 +29,23 @@ import (
// https://istio.io/latest/docs/ops/deployment/requirements/#ports-used-by-istio
// patch a sidecar, using iptables to do port-forward let this pod decide should go to 233.254.254.100 or request to 127.0.0.1
func InjectVPNAndEnvoySidecar(factory cmdutil.Factory, clientset v12.ConfigMapInterface, namespace, workloads string, c util.PodRouteConfig, headers map[string]string) error {
//t := true
//zero := int64(0)
object, err := util.GetUnstructuredObject(factory, namespace, workloads)
// InjectVPNAndEnvoySidecar patch a sidecar, using iptables to do port-forward let this pod decide should go to 233.254.254.100 or request to 127.0.0.1
func InjectVPNAndEnvoySidecar(ctx1 context.Context, factory cmdutil.Factory, clientset v12.ConfigMapInterface, namespace, workloads string, c util.PodRouteConfig, headers map[string]string) (err error) {
var object *runtimeresource.Info
object, err = util.GetUnstructuredObject(factory, namespace, workloads)
if err != nil {
return err
}
u := object.Object.(*unstructured.Unstructured)
templateSpec, path, err := util.GetPodTemplateSpecPath(u)
var templateSpec *v1.PodTemplateSpec
var path []string
templateSpec, path, err = util.GetPodTemplateSpecPath(u)
if err != nil {
return err
}
origin := *templateSpec
origin := templateSpec.DeepCopy()
var port []v1.ContainerPort
for _, container := range templateSpec.Spec.Containers {
@@ -51,14 +53,14 @@ func InjectVPNAndEnvoySidecar(factory cmdutil.Factory, clientset v12.ConfigMapIn
}
nodeID := fmt.Sprintf("%s.%s", object.Mapping.Resource.GroupResource().String(), object.Name)
err = addEnvoyConfig(clientset, nodeID, c.LocalTunIP, headers, port)
err = addEnvoyConfig(clientset, nodeID, c, headers, port)
if err != nil {
log.Warnln(err)
return err
}
// already inject container vpn and envoy-proxy, do nothing
containerNames := sets.NewString()
containerNames := sets.New[string]()
for _, container := range templateSpec.Spec.Containers {
containerNames.Insert(container.Name)
}
@@ -73,8 +75,13 @@ func InjectVPNAndEnvoySidecar(factory cmdutil.Factory, clientset v12.ConfigMapIn
return nil
}
// (1) add mesh container
removePatch, restorePatch := patch(origin, path)
b, _ := json.Marshal(restorePatch)
removePatch, restorePatch := patch(*origin, path)
var b []byte
b, err = json.Marshal(restorePatch)
if err != nil {
return err
}
mesh.AddMeshContainer(templateSpec, nodeID, c)
helper := pkgresource.NewHelper(object.Client, object.Mapping)
ps := []P{
@@ -89,7 +96,8 @@ func InjectVPNAndEnvoySidecar(factory cmdutil.Factory, clientset v12.ConfigMapIn
Value: b,
},
}
bytes, err := json.Marshal(append(ps, removePatch...))
var bytes []byte
bytes, err = json.Marshal(append(ps, removePatch...))
if err != nil {
return err
}
@@ -100,17 +108,18 @@ func InjectVPNAndEnvoySidecar(factory cmdutil.Factory, clientset v12.ConfigMapIn
}
RollbackFuncList = append(RollbackFuncList, func() {
if err = UnPatchContainer(factory, clientset, namespace, workloads, headers); err != nil {
if err := UnPatchContainer(factory, clientset, namespace, workloads, headers); err != nil {
log.Error(err)
}
})
_ = util.RolloutStatus(factory, namespace, workloads, time.Minute*5)
if err != nil {
return err
}
err = util.RolloutStatus(ctx1, factory, namespace, workloads, time.Minute*60)
return err
}
func UnPatchContainer(factory cmdutil.Factory, mapInterface v12.ConfigMapInterface, namespace, workloads string, headers map[string]string) error {
//t := true
//zero := int64(0)
object, err := util.GetUnstructuredObject(factory, namespace, workloads)
if err != nil {
return err
@@ -134,7 +143,8 @@ func UnPatchContainer(factory cmdutil.Factory, mapInterface v12.ConfigMapInterfa
if empty {
mesh.RemoveContainers(templateSpec)
helper := pkgresource.NewHelper(object.Client, object.Mapping)
bytes, err := json.Marshal([]struct {
var bytes []byte
bytes, err = json.Marshal([]struct {
Op string `json:"op"`
Path string `json:"path"`
Value interface{} `json:"value"`
@@ -151,12 +161,15 @@ func UnPatchContainer(factory cmdutil.Factory, mapInterface v12.ConfigMapInterfa
return err
}
_, err = helper.Patch(object.Namespace, object.Name, types.JSONPatchType, bytes, &metav1.PatchOptions{})
if err != nil {
return err
}
}
return err
}
func addEnvoyConfig(mapInterface v12.ConfigMapInterface, nodeID string, localTUNIP string, headers map[string]string, port []v1.ContainerPort) error {
configMap, err := mapInterface.Get(context.TODO(), config.ConfigMapPodTrafficManager, metav1.GetOptions{})
func addEnvoyConfig(mapInterface v12.ConfigMapInterface, nodeID string, tunIP util.PodRouteConfig, headers map[string]string, port []v1.ContainerPort) error {
configMap, err := mapInterface.Get(context.Background(), config.ConfigMapPodTrafficManager, metav1.GetOptions{})
if err != nil {
return err
}
@@ -178,15 +191,20 @@ func addEnvoyConfig(mapInterface v12.ConfigMapInterface, nodeID string, localTUN
Uid: nodeID,
Ports: port,
Rules: []*controlplane.Rule{{
Headers: headers,
LocalTunIP: localTUNIP,
Headers: headers,
LocalTunIPv4: tunIP.LocalTunIPv4,
LocalTunIPv6: tunIP.LocalTunIPv6,
}},
})
} else {
v[index].Rules = append(v[index].Rules, &controlplane.Rule{
Headers: headers,
LocalTunIP: localTUNIP,
Headers: headers,
LocalTunIPv4: tunIP.LocalTunIPv4,
LocalTunIPv6: tunIP.LocalTunIPv6,
})
if v[index].Ports == nil {
v[index].Ports = port
}
}
marshal, err := yaml.Marshal(v)
@@ -199,7 +217,7 @@ func addEnvoyConfig(mapInterface v12.ConfigMapInterface, nodeID string, localTUN
}
func removeEnvoyConfig(mapInterface v12.ConfigMapInterface, nodeID string, headers map[string]string) (bool, error) {
configMap, err := mapInterface.Get(context.TODO(), config.ConfigMapPodTrafficManager, metav1.GetOptions{})
configMap, err := mapInterface.Get(context.Background(), config.ConfigMapPodTrafficManager, metav1.GetOptions{})
if k8serrors.IsNotFound(err) {
return true, nil
}
@@ -233,11 +251,12 @@ func removeEnvoyConfig(mapInterface v12.ConfigMapInterface, nodeID string, heade
empty = true
}
}
marshal, err := yaml.Marshal(v)
var bytes []byte
bytes, err = yaml.Marshal(v)
if err != nil {
return false, err
}
configMap.Data[config.KeyEnvoy] = string(marshal)
configMap.Data[config.KeyEnvoy] = string(bytes)
_, err = mapInterface.Update(context.Background(), configMap, metav1.UpdateOptions{})
return empty, err
}

View File

@@ -1,4 +1,4 @@
package main
package handler
import (
"context"
@@ -15,7 +15,7 @@ import (
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/cli-runtime/pkg/genericclioptions"
@@ -32,14 +32,11 @@ var (
namespace string
clientset *kubernetes.Clientset
restclient *rest.RESTClient
config *rest.Config
cancelFunc context.CancelFunc
restconfig *rest.Config
)
func TestFunctions(t *testing.T) {
kubevpnConnect(t)
t.Cleanup(cancelFunc)
t.Parallel()
t.Run(runtime.FuncForPC(reflect.ValueOf(pingPodIP).Pointer()).Name(), pingPodIP)
t.Run(runtime.FuncForPC(reflect.ValueOf(dialUDP).Pointer()).Name(), dialUDP)
t.Run(runtime.FuncForPC(reflect.ValueOf(healthCheckPod).Pointer()).Name(), healthCheckPod)
@@ -49,9 +46,7 @@ func TestFunctions(t *testing.T) {
}
func pingPodIP(t *testing.T) {
ctx, f := context.WithTimeout(context.TODO(), time.Second*60)
defer f()
list, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
list, err := clientset.CoreV1().Pods(namespace).List(context.Background(), v1.ListOptions{})
if err != nil {
t.Error(err)
}
@@ -74,44 +69,69 @@ func pingPodIP(t *testing.T) {
}
func healthCheckPod(t *testing.T) {
podList, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{
LabelSelector: fields.OneTermEqualSelector("app", "productpage").String(),
var app = "authors"
podList, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), v1.ListOptions{
LabelSelector: fields.OneTermEqualSelector("app", app).String(),
})
if err != nil {
t.Error(err)
}
if len(podList.Items) == 0 {
t.Error("can not found pods of product page")
t.Error("can not found pods of authors")
}
endpoint := fmt.Sprintf("http://%s:%v/health", podList.Items[0].Status.PodIP, podList.Items[0].Spec.Containers[0].Ports[0].ContainerPort)
req, _ := http.NewRequest("GET", endpoint, nil)
res, err := http.DefaultClient.Do(req)
if err != nil {
t.Error(err)
return
}
if res == nil || res.StatusCode != 200 {
t.Errorf("health check not pass")
return
for _, pod := range podList.Items {
pod := pod
if pod.Status.Phase != corev1.PodRunning {
continue
}
endpoint := fmt.Sprintf("http://%s:%v/health", pod.Status.PodIP, pod.Spec.Containers[0].Ports[0].ContainerPort)
req, _ := http.NewRequest("GET", endpoint, nil)
var res *http.Response
err = retry.OnError(
wait.Backoff{Duration: time.Second, Factor: 2, Jitter: 0.2, Steps: 5},
func(err error) bool {
return err != nil
},
func() error {
res, err = http.DefaultClient.Do(req)
return err
},
)
if err != nil {
t.Error(err)
}
if res == nil || res.StatusCode != 200 {
t.Errorf("health check not pass")
}
}
}
func healthCheckService(t *testing.T) {
serviceList, err := clientset.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{
LabelSelector: fields.OneTermEqualSelector("app", "productpage").String(),
var app = "authors"
serviceList, err := clientset.CoreV1().Services(namespace).List(context.TODO(), v1.ListOptions{
LabelSelector: fields.OneTermEqualSelector("app", app).String(),
})
if err != nil {
t.Error(err)
}
if len(serviceList.Items) == 0 {
t.Error("can not found pods of product page")
t.Error("can not found pods of authors")
}
endpoint := fmt.Sprintf("http://%s:%v/health", serviceList.Items[0].Spec.ClusterIP, serviceList.Items[0].Spec.Ports[0].Port)
req, _ := http.NewRequest("GET", endpoint, nil)
res, err := http.DefaultClient.Do(req)
var res *http.Response
err = retry.OnError(
wait.Backoff{Duration: time.Second, Factor: 2, Jitter: 0.2, Steps: 5},
func(err error) bool {
return err != nil
},
func() error {
res, err = http.DefaultClient.Do(req)
return err
},
)
if err != nil {
t.Error(err)
return
}
if res == nil || res.StatusCode != 200 {
t.Errorf("health check not pass")
@@ -120,8 +140,8 @@ func healthCheckService(t *testing.T) {
}
func shortDomain(t *testing.T) {
var app = "productpage"
serviceList, err := clientset.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{
var app = "authors"
serviceList, err := clientset.CoreV1().Services(namespace).List(context.TODO(), v1.ListOptions{
LabelSelector: fields.OneTermEqualSelector("app", app).String(),
})
if err != nil {
@@ -132,20 +152,28 @@ func shortDomain(t *testing.T) {
}
endpoint := fmt.Sprintf("http://%s:%v/health", app, serviceList.Items[0].Spec.Ports[0].Port)
req, _ := http.NewRequest("GET", endpoint, nil)
res, err := http.DefaultClient.Do(req)
var res *http.Response
err = retry.OnError(
wait.Backoff{Duration: time.Second, Factor: 2, Jitter: 0.2, Steps: 5},
func(err error) bool {
return err != nil
},
func() error {
res, err = http.DefaultClient.Do(req)
return err
},
)
if err != nil {
t.Error(err)
return
}
if res == nil || res.StatusCode != 200 {
t.Errorf("health check not pass")
return
}
}
func fullDomain(t *testing.T) {
var app = "productpage"
serviceList, err := clientset.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{
var app = "authors"
serviceList, err := clientset.CoreV1().Services(namespace).List(context.TODO(), v1.ListOptions{
LabelSelector: fields.OneTermEqualSelector("app", app).String(),
})
if err != nil {
@@ -156,10 +184,19 @@ func fullDomain(t *testing.T) {
}
endpoint := fmt.Sprintf("http://%s:%v/health", fmt.Sprintf("%s.%s.svc.cluster.local", app, namespace), serviceList.Items[0].Spec.Ports[0].Port)
req, _ := http.NewRequest("GET", endpoint, nil)
res, err := http.DefaultClient.Do(req)
var res *http.Response
err = retry.OnError(
wait.Backoff{Duration: time.Second, Factor: 2, Jitter: 0.2, Steps: 5},
func(err error) bool {
return err != nil
},
func() error {
res, err = http.DefaultClient.Do(req)
return err
},
)
if err != nil {
t.Error(err)
return
}
if res == nil || res.StatusCode != 200 {
t.Errorf("health check not pass")
@@ -171,7 +208,7 @@ func dialUDP(t *testing.T) {
port := util.GetAvailableUDPPortOrDie()
go server(port)
list, err := clientset.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{
list, err := clientset.CoreV1().Pods(namespace).List(context.Background(), v1.ListOptions{
LabelSelector: fields.OneTermEqualSelector("app", "reviews").String(),
})
if err != nil {
@@ -186,6 +223,7 @@ func dialUDP(t *testing.T) {
}
if len(ip) == 0 {
t.Errorf("can not found pods for service reviews")
return
}
log.Printf("dail udp to ip: %s", ip)
if err = retry.OnError(
@@ -193,13 +231,13 @@ func dialUDP(t *testing.T) {
func(err error) bool {
return err != nil
}, func() error {
return client(ip, port)
return udpclient(ip, port)
}); err != nil {
t.Errorf("can not access pod ip: %s, port: %v", ip, port)
}
}
func client(ip string, port int) error {
func udpclient(ip string, port int) error {
udpConn, err := net.DialUDP("udp4", nil, &net.UDPAddr{
IP: net.ParseIP(ip),
Port: port,
@@ -266,22 +304,25 @@ func server(port int) {
}
func kubevpnConnect(t *testing.T) {
var ctx context.Context
ctx, cancelFunc = context.WithCancel(context.TODO())
ctx, cancel := context.WithTimeout(ctx, 2*time.Hour)
ctx2, timeoutFunc := context.WithTimeout(context.Background(), 2*time.Hour)
cmd := exec.CommandContext(ctx, "kubevpn", "connect", "--debug", "--workloads", "deployments/reviews")
cmd := exec.Command("kubevpn", "proxy", "--debug", "deployments/reviews")
go func() {
_, _, err := util.RunWithRollingOutWithChecker(cmd, func(log string) {
if strings.Contains(log, "dns service ok") {
cancel()
stdout, stderr, err := util.RunWithRollingOutWithChecker(cmd, func(log string) {
ok := strings.Contains(log, "dns service ok")
if ok {
timeoutFunc()
}
})
defer timeoutFunc()
if err != nil {
t.Log(err)
t.Log(stdout, stderr)
t.Error(err)
t.Fail()
return
}
}()
<-ctx.Done()
<-ctx2.Done()
}
func init() {
@@ -291,16 +332,32 @@ func init() {
configFlags.KubeConfig = &clientcmd.RecommendedHomeFile
f := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(configFlags))
if config, err = f.ToRESTConfig(); err != nil {
if restconfig, err = f.ToRESTConfig(); err != nil {
log.Fatal(err)
}
if restclient, err = rest.RESTClientFor(config); err != nil {
if restclient, err = rest.RESTClientFor(restconfig); err != nil {
log.Fatal(err)
}
if clientset, err = kubernetes.NewForConfig(config); err != nil {
if clientset, err = kubernetes.NewForConfig(restconfig); err != nil {
log.Fatal(err)
}
if namespace, _, err = f.ToRawKubeConfigLoader().Namespace(); err != nil {
log.Fatal(err)
}
}
func TestWaitBackoff(t *testing.T) {
var last = time.Now()
_ = retry.OnError(
wait.Backoff{
Steps: 10,
Duration: time.Millisecond * 50,
}, func(err error) bool {
return err != nil
}, func() error {
now := time.Now()
fmt.Println(now.Sub(last).String())
last = now
return fmt.Errorf("")
})
}

View File

@@ -1,19 +1,21 @@
package handler
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"k8s.io/utils/pointer"
"net"
"strconv"
"strings"
"time"
log "github.com/sirupsen/logrus"
admissionv1 "k8s.io/api/admissionregistration/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -24,33 +26,124 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
pkgresource "k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/cert"
"k8s.io/client-go/util/retry"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/polymorphichelpers"
"k8s.io/kubectl/pkg/util/podutils"
"k8s.io/utils/pointer"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/exchange"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
func CreateOutboundPod(clientset *kubernetes.Clientset, namespace string, trafficManagerIP string, nodeCIDR []*net.IPNet) (net.IP, error) {
podInterface := clientset.CoreV1().Pods(namespace)
serviceInterface := clientset.CoreV1().Services(namespace)
func createOutboundPod(ctx context.Context, factory cmdutil.Factory, clientset *kubernetes.Clientset, namespace string) (err error) {
innerIpv4CIDR := net.IPNet{IP: config.RouterIP, Mask: config.CIDR.Mask}
innerIpv6CIDR := net.IPNet{IP: config.RouterIP6, Mask: config.CIDR6.Mask}
service, err := serviceInterface.Get(context.Background(), config.ConfigMapPodTrafficManager, metav1.GetOptions{})
if err == nil && service != nil {
log.Infoln("traffic manager already exist, reuse it")
updateServiceRefCount(serviceInterface, service.GetName(), 1)
return net.ParseIP(service.Spec.ClusterIP), nil
service, err := clientset.CoreV1().Services(namespace).Get(ctx, config.ConfigMapPodTrafficManager, metav1.GetOptions{})
if err == nil {
_, err = polymorphichelpers.AttachablePodForObjectFn(factory, service, 2*time.Second)
if err == nil {
_, err = updateRefCount(ctx, clientset.CoreV1().ConfigMaps(namespace), config.ConfigMapPodTrafficManager, 1)
if err != nil {
return
}
log.Infoln("traffic manager already exist, reuse it")
return nil
}
}
var deleteResource = func(ctx context.Context) {
options := metav1.DeleteOptions{}
_ = clientset.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(ctx, config.ConfigMapPodTrafficManager+"."+namespace, options)
_ = clientset.RbacV1().RoleBindings(namespace).Delete(ctx, config.ConfigMapPodTrafficManager, options)
_ = clientset.RbacV1().Roles(namespace).Delete(ctx, config.ConfigMapPodTrafficManager, options)
_ = clientset.CoreV1().ServiceAccounts(namespace).Delete(ctx, config.ConfigMapPodTrafficManager, options)
_ = clientset.CoreV1().Services(namespace).Delete(ctx, config.ConfigMapPodTrafficManager, options)
_ = clientset.AppsV1().Deployments(namespace).Delete(ctx, config.ConfigMapPodTrafficManager, options)
}
defer func() {
if err != nil {
deleteResource(context.Background())
}
}()
deleteResource(context.Background())
log.Infoln("traffic manager not exist, try to create it...")
// 1) label namespace
ns, err := clientset.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
if err != nil {
return err
}
if ns.Labels == nil {
ns.Labels = map[string]string{}
}
ns.Labels["ns"] = namespace
_, err = clientset.CoreV1().Namespaces().Update(ctx, ns, metav1.UpdateOptions{})
if err != nil {
return err
}
// 2) create serviceAccount
_, err = clientset.CoreV1().ServiceAccounts(namespace).Create(ctx, &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: config.ConfigMapPodTrafficManager,
Namespace: namespace,
},
AutomountServiceAccountToken: pointer.Bool(true),
}, metav1.CreateOptions{})
if err != nil {
return err
}
// 3) create roles
_, err = clientset.RbacV1().Roles(namespace).Create(ctx, &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: config.ConfigMapPodTrafficManager,
Namespace: namespace,
},
Rules: []rbacv1.PolicyRule{{
Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"},
APIGroups: []string{""},
Resources: []string{"configmaps", "secrets"},
ResourceNames: []string{config.ConfigMapPodTrafficManager},
}},
}, metav1.CreateOptions{})
if err != nil {
return err
}
// 4) create roleBinding
_, err = clientset.RbacV1().RoleBindings(namespace).Create(ctx, &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: config.ConfigMapPodTrafficManager,
Namespace: namespace,
},
Subjects: []rbacv1.Subject{{
Kind: "ServiceAccount",
//APIGroup: "rbac.authorization.k8s.io",
Name: config.ConfigMapPodTrafficManager,
Namespace: namespace,
}},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: config.ConfigMapPodTrafficManager,
},
}, metav1.CreateOptions{})
if err != nil {
return err
}
udp8422 := "8422-for-udp"
tcp10800 := "10800-for-tcp"
tcp9002 := "9002-for-envoy"
svc, err := serviceInterface.Create(context.Background(), &v1.Service{
tcp80 := "80-for-webhook"
_, err = clientset.CoreV1().Services(namespace).Create(ctx, &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: config.ConfigMapPodTrafficManager,
Namespace: namespace,
Annotations: map[string]string{"ref-count": "1"},
Name: config.ConfigMapPodTrafficManager,
Namespace: namespace,
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
@@ -68,29 +161,66 @@ func CreateOutboundPod(clientset *kubernetes.Clientset, namespace string, traffi
Protocol: v1.ProtocolTCP,
Port: 9002,
TargetPort: intstr.FromInt(9002),
}, {
Name: tcp80,
Protocol: v1.ProtocolTCP,
Port: 80,
TargetPort: intstr.FromInt(80),
}},
Selector: map[string]string{"app": config.ConfigMapPodTrafficManager},
Type: v1.ServiceTypeClusterIP,
},
}, metav1.CreateOptions{})
if err != nil {
return nil, err
}
var s = []string{config.CIDR.String()}
for _, ipNet := range nodeCIDR {
s = append(s, ipNet.String())
return err
}
var Resources = v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("128m"),
v1.ResourceMemory: resource.MustParse("256Mi"),
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("128Mi"),
},
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("256m"),
v1.ResourceCPU: resource.MustParse("200m"),
v1.ResourceMemory: resource.MustParse("256Mi"),
},
}
var ResourcesContainerVPN = v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("500m"),
v1.ResourceMemory: resource.MustParse("512Mi"),
},
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("2000m"),
v1.ResourceMemory: resource.MustParse("2048Mi"),
},
}
domain := util.GetTlsDomain(namespace)
var crt, key []byte
crt, key, err = cert.GenerateSelfSignedCertKey(domain, nil, nil)
if err != nil {
return err
}
// reason why not use v1.SecretTypeTls is because it needs key called tls.crt and tls.key, but tls.key can not as env variable
// ➜ ~ export tls.key=a
//export: not valid in this context: tls.key
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: config.ConfigMapPodTrafficManager,
Namespace: namespace,
},
Data: map[string][]byte{
config.TLSCertKey: crt,
config.TLSPrivateKeyKey: key,
},
Type: v1.SecretTypeOpaque,
}
_, err = clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
deployment := &appsv1.Deployment{
@@ -108,6 +238,7 @@ func CreateOutboundPod(clientset *kubernetes.Clientset, namespace string, traffi
Labels: map[string]string{"app": config.ConfigMapPodTrafficManager},
},
Spec: v1.PodSpec{
ServiceAccountName: config.ConfigMapPodTrafficManager,
Volumes: []v1.Volume{{
Name: config.VolumeEnvoyConfig,
VolumeSource: v1.VolumeSource{
@@ -131,22 +262,43 @@ func CreateOutboundPod(clientset *kubernetes.Clientset, namespace string, traffi
Image: config.Image,
Command: []string{"/bin/sh", "-c"},
Args: []string{`
sysctl net.ipv4.ip_forward=1
sysctl -w net.ipv4.ip_forward=1
sysctl -w net.ipv6.conf.all.disable_ipv6=0
sysctl -w net.ipv6.conf.all.forwarding=1
update-alternatives --set iptables /usr/sbin/iptables-legacy
iptables -F
ip6tables -F
iptables -P INPUT ACCEPT
ip6tables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -t nat -A POSTROUTING -s ${CIDR} -o eth0 -j MASQUERADE
kubevpn serve -L "tcp://:10800" -L "tun://:8422?net=${TrafficManagerIP}" --debug=true`,
ip6tables -P FORWARD ACCEPT
iptables -t nat -A POSTROUTING -s ${CIDR4} -o eth0 -j MASQUERADE
ip6tables -t nat -A POSTROUTING -s ${CIDR6} -o eth0 -j MASQUERADE
kubevpn serve -L "tcp://:10800" -L "tun://:8422?net=${TunIPv4}" -L "gtcp://:10801" -L "gudp://:10802" --debug=true`,
},
EnvFrom: []v1.EnvFromSource{{
SecretRef: &v1.SecretEnvSource{
LocalObjectReference: v1.LocalObjectReference{
Name: config.ConfigMapPodTrafficManager,
},
},
}},
Env: []v1.EnvVar{
{
Name: "CIDR",
Value: strings.Join(s, ","),
Name: "CIDR4",
Value: config.CIDR.String(),
},
{
Name: "TrafficManagerIP",
Value: trafficManagerIP,
Name: "CIDR6",
Value: config.CIDR6.String(),
},
{
Name: config.EnvInboundPodTunIPv4,
Value: innerIpv4CIDR.String(),
},
{
Name: config.EnvInboundPodTunIPv6,
Value: innerIpv6CIDR.String(),
},
},
Ports: []v1.ContainerPort{{
@@ -158,7 +310,7 @@ kubevpn serve -L "tcp://:10800" -L "tun://:8422?net=${TrafficManagerIP}" --debug
ContainerPort: 10800,
Protocol: v1.ProtocolTCP,
}},
Resources: Resources,
Resources: ResourcesContainerVPN,
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: &v1.SecurityContext{
Capabilities: &v1.Capabilities{
@@ -174,8 +326,8 @@ kubevpn serve -L "tcp://:10800" -L "tun://:8422?net=${TrafficManagerIP}" --debug
{
Name: config.ContainerSidecarControlPlane,
Image: config.Image,
Command: []string{"envoy-xds-server"},
Args: []string{"--watchDirectoryFileName", "/etc/envoy/envoy-config.yaml"},
Command: []string{"kubevpn"},
Args: []string{"control-plane", "--watchDirectoryFilename", "/etc/envoy/envoy-config.yaml"},
Ports: []v1.ContainerPort{{
Name: tcp9002,
ContainerPort: 9002,
@@ -191,6 +343,27 @@ kubevpn serve -L "tcp://:10800" -L "tun://:8422?net=${TrafficManagerIP}" --debug
ImagePullPolicy: v1.PullIfNotPresent,
Resources: Resources,
},
{
Name: "webhook",
Image: config.Image,
Command: []string{"kubevpn"},
Args: []string{"webhook"},
Ports: []v1.ContainerPort{{
Name: tcp80,
ContainerPort: 80,
Protocol: v1.ProtocolTCP,
}},
EnvFrom: []v1.EnvFromSource{{
SecretRef: &v1.SecretEnvSource{
LocalObjectReference: v1.LocalObjectReference{
Name: config.ConfigMapPodTrafficManager,
},
},
}},
Env: []v1.EnvVar{},
ImagePullPolicy: v1.PullIfNotPresent,
Resources: Resources,
},
},
RestartPolicy: v1.RestartPolicyAlways,
PriorityClassName: "system-cluster-critical",
@@ -198,38 +371,105 @@ kubevpn serve -L "tcp://:10800" -L "tun://:8422?net=${TrafficManagerIP}" --debug
},
},
}
watchStream, err := podInterface.Watch(context.TODO(), metav1.ListOptions{
watchStream, err := clientset.CoreV1().Pods(namespace).Watch(ctx, metav1.ListOptions{
LabelSelector: fields.OneTermEqualSelector("app", config.ConfigMapPodTrafficManager).String(),
})
if err != nil {
return nil, err
return err
}
defer watchStream.Stop()
if _, err = clientset.AppsV1().Deployments(namespace).Create(context.TODO(), deployment, metav1.CreateOptions{}); err != nil {
return nil, err
if _, err = clientset.AppsV1().Deployments(namespace).Create(ctx, deployment, metav1.CreateOptions{}); err != nil {
return err
}
var phase v1.PodPhase
out:
for {
select {
case e := <-watchStream.ResultChan():
if podT, ok := e.Object.(*v1.Pod); ok {
if phase != podT.Status.Phase {
log.Infof("pod %s status is %s", config.ConfigMapPodTrafficManager, podT.Status.Phase)
}
if podT.Status.Phase == v1.PodRunning {
break out
}
phase = podT.Status.Phase
}
case <-time.Tick(time.Minute * 60):
return nil, errors.New(fmt.Sprintf("wait pod %s to be ready timeout", config.ConfigMapPodTrafficManager))
var ok bool
ctx2, cancelFunc := context.WithTimeout(ctx, time.Minute*60)
defer cancelFunc()
wait.UntilWithContext(ctx2, func(ctx context.Context) {
podList, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: fields.OneTermEqualSelector("app", config.ConfigMapPodTrafficManager).String(),
})
if err != nil {
return
}
for _, podT := range podList.Items {
podT := &podT
if podT.DeletionTimestamp != nil {
continue
}
var sb = bytes.NewBuffer(nil)
sb.WriteString(fmt.Sprintf("pod %s is %s\n", podT.Name, podT.Status.Phase))
if podT.Status.Reason != "" {
sb.WriteString(fmt.Sprintf(" reason %s", podT.Status.Reason))
}
if podT.Status.Message != "" {
sb.WriteString(fmt.Sprintf(" message %s", podT.Status.Message))
}
util.PrintStatus(podT, sb)
log.Infof(sb.String())
if podutils.IsPodReady(podT) && func() bool {
for _, status := range podT.Status.ContainerStatuses {
if !status.Ready {
return false
}
}
return true
}() {
cancelFunc()
ok = true
}
}
}, time.Second*3)
if !ok {
return errors.New(fmt.Sprintf("wait pod %s to be ready timeout", config.ConfigMapPodTrafficManager))
}
return net.ParseIP(svc.Spec.ClusterIP), nil
_, err = clientset.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(ctx, &admissionv1.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: config.ConfigMapPodTrafficManager + "." + namespace,
Namespace: namespace,
},
Webhooks: []admissionv1.MutatingWebhook{{
Name: config.ConfigMapPodTrafficManager + ".naison.io", // no sense
ClientConfig: admissionv1.WebhookClientConfig{
Service: &admissionv1.ServiceReference{
Namespace: namespace,
Name: config.ConfigMapPodTrafficManager,
Path: pointer.String("/pods"),
Port: pointer.Int32(80),
},
CABundle: crt,
},
Rules: []admissionv1.RuleWithOperations{{
Operations: []admissionv1.OperationType{admissionv1.Create, admissionv1.Delete},
Rule: admissionv1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"pods"},
Scope: (*admissionv1.ScopeType)(pointer.String(string(admissionv1.NamespacedScope))),
},
}},
FailurePolicy: (*admissionv1.FailurePolicyType)(pointer.String(string(admissionv1.Ignore))),
// same as above label ns
NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"ns": namespace}},
SideEffects: (*admissionv1.SideEffectClass)(pointer.String(string(admissionv1.SideEffectClassNone))),
TimeoutSeconds: nil,
AdmissionReviewVersions: []string{"v1", "v1beta1"},
ReinvocationPolicy: (*admissionv1.ReinvocationPolicyType)(pointer.String(string(admissionv1.NeverReinvocationPolicy))),
}},
}, metav1.CreateOptions{})
if err != nil && !k8serrors.IsForbidden(err) && !k8serrors.IsAlreadyExists(err) {
return fmt.Errorf("failed to create MutatingWebhookConfigurations, err: %v", err)
}
_, err = updateRefCount(ctx, clientset.CoreV1().ConfigMaps(namespace), config.ConfigMapPodTrafficManager, 1)
if err != nil {
return
}
return
}
func InjectVPNSidecar(factory cmdutil.Factory, namespace, workloads string, config util.PodRouteConfig) error {
func InjectVPNSidecar(ctx1 context.Context, factory cmdutil.Factory, namespace, workloads string, config util.PodRouteConfig) error {
object, err := util.GetUnstructuredObject(factory, namespace, workloads)
if err != nil {
return err
@@ -297,14 +537,16 @@ func InjectVPNSidecar(factory cmdutil.Factory, namespace, workloads string, conf
}
})
}
_ = util.RolloutStatus(factory, namespace, workloads, time.Minute*5)
if err != nil {
return err
}
err = util.RolloutStatus(ctx1, factory, namespace, workloads, time.Minute*60)
return err
}
func createAfterDeletePod(factory cmdutil.Factory, p *v1.Pod, helper *pkgresource.Helper) error {
zero := int64(0)
if _, err := helper.DeleteWithOptions(p.Namespace, p.Name, &metav1.DeleteOptions{
GracePeriodSeconds: &zero,
GracePeriodSeconds: pointer.Int64(0),
}); err != nil {
log.Error(err)
}
@@ -318,7 +560,7 @@ func createAfterDeletePod(factory cmdutil.Factory, p *v1.Pod, helper *pkgresourc
return true
}
clientset, err := factory.KubernetesClientSet()
get, err := clientset.CoreV1().Pods(p.Namespace).Get(context.TODO(), p.Name, metav1.GetOptions{})
get, err := clientset.CoreV1().Pods(p.Namespace).Get(context.Background(), p.Name, metav1.GetOptions{})
if err != nil || get.Status.Phase != v1.PodRunning {
return true
}
@@ -353,10 +595,9 @@ func removeInboundContainer(factory cmdutil.Factory, namespace, workloads string
helper := pkgresource.NewHelper(object.Client, object.Mapping)
// pods
zero := int64(0)
if len(path) == 0 {
_, err = helper.DeleteWithOptions(object.Namespace, object.Name, &metav1.DeleteOptions{
GracePeriodSeconds: &zero,
GracePeriodSeconds: pointer.Int64(0),
})
if err != nil {
return err
@@ -377,7 +618,6 @@ func removeInboundContainer(factory cmdutil.Factory, namespace, workloads string
if err != nil {
return err
}
//t := true
_, err = helper.Patch(object.Namespace, object.Name, types.JSONPatchType, bytes, &metav1.PatchOptions{
//Force: &t,
})

View File

@@ -1,262 +0,0 @@
package handler
import (
"context"
"encoding/json"
"errors"
"fmt"
"net"
"os/exec"
"path/filepath"
"testing"
"time"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
k8sruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/cli-runtime/pkg/genericclioptions"
pkgresource "k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/homedir"
"k8s.io/client-go/util/retry"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
//func TestCreateServer(t *testing.T) {
// clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
// &clientcmd.ClientConfigLoadingRules{ExplicitPath: clientcmd.RecommendedHomeFile}, nil,
// )
// config, err := clientConfig.ClientConfig()
// if err != nil {
// log.Fatal(err)
// }
// clientset, err := kubernetes.NewForConfig(config)
// if err != nil {
// log.Fatal(err)
// }
//
// i := &net.IPNet{
// IP: net.ParseIP("192.168.254.100"),
// Mask: net.IPv4Mask(255, 255, 255, 0),
// }
//
// j := &net.IPNet{
// IP: net.ParseIP("172.20.0.0"),
// Mask: net.IPv4Mask(255, 255, 0, 0),
// }
//
// server, err := pkg.CreateOutboundPod(clientset, "test", i, []*net.IPNet{j})
// fmt.Println(server)
//}
// TestGetIp verifies by eye that an IPNet built from a literal IPv4
// address and mask renders in CIDR notation via String().
func TestGetIp(t *testing.T) {
	ipNet := &net.IPNet{
		IP:   net.IPv4(192, 168, 254, 100),
		Mask: net.IPv4Mask(255, 255, 255, 0),
	}
	fmt.Println(ipNet.String())
}
// TestGetIPFromDHCP exercises the DHCP manager against a live cluster:
// it repeatedly rents two random IPs and then releases them again.
// Requires a reachable kubeconfig at the recommended home location.
//
// Fixes: the net.ParseCIDR error was silently discarded, and the error
// from the first RentIPRandom call was overwritten by the second before
// it was ever checked.
func TestGetIPFromDHCP(t *testing.T) {
	clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: clientcmd.RecommendedHomeFile}, nil,
	)
	config, err := clientConfig.ClientConfig()
	if err != nil {
		log.Fatal(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}
	_, ipNet, err := net.ParseCIDR("192.168.1.100/24")
	if err != nil {
		log.Fatal(err)
	}
	manager := NewDHCPManager(clientset.CoreV1().ConfigMaps("test"), "test", ipNet)
	manager.InitDHCP()
	for i := 0; i < 10; i++ {
		ipNet1, err1 := manager.RentIPRandom()
		ipNet2, err2 := manager.RentIPRandom()
		if err1 != nil || err2 != nil {
			fmt.Println(err1, err2)
			continue
		}
		fmt.Printf("%s->%s\n", ipNet1.String(), ipNet2.String())
		time.Sleep(time.Millisecond * 10)
		if err := manager.ReleaseIpToDHCP(ipNet1); err != nil {
			fmt.Println(err)
		}
		if err := manager.ReleaseIpToDHCP(ipNet2); err != nil {
			fmt.Println(err)
		}
		time.Sleep(time.Millisecond * 10)
	}
}
// TestGetTopController resolves the top-most owner reference (e.g. the
// Deployment behind a ReplicaSet behind a Pod) of a shadow pod and prints
// its name and resource kind. Requires the hard-coded kubeconfig.
//
// Fix: the previous version dereferenced controller before inspecting the
// error, which panics when the lookup fails; check err first.
func TestGetTopController(t *testing.T) {
	s := "/Users/naison/.kube/devpool"
	configFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
	configFlags.KubeConfig = &s
	factory := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(configFlags))
	controller, err := util.GetTopOwnerReference(factory, "nh90bwck", "pods/services-authors-shadow")
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(controller.Name)
	fmt.Println(controller.Mapping.Resource.Resource)
}
// TestGetTopControllerByLabel resolves the top owner of the pods matching
// a label selector and prints the result along with any lookup error.
func TestGetTopControllerByLabel(t *testing.T) {
	kubeconfig := "/Users/naison/.kube/mesh"
	flags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
	flags.KubeConfig = &kubeconfig
	f := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(flags))
	controller, err := util.GetTopOwnerReferenceBySelector(f, "default", "app=productpage")
	fmt.Println(controller)
	fmt.Println(err)
}
// TestPreCheck runs resource pre-checking for a workload list and prints
// the (possibly rewritten) workload names afterwards.
func TestPreCheck(t *testing.T) {
	opts := ConnectOptions{
		KubeconfigPath: filepath.Join(homedir.HomeDir(), ".kube", "mesh"),
		Namespace:      "naison-test",
		Workloads:      []string{"services/authors"},
	}
	opts.InitClient()
	opts.PreCheckResource()
	fmt.Println(opts.Workloads)
}
// init configures the package logger (debug level per config) before any
// test in this file runs.
func init() {
	util.InitLogger(config.Debug)
}
// TestBackoff prints the observed delay between retry attempts so the
// exponential backoff schedule (10 steps, 40ms base, factor 2, jitter
// 0.5) can be inspected by eye. The callback always fails to force every
// retry step to run.
func TestBackoff(t *testing.T) {
	prev := time.Now()
	backoff := wait.Backoff{
		Steps:    10,
		Duration: 40 * time.Millisecond,
		Factor:   2.0,
		Jitter:   0.5,
	}
	alwaysRetry := func(err error) bool { return true }
	retry.OnError(backoff, alwaysRetry, func() error {
		now := time.Now()
		fmt.Printf("%vs\n", now.Sub(prev).Seconds())
		prev = now
		return errors.New("")
	})
}
// TestGetCRD looks up a custom-resource workload (an OpenKruise
// StatefulSet) through the generic unstructured helper and prints the
// object and any lookup error.
func TestGetCRD(t *testing.T) {
	kubeconfig := filepath.Join(homedir.HomeDir(), ".kube", "nocalhost.large")
	flags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
	flags.KubeConfig = &kubeconfig
	f := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(flags))
	ns, _, _ := f.ToRawKubeConfigLoader().Namespace()
	obj, err := util.GetUnstructuredObject(f, ns, "statefulsets.apps.kruise.io/sample-beta1")
	fmt.Println(obj)
	fmt.Println(err)
}
// TestDeleteAndCreate force-deletes the pod "pods/nginx" and re-creates
// it from its captured spec, retrying creation until the old object is
// really gone. Requires a live cluster reachable via ~/.kube/config.
// NOTE(review): several intermediate errors below are intentionally left
// unchecked; a hard failure still surfaces through log.Fatal on the
// delete/create steps.
func TestDeleteAndCreate(t *testing.T) {
	file := clientcmd.RecommendedHomeFile
	file = filepath.Join(homedir.HomeDir(), ".kube", "config")
	configFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
	configFlags.KubeConfig = &file
	factory := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(configFlags))
	Namespace, _, err := factory.ToRawKubeConfigLoader().Namespace()
	object, err := util.GetUnstructuredObject(factory, Namespace, "pods/nginx")
	// Round-trip the unstructured object through JSON to obtain a typed
	// corev1.Pod copy of its current state.
	u := object.Object.(*unstructured.Unstructured)
	var pp corev1.Pod
	marshal, err := json.Marshal(u)
	err = json.Unmarshal(marshal, &pp)
	helper := pkgresource.NewHelper(object.Client, object.Mapping)
	// Zero grace period: delete immediately rather than waiting for the
	// pod's graceful-termination window.
	zero := int64(0)
	if _, err = helper.DeleteWithOptions(object.Namespace, object.Name, &metav1.DeleteOptions{
		GracePeriodSeconds: &zero,
	}); err != nil {
		log.Fatal(err)
	}
	// Block until the API server has actually removed the old pod; best
	// effort — a failure here only means the retry loop below works harder.
	_ = exec.Command("kubectl", "wait", "pods/nginx", "--for=delete").Run()
	// Re-create from only ObjectMeta+Spec; CleanupUselessInfo presumably
	// strips server-assigned metadata so the object is creatable — confirm.
	p := &corev1.Pod{ObjectMeta: pp.ObjectMeta, Spec: pp.Spec}
	CleanupUselessInfo(p)
	if err = retry.OnError(wait.Backoff{
		Steps:    10,
		Duration: 50 * time.Millisecond,
		Factor:   5.0,
		Jitter:   1,
	}, func(err error) bool {
		// Retry on anything except AlreadyExists; when it already exists,
		// only keep retrying while the pod is not yet Running.
		if !k8serrors.IsAlreadyExists(err) {
			return true
		}
		clientset, err := factory.KubernetesClientSet()
		get, err := clientset.CoreV1().Pods(p.Namespace).Get(context.TODO(), p.Name, metav1.GetOptions{})
		if err != nil || get.Status.Phase != corev1.PodRunning {
			return true
		}
		return false
	}, func() error {
		_, err = helper.Create(object.Namespace, true, p)
		if err != nil {
			return err
		}
		// Returning a non-nil error keeps OnError looping until the
		// AlreadyExists+Running condition above stops it.
		return errors.New("")
	}); !k8serrors.IsAlreadyExists(err) {
		// Success is signalled by AlreadyExists; anything else is fatal.
		log.Fatal(err)
	}
}
// TestReadiness applies the probe-removing JSON patch to a deployment and
// then applies the matching restore patch, verifying that both round-trip
// through the API server without error.
func TestReadiness(t *testing.T) {
	flags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
	flags.KubeConfig = &clientcmd.RecommendedHomeFile
	f := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(flags))
	obj, err := util.GetUnstructuredObject(f, "default", "deployment/authors")
	if err != nil {
		panic(err)
	}
	templateSpec, path, err := util.GetPodTemplateSpecPath(obj.Object.(*unstructured.Unstructured))
	if err != nil {
		panic(err)
	}
	h := pkgresource.NewHelper(obj.Client, obj.Mapping)
	removePatch, restorePatch := patch(*templateSpec, path)
	// apply the removal first, then the restore, exactly once each
	for _, p := range [][]byte{removePatch, restorePatch} {
		if _, err = patchs(h, obj.Namespace, obj.Name, p); err != nil {
			panic(err)
		}
	}
}
// patchs issues a JSON-patch against the named object with default patch
// options and returns the patched runtime object.
func patchs(helper *pkgresource.Helper, namespace, name string, p []byte) (k8sruntime.Object, error) {
	patched, err := helper.Patch(namespace, name, types.JSONPatchType, p, &metav1.PatchOptions{})
	return patched, err
}
// TestSliceRemove demonstrates removing every "a" from a slice in place.
//
// Fix: after deleting index i the next element shifts into position i, so
// the index must be decremented or adjacent duplicates are skipped — the
// previously commented-out `i--` left exactly that bug in place (the
// second "a" survived the loop).
func TestSliceRemove(t *testing.T) {
	a := []string{"a", "a", "b", "c"}
	for i := 0; i < len(a); i++ {
		if a[i] == "a" {
			a = append(a[:i], a[i+1:]...)
			i-- // re-examine position i: a new element just moved into it
		}
	}
	fmt.Println(a)
}

60
pkg/handler/reset.go Normal file
View File

@@ -0,0 +1,60 @@
package handler
import (
"context"
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
log "github.com/sirupsen/logrus"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/controlplane"
)
// Reset tears down everything kubevpn installed for this namespace:
// 1, get all proxy-resources from configmap
// 2, cleanup all containers
// It reads the proxy rules recorded in the traffic-manager configmap,
// un-patches every workload container that was injected, deletes the
// in-cluster traffic-manager resources, and finally — best effort —
// removes the local docker network if no container still uses it.
func (c *ConnectOptions) Reset(ctx context.Context) error {
	// NotFound just means there is nothing recorded to clean up.
	cm, err := c.clientset.CoreV1().ConfigMaps(c.Namespace).Get(ctx, config.ConfigMapPodTrafficManager, metav1.GetOptions{})
	if err != nil && !apierrors.IsNotFound(err) {
		return err
	}
	var v = make([]*controlplane.Virtual, 0)
	if cm != nil && cm.Data != nil {
		if str, ok := cm.Data[config.KeyEnvoy]; ok && len(str) != 0 {
			if err = yaml.Unmarshal([]byte(str), &v); err != nil {
				log.Error(err)
				return err
			}
			for _, virtual := range v {
				// convert the stored uid back into resource/name form:
				// deployments.apps.ry-server --> deployments.apps/ry-server
				lastIndex := strings.LastIndex(virtual.Uid, ".")
				uid := virtual.Uid[:lastIndex] + "/" + virtual.Uid[lastIndex+1:]
				for _, rule := range virtual.Rules {
					// keep going on failure so one broken workload does not
					// block cleanup of the rest
					err = UnPatchContainer(c.factory, c.clientset.CoreV1().ConfigMaps(c.Namespace), c.Namespace, uid, rule.Headers)
					if err != nil {
						log.Error(err)
						continue
					}
				}
			}
		}
	}
	cleanup(ctx, c.clientset, c.Namespace, config.ConfigMapPodTrafficManager, false)
	// Everything below is best effort: any docker failure returns nil so a
	// missing or unreachable docker daemon does not fail the reset.
	var cli *client.Client
	if cli, err = client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()); err != nil {
		return nil
	}
	var i types.NetworkResource
	if i, err = cli.NetworkInspect(ctx, config.ConfigMapPodTrafficManager, types.NetworkInspectOptions{}); err != nil {
		return nil
	}
	// only remove the kubevpn docker network when nothing still uses it
	if len(i.Containers) == 0 {
		return cli.NetworkRemove(ctx, config.ConfigMapPodTrafficManager)
	}
	return nil
}

View File

@@ -1,93 +0,0 @@
package handler
import (
"crypto/tls"
"net"
"strings"
"github.com/pkg/errors"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/core"
"github.com/wencaiwulue/kubevpn/pkg/tun"
)
// Route describes the listeners to serve (-L flags) and the optional
// upstream chain node (-F flag) together with the retry count used when
// dialing that chain.
type Route struct {
	ServeNodes []string // -L tun
	ChainNode  string   // -F tcp
	Retries    int
}
// parseChain builds a single-node forwarding chain from r.ChainNode,
// configured to retry up to r.Retries times.
func (r *Route) parseChain() (*core.Chain, error) {
	n, err := parseChainNode(r.ChainNode)
	if err != nil {
		return nil, err
	}
	return core.NewChain(r.Retries, n), nil
}
// parseChainNode parses ns into a core.Node and attaches the default
// client transport: a UDP-over-TCP tunnel connector over plain TCP.
func parseChainNode(ns string) (*core.Node, error) {
	n, err := core.ParseNode(ns)
	if err != nil {
		return nil, err
	}
	n.Client = &core.Client{
		Connector:   core.UDPOverTCPTunnelConnector(),
		Transporter: core.TCPTransporter(),
	}
	return n, nil
}
// GenerateServers builds one core.Server per serve node (-L). A "tun"
// node gets a tun listener configured from its query parameters (name,
// net, mtu, route, gw) with the optional chain (-F) attached; every other
// protocol falls back to the default TCP handler behind a TLS-wrapped
// TCP listener.
func (r *Route) GenerateServers() ([]core.Server, error) {
	chain, err := r.parseChain()
	// ErrorInvalidNode just means no -F chain was configured; that is fine.
	if err != nil && !errors.Is(err, core.ErrorInvalidNode) {
		return nil, err
	}
	servers := make([]core.Server, 0, len(r.ServeNodes))
	for _, serveNode := range r.ServeNodes {
		node, err := core.ParseNode(serveNode)
		if err != nil {
			return nil, err
		}
		var ln net.Listener
		var handler core.Handler
		switch node.Protocol {
		case "tun":
			handler = core.TunHandler(chain, node)
			ln, err = tun.Listener(tun.Config{
				Name:    node.Get("name"),
				Addr:    node.Get("net"),
				MTU:     node.GetInt("mtu"),
				Routes:  parseIPRoutes(node.Get("route")),
				Gateway: node.Get("gw"),
			})
			if err != nil {
				return nil, err
			}
		default:
			handler = core.TCPHandler()
			// fix: the listener error was previously discarded, so a failed
			// bind handed a nil listener to tls.NewListener and panicked later.
			tcpListener, err := core.TCPListener(node.Addr)
			if err != nil {
				return nil, err
			}
			ln = tls.NewListener(tcpListener, config.TlsConfigServer)
		}
		servers = append(servers, core.Server{Listener: ln, Handler: handler})
	}
	return servers, nil
}
// parseIPRoutes converts a comma-separated list of CIDR strings into tun
// IP routes; entries that fail to parse are silently skipped.
func parseIPRoutes(routeStringList string) (routes []tun.IPRoute) {
	if len(routeStringList) == 0 {
		return
	}
	for _, entry := range strings.Split(routeStringList, ",") {
		_, ipNet, _ := net.ParseCIDR(strings.TrimSpace(entry))
		if ipNet != nil {
			routes = append(routes, tun.IPRoute{Dest: ipNet})
		}
	}
	return
}

76
pkg/handler/serve.go Normal file
View File

@@ -0,0 +1,76 @@
package handler
import (
"fmt"
"net/http"
"os"
"strings"
log "github.com/sirupsen/logrus"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/core"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
// Complete fills in the tun IPs for the serve nodes when running inside a
// pod: if the inbound IPv4 env var is present but empty, it rents an
// IPv4/IPv6 pair from the traffic-manager webhook, exports both into the
// environment, and appends net=<ipv4> to any tun:// serve node that lacks
// a net parameter.
//
// Fix: the serve node previously got `string(ip[0])` appended — the first
// BYTE of the HTTP response rendered as a single character — instead of
// ips[0], the rented IPv4 CIDR.
func Complete(route *core.Route) error {
	if v, ok := os.LookupEnv(config.EnvInboundPodTunIPv4); ok && v == "" {
		namespace := os.Getenv(config.EnvPodNamespace)
		if namespace == "" {
			return fmt.Errorf("can not get namespace")
		}
		url := fmt.Sprintf("https://%s:80%s", util.GetTlsDomain(namespace), config.APIRentIP)
		req, err := http.NewRequest("GET", url, nil)
		if err != nil {
			return fmt.Errorf("can not new req, err: %v", err)
		}
		req.Header.Set(config.HeaderPodName, os.Getenv(config.EnvPodName))
		req.Header.Set(config.HeaderPodNamespace, namespace)
		var ip []byte
		ip, err = util.DoReq(req)
		if err != nil {
			log.Error(err)
			return err
		}
		log.Infof("rent an ip %s", strings.TrimSpace(string(ip)))
		// response body is "<ipv4 cidr>,<ipv6 cidr>"
		// NOTE(review): the split values are not trimmed — if the server
		// ever appends a newline it ends up in the env vars; confirm the
		// webhook response is clean.
		ips := strings.Split(string(ip), ",")
		if len(ips) != 2 {
			return fmt.Errorf("can not get ip from %s", string(ip))
		}
		if err = os.Setenv(config.EnvInboundPodTunIPv4, ips[0]); err != nil {
			log.Error(err)
			return err
		}
		if err = os.Setenv(config.EnvInboundPodTunIPv6, ips[1]); err != nil {
			log.Error(err)
			return err
		}
		for i := 0; i < len(route.ServeNodes); i++ {
			node, err := core.ParseNode(route.ServeNodes[i])
			if err != nil {
				return err
			}
			if node.Protocol == "tun" {
				if get := node.Get("net"); get == "" {
					route.ServeNodes[i] = route.ServeNodes[i] + "&net=" + ips[0]
				}
			}
		}
	}
	return nil
}
// Final releases the tun IPs rented by Complete back to the
// traffic-manager webhook, identifying the pod and both addresses via
// request headers.
func Final() error {
	namespace := os.Getenv(config.EnvPodNamespace)
	url := fmt.Sprintf("https://%s:80%s", util.GetTlsDomain(namespace), config.APIReleaseIP)
	req, err := http.NewRequest("DELETE", url, nil)
	if err != nil {
		return fmt.Errorf("can not new req, err: %v", err)
	}
	headers := map[string]string{
		config.HeaderPodName:      os.Getenv(config.EnvPodName),
		config.HeaderPodNamespace: namespace,
		config.HeaderIPv4:         os.Getenv(config.EnvInboundPodTunIPv4),
		config.HeaderIPv6:         os.Getenv(config.EnvInboundPodTunIPv6),
	}
	for k, v := range headers {
		req.Header.Set(k, v)
	}
	_, err = util.DoReq(req)
	return err
}

View File

@@ -1,60 +1,112 @@
package mesh
import (
"fmt"
_ "embed"
log "github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/utils/pointer"
"github.com/wencaiwulue/kubevpn/pkg/config"
"github.com/wencaiwulue/kubevpn/pkg/util"
)
//go:embed envoy.yaml
var envoyConfig []byte
func RemoveContainers(spec *v1.PodTemplateSpec) {
for i := 0; i < len(spec.Spec.Containers); i++ {
if sets.NewString(config.ContainerSidecarEnvoyProxy, config.ContainerSidecarVPN).Has(spec.Spec.Containers[i].Name) {
if sets.New[string](config.ContainerSidecarEnvoyProxy, config.ContainerSidecarVPN).Has(spec.Spec.Containers[i].Name) {
spec.Spec.Containers = append(spec.Spec.Containers[:i], spec.Spec.Containers[i+1:]...)
i--
}
}
}
// todo envoy support ipv6
func AddMeshContainer(spec *v1.PodTemplateSpec, nodeId string, c util.PodRouteConfig) {
// remove envoy proxy containers if already exist
RemoveContainers(spec)
zero := int64(0)
t := true
spec.Spec.Containers = append(spec.Spec.Containers, v1.Container{
Name: config.ContainerSidecarVPN,
Image: config.Image,
Command: []string{"/bin/sh", "-c"},
Args: []string{`
sysctl net.ipv4.ip_forward=1
sysctl -w net.ipv4.ip_forward=1
sysctl -w net.ipv6.conf.all.disable_ipv6=0
sysctl -w net.ipv6.conf.all.forwarding=1
update-alternatives --set iptables /usr/sbin/iptables-legacy
iptables -F
ip6tables -F
iptables -P INPUT ACCEPT
ip6tables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -t nat -A PREROUTING ! -p icmp ! -s 127.0.0.1 ! -d ${CIDR} -j DNAT --to 127.0.0.1:15006
iptables -t nat -A POSTROUTING ! -p icmp ! -s 127.0.0.1 ! -d ${CIDR} -j MASQUERADE
kubevpn serve -L "tun:/${TrafficManagerRealIP}:8422?net=${InboundPodTunIP}&route=${Route}" --debug=true`,
ip6tables -P FORWARD ACCEPT
iptables -t nat -A PREROUTING ! -p icmp ! -s 127.0.0.1 ! -d ${CIDR4} -j DNAT --to 127.0.0.1:15006
ip6tables -t nat -A PREROUTING ! -p icmp ! -s 0:0:0:0:0:0:0:1 ! -d ${CIDR6} -j DNAT --to [0:0:0:0:0:0:0:1]:15006
iptables -t nat -A POSTROUTING ! -p icmp ! -s 127.0.0.1 ! -d ${CIDR4} -j MASQUERADE
ip6tables -t nat -A POSTROUTING ! -p icmp ! -s 0:0:0:0:0:0:0:1 ! -d ${CIDR6} -j MASQUERADE
kubevpn serve -L "tun:/localhost:8422?net=${TunIPv4}&route=${CIDR4}" -F "tcp://${TrafficManagerService}:10800"`,
},
EnvFrom: []v1.EnvFromSource{{
SecretRef: &v1.SecretEnvSource{
LocalObjectReference: v1.LocalObjectReference{
Name: config.ConfigMapPodTrafficManager,
},
},
}},
Env: []v1.EnvVar{
{
Name: "CIDR",
Name: "CIDR4",
Value: config.CIDR.String(),
},
{
Name: "TrafficManagerRealIP",
Value: c.TrafficManagerRealIP},
Name: "CIDR6",
Value: config.CIDR6.String(),
},
{
Name: "InboundPodTunIP",
Value: c.InboundPodTunIP},
Name: config.EnvInboundPodTunIPv4,
Value: "",
},
{
Name: "Route",
Value: c.Route,
Name: config.EnvInboundPodTunIPv6,
Value: "",
},
{
Name: "TrafficManagerService",
Value: config.ConfigMapPodTrafficManager,
},
{
Name: config.EnvPodNamespace,
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
{
Name: config.EnvPodName,
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
},
Resources: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("128m"),
v1.ResourceMemory: resource.MustParse("128Mi"),
},
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("256m"),
v1.ResourceMemory: resource.MustParse("256Mi"),
},
},
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: &v1.SecurityContext{
Capabilities: &v1.Capabilities{
Add: []v1.Capability{
@@ -62,27 +114,27 @@ kubevpn serve -L "tun:/${TrafficManagerRealIP}:8422?net=${InboundPodTunIP}&route
//"SYS_MODULE",
},
},
RunAsUser: &zero,
Privileged: &t,
RunAsUser: pointer.Int64(0),
Privileged: pointer.Bool(true),
},
Resources: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("128m"),
v1.ResourceMemory: resource.MustParse("128Mi"),
},
Limits: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("256m"),
v1.ResourceMemory: resource.MustParse("256Mi"),
},
},
ImagePullPolicy: v1.PullIfNotPresent,
})
spec.Spec.Containers = append(spec.Spec.Containers, v1.Container{
Name: config.ContainerSidecarEnvoyProxy,
Image: config.Image,
Command: []string{"envoy", "-l", "debug", "--base-id", "1", "--config-yaml"},
Name: config.ContainerSidecarEnvoyProxy,
Image: config.Image,
Command: []string{
"envoy",
"-l",
"error",
"--base-id",
"1",
"--service-node",
nodeId,
"--service-cluster",
nodeId,
"--config-yaml",
},
Args: []string{
fmt.Sprintf(s, nodeId, nodeId, c.TrafficManagerRealIP),
string(envoyConfig),
},
Resources: v1.ResourceRequirements{
Requests: map[v1.ResourceName]resource.Quantity{
@@ -97,3 +149,12 @@ kubevpn serve -L "tun:/${TrafficManagerRealIP}:8422?net=${InboundPodTunIP}&route
ImagePullPolicy: v1.PullIfNotPresent,
})
}
// init converts the embedded envoy bootstrap configuration (envoyConfig)
// from YAML to JSON once at package load, so later consumers can use it as
// JSON directly. On conversion failure the error is logged and envoyConfig
// is left in its original YAML form.
func init() {
	// Renamed from `json`: that name shadows the stdlib encoding/json
	// package identifier and is confusing to readers.
	data, err := yaml.ToJSON(envoyConfig)
	if err != nil {
		log.Error(err)
		return
	}
	envoyConfig = data
}

53
pkg/mesh/envoy.yaml Normal file
View File

@@ -0,0 +1,53 @@
# Envoy bootstrap configuration for the kubevpn sidecar proxy.
# Static part: an admin endpoint (9003) and a TCP listener on 15006 that
# forwards each connection to its original destination. Dynamic part:
# listeners/clusters are fetched over ADS (gRPC) from kubevpn-traffic-manager.
admin:
  access_log_path: /dev/null  # discard admin access logs
  address:
    socket_address:
      # "::" together with ipv4_compat binds dual-stack (IPv6 plus mapped IPv4).
      address: "::"
      port_value: 9003
      ipv4_compat: true
dynamic_resources:
  ads_config:
    api_type: GRPC
    transport_api_version: V3
    grpc_services:
      - envoy_grpc:
          cluster_name: xds_cluster  # defined under static_resources below
        set_node_on_first_message_only: true
  # Both CDS and LDS are delivered over the single ADS stream.
  cds_config:
    resource_api_version: V3
    ads: { }
  lds_config:
    resource_api_version: V3
    ads: { }
static_resources:
  listeners:
    - name: default_listener
      address:
        socket_address:
          address: "::"
          port_value: 15006
          ipv4_compat: true
      # Hand each connection to the listener matching its original
      # (pre-redirect) destination address.
      use_original_dst: true
      filter_chains:
        - filters:
            - name: envoy.filters.network.tcp_proxy
              typed_config:
                "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy
                stat_prefix: tcp
                # NOTE(review): origin_cluster is not declared statically here;
                # presumably it is supplied via CDS — verify against the xDS server.
                cluster: origin_cluster
  clusters:
    # xDS control-plane cluster (the kubevpn traffic manager).
    - name: xds_cluster
      connect_timeout: 2s
      type: STRICT_DNS
      lb_policy: ROUND_ROBIN
      load_assignment:
        cluster_name: xds_cluster
        endpoints:
          - lb_endpoints:
              - endpoint:
                  address:
                    socket_address:
                      address: kubevpn-traffic-manager
                      port_value: 9002
                      ipv4_compat: true
      http2_protocol_options: { }  # xDS is gRPC, which requires HTTP/2

View File

@@ -1,61 +0,0 @@
package mesh
// s is a printf-style template for the envoy bootstrap configuration handed
// to the sidecar proxy. Its %s verbs are filled, in order, with: the node
// cluster name, the node id, and the address of the xDS (ADS) server
// (see the fmt.Sprintf call site). It declares an admin endpoint on 9003, a
// TCP listener on 15006 with use_original_dst, an xds_cluster reaching the
// control plane on 9002 over HTTP/2, and an ORIGINAL_DST origin_cluster.
// NOTE(review): the leading indentation inside this YAML template appears to
// have been lost in extraction — verify against the original file before use;
// the string content is otherwise kept byte-identical here.
var s = `
node:
cluster: %s
id: %s
admin:
access_log_path: /dev/null
address:
socket_address:
address: 0.0.0.0
port_value: 9003
dynamic_resources:
ads_config:
api_type: GRPC
transport_api_version: V3
grpc_services:
- envoy_grpc:
cluster_name: xds_cluster
set_node_on_first_message_only: true
cds_config:
resource_api_version: V3
ads: {}
lds_config:
resource_api_version: V3
ads: {}
static_resources:
listeners:
- name: default_listener
address:
socket_address:
address: 0.0.0.0
port_value: 15006
use_original_dst: true
filter_chains:
- filters:
- name: envoy.filters.network.tcp_proxy
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy
stat_prefix: tcp
cluster: origin_cluster
clusters:
- name: xds_cluster
connect_timeout: 2s
type: STRICT_DNS
lb_policy: ROUND_ROBIN
load_assignment:
cluster_name: xds_cluster
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: %s
port_value: 9002
http2_protocol_options: {}
- name: origin_cluster
connect_timeout: 5s
type: ORIGINAL_DST
lb_policy: CLUSTER_PROVIDED
`

View File

@@ -1,57 +0,0 @@
package route
import (
"bytes"
"net"
"strings"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/sets"
)
// DetectAndDisableConflictDevice detects network devices whose route-table
// entries conflict with (are more specific than) the routes of the given
// origin device, and disables those devices so traffic keeps flowing through
// origin.
//
// Steps:
//  1. read the system route table
//  2. detect devices with conflicting, more-specific routes
//  3. disable the conflicting devices
//
// It returns any error from reading the route table or disabling devices.
func DetectAndDisableConflictDevice(origin string) error {
	routeTable, err := GetRouteTable()
	if err != nil {
		return err
	}
	conflict := detectConflictDevice(origin, routeTable)
	if len(conflict) != 0 {
		// Fixed grammar of the log message ("will to be disabled").
		log.Infof("those device: %s will be disabled because of route conflict with %s", strings.Join(conflict, ","), origin)
	}
	// DisableDevice is still invoked with an empty list to preserve the
	// original behavior — presumably a no-op there; verify if changing.
	return DisableDevice(conflict)
}
// detectConflictDevice returns the sorted names of devices in routeTable
// owning at least one route that overlaps a route of the origin device with
// a longer (more specific) prefix. Because the kernel prefers the longest
// matching prefix, such devices would capture traffic meant for origin.
func detectConflictDevice(origin string, routeTable map[string][]*net.IPNet) []string {
	found := sets.NewString()
	vpnRoutes := routeTable[origin]
	for device, routes := range routeTable {
		if device == origin {
			continue
		}
		if hasMoreSpecificOverlap(routes, vpnRoutes) {
			found.Insert(device)
		}
	}
	// Delete(origin) is redundant (origin is skipped above) but kept for
	// parity with the previous implementation.
	return found.Delete(origin).List()
}

// hasMoreSpecificOverlap reports whether any network in routes overlaps a
// network in vpnRoutes while carrying a longer prefix than the vpn one.
func hasMoreSpecificOverlap(routes, vpnRoutes []*net.IPNet) bool {
	for _, r := range routes {
		for _, v := range vpnRoutes {
			// Entries whose address equals their mask (e.g. 255.255.0.0/16)
			// are not real destinations; ignore them.
			if bytes.Equal(r.IP, r.Mask) || bytes.Equal(v.IP, v.Mask) {
				continue
			}
			if !v.Contains(r.IP) && !r.Contains(v.IP) {
				continue
			}
			rOnes, _ := r.Mask.Size()
			vOnes, _ := v.Mask.Size()
			// The more specific (longer) prefix wins route selection, so
			// the owning device conflicts with origin.
			if rOnes > vOnes {
				return true
			}
		}
	}
	return false
}

Some files were not shown because too many files have changed in this diff Show More