Mirror of https://github.com/kubenetworks/kubevpn.git (synced 2025-12-24 11:51:13 +08:00)
Compare commits
508 commits in this range (abbreviated SHAs from f9f52d1001 through 99601693d3; the author, date, and message columns of the commit table are not reproduced here).
**`.github/krew.yaml`** (vendored), 27 changed lines

````diff
@@ -4,20 +4,19 @@ metadata:
   name: kubevpn
 spec:
   version: {{ .TagName }}
-  homepage: https://github.com/KubeNetworks/kubevpn
-  shortDescription: "A vpn tunnel tools which can connect to kubernetes cluster network"
+  homepage: https://github.com/kubenetworks/kubevpn
+  shortDescription: "KubeVPN offers a Cloud Native Dev Environment that connects to kubernetes cluster network"
   description: |
-    KubeVPN is Cloud Native Dev Environment, connect to kubernetes cluster network, you can access remote kubernetes
-    cluster network, remote
-    kubernetes cluster service can also access your local service. and more, you can run your kubernetes pod on local Docker
-    container with same environment、volume、and network. you can develop your application on local PC totally.
+    KubeVPN offers a Cloud-Native Dev Environment that seamlessly connects to your Kubernetes cluster network.
+    Gain access to the Kubernetes cluster network effortlessly using service names or Pod IP/Service IP. Facilitate the interception of inbound traffic from remote Kubernetes cluster services to your local PC through a service mesh and more.
+    For instance, you have the flexibility to run your Kubernetes pod within a local Docker container, ensuring an identical environment, volume, and network setup. With KubeVPN, empower yourself to develop applications entirely on your local PC!

   platforms:
     - selector:
         matchLabels:
           os: windows
           arch: amd64
-      {{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_amd64.zip" .TagName }}
+      {{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_amd64.zip" .TagName }}
       files:
         - from: ./bin/kubevpn.exe
           to: .
@@ -28,7 +27,7 @@ spec:
         matchLabels:
           os: windows
           arch: arm64
-      {{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_arm64.zip" .TagName }}
+      {{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_arm64.zip" .TagName }}
       files:
         - from: ./bin/kubevpn.exe
           to: .
@@ -39,7 +38,7 @@ spec:
        matchLabels:
          os: windows
          arch: 386
-      {{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_386.zip" .TagName }}
+      {{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_386.zip" .TagName }}
       files:
         - from: ./bin/kubevpn.exe
           to: .
@@ -50,7 +49,7 @@ spec:
        matchLabels:
          os: linux
          arch: amd64
-      {{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_amd64.zip" .TagName }}
+      {{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_amd64.zip" .TagName }}
       files:
         - from: ./bin/kubevpn
           to: .
@@ -61,7 +60,7 @@ spec:
        matchLabels:
          os: linux
          arch: arm64
-      {{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_arm64.zip" .TagName }}
+      {{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_arm64.zip" .TagName }}
       files:
         - from: ./bin/kubevpn
           to: .
@@ -72,7 +71,7 @@ spec:
        matchLabels:
          os: linux
          arch: 386
-      {{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_386.zip" .TagName }}
+      {{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_386.zip" .TagName }}
       files:
         - from: ./bin/kubevpn
           to: .
@@ -83,7 +82,7 @@ spec:
        matchLabels:
          os: darwin
          arch: amd64
-      {{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_darwin_amd64.zip" .TagName }}
+      {{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_darwin_amd64.zip" .TagName }}
       files:
         - from: ./bin/kubevpn
           to: .
@@ -94,7 +93,7 @@ spec:
        matchLabels:
          os: darwin
          arch: arm64
-      {{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_darwin_arm64.zip" .TagName }}
+      {{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_darwin_arm64.zip" .TagName }}
       files:
         - from: ./bin/kubevpn
           to: .
````
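The manifest above is what the custom krew index serves. As the README changes later in this compare show, the end-user install flow against that index looks roughly like this (sketch, assuming `kubectl` and the krew plugin manager are already installed):

```shell
# Add the custom krew index hosting the manifest above, then install the plugin.
kubectl krew index add kubevpn https://github.com/kubenetworks/kubevpn.git
kubectl krew install kubevpn/kubevpn
kubectl kubevpn
```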
**`.github/release-note.sh`** (vendored), 67 changed lines

````diff
@@ -7,11 +7,70 @@ PREVIOUS_RELEASE=${PREVIOUS_RELEASE:-$1}
 CHANGELOG=$(git log --no-merges --date=short --pretty=format:'- %h %an %ad %s' "${PREVIOUS_RELEASE}".."${RELEASE}")

 cat <<EOF
-## ${RELEASE}
+# KubeVPN release ${RELEASE}

 KubeVPN ${RELEASE} is available now ! 🎉
-- fix known bugs 🛠
-## Installation and Upgrading
-wget -LO "https://github.com/KubeNetworks/kubevpn/releases/download/$(curl -L -s https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/plugins/stable.txt)/kubevpn_$(curl -L -s https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/plugins/stable.txt)_darwin_amd64.zip"

+## Download KubeVPN for your platform
+
+**Mac** (x86-64/Intel)
+
+\`\`\`
+curl -Lo kubevpn.zip https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_darwin_amd64.zip && unzip -d kubevpn kubevpn.zip
+\`\`\`
+
+**Mac** (AArch64/Apple M1 silicon)
+
+\`\`\`
+curl -Lo kubevpn.zip https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_darwin_arm64.zip && unzip -d kubevpn kubevpn.zip
+\`\`\`
+
+**Linux** (x86-64)
+
+\`\`\`
+curl -Lo kubevpn.zip https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_linux_amd64.zip && unzip -d kubevpn kubevpn.zip
+\`\`\`
+
+**Linux** (AArch64)
+
+\`\`\`
+curl -Lo kubevpn.zip https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_linux_arm64.zip && unzip -d kubevpn kubevpn.zip
+\`\`\`
+
+**Linux** (i386)
+
+\`\`\`
+curl -Lo kubevpn.zip https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_linux_386.zip && unzip -d kubevpn kubevpn.zip
+\`\`\`
+
+**Windows** (x86-64)
+
+\`\`\`
+curl -LO https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_windows_amd64.zip
+\`\`\`
+
+**Windows** (AArch64)
+
+\`\`\`
+curl -LO https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_windows_arm64.zip
+\`\`\`
+
+**Windows** (i386)
+
+\`\`\`
+curl -LO https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_windows_386.zip
+\`\`\`
+
+## Checksums
+
+SHA256 checksums available for compiled binaries.
+Run \`shasum -a 256 -c checksums.txt\` to verify.
+
+## Upgrading
+
+Run \`kubevpn upgrade\` to upgrade from a previous version.
+
+## Changelog
+
 ${CHANGELOG}
 EOF
````
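A minimal sketch of how this script might be driven locally to preview the generated notes; the tag values below are hypothetical, and `RELEASE` is assumed to come from the environment while `PREVIOUS_RELEASE` falls back to the first argument, as the hunk header above suggests:

```shell
# Hypothetical tags; substitute real release tags from the repository.
export RELEASE=v2.2.2
export PREVIOUS_RELEASE=v2.2.1
bash .github/release-note.sh "${PREVIOUS_RELEASE}" > release-note.md
```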
**`.github/workflows/coverage.yml`** (vendored, new file), 77 lines

````diff
@@ -0,0 +1,77 @@
+name: Coverage
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    branches: [ master ]
+
+jobs:
+  linux:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: '1.23'
+          check-latest: true
+
+      - name: Setup Minikube
+        id: minikube
+        timeout-minutes: 30
+        uses: medyagh/setup-minikube@latest
+        with:
+          cache: true
+
+      - name: Kubernetes info
+        run: |
+          kubectl cluster-info
+          cat ~/.kube/config
+          kubectl get pods -n kube-system -o wide
+
+      - name: Install demo bookinfo
+        run: |
+          minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-details-v1:1.20.2
+          minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-ratings-v1:1.20.2
+          minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-reviews-v1:1.20.2
+          minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-productpage-v1:1.20.2
+          minikube image load --remote ghcr.io/kubenetworks/authors:latest
+          minikube image load --remote ghcr.io/kubenetworks/nginx:latest
+          minikube image ls
+          eval $(minikube docker-env)
+          kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
+
+      - name: Build
+        run: |
+          export VERSION=${{github.event.pull_request.head.sha}}
+          if [[ -z "$VERSION" ]]; then
+            export VERSION=${{ github.sha }}
+          fi
+          make kubevpn-linux-amd64
+          chmod +x ./bin/kubevpn
+          cp ./bin/kubevpn /usr/local/bin/kubevpn
+          kubevpn version
+
+      - name: Wait for pods reviews to be ready
+        run: |
+          kubectl wait --for=condition=Ready pods --all --timeout=3600s
+          kubectl get svc -A -o wide
+          kubectl get pod -A -o wide
+          kubectl get all -o wide
+          kubectl get nodes -o yaml
+          ifconfig
+          route -n
+          sudo ln /usr/bin/resolvectl /usr/bin/systemd-resolve
+
+      - name: Test
+        run: make ut
+
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1 # v4.0.2
+        env:
+          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+        with:
+          verbose: true
+          slug: wencaiwulue/kubevpn
````
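For reference, the build-and-test sequence the Coverage job runs can be approximated locally; this is only a sketch and assumes a minikube (or equivalent) cluster is already running and `kubectl` points at it:

```shell
# Deploy the demo workloads the tests expect, then build and run the suite.
kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
kubectl wait --for=condition=Ready pods --all --timeout=3600s
make kubevpn-linux-amd64   # use the matching target for your OS/arch
make ut                    # go test with coverage, as defined in the Makefile
```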
**`.github/workflows/release.yml`** (vendored), 182 changed lines

````diff
@@ -11,18 +11,19 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Set up Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
-          go-version: '1.20'
+          go-version: '1.23'
           check-latest: true
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0

       - name: Push image to docker hub
         run: |
           echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USER }} --password-stdin
+          echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
           docker buildx create --use
           make container

@@ -52,13 +53,13 @@ jobs:
           git reset --hard

       - name: Upload RELEASE_VERSION
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v4
         with:
           name: RELEASE_VERSION
           path: RELEASE_VERSION

       - name: Upload UPLOAD_URL
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v4
         with:
           name: UPLOAD_URL
           path: UPLOAD_URL
@@ -95,13 +96,168 @@ jobs:
           labels: |
             report
             automated pr
          # team-reviewers: |
          #   owners
          #   maintainers
           draft: false

  # - name: Update new version in krew-index
  #   uses: rajatjindal/krew-release-bot@v0.0.43
  #   with:
  #     krew_template_file: .github/krew.yaml
  #     debug: true
+  release-helm-chart:
+    name: Release KubeVPN Helm Chart
+    needs: [ build ]
+    runs-on: ubuntu-latest
+    steps:
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: '1.23'
+          check-latest: true
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Helm tool installer
+        uses: azure/setup-helm@v4
+        with:
+          version: "v3.6.3"
+      - name: Change chart version
+        run: |
+          VERSION=${GITHUB_REF#refs/*/}
+          CHART_VERSION=${VERSION/#v/}
+          sed -i "s/^appVersion:.*$/appVersion: \"${VERSION}\"/;s/^version:.*$/version: ${CHART_VERSION}/" charts/kubevpn/Chart.yaml
+          sed -i "s/tag:.*$/tag: \"${VERSION}\"/" charts/kubevpn/values.yaml
+      - name: Tar chart
+        run: |
+          VERSION=${GITHUB_REF#refs/*/}
+          CHART_VERSION=${VERSION/#v/}
+          tar --transform 's/^charts\/kubevpn/kubevpn/' -zcf kubevpn-${CHART_VERSION}.tgz charts/kubevpn
+          shasum -a 256 kubevpn-${CHART_VERSION}.tgz | awk '{print $1}' > kubevpn-${CHART_VERSION}.tgz-SHA256
+      - name: Download UPLOAD_URL
+        uses: actions/download-artifact@v4
+        with:
+          name: UPLOAD_URL
+      - name: Get Release UPLOAD_URL
+        id: get_release_info
+        run: |
+          UploadUrl=$(cat ./UPLOAD_URL)
+          echo "::set-output name=upload_url::$UploadUrl"
+      - name: Get assert name
+        id: get_assert_info
+        run: |
+          VERSION=${GITHUB_REF#refs/*/}
+          CHART_VERSION=${VERSION/#v/}
+          AssertName=kubevpn-${CHART_VERSION}.tgz
+          echo "::set-output name=assert_name::$AssertName"
+      - name: Get assert SHA256 name
+        id: get_assert_info_sha256
+        run: |
+          VERSION=${GITHUB_REF#refs/*/}
+          CHART_VERSION=${VERSION/#v/}
+          AssertName=kubevpn-${CHART_VERSION}.tgz-SHA256
+          echo "::set-output name=assert_name::$AssertName"
+      - name: Upload Release Asset KubeVPN Server Chart
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.get_release_info.outputs.upload_url }}
+          asset_path: ${{ steps.get_assert_info.outputs.assert_name }}
+          asset_name: ${{ steps.get_assert_info.outputs.assert_name }}
+          asset_content_type: application/octet-stream
+      - name: Upload Release Asset KubeVPN Chart SHA256
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.get_release_info.outputs.upload_url }}
+          asset_path: ${{ steps.get_assert_info_sha256.outputs.assert_name }}
+          asset_name: ${{ steps.get_assert_info_sha256.outputs.assert_name }}
+          asset_content_type: application/octet-stream
+
+  github-pages-deploy:
+    name: Release Helm Chart To branch master
+    permissions:
+      contents: write
+    runs-on: ubuntu-latest
+    needs: release-helm-chart
+    steps:
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: '1.23'
+          check-latest: true
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Configure Git
+        run: |
+          git config user.name "$GITHUB_ACTOR"
+          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
+      - name: Install Helm
+        uses: azure/setup-helm@v4
+      - name: Change chart version
+        run: |
+          VERSION=${GITHUB_REF#refs/*/}
+          CHART_VERSION=${VERSION/#v/}
+          sed -i "s/^appVersion:.*$/appVersion: \"${VERSION}\"/;s/^version:.*$/version: ${CHART_VERSION}/" charts/kubevpn/Chart.yaml
+          sed -i "s/tag:.*$/tag: \"${VERSION}\"/" charts/kubevpn/values.yaml
+      - name: Package and upload helm chart
+        run: |
+          # download helm chart releaser
+          curl -sSLo cr.tar.gz "https://github.com/helm/chart-releaser/releases/download/v1.6.1/chart-releaser_1.6.1_linux_amd64.tar.gz"
+          tar -xzf cr.tar.gz
+          rm -f cr.tar.gz
+          owner=$(cut -d '/' -f 1 <<< "$GITHUB_REPOSITORY")
+          repo=$(cut -d '/' -f 2 <<< "$GITHUB_REPOSITORY")
+          # package chart
+          ./cr package charts/$repo
+          # update index and push to github pages
+          git config user.email "$owner@users.noreply.github.com"
+          git config user.name "$owner"
+          ./cr index \
+            --owner "$owner" \
+            --git-repo "$repo" \
+            --token "${{ secrets.CREATE_HELM_PR }}" \
+            --release-name-template "v{{ .Version }}" \
+            --index-path ./index.yaml \
+            --charts-repo https://github.com/$owner/$repo \
+            --pages-branch master \
+            --pages-index-path charts/index.yaml \
+            --pr
+  snapcraft:
+    runs-on: ubuntu-24.04
+    env:
+      SNAPCRAFT_STORE_CREDENTIALS: ${{ secrets.SNAPCRAFT_TOKEN }}
+    steps:
+      - name: Check out Git repository
+        uses: actions/checkout@v3
+
+      - name: Install Snapcraft
+        uses: samuelmeuli/action-snapcraft@v3
+
+      - name: Setup LXD
+        uses: canonical/setup-lxd@main
+
+      - name: Use Snapcraft
+        run: |
+          RELEASE_VERSION=${GITHUB_REF#refs/*/}
+          sed -i s#CRAFT_ARCH_BUILD_VERSION#$RELEASE_VERSION#g snap/snapcraft.yaml
+          snapcraft
+          snapcraft upload --release=stable kubevpn_${RELEASE_VERSION}_amd64.snap
+  snapcraft-arm:
+    runs-on: ubuntu-24.04-arm
+    env:
+      SNAPCRAFT_STORE_CREDENTIALS: ${{ secrets.SNAPCRAFT_TOKEN }}
+    steps:
+      - name: Check out Git repository
+        uses: actions/checkout@v3
+
+      - name: Install Snapcraft
+        uses: samuelmeuli/action-snapcraft@v3
+
+      - name: Setup LXD
+        uses: canonical/setup-lxd@main
+
+      - name: Use Snapcraft
+        run: |
+          RELEASE_VERSION=${GITHUB_REF#refs/*/}
+          sed -i s#CRAFT_ARCH_BUILD_VERSION#$RELEASE_VERSION#g snap/snapcraft.yaml
+          snapcraft
+          snapcraft upload --release=stable kubevpn_${RELEASE_VERSION}_arm64.snap
````
**`.github/workflows/test.yml`** (vendored), 128 changed lines

````diff
@@ -10,33 +10,35 @@ jobs:
   image:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
       - name: Set up Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
-          go-version: '1.20'
+          go-version: '1.23'
           check-latest: true
       - name: Push image to docker hub
         run: |
-          echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USER }} --password-stdin
-          docker buildx create --use
-          export VERSION=test
-          make container
+          echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
+          export VERSION=${{github.event.pull_request.head.sha}}
+          if [[ -z "$VERSION" ]]; then
+            export VERSION=${{ github.sha }}
+          fi
+          make container-test
   linux:
     runs-on: ubuntu-latest
     needs: [ "image" ]
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4

       - name: Set up Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
-          go-version: '1.20'
+          go-version: '1.23'
           check-latest: true
       - name: Setup Minikube
         id: minikube
         timeout-minutes: 30
-        uses: medyagh/setup-minikube@master
+        uses: medyagh/setup-minikube@latest
         with:
           cache: true

@@ -47,20 +49,22 @@ jobs:
           kubectl get pods -n kube-system -o wide
       - name: Install demo bookinfo
         run: |
-          minikube image load --remote istio/examples-bookinfo-details-v1:1.16.2
-          minikube image load --remote istio/examples-bookinfo-ratings-v1:1.16.2
-          minikube image load --remote istio/examples-bookinfo-reviews-v1:1.16.2
-          minikube image load --remote istio/examples-bookinfo-productpage-v1:1.16.2
-          minikube image load --remote naison/authors:latest
-          minikube image load --remote nginx:latest
-          minikube image load --remote naison/kubevpn:test
+          minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-details-v1:1.20.2
+          minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-ratings-v1:1.20.2
+          minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-reviews-v1:1.20.2
+          minikube image load --remote ghcr.io/kubenetworks/examples-bookinfo-productpage-v1:1.20.2
+          minikube image load --remote ghcr.io/kubenetworks/authors:latest
+          minikube image load --remote ghcr.io/kubenetworks/nginx:latest
           minikube image ls
           eval $(minikube docker-env)
-          kubectl apply -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
+          kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml

       - name: Build
         run: |
-          export VERSION=test
+          export VERSION=${{github.event.pull_request.head.sha}}
+          if [[ -z "$VERSION" ]]; then
+            export VERSION=${{ github.sha }}
+          fi
           make kubevpn-linux-amd64
           chmod +x ./bin/kubevpn
           cp ./bin/kubevpn /usr/local/bin/kubevpn
@@ -68,8 +72,7 @@ jobs:

       - name: Wait for pods reviews to be ready
         run: |
-          kubectl wait pods -l app=reviews --for=condition=Ready --timeout=3600s
-          kubectl wait pods -l app=productpage --for=condition=Ready --timeout=3600s
+          kubectl wait --for=condition=Ready pods --all --timeout=3600s
           kubectl get svc -A -o wide
           kubectl get pod -A -o wide
           kubectl get all -o wide
@@ -79,30 +82,44 @@ jobs:
           sudo ln /usr/bin/resolvectl /usr/bin/systemd-resolve

       - name: Test
-        run: go test -v -failfast ./... -timeout=60m
+        run: make ut

   macos:
-    runs-on: macos-latest
+    runs-on: macos-13
     needs: [ "image" ]
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4

       - name: Set up Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
-          go-version: '1.20'
+          go-version: '1.23'
           check-latest: true
-      - name: Set up Docker
-        uses: crazy-max/ghaction-setup-docker@v1.4.0

+      # https://github.com/crazy-max/ghaction-setup-docker/issues/108
+      - name: Set up QEMU
+        uses: docker/actions-toolkit/.github/actions/macos-setup-qemu@19ca9ade20f5da695f76a10988d6532058575f82
+
+      - name: Set up Docker
+        uses: docker/setup-docker-action@v4
+        with:
+          daemon-config: |
+            {
+              "debug": true,
+              "features": {
+                "containerd-snapshotter": true
+              }
+            }
+      - uses: azure/setup-kubectl@v4
       - name: Install minikube
         run: |
+          set -x
+          docker version
           brew install minikube
-          minikube start --driver=docker
+          minikube start --driver=docker --memory=max --cpus=max --wait=all --wait-timeout=60m
           kubectl cluster-info
           kubectl config view --flatten --raw
           kubectl get pod -A -o wide
           minikube kubectl -- get pod -A -o wide

       - name: Kubernetes info
         run: |
@@ -112,20 +129,14 @@ jobs:

       - name: Install demo bookinfo
         run: |
-          minikube image load --remote istio/examples-bookinfo-details-v1:1.16.2
-          minikube image load --remote istio/examples-bookinfo-ratings-v1:1.16.2
-          minikube image load --remote istio/examples-bookinfo-reviews-v1:1.16.2
-          minikube image load --remote istio/examples-bookinfo-productpage-v1:1.16.2
-          minikube image load --remote naison/authors:latest
-          minikube image load --remote nginx:latest
-          minikube image load --remote naison/kubevpn:test
-          minikube image ls
-          eval $(minikube docker-env)
-          kubectl apply -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
+          kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml

       - name: Build
         run: |
-          export VERSION=test
+          export VERSION=${{github.event.pull_request.head.sha}}
+          if [[ -z "$VERSION" ]]; then
+            export VERSION=${{ github.sha }}
+          fi
           make kubevpn-darwin-amd64
           chmod +x ./bin/kubevpn
           cp ./bin/kubevpn /usr/local/bin/kubevpn
@@ -133,8 +144,7 @@ jobs:

       - name: Wait for pods reviews to be ready
         run: |
-          kubectl wait pods -l app=reviews --for=condition=Ready --timeout=3600s
-          kubectl wait pods -l app=productpage --for=condition=Ready --timeout=3600s
+          kubectl wait --for=condition=Ready pods --all --timeout=3600s
           kubectl get svc -A -o wide || true
           kubectl get pod -A -o wide || true
           kubectl get all -o wide || true
@@ -143,27 +153,45 @@ jobs:
           netstat -anr

       - name: Test
-        run: go test -v -failfast ./... -timeout=60m
+        run: make ut

   windows:
     runs-on: windows-latest
+    env:
+      VERSION: ${{ github.event.pull_request.head.sha || github.sha }}
     needs: [ "image" ]
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4

       - name: Set up Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
-          go-version: '1.20'
+          go-version: '1.23'

       - name: Set up Docker
-        uses: crazy-max/ghaction-setup-docker@v1.4.0
+        uses: docker/setup-docker-action@v4
+        with:
+          daemon-config: |
+            {
+              "debug": true,
+              "features": {
+                "containerd-snapshotter": true
+              }
+            }
+      - run: |
+          docker info --format '{{.OSType}}'
+          choco install kind
+          kind create cluster
+          kubectl cluster-info
+          kubectl config view --flatten --raw

       - run: |
-          choco install minikube
-          minikube start --driver=docker
+          choco install make

       - name: Build
-        run: make kubevpn-windows-amd64
+        run: |
+          make kubevpn-windows-amd64
+          ./bin/kubevpn.exe version
+          ./bin/kubevpn.exe status
````
**`.github/workflows/upload_release.yml`** (vendored), 6 changed lines

````diff
@@ -23,12 +23,12 @@ jobs:
             arch: 386
     steps:
       - name: Set up Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
-          go-version: '1.20'
+          go-version: '1.23'
           check-latest: true
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4

       - name: Build kubevpn
         run: |
````
**`.gitignore`** (vendored), 2 changed lines

````diff
@@ -1,8 +1,6 @@
 # Binaries for programs and plugins
 *.exe
 *.exe~
 *.dll
 *.so
 *.dylib

 # Test binary, built with `go test -c`
````
**`Makefile`**, 36 changed lines

````diff
@@ -1,26 +1,28 @@
 VERSION ?= $(shell git tag -l --sort=v:refname | tail -1)
-GIT_COMMIT := $(shell git describe --match=NeVeRmAtCh --always --abbrev=40)
-BUILD_TIME := $(shell date +"%Y-%m-%dT%H:%M:%SZ")
-BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
+GIT_COMMIT ?= $(shell git describe --match=NeVeRmAtCh --always --abbrev=7)
+BUILD_TIME ?= $(shell date +"%Y-%m-%dT%H:%M:%SZ")
+BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)

 GOOS := $(shell go env GOHOSTOS)
 GOARCH := $(shell go env GOHOSTARCH)
 TARGET := kubevpn-${GOOS}-${GOARCH}
 OS_ARCH := ${GOOS}/${GOARCH}

-BASE := github.com/wencaiwulue/kubevpn
+BASE := github.com/wencaiwulue/kubevpn/v2
 FOLDER := ${BASE}/cmd/kubevpn
-BUILD_DIR := ./build
-OUTPUT_DIR := ./bin
+BUILD_DIR ?= ./build
+OUTPUT_DIR ?= ./bin
 REGISTRY ?= docker.io
 NAMESPACE ?= naison
 REPOSITORY ?= kubevpn
 IMAGE ?= $(REGISTRY)/$(NAMESPACE)/$(REPOSITORY):$(VERSION)
-IMAGE_DEFAULT = docker.io/naison/kubevpn:latest
+IMAGE_LATEST ?= docker.io/naison/kubevpn:latest
+IMAGE_GH ?= ghcr.io/kubenetworks/kubevpn:$(VERSION)
+IMAGE_GH_LATEST ?= ghcr.io/kubenetworks/kubevpn:latest

 # Setup the -ldflags option for go build here, interpolate the variable values
-LDFLAGS=--ldflags "\
- -X ${BASE}/pkg/config.Image=${IMAGE} \
+LDFLAGS=--ldflags "-s -w\
+ -X ${BASE}/pkg/config.Image=${IMAGE_GH} \
  -X ${BASE}/pkg/config.Version=${VERSION} \
  -X ${BASE}/pkg/config.GitCommit=${GIT_COMMIT} \
  -X ${BASE}/pkg/config.GitHubOAuthToken=${GitHubOAuthToken} \
@@ -84,21 +86,29 @@ kubevpn-linux-386:

 .PHONY: container
 container:
-	docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE} -t ${IMAGE_DEFAULT} -f $(BUILD_DIR)/Dockerfile --push .
+	docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE} -t ${IMAGE_LATEST} -t ${IMAGE_GH} -t ${IMAGE_GH_LATEST} -f $(BUILD_DIR)/Dockerfile --push .

 ############################ build local
 .PHONY: container-local
 container-local: kubevpn-linux-amd64
-	docker buildx build --platform linux/amd64,linux/arm64 -t docker.io/naison/kubevpn:latest -f $(BUILD_DIR)/local.Dockerfile --push .
+	docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE_LATEST} -t ${IMAGE_GH_LATEST} -f $(BUILD_DIR)/local.Dockerfile --push .

 .PHONY: container-test
 container-test: kubevpn-linux-amd64
-	docker buildx build --platform linux/amd64,linux/arm64 -t docker.io/naison/kubevpn:test -f $(BUILD_DIR)/test.Dockerfile --push .
+	docker build -t ${IMAGE_GH} -f $(BUILD_DIR)/test.Dockerfile --push .

 .PHONY: version
 version:
-	go run github.com/wencaiwulue/kubevpn/pkg/util/krew
+	go run ${BASE}/pkg/util/krew

 .PHONY: gen
 gen:
	go generate ./...

+.PHONY: ut
+ut:
+	go test -p=1 -v -timeout=60m -coverprofile=coverage.txt -coverpkg=./... ./...
+
+.PHONY: cover
+cover: ut
+	go tool cover -html=coverage.txt
````
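For illustration, a rough sketch of the `go build` invocation these LDFLAGS produce for one target; the tag and commit values below are hypothetical, and the exact flags of the real build targets may differ slightly:

```shell
# Roughly what `make kubevpn-linux-amd64` expands to (example values only).
GOOS=linux GOARCH=amd64 go build \
  -ldflags "-s -w \
    -X github.com/wencaiwulue/kubevpn/v2/pkg/config.Image=ghcr.io/kubenetworks/kubevpn:v2.2.2 \
    -X github.com/wencaiwulue/kubevpn/v2/pkg/config.Version=v2.2.2 \
    -X github.com/wencaiwulue/kubevpn/v2/pkg/config.GitCommit=abc1234" \
  -o ./bin/kubevpn github.com/wencaiwulue/kubevpn/v2/cmd/kubevpn
```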
660
README.md
660
README.md
@@ -1,125 +1,160 @@
|
||||

|
||||
|
||||
[![GitHub Workflow][1]](https://github.com/KubeNetworks/kubevpn/actions)
|
||||
[![Go Version][2]](https://github.com/KubeNetworks/kubevpn/blob/master/go.mod)
|
||||
[![Go Report][3]](https://goreportcard.com/badge/github.com/KubeNetworks/kubevpn)
|
||||
[![Maintainability][4]](https://codeclimate.com/github/KubeNetworks/kubevpn/maintainability)
|
||||
[![GitHub License][5]](https://github.com/KubeNetworks/kubevpn/blob/main/LICENSE)
|
||||
[![GitHub Workflow][1]](https://github.com/kubenetworks/kubevpn/actions)
|
||||
[![Go Version][2]](https://github.com/kubenetworks/kubevpn/blob/master/go.mod)
|
||||
[![Go Report][3]](https://goreportcard.com/report/github.com/wencaiwulue/kubevpn)
|
||||
[![Maintainability][4]](https://codeclimate.com/github/kubenetworks/kubevpn/maintainability)
|
||||
[![GitHub License][5]](https://github.com/kubenetworks/kubevpn/blob/main/LICENSE)
|
||||
[![Docker Pulls][6]](https://hub.docker.com/r/naison/kubevpn)
|
||||
[![Releases][7]](https://github.com/KubeNetworks/kubevpn/releases)
|
||||
[![Releases][7]](https://github.com/kubenetworks/kubevpn/releases)
|
||||
[](https://pkg.go.dev/github.com/wencaiwulue/kubevpn/v2)
|
||||
[](https://codecov.io/gh/wencaiwulue/kubevpn)
|
||||
[](https://snapcraft.io/kubevpn)
|
||||
|
||||
[1]: https://img.shields.io/github/actions/workflow/status/KubeNetworks/kubevpn/release.yml?logo=github
|
||||
[1]: https://img.shields.io/github/actions/workflow/status/kubenetworks/kubevpn/release.yml?logo=github
|
||||
|
||||
[2]: https://img.shields.io/github/go-mod/go-version/KubeNetworks/kubevpn?logo=go
|
||||
[2]: https://img.shields.io/github/go-mod/go-version/kubenetworks/kubevpn?logo=go
|
||||
|
||||
[3]: https://img.shields.io/badge/go%20report-A+-brightgreen.svg?style=flat
|
||||
[3]: https://goreportcard.com/badge/github.com/wencaiwulue/kubevpn?style=flat
|
||||
|
||||
[4]: https://api.codeclimate.com/v1/badges/b5b30239174fc6603aca/maintainability
|
||||
|
||||
[5]: https://img.shields.io/github/license/KubeNetworks/kubevpn
|
||||
[5]: https://img.shields.io/github/license/kubenetworks/kubevpn
|
||||
|
||||
[6]: https://img.shields.io/docker/pulls/naison/kubevpn?logo=docker
|
||||
|
||||
[7]: https://img.shields.io/github/v/release/KubeNetworks/kubevpn?logo=smartthings
|
||||
[7]: https://img.shields.io/github/v/release/kubenetworks/kubevpn?logo=smartthings
|
||||
|
||||
# KubeVPN
|
||||
|
||||
[中文](README_ZH.md) | [English](README.md) | [Wiki](https://github.com/KubeNetworks/kubevpn/wiki/Architecture)
|
||||
[中文](README_ZH.md) | [English](README.md) | [Wiki](https://github.com/kubenetworks/kubevpn/wiki/Architecture)
|
||||
|
||||
KubeVPN is Cloud Native Dev Environment, connect to kubernetes cluster network, you can access remote kubernetes
|
||||
cluster network, remote
|
||||
kubernetes cluster service can also access your local service. and more, you can run your kubernetes pod on local Docker
|
||||
container with same environment、volume、and network. you can develop your application on local PC totally.
|
||||
KubeVPN offers a Cloud-Native Dev Environment that seamlessly connects to your Kubernetes cluster network.
|
||||
|
||||
Gain access to the Kubernetes cluster network effortlessly using service names or Pod IP/Service IP. Facilitate the
|
||||
interception of inbound traffic from remote Kubernetes cluster services to your local PC through a service mesh and
|
||||
more.
|
||||
|
||||
For instance, you have the flexibility to run your Kubernetes pod within a local Docker container, ensuring an identical
|
||||
environment, volume, and network setup.
|
||||
With KubeVPN, empower yourself to develop applications entirely on your local PC!
|
||||
|
||||

|
||||
|
||||
## Content
|
||||
|
||||
1. [QuickStart](./README.md#quickstart)
|
||||
2. [Functions](./README.md#functions)
|
||||
3. [Architecture](./README.md#architecture)
|
||||
4. [Contributions](./README.md#Contributions)
|
||||
|
||||
## QuickStart
|
||||
|
||||
#### Install from GitHub release
|
||||
|
||||
[LINK](https://github.com/KubeNetworks/kubevpn/releases/latest)
|
||||
|
||||
#### Install from custom krew index
|
||||
### Install from script ( macOS / Linux)
|
||||
|
||||
```shell
|
||||
(
|
||||
kubectl krew index add kubevpn https://github.com/KubeNetworks/kubevpn.git && \
|
||||
kubectl krew install kubevpn/kubevpn && kubectl kubevpn
|
||||
)
|
||||
curl -fsSL https://kubevpn.dev/install.sh | sh
|
||||
```
|
||||
|
||||
#### Install from build it manually
|
||||
|
||||
### Install from [brew](https://brew.sh/) (macOS / Linux)
|
||||
|
||||
```shell
|
||||
(
|
||||
git clone https://github.com/KubeNetworks/kubevpn.git && \
|
||||
cd kubevpn && make kubevpn && ./bin/kubevpn
|
||||
)
|
||||
|
||||
brew install kubevpn
|
||||
```
|
||||
|
||||
### Install from [snap](https://snapcraft.io/kubevpn) (Linux)
|
||||
|
||||
```shell
|
||||
sudo snap install kubevpn
|
||||
```
|
||||
|
||||
### Install from [scoop](https://scoop.sh/) (Windows)
|
||||
|
||||
```shell
|
||||
scoop bucket add extras
|
||||
scoop install kubevpn
|
||||
```
|
||||
|
||||
### Install from [krew](https://krew.sigs.k8s.io/) (Windows / macOS / Linux)
|
||||
|
||||
```shell
|
||||
kubectl krew index add kubevpn https://github.com/kubenetworks/kubevpn.git
|
||||
kubectl krew install kubevpn/kubevpn
|
||||
kubectl kubevpn
|
||||
```
|
||||
|
||||
### Install from GitHub release (Windows / macOS / Linux)
|
||||
|
||||
[https://github.com/kubenetworks/kubevpn/releases/latest](https://github.com/kubenetworks/kubevpn/releases/latest)
|
||||
|
||||
### Install bookinfo as demo application
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
```
|
||||
|
||||
For clean up after test
|
||||
|
||||
```shell
|
||||
kubectl delete -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
kubectl delete -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
```
|
||||
|
||||
## Functions
|
||||
|
||||
### Connect to k8s cluster network
|
||||
|
||||
use command `kubevpn connect` connect to k8s cluster network, prompt `Password:` need to input computer
|
||||
password. to enable root operation (create a tun device).
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn connect
|
||||
Password:
|
||||
start to connect
|
||||
get cidr from cluster info...
|
||||
get cidr from cluster info ok
|
||||
get cidr from cni...
|
||||
wait pod cni-net-dir-kubevpn to be running timeout, reason , ignore
|
||||
get cidr from svc...
|
||||
get cidr from svc ok
|
||||
get cidr successfully
|
||||
traffic manager not exist, try to create it...
|
||||
label namespace default
|
||||
create serviceAccount kubevpn-traffic-manager
|
||||
create roles kubevpn-traffic-manager
|
||||
create roleBinding kubevpn-traffic-manager
|
||||
create service kubevpn-traffic-manager
|
||||
create deployment kubevpn-traffic-manager
|
||||
pod kubevpn-traffic-manager-66d969fd45-9zlbp is Pending
|
||||
Starting connect
|
||||
Getting network CIDR from cluster info...
|
||||
Getting network CIDR from CNI...
|
||||
Getting network CIDR from services...
|
||||
Labeling Namespace default
|
||||
Creating ServiceAccount kubevpn-traffic-manager
|
||||
Creating Roles kubevpn-traffic-manager
|
||||
Creating RoleBinding kubevpn-traffic-manager
|
||||
Creating Service kubevpn-traffic-manager
|
||||
Creating MutatingWebhookConfiguration kubevpn-traffic-manager
|
||||
Creating Deployment kubevpn-traffic-manager
|
||||
|
||||
Pod kubevpn-traffic-manager-66d969fd45-9zlbp is Pending
|
||||
Container Reason Message
|
||||
control-plane ContainerCreating
|
||||
vpn ContainerCreating
|
||||
webhook ContainerCreating
|
||||
|
||||
pod kubevpn-traffic-manager-66d969fd45-9zlbp is Running
|
||||
Pod kubevpn-traffic-manager-66d969fd45-9zlbp is Running
|
||||
Container Reason Message
|
||||
control-plane ContainerRunning
|
||||
vpn ContainerRunning
|
||||
webhook ContainerRunning
|
||||
|
||||
Creating mutatingWebhook_configuration for kubevpn-traffic-manager
|
||||
update ref count successfully
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
+---------------------------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster, enjoy it :) |
|
||||
+---------------------------------------------------------------------------+
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
+----------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster ! |
|
||||
+----------------------------------------------------------+
|
||||
➜ ~
|
||||
```
|
||||
|
||||
already connected to cluster network, use command `kubevpn status` to check status
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn status
|
||||
ID Mode Cluster Kubeconfig Namespace Status
|
||||
0 full ccijorbccotmqodvr189g /Users/bytedance/.kube/config default Connected
|
||||
ID Mode Cluster Kubeconfig Namespace Status
|
||||
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
|
||||
➜ ~
|
||||
```
|
||||
|
||||
use pod `productpage-788df7ff7f-jpkcs` IP `172.29.2.134`
|
||||
|
||||
```shell
|
||||
➜ ~ kubectl get pods -o wide
|
||||
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
|
||||
@@ -131,6 +166,8 @@ ratings-77b6cd4499-zvl6c 1/1 Running 0
|
||||
reviews-85c88894d9-vgkxd 1/1 Running 0 24d 172.29.2.249 192.168.0.5 <none> <none>
|
||||
```
|
||||
|
||||
use `ping` to test connection, seems good
|
||||
|
||||
```shell
|
||||
➜ ~ ping 172.29.2.134
|
||||
PING 172.29.2.134 (172.29.2.134): 56 data bytes
|
||||
@@ -144,6 +181,8 @@ PING 172.29.2.134 (172.29.2.134): 56 data bytes
|
||||
round-trip min/avg/max/stddev = 54.293/55.380/56.270/0.728 ms
|
||||
```
|
||||
|
||||
use service `productpage` IP `172.21.10.49`
|
||||
|
||||
```shell
|
||||
➜ ~ kubectl get services -o wide
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
|
||||
@@ -156,6 +195,8 @@ ratings ClusterIP 172.21.3.247 <none> 9080/TCP
|
||||
reviews ClusterIP 172.21.8.24 <none> 9080/TCP 114d app=reviews
|
||||
```
|
||||
|
||||
use command `curl` to test service connection
|
||||
|
||||
```shell
|
||||
➜ ~ curl 172.21.10.49:9080
|
||||
<!DOCTYPE html>
|
||||
@@ -167,8 +208,18 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
```
|
||||
|
||||
seems good too~
|
||||
|
||||
### Domain resolve
|
||||
|
||||
support k8s dns name resolve.
|
||||
|
||||
a Pod/Service named `productpage` in the `default` namespace can successfully resolve by following name:
|
||||
|
||||
- `productpage`
|
||||
- `productpage.default`
|
||||
- `productpage.default.svc.cluster.local`
|
||||
|
||||
```shell
|
||||
➜ ~ curl productpage.default.svc.cluster.local:9080
|
||||
<!DOCTYPE html>
|
||||
@@ -182,7 +233,8 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
|
||||
|
||||
### Short domain resolve
|
||||
|
||||
To access the service in the cluster, service name or you can use the short domain name, such as `productpage.default.svc.cluster.local`
|
||||
To access the service in the cluster, service name or you can use the short domain name, such
|
||||
as `productpage`
|
||||
|
||||
```shell
|
||||
➜ ~ curl productpage:9080
|
||||
@@ -195,50 +247,94 @@ To access the service in the cluster, service name or you can use the short doma
|
||||
...
|
||||
```
|
||||
|
||||
***Disclaimer:*** This only works on the namespace where kubevpn-traffic-manager is deployed. Otherwise,
|
||||
use [Domain resolve](./README.md#domain-resolve)
|
||||
|
||||
### Connect to multiple kubernetes cluster network
|
||||
|
||||
- Mode `lite`: can connect to multiple cluster network, design for only connecting to multiple cluster network.
|
||||
- Mode `Full`: not only connect to cluster network, it also supports proxy workloads inbound traffic to local PC.
|
||||
|
||||
already connected cluster `ccijorbccotmqodvr189g` with mode `full`
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn status
|
||||
ID Mode Cluster Kubeconfig Namespace Status
|
||||
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
|
||||
```
|
||||
|
||||
then connect to another cluster `ccidd77aam2dtnc3qnddg` with mode `lite`
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn connect -n default --kubeconfig ~/.kube/dev_config --lite
|
||||
Starting connect
|
||||
Got network CIDR from cache
|
||||
Use exist traffic manager
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
+----------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster ! |
|
||||
+----------------------------------------------------------+
|
||||
```
|
||||
|
||||
use command `kubevpn status` to check connection status
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn status
|
||||
ID Mode Cluster Kubeconfig Namespace Status
|
||||
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
|
||||
1 lite ccidd77aam2dtnc3qnddg /Users/naison/.kube/dev_config default Connected
|
||||
➜ ~
|
||||
```
|
||||
|
||||
### Reverse proxy
|
||||
|
||||
use command `kubevpn proxy` to proxy all inbound traffic to local computer.
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn proxy deployment/productpage
|
||||
already connect to cluster
|
||||
start to create remote inbound pod for deployment/productpage
|
||||
workload default/deployment/productpage is controlled by a controller
|
||||
rollout status for deployment/productpage
|
||||
Connected to cluster
|
||||
Injecting inbound sidecar for deployment/productpage
|
||||
Checking rollout status for deployment/productpage
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
deployment "productpage" successfully rolled out
|
||||
rollout status for deployment/productpage successfully
|
||||
create remote inbound pod for deployment/productpage successfully
|
||||
+---------------------------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster, enjoy it :) |
|
||||
+---------------------------------------------------------------------------+
|
||||
Rollout successfully for deployment/productpage
|
||||
+----------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster ! |
|
||||
+----------------------------------------------------------+
|
||||
➜ ~
|
||||
```
|
||||
|
||||
For local testing, save the following code as `hello.go`
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func main() {
|
||||
http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
|
||||
_, _ = io.WriteString(writer, "Hello world!")
|
||||
fmt.Printf(">>Received request: %s %s from %s\n", request.Method, request.RequestURI, request.RemoteAddr)
|
||||
})
|
||||
_ = http.ListenAndServe(":9080", nil)
|
||||
http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
|
||||
_, _ = io.WriteString(writer, "Hello world!")
|
||||
fmt.Printf(">>Received request: %s %s from %s\n", request.Method, request.RequestURI, request.RemoteAddr)
|
||||
})
|
||||
_ = http.ListenAndServe(":9080", nil)
|
||||
}
|
||||
```
|
||||
|
||||
and compile it
|
||||
|
||||
```
|
||||
go build hello.go
|
||||
```
|
||||
|
||||
then run it
|
||||
|
||||
```
|
||||
./hello &
|
||||
```
|
||||
@@ -247,20 +343,20 @@ then run it
|
||||
export selector=productpage
|
||||
export pod=`kubectl get pods -l app=${selector} -n default -o jsonpath='{.items[0].metadata.name}'`
|
||||
export pod_ip=`kubectl get pod $pod -n default -o jsonpath='{.status.podIP}'`
|
||||
curl -v -H "a: 1" http://$pod_ip:9080/health
|
||||
curl -v -H "foo: bar" http://$pod_ip:9080/health
|
||||
```
|
||||
|
||||
The response should look like below:
|
||||
|
||||
```
|
||||
❯ curl -v -H "a: 1" http://$pod_ip:9080/health
|
||||
❯ curl -v -H "foo: bar" http://$pod_ip:9080/health
|
||||
* Trying 192.168.72.77:9080...
|
||||
* Connected to 192.168.72.77 (192.168.72.77) port 9080 (#0)
|
||||
> GET /health HTTP/1.1
|
||||
> Host: 192.168.72.77:9080
|
||||
> User-Agent: curl/7.87.0
|
||||
> Accept: */*
|
||||
> a: 1
|
||||
> foo: bar
|
||||
>
|
||||
>>Received request: GET /health from xxx.xxx.xxx.xxx:52974
|
||||
* Mark bundle as not supporting multiuse
|
||||
@@ -282,30 +378,25 @@ Hello world!%
|
||||
Hello world!%
|
||||
```
|
||||
|
||||
|
||||
|
||||
### Reverse proxy with mesh
|
||||
|
||||
Support HTTP, GRPC and WebSocket etc. with specific header `"a: 1"` will route to your local machine
|
||||
Support HTTP, GRPC and WebSocket etc. with specific header `"foo: bar"` will route to your local machine
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn proxy deployment/productpage --headers a=1
|
||||
already connect to cluster
|
||||
start to create remote inbound pod for deployment/productpage
|
||||
patch workload default/deployment/productpage with sidecar
|
||||
rollout status for deployment/productpage
|
||||
➜ ~ kubevpn proxy deployment/productpage --headers foo=bar
|
||||
Connected to cluster
|
||||
Injecting inbound sidecar for deployment/productpage
|
||||
Checking rollout status for deployment/productpage
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
deployment "productpage" successfully rolled out
|
||||
rollout status for deployment/productpage successfully
|
||||
create remote inbound pod for deployment/productpage successfully
|
||||
+---------------------------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster, enjoy it :) |
|
||||
+---------------------------------------------------------------------------+
|
||||
Rollout successfully for deployment/productpage
|
||||
+----------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster ! |
|
||||
+----------------------------------------------------------+
|
||||
➜ ~
|
||||
```
|
||||
|
||||
first access without header "a: 1", it will access existing pod on kubernetes cluster.
|
||||
first access without header "foo: bar", it will access existing pod on kubernetes cluster.
|
||||
|
||||
```shell
|
||||
➜ ~ curl productpage:9080
|
||||
@@ -319,38 +410,47 @@ first access without header "a: 1", it will access existing pod on kubernetes cl
|
||||
...
|
||||
```
|
||||
|
||||
Now let's access local service with header `"a: 1"`
|
||||
Now let's access local service with header `"foo: bar"`
|
||||
|
||||
```shell
|
||||
➜ ~ curl productpage:9080 -H "a: 1"
|
||||
➜ ~ curl productpage:9080 -H "foo: bar"
|
||||
>>Received request: GET / from xxx.xxx.xxx.xxx:51296
|
||||
Hello world!
|
||||
```
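The same header-based routing can be exercised from code. Below is a minimal sketch (not part of the original guide), assuming `kubevpn connect` is active so that the service name `productpage` resolves from the local machine, and that the header `foo=bar` matches whatever was passed to `--headers`:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Assumption: `kubevpn connect` is active, so the in-cluster service
	// name "productpage" resolves from this machine.
	req, err := http.NewRequest(http.MethodGet, "http://productpage:9080/", nil)
	if err != nil {
		panic(err)
	}
	// Requests WITHOUT this header keep hitting the pod in the cluster;
	// requests WITH it are routed to the locally running service.
	req.Header.Set("foo", "bar")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("status=%d body=%s\n", resp.StatusCode, body)
}
```

When one service calls another, remember to propagate the header on the outgoing request as well; otherwise downstream hops will not be routed to your machine.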
|
||||
|
||||
If you want to cancel the proxy, just run the command:
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn leave deployments/productpage
|
||||
Leaving workload deployments/productpage
|
||||
Checking rollout status for deployments/productpage
|
||||
Waiting for deployment "productpage" rollout to finish: 0 out of 1 new replicas have been updated...
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
Rollout successfully for deployments/productpage
|
||||
```
|
||||
|
||||
### Dev mode in local Docker 🐳
|
||||
|
||||
Run the Kubernetes pod in a local Docker container and, in cooperation with the service mesh, intercept either the traffic carrying the specified header or all traffic to your local machine.
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn dev deployment/authors --headers a=1 -it --rm --entrypoint sh
|
||||
connectting to cluster
|
||||
start to connect
|
||||
got cidr from cache
|
||||
get cidr successfully
|
||||
update ref count successfully
|
||||
traffic manager already exist, reuse it
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
start to create remote inbound pod for Deployment.apps/authors
|
||||
patch workload default/Deployment.apps/authors with sidecar
|
||||
rollout status for Deployment.apps/authors
|
||||
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
|
||||
➜ ~ kubevpn dev deployment/authors --headers foo=bar --entrypoint sh
|
||||
Starting connect
|
||||
Got network CIDR from cache
|
||||
Use exist traffic manager
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
Injecting inbound sidecar for deployment/authors
|
||||
Patching workload deployment/authors
|
||||
Checking rollout status for deployment/authors
|
||||
Waiting for deployment "authors" rollout to finish: 0 out of 1 new replicas have been updated...
|
||||
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
|
||||
deployment "authors" successfully rolled out
|
||||
rollout status for Deployment.apps/authors successfully
|
||||
create remote inbound pod for Deployment.apps/authors successfully
|
||||
Rollout successfully for Deployment.apps/authors
|
||||
tar: removing leading '/' from member names
|
||||
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/4563987760170736212:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
tar: Removing leading `/' from member names
|
||||
@@ -390,23 +490,21 @@ OK: 8 MiB in 19 packages
|
||||
{"status":"Authors is healthy"} /opt/microservices # echo "continue testing pod access..."
|
||||
continue testing pod access...
|
||||
/opt/microservices # exit
|
||||
prepare to exit, cleaning up
|
||||
update ref count successfully
|
||||
tun device closed
|
||||
leave resource: deployments.apps/authors
|
||||
workload default/deployments.apps/authors is controlled by a controller
|
||||
leave resource: deployments.apps/authors successfully
|
||||
clean up successfully
|
||||
prepare to exit, cleaning up
|
||||
update ref count successfully
|
||||
clean up successfully
|
||||
Created container: default_authors
|
||||
Wait container default_authors to be running...
|
||||
Container default_authors is running now
|
||||
Disconnecting from the cluster...
|
||||
Leaving workload deployments.apps/authors
|
||||
Disconnecting from the cluster...
|
||||
Performing cleanup operations
|
||||
Clearing DNS settings
|
||||
➜ ~
|
||||
```
|
||||
|
||||
You can see that it starts two containers with Docker, mapping to the two containers of the pod, and they share the same network, so you can use `localhost:port` to access the other container. Moreover, all environment variables, volumes and the network are the same as in the remote kubernetes pod; it is truly consistent with the kubernetes runtime, which makes developing on a local PC practical.
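As an illustration only (not from the original document): because the dev container shares its network namespace with the other container of the pod, code running inside it can reach that container on `localhost`. The sketch below assumes an nginx sidecar listening on port 80, as in the transcript above:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Inside `kubevpn dev`, the sibling container (nginx here) shares the
	// same network namespace, so it is reachable on localhost.
	resp, err := http.Get("http://localhost:80/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	n := len(body)
	if n > 100 {
		n = 100
	}
	fmt.Println(string(body[:n])) // beginning of the nginx welcome page
}
```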
|
||||
|
||||
```shell
|
||||
➜ ~ docker ps
|
||||
@@ -416,39 +514,37 @@ fc04e42799a5 nginx:latest "/docker-entrypoint.…" 37 sec
|
||||
➜ ~
|
||||
```
|
||||
|
||||
Here is how to access the pod in the local docker container:
|
||||
|
||||
```shell
|
||||
export authors_pod=`kubectl get pods -l app=authors -n default -o jsonpath='{.items[0].metadata.name}'`
|
||||
export authors_pod_ip=`kubectl get pod $authors_pod -n default -o jsonpath='{.status.podIP}'`
|
||||
curl -kv -H "a: 1" http://$authors_pod_ip:80/health
|
||||
curl -kv -H "foo: bar" http://$authors_pod_ip:80/health
|
||||
```
|
||||
|
||||
Verify logs of nginx container
|
||||
|
||||
```shell
|
||||
docker logs $(docker ps --format '{{.Names}}' | grep nginx_default_kubevpn)
|
||||
```
|
||||
|
||||
|
||||
If you just want to start up a docker image, you can use simple way like this:
|
||||
If you just want to start up a docker image, you can use a simple way like this:
|
||||
|
||||
```shell
|
||||
kubevpn dev deployment/authors --no-proxy -it --rm
|
||||
kubevpn dev deployment/authors --no-proxy
|
||||
```
|
||||
|
||||
Example:
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn dev deployment/authors --no-proxy -it --rm
|
||||
connectting to cluster
|
||||
start to connect
|
||||
got cidr from cache
|
||||
get cidr successfully
|
||||
update ref count successfully
|
||||
traffic manager already exist, reuse it
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
➜ ~ kubevpn dev deployment/authors --no-proxy
|
||||
Starting connect
|
||||
Got network CIDR from cache
|
||||
Use exist traffic manager
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
tar: removing leading '/' from member names
|
||||
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/5631078868924498209:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
tar: Removing leading `/' from member names
|
||||
@@ -466,7 +562,7 @@ Created main container: authors_default_kubevpn_ff34b
|
||||
|
||||
Now the main process will hang to stream the logs to you.
|
||||
|
||||
If you want to specify the image to start the container locally, you can use the parameter `--docker-image`. When the
|
||||
If you want to specify the image to start the container locally, you can use the parameter `--dev-image`. When the
|
||||
image does not exist locally, it will be pulled from the corresponding image registry. If you want to specify startup
parameters, use the `--entrypoint` flag and replace it with the command you want to execute, for example
`--entrypoint /bin/bash`. For more options, see `kubevpn dev --help`.
|
||||
@@ -474,63 +570,53 @@ as `--entrypoint /bin/bash`, for more parameters, see `kubevpn dev --help`.
|
||||
### DinD ( Docker in Docker ) use kubevpn in Docker
|
||||
|
||||
If you want to start the development mode locally using Docker in Docker (DinD), note two things: because the program reads and
writes the `/tmp` directory, you need to manually add the parameter `-v /tmp:/tmp` to the outer docker; and to share the
network and PID namespaces, you need to pass the `--network` parameter to the inner docker.
|
||||
|
||||
Example:
|
||||
|
||||
```shell
|
||||
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 naison/kubevpn:v2.0.0
|
||||
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 ghcr.io/kubenetworks/kubevpn:latest
|
||||
```
|
||||
|
||||
```shell
|
||||
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 naison/kubevpn:v2.0.0
|
||||
Unable to find image 'naison/kubevpn:v2.0.0' locally
|
||||
v2.0.0: Pulling from naison/kubevpn
|
||||
445a6a12be2b: Already exists
|
||||
bd6c670dd834: Pull complete
|
||||
64a7297475a2: Pull complete
|
||||
33fa2e3224db: Pull complete
|
||||
e008f553422a: Pull complete
|
||||
5132e0110ddc: Pull complete
|
||||
5b2243de1f1a: Pull complete
|
||||
662a712db21d: Pull complete
|
||||
4f4fb700ef54: Pull complete
|
||||
33f0298d1d4f: Pull complete
|
||||
Digest: sha256:115b975a97edd0b41ce7a0bc1d8428e6b8569c91a72fe31ea0bada63c685742e
|
||||
Status: Downloaded newer image for naison/kubevpn:v2.0.0
|
||||
root@d0b3dab8912a:/app# kubevpn dev deployment/authors --headers user=naison -it --entrypoint sh
|
||||
|
||||
----------------------------------------------------------------------------------
|
||||
Warn: Use sudo to execute command kubevpn can not use user env KUBECONFIG.
|
||||
Because of sudo user env and user env are different.
|
||||
Current env KUBECONFIG value:
|
||||
----------------------------------------------------------------------------------
|
||||
|
||||
hostname is d0b3dab8912a
|
||||
connectting to cluster
|
||||
start to connect
|
||||
got cidr from cache
|
||||
get cidr successfully
|
||||
update ref count successfully
|
||||
traffic manager already exist, reuse it
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
start to create remote inbound pod for Deployment.apps/authors
|
||||
patch workload default/Deployment.apps/authors with sidecar
|
||||
rollout status for Deployment.apps/authors
|
||||
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
|
||||
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 ghcr.io/kubenetworks/kubevpn:latest
|
||||
Unable to find image 'ghcr.io/kubenetworks/kubevpn:latest' locally
|
||||
latest: Pulling from ghcr.io/kubenetworks/kubevpn
|
||||
9c704ecd0c69: Already exists
|
||||
4987d0a976b5: Pull complete
|
||||
8aa94c4fc048: Pull complete
|
||||
526fee014382: Pull complete
|
||||
6c1c2bedceb6: Pull complete
|
||||
97ac845120c5: Pull complete
|
||||
ca82aef6a9eb: Pull complete
|
||||
1fd9534c7596: Pull complete
|
||||
588bd802eb9c: Pull complete
|
||||
Digest: sha256:368db2e0d98f6866dcefd60512960ce1310e85c24a398fea2a347905ced9507d
|
||||
Status: Downloaded newer image for ghcr.io/kubenetworks/kubevpn:latest
|
||||
WARNING: image with reference ghcr.io/kubenetworks/kubevpn was found but does not match the specified platform: wanted linux/amd64, actual: linux/arm64
|
||||
root@5732124e6447:/app# kubevpn dev deployment/authors --headers user=naison --entrypoint sh
|
||||
hostname is 5732124e6447
|
||||
Starting connect
|
||||
Got network CIDR from cache
|
||||
Use exist traffic manager
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
Injecting inbound sidecar for deployment/authors
|
||||
Patching workload deployment/authors
|
||||
Checking rollout status for deployment/authors
|
||||
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
|
||||
deployment "authors" successfully rolled out
|
||||
rollout status for Deployment.apps/authors successfully
|
||||
create remote inbound pod for Deployment.apps/authors successfully
|
||||
Rollout successfully for Deployment.apps/authors
|
||||
tar: removing leading '/' from member names
|
||||
/tmp/6460902982794789917:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
tar: Removing leading `/' from member names
|
||||
tar: Removing leading `/' from hard link targets
|
||||
/tmp/5028895788722532426:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
network mode is container:d0b3dab8912a
|
||||
Network mode is container:d0b3dab8912a
|
||||
Created container: nginx_default_kubevpn_6df63
|
||||
Wait container nginx_default_kubevpn_6df63 to be running...
|
||||
Container nginx_default_kubevpn_6df63 is running now
|
||||
@@ -585,49 +671,50 @@ OK: 8 MiB in 19 packages
|
||||
>> Container Received request: GET / from 127.0.0.1:41230
|
||||
Hello world!/opt/microservices #
|
||||
|
||||
/opt/microservices # curl authors:9080/health -H "a: 1"
|
||||
>>Received request: GET /health from 223.254.0.109:57930
|
||||
/opt/microservices # curl authors:9080/health -H "foo: bar"
|
||||
>>Received request: GET /health from 198.19.0.109:57930
|
||||
Hello world!/opt/microservices #
|
||||
/opt/microservices # curl localhost:9080/health
|
||||
{"status":"Authors is healthy"}/opt/microservices # exit
|
||||
prepare to exit, cleaning up
|
||||
update ref count successfully
|
||||
tun device closed
|
||||
leave resource: deployments.apps/authors
|
||||
workload default/deployments.apps/authors is controlled by a controller
|
||||
leave resource: deployments.apps/authors successfully
|
||||
clean up successfully
|
||||
prepare to exit, cleaning up
|
||||
update ref count successfully
|
||||
clean up successfully
|
||||
Created container: default_authors
|
||||
Wait container default_authors to be running...
|
||||
Container default_authors is running now
|
||||
Disconnecting from the cluster...
|
||||
Leaving workload deployments.apps/authors
|
||||
Disconnecting from the cluster...
|
||||
Performing cleanup operations
|
||||
Clearing DNS settings
|
||||
root@d0b3dab8912a:/app# exit
|
||||
exit
|
||||
➜ ~
|
||||
```
|
||||
|
||||
During the test, check which containers are running:
|
||||
|
||||
```text
|
||||
➜ ~ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
1cd576b51b66 naison/authors:latest "sh" 4 minutes ago Up 4 minutes authors_default_kubevpn_6df5f
|
||||
56a6793df82d nginx:latest "/docker-entrypoint.…" 4 minutes ago Up 4 minutes nginx_default_kubevpn_6df63
|
||||
d0b3dab8912a naison/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
1cd576b51b66 naison/authors:latest "sh" 4 minutes ago Up 4 minutes authors_default_kubevpn_6df5f
|
||||
56a6793df82d nginx:latest "/docker-entrypoint.…" 4 minutes ago Up 4 minutes nginx_default_kubevpn_6df63
|
||||
d0b3dab8912a ghcr.io/kubenetworks/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
|
||||
➜ ~
|
||||
```
|
||||
|
||||
* To clean up after the test:
|
||||
|
||||
```shell
|
||||
kubectl delete -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
kubectl delete -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
```
|
||||
|
||||
|
||||
### Multiple Protocol
|
||||
|
||||
Layer 3 and above of the OSI model are supported, covering protocols such as `ICMP`, `TCP`, and `UDP` (a Go sketch follows the list below):
|
||||
|
||||
- TCP
|
||||
- UDP
|
||||
- ICMP
|
||||
- GRPC
|
||||
- gRPC
|
||||
- Thrift
|
||||
- WebSocket
|
||||
- HTTP
|
||||
- ...
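As a rough illustration (assumptions: `kubevpn connect` is active and `<pod-ip>` is replaced with a real pod IP taken from `kubectl get pods -o wide`), plain TCP and UDP sockets opened on the local machine reach cluster addresses directly through the tunnel:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	podIP := "<pod-ip>" // placeholder: substitute a real pod IP reachable via the tunnel

	// TCP: dial an in-cluster port as if it were local.
	tcpConn, err := net.DialTimeout("tcp", net.JoinHostPort(podIP, "9080"), 3*time.Second)
	if err != nil {
		fmt.Println("tcp dial failed:", err)
	} else {
		fmt.Println("tcp connected to", tcpConn.RemoteAddr())
		_ = tcpConn.Close()
	}

	// UDP: "dialing" only sets the default destination; sending a datagram
	// verifies the route exists (no reply is expected in this sketch).
	udpConn, err := net.Dial("udp", net.JoinHostPort(podIP, "53"))
	if err != nil {
		fmt.Println("udp dial failed:", err)
		return
	}
	defer udpConn.Close()
	_, _ = udpConn.Write([]byte("ping"))
	fmt.Println("udp datagram sent to", udpConn.RemoteAddr())
}
```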
|
||||
@@ -638,166 +725,21 @@ kubectl delete -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/
|
||||
- Linux
|
||||
- Windows
|
||||
|
||||
on Windows platform, you need to
|
||||
install [PowerShell](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.2)
|
||||
in advance
|
||||
## Architecture
|
||||
|
||||
## FAQ
|
||||
[architecture](https://kubevpn.dev/docs/architecture/connect).
|
||||
|
||||
### 1. What should I do if the dependent image cannot be pulled, or the internal network cannot access docker.io?
|
||||
## Contributions
|
||||
|
||||
Answer: there are two solutions to this problem.
|
||||
Contributions are always welcome; even just opening an issue is appreciated.
|
||||
|
||||
- Solution 1: From a network that can access docker.io, transfer the image shown by the command `kubevpn version` to your own
  private image registry, and then add the option `--image` to specify that image when starting the command.
|
||||
Example:
|
||||
If you want to debug this project on your local PC, please follow the steps below:
|
||||
|
||||
``` shell
|
||||
➜ ~ kubevpn version
|
||||
KubeVPN: CLI
|
||||
Version: v2.0.0
|
||||
DaemonVersion: v2.0.0
|
||||
Image: docker.io/naison/kubevpn:v2.0.0
|
||||
Branch: feature/daemon
|
||||
Git commit: 7c3a87e14e05c238d8fb23548f95fa1dd6e96936
|
||||
Built time: 2023-09-30 22:01:51
|
||||
Built OS/Arch: darwin/arm64
|
||||
Built Go version: go1.20.5
|
||||
```
|
||||
- Start the daemon and the sudo daemon processes in your IDE's debug mode (essentially two gRPC servers).
- Add a breakpoint to file `pkg/daemon/action/connect.go:21`.
- Open another terminal and run `make kubevpn`.
- Then run `./bin/kubevpn connect` and it will hit the breakpoint.
|
||||
|
||||
The image is `docker.io/naison/kubevpn:v2.0.0`; transfer it to your private docker registry:
|
||||
### Supported by
|
||||
|
||||
```text
|
||||
docker pull docker.io/naison/kubevpn:v2.0.0
|
||||
docker tag docker.io/naison/kubevpn:v2.0.0 [docker registry]/[namespace]/[repo]:[tag]
|
||||
docker push [docker registry]/[namespace]/[repo]:[tag]
|
||||
```
|
||||
|
||||
Then you can use this image, as follows:
|
||||
|
||||
```text
|
||||
➜ ~ kubevpn connect --image [docker registry]/[namespace]/[repo]:[tag]
|
||||
got cidr from cache
|
||||
traffic manager not exist, try to create it...
|
||||
pod [kubevpn-traffic-manager] status is Running
|
||||
...
|
||||
```
|
||||
|
||||
- Solution 2: Use the option `--transfer-image`; enabling this flag automatically transfers the image from the default address to the address specified by `--image`.
  Example:
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn connect --transfer-image --image nocalhost-team-docker.pkg.coding.net/nocalhost/public/kubevpn:v2.0.0
|
||||
v2.0.0: Pulling from naison/kubevpn
|
||||
Digest: sha256:450446850891eb71925c54a2fab5edb903d71103b485d6a4a16212d25091b5f4
|
||||
Status: Image is up to date for naison/kubevpn:v2.0.0
|
||||
The push refers to repository [nocalhost-team-docker.pkg.coding.net/nocalhost/public/kubevpn]
|
||||
ecc065754c15: Preparing
|
||||
f2b6c07cb397: Pushed
|
||||
448eaa16d666: Pushed
|
||||
f5507edfc283: Pushed
|
||||
3b6ea9aa4889: Pushed
|
||||
ecc065754c15: Pushed
|
||||
feda785382bb: Pushed
|
||||
v2.0.0: digest: sha256:85d29ebb53af7d95b9137f8e743d49cbc16eff1cdb9983128ab6e46e0c25892c size: 2000
|
||||
start to connect
|
||||
got cidr from cache
|
||||
get cidr successfully
|
||||
update ref count successfully
|
||||
traffic manager already exist, reuse it
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
+---------------------------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster, enjoy it :) |
|
||||
+---------------------------------------------------------------------------+
|
||||
➜ ~
|
||||
```
|
||||
|
||||
### 2. When using `kubevpn dev`, I get error code 137. How do I resolve it?
|
||||
|
||||
```text
|
||||
dns service ok
|
||||
tar: Removing leading `/' from member names
|
||||
tar: Removing leading `/' from hard link targets
|
||||
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/7375606548554947868:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
Created container: server_vke-system_kubevpn_0db84
|
||||
Wait container server_vke-system_kubevpn_0db84 to be running...
|
||||
Container server_vke-system_kubevpn_0db84 is running on port 8888/tcp: 6789/tcp:6789 now
|
||||
$ Status: , Code: 137
|
||||
prepare to exit, cleaning up
|
||||
port-forward occurs error, err: lost connection to pod, retrying
|
||||
update ref count successfully
|
||||
ref-count is zero, prepare to clean up resource
|
||||
clean up successfully
|
||||
```
|
||||
|
||||
This happens because the resources allocated to your Docker Desktop are smaller than what the pod requests to run, so the container is OOM-killed. You can allocate more resources in the Docker Desktop settings under `Preferences --> Resources --> Memory`.
|
||||
|
||||
### 3. When using Docker on WSL (Windows Subsystem for Linux) with `kubevpn dev` mode, the container cannot connect to the cluster network. How do I solve this?
|
||||
|
||||
Answer:
|
||||
|
||||
This is because Docker on WSL uses the Windows network, so even if you start a container in WSL, that container does not use the WSL network but the Windows network.
|
||||
|
||||
Solution:
|
||||
|
||||
- 1): Install Docker inside WSL instead of using Windows Docker Desktop.
- 2): Run `kubevpn connect` on Windows, and then start `kubevpn dev` in WSL.
- 3): Start a container running `kubevpn connect` on Windows, and then start
  `kubevpn dev --network container:$CONTAINER_ID` in WSL.
|
||||
|
||||
### 4. After entering develop mode with the command `kubevpn dev`, I cannot access the kubernetes api-server and get the error `172.17.0.1:443 connect refused`. How do I solve this?
|
||||
|
||||
Answer:
|
||||
|
||||
The k8s network subnet may conflict with the docker subnet.
|
||||
|
||||
Solution:
|
||||
|
||||
- Use the option `--connect-mode container` when starting the command `kubevpn dev`.
- Modify `~/.docker/daemon.json` to add a non-conflicting subnet, e.g. `"bip": "172.15.0.1/24"`.
|
||||
|
||||
```shell
|
||||
➜ ~ cat ~/.docker/daemon.json
|
||||
{
|
||||
"builder": {
|
||||
"gc": {
|
||||
"defaultKeepStorage": "20GB",
|
||||
"enabled": true
|
||||
}
|
||||
},
|
||||
"experimental": false,
|
||||
"features": {
|
||||
"buildkit": true
|
||||
},
|
||||
"insecure-registries": [
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Add a non-conflicting subnet, e.g. `172.15.0.1/24`:
|
||||
|
||||
```shell
|
||||
➜ ~ cat ~/.docker/daemon.json
|
||||
{
|
||||
"builder": {
|
||||
"gc": {
|
||||
"defaultKeepStorage": "20GB",
|
||||
"enabled": true
|
||||
}
|
||||
},
|
||||
"experimental": false,
|
||||
"features": {
|
||||
"buildkit": true
|
||||
},
|
||||
"insecure-registries": [
|
||||
],
|
||||
"bip": "172.15.0.1/24"
|
||||
}
|
||||
```
|
||||
|
||||
Restart docker and retry.
|
||||
[](https://jb.gg/OpenSourceSupport)
|
||||
689
README_ZH.md
@@ -1,110 +1,138 @@
|
||||

|
||||
|
||||
[![GitHub Workflow][1]](https://github.com/KubeNetworks/kubevpn/actions)
|
||||
[![Go Version][2]](https://github.com/KubeNetworks/kubevpn/blob/master/go.mod)
|
||||
[![Go Report][3]](https://goreportcard.com/badge/github.com/KubeNetworks/kubevpn)
|
||||
[![Maintainability][4]](https://codeclimate.com/github/KubeNetworks/kubevpn/maintainability)
|
||||
[![GitHub License][5]](https://github.com/KubeNetworks/kubevpn/blob/main/LICENSE)
|
||||
[![GitHub Workflow][1]](https://github.com/kubenetworks/kubevpn/actions)
|
||||
[![Go Version][2]](https://github.com/kubenetworks/kubevpn/blob/master/go.mod)
|
||||
[![Go Report][3]](https://goreportcard.com/report/github.com/wencaiwulue/kubevpn)
|
||||
[![Maintainability][4]](https://codeclimate.com/github/kubenetworks/kubevpn/maintainability)
|
||||
[![GitHub License][5]](https://github.com/kubenetworks/kubevpn/blob/main/LICENSE)
|
||||
[![Docker Pulls][6]](https://hub.docker.com/r/naison/kubevpn)
|
||||
[![Releases][7]](https://github.com/KubeNetworks/kubevpn/releases)
|
||||
[![Releases][7]](https://github.com/kubenetworks/kubevpn/releases)
|
||||
[](https://pkg.go.dev/github.com/wencaiwulue/kubevpn/v2)
|
||||
[](https://codecov.io/gh/wencaiwulue/kubevpn)
|
||||
[](https://snapcraft.io/kubevpn)
|
||||
|
||||
[1]: https://img.shields.io/github/actions/workflow/status/KubeNetworks/kubevpn/release.yml?logo=github
|
||||
[1]: https://img.shields.io/github/actions/workflow/status/kubenetworks/kubevpn/release.yml?logo=github
|
||||
|
||||
[2]: https://img.shields.io/github/go-mod/go-version/KubeNetworks/kubevpn?logo=go
|
||||
[2]: https://img.shields.io/github/go-mod/go-version/kubenetworks/kubevpn?logo=go
|
||||
|
||||
[3]: https://img.shields.io/badge/go%20report-A+-brightgreen.svg?style=flat
|
||||
[3]: https://goreportcard.com/badge/github.com/wencaiwulue/kubevpn?style=flat
|
||||
|
||||
[4]: https://api.codeclimate.com/v1/badges/b5b30239174fc6603aca/maintainability
|
||||
|
||||
[5]: https://img.shields.io/github/license/KubeNetworks/kubevpn
|
||||
[5]: https://img.shields.io/github/license/kubenetworks/kubevpn
|
||||
|
||||
[6]: https://img.shields.io/docker/pulls/naison/kubevpn?logo=docker
|
||||
|
||||
[7]: https://img.shields.io/github/v/release/KubeNetworks/kubevpn?logo=smartthings
|
||||
[7]: https://img.shields.io/github/v/release/kubenetworks/kubevpn?logo=smartthings
|
||||
|
||||
# KubeVPN
|
||||
|
||||
[English](README.md) | [中文](README_ZH.md) | [维基](https://github.com/KubeNetworks/kubevpn/wiki/%E6%9E%B6%E6%9E%84)
|
||||
[English](README.md) | [中文](README_ZH.md) | [维基](https://github.com/kubenetworks/kubevpn/wiki/%E6%9E%B6%E6%9E%84)
|
||||
|
||||
KubeVPN 是一个云原生开发工具, 可以在本地连接云端 kubernetes 网络的工具,可以在本地直接访问远端集群的服务。也可以在远端集群访问到本地服务,便于调试及开发。同时还可以使用开发模式,直接在本地使用 Docker
|
||||
将远程容器运行在本地。
|
||||
KubeVPN 提供一个云原生开发环境。通过连接云端 kubernetes 网络,可以在本地使用 k8s dns 或者 Pod IP / Service IP
|
||||
直接访问远端集群中的服务。拦截远端集群中的工作负载的入流量到本地电脑,配合服务网格便于调试及开发。同时还可以使用开发模式,直接在本地使用
|
||||
Docker
|
||||
模拟 k8s pod runtime 将容器运行在本地 (具有相同的环境变量,磁盘和网络)。
|
||||
|
||||

|
||||
|
||||
## 内容
|
||||
|
||||
1. [快速开始](./README_ZH.md#快速开始)
|
||||
2. [功能](./README_ZH.md#功能)
|
||||
3. [架构](./README_ZH.md#架构)
|
||||
4. [贡献代码](./README_ZH.md#贡献代码)
|
||||
|
||||
## 快速开始
|
||||
|
||||
#### 从 Github release 下载编译好的二进制文件
|
||||
|
||||
[链接](https://github.com/KubeNetworks/kubevpn/releases/latest)
|
||||
|
||||
#### 从 自定义 Krew 仓库安装
|
||||
### 使用脚本安装 ( macOS / Linux)
|
||||
|
||||
```shell
|
||||
(
|
||||
kubectl krew index add kubevpn https://github.com/KubeNetworks/kubevpn.git && \
|
||||
kubectl krew install kubevpn/kubevpn && kubectl kubevpn
|
||||
)
|
||||
curl -fsSL https://kubevpn.dev/install.sh | sh
|
||||
```
|
||||
|
||||
#### 自己构建二进制文件
|
||||
### 使用 [brew](https://brew.sh/) 安装 (macOS / Linux)
|
||||
|
||||
```shell
|
||||
(
|
||||
git clone https://github.com/KubeNetworks/kubevpn.git && \
|
||||
cd kubevpn && make kubevpn && ./bin/kubevpn
|
||||
)
|
||||
|
||||
brew install kubevpn
|
||||
```
|
||||
|
||||
#### 安装 bookinfo 作为 demo 应用
|
||||
### 使用 [snap](https://snapcraft.io/kubevpn) 安装 (Linux)
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
sudo snap install kubevpn
|
||||
```
|
||||
|
||||
### 使用 [scoop](https://scoop.sh/) (Windows)
|
||||
|
||||
```shell
|
||||
scoop bucket add extras
|
||||
scoop install kubevpn
|
||||
```
|
||||
|
||||
### 使用 [krew](https://krew.sigs.k8s.io/) (Windows / macOS / Linux)
|
||||
|
||||
```shell
|
||||
kubectl krew index add kubevpn https://github.com/kubenetworks/kubevpn.git
|
||||
kubectl krew install kubevpn/kubevpn
|
||||
kubectl kubevpn
|
||||
```
|
||||
|
||||
### 从 Github release 下载 (Windows / macOS / Linux)
|
||||
|
||||
[https://github.com/kubenetworks/kubevpn/releases/latest](https://github.com/kubenetworks/kubevpn/releases/latest)
|
||||
|
||||
### 安装 bookinfo 作为 demo 应用
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
```
|
||||
|
||||
## 功能
|
||||
|
||||
### 链接到集群网络
|
||||
|
||||
使用命令 `kubevpn connect` 链接到集群,请注意这里需要输入电脑密码。因为需要 `root` 权限。(创建虚拟网卡)
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn connect
|
||||
Password:
|
||||
start to connect
|
||||
get cidr from cluster info...
|
||||
get cidr from cluster info ok
|
||||
get cidr from cni...
|
||||
wait pod cni-net-dir-kubevpn to be running timeout, reason , ignore
|
||||
get cidr from svc...
|
||||
get cidr from svc ok
|
||||
get cidr successfully
|
||||
traffic manager not exist, try to create it...
|
||||
label namespace default
|
||||
create serviceAccount kubevpn-traffic-manager
|
||||
create roles kubevpn-traffic-manager
|
||||
create roleBinding kubevpn-traffic-manager
|
||||
create service kubevpn-traffic-manager
|
||||
create deployment kubevpn-traffic-manager
|
||||
pod kubevpn-traffic-manager-66d969fd45-9zlbp is Pending
|
||||
Starting connect
|
||||
Getting network CIDR from cluster info...
|
||||
Getting network CIDR from CNI...
|
||||
Getting network CIDR from services...
|
||||
Labeling Namespace default
|
||||
Creating ServiceAccount kubevpn-traffic-manager
|
||||
Creating Roles kubevpn-traffic-manager
|
||||
Creating RoleBinding kubevpn-traffic-manager
|
||||
Creating Service kubevpn-traffic-manager
|
||||
Creating MutatingWebhookConfiguration kubevpn-traffic-manager
|
||||
Creating Deployment kubevpn-traffic-manager
|
||||
|
||||
Pod kubevpn-traffic-manager-66d969fd45-9zlbp is Pending
|
||||
Container Reason Message
|
||||
control-plane ContainerCreating
|
||||
vpn ContainerCreating
|
||||
webhook ContainerCreating
|
||||
|
||||
pod kubevpn-traffic-manager-66d969fd45-9zlbp is Running
|
||||
Pod kubevpn-traffic-manager-66d969fd45-9zlbp is Running
|
||||
Container Reason Message
|
||||
control-plane ContainerRunning
|
||||
vpn ContainerRunning
|
||||
webhook ContainerRunning
|
||||
|
||||
Creating mutatingWebhook_configuration for kubevpn-traffic-manager
|
||||
update ref count successfully
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
+---------------------------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster, enjoy it :) |
|
||||
+---------------------------------------------------------------------------+
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
+----------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster ! |
|
||||
+----------------------------------------------------------+
|
||||
➜ ~
|
||||
```
|
||||
|
||||
提示已经链接到集群了。使用命令 `kubevpn status` 检查一下状态。
|
||||
|
||||
```shell
|
||||
➜ ~ kubectl get pods -o wide
|
||||
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
|
||||
@@ -116,6 +144,8 @@ ratings-77b6cd4499-zvl6c 1/1 Running 0
|
||||
reviews-85c88894d9-vgkxd 1/1 Running 0 24d 172.29.2.249 192.168.0.5 <none> <none>
|
||||
```
|
||||
|
||||
找一个 pod 的 IP,比如 `productpage-788df7ff7f-jpkcs` 的 IP `172.29.2.134`
|
||||
|
||||
```shell
|
||||
➜ ~ ping 172.29.2.134
|
||||
PING 172.29.2.134 (172.29.2.134): 56 data bytes
|
||||
@@ -129,6 +159,8 @@ PING 172.29.2.134 (172.29.2.134): 56 data bytes
|
||||
round-trip min/avg/max/stddev = 54.293/55.380/56.270/0.728 ms
|
||||
```
|
||||
|
||||
测试应该可以直接 Ping 通,说明本地可以正常访问到集群网络了。
|
||||
|
||||
```shell
|
||||
➜ ~ kubectl get services -o wide
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
|
||||
@@ -141,6 +173,8 @@ ratings ClusterIP 172.21.3.247 <none> 9080/TCP
|
||||
reviews ClusterIP 172.21.8.24 <none> 9080/TCP 114d app=reviews
|
||||
```
|
||||
|
||||
找一个 service 的 IP,比如 `productpage` 的 IP `172.21.10.49`,试着访问一下服务 `productpage`
|
||||
|
||||
```shell
|
||||
➜ ~ curl 172.21.10.49:9080
|
||||
<!DOCTYPE html>
|
||||
@@ -152,8 +186,16 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
```
|
||||
|
||||
可以看到也可以正常访问,也就是可以在本地访问到集群的 pod 和 service 了~
|
||||
|
||||
### 域名解析功能
|
||||
|
||||
支持 k8s dns 解析。比如一个名为 `productpage` 的 Pod 或者 Service 处于 `default` 命名空间下可以被如下域名正常解析到:
|
||||
|
||||
- `productpage`
|
||||
- `productpage.default`
|
||||
- `productpage.default.svc.cluster.local`
|
||||
|
||||
```shell
|
||||
➜ ~ curl productpage.default.svc.cluster.local:9080
|
||||
<!DOCTYPE html>
|
||||
@@ -165,8 +207,15 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
```
|
||||
|
||||
可以看到能够被正常解析,并且返回相应内容。
|
||||
|
||||
### 短域名解析功能
|
||||
|
||||
连接到此命名空间下,可以直接使用 `service` name 的方式访问,否则访问其它命令空间下的服务,需要带上命令空间作为域名的一部分,使用如下的域名即可。
|
||||
|
||||
- `productpage.default`
|
||||
- `productpage.default.svc.cluster.local`
|
||||
|
||||
```shell
|
||||
➜ ~ curl productpage:9080
|
||||
<!DOCTYPE html>
|
||||
@@ -178,25 +227,71 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
|
||||
...
|
||||
```
|
||||
|
||||
可以看到直接使用 service name 的方式,可以正常访问到集群资源。
|
||||
|
||||
### 链接到多集群网络
|
||||
|
||||
有个两个模式
|
||||
|
||||
- 模式 `lite`: 可以链接到多个集群网络,但是仅支持链接到多集群。
|
||||
- 模式 `full`: 不仅支持链接到单个集群网络,还可以拦截工作负载流量到本地电脑。
|
||||
|
||||
可以看到已经链接到了一个集群 `ccijorbccotmqodvr189g`,是 `full` 模式
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn status
|
||||
ID Mode Cluster Kubeconfig Namespace Status
|
||||
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
|
||||
```
|
||||
|
||||
此时还可以使用 `lite` 模式链接到其它集群
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn connect -n default --kubeconfig ~/.kube/dev_config --lite
|
||||
Starting connect
|
||||
Got network CIDR from cache
|
||||
Use exist traffic manager
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
+----------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster ! |
|
||||
+----------------------------------------------------------+
|
||||
```
|
||||
|
||||
使用命令 `kubevpn status` 查看当前链接状态。
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn status
|
||||
ID Mode Cluster Kubeconfig Namespace Status
|
||||
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
|
||||
1 lite ccidd77aam2dtnc3qnddg /Users/naison/.kube/dev_config default Connected
|
||||
➜ ~
|
||||
```
|
||||
|
||||
可以看到连接到了多个集群。
|
||||
|
||||
### 反向代理
|
||||
|
||||
使用命令 `kubevpn proxy` 代理所有的入站流量到本地电脑。
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn proxy deployment/productpage
|
||||
already connect to cluster
|
||||
start to create remote inbound pod for deployment/productpage
|
||||
workload default/deployment/productpage is controlled by a controller
|
||||
rollout status for deployment/productpage
|
||||
Connected to cluster
|
||||
Injecting inbound sidecar for deployment/productpage
|
||||
Checking rollout status for deployment/productpage
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
deployment "productpage" successfully rolled out
|
||||
rollout status for deployment/productpage successfully
|
||||
create remote inbound pod for deployment/productpage successfully
|
||||
+---------------------------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster, enjoy it :) |
|
||||
+---------------------------------------------------------------------------+
|
||||
Rollout successfully for deployment/productpage
|
||||
+----------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster ! |
|
||||
+----------------------------------------------------------+
|
||||
➜ ~
|
||||
```
|
||||
|
||||
此时在本地使用 `go` 启动一个服务,用于承接流量。
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
@@ -213,6 +308,8 @@ func main() {
|
||||
}
|
||||
```
|
||||
|
||||
使用 `service` name 的方式,直接访问集群中的 `productpage` 服务。
|
||||
|
||||
```shell
|
||||
➜ ~ curl productpage:9080
|
||||
Hello world!%
|
||||
@@ -220,27 +317,28 @@ Hello world!%
|
||||
Hello world!%
|
||||
```
|
||||
|
||||
可以看到直接击中了本地电脑的服务。
|
||||
|
||||
### 反向代理支持 service mesh
|
||||
|
||||
支持 HTTP, GRPC 和 WebSocket 等, 携带了指定 header `"a: 1"` 的流量,将会路由到本地
|
||||
支持 HTTP, GRPC 和 WebSocket 等, 携带了指定 header `"foo: bar"` 的流量,将会路由到本地
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn proxy deployment/productpage --headers a=1
|
||||
already connect to cluster
|
||||
start to create remote inbound pod for deployment/productpage
|
||||
patch workload default/deployment/productpage with sidecar
|
||||
rollout status for deployment/productpage
|
||||
➜ ~ kubevpn proxy deployment/productpage --headers foo=bar
|
||||
Connected to cluster
|
||||
Injecting inbound sidecar for deployment/productpage
|
||||
Checking rollout status for deployment/productpage
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
deployment "productpage" successfully rolled out
|
||||
rollout status for deployment/productpage successfully
|
||||
create remote inbound pod for deployment/productpage successfully
|
||||
+---------------------------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster, enjoy it :) |
|
||||
+---------------------------------------------------------------------------+
|
||||
Rollout successfully for deployment/productpage
|
||||
+----------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster ! |
|
||||
+----------------------------------------------------------+
|
||||
➜ ~
|
||||
```
|
||||
|
||||
不带 header 直接访问集群资源,可以看到返回的是集群中的服务内容。
|
||||
|
||||
```shell
|
||||
➜ ~ curl productpage:9080
|
||||
<!DOCTYPE html>
|
||||
@@ -253,34 +351,46 @@ create remote inbound pod for deployment/productpage successfully
|
||||
...
|
||||
```
|
||||
|
||||
带上特定 header 访问集群资源,可以看到返回了本地服务的内容。
|
||||
|
||||
```shell
|
||||
➜ ~ curl productpage:9080 -H "a: 1"
|
||||
➜ ~ curl productpage:9080 -H "foo: bar"
|
||||
Hello world!%
|
||||
```
|
||||
|
||||
如果你需要取消代理流量,可以执行如下命令:
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn leave deployments/productpage
|
||||
Leaving workload deployments/productpage
|
||||
Checking rollout status for deployments/productpage
|
||||
Waiting for deployment "productpage" rollout to finish: 0 out of 1 new replicas have been updated...
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
Rollout successfully for deployments/productpage
|
||||
```
|
||||
|
||||
### 本地进入开发模式 🐳
|
||||
|
||||
将 Kubernetes pod 运行在本地的 Docker 容器中,同时配合 service mesh, 拦截带有指定 header 的流量到本地,或者所有的流量到本地。这个开发模式依赖于本地 Docker。
|
||||
将 Kubernetes pod 运行在本地的 Docker 容器中,同时配合 service mesh, 拦截带有指定 header 的流量到本地,或者所有的流量到本地。这个开发模式依赖于本地
|
||||
Docker。
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn dev deployment/authors --headers a=1 -it --rm --entrypoint sh
|
||||
connectting to cluster
|
||||
start to connect
|
||||
got cidr from cache
|
||||
get cidr successfully
|
||||
update ref count successfully
|
||||
traffic manager already exist, reuse it
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
start to create remote inbound pod for Deployment.apps/authors
|
||||
patch workload default/Deployment.apps/authors with sidecar
|
||||
rollout status for Deployment.apps/authors
|
||||
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
|
||||
➜ ~ kubevpn dev deployment/authors --headers foo=bar --entrypoint sh
|
||||
Starting connect
|
||||
Got network CIDR from cache
|
||||
Use exist traffic manager
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
Injecting inbound sidecar for deployment/authors
|
||||
Patching workload deployment/authors
|
||||
Checking rollout status for deployment/authors
|
||||
Waiting for deployment "authors" rollout to finish: 0 out of 1 new replicas have been updated...
|
||||
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
|
||||
deployment "authors" successfully rolled out
|
||||
rollout status for Deployment.apps/authors successfully
|
||||
create remote inbound pod for Deployment.apps/authors successfully
|
||||
Rollout successfully for Deployment.apps/authors
|
||||
tar: removing leading '/' from member names
|
||||
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/4563987760170736212:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
tar: Removing leading `/' from member names
|
||||
@@ -317,21 +427,22 @@ OK: 8 MiB in 19 packages
|
||||
/opt/microservices # 2023/09/30 13:41:58 Start listening http port 9080 ...
|
||||
|
||||
/opt/microservices # curl localhost:9080/health
|
||||
{"status":"Authors is healthy"}/opt/microservices # exit
|
||||
prepare to exit, cleaning up
|
||||
update ref count successfully
|
||||
tun device closed
|
||||
leave resource: deployments.apps/authors
|
||||
workload default/deployments.apps/authors is controlled by a controller
|
||||
leave resource: deployments.apps/authors successfully
|
||||
clean up successfully
|
||||
prepare to exit, cleaning up
|
||||
update ref count successfully
|
||||
clean up successfully
|
||||
{"status":"Authors is healthy"} /opt/microservices # echo "continue testing pod access..."
|
||||
continue testing pod access...
|
||||
/opt/microservices # exit
|
||||
Created container: default_authors
|
||||
Wait container default_authors to be running...
|
||||
Container default_authors is running now
|
||||
Disconnecting from the cluster...
|
||||
Leaving workload deployments.apps/authors
|
||||
Disconnecting from the cluster...
|
||||
Performing cleanup operations
|
||||
Clearing DNS settings
|
||||
➜ ~
|
||||
```
|
||||
|
||||
此时本地会启动两个 container, 对应 pod 容器中的两个 container, 并且共享端口, 可以直接使用 localhost:port 的形式直接访问另一个 container,
|
||||
此时本地会启动两个 container, 对应 pod 容器中的两个 container, 并且共享端口, 可以直接使用 localhost:port 的形式直接访问另一个
|
||||
container,
|
||||
并且, 所有的环境变量、挂载卷、网络条件都和 pod 一样, 真正做到与 kubernetes 运行环境一致。
|
||||
|
||||
```shell
|
||||
@@ -345,22 +456,20 @@ fc04e42799a5 nginx:latest "/docker-entrypoint.…" 37 sec
|
||||
如果你只是想在本地启动镜像,可以用一种简单的方式:
|
||||
|
||||
```shell
|
||||
kubevpn dev deployment/authors --no-proxy -it --rm
|
||||
kubevpn dev deployment/authors --no-proxy
|
||||
```
|
||||
|
||||
例如:
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn dev deployment/authors --no-proxy -it --rm
|
||||
connectting to cluster
|
||||
start to connect
|
||||
got cidr from cache
|
||||
get cidr successfully
|
||||
update ref count successfully
|
||||
traffic manager already exist, reuse it
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
➜ ~ kubevpn dev deployment/authors --no-proxy
|
||||
Starting connect
|
||||
Got network CIDR from cache
|
||||
Use exist traffic manager
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
tar: removing leading '/' from member names
|
||||
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/5631078868924498209:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
tar: Removing leading `/' from member names
|
||||
@@ -378,68 +487,59 @@ Created main container: authors_default_kubevpn_ff34b
|
||||
|
||||
此时程序会挂起,默认为显示日志
|
||||
|
||||
如果你想指定在本地启动容器的镜像, 可以使用参数 `--docker-image`, 当本地不存在该镜像时, 会从对应的镜像仓库拉取。如果你想指定启动参数,可以使用 `--entrypoint`
|
||||
如果你想指定在本地启动容器的镜像, 可以使用参数 `--dev-image`, 当本地不存在该镜像时,
|
||||
会从对应的镜像仓库拉取。如果你想指定启动参数,可以使用 `--entrypoint`
|
||||
参数,替换为你想要执行的命令,比如 `--entrypoint /bin/bash`, 更多使用参数,请参见 `kubevpn dev --help`.
|
||||
|
||||
### DinD ( Docker in Docker ) 在 Docker 中使用 kubevpn
|
||||
|
||||
如果你想在本地使用 Docker in Docker (DinD) 的方式启动开发模式, 由于程序会读写 `/tmp` 目录,您需要手动添加参数 `-v /tmp:/tmp`, 还有一点需要注意, 如果使用 DinD
|
||||
如果你想在本地使用 Docker in Docker (DinD) 的方式启动开发模式, 由于程序会读写 `/tmp`
|
||||
目录,您需要手动添加参数 `-v /tmp:/tmp`, 还有一点需要注意, 如果使用 DinD
|
||||
模式,为了共享容器网络和 pid, 还需要指定参数 `--network`
|
||||
|
||||
例如:
|
||||
|
||||
```shell
|
||||
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 naison/kubevpn:v2.0.0
|
||||
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 ghcr.io/kubenetworks/kubevpn:latest
|
||||
```
|
||||
|
||||
```shell
|
||||
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 naison/kubevpn:v2.0.0
|
||||
Unable to find image 'naison/kubevpn:v2.0.0' locally
|
||||
v2.0.0: Pulling from naison/kubevpn
|
||||
445a6a12be2b: Already exists
|
||||
bd6c670dd834: Pull complete
|
||||
64a7297475a2: Pull complete
|
||||
33fa2e3224db: Pull complete
|
||||
e008f553422a: Pull complete
|
||||
5132e0110ddc: Pull complete
|
||||
5b2243de1f1a: Pull complete
|
||||
662a712db21d: Pull complete
|
||||
4f4fb700ef54: Pull complete
|
||||
33f0298d1d4f: Pull complete
|
||||
Digest: sha256:115b975a97edd0b41ce7a0bc1d8428e6b8569c91a72fe31ea0bada63c685742e
|
||||
Status: Downloaded newer image for naison/kubevpn:v2.0.0
|
||||
root@d0b3dab8912a:/app# kubevpn dev deployment/authors --headers user=naison -it --entrypoint sh
|
||||
|
||||
----------------------------------------------------------------------------------
|
||||
Warn: Use sudo to execute command kubevpn can not use user env KUBECONFIG.
|
||||
Because of sudo user env and user env are different.
|
||||
Current env KUBECONFIG value:
|
||||
----------------------------------------------------------------------------------
|
||||
|
||||
hostname is d0b3dab8912a
|
||||
connectting to cluster
|
||||
start to connect
|
||||
got cidr from cache
|
||||
get cidr successfully
|
||||
update ref count successfully
|
||||
traffic manager already exist, reuse it
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
start to create remote inbound pod for Deployment.apps/authors
|
||||
patch workload default/Deployment.apps/authors with sidecar
|
||||
rollout status for Deployment.apps/authors
|
||||
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
|
||||
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 ghcr.io/kubenetworks/kubevpn:latest
|
||||
Unable to find image 'ghcr.io/kubenetworks/kubevpn:latest' locally
|
||||
latest: Pulling from ghcr.io/kubenetworks/kubevpn
|
||||
9c704ecd0c69: Already exists
|
||||
4987d0a976b5: Pull complete
|
||||
8aa94c4fc048: Pull complete
|
||||
526fee014382: Pull complete
|
||||
6c1c2bedceb6: Pull complete
|
||||
97ac845120c5: Pull complete
|
||||
ca82aef6a9eb: Pull complete
|
||||
1fd9534c7596: Pull complete
|
||||
588bd802eb9c: Pull complete
|
||||
Digest: sha256:368db2e0d98f6866dcefd60512960ce1310e85c24a398fea2a347905ced9507d
|
||||
Status: Downloaded newer image for ghcr.io/kubenetworks/kubevpn:latest
|
||||
WARNING: image with reference ghcr.io/kubenetworks/kubevpn was found but does not match the specified platform: wanted linux/amd64, actual: linux/arm64
|
||||
root@5732124e6447:/app# kubevpn dev deployment/authors --headers user=naison --entrypoint sh
|
||||
hostname is 5732124e6447
|
||||
Starting connect
|
||||
Got network CIDR from cache
|
||||
Use exist traffic manager
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
Injecting inbound sidecar for deployment/authors
|
||||
Patching workload deployment/authors
|
||||
Checking rollout status for deployment/authors
|
||||
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
|
||||
deployment "authors" successfully rolled out
|
||||
rollout status for Deployment.apps/authors successfully
|
||||
create remote inbound pod for Deployment.apps/authors successfully
|
||||
Rollout successfully for Deployment.apps/authors
|
||||
tar: removing leading '/' from member names
|
||||
/tmp/6460902982794789917:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
tar: Removing leading `/' from member names
|
||||
tar: Removing leading `/' from hard link targets
|
||||
/tmp/5028895788722532426:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
network mode is container:d0b3dab8912a
|
||||
Network mode is container:d0b3dab8912a
|
||||
Created container: nginx_default_kubevpn_6df63
|
||||
Wait container nginx_default_kubevpn_6df63 to be running...
|
||||
Container nginx_default_kubevpn_6df63 is running now
|
||||
@@ -456,77 +556,82 @@ PID USER TIME COMMAND
|
||||
Executing busybox-1.33.1-r3.trigger
|
||||
OK: 8 MiB in 19 packagesnx: worker process
|
||||
/opt/microservices #
|
||||
|
||||
/opt/microservices # cat > hello.go <<EOF
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func main() {
|
||||
http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
|
||||
_, _ = io.WriteString(writer, "Hello world!")
|
||||
fmt.Println(">> Container Received request: %s %s from %s\n", request.Method, request.RequestURI, request.RemoteAddr)
|
||||
})
|
||||
fmt.Println("Start listening http port 9080 ...")
|
||||
_ = http.ListenAndServe(":9080", nil)
|
||||
}
|
||||
EOF
|
||||
/opt/microservices # go build hello.go
|
||||
/opt/microservices #
|
||||
//opt/microservices # ls -alh
|
||||
total 12M
|
||||
drwxr-xr-x 1 root root 26 Nov 4 10:29 .
|
||||
drwxr-xr-x 1 root root 26 Oct 18 2021 ..
|
||||
-rwxr-xr-x 1 root root 6.3M Oct 18 2021 app
|
||||
-rwxr-xr-x 1 root root 5.8M Nov 4 10:29 hello
|
||||
-rw-r--r-- 1 root root 387 Nov 4 10:28 hello.go
|
||||
/opt/microservices #
|
||||
/opt/microservices # apk add curl
|
||||
OK: 8 MiB in 19 packages
|
||||
/opt/microservices # curl localhost:80
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Welcome to nginx!</title>
|
||||
<style>
|
||||
html { color-scheme: light dark; }
|
||||
body { width: 35em; margin: 0 auto;
|
||||
font-family: Tahoma, Verdana, Arial, sans-serif; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Welcome to nginx!</h1>
|
||||
<p>If you see this page, the nginx web server is successfully installed and
|
||||
working. Further configuration is required.</p>
|
||||
/opt/microservices # ./hello &
|
||||
/opt/microservices # Start listening http port 9080 ...
|
||||
[2]+ Done ./hello
|
||||
/opt/microservices # curl localhost:9080
|
||||
>> Container Received request: GET / from 127.0.0.1:41230
|
||||
Hello world!/opt/microservices #
|
||||
|
||||
<p>For online documentation and support please refer to
|
||||
<a href="http://nginx.org/">nginx.org</a>.<br/>
|
||||
Commercial support is available at
|
||||
<a href="http://nginx.com/">nginx.com</a>.</p>
|
||||
|
||||
<p><em>Thank you for using nginx.</em></p>
|
||||
</body>
|
||||
</html>
|
||||
/opt/microservices # ls
|
||||
app
|
||||
/opt/microservices # ls -alh
|
||||
total 6M
|
||||
drwxr-xr-x 2 root root 4.0K Oct 18 2021 .
|
||||
drwxr-xr-x 1 root root 4.0K Oct 18 2021 ..
|
||||
-rwxr-xr-x 1 root root 6.3M Oct 18 2021 app
|
||||
/opt/microservices # ./app &
|
||||
/opt/microservices # 2023/09/30 14:27:32 Start listening http port 9080 ...
|
||||
|
||||
/opt/microservices # curl authors:9080/health
|
||||
/opt/microservices # curl authors:9080/health
|
||||
{"status":"Authors is healthy"}/opt/microservices #
|
||||
/opt/microservices # curl authors:9080/health -H "foo: bar"
|
||||
>>Received request: GET /health from 198.19.0.109:57930
|
||||
Hello world!/opt/microservices #
|
||||
/opt/microservices # curl localhost:9080/health
|
||||
{"status":"Authors is healthy"}/opt/microservices # exit
|
||||
prepare to exit, cleaning up
|
||||
update ref count successfully
|
||||
tun device closed
|
||||
leave resource: deployments.apps/authors
|
||||
workload default/deployments.apps/authors is controlled by a controller
|
||||
leave resource: deployments.apps/authors successfully
|
||||
clean up successfully
|
||||
prepare to exit, cleaning up
|
||||
update ref count successfully
|
||||
clean up successfully
|
||||
Created container: default_authors
|
||||
Wait container default_authors to be running...
|
||||
Container default_authors is running now
|
||||
Disconnecting from the cluster...
|
||||
Leaving workload deployments.apps/authors
|
||||
Disconnecting from the cluster...
|
||||
Performing cleanup operations
|
||||
Clearing DNS settings
|
||||
root@d0b3dab8912a:/app# exit
|
||||
exit
|
||||
➜ ~
|
||||
```
|
||||
|
||||
可以看到实际上是在本地使用 `Docker` 启动了三个容器。
|
||||
|
||||
```text
|
||||
➜ ~ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
1cd576b51b66 naison/authors:latest "sh" 4 minutes ago Up 4 minutes authors_default_kubevpn_6df5f
|
||||
56a6793df82d nginx:latest "/docker-entrypoint.…" 4 minutes ago Up 4 minutes nginx_default_kubevpn_6df63
|
||||
d0b3dab8912a naison/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
1cd576b51b66 naison/authors:latest "sh" 4 minutes ago Up 4 minutes authors_default_kubevpn_6df5f
|
||||
56a6793df82d nginx:latest "/docker-entrypoint.…" 4 minutes ago Up 4 minutes nginx_default_kubevpn_6df63
|
||||
d0b3dab8912a ghcr.io/kubenetworks/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
|
||||
➜ ~
|
||||
```
|
||||
|
||||
### 支持多种协议
|
||||
|
||||
支持 OSI 模型三层及三层以上的协议,例如:
|
||||
|
||||
- TCP
|
||||
- UDP
|
||||
- ICMP
|
||||
- GRPC
|
||||
- gRPC
|
||||
- Thrift
|
||||
- WebSocket
|
||||
- HTTP
|
||||
- ...
|
||||
@@ -537,157 +642,21 @@ d0b3dab8912a naison/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago
|
||||
- Linux
|
||||
- Windows
|
||||
|
||||
Windows
|
||||
下需要安装 [PowerShell](https://docs.microsoft.com/zh-cn/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.2)
|
||||
## Architecture

## FAQ

[Architecture](https://kubevpn.dev/docs/architecture/connect)

### 1. What should I do if the required images cannot be pulled, or docker.io is unreachable from the internal network?

## Contributing

Answer: there are two ways to solve this.

All contributions are welcome; even just opening an issue is appreciated~

- Option 1: on a network that can reach docker.io, re-push the image referenced by `kubevpn version` to your own private registry, then add `--image <new image>` when running the command.
  For example:

If you want to debug this project on your local machine, you can follow these steps:

``` shell
➜  ~ kubevpn version
KubeVPN: CLI
    Version: v2.0.0
    DaemonVersion: v2.0.0
    Image: docker.io/naison/kubevpn:v2.0.0
    Branch: feature/daemon
    Git commit: 7c3a87e14e05c238d8fb23548f95fa1dd6e96936
    Built time: 2023-09-30 22:01:51
    Built OS/Arch: darwin/arm64
    Built Go version: go1.20.5
```

- Use your favorite IDE to debug the two background processes, daemon and sudo daemon (they are essentially two gRPC servers).
- Add a breakpoint in the file `pkg/daemon/action/connect.go:21`.
- Open a new terminal and run `make kubevpn`.
- Then run `./bin/kubevpn connect`; the breakpoint will be hit (a CLI sketch follows this list).

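If you prefer the command line to an IDE, a roughly equivalent Delve session looks like the sketch below. The `daemon` and `daemon --sudo` invocations are assumptions about how the two background gRPC servers are started; adjust them to whatever your checkout actually uses.

```shell
make kubevpn

# user-level daemon under the debugger (assumed subcommand)
dlv exec ./bin/kubevpn -- daemon

# privileged daemon in a second terminal (assumed flag)
sudo dlv exec ./bin/kubevpn -- daemon --sudo

# inside dlv, set the breakpoint mentioned above, then continue:
#   (dlv) break pkg/daemon/action/connect.go:21
#   (dlv) continue

# in a third terminal, trigger the breakpoint
./bin/kubevpn connect
```
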
The image is `docker.io/naison/kubevpn:v2.0.0`; re-push this image to your own registry.

### Supporters

```text
docker pull docker.io/naison/kubevpn:v2.0.0
docker tag docker.io/naison/kubevpn:v2.0.0 [docker registry]/[namespace]/[repo]:[tag]
docker push [docker registry]/[namespace]/[repo]:[tag]
```

Then you can use this image, as follows:

```text
➜  ~ kubevpn connect --image [docker registry]/[namespace]/[repo]:[tag]
got cidr from cache
traffic manager not exist, try to create it...
pod [kubevpn-traffic-manager] status is Running
...
```

- Option 2: use the `--transfer-image` option, which automatically transfers the image to the registry specified by `--image`.
  For example:

```shell
➜  ~ kubevpn connect --transfer-image --image nocalhost-team-docker.pkg.coding.net/nocalhost/public/kubevpn:v2.0.0
v2.0.0: Pulling from naison/kubevpn
Digest: sha256:450446850891eb71925c54a2fab5edb903d71103b485d6a4a16212d25091b5f4
Status: Image is up to date for naison/kubevpn:v2.0.0
The push refers to repository [nocalhost-team-docker.pkg.coding.net/nocalhost/public/kubevpn]
ecc065754c15: Preparing
f2b6c07cb397: Pushed
448eaa16d666: Pushed
f5507edfc283: Pushed
3b6ea9aa4889: Pushed
ecc065754c15: Pushed
feda785382bb: Pushed
v2.0.0: digest: sha256:85d29ebb53af7d95b9137f8e743d49cbc16eff1cdb9983128ab6e46e0c25892c size: 2000
start to connect
got cidr from cache
get cidr successfully
update ref count successfully
traffic manager already exist, reuse it
port forward ready
tunnel connected
dns service ok
+---------------------------------------------------------------------------+
| Now you can access resources in the kubernetes cluster, enjoy it :)        |
+---------------------------------------------------------------------------+
➜  ~
```

### 2. When entering development mode with `kubevpn dev`, error code 137 appears. How can I fix it?

```text
dns service ok
tar: Removing leading `/' from member names
tar: Removing leading `/' from hard link targets
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/7375606548554947868:/var/run/secrets/kubernetes.io/serviceaccount
Created container: server_vke-system_kubevpn_0db84
Wait container server_vke-system_kubevpn_0db84 to be running...
Container server_vke-system_kubevpn_0db84 is running on port 8888/tcp: 6789/tcp:6789 now
$ Status: , Code: 137
prepare to exit, cleaning up
port-forward occurs error, err: lost connection to pod, retrying
update ref count successfully
ref-count is zero, prepare to clean up resource
clean up successfully
```

This happens because the resources allocated to `Docker Desktop` are smaller than what the container needs at startup, so it was killed by the OOM killer. You can raise the Docker Desktop resource limits under `Preferences --> Resources --> Memory`.

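If you want to verify the current limit from a terminal before changing it, generic Docker commands such as the following can help (these are not KubeVPN-specific):

```shell
docker info --format 'Total memory available to Docker: {{.MemTotal}} bytes'
docker stats --no-stream    # one-shot memory usage of the running dev containers
```
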
### 3. When using Docker in WSL (Windows Subsystem for Linux) and entering development mode with `kubevpn dev`, the terminal cannot reach the cluster network. Why, and how can I fix it?

Answer: Docker in WSL uses the network of the Windows host, so even if you start a container in WSL, that container uses the Windows network rather than the WSL network.

Solutions:

- 1): Install Docker inside WSL; do not use the Windows Docker Desktop.
- 2): Run `kubevpn connect` on the Windows host, then run `kubevpn dev` in WSL to enter development mode.
- 3): Start a container on the Windows host, run `kubevpn connect` inside that container, then in WSL run `kubevpn dev --network container:$CONTAINER_ID` (see the sketch after this list).

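One possible shape for option 3) is sketched below. The helper container name, the kubeconfig mount, the `--privileged` flag, and the `deployment/authors` workload are all illustrative assumptions; adjust them to your setup.

```shell
# On the Windows host: start a long-lived helper container (mount your kubeconfig into it)
docker run -d --name kubevpn-helper --privileged -v "%USERPROFILE%\.kube:/root/.kube" \
  ghcr.io/kubenetworks/kubevpn:v2.0.0 sleep infinity
docker exec -it kubevpn-helper kubevpn connect

# In WSL: attach the dev container to the helper's network namespace
CONTAINER_ID=$(docker inspect --format '{{.Id}}' kubevpn-helper)
kubevpn dev deployment/authors --network container:$CONTAINER_ID
```
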
### 4. After entering development mode with `kubevpn dev`, the container network cannot be accessed and the error `172.17.0.1:443 connect refused` appears. How can I fix it?

Answer: most likely the Kubernetes container network CIDR conflicts with the Docker network CIDR.

Solutions:

- Use the `--connect-mode container` option to connect from inside a container; this also solves the problem.
- Alternatively, edit `~/.docker/daemon.json` to add a non-conflicting network, e.g. `"bip": "172.15.0.1/24"`.

```shell
➜  ~ cat ~/.docker/daemon.json
{
  "builder": {
    "gc": {
      "defaultKeepStorage": "20GB",
      "enabled": true
    }
  },
  "experimental": false,
  "features": {
    "buildkit": true
  },
  "insecure-registries": []
}
```

Add a non-conflicting network segment:

```shell
➜  ~ cat ~/.docker/daemon.json
{
  "builder": {
    "gc": {
      "defaultKeepStorage": "20GB",
      "enabled": true
    }
  },
  "experimental": false,
  "features": {
    "buildkit": true
  },
  "insecure-registries": [],
  "bip": "172.15.0.1/24"
}
```

Restart Docker and try again.

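On a Linux host where Docker runs under systemd, the restart can be done from the shell as sketched below; Docker Desktop users can simply use the Restart entry in the UI instead. This is a generic sketch, not a KubeVPN command.

```shell
sudo systemctl restart docker                                                  # reload daemon.json
docker network inspect bridge --format '{{(index .IPAM.Config 0).Subnet}}'     # confirm the new bip took effect
```
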
[JetBrains Open Source Support](https://jb.gg/OpenSourceSupport)

TODO.MD
@@ -1,24 +0,0 @@

## TODO

- [x] Access the cluster network
- [x] DNS resolution
- [x] Reverse proxy for multiple services
- [x] Short domain name resolution
- [x] Improve the DHCP feature
- [x] Support more workload types, e.g. statefulset, replicaset...
- [ ] Support IPv6
- [x] Implement the SOCKS5 protocol ourselves
- [ ] Consider whether the OpenVPN tap/tun driver is needed as a fallback
- [x] Add TLS to improve security
- [ ] Write a CNI network plugin that provides the VPN directly
- [x] Improve the reconnection logic
- [x] Support service mesh
- [x] Multi-port support for service mesh
- [x] Replace envoy with a self-written proxy
- [ ] Improve performance; consider IPC on Windows
- [x] Write our own control plane
- [x] Consider separating the control plane from the service
- [x] Write unit tests and improve GitHub Actions
- [x] Switch Linux and macOS to the WireGuard library as well
- [x] Detect utun devices with duplicate routes and disable them with `sudo ifconfig utun1 down`

@@ -1,34 +1,34 @@
|
||||
FROM envoyproxy/envoy:v1.25.0 AS envoy
|
||||
FROM golang:1.20 AS builder
|
||||
FROM golang:1.23 AS builder
|
||||
ARG BASE=github.com/wencaiwulue/kubevpn
|
||||
|
||||
COPY . /go/src/$BASE
|
||||
|
||||
WORKDIR /go/src/$BASE
|
||||
|
||||
RUN go env -w GO111MODULE=on && go env -w GOPROXY=https://goproxy.cn,direct
|
||||
RUN make kubevpn
|
||||
RUN go install github.com/go-delve/delve/cmd/dlv@latest
|
||||
|
||||
FROM ubuntu:latest
|
||||
FROM debian:bookworm-slim
|
||||
ARG BASE=github.com/wencaiwulue/kubevpn
|
||||
|
||||
RUN sed -i s@/security.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list \
|
||||
&& sed -i s@/archive.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list
|
||||
RUN apt-get clean && apt-get update && apt-get install -y wget dnsutils vim curl \
|
||||
net-tools iptables iputils-ping lsof iproute2 tcpdump binutils traceroute conntrack socat iperf3
|
||||
|
||||
ENV TZ=Asia/Shanghai \
|
||||
DEBIAN_FRONTEND=noninteractive
|
||||
RUN apt update \
|
||||
&& apt install -y tzdata \
|
||||
&& ln -fs /usr/share/zoneinfo/${TZ} /etc/localtime \
|
||||
&& echo ${TZ} > /etc/timezone \
|
||||
&& dpkg-reconfigure --frontend noninteractive tzdata \
|
||||
RUN apt-get update && apt-get install -y openssl iptables curl dnsutils \
|
||||
&& if [ $(uname -m) = "x86_64" ]; then \
|
||||
echo "The architecture is AMD64"; \
|
||||
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"; \
|
||||
elif [ $(uname -m) = "aarch64" ]; then \
|
||||
echo "The architecture is ARM64"; \
|
||||
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl"; \
|
||||
else \
|
||||
echo "Unsupported architecture."; \
|
||||
exit 1; \
|
||||
fi \
|
||||
&& chmod +x kubectl && mv kubectl /usr/local/bin \
|
||||
&& apt-get remove -y curl \
|
||||
&& apt-get autoremove -y \
|
||||
&& apt-get clean -y \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY --from=builder /go/src/$BASE/bin/kubevpn /usr/local/bin/kubevpn
|
||||
COPY --from=builder /go/bin/dlv /usr/local/bin/dlv
|
||||
COPY --from=envoy /usr/local/bin/envoy /usr/local/bin/envoy
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
FROM golang:1.20 as delve
|
||||
RUN curl --location --output delve-1.20.1.tar.gz https://github.com/go-delve/delve/archive/v1.20.1.tar.gz \
|
||||
&& tar xzf delve-1.20.1.tar.gz
|
||||
RUN cd delve-1.20.1 && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o /go/dlv -ldflags '-extldflags "-static"' ./cmd/dlv/
|
||||
FROM golang:1.23 as delve
|
||||
RUN curl --location --output delve-1.23.1.tar.gz https://github.com/go-delve/delve/archive/v1.23.1.tar.gz \
|
||||
&& tar xzf delve-1.23.1.tar.gz
|
||||
RUN cd delve-1.23.1 && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o /go/dlv -ldflags '-extldflags "-static"' ./cmd/dlv/
|
||||
FROM busybox
|
||||
COPY --from=delve /go/dlv /bin/dlv
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM golang:1.20 AS builder
|
||||
FROM golang:1.23 AS builder
|
||||
RUN go env -w GO111MODULE=on && go env -w GOPROXY=https://goproxy.cn,direct
|
||||
RUN go install github.com/go-delve/delve/cmd/dlv@latest
|
||||
|
||||
@@ -8,7 +8,18 @@ FROM ubuntu:latest
|
||||
RUN sed -i s@/security.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list \
|
||||
&& sed -i s@/archive.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list
|
||||
RUN apt-get clean && apt-get update && apt-get install -y wget dnsutils vim curl \
|
||||
net-tools iptables iputils-ping lsof iproute2 tcpdump binutils traceroute conntrack socat iperf3
|
||||
net-tools iptables iputils-ping lsof iproute2 tcpdump binutils traceroute conntrack socat iperf3 \
|
||||
apt-transport-https ca-certificates curl
|
||||
|
||||
RUN if [ $(uname -m) = "x86_64" ]; then \
|
||||
echo "The architecture is AMD64"; \
|
||||
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && chmod +x kubectl && mv kubectl /usr/local/bin; \
|
||||
elif [ $(uname -m) = "aarch64" ]; then \
|
||||
echo "The architecture is ARM64"; \
|
||||
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" && chmod +x kubectl && mv kubectl /usr/local/bin; \
|
||||
else \
|
||||
echo "Unsupported architecture."; \
|
||||
fi
|
||||
|
||||
ENV TZ=Asia/Shanghai \
|
||||
DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
@@ -1,5 +1,34 @@
|
||||
FROM naison/kubevpn:latest
|
||||
FROM envoyproxy/envoy:v1.25.0 AS envoy
|
||||
FROM golang:1.23 AS builder
|
||||
ARG BASE=github.com/wencaiwulue/kubevpn
|
||||
|
||||
COPY . /go/src/$BASE
|
||||
|
||||
WORKDIR /go/src/$BASE
|
||||
|
||||
RUN make kubevpn
|
||||
|
||||
FROM debian:bookworm-slim
|
||||
ARG BASE=github.com/wencaiwulue/kubevpn
|
||||
|
||||
RUN apt-get update && apt-get install -y openssl iptables curl dnsutils \
|
||||
&& if [ $(uname -m) = "x86_64" ]; then \
|
||||
echo "The architecture is AMD64"; \
|
||||
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"; \
|
||||
elif [ $(uname -m) = "aarch64" ]; then \
|
||||
echo "The architecture is ARM64"; \
|
||||
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl"; \
|
||||
else \
|
||||
echo "Unsupported architecture."; \
|
||||
exit 1; \
|
||||
fi \
|
||||
&& chmod +x kubectl && mv kubectl /usr/local/bin \
|
||||
&& apt-get remove -y curl \
|
||||
&& apt-get autoremove -y \
|
||||
&& apt-get clean -y \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY bin/kubevpn /usr/local/bin/kubevpn
|
||||
COPY --from=builder /go/src/$BASE/bin/kubevpn /usr/local/bin/kubevpn
|
||||
COPY --from=envoy /usr/local/bin/envoy /usr/local/bin/envoy
|
||||
566
charts/index.yaml
Normal file
566
charts/index.yaml
Normal file
@@ -0,0 +1,566 @@
|
||||
apiVersion: v1
|
||||
entries:
|
||||
kubevpn:
|
||||
- annotations:
|
||||
app: kubevpn
|
||||
apiVersion: v2
|
||||
appVersion: v2.7.11
|
||||
created: "2025-05-18T09:03:21.60777933Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: ee30c2533dff51fa389767e56931583cdfff8c5fca7d6c9698f521c6fc508d42
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.11/kubevpn-2.7.11.tgz
|
||||
version: 2.7.11
|
||||
- annotations:
|
||||
app: kubevpn
|
||||
apiVersion: v2
|
||||
appVersion: v2.7.10
|
||||
created: "2025-05-14T13:08:51.09371872Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: fd23dd5bf0c3a9343d73276c4997a34027a93c1a88667265d92297630579d165
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.10/kubevpn-2.7.10.tgz
|
||||
version: 2.7.10
|
||||
- annotations:
|
||||
app: kubevpn
|
||||
apiVersion: v2
|
||||
appVersion: v2.7.9
|
||||
created: "2025-05-12T09:14:52.66116293Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 56e022017177603290575849553c2e9c19f6a1691288dbd67c32a2fdcbde0834
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.9/kubevpn-2.7.9.tgz
|
||||
version: 2.7.9
|
||||
- annotations:
|
||||
app: kubevpn
|
||||
apiVersion: v2
|
||||
appVersion: v2.7.8
|
||||
created: "2025-05-10T15:46:13.342045201Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: bfab5a7e4e1e795071a7ce3fd7713b517aa447d967ec58500e5a551564869109
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.8/kubevpn-2.7.8.tgz
|
||||
version: 2.7.8
|
||||
- annotations:
|
||||
app: kubevpn
|
||||
apiVersion: v2
|
||||
appVersion: v2.7.7
|
||||
created: "2025-05-09T06:43:01.403047355Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 14b3e7873aa71fa7a380631c83be8df1dfb8d0ccb49eb6746aa4f83e3df934f6
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.7/kubevpn-2.7.7.tgz
|
||||
version: 2.7.7
|
||||
- annotations:
|
||||
app: kubevpn
|
||||
apiVersion: v2
|
||||
appVersion: v2.7.6
|
||||
created: "2025-05-07T11:46:09.644201893Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 2146d5245440dff7d551ccc745aa1d9476d4f42053ff8a80f33f835d8da57712
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.6/kubevpn-2.7.6.tgz
|
||||
version: 2.7.6
|
||||
- annotations:
|
||||
app: kubevpn
|
||||
apiVersion: v2
|
||||
appVersion: v2.7.5
|
||||
created: "2025-05-07T01:56:15.201307242Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 34799e9605b3048aac75484bb32fb6c70f9e7eb7470e9b77c51be075a548c25e
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.5/kubevpn-2.7.5.tgz
|
||||
version: 2.7.5
|
||||
- annotations:
|
||||
app: kubevpn
|
||||
apiVersion: v2
|
||||
appVersion: v2.7.4
|
||||
created: "2025-05-06T17:01:13.789138284Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 5c6f2d1a178e917ac83ec72d0a46de9a0ff68f80a3aeb813d15dfb92c8ad36be
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.4/kubevpn-2.7.4.tgz
|
||||
version: 2.7.4
|
||||
- annotations:
|
||||
app: kubevpn
|
||||
apiVersion: v2
|
||||
appVersion: v2.7.3
|
||||
created: "2025-05-06T15:40:24.505449375Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 86ef4b1de6ea15f6738824f7c389a891f53500b9163b1288847172eb7dc6817e
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.3/kubevpn-2.7.3.tgz
|
||||
version: 2.7.3
|
||||
- annotations:
|
||||
app: kubevpn
|
||||
apiVersion: v2
|
||||
appVersion: v2.7.2
|
||||
created: "2025-04-25T15:40:08.296727519Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 8711dae30f4ff9bc9cea018fa16ae70087a17af42262f7f31c43950a34fffa08
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.2/kubevpn-2.7.2.tgz
|
||||
version: 2.7.2
|
||||
- annotations:
|
||||
app: kubevpn
|
||||
apiVersion: v2
|
||||
appVersion: v2.7.1
|
||||
created: "2025-04-15T15:18:20.818055207Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 79c40c942fd2cfcca63dd82921e04871680838f01717c6fcb3ee06bfb7f59535
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.1/kubevpn-2.7.1.tgz
|
||||
version: 2.7.1
|
||||
- annotations:
|
||||
app: kubevpn
|
||||
apiVersion: v2
|
||||
appVersion: v2.7.0
|
||||
created: "2025-04-12T05:37:01.063235951Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: a4b4de15f474fba43367fc7239c31e2020a6a1e0e3b29e02eb653cb9922b02e8
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.7.0/kubevpn-2.7.0.tgz
|
||||
version: 2.7.0
|
||||
- annotations:
|
||||
app: kubevpn
|
||||
apiVersion: v2
|
||||
appVersion: v2.6.0
|
||||
created: "2025-04-06T12:54:49.852649414Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 58d930de19ac808e9f0ee501fe6f74b6f38376692708fc94fe7200496d9c5ca2
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.6.0/kubevpn-2.6.0.tgz
|
||||
version: 2.6.0
|
||||
- annotations:
|
||||
app: kubevpn
|
||||
apiVersion: v2
|
||||
appVersion: v2.5.1
|
||||
created: "2025-04-03T15:46:28.062220333Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 6daf003256c42bb0db414eb17eb06294e46d33bc6c63f01419012a37318d0d2f
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.5.1/kubevpn-2.5.1.tgz
|
||||
version: 2.5.1
|
||||
- annotations:
|
||||
app: kubevpn
|
||||
apiVersion: v2
|
||||
appVersion: v2.5.0
|
||||
created: "2025-03-31T05:36:16.050204161Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 301137b1599c232efd61ce9360e0a60da89e0a5c2eb076750bf461b38d26cfaf
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.5.0/kubevpn-2.5.0.tgz
|
||||
version: 2.5.0
|
||||
- annotations:
|
||||
app: kubevpn
|
||||
apiVersion: v2
|
||||
appVersion: v2.4.3
|
||||
created: "2025-03-30T13:48:42.333380676Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 8ef28a43cb3d04f071445cf7d1199aba7392d78e1941707bab82853c5541c93c
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.4.3/kubevpn-2.4.3.tgz
|
||||
version: 2.4.3
|
||||
- apiVersion: v2
|
||||
appVersion: v2.4.2
|
||||
created: "2025-03-23T12:53:35.793492243Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: c627f69ac904ddb41c396909873425d85264fb3393d550fa1b0e8d2abfc402e9
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.4.2/kubevpn-2.4.2.tgz
|
||||
version: 2.4.2
|
||||
- apiVersion: v2
|
||||
appVersion: v2.4.1
|
||||
created: "2025-03-16T09:48:30.691242519Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 1766431ce46b43758353928188cc993832e41cd0e352c9bc7991390bbbf41b04
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.4.1/kubevpn-2.4.1.tgz
|
||||
version: 2.4.1
|
||||
- apiVersion: v2
|
||||
appVersion: v2.4.0
|
||||
created: "2025-03-14T14:16:56.392516206Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: ffece68d3234ba629e02456fd3b0d31b5d2d1330c4c7f5d82ac2e0e1e97d82f3
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.4.0/kubevpn-2.4.0.tgz
|
||||
version: 2.4.0
|
||||
- apiVersion: v2
|
||||
appVersion: v2.3.13
|
||||
created: "2025-02-23T14:30:35.221348419Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: e79cdd07eae2ba3f36997debf898b091e1e68412fde7a34e823bad902e803105
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.13/kubevpn-2.3.13.tgz
|
||||
version: 2.3.13
|
||||
- apiVersion: v2
|
||||
appVersion: v2.3.12
|
||||
created: "2025-02-13T07:46:06.029130129Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 0b7d9f8b4cd306377e4452a9d86530387afcae379e11665909b90e15f2d82a04
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.12/kubevpn-2.3.12.tgz
|
||||
version: 2.3.12
|
||||
- apiVersion: v2
|
||||
appVersion: v2.3.11
|
||||
created: "2025-02-03T09:24:54.033585049Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: a54a2ed19e6f4aa5c274186d6b188c0230244582055905155c4620ebe8864838
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.11/kubevpn-2.3.11.tgz
|
||||
version: 2.3.11
|
||||
- apiVersion: v2
|
||||
appVersion: v2.3.10
|
||||
created: "2025-01-24T13:36:34.489289734Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 987b73399637eee01570492115114696fdb054074507f0d16e47d077e4ea770c
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.10/kubevpn-2.3.10.tgz
|
||||
version: 2.3.10
|
||||
- apiVersion: v2
|
||||
appVersion: v2.3.9
|
||||
created: "2024-12-21T15:29:42.173109915Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 0f9dd91504c1d1c3149cca785f0a9d72ef860d002ee73590f41e3d8decc99365
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.9/kubevpn-2.3.9.tgz
|
||||
version: 2.3.9
|
||||
- apiVersion: v2
|
||||
appVersion: v2.3.8
|
||||
created: "2024-12-19T14:19:38.126241384Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 84239f1bce053eaa9314e53b820ad0ba32bbc51c37dcac6ae8abd03bef6f7fd2
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.8/kubevpn-2.3.8.tgz
|
||||
version: 2.3.8
|
||||
- apiVersion: v2
|
||||
appVersion: v2.3.7
|
||||
created: "2024-12-14T17:25:08.398840622Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 437faa6cd98e81c4ad2c1b48c9ef7a33e7d435cf6343c5cc2c88ea251b2a545b
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.7/kubevpn-2.3.7.tgz
|
||||
version: 2.3.7
|
||||
- apiVersion: v2
|
||||
appVersion: v2.3.6
|
||||
created: "2024-12-09T11:52:04.779835011Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 7b23d14f6aea4410d68911d202199f15c88cb96cef8edbd94d4a95e9b9254bf7
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.6/kubevpn-2.3.6.tgz
|
||||
version: 2.3.6
|
||||
- apiVersion: v2
|
||||
appVersion: v2.3.5
|
||||
created: "2024-12-06T14:40:11.685095653Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: c2a85f446af834b60308b1384e6cae5662229c34370053319c0f759f650a1cb5
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.5/kubevpn-2.3.5.tgz
|
||||
version: 2.3.5
|
||||
- apiVersion: v2
|
||||
appVersion: v2.3.4
|
||||
created: "2024-11-29T13:03:24.255324387Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 2804aa624f6139695f3fb723bdc6ba087492bcd8810baf7196a1ae88bd2a62b5
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.4/kubevpn-2.3.4.tgz
|
||||
version: 2.3.4
|
||||
- apiVersion: v2
|
||||
appVersion: v2.3.3
|
||||
created: "2024-11-22T14:54:13.795282085Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 33cbbc9312e7b7e415fb14f80f17df50d305194617bcf75d1501227cb90b8f32
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.3/kubevpn-2.3.3.tgz
|
||||
version: 2.3.3
|
||||
- apiVersion: v2
|
||||
appVersion: v2.3.2
|
||||
created: "2024-11-18T11:52:12.076510627Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: cdb38ab84bf1649ac4280f6996060c49a095f9c056044cd5f691e7bf4f259dad
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.2/kubevpn-2.3.2.tgz
|
||||
version: 2.3.2
|
||||
- apiVersion: v2
|
||||
appVersion: v2.3.1
|
||||
created: "2024-11-15T13:36:37.056311943Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 10c1200241309be4ec2eb88e9689ebbf96704c8fad270e6fda30047135aeccf2
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.1/kubevpn-2.3.1.tgz
|
||||
version: 2.3.1
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.22
|
||||
created: "2024-10-30T08:46:08.845218523Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: c2dc336383d7de2fb97cfd40a15e9f6c29a9a598484b88515a98bcaeb4925eda
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.22/kubevpn-2.2.22.tgz
|
||||
version: 2.2.22
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.21
|
||||
created: "2024-10-25T14:10:25.545716679Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 98ae51247535525ff6a10b5f493d8bfc573af62759432f7aa54dd7eb6edeffd5
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.21/kubevpn-2.2.21.tgz
|
||||
version: 2.2.21
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.20
|
||||
created: "2024-10-20T04:00:07.263734809Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 7863701dff5b3fce0795ee8e0b73044b7c88f8777c86a65adc1f5563123565dc
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.20/kubevpn-2.2.20.tgz
|
||||
version: 2.2.20
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.19
|
||||
created: "2024-10-10T00:47:08.858011096Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: be2c672081307c03b7fe6b635d524c8f3f73d70ae3316efa85e781a62c25a46d
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.19/kubevpn-2.2.19.tgz
|
||||
version: 2.2.19
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.18
|
||||
created: "2024-09-10T09:39:11.71407425Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 2d953103425ca2a087a2d521c9297662f97b72e78cf831e947942f292bbcc643
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.18/kubevpn-2.2.18.tgz
|
||||
version: 2.2.18
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.17
|
||||
created: "2024-08-03T07:45:55.228743946Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 476317ad82b2c59a623e1fca968c09a28554ebcabec337c1c363e7296bb27514
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.17/kubevpn-2.2.17.tgz
|
||||
version: 2.2.17
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.16
|
||||
created: "2024-07-26T13:43:50.473565863Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 6cdb809d04687197a8defbf4349871c505ac699924833fecc210d8a6d82a9f20
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.16/kubevpn-2.2.16.tgz
|
||||
version: 2.2.16
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.15
|
||||
created: "2024-07-19T15:03:13.558586823Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 279b24976cef25e1dd8a4cd612a7c6a5767cecd4ba386ccab80fc00db76117e7
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.15/kubevpn-2.2.15.tgz
|
||||
version: 2.2.15
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.14
|
||||
created: "2024-07-12T15:24:27.825047662Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 52ab9b89ea3773792bf3839e4a7c23a9ea60a6c72547024dc0907c973a8d34b3
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.14/kubevpn-2.2.14.tgz
|
||||
version: 2.2.14
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.13
|
||||
created: "2024-07-05T15:08:40.140645659Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 610c5528952826839d5636b8bd940ac907ab0e70377e37538063cb53a5f75443
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.13/kubevpn-2.2.13.tgz
|
||||
version: 2.2.13
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.12
|
||||
created: "2024-06-29T15:36:12.429229459Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: a129ac0efda2e2967937407b904d59122e7b9725fb225c0bcbfdf2260337c032
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.12/kubevpn-2.2.12.tgz
|
||||
version: 2.2.12
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.11
|
||||
created: "2024-06-21T14:13:53.982206886Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 3a7fa4cb3e1785da68e422ef151a3c7f621fbe76862b557ae2750af70d34e1ad
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.11/kubevpn-2.2.11.tgz
|
||||
version: 2.2.11
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.10
|
||||
created: "2024-05-21T06:46:20.368800554Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 89be252c9eedb13560224550f06270f8be88049edfb0a46ca170ab5c8c493a6c
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.10/kubevpn-2.2.10.tgz
|
||||
version: 2.2.10
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.9
|
||||
created: "2024-05-14T11:50:54.700148975Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: e94debe7c904e21f791c1e3bb877ca8132888a3bb3c53beaa74e2ff1e7dd8769
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.9/kubevpn-2.2.9.tgz
|
||||
version: 2.2.9
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.8
|
||||
created: "2024-05-03T15:50:13.647253665Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 9e18d0d02f123e5d8f096362daa5e6893d5db1e8447a632585ae23d6ce755489
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.8/kubevpn-2.2.8.tgz
|
||||
version: 2.2.8
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.7
|
||||
created: "2024-04-27T12:11:35.594701859Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 3828f5b20d6bf4c0c7d94654cc33fd8d7b4c5f2aa20a3cc18d18b9298f459456
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.7/kubevpn-2.2.7.tgz
|
||||
version: 2.2.7
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.6
|
||||
created: "2024-04-16T05:44:31.777079658Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 63668930b99e6c18f6dd77a25e5ce2d21579d52a83451f58be3bc0ca32678829
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.6/kubevpn-2.2.6.tgz
|
||||
version: 2.2.6
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.5
|
||||
created: "2024-04-14T08:46:13.877936123Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 8509aeec7584935344bdf465efd8f0d5efb58ef1b7a31fd2738e5c2790f680c4
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.5/kubevpn-2.2.5.tgz
|
||||
version: 2.2.5
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.4
|
||||
created: "2024-04-02T05:15:00.372823536Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 07e87e648b7ad5688146a356c93c1771e94485c2fd9d5441553d94ce6371c19f
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.4/kubevpn-2.2.4.tgz
|
||||
version: 2.2.4
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.3
|
||||
created: "2024-03-03T11:52:37.856463964Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: cb1b8c210259292488548853bdeb2eb9ef4c60d1643e0d6537174349514dc8e9
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.3/kubevpn-2.2.3.tgz
|
||||
version: 2.2.3
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.2
|
||||
created: "2024-02-15T13:35:35.121411893Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: b7589312eab83e50db9ae5703a30e76f0b40fd280c81d102a823aeeb61e14c1c
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.2/kubevpn-2.2.2.tgz
|
||||
version: 2.2.2
|
||||
generated: "2025-05-18T09:03:21.608024937Z"
|
||||
23
charts/kubevpn/.helmignore
Normal file
23
charts/kubevpn/.helmignore
Normal file
@@ -0,0 +1,23 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
8
charts/kubevpn/Chart.yaml
Normal file
8
charts/kubevpn/Chart.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
apiVersion: v2
|
||||
name: kubevpn
|
||||
description: A Helm chart for KubeVPN
|
||||
type: application
|
||||
version: 0.1.0
|
||||
appVersion: "1.16.0"
|
||||
annotations:
|
||||
app: kubevpn
|
||||
24
charts/kubevpn/README.md
Normal file
24
charts/kubevpn/README.md
Normal file
@@ -0,0 +1,24 @@
|
||||
# Helm charts for KubeVPN server

Installing the KubeVPN server with Helm means using cluster mode: all users share this instance.

- Please make sure users have permission to access the `kubevpn` namespace.
- Otherwise, KubeVPN will fall back to creating a `kubevpn` deployment in the user's own namespace.

## Install with default mode
|
||||
|
||||
```shell
|
||||
helm install kubevpn kubevpn/kubevpn -n kubevpn --create-namespace
|
||||
```
|
||||
|
||||
In China, you can use the Tencent image registry:

```shell
|
||||
helm install kubevpn kubevpn/kubevpn --set image.repository=ccr.ccs.tencentyun.com/kubevpn/kubevpn -n kubevpn --create-namespace
|
||||
```
|
||||
|
||||
## AWS Fargate cluster
|
||||
|
||||
```shell
|
||||
helm install kubevpn kubevpn/kubevpn --set netstack=gvisor -n kubevpn --create-namespace
|
||||
```
|
||||
4
charts/kubevpn/templates/NOTES.txt
Normal file
4
charts/kubevpn/templates/NOTES.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
1. Connect to cluster network by running these commands:
|
||||
kubevpn connect --namespace {{ include "kubevpn.namespace" . }}
|
||||
export POD_IP=$(kubectl get pods --namespace {{ include "kubevpn.namespace" . }} -l "app.kubernetes.io/name={{ include "kubevpn.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].status.podIP}")
|
||||
ping $POD_IP
|
||||
82
charts/kubevpn/templates/_helpers.tpl
Normal file
82
charts/kubevpn/templates/_helpers.tpl
Normal file
@@ -0,0 +1,82 @@
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "kubevpn.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "kubevpn.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "kubevpn.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "kubevpn.labels" -}}
|
||||
helm.sh/chart: {{ include "kubevpn.chart" . }}
|
||||
app: kubevpn-traffic-manager
|
||||
{{ include "kubevpn.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "kubevpn.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "kubevpn.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "kubevpn.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
{{- default (include "kubevpn.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else }}
|
||||
{{- default "default" .Values.serviceAccount.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Namespace
|
||||
1. special by -n
|
||||
2. use default namespace kubevpn
|
||||
*/}}
|
||||
{{- define "kubevpn.namespace" -}}
|
||||
{{- if .Release.Namespace }}
|
||||
{{- if eq .Release.Namespace "default" }}
|
||||
{{- .Values.namespace }}
|
||||
{{- else }}
|
||||
{{- .Release.Namespace }}
|
||||
{{- end }}
|
||||
{{- else if .Values.namespace }}
|
||||
{{- .Values.namespace }}
|
||||
{{- else }}
|
||||
{{- .Values.namespace }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
11
charts/kubevpn/templates/configmap.yaml
Normal file
11
charts/kubevpn/templates/configmap.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
namespace: {{ include "kubevpn.namespace" . }}
|
||||
data:
|
||||
DHCP: ""
|
||||
DHCP6: ""
|
||||
ENVOY_CONFIG: ""
|
||||
IPv4_POOLS: "{{ .Values.cidr.pod }} {{ .Values.cidr.service }}"
|
||||
REF_COUNT: "0"
|
||||
138
charts/kubevpn/templates/deployment.yaml
Normal file
138
charts/kubevpn/templates/deployment.yaml
Normal file
@@ -0,0 +1,138 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
namespace: {{ include "kubevpn.namespace" . }}
|
||||
labels:
|
||||
{{- include "kubevpn.labels" . | nindent 4 }}
|
||||
spec:
|
||||
{{- if not .Values.autoscaling.enabled }}
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "kubevpn.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
{{- with .Values.podAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "kubevpn.labels" . | nindent 8 }}
|
||||
{{- with .Values.podLabels }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ include "kubevpn.serviceAccountName" . }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
containers:
|
||||
- args:
|
||||
{{- if eq .Values.netstack "system" }}
|
||||
- |
|
||||
echo 1 > /proc/sys/net/ipv4/ip_forward
|
||||
echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6
|
||||
echo 1 > /proc/sys/net/ipv6/conf/all/forwarding
|
||||
update-alternatives --set iptables /usr/sbin/iptables-legacy
|
||||
iptables -P INPUT ACCEPT
|
||||
ip6tables -P INPUT ACCEPT
|
||||
iptables -P FORWARD ACCEPT
|
||||
ip6tables -P FORWARD ACCEPT
|
||||
iptables -t nat -A POSTROUTING -s ${CIDR4} -o eth0 -j MASQUERADE
|
||||
ip6tables -t nat -A POSTROUTING -s ${CIDR6} -o eth0 -j MASQUERADE
|
||||
kubevpn server -l "tcp://:10800" -l "tun://:8422?net=${TunIPv4}&net6=${TunIPv6}" -l "gtcp://:10801" -l "gudp://:10802"
|
||||
{{- else }}
|
||||
- kubevpn server -l "tcp://:10800" -l "gtcp://:10801" -l "gudp://:10802"
|
||||
{{- end }}
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
env:
|
||||
- name: CIDR4
|
||||
value: 198.19.0.0/16
|
||||
- name: CIDR6
|
||||
value: 2001:2::/64
|
||||
- name: TunIPv4
|
||||
value: 198.19.0.100/16
|
||||
- name: TunIPv6
|
||||
value: 2001:2::9999/64
|
||||
envFrom:
|
||||
- secretRef:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
name: vpn
|
||||
ports:
|
||||
- containerPort: {{ .Values.service.port8422 }}
|
||||
name: 8422-for-udp
|
||||
protocol: UDP
|
||||
- containerPort: {{ .Values.service.port10800 }}
|
||||
name: 10800-for-tcp
|
||||
protocol: TCP
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
{{- if eq .Values.netstack "system" }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
{{- end }}
|
||||
- args:
|
||||
- control-plane
|
||||
- --watchDirectoryFilename
|
||||
- /etc/envoy/envoy-config.yaml
|
||||
command:
|
||||
- kubevpn
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
name: control-plane
|
||||
ports:
|
||||
- containerPort: {{ .Values.service.port9002 }}
|
||||
name: 9002-for-envoy
|
||||
protocol: TCP
|
||||
resources:
|
||||
{{- toYaml .Values.resourcesSmall | nindent 12 }}
|
||||
volumeMounts:
|
||||
- mountPath: /etc/envoy
|
||||
name: envoy-config
|
||||
readOnly: true
|
||||
- args:
|
||||
- webhook
|
||||
command:
|
||||
- kubevpn
|
||||
envFrom:
|
||||
- secretRef:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
env:
|
||||
- name: "POD_NAMESPACE"
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
name: webhook
|
||||
ports:
|
||||
- containerPort: 80
|
||||
name: 80-for-webhook
|
||||
protocol: TCP
|
||||
resources:
|
||||
{{- toYaml .Values.resourcesSmall | nindent 12 }}
|
||||
{{- with .Values.volumes }}
|
||||
volumes:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
33
charts/kubevpn/templates/hpa.yaml
Normal file
33
charts/kubevpn/templates/hpa.yaml
Normal file
@@ -0,0 +1,33 @@
|
||||
{{- if .Values.autoscaling.enabled }}
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
namespace: {{ include "kubevpn.namespace" . }}
|
||||
labels:
|
||||
{{- include "kubevpn.labels" . | nindent 4 }}
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
minReplicas: {{ .Values.autoscaling.minReplicas }}
|
||||
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
|
||||
metrics:
|
||||
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
target:
|
||||
type: Utilization
|
||||
averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
|
||||
{{- end }}
|
||||
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
|
||||
- type: Resource
|
||||
resource:
|
||||
name: memory
|
||||
target:
|
||||
type: Utilization
|
||||
averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
73
charts/kubevpn/templates/job.yaml
Normal file
73
charts/kubevpn/templates/job.yaml
Normal file
@@ -0,0 +1,73 @@
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
namespace: {{ include "kubevpn.namespace" . }}
|
||||
labels:
|
||||
{{- include "kubevpn.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
"helm.sh/hook": post-install
|
||||
"helm.sh/hook-delete-policy": before-hook-creation
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
{{- with .Values.podAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "kubevpn.labels" . | nindent 8 }}
|
||||
{{- with .Values.podLabels }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
restartPolicy: Never
|
||||
serviceAccountName: {{ include "kubevpn.serviceAccountName" . }}
|
||||
containers:
|
||||
- name: label-ns
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
command:
|
||||
- /bin/bash
|
||||
- -c
|
||||
args:
|
||||
- |2-
|
||||
|
||||
echo "Label namespace {{ include "kubevpn.namespace" . }}"
|
||||
kubectl label ns {{ include "kubevpn.namespace" . }} ns={{ include "kubevpn.namespace" . }}
|
||||
|
||||
echo "Generating https certificate"
|
||||
openssl req -x509 -nodes -days 36500 -newkey rsa:2048 -subj "/CN={{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}" -addext "subjectAltName=DNS:{{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}.svc.cluster.local,DNS:{{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}.svc,DNS:{{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }},DNS:localhost,IP:127.0.0.1" -keyout server.key -out server.crt
|
||||
|
||||
export TLS_CRT=$(cat server.crt | base64 | tr -d '\n')
|
||||
echo "Patch mutatingwebhookconfigurations {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}"
|
||||
kubectl patch mutatingwebhookconfigurations {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }} -p "{\"webhooks\":[{\"name\":\"{{ include "kubevpn.fullname" . }}.naison.io\",\"sideEffects\":\"None\",\"admissionReviewVersions\":[\"v1\", \"v1beta1\"],\"clientConfig\":{\"service\":{\"namespace\":\"{{ include "kubevpn.namespace" . }}\",\"name\":\"{{ include "kubevpn.fullname" . }}\"},\"caBundle\":\"$TLS_CRT\"}}]}"
|
||||
|
||||
export TLS_KEY=$(cat server.key | base64 | tr -d '\n')
|
||||
echo "Patch secret {{ include "kubevpn.fullname" . }}"
|
||||
kubectl patch secret {{ include "kubevpn.fullname" . }} -n {{ include "kubevpn.namespace" . }} -p "{\"data\":{\"tls_key\":\"$TLS_KEY\",\"tls_crt\":\"$TLS_CRT\"}}"
|
||||
|
||||
echo "Restart the pods..."
|
||||
kubectl scale -n {{ include "kubevpn.namespace" . }} --replicas=0 deployment/{{ include "kubevpn.fullname" . }}
|
||||
kubectl scale -n {{ include "kubevpn.namespace" . }} --replicas=1 deployment/{{ include "kubevpn.fullname" . }}
|
||||
|
||||
export POOLS=$(kubectl get cm {{ include "kubevpn.fullname" . }} -n {{ include "kubevpn.namespace" . }} -o jsonpath='{.data.IPv4_POOLS}')
|
||||
if [[ -z "${POOLS// }" ]];then
|
||||
echo "Cidr is empty"
|
||||
echo "Get pod cidr..."
|
||||
export POD_CIDR=$(kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}' | tr -s '\n' ' ')
|
||||
echo "Get service cidr..."
|
||||
export SVC_CIDR=$(echo '{"apiVersion":"v1","kind":"Service","metadata":{"name":"kubevpn-get-svc-cidr-{{ include "kubevpn.namespace" . }}", "namespace": "{{ include "kubevpn.namespace" . }}"},"spec":{"clusterIP":"1.1.1.1","ports":[{"port":443}]}}' | kubectl apply -f - 2>&1 | sed 's/.*valid IPs is //')
|
||||
echo "Pod cidr: $POD_CIDR, service cidr: $SVC_CIDR"
|
||||
echo "Patch configmap {{ include "kubevpn.fullname" . }}"
|
||||
kubectl patch configmap {{ include "kubevpn.fullname" . }} -n {{ include "kubevpn.namespace" . }} -p "{\"data\":{\"IPv4_POOLS\":\"$POD_CIDR $SVC_CIDR\"}}"
|
||||
else
|
||||
echo "Cidr is NOT empty"
|
||||
fi
|
||||
|
||||
echo "Done~"
|
||||
exit 0
|
||||
35
charts/kubevpn/templates/mutatingwebhookconfiguration.yaml
Normal file
35
charts/kubevpn/templates/mutatingwebhookconfiguration.yaml
Normal file
@@ -0,0 +1,35 @@
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: MutatingWebhookConfiguration
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}
|
||||
namespace: {{ include "kubevpn.namespace" . }}
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
- v1beta1
|
||||
clientConfig:
|
||||
caBundle: {{ .Values.tls.crt }}
|
||||
service:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
namespace: {{ include "kubevpn.namespace" . }}
|
||||
path: /pods
|
||||
port: 80
|
||||
failurePolicy: Ignore
|
||||
matchPolicy: Equivalent
|
||||
name: {{ include "kubevpn.fullname" . }}.naison.io
|
||||
namespaceSelector: { }
|
||||
objectSelector: { }
|
||||
reinvocationPolicy: Never
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- DELETE
|
||||
resources:
|
||||
- pods
|
||||
scope: Namespaced
|
||||
sideEffects: None
|
||||
timeoutSeconds: 15
|
||||
70
charts/kubevpn/templates/role.yaml
Normal file
70
charts/kubevpn/templates/role.yaml
Normal file
@@ -0,0 +1,70 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
namespace: {{ include "kubevpn.namespace" . }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resourceNames:
|
||||
- {{ include "kubevpn.fullname" . }}
|
||||
resources:
|
||||
- configmaps
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "namespaces" ]
|
||||
resourceNames: ["{{ include "kubevpn.namespace" . }}"]
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- apiGroups: [ "apps" ]
|
||||
resources: [ "deployments/scale", "deployments" ]
|
||||
resourceNames:
|
||||
- {{ include "kubevpn.fullname" . }}
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- admissionregistration.k8s.io
|
||||
resources:
|
||||
- mutatingwebhookconfigurations
|
||||
resourceNames:
|
||||
- {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
27
charts/kubevpn/templates/rolebinding.yaml
Normal file
27
charts/kubevpn/templates/rolebinding.yaml
Normal file
@@ -0,0 +1,27 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
namespace: {{ include "kubevpn.namespace" . }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
namespace: {{ include "kubevpn.namespace" . }}
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
namespace: {{ include "kubevpn.namespace" . }}
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
10
charts/kubevpn/templates/secret.yaml
Normal file
10
charts/kubevpn/templates/secret.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
namespace: {{ include "kubevpn.namespace" . }}
|
||||
type: Opaque
|
||||
stringData:
|
||||
tls_crt: {{ .Values.tls.crt }}
|
||||
tls_key: {{ .Values.tls.key }}
|
||||
tls_server_name: {{ include "kubevpn.fullname" . }}.{{ include "kubevpn.namespace" . }}
|
||||
32
charts/kubevpn/templates/service.yaml
Normal file
32
charts/kubevpn/templates/service.yaml
Normal file
@@ -0,0 +1,32 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
namespace: {{ include "kubevpn.namespace" . }}
|
||||
labels:
|
||||
{{- include "kubevpn.labels" . | nindent 4 }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- name: 8422-for-udp
|
||||
port: {{ .Values.service.port8422 }}
|
||||
protocol: UDP
|
||||
targetPort: 8422
|
||||
- name: 10800-for-tcp
|
||||
port: {{ .Values.service.port10800 }}
|
||||
protocol: TCP
|
||||
targetPort: 10800
|
||||
- name: 9002-for-envoy
|
||||
port: {{ .Values.service.port9002 }}
|
||||
protocol: TCP
|
||||
targetPort: 9002
|
||||
- name: 80-for-webhook
|
||||
port: {{ .Values.service.port80 }}
|
||||
protocol: TCP
|
||||
targetPort: 80
|
||||
- name: 53-for-dns
|
||||
port: {{ .Values.service.port53 }}
|
||||
protocol: UDP
|
||||
targetPort: 53
|
||||
selector:
|
||||
{{- include "kubevpn.selectorLabels" . | nindent 4 }}
|
||||
14
charts/kubevpn/templates/serviceaccount.yaml
Normal file
14
charts/kubevpn/templates/serviceaccount.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "kubevpn.serviceAccountName" . }}
|
||||
namespace: {{ include "kubevpn.namespace" . }}
|
||||
labels:
|
||||
{{- include "kubevpn.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceAccount.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
|
||||
{{- end }}
|
||||
120  charts/kubevpn/values.yaml  Normal file
@@ -0,0 +1,120 @@
# Default values for kubevpn.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# default namespace
namespace: kubevpn
# default is system mode, available ["system", "gvisor"]
# system: needs privileged permission and cap NET_ADMIN (best experience)
# gvisor: does not need any additional permission (best compatibility)
netstack: system

replicaCount: 1

image:
  repository: ghcr.io/kubenetworks/kubevpn
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

imagePullSecrets: [ ]
nameOverride: ""
fullnameOverride: "kubevpn-traffic-manager"

# this field is used if a value is configured
# if not configured, it will get this value from the cluster automatically
cidr:
  pod: ""
  service: ""

tls:
  # will be auto generated in the job
  crt: ""
  key: ""

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Automatically mount a ServiceAccount's API credentials?
  automount: true
  # Annotations to add to the service account
  annotations: { }
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

podAnnotations: { }
podLabels:

podSecurityContext: { }
  # fsGroup: 2000
securityContext:
  capabilities:
    add:
      - NET_ADMIN
  privileged: true
  runAsUser: 0
  runAsGroup: 0

service:
  type: ClusterIP
  port8422: 8422
  port9002: 9002
  port10800: 10800
  port80: 80
  port53: 53

resources:
  limits:
    cpu: "2"
    memory: 2Gi
  requests:
    cpu: 500m
    memory: 512Mi
resourcesSmall:
  limits:
    cpu: 100m
    memory: 128Mi
  requests:
    cpu: 100m
    memory: 128Mi

livenessProbe:
  httpGet:
    path: /
    port: http
readinessProbe:
  httpGet:
    path: /
    port: http

autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 1
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

# Additional volumes on the output Deployment definition.
volumes:
  - configMap:
      defaultMode: 420
      items:
        - key: ENVOY_CONFIG
          path: envoy-config.yaml
      name: kubevpn-traffic-manager
      optional: false
    name: envoy-config


# Additional volumeMounts on the output Deployment definition.
volumeMounts: [ ]
# - name: foo
#   mountPath: "/etc/foo"
#   readOnly: true

nodeSelector: { }

tolerations: [ ]

affinity: { }
210  cmd/kubevpn/cmds/alias.go  Normal file
@@ -0,0 +1,210 @@
package cmds

import (
    "errors"
    "fmt"
    "io"
    "os"
    "os/exec"
    "strings"

    "github.com/spf13/cobra"
    "k8s.io/apimachinery/pkg/util/sets"
    cmdutil "k8s.io/kubectl/pkg/cmd/util"
    "k8s.io/kubectl/pkg/util/i18n"
    "k8s.io/kubectl/pkg/util/templates"
    yaml "sigs.k8s.io/yaml/goyaml.v3"

    "github.com/wencaiwulue/kubevpn/v2/pkg/config"
    "github.com/wencaiwulue/kubevpn/v2/pkg/util"
)

// CmdAlias
/**
Name: test
Description: this is a test environment
Needs: test1
Flags:
- connect
- --kubeconfig=~/.kube/config
- --namespace=test
- --lite
---

Name: test1
Description: this is another test environment
Flags:
- connect
- --kubeconfig=~/.kube/jumper_config
- --namespace=test
- --extra-hosts=xxx.com
*/
func CmdAlias(f cmdutil.Factory) *cobra.Command {
    var localFile, remoteAddr string
    cmd := &cobra.Command{
        Use:   "alias",
        Short: i18n.T("Config file alias to execute command simply"),
        Long: templates.LongDesc(i18n.T(`
Config file alias to execute command simply, just like ssh alias config

Please point to an existing, complete config file:

1. Via the command-line flag --kubevpnconfig
2. Via the KUBEVPNCONFIG environment variable
3. In your home directory as ~/.kubevpn/config.yaml

It will read ~/.kubevpn/config.yaml file as config, also support special file path
by flag -f. It also supports depends relationship, like one cluster api server needs to
access via another cluster, you can use syntax needs. it will do action to needs cluster first
and then do action to target cluster
`)),
        Example: templates.Examples(i18n.T(`
If you have following config in your ~/.kubevpn/config.yaml

Name: dev
Needs: jumper
Flags:
- connect
- --kubeconfig=~/.kube/config
- --namespace=default
- --lite
---

Name: jumper
Flags:
- connect
- --kubeconfig=~/.kube/jumper_config
- --namespace=test
- --extra-hosts=xxx.com

Config file support three field: Name,Needs,Flags

# Use kubevpn alias config to simply execute command, connect to cluster network by order: jumper --> dev
kubevpn alias dev

# kubevpn alias jumper, just connect to cluster jumper
kubevpn alias jumper
`)),
        PreRunE: func(cmd *cobra.Command, args []string) (err error) {
            if localFile != "" {
                _, err = os.Stat(localFile)
            }
            return err
        },
        Args: cobra.MatchAll(cobra.ExactArgs(1)),
        RunE: func(cmd *cobra.Command, args []string) error {
            configs, err := ParseAndGet(localFile, remoteAddr, args[0])
            if err != nil {
                return err
            }
            name, err := os.Executable()
            if err != nil {
                return err
            }
            for _, conf := range configs {
                c := exec.Command(name, conf.Flags...)
                c.Stdout = os.Stdout
                c.Stdin = os.Stdin
                c.Stderr = os.Stderr
                fmt.Println(fmt.Sprintf("Name: %s", conf.Name))
                if conf.Description != "" {
                    fmt.Println(fmt.Sprintf("Description: %s", conf.Description))
                }
                fmt.Println(fmt.Sprintf("Command: %v", c.Args))
                err = c.Run()
                if err != nil {
                    return err
                }
            }
            return nil
        },
    }
    cmd.Flags().StringVarP(&localFile, "kubevpnconfig", "f", util.If(os.Getenv("KUBEVPNCONFIG") != "", os.Getenv("KUBEVPNCONFIG"), config.GetConfigFile()), "Path to the kubevpnconfig file to use for CLI requests.")
    cmd.Flags().StringVarP(&remoteAddr, "remote", "r", "", "Remote config file, eg: https://raw.githubusercontent.com/kubenetworks/kubevpn/master/pkg/config/config.yaml")
    return cmd
}

func ParseAndGet(localFile, remoteAddr string, aliasName string) ([]Config, error) {
    var content []byte
    var err error
    var path string
    if localFile != "" {
        path = localFile
        content, err = os.ReadFile(path)
    } else if remoteAddr != "" {
        path = remoteAddr
        content, err = util.DownloadFileStream(path)
    } else {
        path = config.GetConfigFile()
        content, err = os.ReadFile(path)
    }
    if err != nil {
        return nil, err
    }
    list, err := ParseConfig(content)
    if err != nil {
        return nil, err
    }
    configs, err := GetConfigs(list, aliasName)
    if err != nil {
        return nil, err
    }
    if len(configs) == 0 {
        var names []string
        for _, c := range list {
            if c.Name != "" {
                names = append(names, c.Name)
            }
        }
        err = errors.New(fmt.Sprintf("Can't find any alias for the name: '%s', available: \n[\"%s\"]\nPlease check config file: %s", aliasName, strings.Join(names, "\", \""), path))
        return nil, err
    }
    return configs, nil
}

func ParseConfig(file []byte) ([]Config, error) {
    decoder := yaml.NewDecoder(strings.NewReader(string(file)))
    var configs []Config
    for {
        var cfg Config
        err := decoder.Decode(&cfg)
        if err != nil {
            if err == io.EOF {
                break
            }
            return nil, err
        }
        configs = append(configs, cfg)
    }
    return configs, nil
}

func GetConfigs(configs []Config, name string) ([]Config, error) {
    m := make(map[string]Config)
    for _, config := range configs {
        m[config.Name] = config
    }
    var result []Config
    var set []string
    for !sets.New[string](set...).Has(name) {
        config, ok := m[name]
        if ok {
            result = append([]Config{config}, result...)
            set = append(set, name)
            name = config.Needs
            if name == "" {
                return result, nil
            }
        } else {
            return result, nil
        }
    }
    return nil, fmt.Errorf("loop jump detected: %s. verify your configuration", strings.Join(append(set, name), " -> "))
}

type Config struct {
    Name        string   `yaml:"Name"`
    Description string   `yaml:"Description"`
    Needs       string   `yaml:"Needs,omitempty"`
    Flags       []string `yaml:"Flags,omitempty"`
}
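The alias machinery above resolves a Needs chain before running anything: ParseConfig splits the multi-document YAML into []Config, and GetConfigs returns the requested alias preceded by its dependencies (or reports a loop). A minimal sketch of that behaviour, written as an extra test in the same package, illustrative only and not part of the change set:

package cmds

import (
    "fmt"
    "testing"
)

// Illustrative only: resolve the execution order for alias "dev",
// which declares Needs: jumper.
func TestAliasResolutionOrderSketch(t *testing.T) {
    raw := []byte(`Name: dev
Needs: jumper
Flags:
- connect
---
Name: jumper
Flags:
- connect`)

    // ParseConfig splits the multi-document YAML into []Config.
    list, err := ParseConfig(raw)
    if err != nil {
        t.Fatal(err)
    }
    // GetConfigs walks the Needs chain, dependencies first.
    ordered, err := GetConfigs(list, "dev")
    if err != nil {
        t.Fatal(err)
    }
    for _, c := range ordered {
        fmt.Println(c.Name) // prints "jumper", then "dev"
    }
}

Run against the functions above this should print jumper before dev, mirroring the "jumper --> dev" order described in the command help.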
229  cmd/kubevpn/cmds/alias_test.go  Normal file
@@ -0,0 +1,229 @@
package cmds

import (
    "context"
    "reflect"
    "testing"

    plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)

func TestAlias(t *testing.T) {
    str := `Name: test
Needs: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test

---

Name: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com`
    _, err := ParseConfig([]byte(str))
    if err != nil {
        plog.G(context.Background()).Fatal(err)
    }
}

func TestCheckLoop(t *testing.T) {
    str := `Name: test
Needs: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test

---

Name: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com`
    _, err := ParseConfig([]byte(str))
    if err != nil {
        plog.G(context.Background()).Fatal(err)
    }
}

func TestLoop(t *testing.T) {
    data := []struct {
        Config      string
        Run         string
        ExpectError bool
        ExpectOrder []string
    }{
        {
            Config: `
Name: test
Needs: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test

---

Name: test1
Needs: test
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com
`,
            Run:         "test",
            ExpectError: true,
            ExpectOrder: nil,
        },
        {
            Config: `
Name: test
Needs: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test

---

Name: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com
`,
            Run:         "test",
            ExpectError: false,
            ExpectOrder: []string{"test1", "test"},
        },
        {
            Config: `
Name: test
Needs: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test

---

Name: test1
Needs: test2
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com
`,
            Run:         "test",
            ExpectError: false,
            ExpectOrder: []string{"test1", "test"},
        },
        {
            Config: `
Name: test
Needs: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test

---

Name: test1
Needs: test2
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com

---

Name: test2
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com
`,
            Run:         "test",
            ExpectError: false,
            ExpectOrder: []string{"test2", "test1", "test"},
        },
        {
            Config: `
Name: test
Needs: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test

---

Name: test1
Needs: test2
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com

---

Name: test2
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com
`,
            Run:         "test2",
            ExpectError: false,
            ExpectOrder: []string{"test2"},
        },
        {
            Config: `
Name: test
Needs: test1
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test

---

Name: test1
Needs: test2
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com

---

Name: test2
Flags:
- --kubeconfig=~/.kube/config
- --namespace=test
- --extra-hosts=xxx.com
`,
            Run:         "test1",
            ExpectError: false,
            ExpectOrder: []string{"test2", "test1"},
        },
    }
    for _, datum := range data {
        configs, err := ParseConfig([]byte(datum.Config))
        if err != nil {
            plog.G(context.Background()).Fatal(err)
        }
        getConfigs, err := GetConfigs(configs, datum.Run)
        if err != nil && !datum.ExpectError {
            plog.G(context.Background()).Fatal(err)
        } else if err != nil {
        }
        if datum.ExpectError {
            continue
        }
        var c []string
        for _, config := range getConfigs {
            c = append(c, config.Name)
        }
        if !reflect.DeepEqual(c, datum.ExpectOrder) {
            plog.G(context.Background()).Fatalf("Not match, expect: %v, real: %v", datum.ExpectOrder, c)
        }
    }
}
@@ -1,10 +1,11 @@
package cmds

import (
    "context"
    "fmt"
    "io"
    "os"

    pkgerr "github.com/pkg/errors"
    log "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
    "google.golang.org/grpc/codes"
@@ -14,59 +15,78 @@ import (
    "k8s.io/kubectl/pkg/util/i18n"
    "k8s.io/kubectl/pkg/util/templates"

    "github.com/wencaiwulue/kubevpn/pkg/config"
    "github.com/wencaiwulue/kubevpn/pkg/daemon"
    "github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
    "github.com/wencaiwulue/kubevpn/pkg/handler"
    "github.com/wencaiwulue/kubevpn/pkg/util"
    "github.com/wencaiwulue/kubevpn/v2/pkg/config"
    "github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
    "github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
    "github.com/wencaiwulue/kubevpn/v2/pkg/handler"
    plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
    pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
    "github.com/wencaiwulue/kubevpn/v2/pkg/util"
    "github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
)

// CmdClone multiple cluster operate, can start up one deployment to another cluster
// kubectl exec POD_NAME -c CONTAINER_NAME /sbin/killall5 or ephemeralcontainers
func CmdClone(f cmdutil.Factory) *cobra.Command {
    var options = handler.CloneOptions{}
    var sshConf = &util.SshConfig{}
    var sshConf = &pkgssh.SshConfig{}
    var extraRoute = &handler.ExtraRouteInfo{}
    var transferImage bool
    var syncDir string
    var imagePullSecretName string
    cmd := &cobra.Command{
        Use: "clone",
        Short: i18n.T("Clone workloads to target-kubeconfig cluster with same volume、env、and network"),
        Long: templates.LongDesc(i18n.T(`Clone workloads to target-kubeconfig cluster with same volume、env、and network`)),
        Short: i18n.T("Clone workloads to run in target-kubeconfig cluster with same volume、env、and network"),
        Long: templates.LongDesc(i18n.T(`
Clone workloads to run into target-kubeconfig cluster with same volume、env、and network

In this way, you can startup another deployment in same cluster or not, but with different image version,
it also supports service mesh proxy. only traffic with special header will hit to cloned_resource.
`)),
        Example: templates.Examples(i18n.T(`
# clone
- clone deployment in current cluster and current namespace
- clone deployment run into current cluster and current namespace
kubevpn clone deployment/productpage

- clone deployment in current cluster with different namespace
- clone deployment run into current cluster with different namespace
kubevpn clone deployment/productpage -n test

- clone deployment to another cluster
- clone deployment run into another cluster
kubevpn clone deployment/productpage --target-kubeconfig ~/.kube/other-kubeconfig

- clone multiple workloads
- clone multiple workloads run into current cluster and current namespace
kubevpn clone deployment/authors deployment/productpage
or
kubevpn clone deployment authors productpage

# clone with mesh, traffic with header a=1, will hit cloned workloads, otherwise hit origin workloads
kubevpn clone deployment/productpage --headers a=1
# clone with mesh, traffic with header foo=bar, will hit cloned workloads, otherwise hit origin workloads
kubevpn clone deployment/productpage --headers foo=bar

# clone workloads which api-server behind of bastion host or ssh jump host
kubevpn clone deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem --headers a=1
kubevpn clone deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem --headers foo=bar

# it also support ProxyJump, like
# It also supports ProxyJump, like
┌──────┐     ┌──────┐     ┌──────┐     ┌──────┐                 ┌────────────┐
│  pc  ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘     └──────┘     └──────┘     └──────┘                 └────────────┘
kubevpn clone service/productpage --ssh-alias <alias> --headers a=1
kubevpn clone service/productpage --ssh-alias <alias> --headers foo=bar

`)),
# Support ssh auth GSSAPI
kubevpn clone service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
kubevpn clone service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
kubevpn clone service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`)),
        PreRunE: func(cmd *cobra.Command, args []string) (err error) {
            // not support temporally
            if options.Engine == config.EngineGvisor {
                return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
            }
            plog.InitLoggerForClient()
            // startup daemon process and sudo process
            return daemon.StartupDaemon(cmd.Context())
            err = daemon.StartupDaemon(cmd.Context())
            if err != nil {
                return err
            }
            if transferImage {
                err = regctl.TransferImageWithRegctl(cmd.Context(), config.OriginImage, config.Image)
            }
            return err
        },
        RunE: func(cmd *cobra.Command, args []string) error {
            if len(args) == 0 {
@@ -81,19 +101,33 @@ func CmdClone(f cmdutil.Factory) *cobra.Command {
            // special empty string, eg: --target-registry ""
            options.IsChangeTargetRegistry = cmd.Flags().Changed("target-registry")

            bytes, ns, err := util.ConvertToKubeconfigBytes(f)
            if syncDir != "" {
                local, remote, err := util.ParseDirMapping(syncDir)
                if err != nil {
                    return pkgerr.Wrapf(err, "options 'sync' is invalid, %s", syncDir)
                }
                options.LocalDir = local
                options.RemoteDir = remote
            } else {
                options.RemoteDir = config.DefaultRemoteDir
            }

            bytes, ns, err := util.ConvertToKubeConfigBytes(f)
            if err != nil {
                return err
            }
            if !sshConf.IsEmpty() {
                if ip := util.GetAPIServerFromKubeConfigBytes(bytes); ip != nil {
                    extraRoute.ExtraCIDR = append(extraRoute.ExtraCIDR, ip.String())
                }
            }
            req := &rpc.CloneRequest{
                KubeconfigBytes: string(bytes),
                Namespace: ns,
                Headers: options.Headers,
                Workloads: args,
                ExtraCIDR: options.ExtraCIDR,
                ExtraDomain: options.ExtraDomain,
                UseLocalDNS: options.UseLocalDNS,
                OriginKubeconfigPath: util.GetKubeconfigPath(f),
                ExtraRoute: extraRoute.ToRPC(),
                OriginKubeconfigPath: util.GetKubeConfigPath(f),
                Engine: string(options.Engine),
                SshJump: sshConf.ToRPC(),
                TargetKubeconfig: options.TargetKubeconfig,
@@ -104,44 +138,60 @@ func CmdClone(f cmdutil.Factory) *cobra.Command {
                IsChangeTargetRegistry: options.IsChangeTargetRegistry,
                TransferImage: transferImage,
                Image: config.Image,
                Level: int32(log.DebugLevel),
                ImagePullSecretName: imagePullSecretName,
                Level: int32(util.If(config.Debug, log.DebugLevel, log.InfoLevel)),
                LocalDir: options.LocalDir,
                RemoteDir: options.RemoteDir,
            }
            cli, err := daemon.GetClient(false)
            if err != nil {
                return err
            }
            cli := daemon.GetClient(false)
            resp, err := cli.Clone(cmd.Context(), req)
            if err != nil {
                return err
            }
            for {
                recv, err := resp.Recv()
                if err == io.EOF {
                    break
                } else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
                    return nil
                } else if err != nil {
            err = util.PrintGRPCStream[rpc.CloneResponse](resp)
            if err != nil {
                if status.Code(err) == codes.Canceled {
                    err = remove(cli, args)
                    return err
                }
                fmt.Fprint(os.Stdout, recv.GetMessage())
                return err
            }
            util.Print(os.Stdout, "Now clone workloads running successfully on other cluster, enjoy it :)")
            util.Print(os.Stdout, config.Slogan)
            return nil
        },
    }
    cmd.Flags().StringToStringVarP(&options.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to clone workloads, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to clone workloads, format is k=v, like: k1=v1,k2=v2")
    cmd.Flags().BoolVar(&config.Debug, "debug", false, "Enable debug mode or not, true or false")
    cmd.Flags().StringVar(&config.Image, "image", config.Image, "Use this image to startup container")
    cmd.Flags().StringArrayVar(&options.ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
    cmd.Flags().StringArrayVar(&options.ExtraDomain, "extra-domain", []string{}, "Extra domain string, the resolved ip will add to route table, eg: --extra-domain test.abc.com --extra-domain foo.test.com")
    cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
    cmd.Flags().StringVar((*string)(&options.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
    cmd.Flags().BoolVar(&options.UseLocalDNS, "use-localdns", false, "if use-lcoaldns is true, kubevpn will start coredns listen at 53 to forward your dns queries. only support on linux now")
    cmd.Flags().StringToStringVarP(&options.Headers, "headers", "H", map[string]string{}, "Traffic with special headers (use `and` to match all headers) with reverse it to target cluster cloned workloads, If not special, redirect all traffic to target cluster cloned workloads. eg: --headers foo=bar --headers env=dev")
    handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName, &options.Engine)

    cmdutil.AddContainerVarFlags(cmd, &options.TargetContainer, options.TargetContainer)
    cmd.Flags().StringVar(&options.TargetImage, "target-image", "", "Clone container use this image to startup container, if not special, use origin image")
    cmd.Flags().StringVar(&options.TargetContainer, "target-container", "", "Clone container use special image to startup this container, if not special, use origin image")
    cmd.Flags().StringVar(&options.TargetNamespace, "target-namespace", "", "Clone workloads in this namespace, if not special, use origin namespace")
    cmd.Flags().StringVar(&options.TargetKubeconfig, "target-kubeconfig", "", "Clone workloads will create in this cluster, if not special, use origin cluster")
    cmd.Flags().StringVar(&options.TargetRegistry, "target-registry", "", "Clone workloads will create this registry domain to replace origin registry, if not special, use origin registry")
    cmd.Flags().StringVar(&syncDir, "sync", "", "Sync local dir to remote pod dir. format: LOCAL_DIR:REMOTE_DIR, eg: ~/code:/app/code")

    addSshFlags(cmd, sshConf)
    handler.AddExtraRoute(cmd.Flags(), extraRoute)
    pkgssh.AddSshFlags(cmd.Flags(), sshConf)
    cmd.ValidArgsFunction = utilcomp.ResourceTypeAndNameCompletionFunc(f)
    return cmd
}

func remove(cli rpc.DaemonClient, args []string) error {
    resp, err := cli.Remove(context.Background(), &rpc.RemoveRequest{
        Workloads: args,
    })
    if err != nil {
        return err
    }
    err = util.PrintGRPCStream[rpc.DisconnectResponse](resp)
    if err != nil {
        if status.Code(err) == codes.Canceled {
            return nil
        }
        return err
    }
    return nil
}
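The new --sync flag above is parsed by util.ParseDirMapping into a local and a remote directory. The helper's real implementation is not shown in this diff; the sketch below is a hypothetical stand-in (the name parseDirMappingSketch is invented for illustration) that only shows the LOCAL_DIR:REMOTE_DIR split described in the flag help:

package main

import (
    "fmt"
    "strings"
)

// parseDirMappingSketch is a hypothetical stand-in for util.ParseDirMapping;
// the project's own helper lives in pkg/util and may behave differently.
func parseDirMappingSketch(s string) (local, remote string, err error) {
    // Split on the last ':' so a Windows drive letter like "C:\code" in the
    // local part is not mistaken for the separator.
    idx := strings.LastIndex(s, ":")
    if idx <= 0 || idx == len(s)-1 {
        return "", "", fmt.Errorf("invalid dir mapping %q, expected LOCAL_DIR:REMOTE_DIR", s)
    }
    return s[:idx], s[idx+1:], nil
}

func main() {
    local, remote, err := parseDirMappingSketch("~/code:/app/code")
    if err != nil {
        panic(err)
    }
    fmt.Println(local, remote) // ~/code /app/code
}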
@@ -9,9 +9,10 @@ import (
    "k8s.io/kubectl/pkg/util/i18n"
    "k8s.io/kubectl/pkg/util/templates"

    "github.com/wencaiwulue/kubevpn/pkg/daemon"
    "github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
    "github.com/wencaiwulue/kubevpn/pkg/util"
    "github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
    "github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
    pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
    "github.com/wencaiwulue/kubevpn/v2/pkg/util"
)

func CmdConfig(f cmdutil.Factory) *cobra.Command {
@@ -25,27 +26,27 @@ func CmdConfig(f cmdutil.Factory) *cobra.Command {
}

func cmdConfigAdd(f cmdutil.Factory) *cobra.Command {
    var sshConf = &util.SshConfig{}
    var sshConf = &pkgssh.SshConfig{}
    cmd := &cobra.Command{
        Use: "add",
        Short: "Proxy kubeconfig",
        Short: i18n.T("Proxy kubeconfig"),
        Long: templates.LongDesc(i18n.T(`proxy kubeconfig which behind of ssh jump server`)),
        Example: templates.Examples(i18n.T(`
# proxy api-server which api-server behind of bastion host or ssh jump host
kubevpn config add --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem

# it also support ProxyJump, like
# It also supports ProxyJump, like
┌──────┐     ┌──────┐     ┌──────┐     ┌──────┐                 ┌────────────┐
│  pc  ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘     └──────┘     └──────┘     └──────┘                 └────────────┘
kubevpn config add --ssh-alias <alias>
`)),
`)),
        PreRunE: func(cmd *cobra.Command, args []string) (err error) {
            // startup daemon process and sudo process
            return daemon.StartupDaemon(cmd.Context())
        },
        RunE: func(cmd *cobra.Command, args []string) error {
            bytes, ns, err := util.ConvertToKubeconfigBytes(f)
            bytes, ns, err := util.ConvertToKubeConfigBytes(f)
            if err != nil {
                return err
            }
@@ -54,28 +55,33 @@ func cmdConfigAdd(f cmdutil.Factory) *cobra.Command {
                Namespace: ns,
                SshJump: sshConf.ToRPC(),
            }
            cli := daemon.GetClient(false)
            cli, err := daemon.GetClient(false)
            if err != nil {
                return err
            }
            resp, err := cli.ConfigAdd(cmd.Context(), req)
            if err != nil {
                return err
            }
            fmt.Fprint(os.Stdout, resp.ClusterID)
            _, _ = fmt.Fprint(os.Stdout, resp.ClusterID)
            return nil
        },
    }
    addSshFlags(cmd, sshConf)
    pkgssh.AddSshFlags(cmd.Flags(), sshConf)
    return cmd
}

func cmdConfigRemove(f cmdutil.Factory) *cobra.Command {
    cmd := &cobra.Command{
        Use: "remove",
        Short: "Remove proxy kubeconfig",
        Long: templates.LongDesc(i18n.T(`Remove proxy kubeconfig which behind of ssh jump server`)),
        Short: i18n.T("Remove proxy kubeconfig"),
        Long: templates.LongDesc(i18n.T(`
Remove proxy kubeconfig which behind of ssh jump server
`)),
        Example: templates.Examples(i18n.T(`
# remove proxy api-server which api-server behind of bastion host or ssh jump host
kubevpn config remove --kubeconfig /var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/947048961.kubeconfig
`)),
`)),
        PreRunE: func(cmd *cobra.Command, args []string) (err error) {
            // startup daemon process and sudo process
            return daemon.StartupDaemon(cmd.Context())
@@ -85,8 +91,11 @@ func cmdConfigRemove(f cmdutil.Factory) *cobra.Command {
            req := &rpc.ConfigRemoveRequest{
                ClusterID: args[0],
            }
            cli := daemon.GetClient(false)
            _, err := cli.ConfigRemove(cmd.Context(), req)
            cli, err := daemon.GetClient(false)
            if err != nil {
                return err
            }
            _, err = cli.ConfigRemove(cmd.Context(), req)
            if err != nil {
                return err
            }
@@ -1,33 +1,49 @@
package cmds

import (
    "context"
    "fmt"
    "io"
    "os"

    log "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
    cmdutil "k8s.io/kubectl/pkg/cmd/util"
    "k8s.io/kubectl/pkg/util/i18n"
    "k8s.io/kubectl/pkg/util/templates"
    "k8s.io/utils/ptr"

    "github.com/wencaiwulue/kubevpn/pkg/config"
    "github.com/wencaiwulue/kubevpn/pkg/daemon"
    "github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
    "github.com/wencaiwulue/kubevpn/pkg/handler"
    "github.com/wencaiwulue/kubevpn/pkg/util"
    "github.com/wencaiwulue/kubevpn/v2/pkg/config"
    "github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
    "github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
    "github.com/wencaiwulue/kubevpn/v2/pkg/handler"
    plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
    pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
    "github.com/wencaiwulue/kubevpn/v2/pkg/util"
    "github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
)

func CmdConnect(f cmdutil.Factory) *cobra.Command {
    var connect = &handler.ConnectOptions{}
    var sshConf = &util.SshConfig{}
    var extraRoute = &handler.ExtraRouteInfo{}
    var sshConf = &pkgssh.SshConfig{}
    var transferImage, foreground, lite bool
    var imagePullSecretName string
    var managerNamespace string
    cmd := &cobra.Command{
        Use: "connect",
        Short: i18n.T("Connect to kubernetes cluster network"),
        Long: templates.LongDesc(i18n.T(`Connect to kubernetes cluster network`)),
        Long: templates.LongDesc(i18n.T(`
Connect to kubernetes cluster network

After connect to kubernetes cluster network, you can ping PodIP or
curl ServiceIP in local PC, it also supports k8s DNS resolve.
Like: curl authors/authors.default/authors.default.svc/authors.default.svc.cluster.local.
So you can start up your application in local PC. depends on anything in
k8s cluster is ok, connect to them just like in k8s cluster.
`)),
        Example: templates.Examples(i18n.T(`
# Connect to k8s cluster network
kubevpn connect
@@ -35,88 +51,117 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
# Connect to api-server behind of bastion host or ssh jump host
kubevpn connect --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem

# it also support ProxyJump, like
# It also supports ProxyJump, like
┌──────┐     ┌──────┐     ┌──────┐     ┌──────┐                 ┌────────────┐
│  pc  ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘     └──────┘     └──────┘     └──────┘                 └────────────┘
kubevpn connect --ssh-alias <alias>

`)),
# Support ssh auth GSSAPI
kubevpn connect --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
kubevpn connect --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
kubevpn connect --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>

# Support ssh jump inline
kubevpn connect --ssh-jump "--ssh-addr jump.naison.org --ssh-username naison --gssapi-password xxx" --ssh-username root --ssh-addr 127.0.0.1:22 --ssh-keyfile ~/.ssh/dst.pem
`)),
        PreRunE: func(cmd *cobra.Command, args []string) error {
            plog.InitLoggerForClient()
            // startup daemon process and sudo process
            return daemon.StartupDaemon(cmd.Context())
        },
        RunE: func(cmd *cobra.Command, args []string) error {
            bytes, ns, err := util.ConvertToKubeconfigBytes(f)
            err := daemon.StartupDaemon(cmd.Context())
            if err != nil {
                return err
            }
            if transferImage {
                err = regctl.TransferImageWithRegctl(cmd.Context(), config.OriginImage, config.Image)
            }
            return err
        },
        RunE: func(cmd *cobra.Command, args []string) error {
            bytes, ns, err := util.ConvertToKubeConfigBytes(f)
            if err != nil {
                return err
            }
            if !sshConf.IsEmpty() {
                if ip := util.GetAPIServerFromKubeConfigBytes(bytes); ip != nil {
                    extraRoute.ExtraCIDR = append(extraRoute.ExtraCIDR, ip.String())
                }
            }
            req := &rpc.ConnectRequest{
                KubeconfigBytes: string(bytes),
                Namespace: ns,
                ExtraCIDR: connect.ExtraCIDR,
                ExtraDomain: connect.ExtraDomain,
                UseLocalDNS: connect.UseLocalDNS,
                ExtraRoute: extraRoute.ToRPC(),
                Engine: string(connect.Engine),
                OriginKubeconfigPath: util.GetKubeconfigPath(f),
                OriginKubeconfigPath: util.GetKubeConfigPath(f),

                SshJump: sshConf.ToRPC(),
                TransferImage: transferImage,
                Foreground: foreground,
                Image: config.Image,
                Level: int32(log.DebugLevel),
                SshJump: sshConf.ToRPC(),
                TransferImage: transferImage,
                Image: config.Image,
                ImagePullSecretName: imagePullSecretName,
                Level: int32(util.If(config.Debug, log.DebugLevel, log.InfoLevel)),
                ManagerNamespace: managerNamespace,
            }
            // if is foreground, send to sudo daemon server
            cli := daemon.GetClient(false)
            if lite {
                resp, err := cli.ConnectFork(cmd.Context(), req)
                if err != nil {
                    return err
                }
                for {
                    recv, err := resp.Recv()
                    if err == io.EOF {
                        break
                    } else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
                        return nil
                    } else if err != nil {
                        return err
                    }
                    fmt.Fprint(os.Stdout, recv.GetMessage())
                }
            } else {
                resp, err := cli.Connect(cmd.Context(), req)
                if err != nil {
                    return err
                }
                for {
                    recv, err := resp.Recv()
                    if err == io.EOF {
                        break
                    } else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
                        return nil
                    } else if err != nil {
                        return err
                    }
                    fmt.Fprint(os.Stdout, recv.GetMessage())
                }
            cli, err := daemon.GetClient(false)
            if err != nil {
                return err
            }
            if !req.Foreground {
                util.Print(os.Stdout, "Now you can access resources in the kubernetes cluster, enjoy it :)")
            var resp grpc.ClientStream
            if lite {
                resp, err = cli.ConnectFork(cmd.Context(), req)
            } else {
                resp, err = cli.Connect(cmd.Context(), req)
            }
            if err != nil {
                return err
            }
            err = util.PrintGRPCStream[rpc.ConnectResponse](resp)
            if err != nil {
                if status.Code(err) == codes.Canceled {
                    err = disconnect(cli, bytes, ns, sshConf)
                    return err
                }
                return err
            }
            if !foreground {
                util.Print(os.Stdout, config.Slogan)
            } else {
                <-cmd.Context().Done()
                err = disconnect(cli, bytes, ns, sshConf)
                if err != nil {
                    return err
                }
                _, _ = fmt.Fprint(os.Stdout, "Disconnect completed")
            }
            return nil
        },
    }
    cmd.Flags().BoolVar(&config.Debug, "debug", false, "enable debug mode or not, true or false")
    cmd.Flags().StringVar(&config.Image, "image", config.Image, "use this image to startup container")
    cmd.Flags().StringArrayVar(&connect.ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
    cmd.Flags().StringArrayVar(&connect.ExtraDomain, "extra-domain", []string{}, "Extra domain string, the resolved ip will add to route table, eg: --extra-domain test.abc.com --extra-domain foo.test.com")
    cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
    cmd.Flags().BoolVar(&connect.UseLocalDNS, "use-localdns", false, "if use-lcoaldns is true, kubevpn will start coredns listen at 53 to forward your dns queries. only support on linux now")
    cmd.Flags().StringVar((*string)(&connect.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
    handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName, &connect.Engine)
    cmd.Flags().BoolVar(&foreground, "foreground", false, "Hang up")
    cmd.Flags().BoolVar(&lite, "lite", false, "connect to multiple cluster in lite mode, you needs to special this options")
    cmd.Flags().BoolVar(&lite, "lite", false, "connect to multiple cluster in lite mode. mode \"lite\": design for only connecting to multiple cluster network. mode \"full\": not only connect to cluster network, it also supports proxy workloads inbound traffic to local PC.")
    cmd.Flags().StringVar(&managerNamespace, "manager-namespace", "", "The namespace where the traffic manager is to be found. Only works in cluster mode (install kubevpn server by helm)")

    addSshFlags(cmd, sshConf)
    handler.AddExtraRoute(cmd.Flags(), extraRoute)
    pkgssh.AddSshFlags(cmd.Flags(), sshConf)
    return cmd
}

func disconnect(cli rpc.DaemonClient, bytes []byte, ns string, sshConf *pkgssh.SshConfig) error {
    resp, err := cli.Disconnect(context.Background(), &rpc.DisconnectRequest{
        KubeconfigBytes: ptr.To(string(bytes)),
        Namespace: ptr.To(ns),
        SshJump: sshConf.ToRPC(),
    })
    if err != nil {
        plog.G(context.Background()).Errorf("Disconnect error: %v", err)
        return err
    }
    err = util.PrintGRPCStream[rpc.DisconnectResponse](resp)
    if err != nil {
        if status.Code(err) == codes.Canceled {
            return nil
        }
        return err
    }
    return nil
}
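Several commands in this change set replace hand-written resp.Recv() loops with util.PrintGRPCStream[T](resp). The helper's actual code is not part of this diff; the sketch below only shows the general shape such a generic stream printer could take, assuming the response types expose GetMessage() the way the protobuf-generated rpc types do:

package sketch

import (
    "errors"
    "io"
    "os"

    "google.golang.org/grpc"
)

// messageGetter matches response types such as rpc.ConnectResponse that
// expose the streamed log line via GetMessage().
type messageGetter interface {
    GetMessage() string
}

// printGRPCStream drains a server-side stream and prints each message;
// callers can still inspect status.Code(err) (e.g. codes.Canceled) on return.
func printGRPCStream[T any, PT interface {
    *T
    messageGetter
}](stream grpc.ClientStream) error {
    for {
        var msg T
        // RecvMsg fills msg with the next server-streamed response.
        if err := stream.RecvMsg(&msg); err != nil {
            if errors.Is(err, io.EOF) {
                return nil // server finished the stream normally
            }
            return err
        }
        _, _ = os.Stdout.WriteString(PT(&msg).GetMessage())
    }
}

With constraint type inference, a call like printGRPCStream[rpc.ConnectResponse](resp) mirrors the call sites shown in the diff.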
@@ -1,16 +1,23 @@
package cmds

import (
    log "github.com/sirupsen/logrus"
    "context"

    "github.com/docker/docker/libnetwork/resolvconf"
    miekgdns "github.com/miekg/dns"
    "github.com/spf13/cobra"
    cmdutil "k8s.io/kubectl/pkg/cmd/util"
    "k8s.io/kubectl/pkg/util/i18n"
    "k8s.io/kubectl/pkg/util/templates"

    "github.com/wencaiwulue/kubevpn/pkg/config"
    "github.com/wencaiwulue/kubevpn/pkg/controlplane"
    "github.com/wencaiwulue/kubevpn/pkg/util"
    "github.com/wencaiwulue/kubevpn/v2/pkg/config"
    "github.com/wencaiwulue/kubevpn/v2/pkg/controlplane"
    "github.com/wencaiwulue/kubevpn/v2/pkg/dns"
    plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
    "github.com/wencaiwulue/kubevpn/v2/pkg/util"
)

func CmdControlPlane(_ cmdutil.Factory) *cobra.Command {
func CmdControlPlane(cmdutil.Factory) *cobra.Command {
    var (
        watchDirectoryFilename string
        port uint = 9002
@@ -18,12 +25,21 @@ func CmdControlPlane(_ cmdutil.Factory) *cobra.Command {
    cmd := &cobra.Command{
        Use: "control-plane",
        Hidden: true,
        Short: "Control-plane is a envoy xds server",
        Long: `Control-plane is a envoy xds server, distribute envoy route configuration`,
        Run: func(cmd *cobra.Command, args []string) {
            util.InitLogger(config.Debug)
            go util.StartupPProf(0)
            controlplane.Main(watchDirectoryFilename, port, log.StandardLogger())
        Short: i18n.T("Control-plane is a envoy xds server"),
        Long: templates.LongDesc(i18n.T(`
Control-plane is a envoy xds server, distribute envoy route configuration
`)),
        RunE: func(cmd *cobra.Command, args []string) error {
            go util.StartupPProfForServer(0)
            go func() {
                conf, err := miekgdns.ClientConfigFromFile(resolvconf.Path())
                if err != nil {
                    plog.G(context.Background()).Fatal(err)
                }
                plog.G(context.Background()).Fatal(dns.ListenAndServe("udp", ":53", conf))
            }()
            err := controlplane.Main(cmd.Context(), watchDirectoryFilename, port, plog.G(context.Background()))
            return err
        },
    }
    cmd.Flags().StringVarP(&watchDirectoryFilename, "watchDirectoryFilename", "w", "/etc/envoy/envoy-config.yaml", "full path to directory to watch for files")
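The control-plane now also answers DNS on :53, forwarding queries to the upstream servers read from resolv.conf. The project's own dns.ListenAndServe is not shown in this diff; the sketch below only illustrates what a minimal UDP forwarder built on github.com/miekg/dns (already imported above as miekgdns) could look like, with listenAndForward being an invented name:

package sketch

import (
    "net"

    miekgdns "github.com/miekg/dns"
)

// listenAndForward answers every query by relaying it to the first upstream
// taken from a dns.ClientConfig (as produced by ClientConfigFromFile above).
func listenAndForward(network, addr string, conf *miekgdns.ClientConfig) error {
    upstream := net.JoinHostPort(conf.Servers[0], conf.Port)
    handler := miekgdns.HandlerFunc(func(w miekgdns.ResponseWriter, req *miekgdns.Msg) {
        resp, err := miekgdns.Exchange(req, upstream) // one-shot UDP exchange with the upstream
        if err != nil {
            m := new(miekgdns.Msg)
            m.SetRcode(req, miekgdns.RcodeServerFailure)
            _ = w.WriteMsg(m)
            return
        }
        _ = w.WriteMsg(resp)
    })
    server := &miekgdns.Server{Addr: addr, Net: network, Handler: handler}
    return server.ListenAndServe()
}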
@@ -6,15 +6,14 @@ import (
    "strings"

    "github.com/spf13/cobra"
    "k8s.io/cli-runtime/pkg/genericclioptions"
    "k8s.io/cli-runtime/pkg/genericiooptions"
    cmdutil "k8s.io/kubectl/pkg/cmd/util"
    "k8s.io/kubectl/pkg/util/completion"
    "k8s.io/kubectl/pkg/util/i18n"
    "k8s.io/kubectl/pkg/util/templates"

    "github.com/wencaiwulue/kubevpn/pkg/cp"
    "github.com/wencaiwulue/kubevpn/pkg/handler"
    "github.com/wencaiwulue/kubevpn/pkg/util"
    "github.com/wencaiwulue/kubevpn/v2/pkg/cp"
    pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
)

var cpExample = templates.Examples(i18n.T(`
@@ -46,29 +45,35 @@ var cpExample = templates.Examples(i18n.T(`
# copy reverse proxy api-server behind of bastion host or ssh jump host
kubevpn cp deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem

# it also support ProxyJump, like
# It also supports ProxyJump, like
┌──────┐     ┌──────┐     ┌──────┐     ┌──────┐                 ┌────────────┐
│  pc  ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
└──────┘     └──────┘     └──────┘     └──────┘                 └────────────┘
kubevpn cp deployment/productpage --ssh-alias <alias>
kubevpn cp deployment/productpage:/tmp/foo /tmp/bar --ssh-alias <alias>

# Support ssh auth GSSAPI
kubevpn cp deployment/productpage:/tmp/foo /tmp/bar --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
kubevpn cp deployment/productpage:/tmp/foo /tmp/bar --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
kubevpn cp deployment/productpage:/tmp/foo /tmp/bar --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
`,
))

func CmdCp(f cmdutil.Factory) *cobra.Command {
    o := cp.NewCopyOptions(genericclioptions.IOStreams{
    o := cp.NewCopyOptions(genericiooptions.IOStreams{
        In: os.Stdin,
        Out: os.Stdout,
        ErrOut: os.Stderr,
    })
    var sshConf = &util.SshConfig{}
    var sshConf = &pkgssh.SshConfig{}
    cmd := &cobra.Command{
        Use: "cp <file-spec-src> <file-spec-dest>",
        DisableFlagsInUseLine: true,
        Hidden: true,
        Short: i18n.T("Copy files and directories to and from containers"),
        Long: i18n.T("Copy files and directories to and from containers. Different between kubectl cp is it will de-reference symbol link."),
        Example: cpExample,
        ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
            cmdutil.CheckErr(handler.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false))
            cmdutil.CheckErr(pkgssh.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false))

            var comps []string
            if len(args) == 0 {
@@ -80,14 +85,14 @@ func CmdCp(f cmdutil.Factory) *cobra.Command {
                    // complete <namespace>/<pod>
                    namespace := toComplete[:idx]
                    template := "{{ range .items }}{{ .metadata.namespace }}/{{ .metadata.name }}: {{ end }}"
                    comps = completion.CompGetFromTemplate(&template, f, namespace, cmd, []string{"pod"}, toComplete)
                    comps = completion.CompGetFromTemplate(&template, f, namespace, []string{"pod"}, toComplete)
                } else {
                    // Complete namespaces followed by a /
                    for _, ns := range completion.CompGetResource(f, cmd, "namespace", toComplete) {
                    for _, ns := range completion.CompGetResource(f, "namespace", toComplete) {
                        comps = append(comps, fmt.Sprintf("%s/", ns))
                    }
                    // Complete pod names followed by a :
                    for _, pod := range completion.CompGetResource(f, cmd, "pod", toComplete) {
                    for _, pod := range completion.CompGetResource(f, "pod", toComplete) {
                        comps = append(comps, fmt.Sprintf("%s:", pod))
                    }

@@ -130,11 +135,6 @@ func CmdCp(f cmdutil.Factory) *cobra.Command {
    cmd.Flags().BoolVarP(&o.NoPreserve, "no-preserve", "", false, "The copied file/directory's ownership and permissions will not be preserved in the container")
    cmd.Flags().IntVarP(&o.MaxTries, "retries", "", 0, "Set number of retries to complete a copy operation from a container. Specify 0 to disable or any negative value for infinite retrying. The default is 0 (no retry).")

    // for ssh jumper host
    cmd.Flags().StringVar(&sshConf.Addr, "ssh-addr", "", "Optional ssh jump server address to dial as <hostname>:<port>, eg: 127.0.0.1:22")
    cmd.Flags().StringVar(&sshConf.User, "ssh-username", "", "Optional username for ssh jump server")
    cmd.Flags().StringVar(&sshConf.Password, "ssh-password", "", "Optional password for ssh jump server")
    cmd.Flags().StringVar(&sshConf.Keyfile, "ssh-keyfile", "", "Optional file with private key for SSH authentication")
    cmd.Flags().StringVar(&sshConf.ConfigAlias, "ssh-alias", "", "Optional config alias with ~/.ssh/config for SSH authentication")
    pkgssh.AddSshFlags(cmd.Flags(), sshConf)
    return cmd
}
@@ -1,55 +1,89 @@
package cmds

import (
    "crypto/rand"
    "encoding/base64"
    "errors"
    "net/http"
    "os"
    "strconv"
    "path/filepath"
    "runtime/pprof"

    "github.com/spf13/cobra"
    cmdutil "k8s.io/kubectl/pkg/cmd/util"
    "k8s.io/kubectl/pkg/util/i18n"
    "k8s.io/kubectl/pkg/util/templates"

    "github.com/wencaiwulue/kubevpn/pkg/daemon"
    "github.com/wencaiwulue/kubevpn/v2/pkg/config"
    "github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
    "github.com/wencaiwulue/kubevpn/v2/pkg/dns"
    "github.com/wencaiwulue/kubevpn/v2/pkg/util"
)

func CmdDaemon(_ cmdutil.Factory) *cobra.Command {
func CmdDaemon(cmdutil.Factory) *cobra.Command {
    var opt = &daemon.SvrOption{}
    cmd := &cobra.Command{
        Use: "daemon",
        Short: i18n.T("Startup kubevpn daemon server"),
        Long: i18n.T(`Startup kubevpn daemon server`),
        Long: templates.LongDesc(i18n.T(`Startup kubevpn daemon server`)),
        PreRunE: func(cmd *cobra.Command, args []string) error {
            sockPath := daemon.GetSockPath(opt.IsSudo)
            err := os.Remove(sockPath)
            if err != nil && !errors.Is(err, os.ErrNotExist) {
            b := make([]byte, 32)
            if _, err := rand.Read(b); err != nil {
                return err
            }
            pidPath := daemon.GetPidPath(opt.IsSudo)
            err = os.Remove(pidPath)
            if err != nil && !errors.Is(err, os.ErrNotExist) {
                return err
            opt.ID = base64.URLEncoding.EncodeToString(b)

            if opt.IsSudo {
                go util.StartupPProf(config.SudoPProfPort)
                _ = os.RemoveAll("/etc/resolver")
                _ = dns.CleanupHosts()
                _ = util.CleanupTempKubeConfigFile()
            } else {
                go util.StartupPProf(config.PProfPort)
            }
            pid := os.Getpid()
            err = os.WriteFile(pidPath, []byte(strconv.Itoa(pid)), os.ModePerm)
            if err != nil {
                return err
            }
            err = os.Chmod(pidPath, os.ModePerm)
            return err
            return initLogfile(config.GetDaemonLogPath(opt.IsSudo))
        },
        RunE: func(cmd *cobra.Command, args []string) error {
        RunE: func(cmd *cobra.Command, args []string) (err error) {
            defer opt.Stop()
            defer func() {
                if errors.Is(err, http.ErrServerClosed) {
                    err = nil
                }
                if opt.IsSudo {
                    for _, profile := range pprof.Profiles() {
                        func() {
                            file, e := os.Create(filepath.Join(config.GetPProfPath(), profile.Name()))
                            if e != nil {
                                return
                            }
                            defer file.Close()
                            e = profile.WriteTo(file, 1)
                            if e != nil {
                                return
                            }
                        }()
                    }
                }
            }()
            return opt.Start(cmd.Context())
        },
        PostRun: func(cmd *cobra.Command, args []string) {
            sockPath := daemon.GetSockPath(opt.IsSudo)
            _ = os.Remove(sockPath)
            pidPath := daemon.GetPidPath(opt.IsSudo)
            _ = os.Remove(pidPath)
        },
        Hidden: true,
        DisableFlagsInUseLine: true,
    }
    cmd.Flags().BoolVar(&opt.IsSudo, "sudo", false, "is sudo or not")
    return cmd
}

func initLogfile(path string) error {
    _, err := os.Lstat(path)
    if os.IsNotExist(err) {
        var f *os.File
        f, err = os.Create(path)
        if err != nil {
            return err
        }
        _ = f.Close()
        return os.Chmod(path, 0644)
    }
    return nil
}
@@ -1,50 +1,46 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
dockercomp "github.com/docker/cli/cli/command/completion"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/completion"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/dev"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/dev"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
|
||||
)
|
||||
|
||||
func CmdDev(f cmdutil.Factory) *cobra.Command {
|
||||
cli, dockerCli, err := util.GetClient()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
var options = &dev.Options{
|
||||
NoProxy: false,
|
||||
ExtraRouteInfo: handler.ExtraRouteInfo{},
|
||||
}
|
||||
var devOptions = &dev.Options{
|
||||
Factory: f,
|
||||
NoProxy: false,
|
||||
ExtraCIDR: []string{},
|
||||
Cli: cli,
|
||||
DockerCli: dockerCli,
|
||||
}
|
||||
var sshConf = &util.SshConfig{}
|
||||
var sshConf = &pkgssh.SshConfig{}
|
||||
var transferImage bool
|
||||
var imagePullSecretName string
|
||||
var managerNamespace string
|
||||
cmd := &cobra.Command{
|
||||
Use: "dev TYPE/NAME [-c CONTAINER] [flags] -- [args...]",
|
||||
Short: i18n.T("Startup your kubernetes workloads in local Docker container"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Startup your kubernetes workloads in local Docker container with same volume、env、and network
|
||||
Startup your kubernetes workloads in local Docker container with same volume、env、and network
|
||||
|
||||
## What did i do:
|
||||
- Download volume which MountPath point to, mount to docker container
|
||||
- Connect to cluster network, set network to docker container
|
||||
- Get all environment with command (env), set env to docker container
|
||||
`)),
|
||||
## What did it do:
|
||||
- Download volume which MountPath point to, mount to docker container
|
||||
- Connect to cluster network, set network to docker container
|
||||
- Get all environment with command (env), set env to docker container
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# Develop workloads
|
||||
- develop deployment
|
||||
@@ -52,8 +48,8 @@ Startup your kubernetes workloads in local Docker container with same volume、e
|
||||
- develop service
|
||||
kubevpn dev service/productpage
|
||||
|
||||
# Develop workloads with mesh, traffic with header a=1, will hit local PC, otherwise no effect
|
||||
kubevpn dev service/productpage --headers a=1
|
||||
# Develop workloads with mesh, traffic with header foo=bar, will hit local PC, otherwise no effect
|
||||
kubevpn dev service/productpage --headers foo=bar
|
||||
|
||||
# Develop workloads without proxy traffic
|
||||
kubevpn dev service/productpage --no-proxy
|
||||
@@ -61,23 +57,28 @@ Startup your kubernetes workloads in local Docker container with same volume、e
|
||||
# Develop workloads which api-server behind of bastion host or ssh jump host
|
||||
kubevpn dev deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
|
||||
|
||||
# It also support ProxyJump, like
|
||||
# It also supports ProxyJump, like
|
||||
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
|
||||
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
|
||||
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
|
||||
kubevpn dev deployment/productpage --ssh-alias <alias>
|
||||
|
||||
# Switch to terminal mode; send stdin to 'bash' and sends stdout/stderror from 'bash' back to the client
|
||||
kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev -i -t --entrypoint /bin/bash
|
||||
kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev --entrypoint /bin/bash
|
||||
or
|
||||
kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev -it --entrypoint /bin/bash
|
||||
`)),
|
||||
kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev --entrypoint /bin/bash
|
||||
|
||||
# Support ssh auth GSSAPI
|
||||
kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab --entrypoint /bin/bash
|
||||
kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache --entrypoint /bin/bash
|
||||
kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD> --entrypoint /bin/bash
|
||||
`)),
|
||||
ValidArgsFunction: completion.ResourceTypeAndNameCompletionFunc(f),
|
||||
Args: cobra.MatchAll(cobra.OnlyValidArgs),
|
||||
DisableFlagsInUseLine: true,
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 0 {
|
||||
fmt.Fprintf(os.Stdout, "You must specify the type of resource to proxy. %s\n\n", cmdutil.SuggestAPIResources("kubevpn"))
|
||||
_, _ = fmt.Fprintf(os.Stdout, "You must specify the type of resource to proxy. %s\n\n", cmdutil.SuggestAPIResources("kubevpn"))
|
||||
fullCmdName := cmd.Parent().CommandPath()
|
||||
usageString := "Required resource not specified."
|
||||
if len(fullCmdName) > 0 && cmdutil.IsSiblingCommandExists(cmd, "explain") {
|
||||
@@ -85,92 +86,71 @@ Startup your kubernetes workloads in local Docker container with same volume、e
|
||||
}
|
||||
return cmdutil.UsageErrorf(cmd, usageString)
|
||||
}
|
||||
err = cmd.Flags().Parse(args[1:])
|
||||
err := cmd.Flags().Parse(args[1:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
util.InitLogger(false)
|
||||
// not support temporally
|
||||
if devOptions.Engine == config.EngineGvisor {
|
||||
return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
|
||||
}
|
||||
|
||||
plog.InitLoggerForClient()
|
||||
err = daemon.StartupDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return handler.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false)
|
||||
if transferImage {
|
||||
err = regctl.TransferImageWithRegctl(cmd.Context(), config.OriginImage, config.Image)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return pkgssh.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false)
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
devOptions.Workload = args[0]
|
||||
options.Workload = args[0]
|
||||
for i, arg := range args {
|
||||
if arg == "--" && i != len(args)-1 {
|
||||
devOptions.Copts.Args = args[i+1:]
|
||||
options.ContainerOptions.Args = args[i+1:]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
err = dev.DoDev(cmd.Context(), devOptions, sshConf, cmd.Flags(), f, transferImage)
|
||||
for _, fun := range devOptions.GetRollbackFuncList() {
|
||||
if fun != nil {
|
||||
if err = fun(); err != nil {
|
||||
log.Errorf("roll back failed, error: %s", err.Error())
|
||||
defer func() {
|
||||
for _, function := range options.GetRollbackFuncList() {
|
||||
if function != nil {
|
||||
if err := function(); err != nil {
|
||||
plog.G(context.Background()).Errorf("Rollback failed, error: %s", err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if err := options.InitClient(f); err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
|
||||
conf, hostConfig, err := dev.Parse(cmd.Flags(), options.ContainerOptions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return options.Main(cmd.Context(), sshConf, conf, hostConfig, imagePullSecretName, managerNamespace)
|
||||
},
|
||||
}
|
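The deferred loop above drains options.GetRollbackFuncList() so that partially applied changes are undone even when the dev run returns an error. A minimal sketch of the bookkeeping this assumes (GetRollbackFuncList appears in this diff; AddRollbackFunc and the struct are hypothetical, the real implementation lives in the dev package and is not shown here):

// Sketch only: rollback bookkeeping assumed by the defer block above.
type rollbackList struct {
	funcs []func() error
}

// AddRollbackFunc records an undo step to run if a later step fails.
func (r *rollbackList) AddRollbackFunc(f func() error) {
	r.funcs = append(r.funcs, f)
}

// GetRollbackFuncList returns the recorded undo steps in registration order.
func (r *rollbackList) GetRollbackFuncList() []func() error {
	return r.funcs
}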
||||
cmd.Flags().SortFlags = false
|
||||
cmd.Flags().StringToStringVarP(&devOptions.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to local PC, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to local PC, format is k=v, like: k1=v1,k2=v2")
|
||||
cmd.Flags().BoolVar(&config.Debug, "debug", false, "enable debug mode or not, true or false")
|
||||
cmd.Flags().StringVar(&config.Image, "image", config.Image, "use this image to startup container")
|
||||
cmd.Flags().BoolVar(&devOptions.NoProxy, "no-proxy", false, "Whether proxy remote workloads traffic into local or not, true: just startup container on local without inject containers to intercept traffic, false: intercept traffic and forward to local")
|
||||
cmdutil.AddContainerVarFlags(cmd, &devOptions.ContainerName, devOptions.ContainerName)
|
||||
cmd.Flags().StringToStringVarP(&options.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to local PC, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to local PC, format is k=v, like: k1=v1,k2=v2")
|
||||
cmd.Flags().BoolVar(&options.NoProxy, "no-proxy", false, "Whether proxy remote workloads traffic into local or not, true: just startup container on local without inject containers to intercept traffic, false: intercept traffic and forward to local")
|
||||
cmdutil.AddContainerVarFlags(cmd, &options.ContainerName, options.ContainerName)
|
||||
cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc("container", completion.ContainerCompletionFunc(f)))
|
||||
cmd.Flags().StringArrayVar(&devOptions.ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
|
||||
cmd.Flags().StringArrayVar(&devOptions.ExtraDomain, "extra-domain", []string{}, "Extra domain string, the resolved ip will add to route table, eg: --extra-domain test.abc.com --extra-domain foo.test.com")
|
||||
cmd.Flags().StringVar((*string)(&devOptions.ConnectMode), "connect-mode", string(dev.ConnectModeHost), "Connect to kubernetes network in container or in host, eg: ["+string(dev.ConnectModeContainer)+"|"+string(dev.ConnectModeHost)+"]")
|
||||
cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
|
||||
cmd.Flags().StringVar((*string)(&devOptions.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
|
||||
cmd.Flags().StringVar((*string)(&options.ConnectMode), "connect-mode", string(dev.ConnectModeHost), "Connect to kubernetes network in container or in host, eg: ["+string(dev.ConnectModeContainer)+"|"+string(dev.ConnectModeHost)+"]")
|
||||
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName, &options.Engine)
|
||||
cmd.Flags().StringVar(&managerNamespace, "manager-namespace", "", "The namespace where the traffic manager is to be found. Only works in cluster mode (install kubevpn server by helm)")
|
||||
|
||||
// diy docker options
|
||||
cmd.Flags().StringVar(&devOptions.DockerImage, "docker-image", "", "Overwrite the default K8s pod of the image")
|
||||
// origin docker options
|
||||
flags := cmd.Flags()
|
||||
flags.SetInterspersed(false)
|
||||
|
||||
// These are flags not stored in Config/HostConfig
|
||||
flags.BoolVarP(&devOptions.Options.Detach, "detach", "d", false, "Run container in background and print container ID")
|
||||
flags.StringVar(&devOptions.Options.Name, "name", "", "Assign a name to the container")
|
||||
flags.StringVar(&devOptions.Options.Pull, "pull", dev.PullImageMissing, `Pull image before running ("`+dev.PullImageAlways+`"|"`+dev.PullImageMissing+`"|"`+dev.PullImageNever+`")`)
|
||||
flags.BoolVarP(&devOptions.Options.Quiet, "quiet", "q", false, "Suppress the pull output")
|
||||
|
||||
// Add an explicit help that doesn't have a `-h` to prevent the conflict
|
||||
// with hostname
|
||||
flags.Bool("help", false, "Print usage")
|
||||
|
||||
command.AddPlatformFlag(flags, &devOptions.Options.Platform)
|
||||
command.AddTrustVerificationFlags(flags, &devOptions.Options.Untrusted, dockerCli.ContentTrustEnabled())
|
||||
devOptions.Copts = dev.AddFlags(flags)
|
||||
|
||||
_ = cmd.RegisterFlagCompletionFunc(
|
||||
"env",
|
||||
func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return os.Environ(), cobra.ShellCompDirectiveNoFileComp
|
||||
},
|
||||
)
|
||||
_ = cmd.RegisterFlagCompletionFunc(
|
||||
"env-file",
|
||||
func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return nil, cobra.ShellCompDirectiveDefault
|
||||
},
|
||||
)
|
||||
_ = cmd.RegisterFlagCompletionFunc(
|
||||
"network",
|
||||
dockercomp.NetworkNames(nil),
|
||||
)
|
||||
|
||||
addSshFlags(cmd, sshConf)
|
||||
cmd.Flags().StringVar(&options.DevImage, "dev-image", "", "Use to startup docker container, Default is pod image")
|
||||
// -- origin docker options -- start
|
||||
options.ContainerOptions = dev.AddFlags(cmd.Flags())
|
||||
cmd.Flags().StringVar(&options.RunOptions.Pull, "pull", dev.PullImageMissing, `Pull image before running ("`+dev.PullImageAlways+`"|"`+dev.PullImageMissing+`"|"`+dev.PullImageNever+`")`)
|
||||
command.AddPlatformFlag(cmd.Flags(), &options.RunOptions.Platform)
|
||||
// -- origin docker options -- end
|
||||
handler.AddExtraRoute(cmd.Flags(), &options.ExtraRouteInfo)
|
||||
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package cmds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
@@ -14,31 +13,45 @@ import (
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
"k8s.io/utils/pointer"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdDisconnect(f cmdutil.Factory) *cobra.Command {
|
||||
var all = false
|
||||
var clusterIDs []string
|
||||
cmd := &cobra.Command{
|
||||
Use: "disconnect",
|
||||
Short: i18n.T("Disconnect from kubernetes cluster network"),
|
||||
Long: templates.LongDesc(i18n.T(`Disconnect from kubernetes cluster network`)),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Disconnect from kubernetes cluster network
|
||||
|
||||
This command is to disconnect from cluster. after use command 'kubevpn connect',
|
||||
you can use this command to disconnect from a specific cluster.
|
||||
before disconnect, it will leave proxy resource and clone resource if resource depends on this cluster
|
||||
after disconnect it will also cleanup DNS and host
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# disconnect from cluster network and restore proxy resource
|
||||
kubevpn disconnect
|
||||
`)),
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
plog.InitLoggerForClient()
|
||||
err = daemon.StartupDaemon(cmd.Context())
|
||||
return err
|
||||
},
|
||||
Args: cobra.MatchAll(cobra.OnlyValidArgs),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 && all {
|
||||
return fmt.Errorf("either specify --all or specific ID, not both")
|
||||
return fmt.Errorf("either specify --all or ID, not both")
|
||||
}
|
||||
if len(args) == 0 && !all {
|
||||
return fmt.Errorf("either specify --all or specific ID")
|
||||
if len(clusterIDs) > 0 && all {
|
||||
return fmt.Errorf("either specify --all or cluster-id, not both")
|
||||
}
|
||||
if len(args) == 0 && !all && len(clusterIDs) == 0 {
|
||||
return fmt.Errorf("either specify --all or ID or cluster-id")
|
||||
}
|
||||
var ids *int32
|
||||
if len(args) > 0 {
|
||||
@@ -48,30 +61,33 @@ func CmdDisconnect(f cmdutil.Factory) *cobra.Command {
|
||||
}
|
||||
ids = pointer.Int32(int32(integer))
|
||||
}
|
||||
client, err := daemon.GetClient(false).Disconnect(
|
||||
cli, err := daemon.GetClient(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client, err := cli.Disconnect(
|
||||
cmd.Context(),
|
||||
&rpc.DisconnectRequest{
|
||||
ID: ids,
|
||||
All: pointer.Bool(all),
|
||||
ID: ids,
|
||||
ClusterIDs: clusterIDs,
|
||||
All: pointer.Bool(all),
|
||||
},
|
||||
)
|
||||
var resp *rpc.DisconnectResponse
|
||||
for {
|
||||
resp, err = client.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err == nil {
|
||||
fmt.Fprint(os.Stdout, resp.Message)
|
||||
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
|
||||
break
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, "disconnect successfully")
|
||||
err = util.PrintGRPCStream[rpc.DisconnectResponse](client)
|
||||
if err != nil {
|
||||
if status.Code(err) == codes.Canceled {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
_, _ = fmt.Fprint(os.Stdout, "Disconnect completed")
|
||||
return nil
|
||||
},
|
||||
}
|
||||
cmd.Flags().BoolVar(&all, "all", all, "Select all, disconnect from all cluster network")
|
||||
cmd.Flags().BoolVar(&all, "all", all, "Disconnect all cluster, disconnect from all cluster network")
|
||||
cmd.Flags().StringArrayVar(&clusterIDs, "cluster-id", []string{}, "Cluster id, command status -o yaml/json will show cluster-id")
|
||||
return cmd
|
||||
}
|
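Several commands in this changeset (disconnect, leave, logs, proxy, quit, remove, reset) now delegate their Recv loops to util.PrintGRPCStream. A minimal sketch of what such a generic helper can look like, assuming only the Recv method of the generated stream client and the GetMessage accessor that the response types already expose in this diff:

package util

import (
	"errors"
	"fmt"
	"io"
	"os"
)

// PrintGRPCStream drains a server-side gRPC stream and writes every message to
// stdout; it returns nil on a clean io.EOF and the original error otherwise.
// Callers in this changeset additionally treat codes.Canceled as a normal exit.
func PrintGRPCStream[T any](stream interface{ Recv() (*T, error) }) error {
	for {
		resp, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		// The generated *Response types expose GetMessage(); assert for it so
		// the helper stays generic over every response kind used above.
		if m, ok := any(resp).(interface{ GetMessage() string }); ok {
			fmt.Fprint(os.Stdout, m.GetMessage())
		}
	}
}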
||||
|
||||
@@ -1,20 +1,27 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"cmp"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/cli-runtime/pkg/printers"
|
||||
cmdget "k8s.io/kubectl/pkg/cmd/get"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/scheme"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
)
|
||||
|
||||
func CmdGet(f cmdutil.Factory) *cobra.Command {
|
||||
var printFlags = cmdget.NewGetPrintFlags()
|
||||
cmd := &cobra.Command{
|
||||
Use: "get",
|
||||
Hidden: true,
|
||||
@@ -27,38 +34,80 @@ func CmdGet(f cmdutil.Factory) *cobra.Command {
|
||||
# Get api-server behind of bastion host or ssh jump host
|
||||
kubevpn get deployment --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
|
||||
|
||||
# it also support ProxyJump, like
|
||||
# It also supports ProxyJump, like
|
||||
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
|
||||
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
|
||||
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
|
||||
kubevpn get service --ssh-alias <alias>
|
||||
`)),
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
// startup daemon process and sudo process
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
namespace, _, err := f.ToRawKubeConfigLoader().Namespace()
|
||||
ns, _, err := f.ToRawKubeConfigLoader().Namespace()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client, err := daemon.GetClient(false).Get(
|
||||
cli, err := daemon.GetClient(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client, err := cli.Get(
|
||||
cmd.Context(),
|
||||
&rpc.GetRequest{
|
||||
Namespace: namespace,
|
||||
Namespace: ns,
|
||||
Resource: args[0],
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
marshal, err := yaml.Marshal(client.Metadata)
|
||||
if err != nil {
|
||||
return err
|
||||
w := printers.GetNewTabWriter(os.Stdout)
|
||||
var toPrinter = func() (printers.ResourcePrinterFunc, error) {
|
||||
var flags = printFlags.Copy()
|
||||
_ = flags.EnsureWithNamespace()
|
||||
printer, err := flags.ToPrinter()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
printer, err = printers.NewTypeSetter(scheme.Scheme).WrapToPrinter(printer, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
outputOption := cmd.Flags().Lookup("output").Value.String()
|
||||
if strings.Contains(outputOption, "custom-columns") || outputOption == "yaml" || strings.Contains(outputOption, "json") {
|
||||
} else {
|
||||
printer = &cmdget.TablePrinter{Delegate: printer}
|
||||
}
|
||||
return printer.PrintObj, nil
|
||||
}
|
||||
fmt.Fprint(os.Stdout, string(marshal))
|
||||
return nil
|
||||
var list []*v1.PartialObjectMetadata
|
||||
for _, m := range client.Metadata {
|
||||
var data v1.PartialObjectMetadata
|
||||
err = json.Unmarshal([]byte(m), &data)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
list = append(list, &data)
|
||||
}
|
||||
slices.SortStableFunc(list, func(a, b *v1.PartialObjectMetadata) int {
|
||||
compare := cmp.Compare(a.GetNamespace(), b.GetNamespace())
|
||||
if compare == 0 {
|
||||
return cmp.Compare(a.GetName(), b.GetName())
|
||||
}
|
||||
return compare
|
||||
})
|
||||
for _, m := range list {
|
||||
printer, err := toPrinter()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_ = printer.PrintObj(m, w)
|
||||
}
|
||||
return w.Flush()
|
||||
},
|
||||
}
|
||||
printFlags.AddFlags(cmd)
|
||||
return cmd
|
||||
}
|
||||
|
||||
43
cmd/kubevpn/cmds/imagecopy.go
Normal file
@@ -0,0 +1,43 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
|
||||
)
|
||||
|
||||
func CmdImageCopy(cmdutil.Factory) *cobra.Command {
|
||||
var imageCmd = &cobra.Command{
|
||||
Use: "image <cmd>",
|
||||
Short: "copy images",
|
||||
}
|
||||
|
||||
copyCmd := &cobra.Command{
|
||||
Use: "copy <src_image_ref> <dst_image_ref>",
|
||||
Aliases: []string{"cp"},
|
||||
Short: "copy or re-tag image",
|
||||
Long: `Copy or re-tag an image. This works between registries and only pulls layers
|
||||
that do not exist at the target. In the same registry it attempts to mount
|
||||
the layers between repositories. And within the same repository it only
|
||||
sends the manifest with the new tag.`,
|
||||
Example: `
|
||||
# copy an image
|
||||
kubevpn image copy ghcr.io/kubenetworks/kubevpn:latest registry.example.org/kubevpn/kubevpn:latest
|
||||
|
||||
# re-tag an image
|
||||
kubevpn image copy ghcr.io/kubenetworks/kubevpn:latest ghcr.io/kubenetworks/kubevpn:v2.3.4`,
|
||||
Args: cobra.MatchAll(cobra.ExactArgs(2), cobra.OnlyValidArgs),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
plog.InitLoggerForClient()
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
err := regctl.TransferImageWithRegctl(cmd.Context(), args[0], args[1])
|
||||
return err
|
||||
},
|
||||
}
|
||||
imageCmd.AddCommand(copyCmd)
|
||||
return imageCmd
|
||||
}
|
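For completeness, the same transfer can be triggered programmatically through the helper the RunE above delegates to; the snippet below mirrors that call (the destination registry is only an illustrative value):

package main

import (
	"context"

	"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
)

func main() {
	// Equivalent of: kubevpn image copy <src_image_ref> <dst_image_ref>
	src := "ghcr.io/kubenetworks/kubevpn:latest"
	dst := "registry.example.org/kubevpn/kubevpn:latest" // illustrative target
	if err := regctl.TransferImageWithRegctl(context.Background(), src, dst); err != nil {
		panic(err)
	}
}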
||||
@@ -1,10 +1,6 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -12,40 +8,55 @@ import (
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdLeave(f cmdutil.Factory) *cobra.Command {
|
||||
var leaveCmd = &cobra.Command{
|
||||
Use: "leave",
|
||||
Short: "Leave proxy resource",
|
||||
Long: `leave proxy resource and restore it to origin`,
|
||||
Short: i18n.T("Leave proxy resource"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Leave proxy resource and restore it to origin
|
||||
|
||||
This command is used to leave proxy resources. after use command 'kubevpn proxy xxx',
|
||||
you can use this command to leave proxy resources.
|
||||
you can just leave proxy resources which do proxy by yourself.
|
||||
and the last one leave proxy resource, it will also restore workloads container.
|
||||
otherwise it will keep containers [vpn, envoy-proxy] until last one to leave.
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# leave proxy resource and restore it to origin
|
||||
kubevpn leave deployment/authors
|
||||
`)),
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
leave, err := daemon.GetClient(false).Leave(cmd.Context(), &rpc.LeaveRequest{
|
||||
_, ns, err := util.ConvertToKubeConfigBytes(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cli, err := daemon.GetClient(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp, err := cli.Leave(cmd.Context(), &rpc.LeaveRequest{
|
||||
Namespace: ns,
|
||||
Workloads: args,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
recv, err := leave.Recv()
|
||||
if err == io.EOF {
|
||||
err = util.PrintGRPCStream[rpc.LeaveResponse](resp)
|
||||
if err != nil {
|
||||
if status.Code(err) == codes.Canceled {
|
||||
return nil
|
||||
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, recv.GetMessage())
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
return leaveCmd
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
)
|
||||
|
||||
func CmdList(f cmdutil.Factory) *cobra.Command {
|
||||
@@ -20,12 +20,16 @@ func CmdList(f cmdutil.Factory) *cobra.Command {
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# list proxy resources
|
||||
kubevpn list
|
||||
`)),
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := daemon.GetClient(true).List(
|
||||
cli, err := daemon.GetClient(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client, err := cli.List(
|
||||
cmd.Context(),
|
||||
&rpc.ListRequest{},
|
||||
)
|
||||
@@ -35,6 +39,7 @@ func CmdList(f cmdutil.Factory) *cobra.Command {
|
||||
fmt.Println(client.GetMessage())
|
||||
return nil
|
||||
},
|
||||
Hidden: true,
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -1,10 +1,6 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -12,47 +8,51 @@ import (
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdLogs(f cmdutil.Factory) *cobra.Command {
|
||||
req := &rpc.LogRequest{}
|
||||
cmd := &cobra.Command{
|
||||
Use: "logs",
|
||||
Short: i18n.T("Log kubevpn daemon server"),
|
||||
Long: templates.LongDesc(i18n.T(`Log kubevpn daemon server`)),
|
||||
Short: i18n.T("Log kubevpn daemon grpc server"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Print the logs for kubevpn daemon grpc server. it will show sudo daemon and daemon grpc server log in both
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# show log for kubevpn daemon server
|
||||
kubevpn logs
|
||||
# follow more log
|
||||
kubevpn logs -f
|
||||
`)),
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
plog.InitLoggerForClient()
|
||||
// startup daemon process and sudo process
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := daemon.GetClient(true).Logs(cmd.Context(), req)
|
||||
cli, err := daemon.GetClient(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var resp *rpc.LogResponse
|
||||
for {
|
||||
resp, err = client.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err == nil {
|
||||
fmt.Fprintln(os.Stdout, resp.Message)
|
||||
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
|
||||
client, err := cli.Logs(cmd.Context(), req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = util.PrintGRPCStream[rpc.LogResponse](client)
|
||||
if err != nil {
|
||||
if status.Code(err) == codes.Canceled {
|
||||
return nil
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
cmd.Flags().BoolVarP(&req.Follow, "follow", "f", false, "Specify if the logs should be streamed.")
|
||||
cmd.Flags().Int32VarP(&req.Lines, "number", "N", 10, "Lines of recent log file to display.")
|
||||
return cmd
|
||||
}
|
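The --follow and --number flags map onto the daemon-side log reader, which is not part of this diff. As an illustration only, returning the last N lines of a log file (what serving "kubevpn logs -N 10" amounts to) can be as simple as:

package main

import (
	"os"
	"strings"
)

// tailLines is an illustration, not the daemon's actual implementation: read
// the whole log file and keep only the last n lines.
func tailLines(path string, n int) ([]string, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	lines := strings.Split(strings.TrimRight(string(data), "\n"), "\n")
	if len(lines) > n {
		lines = lines[len(lines)-n:]
	}
	return lines, nil
}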
||||
|
||||
@@ -12,14 +12,17 @@ import (
|
||||
var (
|
||||
optionsExample = templates.Examples(i18n.T(`
|
||||
# Print flags inherited by all commands
|
||||
kubevpn options`))
|
||||
kubevpn options
|
||||
`))
|
||||
)
|
||||
|
||||
func CmdOptions(cmdutil.Factory) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "options",
|
||||
Short: i18n.T("Print the list of flags inherited by all commands"),
|
||||
Long: i18n.T("Print the list of flags inherited by all commands"),
|
||||
Use: "options",
|
||||
Short: i18n.T("Print the list of flags inherited by all commands"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Print the list of flags inherited by all commands
|
||||
`)),
|
||||
Example: optionsExample,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
cmd.Usage()
|
||||
|
||||
@@ -3,7 +3,6 @@ package cmds
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -15,21 +14,38 @@ import (
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util/regctl"
|
||||
)
|
||||
|
||||
func CmdProxy(f cmdutil.Factory) *cobra.Command {
|
||||
var headers = make(map[string]string)
|
||||
var portmap []string
|
||||
var connect = handler.ConnectOptions{}
|
||||
var sshConf = &util.SshConfig{}
|
||||
var extraRoute = &handler.ExtraRouteInfo{}
|
||||
var sshConf = &pkgssh.SshConfig{}
|
||||
var transferImage, foreground bool
|
||||
var imagePullSecretName string
|
||||
var managerNamespace string
|
||||
cmd := &cobra.Command{
|
||||
Use: "proxy",
|
||||
Short: i18n.T("Proxy kubernetes workloads inbound traffic into local PC"),
|
||||
Long: templates.LongDesc(i18n.T(`Proxy kubernetes workloads inbound traffic into local PC`)),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Proxy kubernetes workloads inbound traffic into local PC
|
||||
|
||||
Proxy k8s workloads inbound traffic into local PC with/without service mesh.
|
||||
Without service mesh, it will proxy all inbound traffic into local PC, even traffic protocol is layer 4(Transport layer).
|
||||
With service mesh, it will proxy traffic which has special header to local PC, support protocol HTTP,GRPC,THRIFT, WebSocket...
|
||||
After proxy resource, it also connected to cluster network automatically. so just startup your app in local PC
|
||||
and waiting for inbound traffic, make debug more easier.
|
||||
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# Reverse proxy
|
||||
- proxy deployment
|
||||
@@ -43,26 +59,45 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
|
||||
or
|
||||
kubevpn proxy deployment authors productpage
|
||||
|
||||
# Reverse proxy with mesh, traffic with header a=1, will hit local PC, otherwise no effect
|
||||
kubevpn proxy service/productpage --headers a=1
|
||||
# Reverse proxy with mesh, traffic with header foo=bar, will hit local PC, otherwise no effect
|
||||
kubevpn proxy service/productpage --headers foo=bar
|
||||
|
||||
# Reverse proxy with mesh, traffic with header foo=bar and env=dev, will hit local PC, otherwise no effect
|
||||
kubevpn proxy service/productpage --headers foo=bar --headers env=dev
|
||||
|
||||
# Connect to api-server behind of bastion host or ssh jump host and proxy kubernetes resource traffic into local PC
|
||||
kubevpn proxy deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem --headers a=1
|
||||
kubevpn proxy deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem --headers foo=bar
|
||||
|
||||
# it also support ProxyJump, like
|
||||
# It also supports ProxyJump, like
|
||||
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
|
||||
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
|
||||
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
|
||||
kubevpn proxy service/productpage --ssh-alias <alias> --headers a=1
|
||||
kubevpn proxy service/productpage --ssh-alias <alias> --headers foo=bar
|
||||
|
||||
`)),
|
||||
# Support ssh auth GSSAPI
|
||||
kubevpn proxy service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
|
||||
kubevpn proxy service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
|
||||
kubevpn proxy service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
|
||||
|
||||
# Support port map, you can proxy container port to local port by command:
|
||||
kubevpn proxy deployment/productpage --portmap 80:8080
|
||||
|
||||
# Proxy container port 9080 to local port 8080 of TCP protocol
|
||||
kubevpn proxy deployment/productpage --portmap 9080:8080
|
||||
|
||||
# Proxy container port 9080 to local port 5000 of UDP protocol
|
||||
kubevpn proxy deployment/productpage --portmap udp/9080:5000
|
||||
|
||||
# Auto proxy container port to same local port, and auto detect protocol
|
||||
kubevpn proxy deployment/productpage
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
plog.InitLoggerForClient()
|
||||
if err = daemon.StartupDaemon(cmd.Context()); err != nil {
|
||||
return err
|
||||
}
|
||||
// not support temporally
|
||||
if connect.Engine == config.EngineGvisor {
|
||||
return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
|
||||
if transferImage {
|
||||
err = regctl.TransferImageWithRegctl(cmd.Context(), config.OriginImage, config.Image)
|
||||
}
|
||||
return err
|
||||
},
|
||||
@@ -77,93 +112,90 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
|
||||
return cmdutil.UsageErrorf(cmd, usageString)
|
||||
}
|
||||
|
||||
bytes, ns, err := util.ConvertToKubeconfigBytes(f)
|
||||
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !sshConf.IsEmpty() {
|
||||
if ip := util.GetAPIServerFromKubeConfigBytes(bytes); ip != nil {
|
||||
extraRoute.ExtraCIDR = append(extraRoute.ExtraCIDR, ip.String())
|
||||
}
|
||||
}
|
||||
// todo: wrap the doConnect method? use the client internally to send it to the daemon?
|
||||
cli := daemon.GetClient(false)
|
||||
client, err := cli.Proxy(
|
||||
cli, err := daemon.GetClient(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp, err := cli.Proxy(
|
||||
cmd.Context(),
|
||||
&rpc.ConnectRequest{
|
||||
&rpc.ProxyRequest{
|
||||
KubeconfigBytes: string(bytes),
|
||||
Namespace: ns,
|
||||
Headers: connect.Headers,
|
||||
Headers: headers,
|
||||
PortMap: portmap,
|
||||
Workloads: args,
|
||||
ExtraCIDR: connect.ExtraCIDR,
|
||||
ExtraDomain: connect.ExtraDomain,
|
||||
UseLocalDNS: connect.UseLocalDNS,
|
||||
ExtraRoute: extraRoute.ToRPC(),
|
||||
Engine: string(connect.Engine),
|
||||
SshJump: sshConf.ToRPC(),
|
||||
TransferImage: transferImage,
|
||||
Image: config.Image,
|
||||
Level: int32(log.DebugLevel),
|
||||
OriginKubeconfigPath: util.GetKubeconfigPath(f),
|
||||
ImagePullSecretName: imagePullSecretName,
|
||||
Level: int32(util.If(config.Debug, log.DebugLevel, log.InfoLevel)),
|
||||
OriginKubeconfigPath: util.GetKubeConfigPath(f),
|
||||
ManagerNamespace: managerNamespace,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var resp *rpc.ConnectResponse
|
||||
for {
|
||||
resp, err = client.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err == nil {
|
||||
fmt.Fprint(os.Stdout, resp.Message)
|
||||
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
|
||||
return nil
|
||||
} else {
|
||||
err = util.PrintGRPCStream[rpc.ConnectResponse](resp)
|
||||
if err != nil {
|
||||
if status.Code(err) == codes.Canceled {
|
||||
err = leave(cli, ns, args)
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
util.Print(os.Stdout, "Now you can access resources in the kubernetes cluster, enjoy it :)")
|
||||
util.Print(os.Stdout, config.Slogan)
|
||||
// hangup
|
||||
if foreground {
|
||||
// leave from cluster resources
|
||||
<-cmd.Context().Done()
|
||||
|
||||
stream, err := cli.Leave(context.Background(), &rpc.LeaveRequest{
|
||||
Workloads: args,
|
||||
})
|
||||
var resp *rpc.LeaveResponse
|
||||
for {
|
||||
resp, err = stream.Recv()
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, resp.Message)
|
||||
err = leave(cli, ns, args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringToStringVarP(&connect.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to local PC, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to local PC, format is k=v, like: k1=v1,k2=v2")
|
||||
cmd.Flags().BoolVar(&config.Debug, "debug", false, "Enable debug mode or not, true or false")
|
||||
cmd.Flags().StringVar(&config.Image, "image", config.Image, "Use this image to startup container")
|
||||
cmd.Flags().StringArrayVar(&connect.ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
|
||||
cmd.Flags().StringArrayVar(&connect.ExtraDomain, "extra-domain", []string{}, "Extra domain string, the resolved ip will add to route table, eg: --extra-domain test.abc.com --extra-domain foo.test.com")
|
||||
cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
|
||||
cmd.Flags().StringVar((*string)(&connect.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
|
||||
cmd.Flags().StringToStringVarP(&headers, "headers", "H", map[string]string{}, "Traffic with special headers (use `and` to match all headers) with reverse it to local PC, If not special, redirect all traffic to local PC. format: <KEY>=<VALUE> eg: --headers foo=bar --headers env=dev")
|
||||
cmd.Flags().StringArrayVar(&portmap, "portmap", []string{}, "Port map, map container port to local port, format: [tcp/udp]/containerPort:localPort, If not special, localPort will use containerPort. eg: tcp/80:8080 or udp/5000:5001 or 80 or 80:8080")
|
||||
handler.AddCommonFlags(cmd.Flags(), &transferImage, &imagePullSecretName, &connect.Engine)
|
||||
cmd.Flags().BoolVar(&foreground, "foreground", false, "foreground hang up")
|
||||
cmd.Flags().StringVar(&managerNamespace, "manager-namespace", "", "The namespace where the traffic manager is to be found. Only works in cluster mode (install kubevpn server by helm)")
|
||||
|
||||
addSshFlags(cmd, sshConf)
|
||||
handler.AddExtraRoute(cmd.Flags(), extraRoute)
|
||||
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
|
||||
cmd.ValidArgsFunction = utilcomp.ResourceTypeAndNameCompletionFunc(f)
|
||||
return cmd
|
||||
}
|
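The --portmap flag registered above accepts "[tcp/udp/]containerPort[:localPort]". The project's own parser is not shown in this diff; a hypothetical helper that accepts the documented forms (80, 80:8080, udp/9080:5000) could look like:

package main

import (
	"strconv"
	"strings"
)

// parsePortMap is a hypothetical illustration of the --portmap format described
// above; it is not the parser used by kubevpn itself.
func parsePortMap(s string) (protocol string, containerPort, localPort int, err error) {
	protocol = "tcp" // default when no protocol prefix is given
	if proto, rest, ok := strings.Cut(s, "/"); ok {
		protocol, s = proto, rest
	}
	container, local, ok := strings.Cut(s, ":")
	if !ok {
		local = container // no local port given: reuse the container port
	}
	if containerPort, err = strconv.Atoi(container); err != nil {
		return "", 0, 0, err
	}
	if localPort, err = strconv.Atoi(local); err != nil {
		return "", 0, 0, err
	}
	return protocol, containerPort, localPort, nil
}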
||||
|
||||
func addSshFlags(cmd *cobra.Command, sshConf *util.SshConfig) {
|
||||
// for ssh jumper host
|
||||
cmd.Flags().StringVar(&sshConf.Addr, "ssh-addr", "", "Optional ssh jump server address to dial as <hostname>:<port>, eg: 127.0.0.1:22")
|
||||
cmd.Flags().StringVar(&sshConf.User, "ssh-username", "", "Optional username for ssh jump server")
|
||||
cmd.Flags().StringVar(&sshConf.Password, "ssh-password", "", "Optional password for ssh jump server")
|
||||
cmd.Flags().StringVar(&sshConf.Keyfile, "ssh-keyfile", "", "Optional file with private key for SSH authentication")
|
||||
cmd.Flags().StringVar(&sshConf.ConfigAlias, "ssh-alias", "", "Optional config alias with ~/.ssh/config for SSH authentication")
|
||||
cmd.Flags().StringVar(&sshConf.RemoteKubeconfig, "remote-kubeconfig", "", "Remote kubeconfig abstract path of ssh server, default is /$ssh-user/.kube/config")
|
||||
lookup := cmd.Flags().Lookup("remote-kubeconfig")
|
||||
lookup.NoOptDefVal = "~/.kube/config"
|
||||
func leave(cli rpc.DaemonClient, ns string, args []string) error {
|
||||
stream, err := cli.Leave(context.Background(), &rpc.LeaveRequest{
|
||||
Namespace: ns,
|
||||
Workloads: args,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = util.PrintGRPCStream[rpc.LeaveResponse](stream)
|
||||
if err != nil {
|
||||
if status.Code(err) == codes.Canceled {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package cmds
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -13,23 +12,27 @@ import (
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdQuit(f cmdutil.Factory) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "quit",
|
||||
Short: i18n.T("Quit kubevpn daemon server"),
|
||||
Long: templates.LongDesc(i18n.T(`Disconnect from cluster, leave proxy resources, and quit daemon`)),
|
||||
Short: i18n.T("Quit kubevpn daemon grpc server"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Disconnect from cluster, leave proxy resources, quit daemon grpc server and cleanup dns/hosts
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# before quit kubevpn, it will leave proxy resources to origin and disconnect from cluster
|
||||
kubevpn quit
|
||||
`)),
|
||||
`)),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
_ = quit(cmd.Context(), true)
|
||||
_ = quit(cmd.Context(), false)
|
||||
fmt.Fprint(os.Stdout, "quit successfully")
|
||||
util.CleanExtensionLib()
|
||||
_, _ = fmt.Fprint(os.Stdout, "Exited")
|
||||
return nil
|
||||
},
|
||||
}
|
||||
@@ -37,26 +40,20 @@ func CmdQuit(f cmdutil.Factory) *cobra.Command {
|
||||
}
|
||||
|
||||
func quit(ctx context.Context, isSudo bool) error {
|
||||
cli := daemon.GetClient(isSudo)
|
||||
if cli == nil {
|
||||
return nil
|
||||
cli, err := daemon.GetClient(isSudo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client, err := cli.Quit(ctx, &rpc.QuitRequest{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var resp *rpc.QuitResponse
|
||||
for {
|
||||
resp, err = client.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err == nil {
|
||||
fmt.Fprint(os.Stdout, resp.Message)
|
||||
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
|
||||
err = util.PrintGRPCStream[rpc.QuitResponse](client)
|
||||
if err != nil {
|
||||
if status.Code(err) == codes.Canceled {
|
||||
return nil
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,10 +1,6 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -12,40 +8,48 @@ import (
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdRemove(f cmdutil.Factory) *cobra.Command {
|
||||
var cmd = &cobra.Command{
|
||||
Use: "remove",
|
||||
Short: "Remove cloned resource",
|
||||
Long: `Remove cloned resource`,
|
||||
Short: "Remove clone resource",
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Remove clone resource
|
||||
|
||||
This command is design to remove clone resources, after use command 'kubevpn clone xxx',
|
||||
it will generate and create a new resource in target k8s cluster with format [resource_name]_clone_xxxxx,
|
||||
use this command to remove this created resources.
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# leave proxy resources to origin
|
||||
kubevpn remove deployment/authors
|
||||
`)),
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
leave, err := daemon.GetClient(false).Remove(cmd.Context(), &rpc.RemoveRequest{
|
||||
cli, err := daemon.GetClient(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp, err := cli.Remove(cmd.Context(), &rpc.RemoveRequest{
|
||||
Workloads: args,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
recv, err := leave.Recv()
|
||||
if err == io.EOF {
|
||||
err = util.PrintGRPCStream[rpc.RemoveResponse](resp)
|
||||
if err != nil {
|
||||
if status.Code(err) == codes.Canceled {
|
||||
return nil
|
||||
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, recv.GetMessage())
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
|
||||
@@ -1,64 +1,86 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdReset(factory cmdutil.Factory) *cobra.Command {
|
||||
var connect = handler.ConnectOptions{}
|
||||
var sshConf = &util.SshConfig{}
|
||||
func CmdReset(f cmdutil.Factory) *cobra.Command {
|
||||
var sshConf = &pkgssh.SshConfig{}
|
||||
cmd := &cobra.Command{
|
||||
Use: "reset",
|
||||
Short: "Reset all changes made by KubeVPN",
|
||||
Long: `Reset all changes made by KubeVPN`,
|
||||
Short: "Reset workloads to origin status",
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Reset workloads to origin status
|
||||
|
||||
Reset will remove injected container envoy-proxy and vpn, and restore service mesh rules.
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# Reset default namespace
|
||||
kubevpn reset
|
||||
# Reset default namespace workloads deployment/productpage
|
||||
kubevpn reset deployment/productpage
|
||||
|
||||
# Reset another namespace test
|
||||
kubevpn reset -n test
|
||||
# Reset another namespace test workloads deployment/productpage
|
||||
kubevpn reset deployment/productpage -n test
|
||||
|
||||
# Reset cluster api-server behind of bastion host or ssh jump host
|
||||
kubevpn reset --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
|
||||
# Reset workloads deployment/productpage which api-server behind of bastion host or ssh jump host
|
||||
kubevpn reset deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
|
||||
|
||||
# it also support ProxyJump, like
|
||||
# It also supports ProxyJump, like
|
||||
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
|
||||
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
|
||||
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
|
||||
kubevpn reset --ssh-alias <alias>
|
||||
kubevpn reset deployment/productpage --ssh-alias <alias>
|
||||
|
||||
`)),
|
||||
# Support ssh auth GSSAPI
|
||||
kubevpn reset deployment/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
|
||||
kubevpn reset deployment/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
|
||||
kubevpn reset deployment/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
return handler.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false)
|
||||
plog.InitLoggerForClient()
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
util.InitLogger(false)
|
||||
if err := connect.InitClient(factory); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
err := connect.Reset(cmd.Context())
|
||||
Args: cobra.MatchAll(cobra.ExactArgs(1)),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, "done")
|
||||
cli, err := daemon.GetClient(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req := &rpc.ResetRequest{
|
||||
KubeconfigBytes: string(bytes),
|
||||
Namespace: ns,
|
||||
Workloads: args,
|
||||
SshJump: sshConf.ToRPC(),
|
||||
}
|
||||
resp, err := cli.Reset(cmd.Context(), req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = util.PrintGRPCStream[rpc.ResetResponse](resp)
|
||||
if err != nil {
|
||||
if status.Code(err) == codes.Canceled {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// for ssh jumper host
|
||||
cmd.Flags().StringVar(&sshConf.Addr, "ssh-addr", "", "Optional ssh jump server address to dial as <hostname>:<port>, eg: 127.0.0.1:22")
|
||||
cmd.Flags().StringVar(&sshConf.User, "ssh-username", "", "Optional username for ssh jump server")
|
||||
cmd.Flags().StringVar(&sshConf.Password, "ssh-password", "", "Optional password for ssh jump server")
|
||||
cmd.Flags().StringVar(&sshConf.Keyfile, "ssh-keyfile", "", "Optional file with private key for SSH authentication")
|
||||
cmd.Flags().StringVar(&sshConf.ConfigAlias, "ssh-alias", "", "Optional config alias with ~/.ssh/config for SSH authentication")
|
||||
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -2,33 +2,36 @@ package cmds
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/cli-runtime/pkg/genericclioptions"
|
||||
"k8s.io/client-go/rest"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/util/homedir"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
)
|
||||
|
||||
func NewKubeVPNCommand() *cobra.Command {
|
||||
var cmd = &cobra.Command{
|
||||
Use: "kubevpn",
|
||||
Short: i18n.T("kubevpn connect to Kubernetes cluster network"),
|
||||
Short: i18n.T("KubeVPN offers a Cloud-Native Dev Environment that seamlessly connects to your Kubernetes cluster network."),
|
||||
Long: templates.LongDesc(`
|
||||
kubevpn connect to Kubernetes cluster network.
|
||||
`),
|
||||
KubeVPN offers a Cloud-Native Dev Environment that seamlessly connects to your Kubernetes cluster network.
|
||||
`),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
cmd.Help()
|
||||
},
|
||||
}
|
||||
|
||||
flags := cmd.PersistentFlags()
|
||||
configFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
|
||||
configFlags := genericclioptions.NewConfigFlags(true)
|
||||
configFlags.WrapConfigFn = func(c *rest.Config) *rest.Config {
|
||||
if path, ok := os.LookupEnv(config.EnvSSHJump); ok {
|
||||
kubeconfigBytes, err := os.ReadFile(path)
|
||||
@@ -41,7 +44,7 @@ func NewKubeVPNCommand() *cobra.Command {
|
||||
return c
|
||||
}
|
||||
configFlags.AddFlags(flags)
|
||||
matchVersionFlags := cmdutil.NewMatchVersionFlags(configFlags)
|
||||
matchVersionFlags := cmdutil.NewMatchVersionFlags(&warp{ConfigFlags: configFlags})
|
||||
matchVersionFlags.AddFlags(flags)
|
||||
factory := cmdutil.NewFactory(matchVersionFlags)
|
||||
|
||||
@@ -58,31 +61,35 @@ func NewKubeVPNCommand() *cobra.Command {
|
||||
CmdDev(factory),
|
||||
// Hidden, Server Commands (DO NOT USE IT !!!)
|
||||
CmdControlPlane(factory),
|
||||
CmdServe(factory),
|
||||
CmdServer(factory),
|
||||
CmdDaemon(factory),
|
||||
CmdWebhook(factory),
|
||||
CmdSyncthing(factory),
|
||||
},
|
||||
},
|
||||
{
|
||||
Message: "Management commands",
|
||||
Message: "Management commands:",
|
||||
Commands: []*cobra.Command{
|
||||
CmdStatus(factory),
|
||||
CmdList(factory),
|
||||
CmdAlias(factory),
|
||||
CmdGet(factory),
|
||||
CmdConfig(factory),
|
||||
CmdCp(factory),
|
||||
CmdSSH(factory),
|
||||
CmdSSHDaemon(factory),
|
||||
CmdImageCopy(factory),
|
||||
CmdLogs(factory),
|
||||
CmdCp(factory),
|
||||
CmdReset(factory),
|
||||
CmdUninstall(factory),
|
||||
CmdQuit(factory),
|
||||
},
|
||||
},
|
||||
{
|
||||
Message: "Other commands",
|
||||
Message: "Other commands:",
|
||||
Commands: []*cobra.Command{
|
||||
CmdStatus(factory),
|
||||
CmdVersion(factory),
|
||||
CmdUpgrade(factory),
|
||||
CmdVersion(factory),
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -91,3 +98,15 @@ func NewKubeVPNCommand() *cobra.Command {
|
||||
cmd.AddCommand(CmdOptions(factory))
|
||||
return cmd
|
||||
}
|
||||
|
||||
type warp struct {
|
||||
*genericclioptions.ConfigFlags
|
||||
}
|
||||
|
||||
func (f *warp) ToRawKubeConfigLoader() clientcmd.ClientConfig {
|
||||
if strings.HasPrefix(ptr.Deref[string](f.KubeConfig, ""), "~") {
|
||||
home := homedir.HomeDir()
|
||||
f.KubeConfig = ptr.To(strings.Replace(*f.KubeConfig, "~", home, 1))
|
||||
}
|
||||
return f.ConfigFlags.ToRawKubeConfigLoader()
|
||||
}
|
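The warp wrapper above exists so that a --kubeconfig value such as "~/.kube/config" is expanded before client-go tries to load it. A small illustration of the behaviour (hypothetical snippet in the same cmds package, relying on the genericclioptions import already present in this file; not part of the diff):

// Illustration only: how the warp wrapper expands "~" before loading.
func exampleTildeExpansion() {
	kubeconfig := "~/.kube/config"
	flags := genericclioptions.NewConfigFlags(true)
	flags.KubeConfig = &kubeconfig

	w := &warp{ConfigFlags: flags}
	// "~" is replaced with homedir.HomeDir() before the loader is built.
	_ = w.ToRawKubeConfigLoader()
}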
||||
|
||||
@@ -1,63 +0,0 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"go.uber.org/automaxprocs/maxprocs"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/core"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
)
|
||||
|
||||
func CmdServe(_ cmdutil.Factory) *cobra.Command {
|
||||
var route = &core.Route{}
|
||||
cmd := &cobra.Command{
|
||||
Use: "serve",
|
||||
Hidden: true,
|
||||
Short: "Server side, startup traffic manager, forward inbound and outbound traffic",
|
||||
Long: templates.LongDesc(`Server side, startup traffic manager, forward inbound and outbound traffic.`),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# serve node
|
||||
kubevpn serve -L "tcp://:10800" -L "tun://127.0.0.1:8422?net=223.254.0.123/32"
|
||||
`)),
|
||||
PreRun: func(*cobra.Command, []string) {
|
||||
util.InitLogger(config.Debug)
|
||||
runtime.GOMAXPROCS(0)
|
||||
go util.StartupPProf(0)
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
_, _ = maxprocs.Set(maxprocs.Logger(nil))
|
||||
err := handler.RentIPIfNeeded(route)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
err := handler.ReleaseIPIfNeeded()
|
||||
if err != nil {
|
||||
log.Errorf("release ip failed: %v", err)
|
||||
}
|
||||
}()
|
||||
servers, err := handler.Parse(*route)
|
||||
if err != nil {
|
||||
log.Errorf("parse server failed: %v", err)
|
||||
return err
|
||||
}
|
||||
ctx := cmd.Context()
|
||||
return handler.Run(ctx, servers)
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringArrayVarP(&route.ServeNodes, "node", "L", []string{}, "Startup node server. eg: tcp://localhost:1080")
|
||||
cmd.Flags().StringVarP(&route.ChainNode, "chain", "F", "", "Forward chain. eg: tcp://192.168.1.100:2345")
|
||||
cmd.Flags().BoolVar(&config.Debug, "debug", false, "Enable debug log or not")
|
||||
return cmd
|
||||
}
|
||||
60
cmd/kubevpn/cmds/server.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"go.uber.org/automaxprocs/maxprocs"
|
||||
glog "gvisor.dev/gvisor/pkg/log"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/core"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdServer(cmdutil.Factory) *cobra.Command {
|
||||
var route = &core.Route{}
|
||||
cmd := &cobra.Command{
|
||||
Use: "server",
|
||||
Hidden: true,
|
||||
Short: "Server side, startup traffic manager, forward inbound and outbound traffic",
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Server side, startup traffic manager, forward inbound and outbound traffic.
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# server listener
|
||||
kubevpn server -l "tcp://:10800" -l "tun://127.0.0.1:8422?net=198.19.0.123/32"
|
||||
`)),
|
||||
PreRun: func(*cobra.Command, []string) {
|
||||
runtime.GOMAXPROCS(0)
|
||||
go util.StartupPProfForServer(config.PProfPort)
|
||||
glog.SetTarget(plog.ServerEmitter{Writer: &glog.Writer{Next: os.Stderr}})
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
_, _ = maxprocs.Set(maxprocs.Logger(nil))
|
||||
ctx := cmd.Context()
|
||||
logger := plog.InitLoggerForServer()
|
||||
logger.SetLevel(util.If(config.Debug, log.DebugLevel, log.InfoLevel))
|
||||
servers, err := handler.Parse(*route)
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("Parse server failed: %v", err)
|
||||
return err
|
||||
}
|
||||
return handler.Run(plog.WithLogger(ctx, logger), servers)
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringArrayVarP(&route.Listeners, "listener", "l", []string{}, "Startup listener server. eg: tcp://localhost:1080")
|
||||
cmd.Flags().StringVarP(&route.Forwarder, "forwarder", "f", "", "Special forwarder. eg: tcp://192.168.1.100:2345")
|
||||
cmd.Flags().BoolVar(&config.Debug, "debug", false, "Enable debug log or not")
|
||||
return cmd
|
||||
}
|
||||
@@ -1,65 +1,195 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/google/uuid"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
"golang.org/x/net/websocket"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
"k8s.io/kubectl/pkg/util/term"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/handler"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
// CmdSSH
|
||||
// Set the local tun IP to 223.254.0.1/32; remember it must be a /32 mask,
// so that no other routes are directed here
|
||||
func CmdSSH(_ cmdutil.Factory) *cobra.Command {
|
||||
var sshConf = &util.SshConfig{}
|
||||
var ExtraCIDR []string
|
||||
// Remember to use network mask /32, because ssh mode uses the dedicated network CIDR 198.18.0.0/16
|
||||
func CmdSSH(cmdutil.Factory) *cobra.Command {
|
||||
var sshConf = &pkgssh.SshConfig{}
|
||||
var extraCIDR []string
|
||||
var platform string
|
||||
var lite bool
|
||||
cmd := &cobra.Command{
|
||||
Use: "ssh",
|
||||
Short: "Ssh to jump server",
|
||||
Long: `Ssh to jump server`,
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Ssh to jump server
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# Jump to a server behind a bastion host or ssh jump host
|
||||
kubevpn ssh --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
|
||||
|
||||
# it also support ProxyJump, like
|
||||
# It also supports ProxyJump, like
|
||||
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────┐
|
||||
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ server │
|
||||
└──────┘ └──────┘ └──────┘ └──────┘ └────────┘
|
||||
kubevpn ssh --ssh-alias <alias>
|
||||
`)),
|
||||
|
||||
# Support ssh auth GSSAPI
|
||||
kubevpn ssh --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
|
||||
kubevpn ssh --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
|
||||
kubevpn ssh --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
plog.InitLoggerForClient()
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
plat, err := platforms.Parse(platform)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config, err := websocket.NewConfig("ws://test/ws", "http://test")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.Header.Set("ssh-addr", sshConf.Addr)
|
||||
config.Header.Set("ssh-username", sshConf.User)
|
||||
config.Header.Set("ssh-password", sshConf.Password)
|
||||
config.Header.Set("ssh-keyfile", sshConf.Keyfile)
|
||||
config.Header.Set("ssh-alias", sshConf.ConfigAlias)
|
||||
config.Header.Set("extra-cidr", strings.Join(ExtraCIDR, ","))
|
||||
fd := int(os.Stdin.Fd())
|
||||
if !terminal.IsTerminal(fd) {
|
||||
return fmt.Errorf("stdin is not a terminal")
|
||||
}
|
||||
width, height, err := terminal.GetSize(fd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("terminal get size: %s", err)
|
||||
}
|
||||
sessionID := uuid.NewString()
|
||||
ssh := handler.Ssh{
|
||||
Config: *sshConf,
|
||||
ExtraCIDR: extraCIDR,
|
||||
Width: width,
|
||||
Height: height,
|
||||
Platform: platforms.Format(platforms.Normalize(plat)),
|
||||
SessionID: sessionID,
|
||||
Lite: lite,
|
||||
}
|
||||
marshal, err := json.Marshal(ssh)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.Header.Set("ssh", string(marshal))
|
||||
client := daemon.GetTCPClient(true)
|
||||
if client == nil {
|
||||
return fmt.Errorf("client is nil")
|
||||
}
|
||||
conn, err := websocket.NewClient(config, client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go io.Copy(conn, os.Stdin)
|
||||
_, err = io.Copy(os.Stdout, conn)
|
||||
return err
|
||||
defer conn.Close()
|
||||
|
||||
errChan := make(chan error, 3)
|
||||
go func() {
|
||||
errChan <- monitorSize(cmd.Context(), sessionID)
|
||||
}()
|
||||
|
||||
readyCtx, cancelFunc := context.WithCancel(cmd.Context())
|
||||
checker := func(log string) bool {
|
||||
isReady := strings.Contains(log, fmt.Sprintf(handler.SshTerminalReadyFormat, sessionID))
|
||||
if isReady {
|
||||
cancelFunc()
|
||||
}
|
||||
return isReady
|
||||
}
|
||||
var state *terminal.State
|
||||
go func() {
|
||||
select {
|
||||
case <-cmd.Context().Done():
|
||||
return
|
||||
case <-readyCtx.Done():
|
||||
}
|
||||
if state, err = terminal.MakeRaw(fd); err != nil {
|
||||
plog.G(context.Background()).Errorf("terminal make raw: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
_, err := io.Copy(conn, os.Stdin)
|
||||
errChan <- err
|
||||
}()
|
||||
go func() {
|
||||
_, err := io.Copy(io.MultiWriter(os.Stdout, util.NewWriter(checker)), conn)
|
||||
errChan <- err
|
||||
}()
|
||||
|
||||
defer func() {
|
||||
if state != nil {
|
||||
terminal.Restore(fd, state)
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case err := <-errChan:
|
||||
return err
|
||||
case <-cmd.Context().Done():
|
||||
return cmd.Context().Err()
|
||||
}
|
||||
},
|
||||
}
|
||||
addSshFlags(cmd, sshConf)
|
||||
cmd.Flags().StringArrayVar(&ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
|
||||
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
|
||||
cmd.Flags().StringArrayVar(&extraCIDR, "extra-cidr", []string{}, "Extra network CIDR string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
|
||||
cmd.Flags().StringVar(&platform, "platform", util.If(os.Getenv("KUBEVPN_DEFAULT_PLATFORM") != "", os.Getenv("KUBEVPN_DEFAULT_PLATFORM"), "linux/amd64"), "Set the ssh server platform if the kubevpn command needs to be installed")
|
||||
cmd.Flags().BoolVar(&lite, "lite", false, "Connect to the ssh server in lite mode. Mode \"lite\": only connects to the ssh server. Mode \"full\": connects to the ssh server and also creates a two-way tunnel to communicate with the inner IP")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func monitorSize(ctx context.Context, sessionID string) error {
|
||||
conn := daemon.GetTCPClient(true)
|
||||
if conn == nil {
|
||||
return fmt.Errorf("conn is nil")
|
||||
}
|
||||
var tt = term.TTY{
|
||||
In: os.Stdin,
|
||||
Out: os.Stdout,
|
||||
Raw: false,
|
||||
TryDev: false,
|
||||
Parent: nil,
|
||||
}
|
||||
sizeQueue := tt.MonitorSize(tt.GetSize())
|
||||
if sizeQueue == nil {
|
||||
return fmt.Errorf("sizeQueue is nil")
|
||||
}
|
||||
//defer runtime.HandleCrash()
|
||||
config, err := websocket.NewConfig("ws://test/resize", "http://test")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.Header.Set("session-id", sessionID)
|
||||
client, err := websocket.NewClient(config, conn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encoder := json.NewEncoder(client)
|
||||
for ctx.Err() == nil {
|
||||
size := sizeQueue.Next()
|
||||
if size == nil {
|
||||
return nil
|
||||
}
|
||||
if err = encoder.Encode(&size); err != nil {
|
||||
plog.G(ctx).Errorf("Encode resize: %s", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -9,30 +9,33 @@ import (
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
)
|
||||
|
||||
// CmdSSHDaemon
|
||||
// Set the local tun IP to 223.254.0.1/32; remember it must be a /32 mask,
// so that no other routes are directed here
|
||||
func CmdSSHDaemon(_ cmdutil.Factory) *cobra.Command {
|
||||
// set local tun ip 198.19.0.1/32, remember to use mask 32
|
||||
func CmdSSHDaemon(cmdutil.Factory) *cobra.Command {
|
||||
var clientIP string
|
||||
cmd := &cobra.Command{
|
||||
Use: "ssh-daemon",
|
||||
Hidden: true,
|
||||
Short: "Ssh daemon server",
|
||||
Long: `Ssh daemon server`,
|
||||
Long: templates.LongDesc(i18n.T(`Ssh daemon server`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# SSH daemon server
|
||||
kubevpn ssh-daemon --client-ip 223.254.0.123/32
|
||||
`)),
|
||||
kubevpn ssh-daemon --client-ip 198.19.0.123/32
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
err := daemon.StartupDaemon(cmd.Context())
|
||||
return err
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := daemon.GetClient(true).SshStart(
|
||||
cli, err := daemon.GetClient(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp, err := cli.SshStart(
|
||||
cmd.Context(),
|
||||
&rpc.SshStartRequest{
|
||||
ClientIP: clientIP,
|
||||
@@ -41,8 +44,8 @@ func CmdSSHDaemon(_ cmdutil.Factory) *cobra.Command {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, client.ServerIP)
|
||||
return nil
|
||||
_, err = fmt.Fprint(os.Stdout, resp.ServerIP)
|
||||
return err
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVar(&clientIP, "client-ip", "", "Client cidr")
|
||||
|
||||
@@ -1,41 +1,289 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/liggitt/tabwriter"
|
||||
"github.com/spf13/cobra"
|
||||
flag "github.com/spf13/pflag"
|
||||
"k8s.io/cli-runtime/pkg/genericclioptions"
|
||||
"k8s.io/cli-runtime/pkg/printers"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
const (
|
||||
FormatJson = "json"
|
||||
FormatYaml = "yaml"
|
||||
FormatTable = "table"
|
||||
)
|
||||
|
||||
func CmdStatus(f cmdutil.Factory) *cobra.Command {
|
||||
var aliasName string
|
||||
var localFile string
|
||||
var remoteAddr string
|
||||
var format string
|
||||
cmd := &cobra.Command{
|
||||
Use: "status",
|
||||
Short: i18n.T("KubeVPN status"),
|
||||
Long: templates.LongDesc(i18n.T(`KubeVPN status`)),
|
||||
Short: i18n.T("Show connect status and list proxy/clone resource"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Show connect status and list proxy/clone resource
|
||||
|
||||
Show connect status and list proxy or clone resources. You can check the connect status via the Status and Netif fields.
If Netif is empty, the tun device is closed and the connection is unhealthy. Route info is also shown; for proxied workloads,
not only your own proxy resources but also other clients' route info will be displayed.
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# show status for kubevpn status
|
||||
# Show connect status and list proxy/clone resources
|
||||
kubevpn status
|
||||
`)),
|
||||
|
||||
# query status by alias config name dev_new
|
||||
kubevpn status --alias dev_new
|
||||
|
||||
# query status with output json format
|
||||
kubevpn status -o json
|
||||
|
||||
# query status with output yaml format
|
||||
kubevpn status -o yaml
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
plog.InitLoggerForClient()
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := daemon.GetClient(false).Status(
|
||||
var clusterIDs []string
|
||||
if aliasName != "" {
|
||||
configs, err := ParseAndGet(localFile, remoteAddr, aliasName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, conf := range configs {
|
||||
clusterID, err := GetClusterIDByConfig(cmd, conf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clusterIDs = append(clusterIDs, clusterID)
|
||||
}
|
||||
}
|
||||
|
||||
cli, err := daemon.GetClient(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp, err := cli.Status(
|
||||
cmd.Context(),
|
||||
&rpc.StatusRequest{},
|
||||
&rpc.StatusRequest{
|
||||
ClusterIDs: clusterIDs,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, client.GetMessage())
|
||||
output, err := genOutput(resp, format)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, _ = fmt.Fprint(os.Stdout, output)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVar(&aliasName, "alias", "", "Alias name, query connect status by alias config name")
|
||||
cmd.Flags().StringVarP(&localFile, "kubevpnconfig", "f", util.If(os.Getenv("KUBEVPNCONFIG") != "", os.Getenv("KUBEVPNCONFIG"), config.GetConfigFile()), "Path to the kubevpnconfig file to use for CLI requests.")
|
||||
cmd.Flags().StringVarP(&remoteAddr, "remote", "r", "", "Remote config file, eg: https://raw.githubusercontent.com/kubenetworks/kubevpn/master/pkg/config/config.yaml")
|
||||
cmd.Flags().StringVarP(&format, "output", "o", FormatTable, fmt.Sprintf("Output format. One of: (%s, %s, %s)", FormatJson, FormatYaml, FormatTable))
|
||||
return cmd
|
||||
}
|
||||
|
||||
func genOutput(status *rpc.StatusResponse, format string) (string, error) {
|
||||
switch format {
|
||||
case FormatJson:
|
||||
if len(status.List) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
marshal, err := json.Marshal(status.List)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(marshal), nil
|
||||
|
||||
case FormatYaml:
|
||||
if len(status.List) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
marshal, err := yaml.Marshal(status.List)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(marshal), nil
|
||||
default:
|
||||
var sb = new(bytes.Buffer)
|
||||
w := printers.GetNewTabWriter(sb)
|
||||
genConnectMsg(w, status.List)
|
||||
genProxyMsg(w, status.List)
|
||||
genCloneMsg(w, status.List)
|
||||
_ = w.Flush()
|
||||
return sb.String(), nil
|
||||
}
|
||||
}
|
||||
|
||||
func genConnectMsg(w *tabwriter.Writer, status []*rpc.Status) {
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", "ID", "Mode", "Cluster", "Kubeconfig", "Namespace", "Status", "Netif")
|
||||
for _, c := range status {
|
||||
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%s\t%s\n", c.ID, c.Mode, c.Cluster, c.Kubeconfig, c.Namespace, c.Status, c.Netif)
|
||||
}
|
||||
}
|
||||
|
||||
func genProxyMsg(w *tabwriter.Writer, list []*rpc.Status) {
|
||||
var needsPrint bool
|
||||
for _, status := range list {
|
||||
if len(status.ProxyList) != 0 {
|
||||
needsPrint = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !needsPrint {
|
||||
return
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintf(w, "\n")
|
||||
w.SetRememberedWidths(nil)
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", "ID", "Namespace", "Name", "Headers", "IP", "PortMap", "CurrentPC")
|
||||
for _, c := range list {
|
||||
for _, proxy := range c.ProxyList {
|
||||
for _, rule := range proxy.RuleList {
|
||||
var headers []string
|
||||
for k, v := range rule.Headers {
|
||||
headers = append(headers, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
if len(headers) == 0 {
|
||||
headers = []string{"*"}
|
||||
}
|
||||
var portmap []string
|
||||
for k, v := range rule.PortMap {
|
||||
portmap = append(portmap, fmt.Sprintf("%d->%d", k, v))
|
||||
}
|
||||
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%s\t%v\n",
|
||||
c.ID,
|
||||
proxy.Namespace,
|
||||
proxy.Workload,
|
||||
strings.Join(headers, ","),
|
||||
rule.LocalTunIPv4,
|
||||
strings.Join(portmap, ","),
|
||||
rule.CurrentDevice,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func genCloneMsg(w *tabwriter.Writer, list []*rpc.Status) {
|
||||
var needsPrint bool
|
||||
for _, status := range list {
|
||||
if len(status.CloneList) != 0 {
|
||||
needsPrint = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !needsPrint {
|
||||
return
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintf(w, "\n")
|
||||
w.SetRememberedWidths(nil)
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n", "ID", "Namespace", "Name", "Headers", "ToName", "ToKubeconfig", "ToNamespace", "SyncthingGUI")
|
||||
for _, c := range list {
|
||||
for _, clone := range c.CloneList {
|
||||
//_, _ = fmt.Fprintf(w, "%s\n", clone.Workload)
|
||||
for _, rule := range clone.RuleList {
|
||||
var headers []string
|
||||
for k, v := range rule.Headers {
|
||||
headers = append(headers, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
if len(headers) == 0 {
|
||||
headers = []string{"*"}
|
||||
}
|
||||
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
|
||||
c.ID,
|
||||
clone.Namespace,
|
||||
clone.Workload,
|
||||
strings.Join(headers, ","),
|
||||
rule.DstWorkload,
|
||||
rule.DstKubeconfig,
|
||||
rule.DstNamespace,
|
||||
clone.SyncthingGUIAddr,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func GetClusterIDByConfig(cmd *cobra.Command, config Config) (string, error) {
|
||||
flags := flag.NewFlagSet("", flag.ContinueOnError)
|
||||
var sshConf = &pkgssh.SshConfig{}
|
||||
pkgssh.AddSshFlags(flags, sshConf)
|
||||
handler.AddExtraRoute(flags, &handler.ExtraRouteInfo{})
|
||||
configFlags := genericclioptions.NewConfigFlags(true)
|
||||
configFlags.AddFlags(flags)
|
||||
matchVersionFlags := cmdutil.NewMatchVersionFlags(&warp{ConfigFlags: configFlags})
|
||||
matchVersionFlags.AddFlags(flags)
|
||||
factory := cmdutil.NewFactory(matchVersionFlags)
|
||||
|
||||
for _, command := range cmd.Parent().Commands() {
|
||||
command.Flags().VisitAll(func(f *flag.Flag) {
|
||||
if flags.Lookup(f.Name) == nil && flags.ShorthandLookup(f.Shorthand) == nil {
|
||||
flags.AddFlag(f)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
err := flags.ParseAll(config.Flags, func(flag *flag.Flag, value string) error {
|
||||
_ = flags.Set(flag.Name, value)
|
||||
return nil
|
||||
})
|
||||
bytes, ns, err := util.ConvertToKubeConfigBytes(factory)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
file, err := util.ConvertToTempKubeconfigFile(bytes)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
flags = flag.NewFlagSet("", flag.ContinueOnError)
|
||||
flags.AddFlag(&flag.Flag{
|
||||
Name: "kubeconfig",
|
||||
DefValue: file,
|
||||
})
|
||||
flags.AddFlag(&flag.Flag{
|
||||
Name: "namespace",
|
||||
DefValue: ns,
|
||||
})
|
||||
var path string
|
||||
path, err = pkgssh.SshJump(cmd.Context(), sshConf, flags, false)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var c = &handler.ConnectOptions{}
|
||||
err = c.InitClient(util.InitFactoryByPath(path, ns))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
err = c.InitDHCP(cmd.Context())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return c.GetClusterID(), nil
|
||||
}
|
||||
|
||||
218 cmd/kubevpn/cmds/status_test.go Normal file
@@ -0,0 +1,218 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
)
|
||||
|
||||
func TestPrintProxyAndClone(t *testing.T) {
|
||||
var status = &rpc.StatusResponse{
|
||||
List: []*rpc.Status{
|
||||
{
|
||||
ID: 0,
|
||||
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
Cluster: "ccm6epn7qvcplhs3o8p00",
|
||||
Mode: "full",
|
||||
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
Namespace: "vke-system",
|
||||
Status: "Connected",
|
||||
Netif: "utun4",
|
||||
ProxyList: []*rpc.Proxy{
|
||||
{
|
||||
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
Cluster: "ccm6epn7qvcplhs3o8p00",
|
||||
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
Namespace: "vke-system",
|
||||
Workload: "deployment.apps/authors",
|
||||
RuleList: []*rpc.ProxyRule{
|
||||
{
|
||||
Headers: map[string]string{"user": "naison"},
|
||||
LocalTunIPv4: "198.19.0.103",
|
||||
LocalTunIPv6: "2001:2::999d",
|
||||
CurrentDevice: false,
|
||||
PortMap: map[int32]int32{8910: 8910},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
CloneList: []*rpc.Clone{
|
||||
{
|
||||
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
Cluster: "ccm6epn7qvcplhs3o8p00",
|
||||
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
Namespace: "vke-system",
|
||||
Workload: "deployment.apps/ratings",
|
||||
RuleList: []*rpc.CloneRule{{
|
||||
Headers: map[string]string{"user": "naison"},
|
||||
DstClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
DstCluster: "ccm6epn7qvcplhs3o8p00",
|
||||
DstKubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
DstNamespace: "vke-system",
|
||||
DstWorkload: "deployment.apps/ratings-clone-5ngn6",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: 1,
|
||||
ClusterID: "c08cae70-0021-46c9-a1dc-38e6a2f11443",
|
||||
Cluster: "ccnepblsebp68ivej4a20",
|
||||
Mode: "full",
|
||||
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
|
||||
Namespace: "vke-system",
|
||||
Status: "Connected",
|
||||
Netif: "utun5",
|
||||
ProxyList: []*rpc.Proxy{},
|
||||
CloneList: []*rpc.Clone{},
|
||||
},
|
||||
},
|
||||
}
|
||||
output, err := genOutput(status, FormatTable)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fmt.Println(output)
|
||||
}
|
||||
|
||||
func TestPrintProxy(t *testing.T) {
|
||||
var status = &rpc.StatusResponse{
|
||||
List: []*rpc.Status{
|
||||
{
|
||||
ID: 0,
|
||||
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
Cluster: "ccm6epn7qvcplhs3o8p00",
|
||||
Mode: "full",
|
||||
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
Namespace: "vke-system",
|
||||
Status: "Connected",
|
||||
Netif: "utun4",
|
||||
ProxyList: []*rpc.Proxy{
|
||||
{
|
||||
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
Cluster: "ccm6epn7qvcplhs3o8p00",
|
||||
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
Namespace: "vke-system",
|
||||
Workload: "deployment.apps/authors",
|
||||
RuleList: []*rpc.ProxyRule{
|
||||
{
|
||||
Headers: map[string]string{"user": "naison"},
|
||||
LocalTunIPv4: "198.19.0.103",
|
||||
LocalTunIPv6: "2001:2::999d",
|
||||
CurrentDevice: false,
|
||||
PortMap: map[int32]int32{8910: 8910},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
CloneList: []*rpc.Clone{},
|
||||
},
|
||||
{
|
||||
ID: 1,
|
||||
ClusterID: "c08cae70-0021-46c9-a1dc-38e6a2f11443",
|
||||
Cluster: "ccnepblsebp68ivej4a20",
|
||||
Mode: "full",
|
||||
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
|
||||
Namespace: "vke-system",
|
||||
Status: "Connected",
|
||||
Netif: "utun5",
|
||||
ProxyList: []*rpc.Proxy{},
|
||||
CloneList: []*rpc.Clone{},
|
||||
},
|
||||
},
|
||||
}
|
||||
output, err := genOutput(status, FormatTable)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fmt.Println(output)
|
||||
}
|
||||
|
||||
func TestPrintClone(t *testing.T) {
|
||||
var status = &rpc.StatusResponse{
|
||||
List: []*rpc.Status{
|
||||
{
|
||||
ID: 0,
|
||||
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
Cluster: "ccm6epn7qvcplhs3o8p00",
|
||||
Mode: "full",
|
||||
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
Namespace: "vke-system",
|
||||
Status: "Connected",
|
||||
Netif: "utun4",
|
||||
ProxyList: []*rpc.Proxy{},
|
||||
CloneList: []*rpc.Clone{
|
||||
{
|
||||
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
Cluster: "ccm6epn7qvcplhs3o8p00",
|
||||
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
Namespace: "vke-system",
|
||||
Workload: "deployment.apps/ratings",
|
||||
RuleList: []*rpc.CloneRule{{
|
||||
Headers: map[string]string{"user": "naison"},
|
||||
DstClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
DstCluster: "ccm6epn7qvcplhs3o8p00",
|
||||
DstKubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
DstNamespace: "vke-system",
|
||||
DstWorkload: "deployment.apps/ratings-clone-5ngn6",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: 1,
|
||||
ClusterID: "c08cae70-0021-46c9-a1dc-38e6a2f11443",
|
||||
Cluster: "ccnepblsebp68ivej4a20",
|
||||
Mode: "full",
|
||||
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
|
||||
Namespace: "vke-system",
|
||||
Status: "Connected",
|
||||
Netif: "utun5",
|
||||
ProxyList: []*rpc.Proxy{},
|
||||
CloneList: []*rpc.Clone{},
|
||||
},
|
||||
},
|
||||
}
|
||||
output, err := genOutput(status, FormatTable)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fmt.Println(output)
|
||||
}
|
||||
|
||||
func TestPrint(t *testing.T) {
|
||||
var status = &rpc.StatusResponse{
|
||||
List: []*rpc.Status{
|
||||
{
|
||||
ID: 0,
|
||||
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
Cluster: "ccm6epn7qvcplhs3o8p00",
|
||||
Mode: "full",
|
||||
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
Namespace: "vke-system",
|
||||
Status: "Connected",
|
||||
Netif: "utun4",
|
||||
ProxyList: []*rpc.Proxy{},
|
||||
CloneList: []*rpc.Clone{},
|
||||
},
|
||||
{
|
||||
ID: 1,
|
||||
ClusterID: "c08cae70-0021-46c9-a1dc-38e6a2f11443",
|
||||
Cluster: "ccnepblsebp68ivej4a20",
|
||||
Mode: "full",
|
||||
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
|
||||
Namespace: "vke-system",
|
||||
Status: "Connected",
|
||||
Netif: "utun5",
|
||||
ProxyList: []*rpc.Proxy{},
|
||||
CloneList: []*rpc.Clone{},
|
||||
},
|
||||
},
|
||||
}
|
||||
output, err := genOutput(status, FormatTable)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fmt.Println(output)
|
||||
}
|
||||
30 cmd/kubevpn/cmds/syncthing.go Normal file
@@ -0,0 +1,30 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/syncthing"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdSyncthing(cmdutil.Factory) *cobra.Command {
|
||||
var detach bool
|
||||
var dir string
|
||||
cmd := &cobra.Command{
|
||||
Use: "syncthing",
|
||||
Short: i18n.T("Syncthing"),
|
||||
Long: templates.LongDesc(i18n.T(`Syncthing`)),
|
||||
RunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
go util.StartupPProfForServer(0)
|
||||
return syncthing.StartServer(cmd.Context(), detach, dir)
|
||||
},
|
||||
Hidden: true,
|
||||
DisableFlagsInUseLine: true,
|
||||
}
|
||||
cmd.Flags().StringVar(&dir, "dir", "", "dir")
|
||||
cmd.Flags().BoolVarP(&detach, "detach", "d", false, "Run syncthing in background")
|
||||
return cmd
|
||||
}
|
||||
98 cmd/kubevpn/cmds/uninstall.go Normal file
@@ -0,0 +1,98 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdUninstall(f cmdutil.Factory) *cobra.Command {
|
||||
var sshConf = &pkgssh.SshConfig{}
|
||||
cmd := &cobra.Command{
|
||||
Use: "uninstall",
|
||||
Short: "Uninstall all resources created by kubevpn in the k8s cluster",
Long: templates.LongDesc(i18n.T(`
Uninstall all resources created by kubevpn in the k8s cluster

Uninstall deletes all resources created by kubevpn in the k8s cluster, such as deployments, services, serviceAccounts...
It also deletes local development docker containers and docker networks, removes hosts entries added by kubevpn,
and cleans up DNS settings.
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# Uninstall default namespace
|
||||
kubevpn uninstall
|
||||
|
||||
# Uninstall another namespace test
|
||||
kubevpn uninstall -n test
|
||||
|
||||
# Uninstall when the cluster api-server is behind a bastion host or ssh jump host
|
||||
kubevpn uninstall --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
|
||||
|
||||
# It also supports ProxyJump, like
|
||||
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
|
||||
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
|
||||
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
|
||||
kubevpn uninstall --ssh-alias <alias>
|
||||
|
||||
# Support ssh auth GSSAPI
|
||||
kubevpn uninstall --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
|
||||
kubevpn uninstall --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
|
||||
kubevpn uninstall --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
plog.InitLoggerForClient()
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cli, err := daemon.GetClient(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
disconnectResp, err := cli.Disconnect(cmd.Context(), &rpc.DisconnectRequest{
|
||||
KubeconfigBytes: ptr.To(string(bytes)),
|
||||
Namespace: ptr.To(ns),
|
||||
SshJump: sshConf.ToRPC(),
|
||||
})
|
||||
if err != nil {
|
||||
plog.G(cmd.Context()).Warnf("Failed to disconnect from cluter: %v", err)
|
||||
} else {
|
||||
_ = util.PrintGRPCStream[rpc.DisconnectResponse](disconnectResp)
|
||||
}
|
||||
|
||||
req := &rpc.UninstallRequest{
|
||||
KubeconfigBytes: string(bytes),
|
||||
Namespace: ns,
|
||||
SshJump: sshConf.ToRPC(),
|
||||
}
|
||||
resp, err := cli.Uninstall(cmd.Context(), req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = util.PrintGRPCStream[rpc.UninstallResponse](resp)
|
||||
if err != nil {
|
||||
if status.Code(err) == codes.Canceled {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
|
||||
return cmd
|
||||
}
|
||||
@@ -4,56 +4,55 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/oauth2"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/upgrade"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/upgrade"
|
||||
)
|
||||
|
||||
func CmdUpgrade(_ cmdutil.Factory) *cobra.Command {
|
||||
func CmdUpgrade(cmdutil.Factory) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "upgrade",
|
||||
Short: "Upgrade KubeVPN version",
|
||||
Long: `Upgrade KubeVPN version, automatically download latest KubeVPN from GitHub`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
Short: i18n.T("Upgrade kubevpn client to latest version"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Upgrade the kubevpn client to the latest version: automatically download and install the latest kubevpn from GitHub,
disconnect from all k8s clusters, leave all resources, remove all clone resources, and then
upgrade the local daemon gRPC server to the latest version.
|
||||
`)),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
const (
|
||||
envLatestUrl = "KUBEVPN_LATEST_VERSION_URL"
|
||||
)
|
||||
plog.InitLoggerForClient()
|
||||
var client = http.DefaultClient
|
||||
if config.GitHubOAuthToken != "" {
|
||||
client = oauth2.NewClient(cmd.Context(), oauth2.StaticTokenSource(&oauth2.Token{AccessToken: config.GitHubOAuthToken, TokenType: "Bearer"}))
|
||||
}
|
||||
latestVersion, latestCommit, url, err := util.GetManifest(client, runtime.GOOS, runtime.GOARCH)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
err = upgrade.Main(cmd.Context(), client, latestVersion, latestCommit, url)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Fprint(os.Stdout, "Upgrade daemon...")
|
||||
for _, isSudo := range []bool{false, true} {
|
||||
cli := daemon.GetClient(isSudo)
|
||||
if cli != nil {
|
||||
var response *rpc.UpgradeResponse
|
||||
response, err = cli.Upgrade(cmd.Context(), &rpc.UpgradeRequest{
|
||||
ClientVersion: latestVersion,
|
||||
ClientCommitId: latestCommit,
|
||||
})
|
||||
if err == nil && !response.NeedUpgrade {
|
||||
// do nothing
|
||||
} else {
|
||||
_ = quit(cmd.Context(), isSudo)
|
||||
}
|
||||
var url = os.Getenv(envLatestUrl)
|
||||
if url == "" {
|
||||
var latestVersion string
|
||||
var needsUpgrade bool
|
||||
var err error
|
||||
url, latestVersion, needsUpgrade, err = upgrade.NeedsUpgrade(cmd.Context(), client, config.Version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !needsUpgrade {
|
||||
_, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("Already up to date, don't needs to upgrade, version: %s", latestVersion))
|
||||
return nil
|
||||
}
|
||||
_, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("Current version is: %s less than latest version: %s, needs to upgrade", config.Version, latestVersion))
|
||||
_ = os.Setenv(envLatestUrl, url)
|
||||
_ = quit(cmd.Context(), true)
|
||||
_ = quit(cmd.Context(), false)
|
||||
}
|
||||
err = daemon.StartupDaemon(cmd.Context())
|
||||
fmt.Fprint(os.Stdout, "done")
|
||||
return upgrade.Main(cmd.Context(), client, url)
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
|
||||
@@ -9,10 +9,12 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
)
|
||||
|
||||
// --ldflags -X
|
||||
@@ -33,12 +35,14 @@ func reformatDate(buildTime string) string {
|
||||
func CmdVersion(cmdutil.Factory) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Print the client version information",
|
||||
Long: `Print the client version information`,
|
||||
Short: i18n.T("Print the client version information"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Print the client version information
|
||||
`)),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
fmt.Printf("KubeVPN: CLI\n")
|
||||
fmt.Printf(" Version: %s\n", config.Version)
|
||||
fmt.Printf(" DaemonVersion: %s\n", getDaemonVersion())
|
||||
fmt.Printf(" Daemon: %s\n", getDaemonVersion())
|
||||
fmt.Printf(" Image: %s\n", config.Image)
|
||||
fmt.Printf(" Branch: %s\n", Branch)
|
||||
fmt.Printf(" Git commit: %s\n", config.GitCommit)
|
||||
@@ -60,12 +64,13 @@ func init() {
|
||||
}
|
||||
|
||||
func getDaemonVersion() string {
|
||||
cli := daemon.GetClient(false)
|
||||
if cli != nil {
|
||||
version, err := cli.Version(context.Background(), &rpc.VersionRequest{})
|
||||
if err == nil {
|
||||
return version.Version
|
||||
}
|
||||
cli, err := daemon.GetClient(false)
|
||||
if err != nil {
|
||||
return "unknown"
|
||||
}
|
||||
return "unknown"
|
||||
version, err := cli.Version(context.Background(), &rpc.VersionRequest{})
|
||||
if err != nil {
|
||||
return "unknown"
|
||||
}
|
||||
return version.Version
|
||||
}
|
||||
|
||||
@@ -3,23 +3,26 @@ package cmds
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/webhook"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/webhook"
|
||||
)
|
||||
|
||||
func CmdWebhook(f cmdutil.Factory) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "webhook",
|
||||
Hidden: true,
|
||||
Short: "Starts a HTTP server, useful for creating MutatingAdmissionWebhook",
|
||||
Long: `Starts a HTTP server, useful for creating MutatingAdmissionWebhook.
|
||||
After deploying it to Kubernetes cluster, the Administrator needs to create a MutatingWebhookConfiguration
|
||||
in the Kubernetes cluster to register remote webhook admission controllers.`,
|
||||
Short: i18n.T("Starts a HTTP server, useful for creating MutatingAdmissionWebhook"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Starts a HTTP server, useful for creating MutatingAdmissionWebhook.
|
||||
After deploying it to Kubernetes cluster, the Administrator needs to create a MutatingWebhookConfiguration
|
||||
in the Kubernetes cluster to register remote webhook admission controllers.
|
||||
`)),
|
||||
Args: cobra.MaximumNArgs(0),
|
||||
PreRun: func(cmd *cobra.Command, args []string) {
|
||||
util.InitLogger(true)
|
||||
go util.StartupPProf(0)
|
||||
go util.StartupPProfForServer(0)
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return webhook.Main(f)
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth"
|
||||
_ "net/http/pprof"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/cmd/kubevpn/cmds"
|
||||
"github.com/wencaiwulue/kubevpn/v2/cmd/kubevpn/cmds"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
45 docs/en/Architecture.md Normal file
@@ -0,0 +1,45 @@
|
||||
## Architecture
|
||||
|
||||
### Connect mode
|
||||
|
||||
Creates a tunnel via port-forward, adds routes to a virtual interface (e.g. tun0), and forwards traffic through the tunnel to the remote
traffic manager.
|
||||

|
||||
|
||||
### Reverse mode
|
||||
|
||||
Based on connect mode: inject a sidecar container into the controller and use iptables to intercept all inbound traffic and forward it to the local machine
through the tunnel.
|
||||
|
||||
```text
|
||||
┌──────────┐ ┌─────────┌──────────┐ ┌──────────┐
|
||||
│ ServiceA ├───►│ sidecar │ ServiceB │ ┌─►│ ServiceC │
|
||||
└──────────┘ └────┌────┘──────────┘ │ └──────────┘
|
||||
│ │
|
||||
│ │ cloud
|
||||
─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┘─ ─ ─ ─ ─ ─ ─ ─ ─┘ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─
|
||||
│ │ local
|
||||
┌───┘──────┐ │
|
||||
│ ServiceB'├──────────┘
|
||||
└──────────┘
|
||||
```
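
As an illustration of the interception step, the sketch below shows the kind of iptables DNAT rule such an injected sidecar could apply; the port and local tun IP are made-up example values, and this is not the exact rule set kubevpn installs.

```go
// Illustrative sketch only: redirect inbound traffic for one port to the developer's tun IP.
package main

import (
	"log"
	"os/exec"
)

func redirectInbound(port, localTunIP string) error {
	// DNAT traffic arriving at the pod on the given port to the local tun address,
	// so it flows back through the tunnel to the developer machine.
	cmd := exec.Command("iptables",
		"-t", "nat", "-A", "PREROUTING",
		"-p", "tcp", "--dport", port,
		"-j", "DNAT", "--to-destination", localTunIP+":"+port)
	return cmd.Run()
}

func main() {
	if err := redirectInbound("9080", "198.19.0.103"); err != nil {
		log.Fatal(err)
	}
}
```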
|
||||
|
||||
### Mesh mode
|
||||
|
||||
Based on reverse mode, using envoy as the proxy: if the request headers contain a specific key-value pair, traffic is routed to the local machine; if
not, it goes to the original service.
|
||||
|
||||
```text
|
||||
┌──────────┐ ┌─────────┌────────────┐ ┌──────────┐
|
||||
│ ServiceA ├───►│ sidecar ├─► ServiceB │─►┌─►│ ServiceC │
|
||||
└──────────┘ └────┌────┘────────────┘ │ └──────────┘
|
||||
│ │ cloud
|
||||
─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─┘─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┘ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─
|
||||
│ │ local
|
||||
header: foo=bar │
|
||||
┌───┘──────┐ │
|
||||
│ ServiceB'├─────────────┘
|
||||
└──────────┘
|
||||
```
|
||||
|
||||

|
||||
4 docs/en/images/connect-mode.drawio.svg Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 106 KiB |
4 docs/en/images/kubevpn-proxy-tun-arch.svg Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 372 KiB |
4 docs/en/images/proxy-arch.svg Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 488 KiB |
468 go.mod
@@ -1,215 +1,377 @@
|
||||
module github.com/wencaiwulue/kubevpn
|
||||
module github.com/wencaiwulue/kubevpn/v2
|
||||
|
||||
go 1.20
|
||||
go 1.23.2
|
||||
|
||||
require (
|
||||
github.com/cilium/ipam v0.0.0-20220824141044-46ef3d556735
|
||||
github.com/docker/cli v23.0.1+incompatible
|
||||
github.com/docker/docker v23.0.1+incompatible
|
||||
github.com/docker/go-connections v0.4.0
|
||||
github.com/docker/libcontainer v2.2.1+incompatible
|
||||
github.com/envoyproxy/go-control-plane v0.10.3
|
||||
github.com/fsnotify/fsnotify v1.6.0
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/miekg/dns v1.1.50
|
||||
github.com/moby/term v0.0.0-20221205130635-1aeaba878587
|
||||
github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/sirupsen/logrus v1.9.0
|
||||
github.com/spf13/cobra v1.6.1
|
||||
golang.org/x/net v0.8.0
|
||||
golang.org/x/sys v0.6.0
|
||||
golang.zx2c4.com/wireguard v0.0.0-20220920152132-bb719d3a6e2c
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/grpc v1.53.0-dev.0.20230123225046-4075ef07c5d5
|
||||
google.golang.org/protobuf v1.30.0
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
k8s.io/api v0.26.3
|
||||
k8s.io/apimachinery v0.26.3
|
||||
k8s.io/cli-runtime v0.26.1
|
||||
k8s.io/client-go v0.26.3
|
||||
k8s.io/klog/v2 v2.90.1 // indirect
|
||||
k8s.io/kubectl v0.26.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/containerd/containerd v1.5.18
|
||||
github.com/cilium/ipam v0.0.0-20230509084518-fd66eae7909b
|
||||
github.com/containerd/containerd v1.7.27
|
||||
github.com/containernetworking/cni v1.1.2
|
||||
github.com/coredns/caddy v1.1.1
|
||||
github.com/coredns/coredns v1.10.1
|
||||
github.com/docker/distribution v2.8.1+incompatible
|
||||
github.com/coredns/caddy v1.1.2-0.20241029205200-8de985351a98
|
||||
github.com/coredns/coredns v1.12.1
|
||||
github.com/distribution/reference v0.6.0
|
||||
github.com/docker/cli v27.5.1+incompatible
|
||||
github.com/docker/docker v27.5.1+incompatible
|
||||
github.com/docker/go-connections v0.5.0
|
||||
github.com/docker/go-units v0.5.0
|
||||
github.com/docker/libcontainer v2.2.1+incompatible
|
||||
github.com/envoyproxy/go-control-plane v0.13.4
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4
|
||||
github.com/fsnotify/fsnotify v1.8.0
|
||||
github.com/gliderlabs/ssh v0.3.8
|
||||
github.com/google/gopacket v1.1.19
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/hashicorp/go-version v1.6.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/hashicorp/go-version v1.7.0
|
||||
github.com/hpcloud/tail v1.0.0
|
||||
github.com/jcmturner/gofork v1.7.6
|
||||
github.com/jcmturner/gokrb5/v8 v8.4.4
|
||||
github.com/kevinburke/ssh_config v1.2.0
|
||||
github.com/libp2p/go-netroute v0.2.1
|
||||
github.com/mattbaird/jsonpatch v0.0.0-20200820163806-098863c1fc24
|
||||
github.com/prometheus-community/pro-bing v0.1.0
|
||||
github.com/schollz/progressbar/v3 v3.13.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
go.uber.org/automaxprocs v1.5.1
|
||||
golang.org/x/crypto v0.2.0
|
||||
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090
|
||||
golang.org/x/oauth2 v0.6.0
|
||||
golang.org/x/sync v0.1.0
|
||||
golang.org/x/text v0.8.0
|
||||
golang.org/x/time v0.3.0
|
||||
golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224
|
||||
gvisor.dev/gvisor v0.0.0-20230603040744-5c9219dedd33
|
||||
k8s.io/utils v0.0.0-20230313181309-38a27ef9d749
|
||||
sigs.k8s.io/controller-runtime v0.14.5
|
||||
sigs.k8s.io/kustomize/api v0.12.1
|
||||
sigs.k8s.io/yaml v1.3.0
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de
|
||||
github.com/mattbaird/jsonpatch v0.0.0-20240118010651-0ba75a80ca38
|
||||
github.com/miekg/dns v1.1.64
|
||||
github.com/moby/term v0.5.2
|
||||
github.com/opencontainers/image-spec v1.1.1
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus-community/pro-bing v0.4.0
|
||||
github.com/regclient/regclient v0.8.0
|
||||
github.com/schollz/progressbar/v3 v3.14.2
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/cobra v1.9.1
|
||||
github.com/spf13/pflag v1.0.6
|
||||
github.com/syncthing/syncthing v1.29.2
|
||||
github.com/thejerf/suture/v4 v4.0.6
|
||||
go.uber.org/automaxprocs v1.6.0
|
||||
golang.org/x/crypto v0.37.0
|
||||
golang.org/x/net v0.39.0
|
||||
golang.org/x/oauth2 v0.28.0
|
||||
golang.org/x/sys v0.32.0
|
||||
golang.org/x/term v0.31.0
|
||||
golang.org/x/text v0.24.0
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
|
||||
golang.zx2c4.com/wireguard v0.0.0-20220920152132-bb719d3a6e2c
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3
|
||||
google.golang.org/grpc v1.71.1
|
||||
google.golang.org/protobuf v1.36.6
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||
gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987
|
||||
helm.sh/helm/v4 v4.0.0-20250324191910-0199b748aaea
|
||||
k8s.io/api v0.32.3
|
||||
k8s.io/apimachinery v0.32.3
|
||||
k8s.io/cli-runtime v0.32.3
|
||||
k8s.io/client-go v0.32.3
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
k8s.io/kubectl v0.32.3
|
||||
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e
|
||||
sigs.k8s.io/controller-runtime v0.20.4
|
||||
sigs.k8s.io/kustomize/api v0.19.0
|
||||
sigs.k8s.io/yaml v1.4.0
|
||||
tailscale.com v1.74.1
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/compute v1.15.1 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.2.3 // indirect
|
||||
cel.dev/expr v0.19.1 // indirect
|
||||
cloud.google.com/go/auth v0.15.0 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.6.0 // indirect
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
filippo.io/edwards25519 v1.1.0 // indirect
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect
|
||||
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.28 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.30 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect
|
||||
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/obfuscate v0.0.0-20211129110424-6491aa3bf583 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0-rc.1 // indirect
|
||||
github.com/DataDog/datadog-go v4.8.2+incompatible // indirect
|
||||
github.com/DataDog/datadog-go/v5 v5.0.2 // indirect
|
||||
github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork // indirect
|
||||
github.com/DataDog/sketches-go v1.2.1 // indirect
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
|
||||
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
|
||||
github.com/DataDog/appsec-internal-go v1.9.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/obfuscate v0.58.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/proto v0.58.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.58.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/trace v0.58.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/util/log v0.58.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/util/scrubber v0.58.0 // indirect
|
||||
github.com/DataDog/datadog-go/v5 v5.5.0 // indirect
|
||||
github.com/DataDog/go-libddwaf/v3 v3.5.1 // indirect
|
||||
github.com/DataDog/go-runtime-metrics-internal v0.0.4-0.20241206090539-a14610dc22b6 // indirect
|
||||
github.com/DataDog/go-sqllexer v0.0.14 // indirect
|
||||
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
|
||||
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 // indirect
|
||||
github.com/DataDog/sketches-go v1.4.5 // indirect
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.0 // indirect
|
||||
github.com/antonmedv/expr v1.12.0 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/squirrel v1.5.4 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/Microsoft/hcsshim v0.12.9 // indirect
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect
|
||||
github.com/apparentlymart/go-cidr v1.1.0 // indirect
|
||||
github.com/aws/aws-sdk-go v1.44.194 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/aws/aws-sdk-go v1.55.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.9 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.62 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.35.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 // indirect
|
||||
github.com/aws/smithy-go v1.22.2 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.13.0 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/calmh/incontainer v1.0.0 // indirect
|
||||
github.com/calmh/xdr v1.2.0 // indirect
|
||||
github.com/ccding/go-stun v0.1.5 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chai2010/gettext-go v1.0.2 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20230112175826-46e39c7b9b43 // indirect
|
||||
github.com/coreos/go-semver v0.3.0 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dgraph-io/ristretto v0.1.0 // indirect
|
||||
github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect
|
||||
github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
|
||||
github.com/cilium/ebpf v0.16.0 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/platforms v0.2.1 // indirect
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect
|
||||
github.com/coreos/go-semver v0.3.1 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa // indirect
|
||||
github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/dnstap/golang-dnstap v0.4.0 // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/cli-docs-tool v0.9.0 // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/emicklei/go-restful/v3 v3.10.2 // indirect
github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 // indirect
github.com/ebitengine/purego v0.8.3 // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/expr-lang/expr v1.17.2 // indirect
github.com/farsightsec/golang-framestream v0.3.0 // indirect
github.com/fatih/camelcase v1.0.0 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
github.com/fvbommel/sortorder v1.0.2 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/fvbommel/sortorder v1.1.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gaissmai/bart v0.11.1 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
github.com/go-errors/errors v1.5.1 // indirect
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 // indirect
github.com/go-ldap/ldap/v3 v3.4.10 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.21.1 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.1 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
github.com/golang/glog v1.0.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/gnostic v0.6.9 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 // indirect
github.com/google/pprof v0.0.0-20250202011525-fc3143867406 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect
github.com/googleapis/gax-go/v2 v2.7.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
github.com/googleapis/gax-go/v2 v2.14.1 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/gosuri/uitable v0.0.4 // indirect
github.com/greatroar/blobloom v0.8.0 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
github.com/imdario/mergo v0.3.14 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hdevalence/ed25519consensus v0.2.0 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
github.com/illarion/gonotify/v2 v2.0.3 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/infobloxopen/go-trees v0.0.0-20200715205103-96a057b8dfb9 // indirect
github.com/infobloxopen/go-trees v0.0.0-20221216143356-66ceba885ebc // indirect
github.com/jackpal/gateway v1.0.16 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jmoiron/sqlx v1.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 // indirect
github.com/jsimonetti/rtnetlink v1.4.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.15.15 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect
github.com/mailru/easyjson v0.9.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mdlayher/netlink v1.7.2 // indirect
github.com/mdlayher/socket v0.5.0 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.4.2 // indirect
github.com/moby/buildkit v0.9.0-rc1 // indirect
github.com/moby/patternmatcher v0.5.0 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/sys/signal v0.7.0 // indirect
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/spdystream v0.5.0 // indirect
github.com/moby/sys/sequential v0.6.0 // indirect
github.com/moby/sys/symlink v0.2.0 // indirect
github.com/moby/sys/user v0.4.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/onsi/ginkgo/v2 v2.22.2 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/runc v1.1.4 // indirect
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 // indirect
github.com/openzipkin/zipkin-go v0.4.1 // indirect
github.com/oschwald/geoip2-golang v1.8.0 // indirect
github.com/oschwald/maxminddb-golang v1.10.0 // indirect
github.com/openzipkin/zipkin-go v0.4.3 // indirect
github.com/oschwald/geoip2-golang v1.11.0 // indirect
github.com/oschwald/maxminddb-golang v1.13.1 // indirect
github.com/outcaste-io/ristretto v0.2.3 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/philhofer/fwd v1.1.1 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/prometheus/client_golang v1.21.1 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.63.0 // indirect
github.com/prometheus/procfs v0.16.0 // indirect
github.com/quic-go/quic-go v0.50.1 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rubenv/sql-migrate v1.7.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
github.com/shirou/gopsutil/v3 v3.24.4 // indirect
github.com/shirou/gopsutil/v4 v4.25.1 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/spf13/cast v1.7.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/stretchr/testify v1.10.0 // indirect
github.com/syncthing/notify v0.0.0-20210616190510-c6b7342338d2 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 // indirect
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/tinylib/msgp v1.1.6 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/tinylib/msgp v1.2.1 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.9.0 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xlab/treeprint v1.1.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.7 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect
go.etcd.io/etcd/client/v3 v3.5.7 // indirect
go.opencensus.io v0.24.0 // indirect
go.starlark.net v0.0.0-20230112144946-fae38c8a6d89 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.24.0 // indirect
go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect
go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect
golang.org/x/mod v0.11.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/tools v0.6.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/api v0.109.0 // indirect
google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5 // indirect
gopkg.in/DataDog/dd-trace-go.v1 v1.47.0 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.etcd.io/etcd/api/v3 v3.5.20 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.20 // indirect
go.etcd.io/etcd/client/v3 v3.5.20 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/collector/component v0.104.0 // indirect
go.opentelemetry.io/collector/config/configtelemetry v0.104.0 // indirect
go.opentelemetry.io/collector/pdata v1.11.0 // indirect
go.opentelemetry.io/collector/pdata/pprofile v0.104.0 // indirect
go.opentelemetry.io/collector/semconv v0.104.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
go.opentelemetry.io/otel v1.35.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect
go.opentelemetry.io/otel/metric v1.35.0 // indirect
go.opentelemetry.io/otel/sdk v1.35.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect
go.opentelemetry.io/otel/trace v1.35.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/mock v0.5.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect
golang.org/x/exp v0.0.0-20250207012021-f9890c6ad9f3 // indirect
golang.org/x/mod v0.23.0 // indirect
golang.org/x/sync v0.13.0 // indirect
golang.org/x/time v0.11.0 // indirect
golang.org/x/tools v0.30.0 // indirect
golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
google.golang.org/api v0.227.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250409194420-de1ac958c67a // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e // indirect
gopkg.in/DataDog/dd-trace-go.v1 v1.72.2 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
inet.af/netaddr v0.0.0-20220617031823-097006376321 // indirect
k8s.io/apiextensions-apiserver v0.26.3 // indirect
k8s.io/component-base v0.26.3 // indirect
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
k8s.io/apiextensions-apiserver v0.32.3 // indirect
k8s.io/apiserver v0.32.3 // indirect
k8s.io/component-base v0.32.3 // indirect
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
oras.land/oras-go/v2 v2.5.0 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
)
667 go.work.sum Normal file
@@ -0,0 +1,667 @@
4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs=
4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU=
cloud.google.com/go v0.112.2 h1:ZaGT6LiG7dBzi6zNOvVZwacaXlmf3lRqnC4DQzqyRQw=
cloud.google.com/go v0.112.2/go.mod h1:iEqjp//KquGIJV/m+Pk3xecgKNhV+ry+vVTsy4TbDms=
cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA=
cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q=
cloud.google.com/go/accessapproval v1.8.5/go.mod h1:aO61iJuMRAaugpD0rWgpwj9aXvWimCWTEbA/kYAFddE=
cloud.google.com/go/accesscontextmanager v1.9.5/go.mod h1:i6WSokkuePCT3jWwRzhge/pZicoErUBbDWjAUd8AoQU=
cloud.google.com/go/aiplatform v1.81.0/go.mod h1:uwLaCFXLvVnKzxl3OXQRw1Hry3KJOIgpofYorq0ZMPk=
cloud.google.com/go/analytics v0.27.1/go.mod h1:2itQDvSWyGiBvs80ocjFjfu/ZUIo25fC93hsEX4fnoU=
cloud.google.com/go/apigateway v1.7.5/go.mod h1:iJ9zoE4KMNF1CHBFV4pZDCJRZzonqKj4BECymhvAwWk=
cloud.google.com/go/apigeeconnect v1.7.5/go.mod h1:XAGnQGiFakRMV3H6bawRb5JAIXIbFSfzGKLDqL1dYgQ=
cloud.google.com/go/apigeeregistry v0.9.5/go.mod h1:e6oNKW1utj+A1fpTw+YUpPkFusNT8gfFbqx/8upsgCY=
cloud.google.com/go/appengine v1.9.5/go.mod h1:x4zKNF1qRX++Joni0nQFJoNobodzWX1bieiGRMWx+4U=
cloud.google.com/go/area120 v0.9.5/go.mod h1:1rAIWfyOiCXk/kuTqFU//pfrHiA8GM8LziM79Lm0zxk=
cloud.google.com/go/artifactregistry v1.16.3/go.mod h1:eiLO70Qh5Z9Jbwctl0KdW5VzJ5HncWgNaYN0NdF8lmM=
cloud.google.com/go/asset v1.20.5/go.mod h1:0pbY+F3Pr3teQLK1ZXpUjGPNBPfUiL1tpxRxRmLCV/c=
cloud.google.com/go/assuredworkloads v1.12.5/go.mod h1:OHjBWxs611PdU/VkGDoNQ/SFZHIYQTPtZlfDAUWN8K0=
cloud.google.com/go/automl v1.14.6/go.mod h1:mEn1QHZmPTnmrq6zj33gyKX1K7L32izry14I6LQCO5M=
cloud.google.com/go/baremetalsolution v1.3.5/go.mod h1:FfLWTwf9g7MVh0jhomxs1ErK9J/E9GBALdsunmFo50Q=
cloud.google.com/go/batch v1.12.1/go.mod h1:hB6jwKyX2zoFoIXw6/pT2CPIbvo0ya7mpQXFJ9QbnAY=
cloud.google.com/go/beyondcorp v1.1.5/go.mod h1:C77HvHG9ntYvI3+/WXht0tqx/fNxfD4MahSutTOkJYg=
cloud.google.com/go/bigquery v1.67.0/go.mod h1:HQeP1AHFuAz0Y55heDSb0cjZIhnEkuwFRBGo6EEKHug=
cloud.google.com/go/bigtable v1.36.0/go.mod h1:u98oqNAXiAufepkRGAd95lq2ap4kHGr3wLeFojvJwew=
cloud.google.com/go/billing v1.20.3/go.mod h1:DJt75ird7g3zrTODh2Eo8ZT2d3jtoEI5L6qNXIHwOY0=
cloud.google.com/go/binaryauthorization v1.9.4/go.mod h1:LimAql4UPC7B/F+RW9rQpsUpzDFNO+VKwVRyHG9txKU=
cloud.google.com/go/certificatemanager v1.9.4/go.mod h1:KneWp8OAhBVD4fqMUB6daOA90MHh9xVB8E3ZFN8w2dc=
cloud.google.com/go/channel v1.19.4/go.mod h1:W82e3qLLe9wvZShy3aAg/6frvMYOdHKSaIwTLJT2Yxs=
cloud.google.com/go/cloudbuild v1.22.1/go.mod h1:/3syBgG56xUK1UD8dXAOSnPWF4Cs0ZZ/eXhoTIBipwg=
cloud.google.com/go/clouddms v1.8.6/go.mod h1:++xrkEPp1mAKZKFk3MMD63UkK7KpnSBt9kRLRSOYliE=
cloud.google.com/go/cloudtasks v1.13.5/go.mod h1:AReQFk11yF7sHEOKHXP3/SufAeiHn4yXWpqQGds9Of0=
cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU=
cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls=
cloud.google.com/go/compute v1.36.0 h1:QzLrJRxytIGE8OJWzmMweIdiu2pIlRVq9kSi7+xnDkU=
cloud.google.com/go/compute v1.36.0/go.mod h1:+GZuz5prSWFLquViP55zcjRrOm7vRpIllx2MaYpzuiI=
cloud.google.com/go/contactcenterinsights v1.17.2/go.mod h1:9yuX5Y7KFqsQgNydM7WeuGcYWWs/0dBCElXaOF6ltmo=
cloud.google.com/go/container v1.42.3/go.mod h1:8ZT9RBJXjWXqRMM/sEW8dxolZUofxKJUaO9mMXSkDz0=
cloud.google.com/go/containeranalysis v0.14.0/go.mod h1:vct7OEtK07Azaiyo6aCyae4teFL28t7JZQkr1DlTC5s=
cloud.google.com/go/datacatalog v1.25.0/go.mod h1:Bodb/U9ZV549+0sQPoX6WtYnbFwqayuYldw5p6PmbH4=
cloud.google.com/go/dataflow v0.10.5/go.mod h1:rLRbgv1ZK34XW72xrmJysN7z0PCwgsh0wtjWx5Yavoc=
cloud.google.com/go/dataform v0.11.1/go.mod h1:2TYH+Dmqnx9ewr/YG8HbMpcNQBX5gdCyP8W/8GwprWk=
cloud.google.com/go/datafusion v1.8.5/go.mod h1:xMoW16ciCOQpS8rNUDU1tWgHkhbQ3KKaV9o7UTggEtQ=
cloud.google.com/go/datalabeling v0.9.5/go.mod h1:xJzHTfjCvPeF87QreDSFTl98mRS/vp47EWwDBHvQiMU=
cloud.google.com/go/dataplex v1.24.0/go.mod h1:rNqsuS0Yag0NDGybhNpCaeqU/Jq8z4gFqqF0MUajHwE=
cloud.google.com/go/dataproc/v2 v2.11.1/go.mod h1:KDbkJUYjcz+t8nfapg0upz665P0SrsDW7I9RC9GZf4o=
cloud.google.com/go/dataqna v0.9.5/go.mod h1:UFRToVzSTCgwDkeSa4J0WE6bmbemdOZhUCUfs+DlJFc=
cloud.google.com/go/datastore v1.20.0/go.mod h1:uFo3e+aEpRfHgtp5pp0+6M0o147KoPaYNaPAKpfh8Ew=
cloud.google.com/go/datastream v1.14.0/go.mod h1:H0luYVOhiyUrzE2efbv1OHFRjzgZfHO9snDuBXmnQXE=
cloud.google.com/go/deploy v1.26.4/go.mod h1:MaPXP4rU984LmRF+DmJ1qNEZrTI7Rez+hfku0oRudTk=
cloud.google.com/go/dialogflow v1.68.1/go.mod h1:CpfTOpLjhM9ZXu+VzJ56xrX9GMBJt1aIjPMChiLUGso=
cloud.google.com/go/dlp v1.22.0/go.mod h1:2cMTKdeReZI64BDsYzsBZFtXdDqb3nhDKHRsRUl7J9Y=
cloud.google.com/go/documentai v1.36.0/go.mod h1:LsX1RO08WDd8mFBviYB03jgCytz2oIcwIZ9lBw5bKiM=
cloud.google.com/go/domains v0.10.5/go.mod h1:VP7djhZJy47uxUoJGfDilXpUnAaIExcHL86vv3yfaQs=
cloud.google.com/go/edgecontainer v1.4.2/go.mod h1:MhrgxorZIp/4myFe2a/Y0OHSx8PCxeyHBRZATvcTTZs=
cloud.google.com/go/errorreporting v0.3.2/go.mod h1:s5kjs5r3l6A8UUyIsgvAhGq6tkqyBCUss0FRpsoVTww=
cloud.google.com/go/essentialcontacts v1.7.5/go.mod h1:AzwvwPKMUnf8bwfLP0R/+BjzC7bi3OTaLABtUF/q428=
cloud.google.com/go/eventarc v1.15.4/go.mod h1:E5vNWMxaZOwfMfQlQOsoE5TY07tKtOiMLF9s99/btyo=
cloud.google.com/go/filestore v1.10.1/go.mod h1:uZfxcuSzAK8NZGflw9bvB0YOT2O8vhyfEVaFAG+vTkg=
cloud.google.com/go/firestore v1.18.0/go.mod h1:5ye0v48PhseZBdcl0qbl3uttu7FIEwEYVaWm0UIEOEU=
cloud.google.com/go/functions v1.19.4/go.mod h1:qmx3Yrm8ZdwQrWplvnpoL4tHW7s8ULNKwP2SjfX9zSM=
cloud.google.com/go/gkebackup v1.6.4/go.mod h1:ZYY7CdiOKobk3gzEKBbRymaEo22bkR1EPkwZ7Tvts/U=
cloud.google.com/go/gkeconnect v0.12.3/go.mod h1:Ra5w3QcA+ybM2hopIz4ZsQQsDqzoYws3Zn21CLGzfrw=
cloud.google.com/go/gkehub v0.15.5/go.mod h1:hIIoZAGNuiKWp6y4fW9JCEPg9xM7OX9sZwgiJrozrWQ=
cloud.google.com/go/gkemulticloud v1.5.2/go.mod h1:THwE0upZyYmgjEZtgbvGkf0VRkEdPkML9dF/J3lSahg=
cloud.google.com/go/gsuiteaddons v1.7.6/go.mod h1:TPlgcxjwv+L3fx9S6El4dDWItBxJpIyYTs4YPk6Zc48=
cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA=
cloud.google.com/go/iam v1.5.0/go.mod h1:U+DOtKQltF/LxPEtcDLoobcsZMilSRwR7mgNL7knOpo=
cloud.google.com/go/iap v1.10.5/go.mod h1:Sal3oNlcIiv9YWkXWLD9fYzbSCbnrqOD4Pm8JyaiZZY=
cloud.google.com/go/ids v1.5.5/go.mod h1:XHNjg7KratNBxruoiG2Mfx2lFMnRQZWCr/p7T7AV724=
cloud.google.com/go/iot v1.8.5/go.mod h1:BlwypQBsnaiVRCy2+49Zz4ClJLDidldn05+Fp1uGFOs=
cloud.google.com/go/kms v1.21.1/go.mod h1:s0wCyByc9LjTdCjG88toVs70U9W+cc6RKFc8zAqX7nE=
cloud.google.com/go/language v1.14.4/go.mod h1:EqwoMieV6UsNeqHV2tRxuhmfDyC3YqEu1er53CrRkeA=
cloud.google.com/go/lifesciences v0.10.5/go.mod h1:p+vxvHLx0/4QeVp3DU5Gcnyoi+kKNFWRqfgn2d8HuNc=
cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA=
cloud.google.com/go/longrunning v0.5.6/go.mod h1:vUaDrWYOMKRuhiv6JBnn49YxCPz2Ayn9GqyjaBT8/mA=
cloud.google.com/go/longrunning v0.6.6/go.mod h1:hyeGJUrPHcx0u2Uu1UFSoYZLn4lkMrccJig0t4FI7yw=
cloud.google.com/go/managedidentities v1.7.5/go.mod h1:cD8aai2c7nWdOzBMP48wJUM9zsdIu1VbdojGSlLGqjM=
cloud.google.com/go/maps v1.20.1/go.mod h1:aMmv5a4nJBF3WpbPoGathd05Wbl4uuHEw2/bXX+2gZ4=
cloud.google.com/go/mediatranslation v0.9.5/go.mod h1:JGsL9cldTUtRi3u6Q+BMXzY1zZFOWdbmZLf1C69G2Zs=
cloud.google.com/go/memcache v1.11.5/go.mod h1:SYrG9bR51Q82rGpj04gA5YwL0aZGdDcqPvxfQiaxio4=
cloud.google.com/go/metastore v1.14.5/go.mod h1:mWHoEHrIFMv4yjKxczc1S6LIwhDQ7rTcAIix2BEIad8=
cloud.google.com/go/monitoring v1.24.1/go.mod h1:Z05d1/vn9NaujqY2voG6pVQXoJGbp+r3laV+LySt9K0=
cloud.google.com/go/networkconnectivity v1.17.0/go.mod h1:RiX351sXmQ/iScNWUBLN+4L9HJeP3etBCIsXCt366Mc=
cloud.google.com/go/networkmanagement v1.18.2/go.mod h1:QOOTm+LgXEPeA9u9bAeDETBYkibzMVTYH4mIi9GJATc=
cloud.google.com/go/networksecurity v0.10.5/go.mod h1:CqJMtLG67gxHEAjGjccwEm5a7Tb6h0kPtHK5SEHnwMc=
cloud.google.com/go/notebooks v1.12.5/go.mod h1:265WkAl2d3YKqxB+nFFkI+xwnc9CWDdvHs+Pl3TUhLM=
cloud.google.com/go/optimization v1.7.5/go.mod h1:/nM8SUgl5C43X8Bb/AzEZdCL9CrUv9JtOVx6Ql4Ohg8=
cloud.google.com/go/orchestration v1.11.7/go.mod h1:0u82lPJh6P5DpeaLtoeyrYafLEBAQ6m7gZwdhVSM1Ag=
cloud.google.com/go/orgpolicy v1.14.3/go.mod h1:bc5nFdnE+4vwCLvv3uNFWUtsywFf6Szv+eW8SmAbQlQ=
cloud.google.com/go/osconfig v1.14.4/go.mod h1:WQ5UV8yf1yhqrFrMD//dsqF/dqpepo9nzSF34aQ4vC8=
cloud.google.com/go/oslogin v1.14.5/go.mod h1:H/wQ2JrheJ/NqGomDgRGj7YwRUKPl/EqQYUse5z+eCU=
cloud.google.com/go/phishingprotection v0.9.5/go.mod h1:9eflfOQ/ZBWXzjX7Y5GCEDgK3KzpQafnFuGzdwt/AFM=
cloud.google.com/go/policytroubleshooter v1.11.5/go.mod h1:/AnSQG4qCijhusdepnPROvb34cqvwZozTpnPmLt09Uk=
cloud.google.com/go/privatecatalog v0.10.6/go.mod h1:rXuTtOfEicEN2bZRBkz/KTdDJndzvc4zb1b2Jaxkc8w=
cloud.google.com/go/pubsub v1.37.0/go.mod h1:YQOQr1uiUM092EXwKs56OPT650nwnawc+8/IjoUeGzQ=
cloud.google.com/go/pubsub v1.49.0/go.mod h1:K1FswTWP+C1tI/nfi3HQecoVeFvL4HUOB1tdaNXKhUY=
cloud.google.com/go/pubsublite v1.8.2/go.mod h1:4r8GSa9NznExjuLPEJlF1VjOPOpgf3IT6k8x/YgaOPI=
cloud.google.com/go/recaptchaenterprise/v2 v2.20.2/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
cloud.google.com/go/recommendationengine v0.9.5/go.mod h1:7Ngg07UK3Ix45dwj/DXgWJa0661YyKfE84XKXnM6qo0=
cloud.google.com/go/recommender v1.13.4/go.mod h1:2xpcTYCOy2JlePWcMcVqS+dNiiMNCNGT/PtsjGP1BTQ=
cloud.google.com/go/redis v1.18.1/go.mod h1:lZQIhkqbhlmqGlFws6yzxSt2qNrAsPDHozWYGvXywqM=
cloud.google.com/go/resourcemanager v1.10.5/go.mod h1:3h1p8//AxBksoqJR/sD5AeGKVuuhZi805WC9nGogRGE=
cloud.google.com/go/resourcesettings v1.8.3/go.mod h1:BzgfXFHIWOOmHe6ZV9+r3OWfpHJgnqXy8jqwx4zTMLw=
cloud.google.com/go/retail v1.19.3/go.mod h1:o34bfr78e/gDLbHeDp0jiXKkXK7onYCJc86qrTM4Pac=
cloud.google.com/go/run v1.9.2/go.mod h1:QD5H5hNuz900FYLQGtbMlA0dqZogy/Wj0xpLwTzK2+Q=
cloud.google.com/go/scheduler v1.11.6/go.mod h1:gb8qfU07hAyXXtwrKXs7nbc9ar/R8vNsaRHswZpgPyM=
cloud.google.com/go/secretmanager v1.14.6/go.mod h1:0OWeM3qpJ2n71MGgNfKsgjC/9LfVTcUqXFUlGxo5PzY=
cloud.google.com/go/security v1.18.4/go.mod h1:+oNVB34sloqG2K3IpoT2KUDgNAbAJ9A2uENjAUvgzRQ=
cloud.google.com/go/securitycenter v1.36.1/go.mod h1:SxE1r7Y5V9AVPa+DU0d+4QAOIJzcKglO3Vc4zvcQtPo=
cloud.google.com/go/servicedirectory v1.12.5/go.mod h1:v/sr/Z4lbZzJBSn5H7bObu8FKoS6NZZ0ysQ3gi0vMMM=
cloud.google.com/go/shell v1.8.5/go.mod h1:vuRxgLhy5pR9TZVqWvR/7lfSiMCLv6ucuoYDtQKKuJ8=
cloud.google.com/go/spanner v1.79.0/go.mod h1:224ub0ngSaiy7SJI7QZ1pu9zoVPt6CgfwDGBNhUUuzU=
cloud.google.com/go/speech v1.26.1/go.mod h1:YTt2qy3GFlzxNJmWj7aDEZjTqESvP2pWpExdOqtCQ6k=
cloud.google.com/go/storagetransfer v1.12.3/go.mod h1:JzyP1ymNdy+F0VjyVCKzuk1WjLJ1yZGhtXcBlzBkPjk=
cloud.google.com/go/talent v1.8.2/go.mod h1:SAIKGqmpKBCOf1LZLtL/7yzNqY2YTYHk0CgMlEWBXMY=
cloud.google.com/go/texttospeech v1.12.0/go.mod h1:BdrVnsA7LnGe9v+zY3nfNJ2veaqLFbpkpBz3U+jsY34=
cloud.google.com/go/tpu v1.8.2/go.mod h1:W/fW8HHjrzx1Ae5ahXiWnc/O0FNAQCbXdGdE7Hac3dc=
cloud.google.com/go/trace v1.11.5/go.mod h1:TwblCcqNInriu5/qzaeYEIH7wzUcchSdeY2l5wL3Eec=
cloud.google.com/go/translate v1.10.3/go.mod h1:GW0vC1qvPtd3pgtypCv4k4U8B7EdgK9/QEF2aJEUovs=
cloud.google.com/go/translate v1.12.4/go.mod h1:u3NmYPWGXeNVz94QYzdd8kI7Rvi3wyp2jsjN3qAciCY=
cloud.google.com/go/video v1.23.4/go.mod h1:G95szckwF/7LatG9fGfNXceMzLf7W0UhKTZi6zXKHPs=
cloud.google.com/go/videointelligence v1.12.5/go.mod h1:OFaZL0H53vQl/uyz/8gqXMJ5nr69RIC3ffPGJwKCNww=
cloud.google.com/go/vision/v2 v2.9.4/go.mod h1:VotOrCFm0DbWKU7KvtyuAm72okClHDoERxrgeeQNPN4=
cloud.google.com/go/vmmigration v1.8.5/go.mod h1:6/VVofjrSGi14/0ZcaoSoZcy9VHDhJ6fNFxnYAPxuLg=
cloud.google.com/go/vmwareengine v1.3.4/go.mod h1:2W2NdtnfEe/0rEKoDfGOpBPtbAAf9ZN/SecH1WwLX6w=
cloud.google.com/go/vpcaccess v1.8.5/go.mod h1:R/oMa0mkPbi5GuIascldW5g/IHXq9YX0TBxJyOzyy28=
cloud.google.com/go/webrisk v1.10.5/go.mod h1:Cd8ce1mCt1fbiufmVkHeZZlPGfe4LQVHw006MtBIxvk=
cloud.google.com/go/websecurityscanner v1.7.5/go.mod h1:QGRxdN0ihdyjwDPaLf96O+ks4u+SBG7/bPNs+fc+LR0=
cloud.google.com/go/workflows v1.14.0/go.mod h1:kjar2tf4qQu7VoCTFX+L3yy+2dIFTWr6R4i52DN6ySk=
filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA=
fyne.io/systray v1.11.0/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs=
github.com/99designs/gqlgen v0.17.36/go.mod h1:6RdyY8puhCoWAQVr2qzF2OMVfudQzc8ACxzpzluoQm4=
github.com/Abirdcfly/dupword v0.0.11/go.mod h1:wH8mVGuf3CP5fsBTkfWwwwKTjDnVVCxtU8d8rgeVYXA=
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU=
github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0=
github.com/Antonboom/errname v0.1.9/go.mod h1:nLTcJzevREuAsgTbG85UsuiWpMpAqbKD1HNZ29OzE58=
github.com/Antonboom/nilnil v0.1.4/go.mod h1:iOov/7gRcXkeEU+EMGpBu2ORih3iyVEiWjeste1SJm8=
github.com/AudriusButkevicius/recli v0.0.7-0.20220911121932-d000ce8fbf0f/go.mod h1:Nhfib1j/VFnLrXL9cHgA+/n2O6P5THuWelOnbfPNd78=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI=
github.com/DataDog/datadog-agent/comp/trace/compression/def v0.58.0/go.mod h1:samFXdP0HVSwD223LPLzcPKUjRQ6/uwr/1wMPo2HhRg=
github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip v0.58.0/go.mod h1:FTweq0EZjbOgeWgV7+3R1Zx9l2b9de7LwceYSNNdZvM=
github.com/DataDog/datadog-agent/comp/trace/compression/impl-zstd v0.58.0/go.mod h1:wIlhI+gwxKQzDQFr4PjvXXuKHUsx3QWY2TbwDv1yaDs=
github.com/DataDog/datadog-agent/pkg/util/cgroups v0.58.0/go.mod h1:XjTdv3Kb7EqpPnMlmmQK1MV6EFOArwoa6wSVB+P7TdU=
github.com/DataDog/datadog-agent/pkg/util/pointer v0.58.0/go.mod h1:t1DlnUEMltkvwPLc7zCtP1u5cBDu+30daR2VhQO5bvA=
github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/Djarvur/go-err113 v0.1.0/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0/go.mod h1:b3g59n2Y+T5xmcxJL+UEG2f8cQploZm1mR/v6BW0mU0=
github.com/GoogleCloudPlatform/gke-networking-api v0.1.2-0.20240904205008-bc15495fd43f/go.mod h1:YnoYXo/cwpqFmIXKblHOV5jFEpsSL3PZeo0zaR3oGTI=
github.com/GoogleCloudPlatform/k8s-cloud-provider v1.25.0/go.mod h1:UTfhBnADaj2rybPT049NScSh7Eall3u2ib43wmz3deg=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM=
github.com/IBM/sarama v1.43.1/go.mod h1:GG5q1RURtDNPz8xxJs3mgX6Ytak8Z9eLhAkJPObe2xE=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/Masterminds/vcs v1.13.3/go.mod h1:TiE7xuEjl1N4j016moRd6vezp6e6Lz23gypeXfzXeW8=
github.com/Microsoft/cosesign1go v1.2.0/go.mod h1:1La/HcGw19rRLhPW0S6u55K6LKfti+GQSgGCtrfhVe8=
github.com/Microsoft/didx509go v0.0.3/go.mod h1:wWt+iQsLzn3011+VfESzznLIp/Owhuj7rLF7yLglYbk=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/OpenPeeDeeP/depguard v1.1.1/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc=
github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/Shopify/sarama v1.38.1/go.mod h1:iwv9a67Ha8VNa+TifujYoWGxWnu2kNVAQdSdZ4X2o5g=
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
github.com/akavel/rsrc v0.10.2/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw=
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
github.com/alecthomas/kong v1.6.0/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU=
github.com/alecthomas/kong v1.10.0/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I=
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/ashanbrown/forbidigo v1.5.1/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU=
github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI=
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13/go.mod h1:gpAbvyDGQFozTEmlTFO8XcQKHzubdq0LzRyJpG6MiXM=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.64/go.mod h1:4Q7R9MFpXRdjO3YnAfUTdnuENs32WzBkASt6VxSYDYQ=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.3/go.mod h1:jYLMm3Dh0wbeV3lxth5ryks/O2M/omVXWyYm3YcEVqQ=
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.21.4/go.mod h1:aryF4jxgjhbqpdhj8QybUZI3xYrX8MQIKm4WbOv8Whg=
github.com/aws/aws-sdk-go-v2/service/ec2 v1.93.2/go.mod h1:VX22JN3HQXDtQ3uS4h4TtM+K11vydq58tpHTlsm8TL8=
github.com/aws/aws-sdk-go-v2/service/eventbridge v1.20.4/go.mod h1:XlbY5AGZhlipCdhRorT18/HEThKAxo51hMmhixreJoM=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.35/go.mod h1:YVHrksq36j0sbXCT6rSuQafpfYkMYqy0QTk7JTCTBIU=
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.7.34/go.mod h1:CDPcT6pljRaqz1yLsOgPUvOPOczFvXuJxOKzDzAbF0c=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.3/go.mod h1:TXBww3ANB+QRj+/dUoYDvI8d/u4F4WzTxD4mxtDoxrg=
github.com/aws/aws-sdk-go-v2/service/kinesis v1.18.4/go.mod h1:HnjgmL8TNmYtGcrA3N6EeCnDvlX6CteCdUbZ1wV8QWQ=
github.com/aws/aws-sdk-go-v2/service/s3 v1.33.0/go.mod h1:J9kLNzEiHSeGMyN7238EjJmBpCniVzFda75Gxl/NqB8=
github.com/aws/aws-sdk-go-v2/service/sfn v1.19.4/go.mod h1:uWCH4ATwNrkRO40j8Dmy7u/Y1/BVWgCM+YjBNYZeOro=
github.com/aws/aws-sdk-go-v2/service/sns v1.21.4/go.mod h1:bbB779DXXOnPXvB7F3dP7AjuV1Eyr7fNyrA058ExuzY=
github.com/aws/aws-sdk-go-v2/service/sqs v1.24.4/go.mod h1:c1AF/ac4k4xz32FprEk6AqqGFH/Fkub9VUPSrASlllA=
github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM=
github.com/bazelbuild/rules_go v0.44.2/go.mod h1:Dhcz716Kqg1RHNWos+N6MlXNkjNP2EwZQ0LukRKJfMs=
github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI=
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k=
github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo=
github.com/bradfitz/gomemcache v0.0.0-20230611145640-acc696258285/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
github.com/bramvdbogaerde/go-scp v1.4.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ=
github.com/breml/bidichk v0.2.4/go.mod h1:7Zk0kRFt1LIZxtQdl9W9JwGAcLTTkOs+tN7wuEYGJ3s=
github.com/breml/errchkjson v0.3.1/go.mod h1:XroxrzKjdiutFyW3nWhw34VGg7kiMsDQox73yWCGI2U=
github.com/butuzov/ireturn v0.2.0/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
github.com/bytedance/sonic v1.12.0/go.mod h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKzMzT9r/rk=
github.com/bytedance/sonic/loader v0.2.0/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/cavaliergopher/cpio v1.0.1/go.mod h1:pBdaqQjnvXxdS/6CvNDwIANIFSP0xRKI16PX4xejRQc=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ=
github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8/go.mod h1:gakxgyXaaPkxvLw1XQxNGK4I37ys9iBRzNUx/B7pUCo=
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
github.com/confluentinc/confluent-kafka-go v1.9.2/go.mod h1:ptXNqsuDfYbAE/LBW6pnwWZElUoWxHoV8E43DCrliyo=
github.com/confluentinc/confluent-kafka-go/v2 v2.4.0/go.mod h1:E1dEQy50ZLfqs7T9luxz0rLxaeFZJZE92XvApJOr/Rk=
github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
github.com/containerd/btrfs/v2 v2.0.0/go.mod h1:swkD/7j9HApWpzl8OHfrHNxppPd9l44DFZdF94BUj9k=
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc=
github.com/containerd/continuity v0.4.4/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM=
github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
github.com/containerd/imgcrypt v1.1.8/go.mod h1:x6QvFIkMyO2qGIY2zXc88ivEzcbgvLdWjoZyGqDap5U=
github.com/containerd/nri v0.8.0/go.mod h1:uSkgBrCdEtAiEz4vnrq8gmAC4EnVAM5Klt0OuK5rZYQ=
github.com/containerd/protobuild v0.3.0/go.mod h1:5mNMFKKAwCIAkFBPiOdtRx2KiQlyEJeMXnL5R1DsWu8=
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
github.com/containerd/zfs v1.1.0/go.mod h1:oZF9wBnrnQjpWLaPKEinrx3TQ9a+W/RJO7Zb41d8YLE=
github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4=
github.com/containers/ocicrypt v1.1.10/go.mod h1:YfzSSr06PTHQwSTUKqDSjish9BeW1E4HUmreluQcMd8=
github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc=
github.com/daixiang0/gci v0.10.1/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI=
github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps=
github.com/dave/astrid v0.0.0-20170323122508-8c2895878b14/go.mod h1:Sth2QfxfATb/nW4EsrSi2KyJmbcniZ8TgTaji17D6ms=
github.com/dave/brenda v1.1.0/go.mod h1:4wCUr6gSlu5/1Tk7akE5X7UorwiQ8Rij0SKH3/BGMOM=
github.com/dave/courtney v0.4.0/go.mod h1:3WSU3yaloZXYAxRuWt8oRyVb9SaRiMBt5Kz/2J227tM=
github.com/dave/patsy v0.0.0-20210517141501-957256f50cba/go.mod h1:qfR88CgEGLoiqDaE+xxDCi5QA5v4vUoW0UCX2Nd5Tlc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c=
github.com/denisenkom/go-mssqldb v0.11.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A=
github.com/dimfeld/httptreemux/v5 v5.5.0/go.mod h1:QeEylH57C0v3VO0tkKraVz9oD3Uu93CKPnTLbsidvSw=
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq40=
github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho=
github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/elastic/crd-ref-docs v0.0.12/go.mod h1:X83mMBdJt05heJUYiS3T0yJ/JkCuliuhSUNav5Gjo/U=
github.com/elastic/elastic-transport-go/v8 v8.1.0/go.mod h1:87Tcz8IVNe6rVSLdBux1o/PEItLtyabHU3naC7IoqKI=
github.com/elastic/go-elasticsearch/v6 v6.8.5/go.mod h1:UwaDJsD3rWLM5rKNFzv9hgox93HoX8utj1kxD9aFUcI=
github.com/elastic/go-elasticsearch/v7 v7.17.1/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4=
github.com/elastic/go-elasticsearch/v8 v8.4.0/go.mod h1:yY52i2Vj0unLz+N3Nwx1gM5LXwoj3h2dgptNGBYkMLA=
github.com/emicklei/go-restful v2.16.0+incompatible h1:rgqiKNjTnFQA6kkhFe16D8epTksy9HQ1MyrbDXSdYhM=
github.com/emicklei/go-restful v2.16.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0=
github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
github.com/evanw/esbuild v0.19.11/go.mod h1:D2vIQZqV/vIf/VRHtViaUtViZmG7o+kKmlBfVQuRi48=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI=
github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI=
github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/garyburd/redigo v1.6.4/go.mod h1:rTb6epsqigu3kYKBnaF028A7Tf/Aw5s0cqA47doKKqw=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/go-chi/chi v1.5.4/go.mod h1:uaf8YgoFazUOkPBG7fxPftUylNumIev9awIWOENIuEg=
github.com/go-chi/chi/v5 v5.0.10/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/go-critic/go-critic v0.8.0/go.mod h1:5TjdkPI9cu/yKbYS96BTsslihjKd6zg6vd8O9RZXj2s=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-pg/pg/v10 v10.11.1/go.mod h1:ExJWndhDNNftBdw1Ow83xqpSf4WMSJK8urmXD5VXS1I=
github.com/go-pg/zerochecker v0.2.0/go.mod h1:NJZ4wKL0NmTtz0GKCoJ8kym6Xn/EQzXRl2OnAe7MmDo=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.15.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-redis/redis/v7 v7.4.1/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU=
github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw=
github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ=
github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4=
github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA=
github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ=
github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig=
github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-yaml v1.12.0/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
github.com/gocql/gocql v1.6.0/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8=
github.com/godror/godror v0.40.4/go.mod h1:i8YtVTHUJKfFT3wTat4A9UoqScUtZXiYB9Rf3SVARgc=
github.com/godror/knownpb v0.1.1/go.mod h1:4nRFbQo1dDuwKnblRXDxrfCFYeT4hjg3GjMqef58eRE=
github.com/gofiber/fiber/v2 v2.52.5/go.mod h1:KEOE+cXMhXG0zHc9d8+E38hoX+ZN7bhOtgeF2oT6jrQ=
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/mock v1.7.0-rc.1/go.mod h1:s42URUywIqd+OcERslBJvOjepvNymP31m3q8d/GkuRs=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ=
github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs=
github.com/golangci/golangci-lint v1.52.2/go.mod h1:S5fhC5sHM5kE22/HcATKd1XLWQxX+y7mHj8B5H91Q/0=
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
github.com/golangci/misspell v0.4.0/go.mod h1:W6O/bwV6lGDxUCChm2ykw9NQdd5bYd1Xkjo88UcWyJc=
github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs=
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE=
github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
github.com/google/go-containerregistry v0.20.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
github.com/google/go-github/v56 v56.0.0/go.mod h1:D8cdcX98YWJvi7TLo7zM4/h8ZTx6u6fwGEkCdisopo0=
github.com/google/go-pkcs11 v0.3.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY=
github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2/go.mod h1:nOFQdrUlIlx6M6ODdSpBj1NVA+VgLC6kmw60mkw34H4=
github.com/google/rpmpack v0.5.0/go.mod h1:uqVAUVQLq8UY2hCDfmJ/+rtO3aw7qyhc90rCVEabEfI=
github.com/google/subcommands v1.0.2-0.20190508160503-636abe8753b8/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
github.com/goreleaser/chglog v0.5.0/go.mod h1:Ri46M3lrMuv76FHszs3vtABR8J8k1w9JHYAzxeeOl28=
github.com/goreleaser/fileglob v1.3.0/go.mod h1:Jx6BoXv3mbYkEzwm9THo7xbr5egkAraxkGorbJb4RxU=
github.com/goreleaser/nfpm/v2 v2.33.1/go.mod h1:8wwWWvJWmn84xo/Sqiv0aMvEGTHlHZTXTEuVSgQpkIM=
github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk=
github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc=
github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM=
github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak=
github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A=
github.com/graph-gophers/graphql-go v1.5.0/go.mod h1:YtmJZDLbF1YYNrlNAuiO5zAStUWc3XZT07iGsVqe1Os=
github.com/graphql-go/graphql v0.8.1/go.mod h1:nKiHzRM0qopJEwCITUuIsxk9PlVlwIiiI8pnJEhordQ=
github.com/graphql-go/handler v0.2.3/go.mod h1:leLF6RpV5uZMN1CdImAxuiayrYYhOk33bZciaUGaXeU=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hanwen/go-fuse/v2 v2.3.0/go.mod h1:xKwi1cF7nXAOBCXujD5ie0ZKsxc8GGSA1rlMJc+8IJs=
github.com/hashicorp/consul/api v1.24.0/go.mod h1:NZJGRFYruc/80wYowkPFCp1LbGmJC9L8izrwfyVx/Wg=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8=
github.com/hashicorp/vault/sdk v0.9.2/go.mod h1:gG0lA7P++KefplzvcD3vrfCmgxVAM7Z/SqX5NeOL/98=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inetaf/tcpproxy v0.0.0-20240214030015-3ce58045626c/go.mod h1:Di7LXRyUcnvAcLicFhtM9/MlZl/TNgRSDHORM2c6CMI=
github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI=
github.com/intel/goresctrl v0.5.0/go.mod h1:mIe63ggylWYr0cU/l8n11FAkesqfvuP3oktIsxvu0T0=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4=
github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc=
github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/josephspurrier/goversioninfo v1.4.0/go.mod h1:JWzv5rKQr+MmW+LvM412ToT/IkYDZjaclF2pKDss8IY=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE=
github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
github.com/junk1tm/musttag v0.5.0/go.mod h1:PcR7BA+oREQYvHwgjIDmw3exJeds5JzRcvEJTfjrA0M=
github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/kisielk/errcheck v1.6.3/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw=
github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU=
github.com/knadh/koanf/v2 v2.0.2/go.mod h1:HN9uZ+qFAejH1e4G41gnoffIanINWQuONLXiV7kir6k=
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I=
github.com/kunwardeep/paralleltest v1.0.6/go.mod h1:Y0Y0XISdZM5IKm3TREQMZ6iteqn1YuwCsJO/0kL9Zes=
github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA=
github.com/labstack/echo v3.3.10+incompatible/go.mod h1:0INS7j/VjnFxD4E2wkz67b8cVwCLbBmJyDaka6Cmk1s=
github.com/labstack/echo/v4 v4.11.1/go.mod h1:YuYRTSM3CHs2ybfrL8Px48bO6BAnYIN4l8wSTMP6BDQ=
github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0=
github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY=
github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU=
github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4=
github.com/lestrrat-go/jwx v1.2.29/go.mod h1:hU8k2l6WF0ncx20uQdOmik/Gjg6E3/wIRtXSNFeZuB8=
github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM=
github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJooIbHWCufXvOcT6E7Stq81gU+CSQ=
github.com/lxn/win v0.0.0-20210218163916-a377121e959e/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk=
github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE=
github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc=
github.com/maruel/panicparse/v2 v2.4.0/go.mod h1:nOY2OKe8csO3F3SA5+hsxot05JLgukrF54B9x88fVp4=
|
||||
github.com/maruel/panicparse/v2 v2.5.0/go.mod h1:DA2fDiBk63bKfBf4CVZP9gb4fuvzdPbLDsSI873hweQ=
|
||||
github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
|
||||
github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI=
|
||||
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
|
||||
github.com/maxbrunsfeld/counterfeiter/v6 v6.8.1/go.mod h1:eyp4DdUJAKkr9tvxR3jWhw2mDK7CWABMG5r9uyaKC7I=
|
||||
github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2/go.mod h1:VzB2VoMh1Y32/QqDfg9ZJYHj99oM4LiGtqPZydTiQSQ=
|
||||
github.com/maxmind/geoipupdate/v6 v6.1.0/go.mod h1:cZYCDzfMzTY4v6dKRdV7KTB6SStxtn3yFkiJ1btTGGc=
|
||||
github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
|
||||
github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o=
|
||||
github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE=
|
||||
github.com/mgechev/revive v1.3.1/go.mod h1:YlD6TTWl2B8A103R9KWJSPVI9DrEf+oqr15q21Ld+5I=
|
||||
github.com/microsoft/go-mssqldb v0.21.0/go.mod h1:+4wZTUnz/SV6nffv+RRRB/ss8jPng5Sho2SmM1l2ts4=
|
||||
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
|
||||
github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
|
||||
github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4=
|
||||
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
|
||||
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
|
||||
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
|
||||
github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
|
||||
github.com/mohae/deepcopy v0.0.0-20170308212314-bb9b5e7adda9/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
|
||||
github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
|
||||
github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE=
|
||||
github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM=
|
||||
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
|
||||
github.com/nelsam/hel/v2 v2.3.3/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w=
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
|
||||
github.com/nishanths/exhaustive v0.10.0/go.mod h1:IbwrGdVMizvDcIxPYGVdQn5BqWJaOwpCvg4RGb8r/TA=
|
||||
github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c=
|
||||
github.com/nunnatsa/ginkgolinter v0.11.2/go.mod h1:dJIGXYXbkBswqa/pIzG0QlVTTDSBMxDoCFwhsl4Uras=
|
||||
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
||||
github.com/open-policy-agent/opa v0.68.0/go.mod h1:5E5SvaPwTpwt2WM177I9Z3eT7qUpmOGjk1ZdHs+TZ4w=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.104.0/go.mod h1:4bLfc6BnVKRp3yY+ueEUEeyNWjW/InCGbFs9ZA7o/ko=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.104.0/go.mod h1:I2so4Vn+ROaCECo0bdQXNxyUjY9tbq1JvcyuWPETLcM=
|
||||
github.com/opencontainers/runc v1.1.14/go.mod h1:E4C2z+7BxR7GHXp0hAY53mek+x49X1LjPNeMTfRGvOA=
|
||||
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI=
|
||||
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
|
||||
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ=
|
||||
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
|
||||
github.com/polyfloyd/go-errorlint v1.4.1/go.mod h1:k6fU/+fQe38ednoZS51T7gSIGQW1y94d6TkSr35OzH8=
|
||||
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
|
||||
github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI=
|
||||
github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff/go.mod h1:FvE8dtQ1Ww63IlyKBn1V4s+zMwF9kHkVNkQBR1pM4CU=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.4.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/quasilyte/go-ruleguard v0.3.19/go.mod h1:lHSn69Scl48I7Gt9cX3VrbsZYvYiBYszZOZW4A+oTEw=
|
||||
github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng=
|
||||
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
|
||||
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ=
|
||||
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
|
||||
github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/riywo/loginshell v0.0.0-20200815045211-7d26008be1ab/go.mod h1:/PfPXh0EntGc3QAAyUaviy4S9tzy4Zp0e2ilq4voC6E=
|
||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww=
|
||||
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
|
||||
github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50=
|
||||
github.com/ryanrolds/sqlclosecheck v0.4.0/go.mod h1:TBRRjzL31JONc9i4XMinicuo+s+E8yKZ5FN8X3G6CKQ=
|
||||
github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs=
|
||||
github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
|
||||
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
|
||||
github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ=
|
||||
github.com/sashamelentyev/usestdlibvars v1.23.0/go.mod h1:YPwr/Y1LATzHI93CqoPUN/2BzGQ/6N/cl/KwgR0B/aU=
|
||||
github.com/securego/gosec/v2 v2.15.0/go.mod h1:VOjTrZOkUtSDt2QLSJmQBMWnvwiQPEjg0l+5juIqGk8=
|
||||
github.com/segmentio/kafka-go v0.4.42/go.mod h1:d0g15xPMqoUookug0OU75DhGZxXwCFxSLeJ4uphwJzg=
|
||||
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
|
||||
github.com/shirou/gopsutil/v4 v4.25.3/go.mod h1:xbuxyoZj+UsgnZrENu3lQivsngRR5BdjbJwf2fv4szA=
|
||||
github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4=
|
||||
github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY=
|
||||
github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg=
|
||||
github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
|
||||
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
|
||||
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
||||
github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo=
|
||||
github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
|
||||
github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
|
||||
github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I=
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M=
|
||||
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
|
||||
github.com/studio-b12/gowebdav v0.9.0/go.mod h1:bHA7t77X/QFExdeAnDzK6vKM34kEZAcE1OX4MfiwjkE=
|
||||
github.com/syncthing/notify v0.0.0-20250207082249-f0fa8f99c2bc/go.mod h1:J0q59IWjLtpRIJulohwqEZvjzwOfTEPp8SVhDJl+y0Y=
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk=
|
||||
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e/go.mod h1:XrBNfAFN+pwoWuksbFS9Ccxnopa15zJGgXRFN90l3K4=
|
||||
github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8=
|
||||
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg=
|
||||
github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41/go.mod h1:/roCdA6gg6lQyw/Oz6gIIGu3ggJKYhF+WC/AQReE5XQ=
|
||||
github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ=
|
||||
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8=
|
||||
github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8=
|
||||
github.com/tailscale/mkctr v0.0.0-20240628074852-17ca944da6ba/go.mod h1:DxnqIXBplij66U2ZkL688xy07q97qQ83P+TVueLiHq4=
|
||||
github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU=
|
||||
github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
|
||||
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y=
|
||||
github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4=
|
||||
github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg=
|
||||
github.com/tc-hib/winres v0.2.1/go.mod h1:C/JaNhH3KBvhNKVbvdlDWkbMDO9H4fKKDaN7/07SSuk=
|
||||
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
|
||||
github.com/tcnksm/go-httpstat v0.2.0/go.mod h1:s3JVJFtQxtBEBC9dwcdTTXS9xFnM3SXAZwPG41aurT8=
|
||||
github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg=
|
||||
github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
|
||||
github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
|
||||
github.com/tidwall/buntdb v1.3.0/go.mod h1:lZZrZUWzlyDJKlLQ6DKAy53LnG7m5kHyrEHvvcDmBpU=
|
||||
github.com/tidwall/gjson v1.16.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/grect v0.1.4/go.mod h1:9FBsaYRaR0Tcy4UwefBX/UDcDcDy9V5jUcxHzv2jd5Q=
|
||||
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/rtred v0.1.2/go.mod h1:hd69WNXQ5RP9vHd7dqekAz+RIdtfBogmglkZSRxCHFQ=
|
||||
github.com/tidwall/tinyqueue v0.1.1/go.mod h1:O/QNHwrnjqr6IHItYrzoHAKYhBkLI67Q096fQP5zMYw=
|
||||
github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ=
|
||||
github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
|
||||
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs=
|
||||
github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE=
|
||||
github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
|
||||
github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM=
|
||||
github.com/twitchtv/twirp v8.1.3+incompatible/go.mod h1:RRJoFSAmTEh2weEqWtpPE3vFK5YBhA6bqp2l1kfCC5A=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI=
|
||||
github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264=
|
||||
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||
github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
|
||||
github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
|
||||
github.com/uptrace/bun v1.1.17/go.mod h1:hATAzivtTIRsSJR4B8AXR+uABqnQxr3myKDKEf5iQ9U=
|
||||
github.com/uptrace/bun/dialect/sqlitedialect v1.1.17/go.mod h1:YF0FO4VVnY9GHNH6rM4r3STlVEBxkOc6L88Bm5X5mzA=
|
||||
github.com/urfave/cli v1.22.16/go.mod h1:EeJR6BKodywf4zciqrdw6hpCPk68JO9z5LazXZMn5Po=
|
||||
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
|
||||
github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY=
|
||||
github.com/valkey-io/valkey-go v1.0.52/go.mod h1:BXlVAPIL9rFQinSFM+N32JfWzfCaUAqBpZkc4vPY6fM=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.51.0/go.mod h1:oI2XroL+lI7vdXyYoQk03bXBThfFl2cVdIA3Xl7cH8g=
|
||||
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
|
||||
github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
|
||||
github.com/vektah/gqlparser/v2 v2.5.16/go.mod h1:1lz1OeCqgQbQepsGxPVywrjdBHW2T08PUS3pJqepRww=
|
||||
github.com/veraison/go-cose v1.1.0/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4=
|
||||
github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
|
||||
github.com/vmihailenco/bufpool v0.1.11/go.mod h1:AFf/MOy3l2CFTKbxwt0mp2MwnqjNEs5H/UxrkA5jxTQ=
|
||||
github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
|
||||
github.com/willabides/kongplete v0.4.0/go.mod h1:0P0jtWD9aTsqPSUAl4de35DLghrr57XcayPyvqSi2X8=
|
||||
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
|
||||
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
|
||||
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
|
||||
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
|
||||
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk=
|
||||
github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
|
||||
github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA=
|
||||
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
|
||||
github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
|
||||
github.com/zenazn/goji v1.0.1/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
|
||||
gitlab.com/bosi/decorder v0.2.3/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE=
|
||||
gitlab.com/digitalxero/go-conventional-commit v1.0.7/go.mod h1:05Xc2BFsSyC5tKhK0y+P3bs0AwUtNuTp+mTpbCU/DZ0=
|
||||
go.einride.tech/aip v0.66.0/go.mod h1:qAhMsfT7plxBX+Oy7Huol6YUvZ0ZzdUz26yZsQwfl1M=
|
||||
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
|
||||
go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
|
||||
go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
|
||||
go.mongodb.org/mongo-driver v1.12.1/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ=
|
||||
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/collector v0.104.0 h1:R3zjM4O3K3+ttzsjPV75P80xalxRbwYTURlK0ys7uyo=
|
||||
go.opentelemetry.io/collector v0.104.0/go.mod h1:Tm6F3na9ajnOm6I5goU9dURKxq1fSBK1yA94nvUix3k=
|
||||
go.opentelemetry.io/collector/confmap v0.94.1/go.mod h1:pCT5UtcHaHVJ5BIILv1Z2VQyjZzmT9uTdBmC9+Z0AgA=
|
||||
go.opentelemetry.io/collector/consumer v0.104.0/go.mod h1:60zcIb0W9GW0z9uJCv6NmjpSbCfBOeRUyrtEwqK6Hzo=
|
||||
go.opentelemetry.io/collector/pdata/testdata v0.104.0/go.mod h1:3SnYKu8gLfxURJMWS/cFEUFs+jEKS6jvfqKXnOZsdkQ=
|
||||
go.opentelemetry.io/collector/processor v0.104.0/go.mod h1:qU2/xCCYdvVORkN6aq0H/WUWkvo505VGYg2eOwPvaTg=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4=
|
||||
golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/exp/typeparams v0.0.0-20240119083558-1b970713d09a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E=
|
||||
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0=
|
||||
golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
|
||||
google.golang.org/genproto v0.0.0-20240325203815-454cdb8f5daa h1:ePqxpG3LVx+feAUOx8YmR5T7rc0rdzK8DyxM8cQ9zq0=
|
||||
google.golang.org/genproto v0.0.0-20240325203815-454cdb8f5daa/go.mod h1:CnZenrTdRJb7jc+jOm0Rkywq+9wh0QC4U8tyiRbEPPM=
|
||||
google.golang.org/genproto v0.0.0-20240924160255-9d4c2d233b61 h1:KipVMxePgXPFBzXOvpKbny3RVdVmJOD64R/Ob7GPWEs=
|
||||
google.golang.org/genproto v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:HiAZQz/G7n0EywFjmncAwsfnmFm2bjm7qPjwl8hyzjM=
|
||||
google.golang.org/genproto/googleapis/bytestream v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:WkJpQl6Ujj3ElX4qZaNm5t6cT95ffI4K+HKQ0+1NyMw=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1/go.mod h1:5KF+wpkbTSbGcR9zteSqZV6fqFOWBl4Yde8En8MryZA=
|
||||
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/jinzhu/gorm.v1 v1.9.2/go.mod h1:56JJPUzbikvTVnoyP1nppSkbJ2L8sunqTBDY2fDrmFg=
|
||||
gopkg.in/olivere/elastic.v3 v3.0.75/go.mod h1:yDEuSnrM51Pc8dM5ov7U8aI/ToR3PG0llA8aRv2qmw0=
|
||||
gopkg.in/olivere/elastic.v5 v5.0.84/go.mod h1:LXF6q9XNBxpMqrcgax95C6xyARXWbbCXUrtTxrNrxJI=
|
||||
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gorm.io/driver/mysql v1.0.1/go.mod h1:KtqSthtg55lFp3S5kUXqlGaelnWpKitn4k1xZTnoiPw=
|
||||
gorm.io/driver/postgres v1.4.6/go.mod h1:UJChCNLFKeBqQRE+HrkFUbKbq9idPXmTOk2u4Wok8S4=
|
||||
gorm.io/driver/sqlserver v1.4.2/go.mod h1:XHwBuB4Tlh7DqO0x7Ema8dmyWsQW7wi38VQOAFkrbXY=
|
||||
gorm.io/gorm v1.25.3/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
|
||||
honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs=
|
||||
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
||||
k8s.io/cloud-provider v0.32.2/go.mod h1:2s8TeAXhVezp5VISaTxM6vW3yDonOZXoN4Aryz1p1PQ=
|
||||
k8s.io/code-generator v0.32.3/go.mod h1:+mbiYID5NLsBuqxjQTygKM/DAdKpAjvBzrJd64NU1G8=
|
||||
k8s.io/component-helpers v0.32.3/go.mod h1:utTBXk8lhkJewBKNuNf32Xl3KT/0VV19DmiXU/SV4Ao=
|
||||
k8s.io/controller-manager v0.32.2/go.mod h1:o5uo2tLCQhuoMt0RfKcQd0eqaNmSKOKiT+0YELCqXOk=
|
||||
k8s.io/cri-api v0.27.1/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0=
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
|
||||
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
|
||||
k8s.io/kms v0.32.3/go.mod h1:Bk2evz/Yvk0oVrvm4MvZbgq8BD34Ksxs2SRHn4/UiOM=
|
||||
k8s.io/kube-controller-manager v0.32.2/go.mod h1:x7998ZLC+2lYnoizUwvVtHVPuoLeb7BhQEneeiNyVOg=
|
||||
k8s.io/kubelet v0.32.2/go.mod h1:cC1ms5RS+lu0ckVr6AviCQXHLSPKEBC3D5oaCBdTGkI=
|
||||
k8s.io/kubernetes v1.32.2/go.mod h1:tiIKO63GcdPRBHW2WiUFm3C0eoLczl3f7qi56Dm1W8I=
|
||||
k8s.io/metrics v0.32.3/go.mod h1:9R1Wk5cb+qJpCQon9h52mgkVCcFeYxcY+YkumfwHVCU=
|
||||
mellium.im/sasl v0.3.1/go.mod h1:xm59PUYpZHhgQ9ZqoJ5QaCqzWMi8IeS49dhp6plPCzw=
|
||||
mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js=
|
||||
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
|
||||
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
|
||||
mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8/go.mod h1:Oh/d7dEtzsNHGOq1Cdv8aMm3KdKhVvPbRQcM8WFpBR8=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.1/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
|
||||
sigs.k8s.io/controller-tools v0.15.1-0.20240618033008-7824932b0cab/go.mod h1:egedX5jq2KrZ3A2zaOz3e2DSsh5BhFyyjvNcBRIQel8=
|
||||
sigs.k8s.io/controller-tools v0.17.2/go.mod h1:4q5tZG2JniS5M5bkiXY2/potOiXyhoZVw/U48vLkXk0=
|
||||
sigs.k8s.io/kustomize/kustomize/v5 v5.5.0/go.mod h1:AeFCmgCrXzmvjWWaeZCyBp6XzG1Y0w1svYus8GhJEOE=
|
||||
software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
|
||||
tags.cncf.io/container-device-interface v0.8.1/go.mod h1:Apb7N4VdILW0EVdEMRYXIDVRZfNJZ+kmEUss2kRRQ6Y=
|
||||
tags.cncf.io/container-device-interface/specs-go v0.8.0/go.mod h1:BhJIkjjPh4qpys+qm4DAYtUyryaTDg9zris+AczXyws=
|
||||
install.sh (new executable file, 235 lines)
@@ -0,0 +1,235 @@
#!/bin/sh

# KubeVPN installation script
# This script installs KubeVPN CLI to your system
# Created for https://github.com/kubenetworks/kubevpn
# curl -fsSL https://kubevpn.dev/install.sh | sh

set -e

# Colors and formatting
YELLOW='\033[0;33m'
GREEN='\033[0;32m'
RED='\033[0;31m'
BLUE='\033[0;34m'
BOLD='\033[1m'
RESET='\033[0m'

# Installation configuration
INSTALL_DIR=${INSTALL_DIR:-"/usr/local/bin"}
GITHUB_REPO="kubenetworks/kubevpn"
GITHUB_URL="https://github.com/${GITHUB_REPO}"
VERSION_URL="https://raw.githubusercontent.com/kubenetworks/kubevpn/refs/heads/master/plugins/stable.txt"
ZIP_FILE="kubevpn.zip"

log() {
    echo "${BLUE}${BOLD}==> ${RESET}$1"
}

success() {
    echo "${GREEN}${BOLD}==> $1${RESET}"
}

warn() {
    echo "${YELLOW}${BOLD}==> $1${RESET}"
}

error() {
    echo "${RED}${BOLD}==> $1${RESET}"
}

get_system_info() {
    OS=$(uname | tr '[:upper:]' '[:lower:]')
    log "Detected OS: ${OS}"

    case $OS in
    linux | darwin) ;;
    msys_nt | msys | cygwin)
        error "Windows is not supported, please install KubeVPN manually using scoop. More info: ${GITHUB_URL}"
        exit 1
        ;;
    *)
        error "Unsupported operating system: ${OS}"
        exit 1
        ;;
    esac

    ARCH=$(uname -m)
    case $ARCH in
    x86_64)
        ARCH="amd64"
        ;;
    aarch64 | arm64)
        ARCH="arm64"
        ;;
    i386 | i686)
        ARCH="386"
        ;;
    *)
        error "Unsupported architecture: ${ARCH}"
        exit 1
        ;;
    esac
    log "Detected architecture: ${ARCH}"
}

check_requirements() {
    if command -v curl >/dev/null 2>&1; then
        DOWNLOADER="curl"
    elif command -v wget >/dev/null 2>&1; then
        DOWNLOADER="wget"
    else
        error "Either curl or wget is required for installation"
        exit 1
    fi

    if ! command -v unzip >/dev/null 2>&1; then
        error "unzip is required but not installed"
        exit 1
    fi

    if [ ! -d "$INSTALL_DIR" ]; then
        log "Installation directory $INSTALL_DIR does not exist, attempting to create it"
        if ! mkdir -p "$INSTALL_DIR" 2>/dev/null; then
            if ! command -v sudo >/dev/null 2>&1 && ! command -v su >/dev/null 2>&1; then
                error "Cannot create $INSTALL_DIR and neither sudo nor su is available"
                exit 1
            fi
        fi
    fi

    if [ ! -w "$INSTALL_DIR" ] && ! command -v sudo >/dev/null 2>&1 && ! command -v su >/dev/null 2>&1; then
        error "No write permission to $INSTALL_DIR and neither sudo nor su is available"
        exit 1
    fi
}

get_latest_version() {
    log "Fetching the latest release version..."

    if [ "$DOWNLOADER" = "curl" ]; then
        VERSION=$(curl -s "$VERSION_URL")
    else
        VERSION=$(wget -qO- "$VERSION_URL")
    fi

    if [ -z "$VERSION" ]; then
        error "Could not determine the latest version"
        exit 1
    fi

    VERSION=$(echo "$VERSION" | tr -d 'v' | tr -d '\n')
    success "Latest version: ${VERSION}"
}

download_binary() {
    DOWNLOAD_URL="$GITHUB_URL/releases/download/v${VERSION}/kubevpn_v${VERSION}_${OS}_${ARCH}.zip"

    log "Downloading KubeVPN binary from $DOWNLOAD_URL"

    if [ "$DOWNLOADER" = "curl" ]; then
        curl -L -o "$ZIP_FILE" "$DOWNLOAD_URL" || {
            error "Failed to download KubeVPN"
            exit 1
        }
    else
        wget -O "$ZIP_FILE" "$DOWNLOAD_URL" || {
            error "Failed to download KubeVPN"
            exit 1
        }
    fi
}

install_binary() {
    log "Installing KubeVPN..."

    TMP_DIR=$(mktemp -d)
    BINARY="$TMP_DIR/bin/kubevpn"
    unzip -o -q "$ZIP_FILE" -d "$TMP_DIR"

    if [ -f "$TMP_DIR/checksums.txt" ]; then
        EXPECTED_CHECKSUM=$(cat "$TMP_DIR/checksums.txt" | awk '{print $1}')

        if command -v shasum >/dev/null 2>&1; then
            ACTUAL_CHECKSUM=$(shasum -a 256 "$BINARY" | awk '{print $1}')
        elif command -v sha256sum >/dev/null 2>&1; then
            ACTUAL_CHECKSUM=$(sha256sum "$BINARY" | awk '{print $1}')
        else
            warn "No checksum tool available, skipping verification"
            ACTUAL_CHECKSUM=$EXPECTED_CHECKSUM
        fi

        [ "$ACTUAL_CHECKSUM" = "$EXPECTED_CHECKSUM" ] || {
            error "Checksum verification failed (Expected: $EXPECTED_CHECKSUM, Got: $ACTUAL_CHECKSUM)"
            # Clean up
            rm -rf "$TMP_DIR"
            rm -f "$ZIP_FILE"
            exit 1
        }
    fi

    # Check if we need sudo
    if [ -w "$INSTALL_DIR" ]; then
        mv "$BINARY" "$INSTALL_DIR/kubevpn"
        chmod +x "$INSTALL_DIR/kubevpn"
    else
        warn "Elevated permissions required to install to $INSTALL_DIR"
        if command -v sudo >/dev/null 2>&1; then
            sudo mv "$BINARY" "$INSTALL_DIR/kubevpn"
            sudo chmod +x "$INSTALL_DIR/kubevpn"
        else
            su -c "mv \"$BINARY\" \"$INSTALL_DIR/kubevpn\" && chmod +x \"$INSTALL_DIR/kubevpn\""
        fi
    fi

    # Clean up
    rm -f "$ZIP_FILE"
    rm -rf "$TMP_DIR"
}

verify_installation() {
    if [ -x "$INSTALL_DIR/kubevpn" ]; then
        VERSION_OUTPUT=$("$INSTALL_DIR/kubevpn" version 2>&1 || echo "unknown")
        success "KubeVPN installed successfully"
        log "$VERSION_OUTPUT"
        log "KubeVPN has been installed to: $INSTALL_DIR/kubevpn"

        # Check if the installed binary is in PATH
        if command -v kubevpn >/dev/null 2>&1; then
            FOUND_PATH=$(command -v kubevpn)
            if [ "$FOUND_PATH" != "$INSTALL_DIR/kubevpn" ]; then
                warn "Another kubevpn binary was found in your PATH at: $FOUND_PATH"
                warn "Make sure $INSTALL_DIR is in your PATH to use the newly installed version"
            fi
        else
            warn "Make sure $INSTALL_DIR is in your PATH to use kubevpn"
        fi

        echo ""
        log "To connect to a Kubernetes cluster:"
        if [ "$FOUND_PATH" != "$INSTALL_DIR/kubevpn" ]; then
            echo " $INSTALL_DIR/kubevpn connect"
        else
            echo " kubevpn connect"
        fi
        echo ""
        log "For more information, visit:"
        echo " $GITHUB_URL"
        success "Done! enjoy KubeVPN 🚀"
    else
        error "KubeVPN installation failed"
        exit 1
    fi
}

main() {
    log "Starting KubeVPN installation..."
    get_system_info
    check_requirements
    get_latest_version
    download_binary
    install_binary
    verify_installation
}

main
@@ -2,8 +2,6 @@ package config

import (
    "net"
    "os"
    "path/filepath"
    "sync"
    "time"

@@ -14,46 +12,60 @@ const (

    // configmap name
    ConfigMapPodTrafficManager = "kubevpn-traffic-manager"

    // helm app name kubevpn
    HelmAppNameKubevpn = "kubevpn"

    // default installed namespace
    DefaultNamespaceKubevpn = "kubevpn"

    // config map keys
    KeyDHCP = "DHCP"
    KeyDHCP6 = "DHCP6"
    KeyEnvoy = "ENVOY_CONFIG"
    KeyClusterIPv4POOLS = "IPv4_POOLS"
    KeyRefCount = "REF_COUNT"

    // secret keys
    // TLSCertKey is the key for tls certificates in a TLS secret.
    TLSCertKey = "tls_crt"
    // TLSPrivateKeyKey is the key for the private key field in a TLS secret.
    TLSPrivateKeyKey = "tls_key"
    // TLSServerName for tls config server name
    TLSServerName = "tls_server_name"

    // container name
    ContainerSidecarEnvoyProxy = "envoy-proxy"
    ContainerSidecarControlPlane = "control-plane"
    ContainerSidecarWebhook = "webhook"
    ContainerSidecarVPN = "vpn"
    ContainerSidecarSyncthing = "syncthing"

    VolumeEnvoyConfig = "envoy-config"
    VolumeSyncthing = "syncthing"

    innerIPv4Pool = "223.254.0.100/16"
    // Reason: in a Docker environment, the Docker gateway and subnet must not conflict with the inner pool, nor with Docker's own 172.17 network;
    // otherwise requests will not get through.
    // Problem solved: in k8s, the service named kubernetes has this service IP:
    // ➜ ~ kubectl get service kubernetes
    //NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
    //kubernetes   ClusterIP   172.17.0.1   <none>        443/TCP   190d
    //
    // ➜ ~ docker network inspect bridge | jq '.[0].IPAM.Config'
    //[
    //  {
    //    "Subnet": "172.17.0.0/16",
    //    "Gateway": "172.17.0.1"
    //  }
    //]
    // Without creating a dedicated docker network, this kubernetes service cannot be reached.
    dockerInnerIPv4Pool = "223.255.0.100/16"
    // innerIPv4Pool is used as the tun ip
    // The 198.19.0.0/16 network is part of 198.18.0.0/15 (reserved for benchmarking).
    // https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
    // so we split it into 2 parts: 198.18.0.0/15 --> [198.18.0.0/16, 198.19.0.0/16]
    innerIPv4Pool = "198.19.0.100/16"
    /*
       reason: docker uses the 172.17.0.0/16 network, which conflicts with the k8s service kubernetes
       ➜ ~ kubectl get service kubernetes
       NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
       kubernetes   ClusterIP   172.17.0.1   <none>        443/TCP   190d

    //The IPv6 address prefixes FE80::/10 and FF02::/16 are not routable
    innerIPv6Pool = "efff:ffff:ffff:ffff:ffff:ffff:ffff:9999/64"
       ➜ ~ docker network inspect bridge | jq '.[0].IPAM.Config'
       [
         {
           "Subnet": "172.17.0.0/16",
           "Gateway": "172.17.0.1"
         }
       ]
    */
    dockerInnerIPv4Pool = "198.18.0.100/16"

    // 2001:2::/64 network is part of the 2001:2::/48 (reserved for benchmarking)
    // https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml
    innerIPv6Pool = "2001:2::9999/64"

    DefaultNetDir = "/etc/cni/net.d"

@@ -68,14 +80,8 @@ const (

    EnvPodNamespace = "POD_NAMESPACE"

    // header name
    HeaderPodName = "POD_NAME"
    HeaderPodNamespace = "POD_NAMESPACE"
    HeaderIPv4 = "IPv4"
    HeaderIPv6 = "IPv6"

    // api
    APIRentIP = "/rent/ip"
    APIReleaseIP = "/release/ip"
    HeaderIPv4 = "IPv4"
    HeaderIPv6 = "IPv6"

    KUBECONFIG = "kubeconfig"

@@ -83,30 +89,26 @@ const (

    ManageBy = konfig.ManagedbyLabelKey

    // pprof port
    PProfPort = 32345
    PProfPort = 32345
    SudoPProfPort = 33345
    PProfDir = "pprof"

    // startup by KubeVPN
    EnvStartSudoKubeVPNByKubeVPN = "DEPTH_SIGNED_BY_NAISON"
    EnvSSHJump = "SSH_JUMP_BY_KUBEVPN"
    EnvSSHJump = "SSH_JUMP_BY_KUBEVPN"

    // transport mode
    ConfigKubeVPNTransportEngine = "transport-engine"
    // hosts entry key word
    HostsKeyWord = "# Add by KubeVPN"
)

var (
    // Image inject --ldflags -X
    Image = "docker.io/naison/kubevpn:latest"
    Image = "ghcr.io/kubenetworks/kubevpn:latest"
    Version = "latest"
    GitCommit = ""

    // GitHubOAuthToken --ldflags -X
    GitHubOAuthToken = ""

    OriginImage = "docker.io/naison/kubevpn:" + Version

    DaemonPath string
    OriginImage = "ghcr.io/kubenetworks/kubevpn:" + Version
)

var (
@@ -121,23 +123,31 @@ var (
)

func init() {
    RouterIP, CIDR, _ = net.ParseCIDR(innerIPv4Pool)
    RouterIP6, CIDR6, _ = net.ParseCIDR(innerIPv6Pool)
    DockerRouterIP, DockerCIDR, _ = net.ParseCIDR(dockerInnerIPv4Pool)
    dir, _ := os.UserHomeDir()
    DaemonPath = filepath.Join(dir, HOME, Daemon)
    var err error
    RouterIP, CIDR, err = net.ParseCIDR(innerIPv4Pool)
    if err != nil {
        panic(err)
    }
    RouterIP6, CIDR6, err = net.ParseCIDR(innerIPv6Pool)
    if err != nil {
        panic(err)
    }
    DockerRouterIP, DockerCIDR, err = net.ParseCIDR(dockerInnerIPv4Pool)
    if err != nil {
        panic(err)
    }
}

var Debug bool

var (
    SmallBufferSize = (1 << 13) - 1 // 8KB small buffer
    MediumBufferSize = (1 << 15) - 1 // 32KB medium buffer
    LargeBufferSize = (1 << 16) - 1 // 64KB large buffer
    SmallBufferSize = 8 * 1024 // 8KB small buffer
    MediumBufferSize = 32 * 1024 // 32KB medium buffer
    LargeBufferSize = 64 * 1024 // 64KB large buffer
)

var (
    KeepAliveTime = 180 * time.Second
    KeepAliveTime = 60 * time.Second
    DialTimeout = 15 * time.Second
    HandshakeTimeout = 5 * time.Second
    ConnectTimeout = 5 * time.Second
@@ -146,30 +156,72 @@ var (
)

var (
    // network layer ip needs 20 bytes
    // transport layer UDP header needs 8 bytes
    // UDP over TCP header needs 22 bytes
    DefaultMTU = 1500 - 20 - 8 - 21
    // DefaultMTU
    /**
    +--------------------------------------------------------------------+
    | Original IP Packet from TUN |
    +-------------------+------------------------------------------------+
    | IP Header (20B) | Payload (MTU size) |
    +-------------------+------------------------------------------------+


    After adding custom 2-byte header:
    +----+-------------------+-------------------------------------------+
    | LH | IP Header (20B) | Payload |
    +----+-------------------+-------------------------------------------+
    | 2B | 20B | 1453 - 20 = 1433B |
    +----+-------------------+-------------------------------------------+

    TLS 1.3 Record Structure Breakdown:
    +---------------------+--------------------------+-------------------+
    | TLS Header (5B) | Encrypted Data (N) | Auth Tag (16B) |
    +---------------------+--------------------------+-------------------+
    | Content Type (1) | ↑ | AEAD Authentication
    | Version (2) | Encrypted Payload | (e.g. AES-GCM) |
    | Length (2) | (Original Data + LH2) | |
    +---------------------+--------------------------+-------------------+
    |←------- 5B --------→|←---- Length Field ------→|←----- 16B -------→|


    Final Ethernet Frame:
    +--------+----------------+----------------+-----------------------+--------+
    | EthHdr | IP Header | TCP Header | TLS Components |
    | (14B) | (20B) | (20B) +---------+-------------+--------+
    | | | | Hdr(5B) | Data+LH2 | Tag(16)|
    +--------+----------------+----------------+---------+-------------+--------+
    |←------------------- Total 1500B Ethernet Frame --------------------------→|

    ipv4: 20
    ipv6: 40

    mtu: 1417
    */
    DefaultMTU = 1500 - max(20, 40) - 20 - 5 - 2 - 16
)
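The MTU derivation in the comment above can be restated as a tiny check; this is a sketch only, and the constant names below are illustrative, not part of the kubevpn packages.

```go
package example

const (
	ethernetMTU = 1500
	ipHeader    = 40 // worst case of max(20, 40): IPv6
	tcpHeader   = 20
	tlsHeader   = 5  // TLS record header
	lengthHdr   = 2  // custom 2-byte length header (LH)
	aeadTag     = 16 // e.g. AES-GCM auth tag
)

// defaultMTU evaluates to 1417, matching the DefaultMTU expression above.
const defaultMTU = ethernetMTU - ipHeader - tcpHeader - tlsHeader - lengthHdr - aeadTag
```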

var (
    LPool = &sync.Pool{
    SPool = &sync.Pool{
        New: func() interface{} {
            return make([]byte, SmallBufferSize)
        },
    }
    MPool = sync.Pool{
        New: func() any {
            return make([]byte, MediumBufferSize)
        },
    }
    LPool = sync.Pool{
        New: func() any {
            return make([]byte, LargeBufferSize)
        },
    }
)

var SPool = sync.Pool{
    New: func() any {
        return make([]byte, 2)
    },
}
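The pools above follow the usual sync.Pool borrow/return pattern. A minimal sketch, assuming the package is imported under the path shown (an assumption, not taken from the diff):

```go
package example

import (
	"io"

	"github.com/wencaiwulue/kubevpn/v2/pkg/config"
)

// readWithPooledBuffer borrows a large buffer from LPool, uses it for one
// read, and returns it to the pool so it can be reused instead of reallocated.
func readWithPooledBuffer(r io.Reader) (int, error) {
	buf := config.LPool.Get().([]byte)
	defer config.LPool.Put(buf)
	return r.Read(buf)
}
```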

type Engine string

const (
    EngineGvisor Engine = "gvisor"
    EngineMix Engine = "mix"
    EngineRaw Engine = "raw"
    EngineSystem Engine = "system"
)

const Slogan = "Now you can access resources in the kubernetes cluster !"

pkg/config/config.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
# Here is an example KubeVPN config file; please change it to your own custom config.
# Three fields are supported: Name, Needs, Flags
# Exec command: kubevpn alias qa <===> kubevpn connect --kubeconfig=~/.kube/jumper_config --namespace=default
# Simple is Good ~

Name: dev
Needs: qa
Flags:
  - connect
  - --kubeconfig=~/.kube/config
  - --namespace=default
  - --lite

---

Name: qa
Flags:
  - connect
  - --kubeconfig=~/.kube/jumper_config
  - --namespace=default
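Each document in this file maps naturally onto a small Go struct. A hypothetical sketch: the field names come from the comment above ("Name, Needs, Flags"), but the struct and its yaml tags are illustrative only.

```go
package example

type Alias struct {
	Name  string   `yaml:"Name"`  // alias name, e.g. "dev" or "qa"
	Needs string   `yaml:"Needs"` // another alias that must be applied first
	Flags []string `yaml:"Flags"` // kubevpn subcommand and flags the alias expands to
}
```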
@@ -1,29 +1,109 @@

package config

import "os"
import (
    _ "embed"
    "os"
    "path/filepath"

    "github.com/pkg/errors"
)

const (
    HOME = ".kubevpn"
    Daemon = "daemon"

    SockPath = "daemon.sock"
    SudoSockPath = "sudo_daemon.sock"
    SockPath = "user_daemon.sock"
    SudoSockPath = "root_daemon.sock"

    PidPath = "daemon.pid"
    SudoPidPath = "sudo_daemon.pid"
    PidPath = "user_daemon.pid"
    SudoPidPath = "root_daemon.pid"

    LogFile = "daemon.log"
    UserLogFile = "user_daemon.log"
    SudoLogFile = "root_daemon.log"

    KubeVPNRestorePatchKey = "kubevpn-probe-restore-patch"
    ConfigFile = "config.yaml"

    TmpDir = "tmp"
)

var (
    daemonPath string
    homePath string

    //go:embed config.yaml
    config []byte
)

func init() {
    err := os.MkdirAll(DaemonPath, os.ModePerm)
    dir, err := os.UserHomeDir()
    if err != nil {
        panic(err)
    }
    err = os.Chmod(DaemonPath, os.ModePerm)
    homePath = filepath.Join(dir, HOME)
    daemonPath = filepath.Join(dir, HOME, Daemon)

    var paths = []string{homePath, daemonPath, GetPProfPath(), GetSyncthingPath(), GetTempPath()}
    for _, path := range paths {
        _, err = os.Stat(path)
        if errors.Is(err, os.ErrNotExist) {
            err = os.MkdirAll(path, 0755)
            if err != nil {
                panic(err)
            }
            err = os.Chmod(path, 0755)
            if err != nil {
                panic(err)
            }
        } else if err != nil {
            panic(err)
        }
    }

    path := filepath.Join(homePath, ConfigFile)
    _, err = os.Stat(path)
    if errors.Is(err, os.ErrNotExist) {
        err = os.WriteFile(path, config, 0644)
    }
    if err != nil {
        panic(err)
    }
}

func GetSockPath(isSudo bool) string {
    name := SockPath
    if isSudo {
        name = SudoSockPath
    }
    return filepath.Join(daemonPath, name)
}

func GetPidPath(isSudo bool) string {
    name := PidPath
    if isSudo {
        name = SudoPidPath
    }
    return filepath.Join(daemonPath, name)
}

func GetSyncthingPath() string {
    return filepath.Join(daemonPath, SyncthingDir)
}

func GetConfigFile() string {
    return filepath.Join(homePath, ConfigFile)
}

func GetTempPath() string {
    return filepath.Join(homePath, TmpDir)
}

func GetDaemonLogPath(isSudo bool) string {
    if isSudo {
        return filepath.Join(daemonPath, SudoLogFile)
    }
    return filepath.Join(daemonPath, UserLogFile)
}

func GetPProfPath() string {
    return filepath.Join(daemonPath, PProfDir)
}
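A sketch of where the helpers above resolve on a typical machine, assuming $HOME is /home/alice and assuming the package import path shown below; the expected outputs in the comments follow directly from the constants defined above.

```go
package example

import (
	"fmt"

	"github.com/wencaiwulue/kubevpn/v2/pkg/config"
)

func printDaemonPaths() {
	fmt.Println(config.GetSockPath(false))     // /home/alice/.kubevpn/daemon/user_daemon.sock
	fmt.Println(config.GetSockPath(true))      // /home/alice/.kubevpn/daemon/root_daemon.sock
	fmt.Println(config.GetDaemonLogPath(true)) // /home/alice/.kubevpn/daemon/root_daemon.log
	fmt.Println(config.GetConfigFile())        // /home/alice/.kubevpn/config.yaml
}
```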

pkg/config/syncthing.go (new file, 92 lines)
@@ -0,0 +1,92 @@
package config

import (
    "crypto/tls"

    "github.com/syncthing/syncthing/lib/protocol"
)

const (
    SyncthingDir = "syncthing"

    DefaultRemoteDir = "/kubevpn-data"

    SyncthingAPIKey = "kubevpn"
)

var LocalCert tls.Certificate
var RemoteCert tls.Certificate
var LocalDeviceID protocol.DeviceID
var RemoteDeviceID protocol.DeviceID

const (
    SyncthingLocalDeviceID = "BSNCBRY-ZI5HLYC-YH6544V-SQ3IDKT-4JQKING-ZGSW463-UKYEYCA-WO7ZHA3"
    SyncthingLocalCert = `-----BEGIN CERTIFICATE-----
MIICHjCCAaSgAwIBAgIIHY0CWDFbXYEwCgYIKoZIzj0EAwIwSjESMBAGA1UEChMJ
U3luY3RoaW5nMSAwHgYDVQQLExdBdXRvbWF0aWNhbGx5IEdlbmVyYXRlZDESMBAG
A1UEAxMJc3luY3RoaW5nMCAXDTI0MDYxOTAwMDAwMFoYDzE4NTQwOTExMDA1MDUy
WjBKMRIwEAYDVQQKEwlTeW5jdGhpbmcxIDAeBgNVBAsTF0F1dG9tYXRpY2FsbHkg
R2VuZXJhdGVkMRIwEAYDVQQDEwlzeW5jdGhpbmcwdjAQBgcqhkjOPQIBBgUrgQQA
IgNiAAQj1ov1aM0902yssK+3LPiGM1e1pUcVRuQjxl0nDX0fpZp3kdeWeiBm9AlE
uwhAll/8QjoWBlNiEXyGFN9lOaIGf7ZIk7owPT6LiJXc1n3E6iqHWeSXcZ9dJL7M
+E4eleajVTBTMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYI
KwYBBQUHAwIwDAYDVR0TAQH/BAIwADAUBgNVHREEDTALgglzeW5jdGhpbmcwCgYI
KoZIzj0EAwIDaAAwZQIwJI4KA9JgFXWU4dWq6JnIr+lAuIJ5ON2lFPrX8JWi1Z3F
UXrvm80w+uR+1rLt6AdkAjEA3dpoBnS7tV21krEVmfX2vabtkzZidhXwuvP+1VJN
By4EwZnuTLX3TqQx2TERF9rV
-----END CERTIFICATE-----
`
    SyncthingLocalKey = `-----BEGIN EC PRIVATE KEY-----
MIGkAgEBBDAltfhZ8YO4CrPsvFRpU6P8lOspm5VXFGvJghSaDr4D/ub66+4HpTk9
3TdgtbUSMSmgBwYFK4EEACKhZANiAAQj1ov1aM0902yssK+3LPiGM1e1pUcVRuQj
xl0nDX0fpZp3kdeWeiBm9AlEuwhAll/8QjoWBlNiEXyGFN9lOaIGf7ZIk7owPT6L
iJXc1n3E6iqHWeSXcZ9dJL7M+E4eleY=
-----END EC PRIVATE KEY-----
`
)

const (
    SyncthingRemoteDeviceID = "OELB2JL-MIOW652-6JPBYPZ-POV3EBV-XEOW2Z2-I45QUGZ-QF5TT4P-Z2AH7AU"
    SyncthingRemoteCert = `-----BEGIN CERTIFICATE-----
MIICHzCCAaWgAwIBAgIJAOGCLdtwnUShMAoGCCqGSM49BAMCMEoxEjAQBgNVBAoT
CVN5bmN0aGluZzEgMB4GA1UECxMXQXV0b21hdGljYWxseSBHZW5lcmF0ZWQxEjAQ
BgNVBAMTCXN5bmN0aGluZzAgFw0yNDA2MTkwMDAwMDBaGA8xODU0MDkxMTAwNTA1
MlowSjESMBAGA1UEChMJU3luY3RoaW5nMSAwHgYDVQQLExdBdXRvbWF0aWNhbGx5
IEdlbmVyYXRlZDESMBAGA1UEAxMJc3luY3RoaW5nMHYwEAYHKoZIzj0CAQYFK4EE
ACIDYgAETwaM3V92D499uMXWFgGxdTUAvtp1tN7ePuJxt8W+FO0izG1fa7oU29Hp
FU0Ohh3xwnQfEHIWzlKJllZ2ZbbXGOvcfr0Yfiir6ToKuN6185EA8RHkA+5HRtu5
nw5wyWL/o1UwUzAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEG
CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwFAYDVR0RBA0wC4IJc3luY3RoaW5nMAoG
CCqGSM49BAMCA2gAMGUCMGxR9q9vjzm4GynOkoRIC+BQJN0zpiNusYUD6iYJNGe1
wNH8jhOJEG+rjGracDZ6bgIxAIpyHv/rOAjEX7/wcafRqGTFhwXdRq0l3493aERd
RCwqD8rbzP0QStVOCAE7xYt/sQ==
-----END CERTIFICATE-----
`
    SyncthingRemoteKey = `-----BEGIN EC PRIVATE KEY-----
MIGkAgEBBDAKabOokHf64xAsIQp5PA1zZ5vLjfcgKcuikx/D0CP6c2Cf48a6eADE
GWrY1Ng8UzOgBwYFK4EEACKhZANiAARPBozdX3YPj324xdYWAbF1NQC+2nW03t4+
4nG3xb4U7SLMbV9ruhTb0ekVTQ6GHfHCdB8QchbOUomWVnZlttcY69x+vRh+KKvp
Ogq43rXzkQDxEeQD7kdG27mfDnDJYv8=
-----END EC PRIVATE KEY-----
`
)

func init() {
    var err error
    LocalCert, err = tls.X509KeyPair([]byte(SyncthingLocalCert), []byte(SyncthingLocalKey))
    if err != nil {
        panic(err)
    }
    RemoteCert, err = tls.X509KeyPair([]byte(SyncthingRemoteCert), []byte(SyncthingRemoteKey))
    if err != nil {
        panic(err)
    }
    LocalDeviceID, err = protocol.DeviceIDFromString(SyncthingLocalDeviceID)
    if err != nil {
        panic(err)
    }
    RemoteDeviceID, err = protocol.DeviceIDFromString(SyncthingRemoteDeviceID)
    if err != nil {
        panic(err)
    }
}
@@ -2,13 +2,18 @@ package controlplane

import (
    "fmt"
    "strconv"
    "strings"
    "time"

    v31 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3"
    cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
    core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
    corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
    endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
    listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
    route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
    accesslogfilev3 "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/file/v3"
    corsv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/cors/v3"
    grpcwebv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/grpc_web/v3"
    routerv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3"
@@ -21,26 +26,72 @@ import (
    "github.com/envoyproxy/go-control-plane/pkg/cache/types"
    "github.com/envoyproxy/go-control-plane/pkg/resource/v3"
    "github.com/envoyproxy/go-control-plane/pkg/wellknown"
    log "github.com/sirupsen/logrus"
    "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/types/known/anypb"
    "google.golang.org/protobuf/types/known/durationpb"
    "google.golang.org/protobuf/types/known/wrapperspb"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/sets"

    "github.com/wencaiwulue/kubevpn/v2/pkg/util"
)

type Virtual struct {
    Uid string // group.resource.name
    Ports []corev1.ContainerPort
    Rules []*Rule
    Namespace string
    Uid string // group.resource.name
    Ports []ContainerPort
    Rules []*Rule
}

type ContainerPort struct {
    // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
    // named port in a pod must have a unique name. Name for the port that can be
    // referred to by services.
    // +optional
    Name string `json:"name,omitempty"`
    // Number of port to expose on the host.
    // If specified, this must be a valid port number, 0 < x < 65536.
    // envoy listener port; if it is not 0, it means Fargate mode
    // +optional
    EnvoyListenerPort int32 `json:"envoyListenerPort,omitempty"`
    // Number of port to expose on the pod's IP address.
    // This must be a valid port number, 0 < x < 65536.
    ContainerPort int32 `json:"containerPort"`
    // Protocol for port. Must be UDP, TCP, or SCTP.
    // Defaults to "TCP".
    // +optional
    // +default="TCP"
    Protocol corev1.Protocol `json:"protocol,omitempty"`
}

func ConvertContainerPort(ports ...corev1.ContainerPort) []ContainerPort {
    var result []ContainerPort
    for _, port := range ports {
        result = append(result, ContainerPort{
            Name: port.Name,
            EnvoyListenerPort: 0,
            ContainerPort: port.ContainerPort,
            Protocol: port.Protocol,
        })
    }
    return result
}

type Rule struct {
    Headers map[string]string
    LocalTunIPv4 string
    LocalTunIPv6 string
    // for non-privileged mode (AWS Fargate mode) we have neither the NET_ADMIN capability nor privileged: true, so we cannot use an OSI layer 3 proxy
    // containerPort -> envoyRulePort:localPort
    // envoyRulePort: envoy forwards to localhost:envoyRulePort
    // localPort: the local pc listens on localhost:localPort
    // via an ssh reverse tunnel, the envoy endpoint localhost:envoyRulePort is forwarded to localhost:localPort on the local pc
    // localPort is required and envoyRulePort is optional
    PortMap map[int32]string
}
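To make the PortMap comment concrete, here is an illustrative value (namespace, uid, ports and IPs are made up, not taken from the diff): one Virtual with a single port and rule, using the containerPort -> "envoyRulePort:localPort" format described above.

```go
package controlplane

import corev1 "k8s.io/api/core/v1"

var exampleVirtual = Virtual{
	Namespace: "default",
	Uid:       "apps.deployments.productpage",
	Ports: []ContainerPort{{
		Name:              "http",
		EnvoyListenerPort: 15006, // non-zero means Fargate mode
		ContainerPort:     9080,
		Protocol:          corev1.ProtocolTCP,
	}},
	Rules: []*Rule{{
		Headers:      map[string]string{"user": "alice"},
		LocalTunIPv4: "198.19.0.101",
		LocalTunIPv6: "2001:2::9998",
		// traffic for container port 9080 targets envoy rule port 15080, which an
		// ssh reverse tunnel forwards to localhost:8080 on the developer machine
		PortMap: map[int32]string{9080: "15080:8080"},
	}},
}
```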
|
||||
|
||||
func (a *Virtual) To() (
|
||||
func (a *Virtual) To(enableIPv6 bool, logger *log.Logger) (
|
||||
listeners []types.Resource,
|
||||
clusters []types.Resource,
|
||||
routes []types.Resource,
|
||||
@@ -48,21 +99,57 @@ func (a *Virtual) To() (
|
||||
) {
|
||||
//clusters = append(clusters, OriginCluster())
|
||||
for _, port := range a.Ports {
|
||||
listenerName := fmt.Sprintf("%s_%v_%s", a.Uid, port.ContainerPort, port.Protocol)
|
||||
isFargateMode := port.EnvoyListenerPort != 0
|
||||
|
||||
listenerName := fmt.Sprintf("%s_%s_%v_%s", a.Namespace, a.Uid, util.If(isFargateMode, port.EnvoyListenerPort, port.ContainerPort), port.Protocol)
|
||||
routeName := listenerName
|
||||
listeners = append(listeners, ToListener(listenerName, routeName, port.ContainerPort, port.Protocol))
|
||||
listeners = append(listeners, ToListener(listenerName, routeName, util.If(isFargateMode, port.EnvoyListenerPort, port.ContainerPort), port.Protocol, isFargateMode))
|
||||
|
||||
var rr []*route.Route
|
||||
for _, rule := range a.Rules {
|
||||
for _, ip := range []string{rule.LocalTunIPv4, rule.LocalTunIPv6} {
|
||||
clusterName := fmt.Sprintf("%s_%v", ip, port.ContainerPort)
|
||||
var ips []string
|
||||
if enableIPv6 {
|
||||
ips = []string{rule.LocalTunIPv4, rule.LocalTunIPv6}
|
||||
} else {
|
||||
ips = []string{rule.LocalTunIPv4}
|
||||
}
|
||||
ports := rule.PortMap[port.ContainerPort]
|
||||
if isFargateMode {
|
||||
if strings.Index(ports, ":") > 0 {
|
||||
ports = strings.Split(ports, ":")[0]
|
||||
} else {
|
||||
logger.Errorf("fargate mode port should have two pair: %s", ports)
|
||||
}
|
||||
}
|
||||
envoyRulePort, _ := strconv.Atoi(ports)
|
||||
for _, ip := range ips {
|
||||
clusterName := fmt.Sprintf("%s_%v", ip, envoyRulePort)
|
||||
clusters = append(clusters, ToCluster(clusterName))
|
||||
endpoints = append(endpoints, ToEndPoint(clusterName, ip, port.ContainerPort))
|
||||
endpoints = append(endpoints, ToEndPoint(clusterName, ip, int32(envoyRulePort)))
|
||||
rr = append(rr, ToRoute(clusterName, rule.Headers))
|
||||
}
|
||||
}
|
||||
rr = append(rr, DefaultRoute())
|
||||
clusters = append(clusters, OriginCluster())
|
||||
// if isFargateMode is true, a default route to the container port is needed, because use_original_dst does not work
|
||||
if isFargateMode {
|
||||
// all IPs should be IPv4 127.0.0.1 and IPv6 ::1
|
||||
var ips = sets.New[string]()
|
||||
for _, rule := range a.Rules {
|
||||
if enableIPv6 {
|
||||
ips.Insert(rule.LocalTunIPv4, rule.LocalTunIPv6)
|
||||
} else {
|
||||
ips.Insert(rule.LocalTunIPv4)
|
||||
}
|
||||
}
|
||||
for _, ip := range ips.UnsortedList() {
|
||||
defaultClusterName := fmt.Sprintf("%s_%v", ip, port.ContainerPort)
|
||||
clusters = append(clusters, ToCluster(defaultClusterName))
|
||||
endpoints = append(endpoints, ToEndPoint(defaultClusterName, ip, port.ContainerPort))
|
||||
rr = append(rr, DefaultRouteToCluster(defaultClusterName))
|
||||
}
|
||||
} else {
|
||||
rr = append(rr, DefaultRoute())
|
||||
clusters = append(clusters, OriginCluster())
|
||||
}
|
||||
routes = append(routes, &route.RouteConfiguration{
|
||||
Name: routeName,
|
||||
VirtualHosts: []*route.VirtualHost{
|
||||
@@ -122,6 +209,9 @@ func ToCluster(clusterName string) *cluster.Cluster {
|
||||
LbPolicy: cluster.Cluster_ROUND_ROBIN,
|
||||
TypedExtensionProtocolOptions: map[string]*anypb.Any{
|
||||
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": anyFunc(&httpv3.HttpProtocolOptions{
|
||||
CommonHttpProtocolOptions: &corev3.HttpProtocolOptions{
|
||||
IdleTimeout: durationpb.New(time.Second * 10),
|
||||
},
|
||||
UpstreamProtocolOptions: &httpv3.HttpProtocolOptions_UseDownstreamProtocolConfig{
|
||||
UseDownstreamProtocolConfig: &httpv3.HttpProtocolOptions_UseDownstreamHttpConfig{},
|
||||
},
|
||||
@@ -214,7 +304,30 @@ func DefaultRoute() *route.Route {
|
||||
}
|
||||
}
|
||||
|
||||
func ToListener(listenerName string, routeName string, port int32, p corev1.Protocol) *listener.Listener {
|
||||
func DefaultRouteToCluster(clusterName string) *route.Route {
|
||||
return &route.Route{
|
||||
Match: &route.RouteMatch{
|
||||
PathSpecifier: &route.RouteMatch_Prefix{
|
||||
Prefix: "/",
|
||||
},
|
||||
},
|
||||
Action: &route.Route_Route{
|
||||
Route: &route.RouteAction{
|
||||
ClusterSpecifier: &route.RouteAction_Cluster{
|
||||
Cluster: clusterName,
|
||||
},
|
||||
Timeout: durationpb.New(0),
|
||||
IdleTimeout: durationpb.New(0),
|
||||
MaxStreamDuration: &route.RouteAction_MaxStreamDuration{
|
||||
MaxStreamDuration: durationpb.New(0),
|
||||
GrpcTimeoutHeaderMax: durationpb.New(0),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func ToListener(listenerName string, routeName string, port int32, p corev1.Protocol, isFargateMode bool) *listener.Listener {
|
||||
var protocol core.SocketAddress_Protocol
|
||||
switch p {
|
||||
case corev1.ProtocolTCP:
|
||||
@@ -278,6 +391,14 @@ func ToListener(listenerName string, routeName string, port int32, p corev1.Prot
|
||||
UpgradeConfigs: []*httpconnectionmanager.HttpConnectionManager_UpgradeConfig{{
|
||||
UpgradeType: "websocket",
|
||||
}},
|
||||
AccessLog: []*v31.AccessLog{{
|
||||
Name: wellknown.FileAccessLog,
|
||||
ConfigType: &v31.AccessLog_TypedConfig{
|
||||
TypedConfig: anyFunc(&accesslogfilev3.FileAccessLog{
|
||||
Path: "/dev/stdout",
|
||||
}),
|
||||
},
|
||||
}},
|
||||
}
|
||||
|
||||
tcpConfig := &tcpproxy.TcpProxy{
|
||||
@@ -290,7 +411,7 @@ func ToListener(listenerName string, routeName string, port int32, p corev1.Prot
|
||||
return &listener.Listener{
|
||||
Name: listenerName,
|
||||
TrafficDirection: core.TrafficDirection_INBOUND,
|
||||
BindToPort: &wrapperspb.BoolValue{Value: false},
|
||||
BindToPort: &wrapperspb.BoolValue{Value: util.If(isFargateMode, true, false)},
|
||||
UseOriginalDst: &wrapperspb.BoolValue{Value: true},
|
||||
|
||||
Address: &core.Address{
|
||||
|
||||
@@ -8,16 +8,19 @@ import (
|
||||
serverv3 "github.com/envoyproxy/go-control-plane/pkg/server/v3"
|
||||
"github.com/fsnotify/fsnotify"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
)
|
||||
|
||||
func Main(filename string, port uint, logger *log.Logger) {
|
||||
func Main(ctx context.Context, filename string, port uint, logger *log.Logger) error {
|
||||
snapshotCache := cache.NewSnapshotCache(false, cache.IDHash{}, logger)
|
||||
proc := NewProcessor(snapshotCache, logger)
|
||||
|
||||
errChan := make(chan error, 2)
|
||||
|
||||
go func() {
|
||||
ctx := context.Background()
|
||||
server := serverv3.NewServer(ctx, snapshotCache, nil)
|
||||
RunServer(ctx, server, port)
|
||||
errChan <- RunServer(ctx, server, port)
|
||||
}()
|
||||
|
||||
notifyCh := make(chan NotifyMessage, 100)
|
||||
@@ -29,20 +32,29 @@ func Main(filename string, port uint, logger *log.Logger) {
|
||||
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
log.Fatal(fmt.Errorf("failed to create file watcher, err: %v", err))
|
||||
return fmt.Errorf("failed to create file watcher: %v", err)
|
||||
}
|
||||
defer watcher.Close()
|
||||
if err = watcher.Add(filename); err != nil {
|
||||
log.Fatal(fmt.Errorf("failed to add file: %s to wather, err: %v", filename, err))
|
||||
err = watcher.Add(filename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to add file: %s to wather: %v", filename, err)
|
||||
}
|
||||
go func() {
|
||||
log.Fatal(Watch(watcher, filename, notifyCh))
|
||||
errChan <- Watch(watcher, filename, notifyCh)
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case msg := <-notifyCh:
|
||||
proc.ProcessFile(msg)
|
||||
err = proc.ProcessFile(msg)
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("Failed to process file: %v", err)
|
||||
return err
|
||||
}
|
||||
case err = <-errChan:
|
||||
return err
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
}
|
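A minimal, hypothetical invocation of the refactored Main; the config path and port are illustrative, and cancelling the context is what now stops both the xDS server and the watcher loop:

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logger := log.New() // logrus logger, as imported above
if err := Main(ctx, "/etc/envoy/envoy-config.yaml", 9002, logger); err != nil {
	logger.Errorf("envoy control plane exited: %v", err)
}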
||||
|
||||
@@ -14,20 +14,22 @@ import (
|
||||
"github.com/envoyproxy/go-control-plane/pkg/cache/types"
|
||||
"github.com/envoyproxy/go-control-plane/pkg/cache/v3"
|
||||
"github.com/envoyproxy/go-control-plane/pkg/resource/v3"
|
||||
"github.com/sirupsen/logrus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
utilcache "k8s.io/apimachinery/pkg/util/cache"
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
type Processor struct {
|
||||
cache cache.SnapshotCache
|
||||
logger *logrus.Logger
|
||||
logger *log.Logger
|
||||
version int64
|
||||
|
||||
expireCache *utilcache.Expiring
|
||||
}
|
||||
|
||||
func NewProcessor(cache cache.SnapshotCache, log *logrus.Logger) *Processor {
|
||||
func NewProcessor(cache cache.SnapshotCache, log *log.Logger) *Processor {
|
||||
return &Processor{
|
||||
cache: cache,
|
||||
logger: log,
|
||||
@@ -44,25 +46,27 @@ func (p *Processor) newVersion() string {
|
||||
return strconv.FormatInt(p.version, 10)
|
||||
}
|
||||
|
||||
func (p *Processor) ProcessFile(file NotifyMessage) {
|
||||
func (p *Processor) ProcessFile(file NotifyMessage) error {
|
||||
configList, err := ParseYaml(file.FilePath)
|
||||
if err != nil {
|
||||
p.logger.Errorf("error parsing yaml file: %+v", err)
|
||||
return
|
||||
p.logger.Errorf("error parsing yaml file: %v", err)
|
||||
return err
|
||||
}
|
||||
enableIPv6, _ := util.DetectSupportIPv6()
|
||||
for _, config := range configList {
|
||||
if len(config.Uid) == 0 {
|
||||
continue
|
||||
}
|
||||
lastConfig, ok := p.expireCache.Get(config.Uid)
|
||||
uid := util.GenEnvoyUID(config.Namespace, config.Uid)
|
||||
lastConfig, ok := p.expireCache.Get(uid)
|
||||
if ok && reflect.DeepEqual(lastConfig.(*Virtual), config) {
|
||||
marshal, _ := json.Marshal(config)
|
||||
p.logger.Debugf("config are same, not needs to update, config: %s", string(marshal))
|
||||
p.logger.Infof("config are same, not needs to update, config: %s", string(marshal))
|
||||
continue
|
||||
}
|
||||
p.logger.Debugf("update config, version %d, config %v", p.version, config)
|
||||
p.logger.Infof("update config, version %d, config %v", p.version, config)
|
||||
|
||||
listeners, clusters, routes, endpoints := config.To()
|
||||
listeners, clusters, routes, endpoints := config.To(enableIPv6, p.logger)
|
||||
resources := map[resource.Type][]types.Resource{
|
||||
resource.ListenerType: listeners, // listeners
|
||||
resource.RouteType: routes, // routes
|
||||
@@ -76,21 +80,22 @@ func (p *Processor) ProcessFile(file NotifyMessage) {
|
||||
|
||||
if err != nil {
|
||||
p.logger.Errorf("snapshot inconsistency: %v, err: %v", snapshot, err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
if err = snapshot.Consistent(); err != nil {
|
||||
p.logger.Errorf("snapshot inconsistency: %v, err: %v", snapshot, err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
p.logger.Debugf("will serve snapshot %+v, nodeID: %s", snapshot, config.Uid)
|
||||
if err = p.cache.SetSnapshot(context.Background(), config.Uid, snapshot); err != nil {
|
||||
p.logger.Infof("will serve snapshot %+v, nodeID: %s", snapshot, uid)
|
||||
if err = p.cache.SetSnapshot(context.Background(), uid, snapshot); err != nil {
|
||||
p.logger.Errorf("snapshot error %q for %v", err, snapshot)
|
||||
p.logger.Fatal(err)
|
||||
return err
|
||||
}
|
||||
|
||||
p.expireCache.Set(config.Uid, config, time.Minute*5)
|
||||
p.expireCache.Set(uid, config, time.Minute*5)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ParseYaml(file string) ([]*Virtual, error) {
|
||||
|
||||
@@ -13,21 +13,22 @@ import (
|
||||
runtimeservice "github.com/envoyproxy/go-control-plane/envoy/service/runtime/v3"
|
||||
secretservice "github.com/envoyproxy/go-control-plane/envoy/service/secret/v3"
|
||||
serverv3 "github.com/envoyproxy/go-control-plane/pkg/server/v3"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
)
|
||||
|
||||
const (
|
||||
grpcMaxConcurrentStreams = 1000000
|
||||
)
|
||||
|
||||
func RunServer(ctx context.Context, server serverv3.Server, port uint) {
|
||||
func RunServer(ctx context.Context, server serverv3.Server, port uint) error {
|
||||
grpcServer := grpc.NewServer(grpc.MaxConcurrentStreams(grpcMaxConcurrentStreams))
|
||||
|
||||
var lc net.ListenConfig
|
||||
listener, err := lc.Listen(ctx, "tcp", fmt.Sprintf(":%d", port))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return err
|
||||
}
|
||||
|
||||
discoverygrpc.RegisterAggregatedDiscoveryServiceServer(grpcServer, server)
|
||||
@@ -38,8 +39,6 @@ func RunServer(ctx context.Context, server serverv3.Server, port uint) {
|
||||
secretservice.RegisterSecretDiscoveryServiceServer(grpcServer, server)
|
||||
runtimeservice.RegisterRuntimeDiscoveryServiceServer(grpcServer, server)
|
||||
|
||||
log.Infof("management server listening on %d", port)
|
||||
if err = grpcServer.Serve(listener); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
plog.G(ctx).Infof("Management server listening on %d", port)
|
||||
return grpcServer.Serve(listener)
|
||||
}
|
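Because RunServer now returns the gRPC Serve error instead of calling log.Fatal, callers such as Main above can decide how to shut down; a hedged sketch (the port is illustrative):

server := serverv3.NewServer(ctx, snapshotCache, nil)
if err := RunServer(ctx, server, 9002); err != nil {
	plog.G(ctx).Errorf("xDS management server stopped: %v", err)
}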
||||
|
||||
pkg/core/bufferedtcp.go (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
)
|
||||
|
||||
type bufferedTCP struct {
|
||||
net.Conn
|
||||
Chan chan *DatagramPacket
|
||||
closed bool
|
||||
}
|
||||
|
||||
func NewBufferedTCP(conn net.Conn) net.Conn {
|
||||
c := &bufferedTCP{
|
||||
Conn: conn,
|
||||
Chan: make(chan *DatagramPacket, MaxSize),
|
||||
}
|
||||
go c.Run()
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *bufferedTCP) Write(b []byte) (n int, err error) {
|
||||
if c.closed {
|
||||
return 0, errors.New("tcp channel is closed")
|
||||
}
|
||||
if len(b) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
n = copy(buf, b)
|
||||
c.Chan <- newDatagramPacket(buf, n)
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (c *bufferedTCP) Run() {
|
||||
for buf := range c.Chan {
|
||||
_, err := c.Conn.Write(buf.Data[:buf.DataLength])
|
||||
config.LPool.Put(buf.Data[:])
|
||||
if err != nil {
|
||||
plog.G(context.Background()).Errorf("[TCP] Write packet failed: %v", err)
|
||||
_ = c.Conn.Close()
|
||||
c.closed = true
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
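A short usage sketch for the new buffered wrapper (the address and payload are placeholders): Write only enqueues a pooled copy of the data, and the single Run goroutine performs the actual TCP writes, so producers are not blocked by a slow peer.

raw, err := net.Dial("tcp", "127.0.0.1:10800") // placeholder address
if err == nil {
	conn := NewBufferedTCP(raw)
	_, _ = conn.Write([]byte("payload")) // enqueued; flushed to the socket by Run()
}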
||||
@@ -1,97 +0,0 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math"
|
||||
"net"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrorEmptyChain is an error that implies the chain is empty.
|
||||
ErrorEmptyChain = errors.New("empty chain")
|
||||
)
|
||||
|
||||
type Chain struct {
|
||||
Retries int
|
||||
node *Node
|
||||
}
|
||||
|
||||
func NewChain(retry int, node *Node) *Chain {
|
||||
return &Chain{Retries: retry, node: node}
|
||||
}
|
||||
|
||||
func (c *Chain) Node() *Node {
|
||||
return c.node
|
||||
}
|
||||
|
||||
func (c *Chain) IsEmpty() bool {
|
||||
return c == nil || c.node == nil
|
||||
}
|
||||
|
||||
func (c *Chain) DialContext(ctx context.Context) (conn net.Conn, err error) {
|
||||
for i := 0; i < int(math.Max(float64(1), float64(c.Retries))); i++ {
|
||||
conn, err = c.dial(ctx)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Chain) dial(ctx context.Context) (net.Conn, error) {
|
||||
if c.IsEmpty() {
|
||||
return nil, ErrorEmptyChain
|
||||
}
|
||||
|
||||
conn, err := c.getConn(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var cc net.Conn
|
||||
cc, err = c.Node().Client.ConnectContext(ctx, conn)
|
||||
if err != nil {
|
||||
_ = conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
return cc, nil
|
||||
}
|
||||
|
||||
func (*Chain) resolve(addr string) string {
|
||||
if host, port, err := net.SplitHostPort(addr); err == nil {
|
||||
if ips, err := net.LookupIP(host); err == nil && len(ips) > 0 {
|
||||
return net.JoinHostPort(ips[0].String(), port)
|
||||
}
|
||||
}
|
||||
return addr
|
||||
}
|
||||
|
||||
func (c *Chain) getConn(ctx context.Context) (net.Conn, error) {
|
||||
if c.IsEmpty() {
|
||||
return nil, ErrorEmptyChain
|
||||
}
|
||||
return c.Node().Client.Dial(ctx, c.resolve(c.Node().Addr))
|
||||
}
|
||||
|
||||
type Handler interface {
|
||||
Handle(ctx context.Context, conn net.Conn)
|
||||
}
|
||||
|
||||
type Client struct {
|
||||
Connector
|
||||
Transporter
|
||||
}
|
||||
|
||||
type Connector interface {
|
||||
ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error)
|
||||
}
|
||||
|
||||
type Transporter interface {
|
||||
Dial(ctx context.Context, addr string) (net.Conn, error)
|
||||
}
|
||||
|
||||
type Server struct {
|
||||
Listener net.Listener
|
||||
Handler Handler
|
||||
}
|
||||
pkg/core/forwarder.go (new file, 96 lines)
@@ -0,0 +1,96 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrorEmptyForwarder is an error that implies the forward is empty.
|
||||
ErrorEmptyForwarder = errors.New("empty forwarder")
|
||||
)
|
||||
|
||||
type Forwarder struct {
|
||||
retries int
|
||||
node *Node
|
||||
}
|
||||
|
||||
func NewForwarder(retry int, node *Node) *Forwarder {
|
||||
return &Forwarder{retries: retry, node: node}
|
||||
}
|
||||
|
||||
func (c *Forwarder) Node() *Node {
|
||||
return c.node
|
||||
}
|
||||
|
||||
func (c *Forwarder) IsEmpty() bool {
|
||||
return c == nil || c.node == nil
|
||||
}
|
||||
|
||||
func (c *Forwarder) DialContext(ctx context.Context) (conn net.Conn, err error) {
|
||||
for i := 0; i < max(1, c.retries); i++ {
|
||||
conn, err = c.dial(ctx)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Forwarder) dial(ctx context.Context) (net.Conn, error) {
|
||||
if c.IsEmpty() {
|
||||
return nil, ErrorEmptyForwarder
|
||||
}
|
||||
|
||||
conn, err := c.getConn(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var cc net.Conn
|
||||
cc, err = c.Node().Client.ConnectContext(ctx, conn)
|
||||
if err != nil {
|
||||
_ = conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
return cc, nil
|
||||
}
|
||||
|
||||
func (*Forwarder) resolve(addr string) string {
|
||||
if host, port, err := net.SplitHostPort(addr); err == nil {
|
||||
if ips, err := net.LookupIP(host); err == nil && len(ips) > 0 {
|
||||
return net.JoinHostPort(ips[0].String(), port)
|
||||
}
|
||||
}
|
||||
return addr
|
||||
}
|
||||
|
||||
func (c *Forwarder) getConn(ctx context.Context) (net.Conn, error) {
|
||||
if c.IsEmpty() {
|
||||
return nil, ErrorEmptyForwarder
|
||||
}
|
||||
return c.Node().Client.Dial(ctx, c.Node().Addr)
|
||||
}
|
||||
|
||||
type Handler interface {
|
||||
Handle(ctx context.Context, conn net.Conn)
|
||||
}
|
||||
|
||||
type Client struct {
|
||||
Connector
|
||||
Transporter
|
||||
}
|
||||
|
||||
type Connector interface {
|
||||
ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error)
|
||||
}
|
||||
|
||||
type Transporter interface {
|
||||
Dial(ctx context.Context, addr string) (net.Conn, error)
|
||||
}
|
||||
|
||||
type Server struct {
|
||||
Listener net.Listener
|
||||
Handler Handler
|
||||
}
|
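A hedged sketch of how the renamed Forwarder is used; the address, retry count and the myConnector/myTransporter values are placeholders rather than APIs introduced by this change. DialContext retries up to max(1, retries) times and hands the raw connection to the node's Client for the protocol handshake.

node := &Node{
	Addr: "127.0.0.1:10800", // placeholder address
	Client: &Client{
		Connector:   myConnector,   // placeholder Connector implementation
		Transporter: myTransporter, // placeholder Transporter implementation
	},
}
conn, err := NewForwarder(3, node).DialContext(context.Background())
_, _ = conn, err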
||||
pkg/core/gvisoricmpforwarder.go (new file, 27 lines)
@@ -0,0 +1,27 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"gvisor.dev/gvisor/pkg/tcpip/stack"
|
||||
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func ICMPForwarder(ctx context.Context, s *stack.Stack) func(stack.TransportEndpointID, *stack.PacketBuffer) bool {
|
||||
return func(id stack.TransportEndpointID, buffer *stack.PacketBuffer) bool {
|
||||
plog.G(ctx).Infof("[TUN-ICMP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
|
||||
id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
|
||||
)
|
||||
ctx1, cancelFunc := context.WithCancel(ctx)
|
||||
defer cancelFunc()
|
||||
ok, err := util.PingOnce(ctx1, id.RemoteAddress.String(), id.LocalAddress.String())
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("[TUN-ICMP] Failed to ping dst %s from src %s",
|
||||
id.LocalAddress.String(), id.RemoteAddress.String(),
|
||||
)
|
||||
}
|
||||
return ok
|
||||
}
|
||||
}
|
||||
@@ -3,7 +3,6 @@ package core
|
||||
import (
|
||||
"context"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gvisor.dev/gvisor/pkg/tcpip"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/header"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/link/packetsocket"
|
||||
@@ -13,18 +12,12 @@ import (
|
||||
"gvisor.dev/gvisor/pkg/tcpip/transport/raw"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
|
||||
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
)
|
||||
|
||||
var _ stack.UniqueID = (*id)(nil)
|
||||
|
||||
type id struct {
|
||||
}
|
||||
|
||||
func (i id) UniqueID() uint64 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
|
||||
nicID := tcpip.NICID(1)
|
||||
s := stack.New(stack.Options{
|
||||
NetworkProtocols: []stack.NetworkProtocolFactory{
|
||||
ipv4.NewProtocol,
|
||||
@@ -40,35 +33,36 @@ func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
|
||||
// Enable raw sockets for users with sufficient
|
||||
// privileges.
|
||||
RawFactory: raw.EndpointFactory{},
|
||||
UniqueID: id{},
|
||||
})
|
||||
// set handlers for TCP, UDP and ICMP
|
||||
s.SetTransportProtocolHandler(tcp.ProtocolNumber, TCPForwarder(s))
|
||||
s.SetTransportProtocolHandler(udp.ProtocolNumber, UDPForwarder(s))
|
||||
s.SetTransportProtocolHandler(tcp.ProtocolNumber, TCPForwarder(ctx, s))
|
||||
s.SetTransportProtocolHandler(udp.ProtocolNumber, UDPForwarder(ctx, s))
|
||||
s.SetTransportProtocolHandler(header.ICMPv4ProtocolNumber, ICMPForwarder(ctx, s))
|
||||
s.SetTransportProtocolHandler(header.ICMPv6ProtocolNumber, ICMPForwarder(ctx, s))
|
||||
|
||||
s.SetRouteTable([]tcpip.Route{
|
||||
{
|
||||
Destination: header.IPv4EmptySubnet,
|
||||
NIC: 1,
|
||||
NIC: nicID,
|
||||
},
|
||||
{
|
||||
Destination: header.IPv6EmptySubnet,
|
||||
NIC: 1,
|
||||
NIC: nicID,
|
||||
},
|
||||
})
|
||||
|
||||
s.CreateNICWithOptions(1, packetsocket.New(tun), stack.NICOptions{
|
||||
s.CreateNICWithOptions(nicID, packetsocket.New(tun), stack.NICOptions{
|
||||
Disabled: false,
|
||||
Context: ctx,
|
||||
})
|
||||
s.SetPromiscuousMode(1, true)
|
||||
s.SetSpoofing(1, true)
|
||||
s.SetPromiscuousMode(nicID, true)
|
||||
s.SetSpoofing(nicID, true)
|
||||
|
||||
// Enable SACK Recovery.
|
||||
{
|
||||
opt := tcpip.TCPSACKEnabled(true)
|
||||
if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
|
||||
log.Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %v", tcp.ProtocolNumber, opt, opt, err)
|
||||
plog.G(ctx).Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %v", tcp.ProtocolNumber, opt, opt, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -76,10 +70,10 @@ func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
|
||||
{
|
||||
opt := tcpip.DefaultTTLOption(64)
|
||||
if err := s.SetNetworkProtocolOption(ipv4.ProtocolNumber, &opt); err != nil {
|
||||
log.Fatalf("SetNetworkProtocolOption(%d, &%T(%d)): %v", ipv4.ProtocolNumber, opt, opt, err)
|
||||
plog.G(ctx).Fatalf("SetNetworkProtocolOption(%d, &%T(%d)): %v", ipv4.ProtocolNumber, opt, opt, err)
|
||||
}
|
||||
if err := s.SetNetworkProtocolOption(ipv6.ProtocolNumber, &opt); err != nil {
|
||||
log.Fatalf("SetNetworkProtocolOption(%d, &%T(%d)): %v", ipv6.ProtocolNumber, opt, opt, err)
|
||||
plog.G(ctx).Fatalf("SetNetworkProtocolOption(%d, &%T(%d)): %v", ipv6.ProtocolNumber, opt, opt, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -87,23 +81,23 @@ func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
|
||||
{
|
||||
opt := tcpip.TCPModerateReceiveBufferOption(true)
|
||||
if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
|
||||
log.Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %v", tcp.ProtocolNumber, opt, opt, err)
|
||||
plog.G(ctx).Fatalf("SetTransportProtocolOption(%d, &%T(%t)): %v", tcp.ProtocolNumber, opt, opt, err)
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
if err := s.SetForwardingDefaultAndAllNICs(ipv4.ProtocolNumber, true); err != nil {
|
||||
log.Fatalf("set ipv4 forwarding: %v", err)
|
||||
plog.G(ctx).Fatalf("Set IPv4 forwarding: %v", err)
|
||||
}
|
||||
if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, true); err != nil {
|
||||
log.Fatalf("set ipv6 forwarding: %v", err)
|
||||
plog.G(ctx).Fatalf("Set IPv6 forwarding: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
option := tcpip.TCPModerateReceiveBufferOption(true)
|
||||
if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &option); err != nil {
|
||||
log.Fatalf("set TCP moderate receive buffer: %v", err)
|
||||
plog.G(ctx).Fatalf("Set TCP moderate receive buffer: %v", err)
|
||||
}
|
||||
}
|
||||
return s
|
||||
|
||||
@@ -1,58 +1,45 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gvisor.dev/gvisor/pkg/tcpip"
|
||||
"github.com/pkg/errors"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/stack"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
|
||||
"gvisor.dev/gvisor/pkg/waiter"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
)
|
||||
|
||||
var GvisorTCPForwardAddr string
|
||||
|
||||
func TCPForwarder(s *stack.Stack) func(stack.TransportEndpointID, *stack.PacketBuffer) bool {
|
||||
func TCPForwarder(ctx context.Context, s *stack.Stack) func(stack.TransportEndpointID, *stack.PacketBuffer) bool {
|
||||
return tcp.NewForwarder(s, 0, 100000, func(request *tcp.ForwarderRequest) {
|
||||
defer request.Complete(false)
|
||||
id := request.ID()
|
||||
log.Debugf("[TUN-TCP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
|
||||
plog.G(ctx).Infof("[TUN-TCP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
|
||||
id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
|
||||
)
|
||||
|
||||
node, err := ParseNode(GvisorTCPForwardAddr)
|
||||
// 2, dial proxy
|
||||
host := id.LocalAddress.String()
|
||||
port := fmt.Sprintf("%d", id.LocalPort)
|
||||
var remote net.Conn
|
||||
var d = net.Dialer{Timeout: time.Second * 5}
|
||||
remote, err := d.DialContext(ctx, "tcp", net.JoinHostPort(host, port))
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-TCP] Error: can not parse gvisor tcp forward addr %s: %v", GvisorTCPForwardAddr, err)
|
||||
return
|
||||
}
|
||||
node.Client = &Client{
|
||||
Connector: GvisorTCPTunnelConnector(),
|
||||
Transporter: TCPTransporter(),
|
||||
}
|
||||
forwardChain := NewChain(5, node)
|
||||
|
||||
remote, err := forwardChain.dial(context.Background())
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-TCP] Error: failed to dial remote conn: %v", err)
|
||||
return
|
||||
}
|
||||
if err = WriteProxyInfo(remote, id); err != nil {
|
||||
log.Debugf("[TUN-TCP] Error: failed to write proxy info: %v", err)
|
||||
plog.G(ctx).Errorf("[TUN-TCP] Failed to connect addr %s: %v", net.JoinHostPort(host, port), err)
|
||||
return
|
||||
}
|
||||
|
||||
w := &waiter.Queue{}
|
||||
endpoint, tErr := request.CreateEndpoint(w)
|
||||
if tErr != nil {
|
||||
log.Debugf("[TUN-TCP] Error: can not create endpoint: %v", tErr)
|
||||
plog.G(ctx).Errorf("[TUN-TCP] Failed to create endpoint: %v", tErr)
|
||||
return
|
||||
}
|
||||
conn := gonet.NewTCPConn(w, endpoint)
|
||||
@@ -61,77 +48,22 @@ func TCPForwarder(s *stack.Stack) func(stack.TransportEndpointID, *stack.PacketB
|
||||
defer remote.Close()
|
||||
errChan := make(chan error, 2)
|
||||
go func() {
|
||||
i := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(i[:])
|
||||
written, err2 := io.CopyBuffer(remote, conn, i)
|
||||
log.Debugf("[TUN-TCP] Debug: write length %d data to remote", written)
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(buf[:])
|
||||
written, err2 := io.CopyBuffer(remote, conn, buf)
|
||||
plog.G(ctx).Infof("[TUN-TCP] Write length %d data to remote", written)
|
||||
errChan <- err2
|
||||
}()
|
||||
go func() {
|
||||
i := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(i[:])
|
||||
written, err2 := io.CopyBuffer(conn, remote, i)
|
||||
log.Debugf("[TUN-TCP] Debug: read length %d data from remote", written)
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(buf[:])
|
||||
written, err2 := io.CopyBuffer(conn, remote, buf)
|
||||
plog.G(ctx).Infof("[TUN-TCP] Read length %d data from remote", written)
|
||||
errChan <- err2
|
||||
}()
|
||||
err = <-errChan
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
log.Debugf("[TUN-TCP] Error: dsiconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err)
|
||||
plog.G(ctx).Errorf("[TUN-TCP] Disconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err)
|
||||
}
|
||||
}).HandlePacket
|
||||
}
|
||||
|
||||
func WriteProxyInfo(conn net.Conn, id stack.TransportEndpointID) error {
|
||||
var b bytes.Buffer
|
||||
i := config.SPool.Get().([]byte)[:]
|
||||
defer config.SPool.Put(i[:])
|
||||
binary.BigEndian.PutUint16(i, id.LocalPort)
|
||||
b.Write(i)
|
||||
binary.BigEndian.PutUint16(i, id.RemotePort)
|
||||
b.Write(i)
|
||||
b.WriteByte(byte(id.LocalAddress.Len()))
|
||||
b.Write(id.LocalAddress.AsSlice())
|
||||
b.WriteByte(byte(id.RemoteAddress.Len()))
|
||||
b.Write(id.RemoteAddress.AsSlice())
|
||||
_, err := b.WriteTo(conn)
|
||||
return err
|
||||
}
|
||||
|
||||
// ParseProxyInfo parse proxy info [20]byte
|
||||
func ParseProxyInfo(conn net.Conn) (id stack.TransportEndpointID, err error) {
|
||||
var n int
|
||||
var port = make([]byte, 2)
|
||||
|
||||
// local port
|
||||
if n, err = io.ReadFull(conn, port); err != nil || n != 2 {
|
||||
return
|
||||
}
|
||||
id.LocalPort = binary.BigEndian.Uint16(port)
|
||||
|
||||
// remote port
|
||||
if n, err = io.ReadFull(conn, port); err != nil || n != 2 {
|
||||
return
|
||||
}
|
||||
id.RemotePort = binary.BigEndian.Uint16(port)
|
||||
|
||||
// local address
|
||||
if n, err = io.ReadFull(conn, port[:1]); err != nil || n != 1 {
|
||||
return
|
||||
}
|
||||
var localAddress = make([]byte, port[0])
|
||||
if n, err = io.ReadFull(conn, localAddress); err != nil || n != len(localAddress) {
|
||||
return
|
||||
}
|
||||
id.LocalAddress = tcpip.AddrFromSlice(localAddress)
|
||||
|
||||
// remote address
|
||||
if n, err = io.ReadFull(conn, port[:1]); err != nil || n != 1 {
|
||||
return
|
||||
}
|
||||
var remoteAddress = make([]byte, port[0])
|
||||
if n, err = io.ReadFull(conn, remoteAddress); err != nil || n != len(remoteAddress) {
|
||||
return
|
||||
}
|
||||
id.RemoteAddress = tcpip.AddrFromSlice(remoteAddress)
|
||||
return
|
||||
}
|
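For reference, the framing that ParseProxyInfo expects (and WriteProxyInfo produces, assuming config.SPool hands out 2-byte buffers) is, in order:

// [2]byte local port (big endian)
// [2]byte remote port (big endian)
// [1]byte local address length, followed by that many address bytes
// [1]byte remote address length, followed by that many address bytes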
||||
|
||||
@@ -2,101 +2,85 @@ package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"time"
|
||||
"sync"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gvisor.dev/gvisor/pkg/tcpip"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
type gvisorTCPTunnelConnector struct {
|
||||
type gvisorTCPHandler struct {
|
||||
// map[srcIP]net.Conn
|
||||
routeMapTCP *sync.Map
|
||||
packetChan chan *Packet
|
||||
}
|
||||
|
||||
func GvisorTCPTunnelConnector() Connector {
|
||||
return &gvisorTCPTunnelConnector{}
|
||||
}
|
||||
|
||||
func (c *gvisorTCPTunnelConnector) ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error) {
|
||||
switch con := conn.(type) {
|
||||
case *net.TCPConn:
|
||||
err := con.SetNoDelay(true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = con.SetKeepAlive(true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = con.SetKeepAlivePeriod(15 * time.Second)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
type gvisorTCPHandler struct{}
|
||||
|
||||
func GvisorTCPHandler() Handler {
|
||||
return &gvisorTCPHandler{}
|
||||
return &gvisorTCPHandler{
|
||||
routeMapTCP: RouteMapTCP,
|
||||
packetChan: TCPPacketChan,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *gvisorTCPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
|
||||
defer tcpConn.Close()
|
||||
log.Debugf("[TUN-TCP] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
|
||||
// 1, get proxy info
|
||||
endpointID, err := ParseProxyInfo(tcpConn)
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-TCP] Error: failed to parse proxy info: %v", err)
|
||||
return
|
||||
}
|
||||
log.Debugf("[TUN-TCP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
|
||||
endpointID.LocalPort, endpointID.LocalAddress.String(), endpointID.RemotePort, endpointID.RemoteAddress.String(),
|
||||
)
|
||||
// 2, dial proxy
|
||||
host := endpointID.LocalAddress.String()
|
||||
port := fmt.Sprintf("%d", endpointID.LocalPort)
|
||||
var remote net.Conn
|
||||
remote, err = net.DialTimeout("tcp", net.JoinHostPort(host, port), time.Second*5)
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-TCP] Error: failed to connect addr %s: %v", net.JoinHostPort(host, port), err)
|
||||
return
|
||||
}
|
||||
cancel, cancelFunc := context.WithCancel(ctx)
|
||||
defer cancelFunc()
|
||||
plog.G(ctx).Infof("[TUN-GVISOR] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
|
||||
h.handle(cancel, tcpConn)
|
||||
}
|
||||
|
||||
func (h *gvisorTCPHandler) handle(ctx context.Context, tcpConn net.Conn) {
|
||||
endpoint := channel.New(tcp.DefaultReceiveBufferSize, uint32(config.DefaultMTU), tcpip.GetRandMacAddr())
|
||||
errChan := make(chan error, 2)
|
||||
go func() {
|
||||
i := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(i[:])
|
||||
written, err2 := io.CopyBuffer(remote, tcpConn, i)
|
||||
log.Debugf("[TUN-TCP] Debug: write length %d data to remote", written)
|
||||
errChan <- err2
|
||||
defer util.HandleCrash()
|
||||
h.readFromTCPConnWriteToEndpoint(ctx, NewBufferedTCP(tcpConn), endpoint)
|
||||
util.SafeClose(errChan)
|
||||
}()
|
||||
go func() {
|
||||
i := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(i[:])
|
||||
written, err2 := io.CopyBuffer(tcpConn, remote, i)
|
||||
log.Debugf("[TUN-TCP] Debug: read length %d data from remote", written)
|
||||
errChan <- err2
|
||||
defer util.HandleCrash()
|
||||
h.readFromEndpointWriteToTCPConn(ctx, tcpConn, endpoint)
|
||||
util.SafeClose(errChan)
|
||||
}()
|
||||
err = <-errChan
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
log.Debugf("[TUN-TCP] Error: dsiconnect: %s >-<: %s: %v", tcpConn.LocalAddr(), remote.RemoteAddr(), err)
|
||||
stack := NewStack(ctx, sniffer.NewWithPrefix(endpoint, "[gVISOR] "))
|
||||
defer stack.Destroy()
|
||||
select {
|
||||
case <-errChan:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func GvisorTCPListener(addr string) (net.Listener, error) {
|
||||
log.Debug("gvisor tcp listen addr", addr)
|
||||
plog.G(context.Background()).Infof("Gvisor TCP listening addr: %s", addr)
|
||||
laddr, err := net.ResolveTCPAddr("tcp", addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ln, err := net.ListenTCP("tcp", laddr)
|
||||
listener, err := net.ListenTCP("tcp", laddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &tcpKeepAliveListener{TCPListener: ln}, nil
|
||||
serverConfig, err := util.GetTlsServerConfig(nil)
|
||||
if err != nil {
|
||||
if errors.Is(err, util.ErrNoTLSConfig) {
|
||||
plog.G(context.Background()).Warn("tls config not found in config, use raw tcp mode")
|
||||
return &tcpKeepAliveListener{TCPListener: listener}, nil
|
||||
}
|
||||
plog.G(context.Background()).Errorf("failed to get tls server config: %v", err)
|
||||
_ = listener.Close()
|
||||
return nil, err
|
||||
}
|
||||
plog.G(context.Background()).Debugf("Use tls mode")
|
||||
return tls.NewListener(&tcpKeepAliveListener{TCPListener: listener}, serverConfig), nil
|
||||
}
|
||||
|
||||
pkg/core/gvisortunendpoint.go (new executable file, 158 lines)
@@ -0,0 +1,158 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
|
||||
"github.com/google/gopacket/layers"
|
||||
"golang.org/x/net/ipv4"
|
||||
"golang.org/x/net/ipv6"
|
||||
"gvisor.dev/gvisor/pkg/buffer"
|
||||
"gvisor.dev/gvisor/pkg/tcpip"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/header"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/stack"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func (h *gvisorTCPHandler) readFromEndpointWriteToTCPConn(ctx context.Context, conn net.Conn, endpoint *channel.Endpoint) {
|
||||
tcpConn, _ := newGvisorUDPConnOverTCP(ctx, conn)
|
||||
for ctx.Err() == nil {
|
||||
pktBuffer := endpoint.ReadContext(ctx)
|
||||
if pktBuffer != nil {
|
||||
sniffer.LogPacket("[gVISOR] ", sniffer.DirectionSend, pktBuffer.NetworkProtocolNumber, pktBuffer)
|
||||
buf := pktBuffer.ToView().AsSlice()
|
||||
_, err := tcpConn.Write(buf)
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("[TUN-GVISOR] Failed to write data to tun device: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// tun --> dispatcher
|
||||
func (h *gvisorTCPHandler) readFromTCPConnWriteToEndpoint(ctx context.Context, conn net.Conn, endpoint *channel.Endpoint) {
|
||||
tcpConn, _ := newGvisorUDPConnOverTCP(ctx, conn)
|
||||
defer h.removeFromRouteMapTCP(ctx, conn)
|
||||
|
||||
for ctx.Err() == nil {
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
read, err := tcpConn.Read(buf[:])
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("[TCP-GVISOR] Failed to read from tcp conn: %v", err)
|
||||
config.LPool.Put(buf[:])
|
||||
return
|
||||
}
|
||||
if read == 0 {
|
||||
plog.G(ctx).Warnf("[TCP-GVISOR] Read from tcp conn length is %d", read)
|
||||
config.LPool.Put(buf[:])
|
||||
continue
|
||||
}
|
||||
// Try to determine network protocol number, default zero.
|
||||
var protocol tcpip.NetworkProtocolNumber
|
||||
var ipProtocol int
|
||||
var src, dst net.IP
|
||||
// The TUN interface runs with IFF_NO_PI enabled, so the protocol
|
||||
// has to be determined from the IP version field of the packet itself
|
||||
if util.IsIPv4(buf) {
|
||||
protocol = header.IPv4ProtocolNumber
|
||||
ipHeader, err := ipv4.ParseHeader(buf[:read])
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("Failed to parse IPv4 header: %v", err)
|
||||
config.LPool.Put(buf[:])
|
||||
continue
|
||||
}
|
||||
ipProtocol = ipHeader.Protocol
|
||||
src = ipHeader.Src
|
||||
dst = ipHeader.Dst
|
||||
} else if util.IsIPv6(buf) {
|
||||
protocol = header.IPv6ProtocolNumber
|
||||
ipHeader, err := ipv6.ParseHeader(buf[:read])
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("[TCP-GVISOR] Failed to parse IPv6 header: %s", err.Error())
|
||||
config.LPool.Put(buf[:])
|
||||
continue
|
||||
}
|
||||
ipProtocol = ipHeader.NextHeader
|
||||
src = ipHeader.Src
|
||||
dst = ipHeader.Dst
|
||||
} else {
|
||||
plog.G(ctx).Errorf("[TCP-GVISOR] Unknown packet")
|
||||
config.LPool.Put(buf[:])
|
||||
continue
|
||||
}
|
||||
|
||||
h.addToRouteMapTCP(ctx, src, conn)
|
||||
// inner IPs like 198.19.0.100/102/103 connect to each other
|
||||
// for issue 594: sometimes the k8s service network CIDR also overlaps with 198.19.151.170
|
||||
// if dst is found in the route map, treat the packet as inner communication
|
||||
// if dst is not found in the route map, treat the packet as a k8s service/pod IP
|
||||
_, found := h.routeMapTCP.Load(dst.String())
|
||||
if found && (config.CIDR.Contains(dst) || config.CIDR6.Contains(dst)) {
|
||||
err = h.handlePacket(ctx, buf, read, src, dst, layers.IPProtocol(ipProtocol).String())
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("[TCP-GVISOR] Failed to handle packet: %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
|
||||
ReserveHeaderBytes: 0,
|
||||
Payload: buffer.MakeWithData(buf[:read]),
|
||||
})
|
||||
config.LPool.Put(buf[:])
|
||||
sniffer.LogPacket("[gVISOR] ", sniffer.DirectionRecv, protocol, pkt)
|
||||
endpoint.InjectInbound(protocol, pkt)
|
||||
pkt.DecRef()
|
||||
plog.G(ctx).Debugf("[TCP-GVISOR] Write to Gvisor. SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, layers.IPProtocol(ipProtocol).String(), read)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *gvisorTCPHandler) handlePacket(ctx context.Context, buf []byte, length int, src, dst net.IP, protocol string) error {
|
||||
if conn, ok := h.routeMapTCP.Load(dst.String()); ok {
|
||||
plog.G(ctx).Debugf("[TCP-GVISOR] Find TCP route SRC: %s to DST: %s -> %s", src, dst, conn.(net.Conn).RemoteAddr())
|
||||
dgram := newDatagramPacket(buf, length)
|
||||
err := dgram.Write(conn.(net.Conn))
|
||||
config.LPool.Put(buf[:])
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("[TCP-GVISOR] Failed to write to %s <- %s : %s", conn.(net.Conn).RemoteAddr(), conn.(net.Conn).LocalAddr(), err)
|
||||
return err
|
||||
}
|
||||
} else if config.RouterIP.Equal(dst) || config.RouterIP6.Equal(dst) {
|
||||
plog.G(ctx).Debugf("[TCP-GVISOR] Forward to TUN device, SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, protocol, length)
|
||||
util.SafeWrite(h.packetChan, NewPacket(buf[:], length, src, dst), func(v *Packet) {
|
||||
config.LPool.Put(v.data[:])
|
||||
plog.G(context.Background()).Errorf("[TCP-GVISOR] Drop packet, SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, protocol, v.length)
|
||||
})
|
||||
} else {
|
||||
plog.G(ctx).Warnf("[TCP-GVISOR] No route for src: %s -> dst: %s, drop it", src, dst)
|
||||
config.LPool.Put(buf[:])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *gvisorTCPHandler) addToRouteMapTCP(ctx context.Context, src net.IP, tcpConn net.Conn) {
|
||||
value, loaded := h.routeMapTCP.LoadOrStore(src.String(), tcpConn)
|
||||
if loaded {
|
||||
if value.(net.Conn) != tcpConn {
|
||||
h.routeMapTCP.Store(src.String(), tcpConn)
|
||||
plog.G(ctx).Infof("[TUN-GVISOR] Replace route map TCP: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
|
||||
}
|
||||
} else {
|
||||
plog.G(ctx).Infof("[TUN-GVISOR] Add new route map TCP: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
|
||||
}
|
||||
}
|
||||
|
||||
func (h *gvisorTCPHandler) removeFromRouteMapTCP(ctx context.Context, tcpConn net.Conn) {
|
||||
h.routeMapTCP.Range(func(key, value any) bool {
|
||||
if value.(net.Conn) == tcpConn {
|
||||
h.routeMapTCP.Delete(key)
|
||||
plog.G(ctx).Infof("[TCP-GVISOR] Delete to DST %s by conn %s from globle route map TCP", key, tcpConn.LocalAddr())
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
@@ -2,81 +2,118 @@ package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/pkg/errors"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/stack"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
|
||||
"gvisor.dev/gvisor/pkg/waiter"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
var GvisorUDPForwardAddr string
|
||||
|
||||
func UDPForwarder(s *stack.Stack) func(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool {
|
||||
func UDPForwarder(ctx context.Context, s *stack.Stack) func(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool {
|
||||
return udp.NewForwarder(s, func(request *udp.ForwarderRequest) {
|
||||
endpointID := request.ID()
|
||||
log.Debugf("[TUN-UDP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
|
||||
endpointID.LocalPort, endpointID.LocalAddress.String(), endpointID.RemotePort, endpointID.RemoteAddress.String(),
|
||||
id := request.ID()
|
||||
plog.G(ctx).Infof("[TUN-UDP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
|
||||
id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
|
||||
)
|
||||
src := &net.UDPAddr{
|
||||
IP: id.RemoteAddress.AsSlice(),
|
||||
Port: int(id.RemotePort),
|
||||
}
|
||||
dst := &net.UDPAddr{
|
||||
IP: id.LocalAddress.AsSlice(),
|
||||
Port: int(id.LocalPort),
|
||||
}
|
||||
|
||||
w := &waiter.Queue{}
|
||||
endpoint, tErr := request.CreateEndpoint(w)
|
||||
if tErr != nil {
|
||||
log.Debugf("[TUN-UDP] Error: can not create endpoint: %v", tErr)
|
||||
plog.G(ctx).Errorf("[TUN-UDP] Failed to create endpoint to dst: %s: %v", dst.String(), tErr)
|
||||
return
|
||||
}
|
||||
|
||||
node, err := ParseNode(GvisorUDPForwardAddr)
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-UDP] Error: parse gviosr udp forward addr %s: %v", GvisorUDPForwardAddr, err)
|
||||
// dial dst
|
||||
remote, err1 := net.DialUDP("udp", nil, dst)
|
||||
if err1 != nil {
|
||||
plog.G(ctx).Errorf("[TUN-UDP] Failed to connect dst: %s: %v", dst.String(), err1)
|
||||
return
|
||||
}
|
||||
node.Client = &Client{
|
||||
Connector: GvisorUDPOverTCPTunnelConnector(endpointID),
|
||||
Transporter: TCPTransporter(),
|
||||
}
|
||||
forwardChain := NewChain(5, node)
|
||||
|
||||
ctx := context.Background()
|
||||
c, err := forwardChain.getConn(ctx)
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-UDP] Error: can not get conn: %v", err)
|
||||
return
|
||||
}
|
||||
if err = WriteProxyInfo(c, endpointID); err != nil {
|
||||
log.Debugf("[TUN-UDP] Error: can not write proxy info: %v", err)
|
||||
return
|
||||
}
|
||||
remote, err := node.Client.ConnectContext(ctx, c)
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-UDP] Error: can not connect: %v", err)
|
||||
return
|
||||
}
|
||||
conn := gonet.NewUDPConn(s, w, endpoint)
|
||||
conn := gonet.NewUDPConn(w, endpoint)
|
||||
go func() {
|
||||
defer conn.Close()
|
||||
defer remote.Close()
|
||||
errChan := make(chan error, 2)
|
||||
go func() {
|
||||
i := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(i[:])
|
||||
written, err2 := io.CopyBuffer(remote, conn, i)
|
||||
log.Debugf("[TUN-UDP] Debug: write length %d data to remote", written)
|
||||
errChan <- err2
|
||||
defer util.HandleCrash()
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(buf[:])
|
||||
|
||||
var written int
|
||||
var err error
|
||||
for {
|
||||
err = conn.SetReadDeadline(time.Now().Add(time.Second * 120))
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
var read int
|
||||
read, _, err = conn.ReadFrom(buf[:])
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
written += read
|
||||
err = remote.SetWriteDeadline(time.Now().Add(time.Second * 120))
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
_, err = remote.Write(buf[:read])
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
plog.G(ctx).Infof("[TUN-UDP] Write length %d data from src: %s -> dst: %s", written, src, dst)
|
||||
errChan <- err
|
||||
}()
|
||||
go func() {
|
||||
i := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(i[:])
|
||||
written, err2 := io.CopyBuffer(conn, remote, i)
|
||||
log.Debugf("[TUN-UDP] Debug: read length %d data from remote", written)
|
||||
errChan <- err2
|
||||
defer util.HandleCrash()
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(buf[:])
|
||||
|
||||
var err error
|
||||
var written int
|
||||
for {
|
||||
err = remote.SetReadDeadline(time.Now().Add(time.Second * 120))
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
var n int
|
||||
n, _, err = remote.ReadFromUDP(buf[:])
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
written += n
|
||||
err = conn.SetWriteDeadline(time.Now().Add(time.Second * 120))
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
_, err = conn.Write(buf[:n])
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
plog.G(ctx).Infof("[TUN-UDP] Read length %d data from dst: %s -> src: %s", written, dst, src)
|
||||
errChan <- err
|
||||
}()
|
||||
err = <-errChan
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
log.Debugf("[TUN-UDP] Error: dsiconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err)
|
||||
err1 = <-errChan
|
||||
if err1 != nil && !errors.Is(err1, io.EOF) {
|
||||
plog.G(ctx).Errorf("[TUN-UDP] Disconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err1)
|
||||
}
|
||||
}()
|
||||
}).HandlePacket
|
||||
|
||||
@@ -3,44 +3,17 @@ package core
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/stack"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
type gvisorUDPOverTCPTunnelConnector struct {
|
||||
Id stack.TransportEndpointID
|
||||
}
|
||||
|
||||
func GvisorUDPOverTCPTunnelConnector(endpointID stack.TransportEndpointID) Connector {
|
||||
return &gvisorUDPOverTCPTunnelConnector{
|
||||
Id: endpointID,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *gvisorUDPOverTCPTunnelConnector) ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error) {
|
||||
switch con := conn.(type) {
|
||||
case *net.TCPConn:
|
||||
err := con.SetNoDelay(true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = con.SetKeepAlive(true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = con.SetKeepAlivePeriod(15 * time.Second)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return newGvisorFakeUDPTunnelConnOverTCP(ctx, conn)
|
||||
}
|
||||
|
||||
type gvisorUDPHandler struct{}
|
||||
|
||||
func GvisorUDPHandler() Handler {
|
||||
@@ -49,14 +22,14 @@ func GvisorUDPHandler() Handler {
|
||||
|
||||
func (h *gvisorUDPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
|
||||
defer tcpConn.Close()
|
||||
log.Debugf("[TUN-UDP] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
|
||||
plog.G(ctx).Debugf("[TUN-UDP] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
|
||||
// 1, get proxy info
|
||||
endpointID, err := ParseProxyInfo(tcpConn)
|
||||
endpointID, err := util.ParseProxyInfo(tcpConn)
|
||||
if err != nil {
|
||||
log.Warningf("[TUN-UDP] Error: Failed to parse proxy info: %v", err)
|
||||
plog.G(ctx).Errorf("[TUN-UDP] Failed to parse proxy info: %v", err)
|
||||
return
|
||||
}
|
||||
log.Debugf("[TUN-UDP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
|
||||
plog.G(ctx).Infof("[TUN-UDP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress: %s",
|
||||
endpointID.LocalPort, endpointID.LocalAddress.String(), endpointID.RemotePort, endpointID.RemoteAddress.String(),
|
||||
)
|
||||
// 2, dial proxy
|
||||
@@ -67,45 +40,49 @@ func (h *gvisorUDPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
|
||||
var remote *net.UDPConn
|
||||
remote, err = net.DialUDP("udp", nil, addr)
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-UDP] Error: failed to connect addr %s: %v", addr.String(), err)
|
||||
plog.G(ctx).Errorf("[TUN-UDP] Failed to connect addr %s: %v", addr.String(), err)
|
||||
return
|
||||
}
|
||||
handle(ctx, tcpConn, remote)
|
||||
}
|
||||
|
||||
// fake UDP connection over TCP
|
||||
type gvisorFakeUDPTunnelConn struct {
|
||||
type gvisorUDPConnOverTCP struct {
|
||||
// tcp connection
|
||||
net.Conn
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func newGvisorFakeUDPTunnelConnOverTCP(ctx context.Context, conn net.Conn) (net.Conn, error) {
|
||||
return &gvisorFakeUDPTunnelConn{ctx: ctx, Conn: conn}, nil
|
||||
func newGvisorUDPConnOverTCP(ctx context.Context, conn net.Conn) (net.Conn, error) {
|
||||
return &gvisorUDPConnOverTCP{ctx: ctx, Conn: conn}, nil
|
||||
}
|
||||
|
||||
func (c *gvisorFakeUDPTunnelConn) Read(b []byte) (int, error) {
|
||||
func (c *gvisorUDPConnOverTCP) Read(b []byte) (int, error) {
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
return 0, c.ctx.Err()
|
||||
default:
|
||||
dgram, err := readDatagramPacket(c.Conn, b)
|
||||
datagram, err := readDatagramPacket(c.Conn, b)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int(dgram.DataLength), nil
|
||||
return int(datagram.DataLength), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (c *gvisorFakeUDPTunnelConn) Write(b []byte) (int, error) {
|
||||
dgram := newDatagramPacket(b)
|
||||
if err := dgram.Write(c.Conn); err != nil {
|
||||
func (c *gvisorUDPConnOverTCP) Write(b []byte) (int, error) {
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
n := copy(buf, b)
|
||||
defer config.LPool.Put(buf)
|
||||
|
||||
packet := newDatagramPacket(buf, n)
|
||||
if err := packet.Write(c.Conn); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func (c *gvisorFakeUDPTunnelConn) Close() error {
|
||||
func (c *gvisorUDPConnOverTCP) Close() error {
|
||||
if cc, ok := c.Conn.(interface{ CloseRead() error }); ok {
|
||||
_ = cc.CloseRead()
|
||||
}
|
||||
@@ -116,7 +93,7 @@ func (c *gvisorFakeUDPTunnelConn) Close() error {
|
||||
}
|
||||
|
||||
func GvisorUDPListener(addr string) (net.Listener, error) {
|
||||
log.Debug("gvisor UDP over TCP listen addr", addr)
|
||||
plog.G(context.Background()).Infof("Gvisor UDP over TCP listening addr: %s", addr)
|
||||
laddr, err := net.ResolveTCPAddr("tcp", addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -125,82 +102,86 @@ func GvisorUDPListener(addr string) (net.Listener, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &tcpKeepAliveListener{ln}, nil
|
||||
return &tcpKeepAliveListener{TCPListener: ln}, nil
|
||||
}
|
||||
|
||||
func handle(ctx context.Context, tcpConn net.Conn, udpConn *net.UDPConn) {
|
||||
defer udpConn.Close()
|
||||
log.Debugf("[TUN-UDP] Debug: %s <-> %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
|
||||
plog.G(ctx).Debugf("[TUN-UDP] %s <-> %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
|
||||
errChan := make(chan error, 2)
|
||||
go func() {
|
||||
b := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(b[:])
|
||||
defer util.HandleCrash()
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(buf[:])
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
dgram, err := readDatagramPacket(tcpConn, b[:])
|
||||
for ctx.Err() == nil {
|
||||
err := tcpConn.SetReadDeadline(time.Now().Add(time.Second * 30))
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-UDP] Debug: %s -> 0 : %v", tcpConn.RemoteAddr(), err)
|
||||
errChan <- err
|
||||
errChan <- errors.WithMessage(err, "set read deadline failed")
|
||||
return
|
||||
}
|
||||
if dgram.DataLength == 0 {
|
||||
log.Debugf("[TUN-UDP] Error: length is zero")
|
||||
datagram, err := readDatagramPacket(tcpConn, buf)
|
||||
if err != nil {
|
||||
errChan <- errors.WithMessage(err, "read datagram packet failed")
|
||||
return
|
||||
}
|
||||
if datagram.DataLength == 0 {
|
||||
errChan <- fmt.Errorf("length of read packet is zero")
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = udpConn.Write(dgram.Data); err != nil {
|
||||
log.Debugf("[TUN-UDP] Error: %s -> %s : %s", tcpConn.RemoteAddr(), "localhost:8422", err)
|
||||
errChan <- err
|
||||
err = udpConn.SetWriteDeadline(time.Now().Add(time.Second * 30))
|
||||
if err != nil {
|
||||
errChan <- errors.WithMessage(err, "set write deadline failed")
|
||||
return
|
||||
}
|
||||
log.Debugf("[TUN-UDP] Debug: %s >>> %s length: %d", tcpConn.RemoteAddr(), "localhost:8422", dgram.DataLength)
|
||||
if _, err = udpConn.Write(datagram.Data[:datagram.DataLength]); err != nil {
|
||||
errChan <- errors.WithMessage(err, "write datagram packet failed")
|
||||
return
|
||||
}
|
||||
plog.G(ctx).Debugf("[TUN-UDP] %s >>> %s length: %d", tcpConn.RemoteAddr(), udpConn.RemoteAddr(), datagram.DataLength)
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
b := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(b[:])
|
||||
defer util.HandleCrash()
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(buf[:])
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
n, _, err := udpConn.ReadFrom(b[:])
|
||||
for ctx.Err() == nil {
|
||||
err := udpConn.SetReadDeadline(time.Now().Add(time.Second * 30))
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-UDP] Error: %s : %s", tcpConn.RemoteAddr(), err)
|
||||
errChan <- err
|
||||
errChan <- errors.WithMessage(err, "set read deadline failed")
|
||||
return
|
||||
}
|
||||
n, _, err := udpConn.ReadFrom(buf[:])
|
||||
if err != nil {
|
||||
errChan <- errors.WithMessage(err, "read datagram packet failed")
|
||||
return
|
||||
}
|
||||
if n == 0 {
|
||||
log.Debugf("[TUN-UDP] Error: length is zero")
|
||||
errChan <- fmt.Errorf("length of read packet is zero")
|
||||
return
|
||||
}
|
||||
|
||||
// pipe from peer to tunnel
|
||||
dgram := newDatagramPacket(b[:n])
|
||||
if err = dgram.Write(tcpConn); err != nil {
|
||||
log.Debugf("[TUN-UDP] Error: %s <- %s : %s", tcpConn.RemoteAddr(), dgram.Addr(), err)
|
||||
err = tcpConn.SetWriteDeadline(time.Now().Add(time.Second * 30))
|
||||
if err != nil {
|
||||
errChan <- errors.WithMessage(err, "set write deadline failed")
|
||||
return
|
||||
}
|
||||
packet := newDatagramPacket(buf, n)
|
||||
if err = packet.Write(tcpConn); err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
log.Debugf("[TUN-UDP] Debug: %s <<< %s length: %d", tcpConn.RemoteAddr(), dgram.Addr(), len(dgram.Data))
|
||||
plog.G(ctx).Debugf("[TUN-UDP] %s <<< %s length: %d", tcpConn.RemoteAddr(), tcpConn.LocalAddr(), packet.DataLength)
|
||||
}
|
||||
}()
|
||||
err := <-errChan
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-UDP] Error: %v", err)
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
plog.G(ctx).Errorf("[TUN-UDP] %v", err)
|
||||
}
|
||||
log.Debugf("[TUN-UDP] Debug: %s >-< %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
|
||||
plog.G(ctx).Debugf("[TUN-UDP] %s >-< %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
|
||||
return
|
||||
}
|
||||
|
||||
@@ -7,9 +7,7 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrorInvalidNode = errors.New("invalid node")
|
||||
)
|
||||
var ErrorInvalidNode = errors.New("invalid node")
|
||||
|
||||
type Node struct {
|
||||
Addr string
|
||||
@@ -29,12 +27,13 @@ func ParseNode(s string) (*Node, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Node{
|
||||
node := &Node{
|
||||
Addr: u.Host,
|
||||
Remote: strings.Trim(u.EscapedPath(), "/"),
|
||||
Values: u.Query(),
|
||||
Protocol: u.Scheme,
|
||||
}, nil
|
||||
}
|
||||
return node, nil
|
||||
}
|
||||
|
||||
// Get returns node parameter specified by key.
|
||||
|
||||
pkg/core/packetconn.go (new file, 69 lines)
@@ -0,0 +1,69 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
)
|
||||
|
||||
var _ net.PacketConn = (*PacketConnOverTCP)(nil)
|
||||
|
||||
type PacketConnOverTCP struct {
|
||||
// tcp connection
|
||||
net.Conn
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func NewPacketConnOverTCP(ctx context.Context, conn net.Conn) (net.Conn, error) {
|
||||
return &PacketConnOverTCP{ctx: ctx, Conn: conn}, nil
|
||||
}
|
||||
|
||||
func (c *PacketConnOverTCP) ReadFrom(b []byte) (int, net.Addr, error) {
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
return 0, nil, c.ctx.Err()
|
||||
default:
|
||||
datagram, err := readDatagramPacket(c.Conn, b)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
return int(datagram.DataLength), nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (c *PacketConnOverTCP) Read(b []byte) (int, error) {
|
||||
n, _, err := c.ReadFrom(b)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (c *PacketConnOverTCP) WriteTo(b []byte, _ net.Addr) (int, error) {
|
||||
if len(b) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
n := copy(buf, b)
|
||||
defer config.LPool.Put(buf)
|
||||
|
||||
packet := newDatagramPacket(buf, n)
|
||||
if err := packet.Write(c.Conn); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func (c *PacketConnOverTCP) Write(b []byte) (int, error) {
|
||||
n, err := c.WriteTo(b, nil)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (c *PacketConnOverTCP) Close() error {
|
||||
if cc, ok := c.Conn.(interface{ CloseRead() error }); ok {
|
||||
_ = cc.CloseRead()
|
||||
}
|
||||
if cc, ok := c.Conn.(interface{ CloseWrite() error }); ok {
|
||||
_ = cc.CloseWrite()
|
||||
}
|
||||
return c.Conn.Close()
|
||||
}
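
PacketConnOverTCP adapts a stream-oriented TCP connection to the net.PacketConn interface: each write copies the payload into a pooled buffer and sends it as one framed datagram, and each read hands back exactly one datagram. The framing itself lives in newDatagramPacket/readDatagramPacket, which are not part of this hunk. A minimal standalone sketch of the idea, assuming a simple 2-byte big-endian length prefix (an assumption, not necessarily kubevpn's actual wire format):

// Sketch of UDP-over-TCP framing: each datagram is written as a 2-byte
// big-endian length followed by the payload. The real DatagramPacket
// encoding used by kubevpn is not shown in this diff (assumption).
package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"net"
)

func writeDatagram(w io.Writer, payload []byte) error {
	var hdr [2]byte
	binary.BigEndian.PutUint16(hdr[:], uint16(len(payload)))
	if _, err := w.Write(hdr[:]); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}

func readDatagram(r io.Reader, buf []byte) (int, error) {
	var hdr [2]byte
	if _, err := io.ReadFull(r, hdr[:]); err != nil {
		return 0, err
	}
	n := int(binary.BigEndian.Uint16(hdr[:]))
	if n > len(buf) {
		return 0, fmt.Errorf("datagram of %d bytes does not fit buffer", n)
	}
	_, err := io.ReadFull(r, buf[:n])
	return n, err
}

func main() {
	client, server := net.Pipe()
	go func() { _ = writeDatagram(client, []byte("hello over tcp")) }()
	buf := make([]byte, 1500)
	n, err := readDatagram(server, buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("received %d bytes: %s\n", n, buf[:n])
}

Length-prefixing is what preserves datagram boundaries on a byte stream; without it, consecutive UDP payloads would blur together on the TCP connection.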
|
||||
@@ -1,139 +1,130 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/containernetworking/cni/pkg/types"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/tun"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/tun"
|
||||
)
|
||||
|
||||
var (
|
||||
// RouteNAT Globe route table for inner ip
|
||||
RouteNAT = NewNAT()
|
||||
// RouteConnNAT map[srcIP]net.Conn
|
||||
RouteConnNAT = &sync.Map{}
|
||||
// Chan tcp connects
|
||||
Chan = make(chan *datagramPacket, MaxSize)
|
||||
// RouteMapTCP map[srcIP]net.Conn Globe route table for inner ip
|
||||
RouteMapTCP = &sync.Map{}
|
||||
// TCPPacketChan tcp connects
|
||||
TCPPacketChan = make(chan *Packet, MaxSize)
|
||||
)
|
||||
|
||||
type TCPUDPacket struct {
|
||||
data *datagramPacket
|
||||
data *DatagramPacket
|
||||
}
|
||||
|
||||
// Route example:
|
||||
// -L "tcp://:10800" -L "tun://:8422?net=223.254.0.100/16"
|
||||
// -L "tun:/10.233.24.133:8422?net=223.254.0.102/16&route=223.254.0.0/16"
|
||||
// -L "tun:/127.0.0.1:8422?net=223.254.0.102/16&route=223.254.0.0/16,10.233.0.0/16" -F "tcp://127.0.0.1:10800"
|
||||
// -l "tcp://:10800" -l "tun://:8422?net=198.19.0.100/16"
|
||||
// -l "tun:/10.233.24.133:8422?net=198.19.0.102/16&route=198.19.0.0/16"
|
||||
// -l "tun:/127.0.0.1:8422?net=198.19.0.102/16&route=198.19.0.0/16,10.233.0.0/16" -f "tcp://127.0.0.1:10800"
|
||||
type Route struct {
|
||||
ServeNodes []string // -L tun
|
||||
ChainNode string // -F tcp
|
||||
Retries int
|
||||
Listeners []string // -l tun
|
||||
Forwarder string // -f tcp
|
||||
Retries int
|
||||
}
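
The example comments above describe listeners (-l, formerly -L) and the forwarder (-f, formerly -F) as URLs whose scheme selects the protocol handler and whose query string carries tun parameters. A small standalone sketch of how such a URL decomposes, mirroring the ParseNode logic in this file (the URL value is illustrative):

// Sketch of how a listener URL from the Route comment is decomposed,
// mirroring ParseNode (standalone for illustration; the real Node type
// lives in this package).
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	raw := "tun://:8422?net=198.19.0.100/16&route=198.19.0.0/16,10.233.0.0/16"
	u, err := url.Parse(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println("Protocol:", u.Scheme)                           // tun
	fmt.Println("Addr:    ", u.Host)                             // :8422
	fmt.Println("Remote:  ", strings.Trim(u.EscapedPath(), "/")) // empty in server mode
	fmt.Println("net:     ", u.Query().Get("net"))               // 198.19.0.100/16
	fmt.Println("route:   ", u.Query().Get("route"))             // CIDR list consumed by parseRoutes
}

The single-slash form from the comments (tun:/127.0.0.1:8422?...) appears to put the remote address into the URL path instead, which becomes node.Remote and is what Handle later checks to choose between client and server mode.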
|
||||
|
||||
func (r *Route) parseChain() (*Chain, error) {
|
||||
// parse the base nodes
|
||||
node, err := parseChainNode(r.ChainNode)
|
||||
func (r *Route) ParseForwarder() (*Forwarder, error) {
|
||||
forwarder, err := ParseNode(r.Forwarder)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewChain(r.Retries, node), nil
|
||||
}
|
||||
|
||||
func parseChainNode(ns string) (*Node, error) {
|
||||
node, err := ParseNode(ns)
|
||||
if err != nil {
|
||||
log.Errorf("parse node error: %v", err)
|
||||
return nil, err
|
||||
forwarder.Client = &Client{
|
||||
Connector: NewUDPOverTCPConnector(),
|
||||
Transporter: TCPTransporter(nil),
|
||||
}
|
||||
node.Client = &Client{
|
||||
Connector: UDPOverTCPTunnelConnector(),
|
||||
Transporter: TCPTransporter(),
|
||||
}
|
||||
return node, nil
|
||||
return NewForwarder(r.Retries, forwarder), nil
|
||||
}
|
||||
|
||||
func (r *Route) GenerateServers() ([]Server, error) {
|
||||
chain, err := r.parseChain()
|
||||
forwarder, err := r.ParseForwarder()
|
||||
if err != nil && !errors.Is(err, ErrorInvalidNode) {
|
||||
log.Errorf("parse chain error: %v", err)
|
||||
plog.G(context.Background()).Errorf("Failed to parse forwarder: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
servers := make([]Server, 0, len(r.ServeNodes))
|
||||
for _, serveNode := range r.ServeNodes {
|
||||
servers := make([]Server, 0, len(r.Listeners))
|
||||
for _, l := range r.Listeners {
|
||||
var node *Node
|
||||
node, err = ParseNode(serveNode)
|
||||
node, err = ParseNode(l)
|
||||
if err != nil {
|
||||
log.Errorf("parse node %s error: %v", serveNode, err)
|
||||
plog.G(context.Background()).Errorf("Failed to parse node %s: %v", l, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var ln net.Listener
|
||||
var listener net.Listener
|
||||
var handler Handler
|
||||
|
||||
switch node.Protocol {
|
||||
case "tun":
|
||||
handler = TunHandler(chain, node)
|
||||
ln, err = tun.Listener(tun.Config{
|
||||
handler = TunHandler(forwarder, node)
|
||||
listener, err = tun.Listener(tun.Config{
|
||||
Name: node.Get("name"),
|
||||
Addr: node.Get("net"),
|
||||
Addr6: os.Getenv(config.EnvInboundPodTunIPv6),
|
||||
Addr6: node.Get("net6"),
|
||||
MTU: node.GetInt("mtu"),
|
||||
Routes: parseIPRoutes(node.Get("route")),
|
||||
Routes: parseRoutes(node.Get("route")),
|
||||
Gateway: node.Get("gw"),
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("create tun listener error: %v", err)
|
||||
plog.G(context.Background()).Errorf("Failed to create tun listener: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
case "tcp":
|
||||
handler = TCPHandler()
|
||||
ln, err = TCPListener(node.Addr)
|
||||
listener, err = TCPListener(node.Addr)
|
||||
if err != nil {
|
||||
log.Errorf("create tcp listener error: %v", err)
|
||||
plog.G(context.Background()).Errorf("Failed to create tcp listener: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
case "gtcp":
|
||||
handler = GvisorTCPHandler()
|
||||
ln, err = GvisorTCPListener(node.Addr)
|
||||
listener, err = GvisorTCPListener(node.Addr)
|
||||
if err != nil {
|
||||
log.Errorf("create gvisor tcp listener error: %v", err)
|
||||
plog.G(context.Background()).Errorf("Failed to create gvisor tcp listener: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
case "gudp":
|
||||
handler = GvisorUDPHandler()
|
||||
ln, err = GvisorUDPListener(node.Addr)
|
||||
listener, err = GvisorUDPListener(node.Addr)
|
||||
if err != nil {
|
||||
log.Errorf("create gvisor udp listener error: %v", err)
|
||||
plog.G(context.Background()).Errorf("Failed to create gvisor udp listener: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
case "ssh":
|
||||
handler = SSHHandler()
|
||||
listener, err = SSHListener(node.Addr)
|
||||
if err != nil {
|
||||
plog.G(context.Background()).Errorf("Failed to create ssh listener: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
log.Errorf("not support protocol %s", node.Protocol)
|
||||
plog.G(context.Background()).Errorf("Not support protocol %s", node.Protocol)
|
||||
return nil, fmt.Errorf("not support protocol %s", node.Protocol)
|
||||
}
|
||||
servers = append(servers, Server{Listener: ln, Handler: handler})
|
||||
servers = append(servers, Server{Listener: listener, Handler: handler})
|
||||
}
|
||||
return servers, nil
|
||||
}
|
||||
|
||||
func parseIPRoutes(routeStringList string) (routes []types.Route) {
|
||||
if len(routeStringList) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
routeList := strings.Split(routeStringList, ",")
|
||||
for _, route := range routeList {
|
||||
func parseRoutes(str string) []types.Route {
|
||||
var routes []types.Route
|
||||
list := strings.Split(str, ",")
|
||||
for _, route := range list {
|
||||
if _, ipNet, _ := net.ParseCIDR(strings.TrimSpace(route)); ipNet != nil {
|
||||
routes = append(routes, types.Route{Dst: *ipNet})
|
||||
}
|
||||
}
|
||||
return
|
||||
return routes
|
||||
}
|
||||
|
||||
pkg/core/ssh.go (new file, 66 lines)
@@ -0,0 +1,66 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"io"
|
||||
"net"
|
||||
|
||||
"github.com/gliderlabs/ssh"
|
||||
gossh "golang.org/x/crypto/ssh"
|
||||
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
)
|
||||
|
||||
func SSHListener(addr string) (net.Listener, error) {
|
||||
ln, err := net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
plog.G(context.Background()).Infof("starting ssh server on port %s...", addr)
|
||||
return ln, err
|
||||
}
|
||||
|
||||
func SSHHandler() Handler {
|
||||
return &sshHandler{}
|
||||
}
|
||||
|
||||
type sshHandler struct {
|
||||
}
|
||||
|
||||
func (s *sshHandler) Handle(ctx context.Context, conn net.Conn) {
|
||||
forwardHandler := &ssh.ForwardedTCPHandler{}
|
||||
server := ssh.Server{
|
||||
LocalPortForwardingCallback: ssh.LocalPortForwardingCallback(func(ctx ssh.Context, dhost string, dport uint32) bool {
|
||||
plog.G(ctx).Infoln("Accepted forward", dhost, dport)
|
||||
return true
|
||||
}),
|
||||
Handler: ssh.Handler(func(s ssh.Session) {
|
||||
io.WriteString(s, "Remote forwarding available...\n")
|
||||
<-s.Context().Done()
|
||||
}),
|
||||
ReversePortForwardingCallback: ssh.ReversePortForwardingCallback(func(ctx ssh.Context, host string, port uint32) bool {
|
||||
plog.G(ctx).Infoln("attempt to bind", host, port, "granted")
|
||||
return true
|
||||
}),
|
||||
RequestHandlers: map[string]ssh.RequestHandler{
|
||||
"tcpip-forward": forwardHandler.HandleSSHRequest,
|
||||
"cancel-tcpip-forward": forwardHandler.HandleSSHRequest,
|
||||
},
|
||||
SubsystemHandlers: ssh.DefaultSubsystemHandlers,
|
||||
ChannelHandlers: ssh.DefaultChannelHandlers,
|
||||
HostSigners: func() []ssh.Signer {
|
||||
key, err := rsa.GenerateKey(rand.Reader, 2048)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
fromKey, err := gossh.NewSignerFromKey(key)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return []ssh.Signer{fromKey}
|
||||
}(),
|
||||
}
|
||||
server.HandleConn(conn)
|
||||
}
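
SSHHandler runs an embedded gliderlabs/ssh server on every accepted connection, with local and reverse TCP forwarding callbacks enabled and a throwaway RSA host key generated at startup. A rough client-side sketch of opening a forwarded TCP connection through it with golang.org/x/crypto/ssh (the listener address, user, and target are illustrative assumptions; this server configuration sets no auth handlers, so the sketch relies on that):

// Rough client sketch: dial the embedded SSH server and open a
// direct-tcpip channel (the equivalent of `ssh -L`) through it.
// Address, user, and forward target are illustrative assumptions.
package main

import (
	"fmt"
	"io"

	gossh "golang.org/x/crypto/ssh"
)

func main() {
	cfg := &gossh.ClientConfig{
		User:            "kubevpn",
		Auth:            []gossh.AuthMethod{},          // the server above configures no auth handlers
		HostKeyCallback: gossh.InsecureIgnoreHostKey(), // host key is regenerated on each start
	}
	client, err := gossh.Dial("tcp", "127.0.0.1:2222", cfg) // assumed listener address
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Traffic to 10.96.0.10:80 is tunneled through the SSH server.
	remote, err := client.Dial("tcp", "10.96.0.10:80")
	if err != nil {
		panic(err)
	}
	defer remote.Close()

	_, _ = io.WriteString(remote, "GET / HTTP/1.0\r\n\r\n")
	buf := make([]byte, 4096)
	n, _ := remote.Read(buf)
	fmt.Println(string(buf[:n]))
}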
|
||||
@@ -2,20 +2,44 @@ package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"net"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
type tcpTransporter struct{}
|
||||
type tcpTransporter struct {
|
||||
tlsConfig *tls.Config
|
||||
}
|
||||
|
||||
func TCPTransporter() Transporter {
|
||||
return &tcpTransporter{}
|
||||
func TCPTransporter(tlsInfo map[string][]byte) Transporter {
|
||||
tlsConfig, err := util.GetTlsClientConfig(tlsInfo)
|
||||
if err != nil {
|
||||
if errors.Is(err, util.ErrNoTLSConfig) {
|
||||
plog.G(context.Background()).Warn("tls config not found in config, use raw tcp mode")
|
||||
return &tcpTransporter{}
|
||||
}
|
||||
plog.G(context.Background()).Errorf("failed to get tls client config: %v", err)
|
||||
return &tcpTransporter{}
|
||||
}
|
||||
return &tcpTransporter{tlsConfig: tlsConfig}
|
||||
}
|
||||
|
||||
func (tr *tcpTransporter) Dial(ctx context.Context, addr string) (net.Conn, error) {
|
||||
dialer := &net.Dialer{Timeout: config.DialTimeout}
|
||||
return dialer.DialContext(ctx, "tcp", addr)
|
||||
dialer := &net.Dialer{Timeout: config.DialTimeout, KeepAlive: config.KeepAliveTime}
|
||||
conn, err := dialer.DialContext(ctx, "tcp", addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tr.tlsConfig == nil {
|
||||
plog.G(ctx).Debugf("tls config not found in config, use raw tcp mode")
|
||||
return conn, nil
|
||||
}
|
||||
plog.G(ctx).Debugf("Use tls mode")
|
||||
return tls.Client(conn, tr.tlsConfig), nil
|
||||
}
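
TCPTransporter resolves a TLS client config from the supplied tlsInfo up front; Dial then always establishes the plain TCP connection first and only wraps it with tls.Client when that config exists, so missing certificates degrade to raw TCP instead of failing the dial. A standalone sketch of that dial-then-maybe-wrap pattern (timeout values and the example address are illustrative, not kubevpn's defaults):

// Sketch of the "dial TCP, wrap with TLS only if configured" pattern
// used by tcpTransporter.Dial. Timeouts and the TLS config source are
// illustrative assumptions.
package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"net"
	"time"
)

func dialMaybeTLS(ctx context.Context, addr string, tlsConfig *tls.Config) (net.Conn, error) {
	dialer := &net.Dialer{Timeout: 10 * time.Second, KeepAlive: 30 * time.Second}
	conn, err := dialer.DialContext(ctx, "tcp", addr)
	if err != nil {
		return nil, err
	}
	if tlsConfig == nil {
		// No TLS material available: stay on raw TCP, as the transporter does.
		return conn, nil
	}
	return tls.Client(conn, tlsConfig), nil
}

func main() {
	conn, err := dialMaybeTLS(context.Background(), "example.com:443", &tls.Config{ServerName: "example.com"})
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}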
|
||||
|
||||
func TCPListener(addr string) (net.Listener, error) {
|
||||
@@ -23,11 +47,20 @@ func TCPListener(addr string) (net.Listener, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ln, err := net.ListenTCP("tcp", laddr)
|
||||
listener, err := net.ListenTCP("tcp", laddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &tcpKeepAliveListener{ln}, nil
|
||||
serverConfig, err := util.GetTlsServerConfig(nil)
|
||||
if err != nil {
|
||||
if errors.Is(err, util.ErrNoTLSConfig) {
|
||||
plog.G(context.Background()).Warn("tls config not found in config, use raw tcp mode")
|
||||
return &tcpKeepAliveListener{TCPListener: listener}, nil
|
||||
}
|
||||
plog.G(context.Background()).Errorf("failed to get tls server config: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
return tls.NewListener(&tcpKeepAliveListener{TCPListener: listener}, serverConfig), nil
|
||||
}
|
||||
|
||||
type tcpKeepAliveListener struct {
|
||||
|
||||
@@ -4,22 +4,22 @@ import (
|
||||
"context"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/google/gopacket/layers"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
type fakeUDPTunnelConnector struct {
|
||||
type UDPOverTCPConnector struct {
|
||||
}
|
||||
|
||||
func UDPOverTCPTunnelConnector() Connector {
|
||||
return &fakeUDPTunnelConnector{}
|
||||
func NewUDPOverTCPConnector() Connector {
|
||||
return &UDPOverTCPConnector{}
|
||||
}
|
||||
|
||||
func (c *fakeUDPTunnelConnector) ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error) {
|
||||
func (c *UDPOverTCPConnector) ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error) {
|
||||
//defer conn.SetDeadline(time.Time{})
|
||||
switch con := conn.(type) {
|
||||
case *net.TCPConn:
|
||||
@@ -31,116 +31,142 @@ func (c *fakeUDPTunnelConnector) ConnectContext(ctx context.Context, conn net.Co
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = con.SetKeepAlivePeriod(15 * time.Second)
|
||||
err = con.SetKeepAlivePeriod(config.KeepAliveTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return newFakeUDPTunnelConnOverTCP(ctx, conn)
|
||||
return newUDPConnOverTCP(ctx, conn)
|
||||
}
|
||||
|
||||
type fakeUdpHandler struct {
|
||||
type UDPOverTCPHandler struct {
|
||||
// map[srcIP]net.Conn
|
||||
connNAT *sync.Map
|
||||
ch chan *datagramPacket
|
||||
routeMapTCP *sync.Map
|
||||
packetChan chan *Packet
|
||||
}
|
||||
|
||||
func TCPHandler() Handler {
|
||||
return &fakeUdpHandler{
|
||||
connNAT: RouteConnNAT,
|
||||
ch: Chan,
|
||||
return &UDPOverTCPHandler{
|
||||
routeMapTCP: RouteMapTCP,
|
||||
packetChan: TCPPacketChan,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *fakeUdpHandler) Handle(ctx context.Context, tcpConn net.Conn) {
|
||||
func (h *UDPOverTCPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
|
||||
tcpConn = NewBufferedTCP(tcpConn)
|
||||
defer tcpConn.Close()
|
||||
log.Debugf("[tcpserver] %s -> %s\n", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
|
||||
plog.G(ctx).Infof("[TCP] Handle connection %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
|
||||
|
||||
defer func(addr net.Addr) {
|
||||
var keys []string
|
||||
h.connNAT.Range(func(key, value any) bool {
|
||||
if value.(net.Conn) == tcpConn {
|
||||
keys = append(keys, key.(string))
|
||||
}
|
||||
return true
|
||||
})
|
||||
for _, key := range keys {
|
||||
h.connNAT.Delete(key)
|
||||
}
|
||||
log.Debugf("[tcpserver] delete conn %s from globle routeConnNAT, deleted count %d", addr, len(keys))
|
||||
}(tcpConn.LocalAddr())
|
||||
defer h.removeFromRouteMapTCP(ctx, tcpConn)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
b := config.LPool.Get().([]byte)[:]
|
||||
dgram, err := readDatagramPacketServer(tcpConn, b[:])
|
||||
for ctx.Err() == nil {
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
datagram, err := readDatagramPacket(tcpConn, buf)
|
||||
if err != nil {
|
||||
log.Debugf("[tcpserver] %s -> 0 : %v", tcpConn.RemoteAddr(), err)
|
||||
plog.G(ctx).Errorf("[TCP] Failed to read from %s -> %s: %v", tcpConn.RemoteAddr(), tcpConn.LocalAddr(), err)
|
||||
config.LPool.Put(buf[:])
|
||||
return
|
||||
}
|
||||
|
||||
var src net.IP
|
||||
bb := dgram.Data[:dgram.DataLength]
|
||||
if util.IsIPv4(bb) {
|
||||
src = net.IPv4(bb[12], bb[13], bb[14], bb[15])
|
||||
} else if util.IsIPv6(bb) {
|
||||
src = bb[8:24]
|
||||
} else {
|
||||
log.Errorf("[tcpserver] unknown packet")
|
||||
continue
|
||||
err = h.handlePacket(ctx, tcpConn, datagram)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
value, loaded := h.connNAT.LoadOrStore(src.String(), tcpConn)
|
||||
if loaded {
|
||||
if tcpConn != value.(net.Conn) {
|
||||
h.connNAT.Store(src.String(), tcpConn)
|
||||
log.Debugf("[tcpserver] replace routeConnNAT: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
|
||||
}
|
||||
log.Debugf("[tcpserver] find routeConnNAT: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
|
||||
} else {
|
||||
log.Debugf("[tcpserver] new routeConnNAT: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
|
||||
}
|
||||
h.ch <- dgram
|
||||
}
|
||||
}
|
||||
|
||||
// fake udp connect over tcp
|
||||
type fakeUDPTunnelConn struct {
|
||||
func (h *UDPOverTCPHandler) handlePacket(ctx context.Context, tcpConn net.Conn, datagram *DatagramPacket) error {
|
||||
src, dst, protocol, err := util.ParseIP(datagram.Data[:datagram.DataLength])
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("[TCP] Unknown packet")
|
||||
config.LPool.Put(datagram.Data[:])
|
||||
return err
|
||||
}
|
||||
|
||||
h.addToRouteMapTCP(ctx, src, tcpConn)
|
||||
|
||||
if conn, ok := h.routeMapTCP.Load(dst.String()); ok {
|
||||
plog.G(ctx).Debugf("[TCP] Find TCP route SRC: %s to DST: %s -> %s", src, dst, conn.(net.Conn).RemoteAddr())
|
||||
err = datagram.Write(conn.(net.Conn))
|
||||
config.LPool.Put(datagram.Data[:])
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("[TCP] Failed to write to %s <- %s : %s", conn.(net.Conn).RemoteAddr(), conn.(net.Conn).LocalAddr(), err)
|
||||
return err
|
||||
}
|
||||
} else if (config.CIDR.Contains(dst) || config.CIDR6.Contains(dst)) && (!config.RouterIP.Equal(dst) && !config.RouterIP6.Equal(dst)) {
|
||||
plog.G(ctx).Warnf("[TCP] No route for src: %s -> dst: %s, drop it", src, dst)
|
||||
config.LPool.Put(datagram.Data[:])
|
||||
} else {
|
||||
plog.G(ctx).Debugf("[TCP] Forward to TUN device, SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, layers.IPProtocol(protocol).String(), datagram.DataLength)
|
||||
util.SafeWrite(h.packetChan, NewPacket(datagram.Data, int(datagram.DataLength), src, dst), func(v *Packet) {
|
||||
plog.G(context.Background()).Errorf("Stuck packet, SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, layers.IPProtocol(protocol).String(), v.length)
|
||||
h.packetChan <- v
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *UDPOverTCPHandler) addToRouteMapTCP(ctx context.Context, src net.IP, tcpConn net.Conn) {
|
||||
value, loaded := h.routeMapTCP.LoadOrStore(src.String(), tcpConn)
|
||||
if loaded {
|
||||
if value.(net.Conn) != tcpConn {
|
||||
h.routeMapTCP.Store(src.String(), tcpConn)
|
||||
plog.G(ctx).Infof("[TCP] Replace route map TCP to DST %s by connation %s -> %s", src, tcpConn.RemoteAddr(), tcpConn.LocalAddr())
|
||||
}
|
||||
} else {
|
||||
plog.G(ctx).Infof("[TCP] Add new route map TCP to DST %s by connation %s -> %s", src, tcpConn.RemoteAddr(), tcpConn.LocalAddr())
|
||||
}
|
||||
}
|
||||
|
||||
func (h *UDPOverTCPHandler) removeFromRouteMapTCP(ctx context.Context, tcpConn net.Conn) {
|
||||
h.routeMapTCP.Range(func(key, value any) bool {
|
||||
if value.(net.Conn) == tcpConn {
|
||||
h.routeMapTCP.Delete(key)
|
||||
plog.G(ctx).Infof("[TCP] Delete to DST: %s by conn %s -> %s from globle route map TCP", key, tcpConn.RemoteAddr(), tcpConn.LocalAddr())
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
var _ net.Conn = (*UDPConnOverTCP)(nil)
|
||||
|
||||
// UDPConnOverTCP fake udp connection over tcp connection
|
||||
type UDPConnOverTCP struct {
|
||||
// tcp connection
|
||||
net.Conn
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func newFakeUDPTunnelConnOverTCP(ctx context.Context, conn net.Conn) (net.Conn, error) {
|
||||
return &fakeUDPTunnelConn{ctx: ctx, Conn: conn}, nil
|
||||
func newUDPConnOverTCP(ctx context.Context, conn net.Conn) (net.Conn, error) {
|
||||
return &UDPConnOverTCP{ctx: ctx, Conn: conn}, nil
|
||||
}
|
||||
|
||||
func (c *fakeUDPTunnelConn) ReadFrom(b []byte) (int, net.Addr, error) {
|
||||
func (c *UDPConnOverTCP) Read(b []byte) (int, error) {
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
return 0, nil, c.ctx.Err()
|
||||
return 0, c.ctx.Err()
|
||||
default:
|
||||
dgram, err := readDatagramPacket(c.Conn, b)
|
||||
datagram, err := readDatagramPacket(c.Conn, b)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
return 0, err
|
||||
}
|
||||
return int(dgram.DataLength), dgram.Addr(), nil
|
||||
return int(datagram.DataLength), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (c *fakeUDPTunnelConn) WriteTo(b []byte, _ net.Addr) (int, error) {
|
||||
dgram := newDatagramPacket(b)
|
||||
if err := dgram.Write(c.Conn); err != nil {
|
||||
func (c *UDPConnOverTCP) Write(b []byte) (int, error) {
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
n := copy(buf, b)
|
||||
defer config.LPool.Put(buf)
|
||||
|
||||
packet := newDatagramPacket(buf, n)
|
||||
if err := packet.Write(c.Conn); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func (c *fakeUDPTunnelConn) Close() error {
|
||||
func (c *UDPConnOverTCP) Close() error {
|
||||
if cc, ok := c.Conn.(interface{ CloseRead() error }); ok {
|
||||
_ = cc.CloseRead()
|
||||
}
|
||||
|
||||
@@ -1,221 +0,0 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/google/gopacket/layers"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/ipv4"
|
||||
"golang.org/x/net/ipv6"
|
||||
"gvisor.dev/gvisor/pkg/buffer"
|
||||
"gvisor.dev/gvisor/pkg/tcpip"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/header"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/stack"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
)
|
||||
|
||||
var _ stack.LinkEndpoint = (*tunEndpoint)(nil)
|
||||
|
||||
// tunEndpoint /Users/naison/go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20220422052705-39790bd3a15a/pkg/tcpip/link/tun/device.go:122
|
||||
type tunEndpoint struct {
|
||||
ctx context.Context
|
||||
tun net.Conn
|
||||
once sync.Once
|
||||
endpoint *channel.Endpoint
|
||||
engine config.Engine
|
||||
|
||||
in chan<- *DataElem
|
||||
out chan *DataElem
|
||||
}
|
||||
|
||||
// WritePackets writes packets. Must not be called with an empty list of
|
||||
// packet buffers.
|
||||
//
|
||||
// WritePackets may modify the packet buffers, and takes ownership of the PacketBufferList.
|
||||
// it is not safe to use the PacketBufferList after a call to WritePackets.
|
||||
func (e *tunEndpoint) WritePackets(p stack.PacketBufferList) (int, tcpip.Error) {
|
||||
return e.endpoint.WritePackets(p)
|
||||
}
|
||||
|
||||
// MTU is the maximum transmission unit for this endpoint. This is
|
||||
// usually dictated by the backing physical network; when such a
|
||||
// physical network doesn't exist, the limit is generally 64k, which
|
||||
// includes the maximum size of an IP packet.
|
||||
func (e *tunEndpoint) MTU() uint32 {
|
||||
return uint32(config.DefaultMTU)
|
||||
}
|
||||
|
||||
// MaxHeaderLength returns the maximum size the data link (and
|
||||
// lower level layers combined) headers can have. Higher levels use this
|
||||
// information to reserve space in the front of the packets they're
|
||||
// building.
|
||||
func (e *tunEndpoint) MaxHeaderLength() uint16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// LinkAddress returns the link address (typically a MAC) of the
|
||||
// endpoint.
|
||||
func (e *tunEndpoint) LinkAddress() tcpip.LinkAddress {
|
||||
return e.endpoint.LinkAddress()
|
||||
}
|
||||
|
||||
// Capabilities returns the set of capabilities supported by the
|
||||
// endpoint.
|
||||
func (e *tunEndpoint) Capabilities() stack.LinkEndpointCapabilities {
|
||||
return e.endpoint.LinkEPCapabilities
|
||||
}
|
||||
|
||||
// Attach attaches the data link layer endpoint to the network-layer
|
||||
// dispatcher of the stack.
|
||||
//
|
||||
// Attach is called with a nil dispatcher when the endpoint's NIC is being
|
||||
// removed.
|
||||
func (e *tunEndpoint) Attach(dispatcher stack.NetworkDispatcher) {
|
||||
e.endpoint.Attach(dispatcher)
|
||||
// queue --> tun
|
||||
e.once.Do(func() {
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-e.ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
read := e.endpoint.ReadContext(e.ctx)
|
||||
if !read.IsNil() {
|
||||
bb := read.ToView().AsSlice()
|
||||
i := config.LPool.Get().([]byte)[:]
|
||||
n := copy(i, bb)
|
||||
bb = nil
|
||||
e.out <- NewDataElem(i[:], n, nil, nil)
|
||||
}
|
||||
}
|
||||
}()
|
||||
// tun --> dispatcher
|
||||
go func() {
|
||||
// full(all use gvisor), mix(cluster network use gvisor), raw(not use gvisor)
|
||||
for {
|
||||
bytes := config.LPool.Get().([]byte)[:]
|
||||
read, err := e.tun.Read(bytes[:])
|
||||
if err != nil {
|
||||
// if context is still going
|
||||
if e.ctx.Err() == nil {
|
||||
log.Fatalf("[TUN]: read from tun failed: %v", err)
|
||||
} else {
|
||||
log.Info("tun device closed")
|
||||
}
|
||||
return
|
||||
}
|
||||
if read == 0 {
|
||||
log.Warnf("[TUN]: read from tun length is %d", read)
|
||||
continue
|
||||
}
|
||||
// Try to determine network protocol number, default zero.
|
||||
var protocol tcpip.NetworkProtocolNumber
|
||||
var ipProtocol int
|
||||
var src, dst net.IP
|
||||
// TUN interface with IFF_NO_PI enabled, thus
|
||||
// we need to determine protocol from version field
|
||||
version := bytes[0] >> 4
|
||||
if version == 4 {
|
||||
protocol = header.IPv4ProtocolNumber
|
||||
ipHeader, err := ipv4.ParseHeader(bytes[:read])
|
||||
if err != nil {
|
||||
log.Errorf("parse ipv4 header failed: %s", err.Error())
|
||||
continue
|
||||
}
|
||||
ipProtocol = ipHeader.Protocol
|
||||
src = ipHeader.Src
|
||||
dst = ipHeader.Dst
|
||||
} else if version == 6 {
|
||||
protocol = header.IPv6ProtocolNumber
|
||||
ipHeader, err := ipv6.ParseHeader(bytes[:read])
|
||||
if err != nil {
|
||||
log.Errorf("parse ipv6 header failed: %s", err.Error())
|
||||
continue
|
||||
}
|
||||
ipProtocol = ipHeader.NextHeader
|
||||
src = ipHeader.Src
|
||||
dst = ipHeader.Dst
|
||||
} else {
|
||||
log.Debugf("[TUN-gvisor] unknown packet version %d", version)
|
||||
continue
|
||||
}
|
||||
// only tcp and udp needs to distinguish transport engine
|
||||
// gvisor: all network use gvisor
|
||||
// mix: cluster network use gvisor, diy network use raw
|
||||
// raw: all network use raw
|
||||
if (ipProtocol == int(layers.IPProtocolUDP) || ipProtocol == int(layers.IPProtocolUDPLite) || ipProtocol == int(layers.IPProtocolTCP)) &&
|
||||
(e.engine == config.EngineGvisor || (e.engine == config.EngineMix && (!config.CIDR.Contains(dst) && !config.CIDR6.Contains(dst)))) {
|
||||
pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
|
||||
ReserveHeaderBytes: 0,
|
||||
Payload: buffer.MakeWithData(bytes[:read]),
|
||||
})
|
||||
//defer pkt.DecRef()
|
||||
config.LPool.Put(bytes[:])
|
||||
e.endpoint.InjectInbound(protocol, pkt)
|
||||
log.Debugf("[TUN-%s] IP-Protocol: %s, SRC: %s, DST: %s, Length: %d", layers.IPProtocol(ipProtocol).String(), layers.IPProtocol(ipProtocol).String(), src.String(), dst, read)
|
||||
} else {
|
||||
log.Debugf("[TUN-RAW] IP-Protocol: %s, SRC: %s, DST: %s, Length: %d", layers.IPProtocol(ipProtocol).String(), src.String(), dst, read)
|
||||
e.in <- NewDataElem(bytes[:], read, src, dst)
|
||||
}
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
for elem := range e.out {
|
||||
_, err := e.tun.Write(elem.Data()[:elem.Length()])
|
||||
config.LPool.Put(elem.Data()[:])
|
||||
if err != nil {
|
||||
log.Fatalf("[TUN] Fatal: failed to write data to tun device: %v", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
})
|
||||
}
|
||||
|
||||
// IsAttached returns whether a NetworkDispatcher is attached to the
|
||||
// endpoint.
|
||||
func (e *tunEndpoint) IsAttached() bool {
|
||||
return e.endpoint.IsAttached()
|
||||
}
|
||||
|
||||
// Wait waits for any worker goroutines owned by the endpoint to stop.
|
||||
//
|
||||
// For now, requesting that an endpoint's worker goroutine(s) stop is
|
||||
// implementation specific.
|
||||
//
|
||||
// Wait will not block if the endpoint hasn't started any goroutines
|
||||
// yet, even if it might later.
|
||||
func (e *tunEndpoint) Wait() {
|
||||
return
|
||||
}
|
||||
|
||||
// ARPHardwareType returns the ARPHRD_TYPE of the link endpoint.
|
||||
//
|
||||
// See:
|
||||
// https://github.com/torvalds/linux/blob/aa0c9086b40c17a7ad94425b3b70dd1fdd7497bf/include/uapi/linux/if_arp.h#L30
|
||||
func (e *tunEndpoint) ARPHardwareType() header.ARPHardwareType {
|
||||
return header.ARPHardwareNone
|
||||
}
|
||||
|
||||
// AddHeader adds a link layer header to the packet if required.
|
||||
func (e *tunEndpoint) AddHeader(ptr stack.PacketBufferPtr) {
|
||||
return
|
||||
}
|
||||
|
||||
func NewTunEndpoint(ctx context.Context, tun net.Conn, mtu uint32, engine config.Engine, in chan<- *DataElem, out chan *DataElem) stack.LinkEndpoint {
|
||||
addr, _ := tcpip.ParseMACAddress("02:03:03:04:05:06")
|
||||
return &tunEndpoint{
|
||||
ctx: ctx,
|
||||
tun: tun,
|
||||
endpoint: channel.New(tcp.DefaultReceiveBufferSize, mtu, addr),
|
||||
engine: engine,
|
||||
in: in,
|
||||
out: out,
|
||||
}
|
||||
}
|
||||
@@ -2,411 +2,165 @@ package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/gopacket"
|
||||
"github.com/google/gopacket/layers"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
const (
|
||||
MaxSize = 1000
|
||||
MaxThread = 10
|
||||
MaxConn = 1
|
||||
MaxSize = 1000
|
||||
)
|
||||
|
||||
type tunHandler struct {
|
||||
chain *Chain
|
||||
node *Node
|
||||
routeNAT *NAT
|
||||
// map[srcIP]net.Conn
|
||||
routeConnNAT *sync.Map
|
||||
chExit chan error
|
||||
}
|
||||
|
||||
type NAT struct {
|
||||
lock *sync.RWMutex
|
||||
routes map[string][]net.Addr
|
||||
}
|
||||
|
||||
func NewNAT() *NAT {
|
||||
return &NAT{
|
||||
lock: &sync.RWMutex{},
|
||||
routes: map[string][]net.Addr{},
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NAT) RemoveAddr(addr net.Addr) (count int) {
|
||||
n.lock.Lock()
|
||||
defer n.lock.Unlock()
|
||||
for k, v := range n.routes {
|
||||
for i := 0; i < len(v); i++ {
|
||||
if v[i].String() == addr.String() {
|
||||
v = append(v[:i], v[i+1:]...)
|
||||
i--
|
||||
count++
|
||||
}
|
||||
}
|
||||
n.routes[k] = v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (n *NAT) LoadOrStore(to net.IP, addr net.Addr) (result net.Addr, load bool) {
|
||||
n.lock.RLock()
|
||||
addrList := n.routes[to.String()]
|
||||
n.lock.RUnlock()
|
||||
for _, add := range addrList {
|
||||
if add.String() == addr.String() {
|
||||
load = true
|
||||
result = addr
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
n.lock.Lock()
|
||||
defer n.lock.Unlock()
|
||||
if addrList == nil {
|
||||
n.routes[to.String()] = []net.Addr{addr}
|
||||
result = addr
|
||||
return
|
||||
} else {
|
||||
n.routes[to.String()] = append(n.routes[to.String()], addr)
|
||||
result = addr
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NAT) RouteTo(ip net.IP) net.Addr {
|
||||
n.lock.RLock()
|
||||
defer n.lock.RUnlock()
|
||||
addrList := n.routes[ip.String()]
|
||||
if len(addrList) == 0 {
|
||||
return nil
|
||||
}
|
||||
// for load balance
|
||||
index := rand.Intn(len(n.routes[ip.String()]))
|
||||
return addrList[index]
|
||||
}
|
||||
|
||||
func (n *NAT) Remove(ip net.IP, addr net.Addr) {
|
||||
n.lock.Lock()
|
||||
defer n.lock.Unlock()
|
||||
|
||||
addrList, ok := n.routes[ip.String()]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
for i := 0; i < len(addrList); i++ {
|
||||
if addrList[i].String() == addr.String() {
|
||||
addrList = append(addrList[:i], addrList[i+1:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
n.routes[ip.String()] = addrList
|
||||
return
|
||||
}
|
||||
|
||||
func (n *NAT) Range(f func(key string, v []net.Addr)) {
|
||||
n.lock.RLock()
|
||||
defer n.lock.RUnlock()
|
||||
for k, v := range n.routes {
|
||||
f(k, v)
|
||||
}
|
||||
forward *Forwarder
|
||||
node *Node
|
||||
routeMapUDP *sync.Map
|
||||
routeMapTCP *sync.Map
|
||||
errChan chan error
|
||||
}
|
||||
|
||||
// TunHandler creates a handler for tun tunnel.
|
||||
func TunHandler(chain *Chain, node *Node) Handler {
|
||||
func TunHandler(forward *Forwarder, node *Node) Handler {
|
||||
return &tunHandler{
|
||||
chain: chain,
|
||||
node: node,
|
||||
routeNAT: RouteNAT,
|
||||
routeConnNAT: RouteConnNAT,
|
||||
chExit: make(chan error, 1),
|
||||
forward: forward,
|
||||
node: node,
|
||||
routeMapUDP: &sync.Map{},
|
||||
routeMapTCP: RouteMapTCP,
|
||||
errChan: make(chan error, 1),
|
||||
}
|
||||
}
|
||||
|
||||
func (h *tunHandler) Handle(ctx context.Context, tun net.Conn) {
|
||||
if h.node.Remote != "" {
|
||||
if remote := h.node.Remote; remote != "" {
|
||||
h.HandleClient(ctx, tun)
|
||||
} else {
|
||||
h.HandleServer(ctx, tun)
|
||||
}
|
||||
}
|
||||
|
||||
func (h tunHandler) printRoute() {
|
||||
for {
|
||||
select {
|
||||
case <-time.Tick(time.Second * 5):
|
||||
var i int
|
||||
var sb strings.Builder
|
||||
h.routeNAT.Range(func(key string, value []net.Addr) {
|
||||
i++
|
||||
var s []string
|
||||
for _, addr := range value {
|
||||
if addr != nil {
|
||||
s = append(s, addr.String())
|
||||
}
|
||||
}
|
||||
if len(s) != 0 {
|
||||
sb.WriteString(fmt.Sprintf("to: %s, route: %s\n", key, strings.Join(s, " ")))
|
||||
}
|
||||
})
|
||||
log.Debug(sb.String())
|
||||
log.Debug(i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type Device struct {
|
||||
tun net.Conn
|
||||
thread int
|
||||
|
||||
tunInboundRaw chan *DataElem
|
||||
tunInbound chan *DataElem
|
||||
tunOutbound chan *DataElem
|
||||
|
||||
// your main logic
|
||||
tunInboundHandler func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem)
|
||||
|
||||
chExit chan error
|
||||
}
|
||||
|
||||
func (d *Device) readFromTun() {
|
||||
for {
|
||||
b := config.LPool.Get().([]byte)[:]
|
||||
n, err := d.tun.Read(b[:])
|
||||
if err != nil {
|
||||
select {
|
||||
case d.chExit <- err:
|
||||
default:
|
||||
}
|
||||
return
|
||||
}
|
||||
d.tunInboundRaw <- &DataElem{
|
||||
data: b[:],
|
||||
length: n,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Device) writeToTun() {
|
||||
for e := range d.tunOutbound {
|
||||
_, err := d.tun.Write(e.data[:e.length])
|
||||
config.LPool.Put(e.data[:])
|
||||
if err != nil {
|
||||
select {
|
||||
case d.chExit <- err:
|
||||
default:
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Device) parseIPHeader() {
|
||||
for e := range d.tunInboundRaw {
|
||||
if util.IsIPv4(e.data[:e.length]) {
|
||||
// ipv4.ParseHeader
|
||||
b := e.data[:e.length]
|
||||
e.src = net.IPv4(b[12], b[13], b[14], b[15])
|
||||
e.dst = net.IPv4(b[16], b[17], b[18], b[19])
|
||||
} else if util.IsIPv6(e.data[:e.length]) {
|
||||
// ipv6.ParseHeader
|
||||
e.src = e.data[:e.length][8:24]
|
||||
e.dst = e.data[:e.length][24:40]
|
||||
} else {
|
||||
log.Errorf("[tun-packet] unknown packet")
|
||||
continue
|
||||
}
|
||||
|
||||
log.Debugf("[tun] %s --> %s, length: %d", e.src, e.dst, e.length)
|
||||
d.tunInbound <- e
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Device) Close() {
|
||||
d.tun.Close()
|
||||
}
|
||||
|
||||
func heartbeats(tun net.Conn, in chan<- *DataElem) {
|
||||
conn, err := util.GetTunDeviceByConn(tun)
|
||||
if err != nil {
|
||||
log.Errorf("get tun device error: %s", err.Error())
|
||||
return
|
||||
}
|
||||
srcIPv4, srcIPv6, err := util.GetLocalTunIP(conn.Name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if config.RouterIP.To4().Equal(srcIPv4) {
|
||||
return
|
||||
}
|
||||
if config.RouterIP6.To4().Equal(srcIPv6) {
|
||||
return
|
||||
func (h *tunHandler) HandleServer(ctx context.Context, tun net.Conn) {
|
||||
device := &Device{
|
||||
tun: tun,
|
||||
tunInbound: make(chan *Packet, MaxSize),
|
||||
tunOutbound: make(chan *Packet, MaxSize),
|
||||
errChan: h.errChan,
|
||||
}
|
||||
|
||||
var bytes []byte
|
||||
var bytes6 []byte
|
||||
|
||||
ticker := time.NewTicker(time.Second * 5)
|
||||
defer ticker.Stop()
|
||||
|
||||
for ; true; <-ticker.C {
|
||||
for i := 0; i < 4; i++ {
|
||||
if bytes == nil {
|
||||
bytes, err = genICMPPacket(srcIPv4, config.RouterIP)
|
||||
if err != nil {
|
||||
log.Errorf("generate ipv4 packet error: %s", err.Error())
|
||||
continue
|
||||
}
|
||||
}
|
||||
if bytes6 == nil {
|
||||
bytes6, err = genICMPPacketIPv6(srcIPv6, config.RouterIP6)
|
||||
if err != nil {
|
||||
log.Errorf("generate ipv6 packet error: %s", err.Error())
|
||||
continue
|
||||
}
|
||||
}
|
||||
for index, i2 := range [][]byte{bytes, bytes6} {
|
||||
data := config.LPool.Get().([]byte)[:]
|
||||
length := copy(data, i2)
|
||||
var src, dst net.IP
|
||||
if index == 0 {
|
||||
src, dst = srcIPv4, config.RouterIP
|
||||
} else {
|
||||
src, dst = srcIPv6, config.RouterIP6
|
||||
}
|
||||
in <- &DataElem{
|
||||
data: data[:],
|
||||
length: length,
|
||||
src: src,
|
||||
dst: dst,
|
||||
}
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func genICMPPacket(src net.IP, dst net.IP) ([]byte, error) {
|
||||
buf := gopacket.NewSerializeBuffer()
|
||||
icmpLayer := layers.ICMPv4{
|
||||
TypeCode: layers.CreateICMPv4TypeCode(layers.ICMPv4TypeEchoRequest, 0),
|
||||
Id: 3842,
|
||||
Seq: 1,
|
||||
}
|
||||
ipLayer := layers.IPv4{
|
||||
Version: 4,
|
||||
SrcIP: src,
|
||||
DstIP: dst,
|
||||
Protocol: layers.IPProtocolICMPv4,
|
||||
Flags: layers.IPv4DontFragment,
|
||||
TTL: 64,
|
||||
IHL: 5,
|
||||
Id: 55664,
|
||||
}
|
||||
opts := gopacket.SerializeOptions{
|
||||
FixLengths: true,
|
||||
ComputeChecksums: true,
|
||||
}
|
||||
err := gopacket.SerializeLayers(buf, opts, &ipLayer, &icmpLayer)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to serialize icmp packet, err: %v", err)
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func genICMPPacketIPv6(src net.IP, dst net.IP) ([]byte, error) {
|
||||
buf := gopacket.NewSerializeBuffer()
|
||||
icmpLayer := layers.ICMPv6{
|
||||
TypeCode: layers.CreateICMPv6TypeCode(layers.ICMPv6TypeEchoRequest, 0),
|
||||
}
|
||||
ipLayer := layers.IPv6{
|
||||
Version: 6,
|
||||
SrcIP: src,
|
||||
DstIP: dst,
|
||||
NextHeader: layers.IPProtocolICMPv6,
|
||||
HopLimit: 255,
|
||||
}
|
||||
opts := gopacket.SerializeOptions{
|
||||
FixLengths: true,
|
||||
}
|
||||
err := gopacket.SerializeLayers(buf, opts, &ipLayer, &icmpLayer)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to serialize icmp6 packet, err: %v", err)
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
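
Both generators lean on gopacket's SerializeLayers with FixLengths and ComputeChecksums so the heartbeat echo requests come out with valid headers. A small sketch that builds an ICMPv4 packet the same way and decodes it back to inspect the result (addresses are illustrative):

// Sketch: serialize an ICMPv4 echo request the same way genICMPPacket
// does, then decode it with gopacket to inspect the resulting layers.
package main

import (
	"fmt"
	"net"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	buf := gopacket.NewSerializeBuffer()
	icmp := layers.ICMPv4{TypeCode: layers.CreateICMPv4TypeCode(layers.ICMPv4TypeEchoRequest, 0), Id: 1, Seq: 1}
	ip := layers.IPv4{
		Version:  4,
		IHL:      5,
		TTL:      64,
		Protocol: layers.IPProtocolICMPv4,
		SrcIP:    net.ParseIP("198.19.0.102"), // illustrative tun addresses
		DstIP:    net.ParseIP("198.19.0.100"),
	}
	opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true}
	if err := gopacket.SerializeLayers(buf, opts, &ip, &icmp); err != nil {
		panic(err)
	}

	pkt := gopacket.NewPacket(buf.Bytes(), layers.LayerTypeIPv4, gopacket.Default)
	if ipLayer := pkt.Layer(layers.LayerTypeIPv4); ipLayer != nil {
		hdr := ipLayer.(*layers.IPv4)
		fmt.Println("decoded:", hdr.SrcIP, "->", hdr.DstIP, "proto", hdr.Protocol)
	}
}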
|
||||
|
||||
func (d *Device) Start(ctx context.Context) {
|
||||
go d.readFromTun()
|
||||
for i := 0; i < d.thread; i++ {
|
||||
go d.parseIPHeader()
|
||||
}
|
||||
go d.tunInboundHandler(d.tunInbound, d.tunOutbound)
|
||||
go d.writeToTun()
|
||||
go heartbeats(d.tun, d.tunInbound)
|
||||
defer device.Close()
|
||||
go device.readFromTUN(ctx)
|
||||
go device.writeToTUN(ctx)
|
||||
go device.handlePacket(ctx, h.node.Addr, h.routeMapUDP, h.routeMapTCP)
|
||||
|
||||
select {
|
||||
case err := <-d.chExit:
|
||||
log.Errorf("device exit: %s", err.Error())
|
||||
case err := <-device.errChan:
|
||||
plog.G(ctx).Errorf("Device exit: %v", err)
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Device) SetTunInboundHandler(handler func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem)) {
|
||||
d.tunInboundHandler = handler
|
||||
type Device struct {
|
||||
tun net.Conn
|
||||
|
||||
tunInbound chan *Packet
|
||||
tunOutbound chan *Packet
|
||||
|
||||
errChan chan error
|
||||
}
|
||||
|
||||
func (h *tunHandler) HandleServer(ctx context.Context, tun net.Conn) {
|
||||
go h.printRoute()
|
||||
|
||||
device := &Device{
|
||||
tun: tun,
|
||||
thread: MaxThread,
|
||||
tunInboundRaw: make(chan *DataElem, MaxSize),
|
||||
tunInbound: make(chan *DataElem, MaxSize),
|
||||
tunOutbound: make(chan *DataElem, MaxSize),
|
||||
chExit: h.chExit,
|
||||
}
|
||||
device.SetTunInboundHandler(func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem) {
|
||||
for {
|
||||
packetConn, err := (&net.ListenConfig{}).ListenPacket(ctx, "udp", h.node.Addr)
|
||||
if err != nil {
|
||||
log.Debugf("[udp] can not listen %s, err: %v", h.node.Addr, err)
|
||||
return
|
||||
}
|
||||
err = transportTun(ctx, tunInbound, tunOutbound, packetConn, h.routeNAT, h.routeConnNAT)
|
||||
if err != nil {
|
||||
log.Debugf("[tun] %s: %v", tun.LocalAddr(), err)
|
||||
}
|
||||
func (d *Device) readFromTUN(ctx context.Context) {
|
||||
defer util.HandleCrash()
|
||||
for {
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
n, err := d.tun.Read(buf[:])
|
||||
if err != nil {
|
||||
config.LPool.Put(buf[:])
|
||||
plog.G(ctx).Errorf("[TUN] Failed to read from tun device: %v", err)
|
||||
util.SafeWrite(d.errChan, err)
|
||||
return
|
||||
}
|
||||
})
|
||||
|
||||
defer device.Close()
|
||||
device.Start(ctx)
|
||||
src, dst, protocol, err := util.ParseIP(buf[:n])
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("[TUN] Unknown packet")
|
||||
config.LPool.Put(buf[:])
|
||||
continue
|
||||
}
|
||||
|
||||
plog.G(ctx).Debugf("[TUN] SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, layers.IPProtocol(protocol).String(), n)
|
||||
util.SafeWrite(d.tunInbound, NewPacket(buf[:], n, src, dst), func(v *Packet) {
|
||||
config.LPool.Put(v.data[:])
|
||||
plog.G(context.Background()).Errorf("Drop packet, SRC: %s, DST: %s, Protocol: %s, Length: %d", v.src, v.dst, layers.IPProtocol(protocol).String(), v.length)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type DataElem struct {
|
||||
func (d *Device) writeToTUN(ctx context.Context) {
|
||||
defer util.HandleCrash()
|
||||
for packet := range d.tunOutbound {
|
||||
_, err := d.tun.Write(packet.data[:packet.length])
|
||||
config.LPool.Put(packet.data[:])
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("[TUN] Failed to write to tun device: %v", err)
|
||||
util.SafeWrite(d.errChan, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Device) Close() {
|
||||
d.tun.Close()
|
||||
util.SafeClose(d.tunInbound)
|
||||
util.SafeClose(d.tunOutbound)
|
||||
util.SafeClose(TCPPacketChan)
|
||||
}
|
||||
|
||||
func (d *Device) handlePacket(ctx context.Context, addr string, routeMapUDP *sync.Map, routeMapTCP *sync.Map) {
|
||||
packetConn, err := (&net.ListenConfig{}).ListenPacket(ctx, "udp", addr)
|
||||
if err != nil {
|
||||
util.SafeWrite(d.errChan, err)
|
||||
plog.G(ctx).Errorf("[TUN] Failed to listen %s: %v", addr, err)
|
||||
return
|
||||
}
|
||||
|
||||
p := &Peer{
|
||||
conn: packetConn,
|
||||
tunInbound: d.tunInbound,
|
||||
tunOutbound: d.tunOutbound,
|
||||
routeMapUDP: routeMapUDP,
|
||||
routeMapTCP: routeMapTCP,
|
||||
errChan: make(chan error, 1),
|
||||
}
|
||||
|
||||
go p.readFromConn(ctx)
|
||||
go p.routeTUN(ctx)
|
||||
go p.routeTCPToTun(ctx)
|
||||
|
||||
select {
|
||||
case err = <-p.errChan:
|
||||
plog.G(ctx).Errorf("[TUN] %s: %v", d.tun.LocalAddr(), err)
|
||||
util.SafeWrite(d.errChan, err)
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
type Packet struct {
|
||||
data []byte
|
||||
length int
|
||||
src net.IP
|
||||
dst net.IP
|
||||
}
|
||||
|
||||
func NewDataElem(data []byte, length int, src net.IP, dst net.IP) *DataElem {
|
||||
return &DataElem{
|
||||
func NewPacket(data []byte, length int, src net.IP, dst net.IP) *Packet {
|
||||
return &Packet{
|
||||
data: data,
|
||||
length: length,
|
||||
src: src,
|
||||
@@ -414,36 +168,24 @@ func NewDataElem(data []byte, length int, src net.IP, dst net.IP) *DataElem {
|
||||
}
|
||||
}
|
||||
|
||||
func (d *DataElem) Data() []byte {
|
||||
func (d *Packet) Data() []byte {
|
||||
return d.data
|
||||
}
|
||||
|
||||
func (d *DataElem) Length() int {
|
||||
func (d *Packet) Length() int {
|
||||
return d.length
|
||||
}
|
||||
|
||||
type udpElem struct {
|
||||
from net.Addr
|
||||
data []byte
|
||||
length int
|
||||
src net.IP
|
||||
dst net.IP
|
||||
}
|
||||
|
||||
type Peer struct {
|
||||
conn net.PacketConn
|
||||
thread int
|
||||
conn net.PacketConn
|
||||
|
||||
connInbound chan *udpElem
|
||||
parsedConnInfo chan *udpElem
|
||||
tunInbound chan *Packet
|
||||
tunOutbound chan<- *Packet
|
||||
|
||||
tunInbound <-chan *DataElem
|
||||
tunOutbound chan<- *DataElem
|
||||
|
||||
routeNAT *NAT
|
||||
// map[srcIP]net.Conn
|
||||
// routeConnNAT sync.Map
|
||||
routeConnNAT *sync.Map
|
||||
// map[srcIP.String()]net.Addr for udp
|
||||
routeMapUDP *sync.Map
|
||||
// map[srcIP.String()]net.Conn for tcp
|
||||
routeMapTCP *sync.Map
|
||||
|
||||
errChan chan error
|
||||
}
|
||||
@@ -455,170 +197,73 @@ func (p *Peer) sendErr(err error) {
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Peer) readFromConn() {
|
||||
for {
|
||||
b := config.LPool.Get().([]byte)[:]
|
||||
n, srcAddr, err := p.conn.ReadFrom(b[:])
|
||||
func (p *Peer) readFromConn(ctx context.Context) {
|
||||
defer util.HandleCrash()
|
||||
for ctx.Err() == nil {
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
n, from, err := p.conn.ReadFrom(buf[:])
|
||||
if err != nil {
|
||||
config.LPool.Put(buf[:])
|
||||
p.sendErr(err)
|
||||
return
|
||||
}
|
||||
p.connInbound <- &udpElem{
|
||||
from: srcAddr,
|
||||
data: b[:],
|
||||
length: n,
|
||||
|
||||
src, dst, protocol, err := util.ParseIP(buf[:n])
|
||||
if err != nil {
|
||||
config.LPool.Put(buf[:])
|
||||
plog.G(ctx).Errorf("[TUN] Unknown packet: %v", err)
|
||||
p.sendErr(err)
|
||||
return
|
||||
}
|
||||
p.addToRouteMapUDP(ctx, src, from)
|
||||
plog.G(context.Background()).Errorf("[TUN] SRC: %s, DST: %s, Protocol: %s, Length: %d", src, dst, layers.IPProtocol(protocol).String(), n)
|
||||
p.tunInbound <- NewPacket(buf[:], n, src, dst)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Peer) readFromTCPConn() {
|
||||
for packet := range Chan {
|
||||
u := &udpElem{
|
||||
data: packet.Data[:],
|
||||
length: int(packet.DataLength),
|
||||
func (p *Peer) addToRouteMapUDP(ctx context.Context, src net.IP, from net.Addr) {
|
||||
if addr, loaded := p.routeMapUDP.LoadOrStore(src.String(), from); loaded {
|
||||
if addr.(net.Addr).String() != from.String() {
|
||||
p.routeMapUDP.Store(src.String(), from)
|
||||
plog.G(ctx).Infof("[TUN] Replace route map UDP: %s -> %s", src, from)
|
||||
}
|
||||
b := packet.Data
|
||||
if util.IsIPv4(packet.Data) {
|
||||
// ipv4.ParseHeader
|
||||
u.src = net.IPv4(b[12], b[13], b[14], b[15])
|
||||
u.dst = net.IPv4(b[16], b[17], b[18], b[19])
|
||||
} else if util.IsIPv6(packet.Data) {
|
||||
// ipv6.ParseHeader
|
||||
u.src = b[8:24]
|
||||
u.dst = b[24:40]
|
||||
} else {
|
||||
log.Errorf("[tun-conn] unknown packet")
|
||||
continue
|
||||
}
|
||||
log.Debugf("[tcpserver] udp-tun %s >>> %s length: %d", u.src, u.dst, u.length)
|
||||
p.parsedConnInfo <- u
|
||||
} else {
|
||||
plog.G(ctx).Infof("[TUN] Add new route map UDP: %s -> %s", src, from)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Peer) parseHeader() {
|
||||
var firstIPv4, firstIPv6 = true, true
|
||||
for e := range p.connInbound {
|
||||
b := e.data[:e.length]
|
||||
if util.IsIPv4(e.data[:e.length]) {
|
||||
// ipv4.ParseHeader
|
||||
e.src = net.IPv4(b[12], b[13], b[14], b[15])
|
||||
e.dst = net.IPv4(b[16], b[17], b[18], b[19])
|
||||
} else if util.IsIPv6(e.data[:e.length]) {
|
||||
// ipv6.ParseHeader
|
||||
e.src = b[:e.length][8:24]
|
||||
e.dst = b[:e.length][24:40]
|
||||
} else {
|
||||
log.Errorf("[tun] unknown packet")
|
||||
continue
|
||||
}
|
||||
|
||||
if firstIPv4 || firstIPv6 {
|
||||
if util.IsIPv4(e.data[:e.length]) {
|
||||
firstIPv4 = false
|
||||
} else {
|
||||
firstIPv6 = false
|
||||
}
|
||||
if _, loaded := p.routeNAT.LoadOrStore(e.src, e.from); loaded {
|
||||
log.Debugf("[tun] find route: %s -> %s", e.src, e.from)
|
||||
} else {
|
||||
log.Debugf("[tun] new route: %s -> %s", e.src, e.from)
|
||||
}
|
||||
}
|
||||
p.parsedConnInfo <- e
|
||||
func (p *Peer) routeTCPToTun(ctx context.Context) {
|
||||
defer util.HandleCrash()
|
||||
for packet := range TCPPacketChan {
|
||||
p.tunOutbound <- packet
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Peer) routePeer() {
|
||||
for e := range p.parsedConnInfo {
|
||||
if routeToAddr := p.routeNAT.RouteTo(e.dst); routeToAddr != nil {
|
||||
log.Debugf("[tun] find route: %s -> %s", e.dst, routeToAddr)
|
||||
_, err := p.conn.WriteTo(e.data[:e.length], routeToAddr)
|
||||
config.LPool.Put(e.data[:])
|
||||
func (p *Peer) routeTUN(ctx context.Context) {
|
||||
defer util.HandleCrash()
|
||||
for packet := range p.tunInbound {
|
||||
if addr, ok := p.routeMapUDP.Load(packet.dst.String()); ok {
|
||||
plog.G(ctx).Debugf("[TUN] Find UDP route to DST: %s -> %s, SRC: %s, DST: %s", packet.dst, addr, packet.src, packet.dst)
|
||||
_, err := p.conn.WriteTo(packet.data[:packet.length], addr.(net.Addr))
|
||||
config.LPool.Put(packet.data[:])
|
||||
if err != nil {
|
||||
plog.G(ctx).Errorf("[TUN] Failed wirte to route dst: %s -> %s", packet.dst, addr)
|
||||
p.sendErr(err)
|
||||
return
|
||||
}
|
||||
} else if conn, ok := p.routeConnNAT.Load(e.dst.String()); ok {
|
||||
dgram := newDatagramPacket(e.data[:e.length])
|
||||
if err := dgram.Write(conn.(net.Conn)); err != nil {
|
||||
log.Debugf("[tcpserver] udp-tun %s <- %s : %s", conn.(net.Conn).RemoteAddr(), dgram.Addr(), err)
|
||||
p.sendErr(err)
|
||||
return
|
||||
}
|
||||
config.LPool.Put(e.data[:])
|
||||
} else {
|
||||
p.tunOutbound <- &DataElem{
|
||||
data: e.data,
|
||||
length: e.length,
|
||||
src: e.src,
|
||||
dst: e.dst,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Peer) routeTUN() {
|
||||
for e := range p.tunInbound {
|
||||
if addr := p.routeNAT.RouteTo(e.dst); addr != nil {
|
||||
log.Debugf("[tun] find route: %s -> %s", e.dst, addr)
|
||||
_, err := p.conn.WriteTo(e.data[:e.length], addr)
|
||||
config.LPool.Put(e.data[:])
|
||||
if err != nil {
|
||||
log.Debugf("[tun] can not route: %s -> %s", e.dst, addr)
|
||||
p.sendErr(err)
|
||||
return
|
||||
}
|
||||
} else if conn, ok := p.routeConnNAT.Load(e.dst.String()); ok {
|
||||
dgram := newDatagramPacket(e.data[:e.length])
|
||||
} else if conn, ok := p.routeMapTCP.Load(packet.dst.String()); ok {
|
||||
plog.G(ctx).Debugf("[TUN] Find TCP route to dst: %s -> %s", packet.dst.String(), conn.(net.Conn).RemoteAddr())
|
||||
dgram := newDatagramPacket(packet.data, packet.length)
|
||||
err := dgram.Write(conn.(net.Conn))
|
||||
config.LPool.Put(e.data[:])
|
||||
config.LPool.Put(packet.data[:])
|
||||
if err != nil {
|
||||
log.Debugf("[tcpserver] udp-tun %s <- %s : %s", conn.(net.Conn).RemoteAddr(), dgram.Addr(), err)
|
||||
plog.G(ctx).Errorf("[TUN] Failed to write TCP %s <- %s : %s", conn.(net.Conn).RemoteAddr(), conn.(net.Conn).LocalAddr(), err)
|
||||
p.sendErr(err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
config.LPool.Put(e.data[:])
|
||||
log.Debug(fmt.Errorf("[tun] no route for %s -> %s", e.src, e.dst))
|
||||
plog.G(ctx).Warnf("[TUN] No route for src: %s -> dst: %s, drop it", packet.src, packet.dst)
|
||||
config.LPool.Put(packet.data[:])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Peer) Start() {
|
||||
go p.readFromConn()
|
||||
go p.readFromTCPConn()
|
||||
for i := 0; i < p.thread; i++ {
|
||||
go p.parseHeader()
|
||||
}
|
||||
go p.routePeer()
|
||||
go p.routeTUN()
|
||||
}
|
||||
|
||||
func (p *Peer) Close() {
|
||||
p.conn.Close()
|
||||
}
|
||||
|
||||
func transportTun(ctx context.Context, tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem, packetConn net.PacketConn, nat *NAT, connNAT *sync.Map) error {
|
||||
p := &Peer{
|
||||
conn: packetConn,
|
||||
thread: MaxThread,
|
||||
connInbound: make(chan *udpElem, MaxSize),
|
||||
parsedConnInfo: make(chan *udpElem, MaxSize),
|
||||
tunInbound: tunInbound,
|
||||
tunOutbound: tunOutbound,
|
||||
routeNAT: nat,
|
||||
routeConnNAT: connNAT,
|
||||
errChan: make(chan error, 2),
|
||||
}
|
||||
|
||||
defer p.Close()
|
||||
p.Start()
|
||||
|
||||
select {
|
||||
case err := <-p.errChan:
|
||||
log.Errorf(err.Error())
|
||||
return err
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||