mirror of
https://github.com/kubenetworks/kubevpn.git
synced 2025-12-24 11:51:13 +08:00
Compare commits
255 Commits
errro-log-
...
v2.3.5
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1dc3c057a7 | ||
|
|
81f62eab31 | ||
|
|
d9a978d330 | ||
|
|
c95cb5ba6c | ||
|
|
d418da83b0 | ||
|
|
24a97de5dc | ||
|
|
481b720da6 | ||
|
|
a1247995e7 | ||
|
|
7cb86d70b0 | ||
|
|
9edf0122a7 | ||
|
|
5a0533c0fc | ||
|
|
17a13a2672 | ||
|
|
98c22ba9b7 | ||
|
|
880f842203 | ||
|
|
ab09f9e71c | ||
|
|
ef16641675 | ||
|
|
d9a9000d7b | ||
|
|
a1212f5144 | ||
|
|
f4c22f3073 | ||
|
|
2aa7812cb1 | ||
|
|
cad5d23d33 | ||
|
|
85e8bd76d2 | ||
|
|
a243842052 | ||
|
|
6e052a5a0b | ||
|
|
f966cd29d7 | ||
|
|
bfb7ac441d | ||
|
|
0cc8b04bab | ||
|
|
65ae890842 | ||
|
|
aa881a589e | ||
|
|
07292fcde5 | ||
|
|
3071ff2439 | ||
|
|
a64eaf66da | ||
|
|
9238e9914a | ||
|
|
6e4aeb288a | ||
|
|
105c3967e1 | ||
|
|
5dae60ffbc | ||
|
|
875cb8dc8c | ||
|
|
15103837a7 | ||
|
|
baf5b79a24 | ||
|
|
5618500e66 | ||
|
|
d28096d9fa | ||
|
|
bc960987ea | ||
|
|
1005075367 | ||
|
|
8f4de1968a | ||
|
|
a93f0b1667 | ||
|
|
941373a902 | ||
|
|
605fe047ca | ||
|
|
4d075b29b3 | ||
|
|
d141ec869b | ||
|
|
e2757d3916 | ||
|
|
9d917ae9cb | ||
|
|
0763e8a201 | ||
|
|
274116e44f | ||
|
|
ed375be157 | ||
|
|
be8ef7a127 | ||
|
|
2bfa82d936 | ||
|
|
394bc1a0e4 | ||
|
|
e64b9a3311 | ||
|
|
f9bbaeb3cf | ||
|
|
ac918b5009 | ||
|
|
69b6fa6318 | ||
|
|
63be89bf25 | ||
|
|
c4fb3c5ca0 | ||
|
|
947d50af85 | ||
|
|
0826f2e20c | ||
|
|
9f62e02f96 | ||
|
|
a3b8c1586d | ||
|
|
675ce2a52f | ||
|
|
79e524e319 | ||
|
|
49adeac14c | ||
|
|
9283c2f8f7 | ||
|
|
a48750c048 | ||
|
|
bbf3914f1e | ||
|
|
f13e21a049 | ||
|
|
a37bfc28da | ||
|
|
862238f65f | ||
|
|
18d6f67a5d | ||
|
|
4ae09a9dd2 | ||
|
|
1feaacaba9 | ||
|
|
bc7d205695 | ||
|
|
78de74bf08 | ||
|
|
8c0f2098c9 | ||
|
|
44320a792e | ||
|
|
0e2a8f1ce6 | ||
|
|
b0a6a0d054 | ||
|
|
62b0de99f9 | ||
|
|
295a7a709e | ||
|
|
8d400fd698 | ||
|
|
5f0fe6668a | ||
|
|
993be34b70 | ||
|
|
8093cb125a | ||
|
|
d3542b840a | ||
|
|
d2faffc2c7 | ||
|
|
d2648aabed | ||
|
|
0e87705e5e | ||
|
|
2d947f965f | ||
|
|
35ef5a8c88 | ||
|
|
ce750d9c74 | ||
|
|
207445640e | ||
|
|
e9327ec572 | ||
|
|
deb4ec98f5 | ||
|
|
5cd7ef4a0a | ||
|
|
d6f833fc0b | ||
|
|
faa6229aef | ||
|
|
98d88ac542 | ||
|
|
680e95fd7f | ||
|
|
4aeee5f8d8 | ||
|
|
28d2e78d04 | ||
|
|
d8e0cbcc3d | ||
|
|
ed4c6bbe2f | ||
|
|
a45688115c | ||
|
|
35f0568b09 | ||
|
|
2ec20f7d1d | ||
|
|
a26abab8ce | ||
|
|
9be029e65e | ||
|
|
6fed288e67 | ||
|
|
80e3aa154c | ||
|
|
38b9ad1991 | ||
|
|
eaacf3954f | ||
|
|
12a12bcda7 | ||
|
|
28f6d54398 | ||
|
|
a23b197554 | ||
|
|
a0ca862d59 | ||
|
|
7dd762b853 | ||
|
|
78762cd9e5 | ||
|
|
e58a9bf69e | ||
|
|
a10b1b2526 | ||
|
|
331423c308 | ||
|
|
e5c1ea4b9b | ||
|
|
cc032c4a6d | ||
|
|
984ab2ce89 | ||
|
|
3e51bf0f4d | ||
|
|
e7f00f5899 | ||
|
|
70d5723e97 | ||
|
|
5da018db2a | ||
|
|
a0137ad485 | ||
|
|
37552d3db9 | ||
|
|
5ac8eac923 | ||
|
|
d2d411a1cb | ||
|
|
d16bdf8fea | ||
|
|
ca18dab08f | ||
|
|
706afb348d | ||
|
|
def6c7dfdd | ||
|
|
e64dd428ab | ||
|
|
9df4efb98b | ||
|
|
6f6d338656 | ||
|
|
f93b06ea1c | ||
|
|
ada4b51035 | ||
|
|
679d097e83 | ||
|
|
c7b437c5d8 | ||
|
|
43dad39cca | ||
|
|
d428ee42bc | ||
|
|
0e569fe1a4 | ||
|
|
fe7be90d0b | ||
|
|
bacc8cdc26 | ||
|
|
9c62504489 | ||
|
|
6060bd8120 | ||
|
|
2cd4de52f4 | ||
|
|
03ac484069 | ||
|
|
c7b4499503 | ||
|
|
9a7466479b | ||
|
|
31d7e4debb | ||
|
|
52f1d38e56 | ||
|
|
acd4de313f | ||
|
|
8dbb80be7c | ||
|
|
45491f185d | ||
|
|
4eeecd5255 | ||
|
|
87166494c0 | ||
|
|
91b3a2fbdf | ||
|
|
b7615f57c3 | ||
|
|
e5438b297a | ||
|
|
d3aeae7573 | ||
|
|
aacdc8a6d0 | ||
|
|
fadfd00927 | ||
|
|
600e35b8d7 | ||
|
|
f3d1c99a04 | ||
|
|
18a5569054 | ||
|
|
1baa1de13f | ||
|
|
dcda747d0e | ||
|
|
2fd6427242 | ||
|
|
dc270ca846 | ||
|
|
ab0cd80b39 | ||
|
|
e920133c88 | ||
|
|
0d64dc7b10 | ||
|
|
96845ba37a | ||
|
|
0730cb12b7 | ||
|
|
e232bf902e | ||
|
|
1bc269d901 | ||
|
|
a8826b3334 | ||
|
|
7c560df82b | ||
|
|
939cc8547f | ||
|
|
fed7108eec | ||
|
|
2fdfc1d88d | ||
|
|
64cd7709e8 | ||
|
|
5773b69367 | ||
|
|
c689f47664 | ||
|
|
1f32a129b6 | ||
|
|
01e3456ad3 | ||
|
|
46fcf5521f | ||
|
|
26f53209c6 | ||
|
|
454f67b6c4 | ||
|
|
bd5c3c4cf6 | ||
|
|
991a840db2 | ||
|
|
3ad6127132 | ||
|
|
14e91d5110 | ||
|
|
4abc5f004a | ||
|
|
59abb16136 | ||
|
|
6a232473cd | ||
|
|
878a8190e3 | ||
|
|
d0978aa5b7 | ||
|
|
073c249e96 | ||
|
|
78c8afb456 | ||
|
|
0384de250a | ||
|
|
9be04cc149 | ||
|
|
f9ef4c8dad | ||
|
|
c09ac8f536 | ||
|
|
14731fe8e8 | ||
|
|
dc33331a8c | ||
|
|
879bdbc03d | ||
|
|
8eeb420245 | ||
|
|
847c2c8cc1 | ||
|
|
988c2e7fdc | ||
|
|
8c55d39af2 | ||
|
|
e2cb639c6e | ||
|
|
f9a67a2773 | ||
|
|
cbf3cdff42 | ||
|
|
d35656f3df | ||
|
|
9f97a9202d | ||
|
|
ae2b97a4b4 | ||
|
|
156ee998cd | ||
|
|
b2a6e602e6 | ||
|
|
8650e4ecf9 | ||
|
|
4a2abc24da | ||
|
|
a66fbb1637 | ||
|
|
c3c6864b47 | ||
|
|
80ffd2f468 | ||
|
|
0ad6b103cb | ||
|
|
d9977a5c11 | ||
|
|
6fbae091ec | ||
|
|
8505c26830 | ||
|
|
7c53cbc79b | ||
|
|
c0da61cd4b | ||
|
|
1644201978 | ||
|
|
91ee5be981 | ||
|
|
74bb3d3746 | ||
|
|
51bb3b8700 | ||
|
|
c18b56eb2a | ||
|
|
de050c2944 | ||
|
|
49876dee05 | ||
|
|
abf36b87a6 | ||
|
|
5cc64d17c2 | ||
|
|
c80f610fc1 | ||
|
|
1a9593f140 | ||
|
|
0e0885afd5 | ||
|
|
9e0c0b2bf0 |
27
.github/krew.yaml
vendored
27
.github/krew.yaml
vendored
@@ -4,20 +4,19 @@ metadata:
|
||||
name: kubevpn
|
||||
spec:
|
||||
version: {{ .TagName }}
|
||||
homepage: https://github.com/KubeNetworks/kubevpn
|
||||
shortDescription: "A vpn tunnel tools which can connect to kubernetes cluster network"
|
||||
homepage: https://github.com/kubenetworks/kubevpn
|
||||
shortDescription: "KubeVPN offers a Cloud Native Dev Environment that connects to kubernetes cluster network"
|
||||
description: |
|
||||
KubeVPN is Cloud Native Dev Environment, connect to kubernetes cluster network, you can access remote kubernetes
|
||||
cluster network, remote
|
||||
kubernetes cluster service can also access your local service. and more, you can run your kubernetes pod on local Docker
|
||||
container with same environment、volume、and network. you can develop your application on local PC totally.
|
||||
KubeVPN offers a Cloud-Native Dev Environment that seamlessly connects to your Kubernetes cluster network.
|
||||
Gain access to the Kubernetes cluster network effortlessly using service names or Pod IP/Service IP. Facilitate the interception of inbound traffic from remote Kubernetes cluster services to your local PC through a service mesh and more.
|
||||
For instance, you have the flexibility to run your Kubernetes pod within a local Docker container, ensuring an identical environment, volume, and network setup. With KubeVPN, empower yourself to develop applications entirely on your local PC!
|
||||
|
||||
platforms:
|
||||
- selector:
|
||||
matchLabels:
|
||||
os: windows
|
||||
arch: amd64
|
||||
{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_amd64.zip" .TagName }}
|
||||
{{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_amd64.zip" .TagName }}
|
||||
files:
|
||||
- from: ./bin/kubevpn.exe
|
||||
to: .
|
||||
@@ -28,7 +27,7 @@ spec:
|
||||
matchLabels:
|
||||
os: windows
|
||||
arch: arm64
|
||||
{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_arm64.zip" .TagName }}
|
||||
{{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_arm64.zip" .TagName }}
|
||||
files:
|
||||
- from: ./bin/kubevpn.exe
|
||||
to: .
|
||||
@@ -39,7 +38,7 @@ spec:
|
||||
matchLabels:
|
||||
os: windows
|
||||
arch: 386
|
||||
{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_386.zip" .TagName }}
|
||||
{{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_windows_386.zip" .TagName }}
|
||||
files:
|
||||
- from: ./bin/kubevpn.exe
|
||||
to: .
|
||||
@@ -50,7 +49,7 @@ spec:
|
||||
matchLabels:
|
||||
os: linux
|
||||
arch: amd64
|
||||
{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_amd64.zip" .TagName }}
|
||||
{{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_amd64.zip" .TagName }}
|
||||
files:
|
||||
- from: ./bin/kubevpn
|
||||
to: .
|
||||
@@ -61,7 +60,7 @@ spec:
|
||||
matchLabels:
|
||||
os: linux
|
||||
arch: arm64
|
||||
{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_arm64.zip" .TagName }}
|
||||
{{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_arm64.zip" .TagName }}
|
||||
files:
|
||||
- from: ./bin/kubevpn
|
||||
to: .
|
||||
@@ -72,7 +71,7 @@ spec:
|
||||
matchLabels:
|
||||
os: linux
|
||||
arch: 386
|
||||
{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_386.zip" .TagName }}
|
||||
{{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_linux_386.zip" .TagName }}
|
||||
files:
|
||||
- from: ./bin/kubevpn
|
||||
to: .
|
||||
@@ -83,7 +82,7 @@ spec:
|
||||
matchLabels:
|
||||
os: darwin
|
||||
arch: amd64
|
||||
{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_darwin_amd64.zip" .TagName }}
|
||||
{{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_darwin_amd64.zip" .TagName }}
|
||||
files:
|
||||
- from: ./bin/kubevpn
|
||||
to: .
|
||||
@@ -94,7 +93,7 @@ spec:
|
||||
matchLabels:
|
||||
os: darwin
|
||||
arch: arm64
|
||||
{{addURIAndSha "https://github.com/KubeNetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_darwin_arm64.zip" .TagName }}
|
||||
{{addURIAndSha "https://github.com/kubenetworks/kubevpn/releases/download/{{ .TagName }}/kubevpn_{{ .TagName }}_darwin_arm64.zip" .TagName }}
|
||||
files:
|
||||
- from: ./bin/kubevpn
|
||||
to: .
|
||||
|
||||
67
.github/release-note.sh
vendored
67
.github/release-note.sh
vendored
@@ -7,11 +7,70 @@ PREVIOUS_RELEASE=${PREVIOUS_RELEASE:-$1}
|
||||
CHANGELOG=$(git log --no-merges --date=short --pretty=format:'- %h %an %ad %s' "${PREVIOUS_RELEASE}".."${RELEASE}")
|
||||
|
||||
cat <<EOF
|
||||
## ${RELEASE}
|
||||
# KubeVPN release ${RELEASE}
|
||||
|
||||
KubeVPN ${RELEASE} is available now ! 🎉
|
||||
- fix known bugs 🛠
|
||||
## Installation and Upgrading
|
||||
wget -LO "https://github.com/KubeNetworks/kubevpn/releases/download/$(curl -L -s https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/plugins/stable.txt)/kubevpn_$(curl -L -s https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/plugins/stable.txt)_darwin_amd64.zip"
|
||||
|
||||
## Download KubeVPN for your platform
|
||||
|
||||
**Mac** (x86-64/Intel)
|
||||
|
||||
\`\`\`
|
||||
curl -Lo kubevpn.zip https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_darwin_amd64.zip && unzip -d kubevpn kubevpn.zip
|
||||
\`\`\`
|
||||
|
||||
**Mac** (AArch64/Apple M1 silicon)
|
||||
|
||||
\`\`\`
|
||||
curl -Lo kubevpn.zip https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_darwin_arm64.zip && unzip -d kubevpn kubevpn.zip
|
||||
\`\`\`
|
||||
|
||||
**Linux** (x86-64)
|
||||
|
||||
\`\`\`
|
||||
curl -Lo kubevpn.zip https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_linux_amd64.zip && unzip -d kubevpn kubevpn.zip
|
||||
\`\`\`
|
||||
|
||||
**Linux** (AArch64)
|
||||
|
||||
\`\`\`
|
||||
curl -Lo kubevpn.zip https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_linux_arm64.zip && unzip -d kubevpn kubevpn.zip
|
||||
\`\`\`
|
||||
|
||||
**Linux** (i386)
|
||||
|
||||
\`\`\`
|
||||
curl -Lo kubevpn.zip https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_linux_386.zip && unzip -d kubevpn kubevpn.zip
|
||||
\`\`\`
|
||||
|
||||
**Windows** (x86-64)
|
||||
|
||||
\`\`\`
|
||||
curl -LO https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_windows_amd64.zip
|
||||
\`\`\`
|
||||
|
||||
**Windows** (AArch64)
|
||||
|
||||
\`\`\`
|
||||
curl -LO https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_windows_arm64.zip
|
||||
\`\`\`
|
||||
|
||||
**Windows** (i386)
|
||||
|
||||
\`\`\`
|
||||
curl -LO https://github.com/kubenetworks/kubevpn/releases/download/${RELEASE}/kubevpn_${RELEASE}_windows_386.zip
|
||||
\`\`\`
|
||||
|
||||
## Checksums
|
||||
|
||||
SHA256 checksums available for compiled binaries.
|
||||
Run \`shasum -a 256 -c checksums.txt\` to verify.
|
||||
|
||||
## Upgrading
|
||||
|
||||
Run \`kubevpn upgrade\` to upgrade from a previous version.
|
||||
|
||||
## Changelog
|
||||
|
||||
${CHANGELOG}
|
||||
EOF
|
||||
|
||||
79
.github/workflows/coverage.yml
vendored
Normal file
79
.github/workflows/coverage.yml
vendored
Normal file
@@ -0,0 +1,79 @@
|
||||
name: Coverage
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
|
||||
jobs:
|
||||
linux:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.23'
|
||||
check-latest: true
|
||||
|
||||
- name: Setup Minikube
|
||||
id: minikube
|
||||
timeout-minutes: 30
|
||||
uses: medyagh/setup-minikube@latest
|
||||
with:
|
||||
cache: true
|
||||
|
||||
- name: Kubernetes info
|
||||
run: |
|
||||
kubectl cluster-info
|
||||
cat ~/.kube/config
|
||||
kubectl get pods -n kube-system -o wide
|
||||
|
||||
- name: Install demo bookinfo
|
||||
run: |
|
||||
minikube image load --remote istio/examples-bookinfo-details-v1:1.16.2
|
||||
minikube image load --remote istio/examples-bookinfo-ratings-v1:1.16.2
|
||||
minikube image load --remote istio/examples-bookinfo-reviews-v1:1.16.2
|
||||
minikube image load --remote istio/examples-bookinfo-productpage-v1:1.16.2
|
||||
minikube image load --remote naison/authors:latest
|
||||
minikube image load --remote nginx:latest
|
||||
minikube image load --remote naison/kubevpn:test
|
||||
minikube image ls
|
||||
eval $(minikube docker-env)
|
||||
kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
export VERSION=${{github.event.pull_request.head.sha}}
|
||||
if [[ -z "$VERSION" ]]; then
|
||||
export VERSION=${{ github.sha }}
|
||||
fi
|
||||
make kubevpn-linux-amd64
|
||||
chmod +x ./bin/kubevpn
|
||||
cp ./bin/kubevpn /usr/local/bin/kubevpn
|
||||
kubevpn version
|
||||
|
||||
- name: Wait for pods reviews to be ready
|
||||
run: |
|
||||
kubectl wait pods -l app=reviews --for=condition=Ready --timeout=3600s
|
||||
kubectl wait pods -l app=productpage --for=condition=Ready --timeout=3600s
|
||||
kubectl get svc -A -o wide
|
||||
kubectl get pod -A -o wide
|
||||
kubectl get all -o wide
|
||||
kubectl get nodes -o yaml
|
||||
ifconfig
|
||||
route -n
|
||||
sudo ln /usr/bin/resolvectl /usr/bin/systemd-resolve
|
||||
|
||||
- name: Test
|
||||
run: make ut
|
||||
|
||||
- name: Upload coverage reports to Codecov
|
||||
uses: codecov/codecov-action@0cfda1dd0a4ad9efc75517f399d859cd1ea4ced1 # v4.0.2
|
||||
env:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
with:
|
||||
verbose: true
|
||||
slug: wencaiwulue/kubevpn
|
||||
142
.github/workflows/release.yml
vendored
142
.github/workflows/release.yml
vendored
@@ -11,18 +11,19 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.20'
|
||||
go-version: '1.23'
|
||||
check-latest: true
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Push image to docker hub
|
||||
run: |
|
||||
echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USER }} --password-stdin
|
||||
echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
|
||||
docker buildx create --use
|
||||
make container
|
||||
|
||||
@@ -52,13 +53,13 @@ jobs:
|
||||
git reset --hard
|
||||
|
||||
- name: Upload RELEASE_VERSION
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: RELEASE_VERSION
|
||||
path: RELEASE_VERSION
|
||||
|
||||
- name: Upload UPLOAD_URL
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: UPLOAD_URL
|
||||
path: UPLOAD_URL
|
||||
@@ -95,13 +96,128 @@ jobs:
|
||||
labels: |
|
||||
report
|
||||
automated pr
|
||||
# team-reviewers: |
|
||||
# owners
|
||||
# maintainers
|
||||
draft: false
|
||||
|
||||
# - name: Update new version in krew-index
|
||||
# uses: rajatjindal/krew-release-bot@v0.0.43
|
||||
# with:
|
||||
# krew_template_file: .github/krew.yaml
|
||||
# debug: true
|
||||
release-helm-chart:
|
||||
name: Release KubeVPN Helm Chart
|
||||
needs: [ build ]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.23'
|
||||
check-latest: true
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Helm tool installer
|
||||
uses: azure/setup-helm@v4
|
||||
with:
|
||||
version: "v3.6.3"
|
||||
- name: Change chart version
|
||||
run: |
|
||||
VERSION=${GITHUB_REF#refs/*/}
|
||||
CHART_VERSION=${VERSION/#v/}
|
||||
sed -i "s/^appVersion:.*$/appVersion: \"${VERSION}\"/;s/^version:.*$/version: ${CHART_VERSION}/" charts/kubevpn/Chart.yaml
|
||||
sed -i "s/tag:.*$/tag: \"${VERSION}\"/" charts/kubevpn/values.yaml
|
||||
- name: Tar chart
|
||||
run: |
|
||||
VERSION=${GITHUB_REF#refs/*/}
|
||||
CHART_VERSION=${VERSION/#v/}
|
||||
tar --transform 's/^charts\/kubevpn/kubevpn/' -zcf kubevpn-${CHART_VERSION}.tgz charts/kubevpn
|
||||
shasum -a 256 kubevpn-${CHART_VERSION}.tgz | awk '{print $1}' > kubevpn-${CHART_VERSION}.tgz-SHA256
|
||||
- name: Download UPLOAD_URL
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: UPLOAD_URL
|
||||
- name: Get Release UPLOAD_URL
|
||||
id: get_release_info
|
||||
run: |
|
||||
UploadUrl=$(cat ./UPLOAD_URL)
|
||||
echo "::set-output name=upload_url::$UploadUrl"
|
||||
- name: Get assert name
|
||||
id: get_assert_info
|
||||
run: |
|
||||
VERSION=${GITHUB_REF#refs/*/}
|
||||
CHART_VERSION=${VERSION/#v/}
|
||||
AssertName=kubevpn-${CHART_VERSION}.tgz
|
||||
echo "::set-output name=assert_name::$AssertName"
|
||||
- name: Get assert SHA256 name
|
||||
id: get_assert_info_sha256
|
||||
run: |
|
||||
VERSION=${GITHUB_REF#refs/*/}
|
||||
CHART_VERSION=${VERSION/#v/}
|
||||
AssertName=kubevpn-${CHART_VERSION}.tgz-SHA256
|
||||
echo "::set-output name=assert_name::$AssertName"
|
||||
- name: Upload Release Asset KubeVPN Server Chart
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.get_release_info.outputs.upload_url }}
|
||||
asset_path: ${{ steps.get_assert_info.outputs.assert_name }}
|
||||
asset_name: ${{ steps.get_assert_info.outputs.assert_name }}
|
||||
asset_content_type: application/octet-stream
|
||||
- name: Upload Release Asset KubeVPN Chart SHA256
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.get_release_info.outputs.upload_url }}
|
||||
asset_path: ${{ steps.get_assert_info_sha256.outputs.assert_name }}
|
||||
asset_name: ${{ steps.get_assert_info_sha256.outputs.assert_name }}
|
||||
asset_content_type: application/octet-stream
|
||||
|
||||
github-pages-deploy:
|
||||
name: Release Helm Chart To branch master
|
||||
permissions:
|
||||
contents: write
|
||||
runs-on: ubuntu-latest
|
||||
needs: release-helm-chart
|
||||
steps:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.23'
|
||||
check-latest: true
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Configure Git
|
||||
run: |
|
||||
git config user.name "$GITHUB_ACTOR"
|
||||
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
|
||||
- name: Install Helm
|
||||
uses: azure/setup-helm@v4
|
||||
- name: Change chart version
|
||||
run: |
|
||||
VERSION=${GITHUB_REF#refs/*/}
|
||||
CHART_VERSION=${VERSION/#v/}
|
||||
sed -i "s/^appVersion:.*$/appVersion: \"${VERSION}\"/;s/^version:.*$/version: ${CHART_VERSION}/" charts/kubevpn/Chart.yaml
|
||||
sed -i "s/tag:.*$/tag: \"${VERSION}\"/" charts/kubevpn/values.yaml
|
||||
- name: Package and upload helm chart
|
||||
run: |
|
||||
# download helm chart releaser
|
||||
curl -sSLo cr.tar.gz "https://github.com/helm/chart-releaser/releases/download/v1.6.1/chart-releaser_1.6.1_linux_amd64.tar.gz"
|
||||
tar -xzf cr.tar.gz
|
||||
rm -f cr.tar.gz
|
||||
owner=$(cut -d '/' -f 1 <<< "$GITHUB_REPOSITORY")
|
||||
repo=$(cut -d '/' -f 2 <<< "$GITHUB_REPOSITORY")
|
||||
# package chart
|
||||
./cr package charts/$repo
|
||||
# update index and push to github pages
|
||||
git config user.email "$owner@users.noreply.github.com"
|
||||
git config user.name "$owner"
|
||||
./cr index \
|
||||
--owner "$owner" \
|
||||
--git-repo "$repo" \
|
||||
--token "${{ secrets.CREATE_HELM_PR }}" \
|
||||
--release-name-template "v{{ .Version }}" \
|
||||
--index-path ./index.yaml \
|
||||
--charts-repo https://github.com/$owner/$repo \
|
||||
--pages-branch master \
|
||||
--pages-index-path charts/index.yaml \
|
||||
--push
|
||||
63
.github/workflows/test.yml
vendored
63
.github/workflows/test.yml
vendored
@@ -10,33 +10,37 @@ jobs:
|
||||
image:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v4
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.20'
|
||||
go-version: '1.23'
|
||||
check-latest: true
|
||||
- name: Push image to docker hub
|
||||
run: |
|
||||
echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USER }} --password-stdin
|
||||
echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
|
||||
docker buildx create --use
|
||||
export VERSION=test
|
||||
make container
|
||||
export VERSION=${{github.event.pull_request.head.sha}}
|
||||
if [[ -z "$VERSION" ]]; then
|
||||
export VERSION=${{ github.sha }}
|
||||
fi
|
||||
make container-test
|
||||
linux:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [ "image" ]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.20'
|
||||
go-version: '1.23'
|
||||
check-latest: true
|
||||
- name: Setup Minikube
|
||||
id: minikube
|
||||
timeout-minutes: 30
|
||||
uses: medyagh/setup-minikube@master
|
||||
uses: medyagh/setup-minikube@latest
|
||||
with:
|
||||
cache: true
|
||||
|
||||
@@ -56,11 +60,14 @@ jobs:
|
||||
minikube image load --remote naison/kubevpn:test
|
||||
minikube image ls
|
||||
eval $(minikube docker-env)
|
||||
kubectl apply -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
export VERSION=test
|
||||
export VERSION=${{github.event.pull_request.head.sha}}
|
||||
if [[ -z "$VERSION" ]]; then
|
||||
export VERSION=${{ github.sha }}
|
||||
fi
|
||||
make kubevpn-linux-amd64
|
||||
chmod +x ./bin/kubevpn
|
||||
cp ./bin/kubevpn /usr/local/bin/kubevpn
|
||||
@@ -79,21 +86,26 @@ jobs:
|
||||
sudo ln /usr/bin/resolvectl /usr/bin/systemd-resolve
|
||||
|
||||
- name: Test
|
||||
run: go test -v -failfast ./... -timeout=60m
|
||||
run: make ut
|
||||
|
||||
macos:
|
||||
runs-on: macos-latest
|
||||
runs-on: macos-13
|
||||
needs: [ "image" ]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.20'
|
||||
go-version: '1.23'
|
||||
check-latest: true
|
||||
|
||||
# https://github.com/crazy-max/ghaction-setup-docker/issues/108
|
||||
- name: Set up QEMU
|
||||
uses: docker/actions-toolkit/.github/actions/macos-setup-qemu@19ca9ade20f5da695f76a10988d6532058575f82
|
||||
|
||||
- name: Set up Docker
|
||||
uses: crazy-max/ghaction-setup-docker@v1.4.0
|
||||
uses: crazy-max/ghaction-setup-docker@v3
|
||||
|
||||
- name: Install minikube
|
||||
run: |
|
||||
@@ -121,11 +133,14 @@ jobs:
|
||||
minikube image load --remote naison/kubevpn:test
|
||||
minikube image ls
|
||||
eval $(minikube docker-env)
|
||||
kubectl apply -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
export VERSION=test
|
||||
export VERSION=${{github.event.pull_request.head.sha}}
|
||||
if [[ -z "$VERSION" ]]; then
|
||||
export VERSION=${{ github.sha }}
|
||||
fi
|
||||
make kubevpn-darwin-amd64
|
||||
chmod +x ./bin/kubevpn
|
||||
cp ./bin/kubevpn /usr/local/bin/kubevpn
|
||||
@@ -143,21 +158,21 @@ jobs:
|
||||
netstat -anr
|
||||
|
||||
- name: Test
|
||||
run: go test -v -failfast ./... -timeout=60m
|
||||
run: make ut
|
||||
|
||||
windows:
|
||||
runs-on: windows-latest
|
||||
needs: [ "image" ]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.20'
|
||||
go-version: '1.23'
|
||||
|
||||
- name: Set up Docker
|
||||
uses: crazy-max/ghaction-setup-docker@v1.4.0
|
||||
uses: crazy-max/ghaction-setup-docker@v3
|
||||
- run: |
|
||||
docker info --format '{{.OSType}}'
|
||||
- run: |
|
||||
|
||||
6
.github/workflows/upload_release.yml
vendored
6
.github/workflows/upload_release.yml
vendored
@@ -23,12 +23,12 @@ jobs:
|
||||
arch: 386
|
||||
steps:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.20'
|
||||
go-version: '1.23'
|
||||
check-latest: true
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Build kubevpn
|
||||
run: |
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -1,8 +1,6 @@
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
|
||||
34
Makefile
34
Makefile
@@ -1,25 +1,27 @@
|
||||
VERSION ?= $(shell git tag -l --sort=v:refname | tail -1)
|
||||
GIT_COMMIT := $(shell git describe --match=NeVeRmAtCh --always --abbrev=40)
|
||||
BUILD_TIME := $(shell date +"%Y-%m-%dT%H:%M:%SZ")
|
||||
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
|
||||
GIT_COMMIT ?= $(shell git describe --match=NeVeRmAtCh --always --abbrev=7)
|
||||
BUILD_TIME ?= $(shell date +"%Y-%m-%dT%H:%M:%SZ")
|
||||
BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD)
|
||||
|
||||
GOOS := $(shell go env GOHOSTOS)
|
||||
GOARCH := $(shell go env GOHOSTARCH)
|
||||
TARGET := kubevpn-${GOOS}-${GOARCH}
|
||||
OS_ARCH := ${GOOS}/${GOARCH}
|
||||
|
||||
BASE := github.com/wencaiwulue/kubevpn
|
||||
BASE := github.com/wencaiwulue/kubevpn/v2
|
||||
FOLDER := ${BASE}/cmd/kubevpn
|
||||
BUILD_DIR := ./build
|
||||
OUTPUT_DIR := ./bin
|
||||
BUILD_DIR ?= ./build
|
||||
OUTPUT_DIR ?= ./bin
|
||||
REGISTRY ?= docker.io
|
||||
NAMESPACE ?= naison
|
||||
REPOSITORY ?= kubevpn
|
||||
IMAGE ?= $(REGISTRY)/$(NAMESPACE)/$(REPOSITORY):$(VERSION)
|
||||
IMAGE_DEFAULT = docker.io/naison/kubevpn:latest
|
||||
IMAGE_LATEST ?= docker.io/naison/kubevpn:latest
|
||||
IMAGE_GH ?= ghcr.io/kubenetworks/kubevpn:$(VERSION)
|
||||
|
||||
# Setup the -ldflags option for go build here, interpolate the variable values
|
||||
LDFLAGS=--ldflags "\
|
||||
# add '-tag noassets' for syncthing gui
|
||||
LDFLAGS=-tags noassets --ldflags "-s -w\
|
||||
-X ${BASE}/pkg/config.Image=${IMAGE} \
|
||||
-X ${BASE}/pkg/config.Version=${VERSION} \
|
||||
-X ${BASE}/pkg/config.GitCommit=${GIT_COMMIT} \
|
||||
@@ -84,21 +86,29 @@ kubevpn-linux-386:
|
||||
|
||||
.PHONY: container
|
||||
container:
|
||||
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE} -t ${IMAGE_DEFAULT} -f $(BUILD_DIR)/Dockerfile --push .
|
||||
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE} -t ${IMAGE_LATEST} -t ${IMAGE_GH} -f $(BUILD_DIR)/Dockerfile --push .
|
||||
|
||||
############################ build local
|
||||
.PHONY: container-local
|
||||
container-local: kubevpn-linux-amd64
|
||||
docker buildx build --platform linux/amd64,linux/arm64 -t docker.io/naison/kubevpn:latest -f $(BUILD_DIR)/local.Dockerfile --push .
|
||||
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE_LATEST} -f $(BUILD_DIR)/local.Dockerfile --push .
|
||||
|
||||
.PHONY: container-test
|
||||
container-test: kubevpn-linux-amd64
|
||||
docker buildx build --platform linux/amd64,linux/arm64 -t docker.io/naison/kubevpn:test -f $(BUILD_DIR)/test.Dockerfile --push .
|
||||
docker buildx build --platform linux/amd64,linux/arm64 -t ${IMAGE} -t ${IMAGE_GH} -f $(BUILD_DIR)/test.Dockerfile --push .
|
||||
|
||||
.PHONY: version
|
||||
version:
|
||||
go run github.com/wencaiwulue/kubevpn/pkg/util/krew
|
||||
go run ${BASE}/pkg/util/krew
|
||||
|
||||
.PHONY: gen
|
||||
gen:
|
||||
go generate ./...
|
||||
|
||||
.PHONY: ut
|
||||
ut:
|
||||
go test -tags=noassets -coverprofile=coverage.txt -coverpkg=./... -v ./... -timeout=60m
|
||||
|
||||
.PHONY: cover
|
||||
cover: ut
|
||||
go tool cover -html=coverage.txt
|
||||
533
README.md
533
README.md
@@ -1,125 +1,145 @@
|
||||

|
||||
|
||||
[![GitHub Workflow][1]](https://github.com/KubeNetworks/kubevpn/actions)
|
||||
[![Go Version][2]](https://github.com/KubeNetworks/kubevpn/blob/master/go.mod)
|
||||
[![Go Report][3]](https://goreportcard.com/badge/github.com/KubeNetworks/kubevpn)
|
||||
[![Maintainability][4]](https://codeclimate.com/github/KubeNetworks/kubevpn/maintainability)
|
||||
[![GitHub License][5]](https://github.com/KubeNetworks/kubevpn/blob/main/LICENSE)
|
||||
[![GitHub Workflow][1]](https://github.com/kubenetworks/kubevpn/actions)
|
||||
[![Go Version][2]](https://github.com/kubenetworks/kubevpn/blob/master/go.mod)
|
||||
[![Go Report][3]](https://goreportcard.com/report/github.com/wencaiwulue/kubevpn)
|
||||
[![Maintainability][4]](https://codeclimate.com/github/kubenetworks/kubevpn/maintainability)
|
||||
[![GitHub License][5]](https://github.com/kubenetworks/kubevpn/blob/main/LICENSE)
|
||||
[![Docker Pulls][6]](https://hub.docker.com/r/naison/kubevpn)
|
||||
[![Releases][7]](https://github.com/KubeNetworks/kubevpn/releases)
|
||||
[![Releases][7]](https://github.com/kubenetworks/kubevpn/releases)
|
||||
[](https://pkg.go.dev/github.com/wencaiwulue/kubevpn/v2)
|
||||
[](https://codecov.io/gh/wencaiwulue/kubevpn)
|
||||
|
||||
[1]: https://img.shields.io/github/actions/workflow/status/KubeNetworks/kubevpn/release.yml?logo=github
|
||||
[1]: https://img.shields.io/github/actions/workflow/status/kubenetworks/kubevpn/release.yml?logo=github
|
||||
|
||||
[2]: https://img.shields.io/github/go-mod/go-version/KubeNetworks/kubevpn?logo=go
|
||||
[2]: https://img.shields.io/github/go-mod/go-version/kubenetworks/kubevpn?logo=go
|
||||
|
||||
[3]: https://img.shields.io/badge/go%20report-A+-brightgreen.svg?style=flat
|
||||
[3]: https://goreportcard.com/badge/github.com/wencaiwulue/kubevpn?style=flat
|
||||
|
||||
[4]: https://api.codeclimate.com/v1/badges/b5b30239174fc6603aca/maintainability
|
||||
|
||||
[5]: https://img.shields.io/github/license/KubeNetworks/kubevpn
|
||||
[5]: https://img.shields.io/github/license/kubenetworks/kubevpn
|
||||
|
||||
[6]: https://img.shields.io/docker/pulls/naison/kubevpn?logo=docker
|
||||
|
||||
[7]: https://img.shields.io/github/v/release/KubeNetworks/kubevpn?logo=smartthings
|
||||
[7]: https://img.shields.io/github/v/release/kubenetworks/kubevpn?logo=smartthings
|
||||
|
||||
# KubeVPN
|
||||
|
||||
[中文](README_ZH.md) | [English](README.md) | [Wiki](https://github.com/KubeNetworks/kubevpn/wiki/Architecture)
|
||||
[中文](README_ZH.md) | [English](README.md) | [Wiki](https://github.com/kubenetworks/kubevpn/wiki/Architecture)
|
||||
|
||||
KubeVPN is Cloud Native Dev Environment, connect to kubernetes cluster network, you can access remote kubernetes
|
||||
cluster network, remote
|
||||
kubernetes cluster service can also access your local service. and more, you can run your kubernetes pod on local Docker
|
||||
container with same environment、volume、and network. you can develop your application on local PC totally.
|
||||
KubeVPN offers a Cloud-Native Dev Environment that seamlessly connects to your Kubernetes cluster network.
|
||||
|
||||
Gain access to the Kubernetes cluster network effortlessly using service names or Pod IP/Service IP. Facilitate the
|
||||
interception of inbound traffic from remote Kubernetes cluster services to your local PC through a service mesh and
|
||||
more.
|
||||
|
||||
For instance, you have the flexibility to run your Kubernetes pod within a local Docker container, ensuring an identical
|
||||
environment, volume, and network setup.
|
||||
With KubeVPN, empower yourself to develop applications entirely on your local PC!
|
||||
|
||||
## Content
|
||||
|
||||
1. [QuickStart](./README.md#quickstart)
|
||||
2. [Functions](./README.md#functions)
|
||||
3. [FAQ](./README.md#faq)
|
||||
4. [Architecture](./README.md#architecture)
|
||||
5. [Contributions](./README.md#Contributions)
|
||||
|
||||
## QuickStart
|
||||
|
||||
#### Install from GitHub release
|
||||
|
||||
[LINK](https://github.com/KubeNetworks/kubevpn/releases/latest)
|
||||
|
||||
#### Install from custom krew index
|
||||
### Install from [brew](https://brew.sh/) (macOS / Linux)
|
||||
|
||||
```shell
|
||||
(
|
||||
kubectl krew index add kubevpn https://github.com/KubeNetworks/kubevpn.git && \
|
||||
kubectl krew install kubevpn/kubevpn && kubectl kubevpn
|
||||
)
|
||||
brew install kubevpn
|
||||
```
|
||||
|
||||
#### Install from build it manually
|
||||
### Install from [scoop](https://scoop.sh/) (Windows)
|
||||
|
||||
```shell
|
||||
(
|
||||
git clone https://github.com/KubeNetworks/kubevpn.git && \
|
||||
cd kubevpn && make kubevpn && ./bin/kubevpn
|
||||
)
|
||||
|
||||
scoop bucket add extras
|
||||
scoop install kubevpn
|
||||
```
|
||||
|
||||
### Install from [krew](https://krew.sigs.k8s.io/) (Windows / macOS / Linux)
|
||||
|
||||
```shell
|
||||
kubectl krew index add kubevpn https://github.com/kubenetworks/kubevpn.git
|
||||
kubectl krew install kubevpn/kubevpn
|
||||
kubectl kubevpn
|
||||
```
|
||||
|
||||
### Install from GitHub release (Windows / macOS / Linux)
|
||||
|
||||
[https://github.com/kubenetworks/kubevpn/releases/latest](https://github.com/kubenetworks/kubevpn/releases/latest)
|
||||
|
||||
### Install bookinfo as demo application
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
```
|
||||
|
||||
For clean up after test
|
||||
|
||||
```shell
|
||||
kubectl delete -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
kubectl delete -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
```
|
||||
|
||||
## Functions
|
||||
|
||||
### Connect to k8s cluster network
|
||||
|
||||
use command `kubevpn connect` connect to k8s cluster network, prompt `Password:` need to input computer
|
||||
password. to enable root operation (create a tun device).
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn connect
|
||||
Password:
|
||||
start to connect
|
||||
get cidr from cluster info...
|
||||
get cidr from cluster info ok
|
||||
get cidr from cni...
|
||||
wait pod cni-net-dir-kubevpn to be running timeout, reason , ignore
|
||||
get cidr from svc...
|
||||
get cidr from svc ok
|
||||
get cidr successfully
|
||||
traffic manager not exist, try to create it...
|
||||
label namespace default
|
||||
create serviceAccount kubevpn-traffic-manager
|
||||
create roles kubevpn-traffic-manager
|
||||
create roleBinding kubevpn-traffic-manager
|
||||
create service kubevpn-traffic-manager
|
||||
create deployment kubevpn-traffic-manager
|
||||
pod kubevpn-traffic-manager-66d969fd45-9zlbp is Pending
|
||||
Starting connect
|
||||
Getting network CIDR from cluster info...
|
||||
Getting network CIDR from CNI...
|
||||
Getting network CIDR from services...
|
||||
Labeling Namespace default
|
||||
Creating ServiceAccount kubevpn-traffic-manager
|
||||
Creating Roles kubevpn-traffic-manager
|
||||
Creating RoleBinding kubevpn-traffic-manager
|
||||
Creating Service kubevpn-traffic-manager
|
||||
Creating MutatingWebhookConfiguration kubevpn-traffic-manager
|
||||
Creating Deployment kubevpn-traffic-manager
|
||||
|
||||
Pod kubevpn-traffic-manager-66d969fd45-9zlbp is Pending
|
||||
Container Reason Message
|
||||
control-plane ContainerCreating
|
||||
vpn ContainerCreating
|
||||
webhook ContainerCreating
|
||||
|
||||
pod kubevpn-traffic-manager-66d969fd45-9zlbp is Running
|
||||
Pod kubevpn-traffic-manager-66d969fd45-9zlbp is Running
|
||||
Container Reason Message
|
||||
control-plane ContainerRunning
|
||||
vpn ContainerRunning
|
||||
webhook ContainerRunning
|
||||
|
||||
Creating mutatingWebhook_configuration for kubevpn-traffic-manager
|
||||
update ref count successfully
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
+---------------------------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster, enjoy it :) |
|
||||
+---------------------------------------------------------------------------+
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
+----------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster ! |
|
||||
+----------------------------------------------------------+
|
||||
➜ ~
|
||||
```
|
||||
|
||||
already connected to cluster network, use command `kubevpn status` to check status
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn status
|
||||
ID Mode Cluster Kubeconfig Namespace Status
|
||||
0 full ccijorbccotmqodvr189g /Users/bytedance/.kube/config default Connected
|
||||
ID Mode Cluster Kubeconfig Namespace Status
|
||||
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
|
||||
➜ ~
|
||||
```
|
||||
|
||||
use pod `productpage-788df7ff7f-jpkcs` IP `172.29.2.134`
|
||||
|
||||
```shell
|
||||
➜ ~ kubectl get pods -o wide
|
||||
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
|
||||
@@ -131,6 +151,8 @@ ratings-77b6cd4499-zvl6c 1/1 Running 0
|
||||
reviews-85c88894d9-vgkxd 1/1 Running 0 24d 172.29.2.249 192.168.0.5 <none> <none>
|
||||
```
|
||||
|
||||
use `ping` to test connection, seems good
|
||||
|
||||
```shell
|
||||
➜ ~ ping 172.29.2.134
|
||||
PING 172.29.2.134 (172.29.2.134): 56 data bytes
|
||||
@@ -144,6 +166,8 @@ PING 172.29.2.134 (172.29.2.134): 56 data bytes
|
||||
round-trip min/avg/max/stddev = 54.293/55.380/56.270/0.728 ms
|
||||
```
|
||||
|
||||
use service `productpage` IP `172.21.10.49`
|
||||
|
||||
```shell
|
||||
➜ ~ kubectl get services -o wide
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
|
||||
@@ -156,6 +180,8 @@ ratings ClusterIP 172.21.3.247 <none> 9080/TCP
|
||||
reviews ClusterIP 172.21.8.24 <none> 9080/TCP 114d app=reviews
|
||||
```
|
||||
|
||||
use command `curl` to test service connection
|
||||
|
||||
```shell
|
||||
➜ ~ curl 172.21.10.49:9080
|
||||
<!DOCTYPE html>
|
||||
@@ -167,8 +193,18 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
```
|
||||
|
||||
seems good too~
|
||||
|
||||
### Domain resolve
|
||||
|
||||
support k8s dns name resolve.
|
||||
|
||||
a Pod/Service named `productpage` in the `default` namespace can successfully resolve by following name:
|
||||
|
||||
- `productpage`
|
||||
- `productpage.default`
|
||||
- `productpage.default.svc.cluster.local`
|
||||
|
||||
```shell
|
||||
➜ ~ curl productpage.default.svc.cluster.local:9080
|
||||
<!DOCTYPE html>
|
||||
@@ -182,7 +218,8 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
|
||||
|
||||
### Short domain resolve
|
||||
|
||||
To access the service in the cluster, service name or you can use the short domain name, such as `productpage.default.svc.cluster.local`
|
||||
To access the service in the cluster, service name or you can use the short domain name, such
|
||||
as `productpage`
|
||||
|
||||
```shell
|
||||
➜ ~ curl productpage:9080
|
||||
@@ -195,50 +232,94 @@ To access the service in the cluster, service name or you can use the short doma
|
||||
...
|
||||
```
|
||||
|
||||
***Disclaimer:*** This only works on the namespace where kubevpn-traffic-manager is deployed. Otherwise,
|
||||
use [Domain resolve](./README.md#domain-resolve)
|
||||
|
||||
### Connect to multiple kubernetes cluster network
|
||||
|
||||
- Mode `lite`: can connect to multiple cluster network, design for only connecting to multiple cluster network.
|
||||
- Mode `Full`: not only connect to cluster network, it also supports proxy workloads inbound traffic to local PC.
|
||||
|
||||
already connected cluster `ccijorbccotmqodvr189g` with mode `full`
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn status
|
||||
ID Mode Cluster Kubeconfig Namespace Status
|
||||
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
|
||||
```
|
||||
|
||||
then connect to another cluster `ccidd77aam2dtnc3qnddg` with mode `lite`
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn connect -n default --kubeconfig ~/.kube/dev_config --lite
|
||||
Starting connect
|
||||
Got network CIDR from cache
|
||||
Use exist traffic manager
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
+----------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster ! |
|
||||
+----------------------------------------------------------+
|
||||
```
|
||||
|
||||
use command `kubevpn status` to check connection status
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn status
|
||||
ID Mode Cluster Kubeconfig Namespace Status
|
||||
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
|
||||
1 lite ccidd77aam2dtnc3qnddg /Users/naison/.kube/dev_config default Connected
|
||||
➜ ~
|
||||
```
|
||||
|
||||
### Reverse proxy
|
||||
|
||||
use command `kubevpn proxy` to proxy all inbound traffic to local computer.
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn proxy deployment/productpage
|
||||
already connect to cluster
|
||||
start to create remote inbound pod for deployment/productpage
|
||||
workload default/deployment/productpage is controlled by a controller
|
||||
rollout status for deployment/productpage
|
||||
Connected to cluster
|
||||
Injecting inbound sidecar for deployment/productpage
|
||||
Checking rollout status for deployment/productpage
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
deployment "productpage" successfully rolled out
|
||||
rollout status for deployment/productpage successfully
|
||||
create remote inbound pod for deployment/productpage successfully
|
||||
+---------------------------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster, enjoy it :) |
|
||||
+---------------------------------------------------------------------------+
|
||||
Rollout successfully for deployment/productpage
|
||||
+----------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster ! |
|
||||
+----------------------------------------------------------+
|
||||
➜ ~
|
||||
```
|
||||
|
||||
For local testing, save the following code as `hello.go`
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func main() {
|
||||
http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
|
||||
_, _ = io.WriteString(writer, "Hello world!")
|
||||
fmt.Printf(">>Received request: %s %s from %s\n", request.Method, request.RequestURI, request.RemoteAddr)
|
||||
})
|
||||
_ = http.ListenAndServe(":9080", nil)
|
||||
http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
|
||||
_, _ = io.WriteString(writer, "Hello world!")
|
||||
fmt.Printf(">>Received request: %s %s from %s\n", request.Method, request.RequestURI, request.RemoteAddr)
|
||||
})
|
||||
_ = http.ListenAndServe(":9080", nil)
|
||||
}
|
||||
```
|
||||
|
||||
and compile it
|
||||
|
||||
```
|
||||
go build hello.go
|
||||
```
|
||||
|
||||
then run it
|
||||
|
||||
```
|
||||
./hello &
|
||||
```
|
||||
@@ -247,20 +328,20 @@ then run it
|
||||
export selector=productpage
|
||||
export pod=`kubectl get pods -l app=${selector} -n default -o jsonpath='{.items[0].metadata.name}'`
|
||||
export pod_ip=`kubectl get pod $pod -n default -o jsonpath='{.status.podIP}'`
|
||||
curl -v -H "a: 1" http://$pod_ip:9080/health
|
||||
curl -v -H "foo: bar" http://$pod_ip:9080/health
|
||||
```
|
||||
|
||||
response would like below
|
||||
|
||||
```
|
||||
❯ curl -v -H "a: 1" http://$pod_ip:9080/health
|
||||
❯ curl -v -H "foo: bar" http://$pod_ip:9080/health
|
||||
* Trying 192.168.72.77:9080...
|
||||
* Connected to 192.168.72.77 (192.168.72.77) port 9080 (#0)
|
||||
> GET /health HTTP/1.1
|
||||
> Host: 192.168.72.77:9080
|
||||
> User-Agent: curl/7.87.0
|
||||
> Accept: */*
|
||||
> a: 1
|
||||
> foo: bar
|
||||
>
|
||||
>>Received request: GET /health from xxx.xxx.xxx.xxx:52974
|
||||
* Mark bundle as not supporting multiuse
|
||||
@@ -282,30 +363,25 @@ Hello world!%
|
||||
Hello world!%
|
||||
```
|
||||
|
||||
|
||||
|
||||
### Reverse proxy with mesh
|
||||
|
||||
Support HTTP, GRPC and WebSocket etc. with specific header `"a: 1"` will route to your local machine
|
||||
Support HTTP, GRPC and WebSocket etc. with specific header `"foo: bar"` will route to your local machine
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn proxy deployment/productpage --headers a=1
|
||||
already connect to cluster
|
||||
start to create remote inbound pod for deployment/productpage
|
||||
patch workload default/deployment/productpage with sidecar
|
||||
rollout status for deployment/productpage
|
||||
➜ ~ kubevpn proxy deployment/productpage --headers foo=bar
|
||||
Connected to cluster
|
||||
Injecting inbound sidecar for deployment/productpage
|
||||
Checking rollout status for deployment/productpage
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
deployment "productpage" successfully rolled out
|
||||
rollout status for deployment/productpage successfully
|
||||
create remote inbound pod for deployment/productpage successfully
|
||||
+---------------------------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster, enjoy it :) |
|
||||
+---------------------------------------------------------------------------+
|
||||
Rollout successfully for deployment/productpage
|
||||
+----------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster ! |
|
||||
+----------------------------------------------------------+
|
||||
➜ ~
|
||||
```
|
||||
|
||||
first access without header "a: 1", it will access existing pod on kubernetes cluster.
|
||||
first access without header "foo: bar", it will access existing pod on kubernetes cluster.
|
||||
|
||||
```shell
|
||||
➜ ~ curl productpage:9080
|
||||
@@ -319,38 +395,47 @@ first access without header "a: 1", it will access existing pod on kubernetes cl
|
||||
...
|
||||
```
|
||||
|
||||
Now let's access local service with header `"a: 1"`
|
||||
Now let's access local service with header `"foo: bar"`
|
||||
|
||||
```shell
|
||||
➜ ~ curl productpage:9080 -H "a: 1"
|
||||
➜ ~ curl productpage:9080 -H "foo: bar"
|
||||
>>Received request: GET / from xxx.xxx.xxx.xxx:51296
|
||||
Hello world!
|
||||
```
|
||||
|
||||
If you want to cancel proxy, just run command:
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn leave deployments/productpage
|
||||
Leaving workload deployments/productpage
|
||||
Checking rollout status for deployments/productpage
|
||||
Waiting for deployment "productpage" rollout to finish: 0 out of 1 new replicas have been updated...
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
Rollout successfully for deployments/productpage
|
||||
```
|
||||
|
||||
### Dev mode in local Docker 🐳
|
||||
|
||||
Run the Kubernetes pod in the local Docker container, and cooperate with the service mesh to intercept the traffic with
|
||||
the specified header to the local, or all the traffic to the local.
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn dev deployment/authors --headers a=1 -it --rm --entrypoint sh
|
||||
connectting to cluster
|
||||
start to connect
|
||||
got cidr from cache
|
||||
get cidr successfully
|
||||
update ref count successfully
|
||||
traffic manager already exist, reuse it
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
start to create remote inbound pod for Deployment.apps/authors
|
||||
patch workload default/Deployment.apps/authors with sidecar
|
||||
rollout status for Deployment.apps/authors
|
||||
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
|
||||
➜ ~ kubevpn dev deployment/authors --headers foo=bar --entrypoint sh
|
||||
Starting connect
|
||||
Got network CIDR from cache
|
||||
Use exist traffic manager
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
Injecting inbound sidecar for deployment/authors
|
||||
Patching workload deployment/authors
|
||||
Checking rollout status for deployment/authors
|
||||
Waiting for deployment "authors" rollout to finish: 0 out of 1 new replicas have been updated...
|
||||
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
|
||||
deployment "authors" successfully rolled out
|
||||
rollout status for Deployment.apps/authors successfully
|
||||
create remote inbound pod for Deployment.apps/authors successfully
|
||||
Rollout successfully for Deployment.apps/authors
|
||||
tar: removing leading '/' from member names
|
||||
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/4563987760170736212:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
tar: Removing leading `/' from member names
|
||||
@@ -390,23 +475,21 @@ OK: 8 MiB in 19 packages
|
||||
{"status":"Authors is healthy"} /opt/microservices # echo "continue testing pod access..."
|
||||
continue testing pod access...
|
||||
/opt/microservices # exit
|
||||
prepare to exit, cleaning up
|
||||
update ref count successfully
|
||||
tun device closed
|
||||
leave resource: deployments.apps/authors
|
||||
workload default/deployments.apps/authors is controlled by a controller
|
||||
leave resource: deployments.apps/authors successfully
|
||||
clean up successfully
|
||||
prepare to exit, cleaning up
|
||||
update ref count successfully
|
||||
clean up successfully
|
||||
Created container: default_authors
|
||||
Wait container default_authors to be running...
|
||||
Container default_authors is running now
|
||||
Disconnecting from the cluster...
|
||||
Leaving workload deployments.apps/authors
|
||||
Disconnecting from the cluster...
|
||||
Performing cleanup operations
|
||||
Clearing DNS settings
|
||||
➜ ~
|
||||
```
|
||||
|
||||
You can see that it will start up two containers with docker, mapping to pod two container, and share port with same
|
||||
network, you can use `localhost:port`
|
||||
to access another container. And more, all environment、volume and network are the same as remote kubernetes pod, it is
|
||||
truly consistent with the kubernetes runtime. Makes develop on local PC comes true.
|
||||
truly consistent with the kubernetes runtime. Makes develop on local PC come true.
|
||||
|
||||
```shell
|
||||
➜ ~ docker ps
|
||||
@@ -416,39 +499,37 @@ fc04e42799a5 nginx:latest "/docker-entrypoint.…" 37 sec
|
||||
➜ ~
|
||||
```
|
||||
|
||||
Here is how to access pod in local docker container
|
||||
Here is how to access pod in local docker container
|
||||
|
||||
```shell
|
||||
export authors_pod=`kubectl get pods -l app=authors -n default -o jsonpath='{.items[0].metadata.name}'`
|
||||
export authors_pod_ip=`kubectl get pod $authors_pod -n default -o jsonpath='{.status.podIP}'`
|
||||
curl -kv -H "a: 1" http://$authors_pod_ip:80/health
|
||||
curl -kv -H "foo: bar" http://$authors_pod_ip:80/health
|
||||
```
|
||||
|
||||
Verify logs of nginx container
|
||||
|
||||
```shell
|
||||
docker logs $(docker ps --format '{{.Names}}' | grep nginx_default_kubevpn)
|
||||
```
|
||||
|
||||
|
||||
If you just want to start up a docker image, you can use simple way like this:
|
||||
If you just want to start up a docker image, you can use a simple way like this:
|
||||
|
||||
```shell
|
||||
kubevpn dev deployment/authors --no-proxy -it --rm
|
||||
kubevpn dev deployment/authors --no-proxy
|
||||
```
|
||||
|
||||
Example:
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn dev deployment/authors --no-proxy -it --rm
|
||||
connectting to cluster
|
||||
start to connect
|
||||
got cidr from cache
|
||||
get cidr successfully
|
||||
update ref count successfully
|
||||
traffic manager already exist, reuse it
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
➜ ~ kubevpn dev deployment/authors --no-proxy
|
||||
Starting connect
|
||||
Got network CIDR from cache
|
||||
Use exist traffic manager
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
tar: removing leading '/' from member names
|
||||
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/5631078868924498209:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
tar: Removing leading `/' from member names
|
||||
@@ -466,7 +547,7 @@ Created main container: authors_default_kubevpn_ff34b
|
||||
|
||||
Now the main process will hang up to show you log.
|
||||
|
||||
If you want to specify the image to start the container locally, you can use the parameter `--docker-image`. When the
|
||||
If you want to specify the image to start the container locally, you can use the parameter `--dev-image`. When the
|
||||
image does not exist locally, it will be pulled from the corresponding mirror warehouse. If you want to specify startup
|
||||
parameters, you can use `--entrypoint` parameter, replace it with the command you want to execute, such
|
||||
as `--entrypoint /bin/bash`, for more parameters, see `kubevpn dev --help`.
|
||||
@@ -474,63 +555,53 @@ as `--entrypoint /bin/bash`, for more parameters, see `kubevpn dev --help`.
|
||||
### DinD ( Docker in Docker ) use kubevpn in Docker
|
||||
|
||||
If you want to start the development mode locally using Docker in Docker (DinD), because the program will read and
|
||||
write the `/tmp` directory, you need to manually add the parameter `-v /tmp:/tmp` (outer docker) and other thing is you
|
||||
write the `/tmp` directory, you need to manually add the parameter `-v /tmp:/tmp` (outer docker) and another thing is
|
||||
you
|
||||
need to special parameter `--network` (inner docker) for sharing network and pid
|
||||
|
||||
Example:
|
||||
|
||||
```shell
|
||||
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 naison/kubevpn:v2.0.0
|
||||
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 naison/kubevpn:latest
|
||||
```
|
||||
|
||||
```shell
|
||||
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 naison/kubevpn:v2.0.0
|
||||
Unable to find image 'naison/kubevpn:v2.0.0' locally
|
||||
v2.0.0: Pulling from naison/kubevpn
|
||||
445a6a12be2b: Already exists
|
||||
bd6c670dd834: Pull complete
|
||||
64a7297475a2: Pull complete
|
||||
33fa2e3224db: Pull complete
|
||||
e008f553422a: Pull complete
|
||||
5132e0110ddc: Pull complete
|
||||
5b2243de1f1a: Pull complete
|
||||
662a712db21d: Pull complete
|
||||
4f4fb700ef54: Pull complete
|
||||
33f0298d1d4f: Pull complete
|
||||
Digest: sha256:115b975a97edd0b41ce7a0bc1d8428e6b8569c91a72fe31ea0bada63c685742e
|
||||
Status: Downloaded newer image for naison/kubevpn:v2.0.0
|
||||
root@d0b3dab8912a:/app# kubevpn dev deployment/authors --headers user=naison -it --entrypoint sh
|
||||
|
||||
----------------------------------------------------------------------------------
|
||||
Warn: Use sudo to execute command kubevpn can not use user env KUBECONFIG.
|
||||
Because of sudo user env and user env are different.
|
||||
Current env KUBECONFIG value:
|
||||
----------------------------------------------------------------------------------
|
||||
|
||||
hostname is d0b3dab8912a
|
||||
connectting to cluster
|
||||
start to connect
|
||||
got cidr from cache
|
||||
get cidr successfully
|
||||
update ref count successfully
|
||||
traffic manager already exist, reuse it
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
start to create remote inbound pod for Deployment.apps/authors
|
||||
patch workload default/Deployment.apps/authors with sidecar
|
||||
rollout status for Deployment.apps/authors
|
||||
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
|
||||
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 naison/kubevpn:latest
|
||||
Unable to find image 'naison/kubevpn:latest' locally
|
||||
latest: Pulling from naison/kubevpn
|
||||
9c704ecd0c69: Already exists
|
||||
4987d0a976b5: Pull complete
|
||||
8aa94c4fc048: Pull complete
|
||||
526fee014382: Pull complete
|
||||
6c1c2bedceb6: Pull complete
|
||||
97ac845120c5: Pull complete
|
||||
ca82aef6a9eb: Pull complete
|
||||
1fd9534c7596: Pull complete
|
||||
588bd802eb9c: Pull complete
|
||||
Digest: sha256:368db2e0d98f6866dcefd60512960ce1310e85c24a398fea2a347905ced9507d
|
||||
Status: Downloaded newer image for naison/kubevpn:latest
|
||||
WARNING: image with reference naison/kubevpn was found but does not match the specified platform: wanted linux/amd64, actual: linux/arm64
|
||||
root@5732124e6447:/app# kubevpn dev deployment/authors --headers user=naison --entrypoint sh
|
||||
hostname is 5732124e6447
|
||||
Starting connect
|
||||
Got network CIDR from cache
|
||||
Use exist traffic manager
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
Injecting inbound sidecar for deployment/authors
|
||||
Patching workload deployment/authors
|
||||
Checking rollout status for deployment/authors
|
||||
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
|
||||
deployment "authors" successfully rolled out
|
||||
rollout status for Deployment.apps/authors successfully
|
||||
create remote inbound pod for Deployment.apps/authors successfully
|
||||
Rollout successfully for Deployment.apps/authors
|
||||
tar: removing leading '/' from member names
|
||||
/tmp/6460902982794789917:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
tar: Removing leading `/' from member names
|
||||
tar: Removing leading `/' from hard link targets
|
||||
/tmp/5028895788722532426:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
network mode is container:d0b3dab8912a
|
||||
Network mode is container:d0b3dab8912a
|
||||
Created container: nginx_default_kubevpn_6df63
|
||||
Wait container nginx_default_kubevpn_6df63 to be running...
|
||||
Container nginx_default_kubevpn_6df63 is running now
|
||||
@@ -585,49 +656,50 @@ OK: 8 MiB in 19 packages
|
||||
>> Container Received request: GET / from 127.0.0.1:41230
|
||||
Hello world!/opt/microservices #
|
||||
|
||||
/opt/microservices # curl authors:9080/health -H "a: 1"
|
||||
/opt/microservices # curl authors:9080/health -H "foo: bar"
|
||||
>>Received request: GET /health from 223.254.0.109:57930
|
||||
Hello world!/opt/microservices #
|
||||
/opt/microservices # curl localhost:9080/health
|
||||
{"status":"Authors is healthy"}/opt/microservices # exit
|
||||
prepare to exit, cleaning up
|
||||
update ref count successfully
|
||||
tun device closed
|
||||
leave resource: deployments.apps/authors
|
||||
workload default/deployments.apps/authors is controlled by a controller
|
||||
leave resource: deployments.apps/authors successfully
|
||||
clean up successfully
|
||||
prepare to exit, cleaning up
|
||||
update ref count successfully
|
||||
clean up successfully
|
||||
Created container: default_authors
|
||||
Wait container default_authors to be running...
|
||||
Container default_authors is running now
|
||||
Disconnecting from the cluster...
|
||||
Leaving workload deployments.apps/authors
|
||||
Disconnecting from the cluster...
|
||||
Performing cleanup operations
|
||||
Clearing DNS settings
|
||||
root@d0b3dab8912a:/app# exit
|
||||
exit
|
||||
➜ ~
|
||||
```
|
||||
|
||||
during test, check what container is running
|
||||
|
||||
```text
|
||||
➜ ~ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
1cd576b51b66 naison/authors:latest "sh" 4 minutes ago Up 4 minutes authors_default_kubevpn_6df5f
|
||||
56a6793df82d nginx:latest "/docker-entrypoint.…" 4 minutes ago Up 4 minutes nginx_default_kubevpn_6df63
|
||||
d0b3dab8912a naison/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
|
||||
d0b3dab8912a naison/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
|
||||
➜ ~
|
||||
```
|
||||
|
||||
* For clean up after test
|
||||
|
||||
```shell
|
||||
kubectl delete -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
kubectl delete -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
```
|
||||
|
||||
|
||||
### Multiple Protocol
|
||||
|
||||
support OSI model layers 3 and above, protocols like `ICMP`, `TCP`, and `UDP`...
|
||||
|
||||
- TCP
|
||||
- UDP
|
||||
- ICMP
|
||||
- GRPC
|
||||
- gRPC
|
||||
- Thrift
|
||||
- WebSocket
|
||||
- HTTP
|
||||
- ...
|
||||
@@ -638,15 +710,11 @@ kubectl delete -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/
|
||||
- Linux
|
||||
- Windows
|
||||
|
||||
on Windows platform, you need to
|
||||
install [PowerShell](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.2)
|
||||
in advance
|
||||
|
||||
## FAQ
|
||||
|
||||
### 1, What should I do if the dependent image cannot be pulled, or the inner environment cannot access docker.io?
|
||||
|
||||
Answer: here are two solution to solve this problem
|
||||
Answer: here are two solutions to solve this problem
|
||||
|
||||
- Solution 1: In the network that can access docker.io, transfer the image in the command `kubevpn version` to your own
|
||||
private image registry, and then add option `--image` to special image when starting the command.
|
||||
@@ -656,7 +724,7 @@ Answer: here are two solution to solve this problem
|
||||
➜ ~ kubevpn version
|
||||
KubeVPN: CLI
|
||||
Version: v2.0.0
|
||||
DaemonVersion: v2.0.0
|
||||
Daemon: v2.0.0
|
||||
Image: docker.io/naison/kubevpn:v2.0.0
|
||||
Branch: feature/daemon
|
||||
Git commit: 7c3a87e14e05c238d8fb23548f95fa1dd6e96936
|
||||
@@ -677,9 +745,10 @@ Then you can use this image, as follows:
|
||||
|
||||
```text
|
||||
➜ ~ kubevpn connect --image [docker registry]/[namespace]/[repo]:[tag]
|
||||
got cidr from cache
|
||||
traffic manager not exist, try to create it...
|
||||
pod [kubevpn-traffic-manager] status is Running
|
||||
Starting connect
|
||||
Getting network CIDR from cluster info...
|
||||
Getting network CIDR from CNI...
|
||||
Getting network CIDR from services...
|
||||
...
|
||||
```
|
||||
|
||||
@@ -701,24 +770,23 @@ f5507edfc283: Pushed
|
||||
ecc065754c15: Pushed
|
||||
feda785382bb: Pushed
|
||||
v2.0.0: digest: sha256:85d29ebb53af7d95b9137f8e743d49cbc16eff1cdb9983128ab6e46e0c25892c size: 2000
|
||||
start to connect
|
||||
got cidr from cache
|
||||
get cidr successfully
|
||||
update ref count successfully
|
||||
traffic manager already exist, reuse it
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
+---------------------------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster, enjoy it :) |
|
||||
+---------------------------------------------------------------------------+
|
||||
Starting connect
|
||||
Got network CIDR from cache
|
||||
Use exist traffic manager
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
+----------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster ! |
|
||||
+----------------------------------------------------------+
|
||||
➜ ~
|
||||
```
|
||||
|
||||
### 2, When use `kubevpn dev`, but got error code 137, how to resolve ?
|
||||
### 2, When use `kubevpn dev`, but got error code 137, how to resolve?
|
||||
|
||||
```text
|
||||
dns service ok
|
||||
Configured DNS service
|
||||
tar: Removing leading `/' from member names
|
||||
tar: Removing leading `/' from hard link targets
|
||||
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/7375606548554947868:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
@@ -726,11 +794,8 @@ Created container: server_vke-system_kubevpn_0db84
|
||||
Wait container server_vke-system_kubevpn_0db84 to be running...
|
||||
Container server_vke-system_kubevpn_0db84 is running on port 8888/tcp: 6789/tcp:6789 now
|
||||
$ Status: , Code: 137
|
||||
prepare to exit, cleaning up
|
||||
port-forward occurs error, err: lost connection to pod, retrying
|
||||
update ref count successfully
|
||||
ref-count is zero, prepare to clean up resource
|
||||
clean up successfully
|
||||
Performing cleanup operations
|
||||
Clearing DNS settings
|
||||
```
|
||||
|
||||
This is because your Docker Desktop resource limit is lower than the resources the pod requests, so the container was OOM-killed; so
|
||||
@@ -801,3 +866,19 @@ add subnet not conflict, eg: 172.15.0.1/24
|
||||
```
|
||||
|
||||
restart docker and retry
|
||||
|
||||
## Architecture
|
||||
|
||||
Architecture can be found [here](/docs/en/Architecture.md)
|
||||
and [website](https://www.kubevpn.cn/docs/architecture/connect).
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are always welcome. Even just opening an issue is appreciated.
|
||||
|
||||
If you want to debug this project on your local PC, please follow the steps below:
|
||||
|
||||
- Start the daemon and sudo daemon processes in IDE debug mode. (Essentially two gRPC servers.)
|
||||
- Add breakpoint to file `pkg/daemon/action/connect.go:21`.
|
||||
- Open another terminal run `make kubevpn`.
|
||||
- Then run `./bin/kubevpn connect` and it will hit breakpoint.
|
||||
578
README_ZH.md
578
README_ZH.md
@@ -1,110 +1,124 @@
|
||||

|
||||
|
||||
[![GitHub Workflow][1]](https://github.com/KubeNetworks/kubevpn/actions)
|
||||
[![Go Version][2]](https://github.com/KubeNetworks/kubevpn/blob/master/go.mod)
|
||||
[![Go Report][3]](https://goreportcard.com/badge/github.com/KubeNetworks/kubevpn)
|
||||
[![Maintainability][4]](https://codeclimate.com/github/KubeNetworks/kubevpn/maintainability)
|
||||
[![GitHub License][5]](https://github.com/KubeNetworks/kubevpn/blob/main/LICENSE)
|
||||
[![GitHub Workflow][1]](https://github.com/kubenetworks/kubevpn/actions)
|
||||
[![Go Version][2]](https://github.com/kubenetworks/kubevpn/blob/master/go.mod)
|
||||
[![Go Report][3]](https://goreportcard.com/report/github.com/wencaiwulue/kubevpn)
|
||||
[![Maintainability][4]](https://codeclimate.com/github/kubenetworks/kubevpn/maintainability)
|
||||
[![GitHub License][5]](https://github.com/kubenetworks/kubevpn/blob/main/LICENSE)
|
||||
[![Docker Pulls][6]](https://hub.docker.com/r/naison/kubevpn)
|
||||
[![Releases][7]](https://github.com/KubeNetworks/kubevpn/releases)
|
||||
[![Releases][7]](https://github.com/kubenetworks/kubevpn/releases)
|
||||
[](https://pkg.go.dev/github.com/wencaiwulue/kubevpn/v2)
|
||||
[](https://codecov.io/gh/wencaiwulue/kubevpn)
|
||||
|
||||
[1]: https://img.shields.io/github/actions/workflow/status/KubeNetworks/kubevpn/release.yml?logo=github
|
||||
[1]: https://img.shields.io/github/actions/workflow/status/kubenetworks/kubevpn/release.yml?logo=github
|
||||
|
||||
[2]: https://img.shields.io/github/go-mod/go-version/KubeNetworks/kubevpn?logo=go
|
||||
[2]: https://img.shields.io/github/go-mod/go-version/kubenetworks/kubevpn?logo=go
|
||||
|
||||
[3]: https://img.shields.io/badge/go%20report-A+-brightgreen.svg?style=flat
|
||||
[3]: https://goreportcard.com/badge/github.com/wencaiwulue/kubevpn?style=flat
|
||||
|
||||
[4]: https://api.codeclimate.com/v1/badges/b5b30239174fc6603aca/maintainability
|
||||
|
||||
[5]: https://img.shields.io/github/license/KubeNetworks/kubevpn
|
||||
[5]: https://img.shields.io/github/license/kubenetworks/kubevpn
|
||||
|
||||
[6]: https://img.shields.io/docker/pulls/naison/kubevpn?logo=docker
|
||||
|
||||
[7]: https://img.shields.io/github/v/release/KubeNetworks/kubevpn?logo=smartthings
|
||||
[7]: https://img.shields.io/github/v/release/kubenetworks/kubevpn?logo=smartthings
|
||||
|
||||
# KubeVPN
|
||||
|
||||
[English](README.md) | [中文](README_ZH.md) | [维基](https://github.com/KubeNetworks/kubevpn/wiki/%E6%9E%B6%E6%9E%84)
|
||||
[English](README.md) | [中文](README_ZH.md) | [维基](https://github.com/kubenetworks/kubevpn/wiki/%E6%9E%B6%E6%9E%84)
|
||||
|
||||
KubeVPN 是一个云原生开发工具, 可以在本地连接云端 kubernetes 网络的工具,可以在本地直接访问远端集群的服务。也可以在远端集群访问到本地服务,便于调试及开发。同时还可以使用开发模式,直接在本地使用 Docker
|
||||
将远程容器运行在本地。
|
||||
KubeVPN 提供一个云原生开发环境。通过连接云端 kubernetes 网络,可以在本地使用 k8s dns 或者 Pod IP / Service IP
|
||||
直接访问远端集群中的服务。拦截远端集群中的工作负载的入流量到本地电脑,配合服务网格便于调试及开发。同时还可以使用开发模式,直接在本地使用
|
||||
Docker
|
||||
模拟 k8s pod runtime 将容器运行在本地 (具有相同的环境变量,磁盘和网络)。
|
||||
|
||||
## 内容
|
||||
|
||||
1. [快速开始](./README_ZH.md#快速开始)
|
||||
2. [功能](./README_ZH.md#功能)
|
||||
3. [问答](./README_ZH.md#问答)
|
||||
4. [架构](./README_ZH.md#架构)
|
||||
5. [贡献代码](./README_ZH.md#贡献代码)
|
||||
|
||||
## 快速开始
|
||||
|
||||
#### 从 Github release 下载编译好的二进制文件
|
||||
|
||||
[链接](https://github.com/KubeNetworks/kubevpn/releases/latest)
|
||||
|
||||
#### 从 自定义 Krew 仓库安装
|
||||
### 使用 [brew](https://brew.sh/) 安装 (macOS / Linux)
|
||||
|
||||
```shell
|
||||
(
|
||||
kubectl krew index add kubevpn https://github.com/KubeNetworks/kubevpn.git && \
|
||||
kubectl krew install kubevpn/kubevpn && kubectl kubevpn
|
||||
)
|
||||
brew install kubevpn
|
||||
```
|
||||
|
||||
#### 自己构建二进制文件
|
||||
### 使用 [scoop](https://scoop.sh/) (Windows)
|
||||
|
||||
```shell
|
||||
(
|
||||
git clone https://github.com/KubeNetworks/kubevpn.git && \
|
||||
cd kubevpn && make kubevpn && ./bin/kubevpn
|
||||
)
|
||||
|
||||
scoop bucket add extras
|
||||
scoop install kubevpn
|
||||
```
|
||||
|
||||
#### 安装 bookinfo 作为 demo 应用
|
||||
### 使用 [krew](https://krew.sigs.k8s.io/) (Windows / macOS / Linux)
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://raw.githubusercontent.com/KubeNetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
kubectl krew index add kubevpn https://github.com/kubenetworks/kubevpn.git
|
||||
kubectl krew install kubevpn/kubevpn
|
||||
kubectl kubevpn
|
||||
```
|
||||
|
||||
### 从 Github release 下载 (Windows / macOS / Linux)
|
||||
|
||||
[https://github.com/kubenetworks/kubevpn/releases/latest](https://github.com/kubenetworks/kubevpn/releases/latest)
|
||||
|
||||
### 安装 bookinfo 作为 demo 应用
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/samples/bookinfo.yaml
|
||||
```
|
||||
|
||||
## 功能
|
||||
|
||||
### 链接到集群网络
|
||||
|
||||
使用命令 `kubevpn connect` 链接到集群,请注意这里需要输入电脑密码。因为需要 `root` 权限。(创建虚拟网卡)
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn connect
|
||||
Password:
|
||||
start to connect
|
||||
get cidr from cluster info...
|
||||
get cidr from cluster info ok
|
||||
get cidr from cni...
|
||||
wait pod cni-net-dir-kubevpn to be running timeout, reason , ignore
|
||||
get cidr from svc...
|
||||
get cidr from svc ok
|
||||
get cidr successfully
|
||||
traffic manager not exist, try to create it...
|
||||
label namespace default
|
||||
create serviceAccount kubevpn-traffic-manager
|
||||
create roles kubevpn-traffic-manager
|
||||
create roleBinding kubevpn-traffic-manager
|
||||
create service kubevpn-traffic-manager
|
||||
create deployment kubevpn-traffic-manager
|
||||
pod kubevpn-traffic-manager-66d969fd45-9zlbp is Pending
|
||||
Starting connect
|
||||
Getting network CIDR from cluster info...
|
||||
Getting network CIDR from CNI...
|
||||
Getting network CIDR from services...
|
||||
Labeling Namespace default
|
||||
Creating ServiceAccount kubevpn-traffic-manager
|
||||
Creating Roles kubevpn-traffic-manager
|
||||
Creating RoleBinding kubevpn-traffic-manager
|
||||
Creating Service kubevpn-traffic-manager
|
||||
Creating MutatingWebhookConfiguration kubevpn-traffic-manager
|
||||
Creating Deployment kubevpn-traffic-manager
|
||||
|
||||
Pod kubevpn-traffic-manager-66d969fd45-9zlbp is Pending
|
||||
Container Reason Message
|
||||
control-plane ContainerCreating
|
||||
vpn ContainerCreating
|
||||
webhook ContainerCreating
|
||||
|
||||
pod kubevpn-traffic-manager-66d969fd45-9zlbp is Running
|
||||
Pod kubevpn-traffic-manager-66d969fd45-9zlbp is Running
|
||||
Container Reason Message
|
||||
control-plane ContainerRunning
|
||||
vpn ContainerRunning
|
||||
webhook ContainerRunning
|
||||
|
||||
Creating mutatingWebhook_configuration for kubevpn-traffic-manager
|
||||
update ref count successfully
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
+---------------------------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster, enjoy it :) |
|
||||
+---------------------------------------------------------------------------+
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
+----------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster ! |
|
||||
+----------------------------------------------------------+
|
||||
➜ ~
|
||||
```
|
||||
|
||||
提示已经链接到集群了。使用命令 `kubevpn status` 检查一下状态。
|
||||
|
||||
```shell
|
||||
➜ ~ kubectl get pods -o wide
|
||||
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
|
||||
@@ -116,6 +130,8 @@ ratings-77b6cd4499-zvl6c 1/1 Running 0
|
||||
reviews-85c88894d9-vgkxd 1/1 Running 0 24d 172.29.2.249 192.168.0.5 <none> <none>
|
||||
```
|
||||
|
||||
找一个 pod 的 IP,比如 `productpage-788df7ff7f-jpkcs` 的 IP `172.29.2.134`
|
||||
|
||||
```shell
|
||||
➜ ~ ping 172.29.2.134
|
||||
PING 172.29.2.134 (172.29.2.134): 56 data bytes
|
||||
@@ -129,6 +145,8 @@ PING 172.29.2.134 (172.29.2.134): 56 data bytes
|
||||
round-trip min/avg/max/stddev = 54.293/55.380/56.270/0.728 ms
|
||||
```
|
||||
|
||||
测试应该可以直接 Ping 通,说明本地可以正常访问到集群网络了。
|
||||
|
||||
```shell
|
||||
➜ ~ kubectl get services -o wide
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
|
||||
@@ -141,6 +159,8 @@ ratings ClusterIP 172.21.3.247 <none> 9080/TCP
|
||||
reviews ClusterIP 172.21.8.24 <none> 9080/TCP 114d app=reviews
|
||||
```
|
||||
|
||||
找一个 service 的 IP,比如 `productpage` 的 IP `172.21.10.49`,试着访问一下服务 `productpage`
|
||||
|
||||
```shell
|
||||
➜ ~ curl 172.21.10.49:9080
|
||||
<!DOCTYPE html>
|
||||
@@ -152,8 +172,16 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
```
|
||||
|
||||
可以看到也可以正常访问,也就是可以在本地访问到集群的 pod 和 service 了~
|
||||
|
||||
### 域名解析功能
|
||||
|
||||
支持 k8s dns 解析。比如一个名为 `productpage` 的 Pod 或者 Service 处于 `default` 命名空间下可以被如下域名正常解析到:
|
||||
|
||||
- `productpage`
|
||||
- `productpage.default`
|
||||
- `productpage.default.svc.cluster.local`
|
||||
|
||||
```shell
|
||||
➜ ~ curl productpage.default.svc.cluster.local:9080
|
||||
<!DOCTYPE html>
|
||||
@@ -165,8 +193,15 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
```
|
||||
|
||||
可以看到能够被正常解析,并且返回相应内容。
|
||||
|
||||
### 短域名解析功能
|
||||
|
||||
连接到此命名空间下,可以直接使用 `service` name 的方式访问,否则访问其它命令空间下的服务,需要带上命令空间作为域名的一部分,使用如下的域名即可。
|
||||
|
||||
- `productpage.default`
|
||||
- `productpage.default.svc.cluster.local`
|
||||
|
||||
```shell
|
||||
➜ ~ curl productpage:9080
|
||||
<!DOCTYPE html>
|
||||
@@ -178,25 +213,71 @@ reviews ClusterIP 172.21.8.24 <none> 9080/TCP
|
||||
...
|
||||
```
|
||||
|
||||
可以看到直接使用 service name 的方式,可以正常访问到集群资源。
|
||||
|
||||
### 链接到多集群网络
|
||||
|
||||
有个两个模式
|
||||
|
||||
- 模式 `lite`: 可以链接到多个集群网络,但是仅支持链接到多集群。
|
||||
- 模式 `full`: 不仅支持链接到单个集群网络,还可以拦截工作负载流量到本地电脑。
|
||||
|
||||
可以看到已经链接到了一个集群 `ccijorbccotmqodvr189g`,是 `full` 模式
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn status
|
||||
ID Mode Cluster Kubeconfig Namespace Status
|
||||
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
|
||||
```
|
||||
|
||||
此时还可以使用 `lite` 模式链接到其它集群
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn connect -n default --kubeconfig ~/.kube/dev_config --lite
|
||||
Starting connect
|
||||
Got network CIDR from cache
|
||||
Use exist traffic manager
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
+----------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster ! |
|
||||
+----------------------------------------------------------+
|
||||
```
|
||||
|
||||
使用命令 `kubevpn status` 查看当前链接状态。
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn status
|
||||
ID Mode Cluster Kubeconfig Namespace Status
|
||||
0 full ccijorbccotmqodvr189g /Users/naison/.kube/config default Connected
|
||||
1 lite ccidd77aam2dtnc3qnddg /Users/naison/.kube/dev_config default Connected
|
||||
➜ ~
|
||||
```
|
||||
|
||||
可以看到连接到了多个集群。
|
||||
|
||||
### 反向代理
|
||||
|
||||
使用命令 `kubevpn proxy` 代理所有的入站流量到本地电脑。
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn proxy deployment/productpage
|
||||
already connect to cluster
|
||||
start to create remote inbound pod for deployment/productpage
|
||||
workload default/deployment/productpage is controlled by a controller
|
||||
rollout status for deployment/productpage
|
||||
Connected to cluster
|
||||
Injecting inbound sidecar for deployment/productpage
|
||||
Checking rollout status for deployment/productpage
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
deployment "productpage" successfully rolled out
|
||||
rollout status for deployment/productpage successfully
|
||||
create remote inbound pod for deployment/productpage successfully
|
||||
+---------------------------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster, enjoy it :) |
|
||||
+---------------------------------------------------------------------------+
|
||||
Rollout successfully for deployment/productpage
|
||||
+----------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster ! |
|
||||
+----------------------------------------------------------+
|
||||
➜ ~
|
||||
```
|
||||
|
||||
此时在本地使用 `go` 启动一个服务,用于承接流量。
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
@@ -213,6 +294,8 @@ func main() {
|
||||
}
|
||||
```
|
||||
|
||||
使用 `service` name 的方式,直接访问集群中的 `productpage` 服务。
|
||||
|
||||
```shell
|
||||
➜ ~ curl productpage:9080
|
||||
Hello world!%
|
||||
@@ -220,27 +303,28 @@ Hello world!%
|
||||
Hello world!%
|
||||
```
|
||||
|
||||
可以看到直接击中了本地电脑的服务。
|
||||
|
||||
### 反向代理支持 service mesh
|
||||
|
||||
支持 HTTP, GRPC 和 WebSocket 等, 携带了指定 header `"a: 1"` 的流量,将会路由到本地
|
||||
支持 HTTP, GRPC 和 WebSocket 等, 携带了指定 header `"foo: bar"` 的流量,将会路由到本地
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn proxy deployment/productpage --headers a=1
|
||||
already connect to cluster
|
||||
start to create remote inbound pod for deployment/productpage
|
||||
patch workload default/deployment/productpage with sidecar
|
||||
rollout status for deployment/productpage
|
||||
➜ ~ kubevpn proxy deployment/productpage --headers foo=bar
|
||||
Connected to cluster
|
||||
Injecting inbound sidecar for deployment/productpage
|
||||
Checking rollout status for deployment/productpage
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
deployment "productpage" successfully rolled out
|
||||
rollout status for deployment/productpage successfully
|
||||
create remote inbound pod for deployment/productpage successfully
|
||||
+---------------------------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster, enjoy it :) |
|
||||
+---------------------------------------------------------------------------+
|
||||
Rollout successfully for deployment/productpage
|
||||
+----------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster ! |
|
||||
+----------------------------------------------------------+
|
||||
➜ ~
|
||||
```
|
||||
|
||||
不带 header 直接访问集群资源,可以看到返回的是集群中的服务内容。
|
||||
|
||||
```shell
|
||||
➜ ~ curl productpage:9080
|
||||
<!DOCTYPE html>
|
||||
@@ -253,34 +337,46 @@ create remote inbound pod for deployment/productpage successfully
|
||||
...
|
||||
```
|
||||
|
||||
带上特定 header 访问集群资源,可以看到返回了本地服务的内容。
|
||||
|
||||
```shell
|
||||
➜ ~ curl productpage:9080 -H "a: 1"
|
||||
➜ ~ curl productpage:9080 -H "foo: bar"
|
||||
Hello world!%
|
||||
```
|
||||
|
||||
如果你需要取消代理流量,可以执行如下命令:
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn leave deployments/productpage
|
||||
Leaving workload deployments/productpage
|
||||
Checking rollout status for deployments/productpage
|
||||
Waiting for deployment "productpage" rollout to finish: 0 out of 1 new replicas have been updated...
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
Waiting for deployment "productpage" rollout to finish: 1 old replicas are pending termination...
|
||||
Rollout successfully for deployments/productpage
|
||||
```
|
||||
|
||||
### 本地进入开发模式 🐳
|
||||
|
||||
将 Kubernetes pod 运行在本地的 Docker 容器中,同时配合 service mesh, 拦截带有指定 header 的流量到本地,或者所有的流量到本地。这个开发模式依赖于本地 Docker。
|
||||
将 Kubernetes pod 运行在本地的 Docker 容器中,同时配合 service mesh, 拦截带有指定 header 的流量到本地,或者所有的流量到本地。这个开发模式依赖于本地
|
||||
Docker。
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn dev deployment/authors --headers a=1 -it --rm --entrypoint sh
|
||||
connectting to cluster
|
||||
start to connect
|
||||
got cidr from cache
|
||||
get cidr successfully
|
||||
update ref count successfully
|
||||
traffic manager already exist, reuse it
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
start to create remote inbound pod for Deployment.apps/authors
|
||||
patch workload default/Deployment.apps/authors with sidecar
|
||||
rollout status for Deployment.apps/authors
|
||||
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
|
||||
➜ ~ kubevpn dev deployment/authors --headers foo=bar --entrypoint sh
|
||||
Starting connect
|
||||
Got network CIDR from cache
|
||||
Use exist traffic manager
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
Injecting inbound sidecar for deployment/authors
|
||||
Patching workload deployment/authors
|
||||
Checking rollout status for deployment/authors
|
||||
Waiting for deployment "authors" rollout to finish: 0 out of 1 new replicas have been updated...
|
||||
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
|
||||
deployment "authors" successfully rolled out
|
||||
rollout status for Deployment.apps/authors successfully
|
||||
create remote inbound pod for Deployment.apps/authors successfully
|
||||
Rollout successfully for Deployment.apps/authors
|
||||
tar: removing leading '/' from member names
|
||||
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/4563987760170736212:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
tar: Removing leading `/' from member names
|
||||
@@ -317,21 +413,22 @@ OK: 8 MiB in 19 packages
|
||||
/opt/microservices # 2023/09/30 13:41:58 Start listening http port 9080 ...
|
||||
|
||||
/opt/microservices # curl localhost:9080/health
|
||||
{"status":"Authors is healthy"}/opt/microservices # exit
|
||||
prepare to exit, cleaning up
|
||||
update ref count successfully
|
||||
tun device closed
|
||||
leave resource: deployments.apps/authors
|
||||
workload default/deployments.apps/authors is controlled by a controller
|
||||
leave resource: deployments.apps/authors successfully
|
||||
clean up successfully
|
||||
prepare to exit, cleaning up
|
||||
update ref count successfully
|
||||
clean up successfully
|
||||
{"status":"Authors is healthy"} /opt/microservices # echo "continue testing pod access..."
|
||||
continue testing pod access...
|
||||
/opt/microservices # exit
|
||||
Created container: default_authors
|
||||
Wait container default_authors to be running...
|
||||
Container default_authors is running now
|
||||
Disconnecting from the cluster...
|
||||
Leaving workload deployments.apps/authors
|
||||
Disconnecting from the cluster...
|
||||
Performing cleanup operations
|
||||
Clearing DNS settings
|
||||
➜ ~
|
||||
```
|
||||
|
||||
此时本地会启动两个 container, 对应 pod 容器中的两个 container, 并且共享端口, 可以直接使用 localhost:port 的形式直接访问另一个 container,
|
||||
此时本地会启动两个 container, 对应 pod 容器中的两个 container, 并且共享端口, 可以直接使用 localhost:port 的形式直接访问另一个
|
||||
container,
|
||||
并且, 所有的环境变量、挂载卷、网络条件都和 pod 一样, 真正做到与 kubernetes 运行环境一致。
|
||||
|
||||
```shell
|
||||
@@ -345,22 +442,20 @@ fc04e42799a5 nginx:latest "/docker-entrypoint.…" 37 sec
|
||||
如果你只是想在本地启动镜像,可以用一种简单的方式:
|
||||
|
||||
```shell
|
||||
kubevpn dev deployment/authors --no-proxy -it --rm
|
||||
kubevpn dev deployment/authors --no-proxy
|
||||
```
|
||||
|
||||
例如:
|
||||
|
||||
```shell
|
||||
➜ ~ kubevpn dev deployment/authors --no-proxy -it --rm
|
||||
connectting to cluster
|
||||
start to connect
|
||||
got cidr from cache
|
||||
get cidr successfully
|
||||
update ref count successfully
|
||||
traffic manager already exist, reuse it
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
➜ ~ kubevpn dev deployment/authors --no-proxy
|
||||
Starting connect
|
||||
Got network CIDR from cache
|
||||
Use exist traffic manager
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
tar: removing leading '/' from member names
|
||||
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/5631078868924498209:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
tar: Removing leading `/' from member names
|
||||
@@ -378,68 +473,59 @@ Created main container: authors_default_kubevpn_ff34b
|
||||
|
||||
此时程序会挂起,默认为显示日志
|
||||
|
||||
如果你想指定在本地启动容器的镜像, 可以使用参数 `--docker-image`, 当本地不存在该镜像时, 会从对应的镜像仓库拉取。如果你想指定启动参数,可以使用 `--entrypoint`
|
||||
如果你想指定在本地启动容器的镜像, 可以使用参数 `--dev-image`, 当本地不存在该镜像时,
|
||||
会从对应的镜像仓库拉取。如果你想指定启动参数,可以使用 `--entrypoint`
|
||||
参数,替换为你想要执行的命令,比如 `--entrypoint /bin/bash`, 更多使用参数,请参见 `kubevpn dev --help`.
|
||||
|
||||
### DinD ( Docker in Docker ) 在 Docker 中使用 kubevpn
|
||||
|
||||
如果你想在本地使用 Docker in Docker (DinD) 的方式启动开发模式, 由于程序会读写 `/tmp` 目录,您需要手动添加参数 `-v /tmp:/tmp`, 还有一点需要注意, 如果使用 DinD
|
||||
如果你想在本地使用 Docker in Docker (DinD) 的方式启动开发模式, 由于程序会读写 `/tmp`
|
||||
目录,您需要手动添加参数 `-v /tmp:/tmp`, 还有一点需要注意, 如果使用 DinD
|
||||
模式,为了共享容器网络和 pid, 还需要指定参数 `--network`
|
||||
|
||||
例如:
|
||||
|
||||
```shell
|
||||
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 naison/kubevpn:v2.0.0
|
||||
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config --platform linux/amd64 naison/kubevpn:latest
|
||||
```
|
||||
|
||||
```shell
|
||||
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 naison/kubevpn:v2.0.0
|
||||
Unable to find image 'naison/kubevpn:v2.0.0' locally
|
||||
v2.0.0: Pulling from naison/kubevpn
|
||||
445a6a12be2b: Already exists
|
||||
bd6c670dd834: Pull complete
|
||||
64a7297475a2: Pull complete
|
||||
33fa2e3224db: Pull complete
|
||||
e008f553422a: Pull complete
|
||||
5132e0110ddc: Pull complete
|
||||
5b2243de1f1a: Pull complete
|
||||
662a712db21d: Pull complete
|
||||
4f4fb700ef54: Pull complete
|
||||
33f0298d1d4f: Pull complete
|
||||
Digest: sha256:115b975a97edd0b41ce7a0bc1d8428e6b8569c91a72fe31ea0bada63c685742e
|
||||
Status: Downloaded newer image for naison/kubevpn:v2.0.0
|
||||
root@d0b3dab8912a:/app# kubevpn dev deployment/authors --headers user=naison -it --entrypoint sh
|
||||
|
||||
----------------------------------------------------------------------------------
|
||||
Warn: Use sudo to execute command kubevpn can not use user env KUBECONFIG.
|
||||
Because of sudo user env and user env are different.
|
||||
Current env KUBECONFIG value:
|
||||
----------------------------------------------------------------------------------
|
||||
|
||||
hostname is d0b3dab8912a
|
||||
connectting to cluster
|
||||
start to connect
|
||||
got cidr from cache
|
||||
get cidr successfully
|
||||
update ref count successfully
|
||||
traffic manager already exist, reuse it
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
start to create remote inbound pod for Deployment.apps/authors
|
||||
patch workload default/Deployment.apps/authors with sidecar
|
||||
rollout status for Deployment.apps/authors
|
||||
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
|
||||
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config --platform linux/amd64 naison/kubevpn:latest
|
||||
Unable to find image 'naison/kubevpn:latest' locally
|
||||
latest: Pulling from naison/kubevpn
|
||||
9c704ecd0c69: Already exists
|
||||
4987d0a976b5: Pull complete
|
||||
8aa94c4fc048: Pull complete
|
||||
526fee014382: Pull complete
|
||||
6c1c2bedceb6: Pull complete
|
||||
97ac845120c5: Pull complete
|
||||
ca82aef6a9eb: Pull complete
|
||||
1fd9534c7596: Pull complete
|
||||
588bd802eb9c: Pull complete
|
||||
Digest: sha256:368db2e0d98f6866dcefd60512960ce1310e85c24a398fea2a347905ced9507d
|
||||
Status: Downloaded newer image for naison/kubevpn:latest
|
||||
WARNING: image with reference naison/kubevpn was found but does not match the specified platform: wanted linux/amd64, actual: linux/arm64
|
||||
root@5732124e6447:/app# kubevpn dev deployment/authors --headers user=naison --entrypoint sh
|
||||
hostname is 5732124e6447
|
||||
Starting connect
|
||||
Got network CIDR from cache
|
||||
Use exist traffic manager
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
Injecting inbound sidecar for deployment/authors
|
||||
Patching workload deployment/authors
|
||||
Checking rollout status for deployment/authors
|
||||
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
|
||||
deployment "authors" successfully rolled out
|
||||
rollout status for Deployment.apps/authors successfully
|
||||
create remote inbound pod for Deployment.apps/authors successfully
|
||||
Rollout successfully for Deployment.apps/authors
|
||||
tar: removing leading '/' from member names
|
||||
/tmp/6460902982794789917:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
tar: Removing leading `/' from member names
|
||||
tar: Removing leading `/' from hard link targets
|
||||
/tmp/5028895788722532426:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
network mode is container:d0b3dab8912a
|
||||
Network mode is container:d0b3dab8912a
|
||||
Created container: nginx_default_kubevpn_6df63
|
||||
Wait container nginx_default_kubevpn_6df63 to be running...
|
||||
Container nginx_default_kubevpn_6df63 is running now
|
||||
@@ -456,77 +542,82 @@ PID USER TIME COMMAND
|
||||
Executing busybox-1.33.1-r3.trigger
|
||||
OK: 8 MiB in 19 packagesnx: worker process
|
||||
/opt/microservices #
|
||||
|
||||
/opt/microservices # cat > hello.go <<EOF
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func main() {
|
||||
http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
|
||||
_, _ = io.WriteString(writer, "Hello world!")
|
||||
fmt.Printf(">> Container Received request: %s %s from %s\n", request.Method, request.RequestURI, request.RemoteAddr)
|
||||
})
|
||||
fmt.Println("Start listening http port 9080 ...")
|
||||
_ = http.ListenAndServe(":9080", nil)
|
||||
}
|
||||
EOF
|
||||
/opt/microservices # go build hello.go
|
||||
/opt/microservices #
|
||||
//opt/microservices # ls -alh
|
||||
total 12M
|
||||
drwxr-xr-x 1 root root 26 Nov 4 10:29 .
|
||||
drwxr-xr-x 1 root root 26 Oct 18 2021 ..
|
||||
-rwxr-xr-x 1 root root 6.3M Oct 18 2021 app
|
||||
-rwxr-xr-x 1 root root 5.8M Nov 4 10:29 hello
|
||||
-rw-r--r-- 1 root root 387 Nov 4 10:28 hello.go
|
||||
/opt/microservices #
|
||||
/opt/microservices # apk add curl
|
||||
OK: 8 MiB in 19 packages
|
||||
/opt/microservices # curl localhost:80
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Welcome to nginx!</title>
|
||||
<style>
|
||||
html { color-scheme: light dark; }
|
||||
body { width: 35em; margin: 0 auto;
|
||||
font-family: Tahoma, Verdana, Arial, sans-serif; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Welcome to nginx!</h1>
|
||||
<p>If you see this page, the nginx web server is successfully installed and
|
||||
working. Further configuration is required.</p>
|
||||
/opt/microservices # ./hello &
|
||||
/opt/microservices # Start listening http port 9080 ...
|
||||
[2]+ Done ./hello
|
||||
/opt/microservices # curl localhost:9080
|
||||
>> Container Received request: GET / from 127.0.0.1:41230
|
||||
Hello world!/opt/microservices #
|
||||
|
||||
<p>For online documentation and support please refer to
|
||||
<a href="http://nginx.org/">nginx.org</a>.<br/>
|
||||
Commercial support is available at
|
||||
<a href="http://nginx.com/">nginx.com</a>.</p>
|
||||
|
||||
<p><em>Thank you for using nginx.</em></p>
|
||||
</body>
|
||||
</html>
|
||||
/opt/microservices # ls
|
||||
app
|
||||
/opt/microservices # ls -alh
|
||||
total 6M
|
||||
drwxr-xr-x 2 root root 4.0K Oct 18 2021 .
|
||||
drwxr-xr-x 1 root root 4.0K Oct 18 2021 ..
|
||||
-rwxr-xr-x 1 root root 6.3M Oct 18 2021 app
|
||||
/opt/microservices # ./app &
|
||||
/opt/microservices # 2023/09/30 14:27:32 Start listening http port 9080 ...
|
||||
|
||||
/opt/microservices # curl authors:9080/health
|
||||
/opt/microservices # curl authors:9080/health
|
||||
{"status":"Authors is healthy"}/opt/microservices #
|
||||
/opt/microservices # curl authors:9080/health -H "foo: bar"
|
||||
>>Received request: GET /health from 223.254.0.109:57930
|
||||
Hello world!/opt/microservices #
|
||||
/opt/microservices # curl localhost:9080/health
|
||||
{"status":"Authors is healthy"}/opt/microservices # exit
|
||||
prepare to exit, cleaning up
|
||||
update ref count successfully
|
||||
tun device closed
|
||||
leave resource: deployments.apps/authors
|
||||
workload default/deployments.apps/authors is controlled by a controller
|
||||
leave resource: deployments.apps/authors successfully
|
||||
clean up successfully
|
||||
prepare to exit, cleaning up
|
||||
update ref count successfully
|
||||
clean up successfully
|
||||
Created container: default_authors
|
||||
Wait container default_authors to be running...
|
||||
Container default_authors is running now
|
||||
Disconnecting from the cluster...
|
||||
Leaving workload deployments.apps/authors
|
||||
Disconnecting from the cluster...
|
||||
Performing cleanup operations
|
||||
Clearing DNS settings
|
||||
root@d0b3dab8912a:/app# exit
|
||||
exit
|
||||
➜ ~
|
||||
```
|
||||
|
||||
可以看到实际上是在本地使用 `Docker` 启动了三个容器。
|
||||
|
||||
```text
|
||||
➜ ~ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
1cd576b51b66 naison/authors:latest "sh" 4 minutes ago Up 4 minutes authors_default_kubevpn_6df5f
|
||||
56a6793df82d nginx:latest "/docker-entrypoint.…" 4 minutes ago Up 4 minutes nginx_default_kubevpn_6df63
|
||||
d0b3dab8912a naison/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
|
||||
d0b3dab8912a naison/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago Up 5 minutes upbeat_noyce
|
||||
➜ ~
|
||||
```
|
||||
|
||||
### 支持多种协议
|
||||
|
||||
支持 OSI 模型三层及三层以上的协议,例如:
|
||||
|
||||
- TCP
|
||||
- UDP
|
||||
- ICMP
|
||||
- GRPC
|
||||
- gRPC
|
||||
- Thrift
|
||||
- WebSocket
|
||||
- HTTP
|
||||
- ...
|
||||
@@ -537,23 +628,21 @@ d0b3dab8912a naison/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago
|
||||
- Linux
|
||||
- Windows
|
||||
|
||||
Windows
|
||||
下需要安装 [PowerShell](https://docs.microsoft.com/zh-cn/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.2)
|
||||
|
||||
## 问答
|
||||
|
||||
### 1,依赖的镜像拉不下来,或者内网环境无法访问 docker.io 怎么办?
|
||||
|
||||
答:有两种方法可以解决
|
||||
|
||||
- 第一种,在可以访问 docker.io 的网络中,将命令 `kubevpn version` 中的 image 镜像, 转存到自己的私有镜像仓库,然后启动命令的时候,加上 `--image 新镜像` 即可。
|
||||
- 第一种,在可以访问 docker.io 的网络中,将命令 `kubevpn version` 中的 image 镜像,
|
||||
转存到自己的私有镜像仓库,然后启动命令的时候,加上 `--image 新镜像` 即可。
|
||||
例如:
|
||||
|
||||
``` shell
|
||||
➜ ~ kubevpn version
|
||||
KubeVPN: CLI
|
||||
Version: v2.0.0
|
||||
DaemonVersion: v2.0.0
|
||||
Daemon: v2.0.0
|
||||
Image: docker.io/naison/kubevpn:v2.0.0
|
||||
Branch: feature/daemon
|
||||
Git commit: 7c3a87e14e05c238d8fb23548f95fa1dd6e96936
|
||||
@@ -574,9 +663,11 @@ docker push [镜像仓库地址]/[命名空间]/[镜像仓库]:[镜像版本号]
|
||||
|
||||
```text
|
||||
➜ ~ kubevpn connect --image [docker registry]/[namespace]/[repo]:[tag]
|
||||
got cidr from cache
|
||||
traffic manager not exist, try to create it...
|
||||
pod [kubevpn-traffic-manager] status is Running
|
||||
Starting connect
|
||||
Getting network CIDR from cluster info...
|
||||
Getting network CIDR from CNI...
|
||||
Getting network CIDR from services...
|
||||
...
|
||||
...
|
||||
```
|
||||
|
||||
@@ -597,24 +688,23 @@ f5507edfc283: Pushed
|
||||
ecc065754c15: Pushed
|
||||
feda785382bb: Pushed
|
||||
v2.0.0: digest: sha256:85d29ebb53af7d95b9137f8e743d49cbc16eff1cdb9983128ab6e46e0c25892c size: 2000
|
||||
start to connect
|
||||
got cidr from cache
|
||||
get cidr successfully
|
||||
update ref count successfully
|
||||
traffic manager already exist, reuse it
|
||||
port forward ready
|
||||
tunnel connected
|
||||
dns service ok
|
||||
+---------------------------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster, enjoy it :) |
|
||||
+---------------------------------------------------------------------------+
|
||||
Starting connect
|
||||
Got network CIDR from cache
|
||||
Use exist traffic manager
|
||||
Forwarding port...
|
||||
Connected tunnel
|
||||
Adding route...
|
||||
Configured DNS service
|
||||
+----------------------------------------------------------+
|
||||
| Now you can access resources in the kubernetes cluster ! |
|
||||
+----------------------------------------------------------+
|
||||
➜ ~
|
||||
```
|
||||
|
||||
### 2,在使用 `kubevpn dev` 进入开发模式的时候,出现报错 137,该怎么解决?
|
||||
|
||||
```text
|
||||
dns service ok
|
||||
Configured DNS service
|
||||
tar: Removing leading `/' from member names
|
||||
tar: Removing leading `/' from hard link targets
|
||||
/var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/7375606548554947868:/var/run/secrets/kubernetes.io/serviceaccount
|
||||
@@ -622,19 +712,18 @@ Created container: server_vke-system_kubevpn_0db84
|
||||
Wait container server_vke-system_kubevpn_0db84 to be running...
|
||||
Container server_vke-system_kubevpn_0db84 is running on port 8888/tcp: 6789/tcp:6789 now
|
||||
$ Status: , Code: 137
|
||||
prepare to exit, cleaning up
|
||||
port-forward occurs error, err: lost connection to pod, retrying
|
||||
update ref count successfully
|
||||
ref-count is zero, prepare to clean up resource
|
||||
clean up successfully
|
||||
Performing cleanup operations
|
||||
Clearing DNS settings
|
||||
```
|
||||
|
||||
这是因为你的 `Docker-desktop` 声明的资源, 小于 container 容器启动时所需要的资源, 因此被 OOM 杀掉了, 你可以增加 `Docker-desktop` 对于 resources
|
||||
这是因为你的 `Docker-desktop` 声明的资源, 小于 container 容器启动时所需要的资源, 因此被 OOM 杀掉了,
|
||||
你可以增加 `Docker-desktop` 对于 resources
|
||||
的设置, 目录是:`Preferences --> Resources --> Memory`
|
||||
|
||||
### 3,使用 WSL(Windows Subsystem for Linux)的 Docker,用命令 `kubevpn dev` 进入开发模式的时候,在 terminal 中无法连接集群网络,这是为什么,如何解决?
|
||||
|
||||
答案: 这是因为 WSL 的 Docker 使用的是 主机 Windows 的网络, 所以即便在 WSL 中启动 container, 这个 container 不会使用 WSL 的网络,而是使用 Windows 的网络。
|
||||
答案: 这是因为 WSL 的 Docker 使用的是 主机 Windows 的网络, 所以即便在 WSL 中启动 container, 这个 container 不会使用 WSL
|
||||
的网络,而是使用 Windows 的网络。
|
||||
解决方案:
|
||||
|
||||
- 1): 在 WSL 中安装 Docker, 不要使用 Windows 版本的 Docker-desktop
|
||||
@@ -691,3 +780,18 @@ clean up successfully
|
||||
```
|
||||
|
||||
重启 docker,重新操作即可
|
||||
|
||||
## 架构
|
||||
|
||||
架构信息可以从[这里](/docs/en/Architecture.md) 和 [网站](https://www.kubevpn.cn/docs/architecture/connect) 找到。
|
||||
|
||||
## 贡献代码
|
||||
|
||||
欢迎任何形式的贡献,哪怕只是提交一个 issue 也非常欢迎~
|
||||
|
||||
如果你想在本地电脑上调试项目,可以按照这样的步骤:
|
||||
|
||||
- 使用喜欢的 IDE Debug 启动 daemon 和 sudo daemon 两个后台进程。(本质上是两个 GRPC server)
|
||||
- 添加断点给文件 `pkg/daemon/action/connect.go:21`
|
||||
- 新开个终端,执行命令 `make kubevpn`
|
||||
- 然后运行命令 `./bin/kubevpn connect` 这样将会击中断点
|
||||
24
TODO.MD
24
TODO.MD
@@ -1,24 +0,0 @@
|
||||
## TODO
|
||||
|
||||
- [x] 访问集群网络
|
||||
- [x] 域名解析功能
|
||||
- [x] 支持多个 service 反向代理
|
||||
- [x] 短域名解析
|
||||
- [x] 优化 DHCP 功能
|
||||
- [x] 支持多种类型,例如 statefulset, replicaset...
|
||||
- [ ] 支持 ipv6
|
||||
- [x] 自己实现 socks5 协议
|
||||
- [ ] 考虑是否需要把 openvpn tap/tun 驱动作为后备方案
|
||||
- [x] 加入 TLS 以提高安全性
|
||||
- [ ] 写个 CNI 网络插件,直接提供 VPN 功能
|
||||
- [x] 优化重连逻辑
|
||||
- [x] 支持 service mesh
|
||||
- [x] service mesh 支持多端口
|
||||
- [x] 使用自己写的 proxy 替换 envoy
|
||||
- [ ] 优化性能,Windows 上考虑使用 IPC 通信
|
||||
- [x] 自己写个 control plane
|
||||
- [x] 考虑是否将 control plane 和服务分开
|
||||
- [x] 写单元测试,优化 GitHub action
|
||||
- [x] Linux 和 macOS 也改用 WireGuard library
|
||||
- [x] 探测是否有重复路由的 utun设备,禁用 `sudo ifconfig utun1 down`
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
FROM envoyproxy/envoy:v1.25.0 AS envoy
|
||||
FROM golang:1.20 AS builder
|
||||
FROM golang:1.23 AS builder
|
||||
ARG BASE=github.com/wencaiwulue/kubevpn
|
||||
|
||||
COPY . /go/src/$BASE
|
||||
@@ -16,7 +16,18 @@ ARG BASE=github.com/wencaiwulue/kubevpn
|
||||
RUN sed -i s@/security.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list \
|
||||
&& sed -i s@/archive.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list
|
||||
RUN apt-get clean && apt-get update && apt-get install -y wget dnsutils vim curl \
|
||||
net-tools iptables iputils-ping lsof iproute2 tcpdump binutils traceroute conntrack socat iperf3
|
||||
net-tools iptables iputils-ping lsof iproute2 tcpdump binutils traceroute conntrack socat iperf3 \
|
||||
apt-transport-https ca-certificates curl
|
||||
|
||||
RUN if [ $(uname -m) = "x86_64" ]; then \
|
||||
echo "The architecture is AMD64"; \
|
||||
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && chmod +x kubectl && mv kubectl /usr/local/bin; \
|
||||
elif [ $(uname -m) = "aarch64" ]; then \
|
||||
echo "The architecture is ARM64"; \
|
||||
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" && chmod +x kubectl && mv kubectl /usr/local/bin; \
|
||||
else \
|
||||
echo "Unsupported architecture."; \
|
||||
fi
|
||||
|
||||
ENV TZ=Asia/Shanghai \
|
||||
DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
FROM golang:1.20 as delve
|
||||
RUN curl --location --output delve-1.20.1.tar.gz https://github.com/go-delve/delve/archive/v1.20.1.tar.gz \
|
||||
&& tar xzf delve-1.20.1.tar.gz
|
||||
RUN cd delve-1.20.1 && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o /go/dlv -ldflags '-extldflags "-static"' ./cmd/dlv/
|
||||
FROM golang:1.23 as delve
|
||||
RUN curl --location --output delve-1.23.1.tar.gz https://github.com/go-delve/delve/archive/v1.23.1.tar.gz \
|
||||
&& tar xzf delve-1.23.1.tar.gz
|
||||
RUN cd delve-1.23.1 && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o /go/dlv -ldflags '-extldflags "-static"' ./cmd/dlv/
|
||||
FROM busybox
|
||||
COPY --from=delve /go/dlv /bin/dlv
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM golang:1.20 AS builder
|
||||
FROM golang:1.23 AS builder
|
||||
RUN go env -w GO111MODULE=on && go env -w GOPROXY=https://goproxy.cn,direct
|
||||
RUN go install github.com/go-delve/delve/cmd/dlv@latest
|
||||
|
||||
@@ -8,7 +8,18 @@ FROM ubuntu:latest
|
||||
RUN sed -i s@/security.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list \
|
||||
&& sed -i s@/archive.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list
|
||||
RUN apt-get clean && apt-get update && apt-get install -y wget dnsutils vim curl \
|
||||
net-tools iptables iputils-ping lsof iproute2 tcpdump binutils traceroute conntrack socat iperf3
|
||||
net-tools iptables iputils-ping lsof iproute2 tcpdump binutils traceroute conntrack socat iperf3 \
|
||||
apt-transport-https ca-certificates curl
|
||||
|
||||
RUN if [ $(uname -m) = "x86_64" ]; then \
|
||||
echo "The architecture is AMD64"; \
|
||||
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && chmod +x kubectl && mv kubectl /usr/local/bin; \
|
||||
elif [ $(uname -m) = "aarch64" ]; then \
|
||||
echo "The architecture is ARM64"; \
|
||||
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" && chmod +x kubectl && mv kubectl /usr/local/bin; \
|
||||
else \
|
||||
echo "Unsupported architecture."; \
|
||||
fi
|
||||
|
||||
ENV TZ=Asia/Shanghai \
|
||||
DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
254
charts/index.yaml
Normal file
254
charts/index.yaml
Normal file
@@ -0,0 +1,254 @@
|
||||
apiVersion: v1
|
||||
entries:
|
||||
kubevpn:
|
||||
- apiVersion: v2
|
||||
appVersion: v2.3.4
|
||||
created: "2024-11-29T13:03:24.255324387Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 2804aa624f6139695f3fb723bdc6ba087492bcd8810baf7196a1ae88bd2a62b5
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.4/kubevpn-2.3.4.tgz
|
||||
version: 2.3.4
|
||||
- apiVersion: v2
|
||||
appVersion: v2.3.3
|
||||
created: "2024-11-22T14:54:13.795282085Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 33cbbc9312e7b7e415fb14f80f17df50d305194617bcf75d1501227cb90b8f32
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.3/kubevpn-2.3.3.tgz
|
||||
version: 2.3.3
|
||||
- apiVersion: v2
|
||||
appVersion: v2.3.2
|
||||
created: "2024-11-18T11:52:12.076510627Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: cdb38ab84bf1649ac4280f6996060c49a095f9c056044cd5f691e7bf4f259dad
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.2/kubevpn-2.3.2.tgz
|
||||
version: 2.3.2
|
||||
- apiVersion: v2
|
||||
appVersion: v2.3.1
|
||||
created: "2024-11-15T13:36:37.056311943Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 10c1200241309be4ec2eb88e9689ebbf96704c8fad270e6fda30047135aeccf2
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.3.1/kubevpn-2.3.1.tgz
|
||||
version: 2.3.1
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.22
|
||||
created: "2024-10-30T08:46:08.845218523Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: c2dc336383d7de2fb97cfd40a15e9f6c29a9a598484b88515a98bcaeb4925eda
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.22/kubevpn-2.2.22.tgz
|
||||
version: 2.2.22
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.21
|
||||
created: "2024-10-25T14:10:25.545716679Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 98ae51247535525ff6a10b5f493d8bfc573af62759432f7aa54dd7eb6edeffd5
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.21/kubevpn-2.2.21.tgz
|
||||
version: 2.2.21
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.20
|
||||
created: "2024-10-20T04:00:07.263734809Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 7863701dff5b3fce0795ee8e0b73044b7c88f8777c86a65adc1f5563123565dc
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.20/kubevpn-2.2.20.tgz
|
||||
version: 2.2.20
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.19
|
||||
created: "2024-10-10T00:47:08.858011096Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: be2c672081307c03b7fe6b635d524c8f3f73d70ae3316efa85e781a62c25a46d
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.19/kubevpn-2.2.19.tgz
|
||||
version: 2.2.19
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.18
|
||||
created: "2024-09-10T09:39:11.71407425Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 2d953103425ca2a087a2d521c9297662f97b72e78cf831e947942f292bbcc643
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.18/kubevpn-2.2.18.tgz
|
||||
version: 2.2.18
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.17
|
||||
created: "2024-08-03T07:45:55.228743946Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 476317ad82b2c59a623e1fca968c09a28554ebcabec337c1c363e7296bb27514
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.17/kubevpn-2.2.17.tgz
|
||||
version: 2.2.17
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.16
|
||||
created: "2024-07-26T13:43:50.473565863Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 6cdb809d04687197a8defbf4349871c505ac699924833fecc210d8a6d82a9f20
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.16/kubevpn-2.2.16.tgz
|
||||
version: 2.2.16
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.15
|
||||
created: "2024-07-19T15:03:13.558586823Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 279b24976cef25e1dd8a4cd612a7c6a5767cecd4ba386ccab80fc00db76117e7
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.15/kubevpn-2.2.15.tgz
|
||||
version: 2.2.15
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.14
|
||||
created: "2024-07-12T15:24:27.825047662Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 52ab9b89ea3773792bf3839e4a7c23a9ea60a6c72547024dc0907c973a8d34b3
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.14/kubevpn-2.2.14.tgz
|
||||
version: 2.2.14
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.13
|
||||
created: "2024-07-05T15:08:40.140645659Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 610c5528952826839d5636b8bd940ac907ab0e70377e37538063cb53a5f75443
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.13/kubevpn-2.2.13.tgz
|
||||
version: 2.2.13
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.12
|
||||
created: "2024-06-29T15:36:12.429229459Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: a129ac0efda2e2967937407b904d59122e7b9725fb225c0bcbfdf2260337c032
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.12/kubevpn-2.2.12.tgz
|
||||
version: 2.2.12
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.11
|
||||
created: "2024-06-21T14:13:53.982206886Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 3a7fa4cb3e1785da68e422ef151a3c7f621fbe76862b557ae2750af70d34e1ad
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.11/kubevpn-2.2.11.tgz
|
||||
version: 2.2.11
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.10
|
||||
created: "2024-05-21T06:46:20.368800554Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 89be252c9eedb13560224550f06270f8be88049edfb0a46ca170ab5c8c493a6c
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.10/kubevpn-2.2.10.tgz
|
||||
version: 2.2.10
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.9
|
||||
created: "2024-05-14T11:50:54.700148975Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: e94debe7c904e21f791c1e3bb877ca8132888a3bb3c53beaa74e2ff1e7dd8769
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.9/kubevpn-2.2.9.tgz
|
||||
version: 2.2.9
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.8
|
||||
created: "2024-05-03T15:50:13.647253665Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 9e18d0d02f123e5d8f096362daa5e6893d5db1e8447a632585ae23d6ce755489
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.8/kubevpn-2.2.8.tgz
|
||||
version: 2.2.8
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.7
|
||||
created: "2024-04-27T12:11:35.594701859Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 3828f5b20d6bf4c0c7d94654cc33fd8d7b4c5f2aa20a3cc18d18b9298f459456
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.7/kubevpn-2.2.7.tgz
|
||||
version: 2.2.7
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.6
|
||||
created: "2024-04-16T05:44:31.777079658Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 63668930b99e6c18f6dd77a25e5ce2d21579d52a83451f58be3bc0ca32678829
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.6/kubevpn-2.2.6.tgz
|
||||
version: 2.2.6
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.5
|
||||
created: "2024-04-14T08:46:13.877936123Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 8509aeec7584935344bdf465efd8f0d5efb58ef1b7a31fd2738e5c2790f680c4
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.5/kubevpn-2.2.5.tgz
|
||||
version: 2.2.5
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.4
|
||||
created: "2024-04-02T05:15:00.372823536Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: 07e87e648b7ad5688146a356c93c1771e94485c2fd9d5441553d94ce6371c19f
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.4/kubevpn-2.2.4.tgz
|
||||
version: 2.2.4
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.3
|
||||
created: "2024-03-03T11:52:37.856463964Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: cb1b8c210259292488548853bdeb2eb9ef4c60d1643e0d6537174349514dc8e9
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.3/kubevpn-2.2.3.tgz
|
||||
version: 2.2.3
|
||||
- apiVersion: v2
|
||||
appVersion: v2.2.2
|
||||
created: "2024-02-15T13:35:35.121411893Z"
|
||||
description: A Helm chart for KubeVPN
|
||||
digest: b7589312eab83e50db9ae5703a30e76f0b40fd280c81d102a823aeeb61e14c1c
|
||||
name: kubevpn
|
||||
type: application
|
||||
urls:
|
||||
- https://github.com/kubenetworks/kubevpn/releases/download/v2.2.2/kubevpn-2.2.2.tgz
|
||||
version: 2.2.2
|
||||
generated: "2024-11-29T13:03:24.255531082Z"
|
||||
23
charts/kubevpn/.helmignore
Normal file
23
charts/kubevpn/.helmignore
Normal file
@@ -0,0 +1,23 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
6
charts/kubevpn/Chart.yaml
Normal file
6
charts/kubevpn/Chart.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
apiVersion: v2
|
||||
name: kubevpn
|
||||
description: A Helm chart for KubeVPN
|
||||
type: application
|
||||
version: 0.1.0
|
||||
appVersion: "1.16.0"
|
||||
4
charts/kubevpn/templates/NOTES.txt
Normal file
4
charts/kubevpn/templates/NOTES.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
1. Connect to cluster network by running these commands:
|
||||
kubevpn connect --namespace {{ .Release.Namespace }}
|
||||
export POD_IP=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kubevpn.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].status.podIP}")
|
||||
ping $POD_IP
|
||||
63
charts/kubevpn/templates/_helpers.tpl
Normal file
63
charts/kubevpn/templates/_helpers.tpl
Normal file
@@ -0,0 +1,63 @@
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "kubevpn.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "kubevpn.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "kubevpn.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "kubevpn.labels" -}}
|
||||
helm.sh/chart: {{ include "kubevpn.chart" . }}
|
||||
app: kubevpn-traffic-manager
|
||||
{{ include "kubevpn.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "kubevpn.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "kubevpn.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "kubevpn.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
{{- default (include "kubevpn.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else }}
|
||||
{{- default "default" .Values.serviceAccount.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
10
charts/kubevpn/templates/configmap.yaml
Normal file
10
charts/kubevpn/templates/configmap.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
data:
|
||||
DHCP: ""
|
||||
DHCP6: ""
|
||||
ENVOY_CONFIG: ""
|
||||
IPv4_POOLS: "{{ .Values.cidr.pod }} {{ .Values.cidr.service }}"
|
||||
REF_COUNT: "0"
|
||||
133
charts/kubevpn/templates/deployment.yaml
Normal file
133
charts/kubevpn/templates/deployment.yaml
Normal file
@@ -0,0 +1,133 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
labels:
|
||||
{{- include "kubevpn.labels" . | nindent 4 }}
|
||||
spec:
|
||||
{{- if not .Values.autoscaling.enabled }}
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "kubevpn.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
{{- with .Values.podAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "kubevpn.labels" . | nindent 8 }}
|
||||
{{- with .Values.podLabels }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ include "kubevpn.serviceAccountName" . }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
containers:
|
||||
- args:
|
||||
- |2-
|
||||
|
||||
sysctl -w net.ipv4.ip_forward=1
|
||||
sysctl -w net.ipv6.conf.all.disable_ipv6=0
|
||||
sysctl -w net.ipv6.conf.all.forwarding=1
|
||||
update-alternatives --set iptables /usr/sbin/iptables-legacy
|
||||
iptables -F
|
||||
ip6tables -F
|
||||
iptables -P INPUT ACCEPT
|
||||
ip6tables -P INPUT ACCEPT
|
||||
iptables -P FORWARD ACCEPT
|
||||
ip6tables -P FORWARD ACCEPT
|
||||
iptables -t nat -A POSTROUTING -s ${CIDR4} -o eth0 -j MASQUERADE
|
||||
ip6tables -t nat -A POSTROUTING -s ${CIDR6} -o eth0 -j MASQUERADE
|
||||
kubevpn serve -L "tcp://:10800" -L "tun://:8422?net=${TunIPv4}" -L "gtcp://:10801" -L "gudp://:10802" --debug=true
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
env:
|
||||
- name: CIDR4
|
||||
value: 223.254.0.0/16
|
||||
- name: CIDR6
|
||||
value: efff:ffff:ffff:ffff::/64
|
||||
- name: TunIPv4
|
||||
value: 223.254.0.100/16
|
||||
- name: TunIPv6
|
||||
value: efff:ffff:ffff:ffff:ffff:ffff:ffff:9999/64
|
||||
envFrom:
|
||||
- secretRef:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
name: vpn
|
||||
ports:
|
||||
- containerPort: {{ .Values.service.port8422 }}
|
||||
name: 8422-for-udp
|
||||
protocol: UDP
|
||||
- containerPort: {{ .Values.service.port10800 }}
|
||||
name: 10800-for-tcp
|
||||
protocol: TCP
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
- NET_ADMIN
|
||||
privileged: true
|
||||
runAsUser: 0
|
||||
- args:
|
||||
- control-plane
|
||||
- --watchDirectoryFilename
|
||||
- /etc/envoy/envoy-config.yaml
|
||||
command:
|
||||
- kubevpn
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
name: control-plane
|
||||
ports:
|
||||
- containerPort: {{ .Values.service.port9002 }}
|
||||
name: 9002-for-envoy
|
||||
protocol: TCP
|
||||
resources:
|
||||
{{- toYaml .Values.resourcesSmall | nindent 12 }}
|
||||
volumeMounts:
|
||||
- mountPath: /etc/envoy
|
||||
name: envoy-config
|
||||
readOnly: true
|
||||
- args:
|
||||
- webhook
|
||||
command:
|
||||
- kubevpn
|
||||
envFrom:
|
||||
- secretRef:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
name: webhook
|
||||
ports:
|
||||
- containerPort: 80
|
||||
name: 80-for-webhook
|
||||
protocol: TCP
|
||||
resources:
|
||||
{{- toYaml .Values.resourcesSmall | nindent 12 }}
|
||||
{{- with .Values.volumes }}
|
||||
volumes:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
32
charts/kubevpn/templates/hpa.yaml
Normal file
32
charts/kubevpn/templates/hpa.yaml
Normal file
@@ -0,0 +1,32 @@
|
||||
{{- if .Values.autoscaling.enabled }}
|
||||
apiVersion: autoscaling/v2
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
labels:
|
||||
{{- include "kubevpn.labels" . | nindent 4 }}
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
minReplicas: {{ .Values.autoscaling.minReplicas }}
|
||||
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
|
||||
metrics:
|
||||
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
target:
|
||||
type: Utilization
|
||||
averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
|
||||
{{- end }}
|
||||
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
|
||||
- type: Resource
|
||||
resource:
|
||||
name: memory
|
||||
target:
|
||||
type: Utilization
|
||||
averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
72
charts/kubevpn/templates/job.yaml
Normal file
72
charts/kubevpn/templates/job.yaml
Normal file
@@ -0,0 +1,72 @@
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
labels:
|
||||
{{- include "kubevpn.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
"helm.sh/hook": post-install
|
||||
"helm.sh/hook-delete-policy": before-hook-creation
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
{{- with .Values.podAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "kubevpn.labels" . | nindent 8 }}
|
||||
{{- with .Values.podLabels }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
restartPolicy: Never
|
||||
serviceAccountName: {{ include "kubevpn.serviceAccountName" . }}
|
||||
containers:
|
||||
- name: label-ns
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
command:
|
||||
- /bin/bash
|
||||
- -c
|
||||
args:
|
||||
- |2-
|
||||
|
||||
echo "Label namespace {{ .Release.Namespace }}"
|
||||
kubectl label ns {{ .Release.Namespace }} ns={{ .Release.Namespace }}
|
||||
|
||||
echo "Generating https certificate"
|
||||
openssl req -x509 -nodes -days 36500 -newkey rsa:2048 -subj "/CN={{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}.svc" -addext "subjectAltName=DNS:{{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local,DNS:{{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}.svc" -keyout server.key -out server.crt
|
||||
|
||||
export TLS_CRT=$(cat server.crt | base64 | tr -d '\n')
|
||||
echo "Patch mutatingwebhookconfigurations {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}"
|
||||
kubectl patch mutatingwebhookconfigurations {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }} -p "{\"webhooks\":[{\"name\":\"{{ include "kubevpn.fullname" . }}.naison.io\",\"sideEffects\":\"None\",\"admissionReviewVersions\":[\"v1\", \"v1beta1\"],\"clientConfig\":{\"service\":{\"namespace\":\"{{ .Release.Namespace }}\",\"name\":\"{{ include "kubevpn.fullname" . }}\"},\"caBundle\":\"$TLS_CRT\"}}]}"
|
||||
|
||||
export TLS_KEY=$(cat server.key | base64 | tr -d '\n')
|
||||
echo "Patch secret {{ include "kubevpn.fullname" . }}"
|
||||
kubectl patch secret {{ include "kubevpn.fullname" . }} -n {{ .Release.Namespace }} -p "{\"data\":{\"tls_key\":\"$TLS_KEY\",\"tls_crt\":\"$TLS_CRT\"}}"
|
||||
|
||||
echo "Restart the pods..."
|
||||
kubectl scale -n {{ .Release.Namespace }} --replicas=0 deployment/{{ include "kubevpn.fullname" . }}
|
||||
kubectl scale -n {{ .Release.Namespace }} --replicas=1 deployment/{{ include "kubevpn.fullname" . }}
|
||||
|
||||
export POOLS=$(kubectl get cm {{ include "kubevpn.fullname" . }} -n {{ .Release.Namespace }} -o jsonpath='{.data.IPv4_POOLS}')
|
||||
if [[ -z "${POOLS// }" ]];then
|
||||
echo "Cidr is empty"
|
||||
echo "Get pod cidr..."
|
||||
export POD_CIDR=$(kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}' | tr -s '\n' ' ')
|
||||
echo "Get service cidr..."
|
||||
export SVC_CIDR=$(echo '{"apiVersion":"v1","kind":"Service","metadata":{"name":"kubevpn-get-svc-cidr-{{ .Release.Namespace }}", "namespace": "{{ .Release.Namespace }}"},"spec":{"clusterIP":"1.1.1.1","ports":[{"port":443}]}}' | kubectl apply -f - 2>&1 | sed 's/.*valid IPs is //')
|
||||
echo "Pod cidr: $POD_CIDR, service cidr: $SVC_CIDR"
|
||||
echo "Patch configmap {{ include "kubevpn.fullname" . }}"
|
||||
kubectl patch configmap {{ include "kubevpn.fullname" . }} -n {{ .Release.Namespace }} -p "{\"data\":{\"IPv4_POOLS\":\"$POD_CIDR $SVC_CIDR\"}}"
|
||||
else
|
||||
echo "Cidr is NOT empty"
|
||||
fi
|
||||
|
||||
echo "Done~"
|
||||
exit 0
|
||||
36
charts/kubevpn/templates/mutatingwebhookconfiguration.yaml
Normal file
36
charts/kubevpn/templates/mutatingwebhookconfiguration.yaml
Normal file
@@ -0,0 +1,36 @@
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: MutatingWebhookConfiguration
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
- v1beta1
|
||||
clientConfig:
|
||||
caBundle: {{ .Values.tls.crt }}
|
||||
service:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /pods
|
||||
port: 80
|
||||
failurePolicy: Ignore
|
||||
matchPolicy: Equivalent
|
||||
name: {{ include "kubevpn.fullname" . }}.naison.io
|
||||
namespaceSelector:
|
||||
matchLabels:
|
||||
ns: {{ .Release.Namespace }}
|
||||
objectSelector: { }
|
||||
reinvocationPolicy: Never
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- DELETE
|
||||
resources:
|
||||
- pods
|
||||
scope: Namespaced
|
||||
sideEffects: None
|
||||
timeoutSeconds: 15
|
||||
69
charts/kubevpn/templates/role.yaml
Normal file
69
charts/kubevpn/templates/role.yaml
Normal file
@@ -0,0 +1,69 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resourceNames:
|
||||
- {{ include "kubevpn.fullname" . }}
|
||||
resources:
|
||||
- configmaps
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups: [ "" ]
|
||||
resources: [ "namespaces" ]
|
||||
resourceNames: [{{ .Release.Namespace }}]
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- apiGroups: [ "apps" ]
|
||||
resources: [ "deployments/scale", "deployments" ]
|
||||
resourceNames:
|
||||
- {{ include "kubevpn.fullname" . }}
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- update
|
||||
- patch
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- admissionregistration.k8s.io
|
||||
resources:
|
||||
- mutatingwebhookconfigurations
|
||||
resourceNames:
|
||||
- {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
26
charts/kubevpn/templates/rolebinding.yaml
Normal file
26
charts/kubevpn/templates/rolebinding.yaml
Normal file
@@ -0,0 +1,26 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: {{ include "kubevpn.fullname" . }}.{{ .Release.Namespace }}
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
8
charts/kubevpn/templates/secret.yaml
Normal file
8
charts/kubevpn/templates/secret.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
apiVersion: v1
|
||||
data:
|
||||
tls_crt: {{ .Values.tls.crt }}
|
||||
tls_key: {{ .Values.tls.key }}
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
type: Opaque
|
||||
31
charts/kubevpn/templates/service.yaml
Normal file
31
charts/kubevpn/templates/service.yaml
Normal file
@@ -0,0 +1,31 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "kubevpn.fullname" . }}
|
||||
labels:
|
||||
{{- include "kubevpn.labels" . | nindent 4 }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- name: 8422-for-udp
|
||||
port: {{ .Values.service.port8422 }}
|
||||
protocol: UDP
|
||||
targetPort: 8422
|
||||
- name: 10800-for-tcp
|
||||
port: {{ .Values.service.port10800 }}
|
||||
protocol: TCP
|
||||
targetPort: 10800
|
||||
- name: 9002-for-envoy
|
||||
port: {{ .Values.service.port9002 }}
|
||||
protocol: TCP
|
||||
targetPort: 9002
|
||||
- name: 80-for-webhook
|
||||
port: {{ .Values.service.port80 }}
|
||||
protocol: TCP
|
||||
targetPort: 80
|
||||
- name: 53-for-dns
|
||||
port: {{ .Values.service.port53 }}
|
||||
protocol: UDP
|
||||
targetPort: 53
|
||||
selector:
|
||||
{{- include "kubevpn.selectorLabels" . | nindent 4 }}
|
||||
13
charts/kubevpn/templates/serviceaccount.yaml
Normal file
13
charts/kubevpn/templates/serviceaccount.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "kubevpn.serviceAccountName" . }}
|
||||
labels:
|
||||
{{- include "kubevpn.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceAccount.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
|
||||
{{- end }}
|
||||
113
charts/kubevpn/values.yaml
Normal file
113
charts/kubevpn/values.yaml
Normal file
@@ -0,0 +1,113 @@
|
||||
# Default values for kubevpn.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: naison/kubevpn
|
||||
pullPolicy: IfNotPresent
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
tag: ""
|
||||
|
||||
imagePullSecrets: [ ]
|
||||
nameOverride: ""
|
||||
fullnameOverride: "kubevpn-traffic-manager"
|
||||
|
||||
# this filed is import if configured this value
|
||||
# if not configured, it will get this value from cluster automatically
|
||||
cidr:
|
||||
pod: ""
|
||||
service: ""
|
||||
|
||||
tls:
|
||||
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURXVENDQWtHZ0F3SUJBZ0lJU0NmUDdHeHVhUkl3RFFZSktvWklodmNOQVFFTEJRQXdNREV1TUN3R0ExVUUKQXd3bGEzVmlaWFp3YmkxMGNtRm1abWxqTFcxaGJtRm5aWEl0WTJGQU1UY3dOamsyTnpjd01EQWVGdzB5TkRBeQpNRE14TWpReE5EQmFGdzB5TlRBeU1ESXhNalF4TkRCYU1DMHhLekFwQmdOVkJBTU1JbXQxWW1WMmNHNHRkSEpoClptWnBZeTF0WVc1aFoyVnlRREUzTURZNU5qYzNNREF3Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXcKZ2dFS0FvSUJBUURzVnNleEVpVG00dmlleUhEeU5SbldKbXNiaFBWV24yTkgvNi9wUGVBT3ZUbXgwSDdHUnZJLwpzMzVoZW9EWExhdFVmaDlXT1hXdzRqaGZsdUdWQWlzZGs2Y2ZkS1hVVzJheXpRbFpZd1ZMTzdUUHFoeWF0UHVpCmpRYVB2bUErRGNYMHJRc2Y3SFJwVWhjVTJ1QTJ4WGhZNy9QWWFUdzhkU0NTTHFTK2ZLM3poc0lONTFrYnIzdG4KU2FKcWFybDNhSU82N1JvdmNZbmxERG9XTzFwS1ZSUmROVkM1anVtREJOSWdOam5TSTY5QTFydzR0REkwdjcxWQpPRmhjYnUwNnFVdkNNU1JzR3F5ZkhOeUlXakVvcnk4Wk0xVExlcnZhTk12WlFTRndRNk5SRExHYXNlbTBlNTRXCmVublA0OVpIR1FhTjllYnJQSkJuL2pQQ3p0NlFDMkg5QWdNQkFBR2plakI0TUE0R0ExVWREd0VCL3dRRUF3SUYKb0RBVEJnTlZIU1VFRERBS0JnZ3JCZ0VGQlFjREFUQU1CZ05WSFJNQkFmOEVBakFBTUI4R0ExVWRJd1FZTUJhQQpGQVA3WmhvcGsvbEc3MVNCMk42QkpKdDI2eXhuTUNJR0ExVWRFUVFiTUJtQ0YydDFZbVYyY0c0dGRISmhabVpwCll5MXRZVzVoWjJWeU1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQVhYWk1WazhhQWwwZTlqUWRQTDc3ZVZOL3kKY1ZZZzRBVDlhdkh0UXV2UkZnOU80Z3JMaFVDQnoyN25wdlZZcHNMbmdEMTFRTXpYdHlsRDNMNDJNQ3V0Wnk5VQorL1BCL291ajQzWkZUckJDbk9DZDl6elE2MXZSL1RmbUFrTUhObTNZYjE1OGt2V0ZhNVlBdytRVi9vRDNUcGlXClREVTZXNkxvRFg5N0lNSFk0L3VLNTNzbXVLMjh5VzduSVVrbnpqN3h5UzVOWTFZaVNUN0w2ZFZ0VVppR1FUK00KRk16ODVRcTJOTWVXU1lKTmhhQVk5WEpwMXkrcEhoeWpPVFdjSEFNYmlPR29mODM5N1R6YmUyWHdNQ3BGMWc5NwpMaHZERnNsNzcyOWs1NFJVb1d2ZjFIVFFxL2R6cVBQTTNhWGpTbXFWUEV2Zk5qeGNhZnFnNHBaRmdzYzEKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJREpEQ0NBZ3lnQXdJQkFnSUlJMmROaFBKY0Uxc3dEUVlKS29aSWh2Y05BUUVMQlFBd01ERXVNQ3dHQTFVRQpBd3dsYTNWaVpYWndiaTEwY21GbVptbGpMVzFoYm1GblpYSXRZMkZBTVRjd05qazJOemN3TURBZUZ3MHlOREF5Ck1ETXhNalF4TkRCYUZ3MHlOVEF5TURJeE1qUXhOREJhTURBeExqQXNCZ05WQkFNTUpXdDFZbVYyY0c0dGRISmgKWm1acFl5MXRZVzVoWjJWeUxXTmhRREUzTURZNU5qYzNNREF3Z2dFaU1BMEdDU3FHU0liM0R
RRUJBUVVBQTRJQgpEd0F3Z2dFS0FvSUJBUURBQVpBdEZaTzJEZG9BVTUxWnRiVjI0QkVGN3RkakMzTzBPdEE2UURYTlVwNWlZZGdjCjdORVlGZE55YXltTWZMUVFGTWZqZFcxNWpDQ0N4KzFFMm1KQTVZa0ZFcXJTeDA3Z1pkKy9hcU13ZkhDT0ZTM0UKSUROdzBKYlBGVHZuSGsyZHVXby8zT1BnVmpONWw2UTBWaE10WkJEc2haVHVvSUhWaTJZcldDdnNkMU9mWFVyMwo0Y0ZJUkJ2OW5mNDIzdWthajYxdisrRDd6K3Y0bEN4R0JtUDhpYXFaNFVlckxIdWF2N1hQUnZ4QmQzNDBGY2diCm5TZVUxTXZmcTgvOUg4VTRzeWRGaUpZVUs1RFhkWU15NEw0RlMvbXZRaWR1TU5lWUw1Y2xHSXZTNGFzQjl2QlMKM0ZIY1IrQk1xVzFQWUdDc2YyL0RvdVNRVVNhcnB5VU5aczZKQWdNQkFBR2pRakJBTUE0R0ExVWREd0VCL3dRRQpBd0lDcERBUEJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJRRCsyWWFLWlA1UnU5VWdkamVnU1NiCmR1c3NaekFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBVGFNR0NLK2YxSmdKaXplVjlla3ZhckhDZHpmZzJNZkQKV2pCeFUzMXNabE1vZU9GS0hPdndjMVliTzVNTStHTGM0bGhMS2VHV1pwQmVRV0lFamo4V01wa3k2M2VtUUl5eQpOT2hjdVBUTFhCQ0JkS1lhUU1LcU9mN3c4MEw2cVRKclFER0t0a0MxVzEwbFJzbUd0TEtBbDVjU0w4VFRSZVhXCjhiNXRGOFd5Yms1Vm12VWtxdEpkSVNJTjdVOG5nV21WRUVOZFcvckNqclI5TllaSXZBZk9mS1Zrc1JuZEJaQ0kKOXdxVUI2K2JITEJBWjNpV293ZFhpRGhLMSt5Z2ZwNnpUcW9LRmxOWi8rRTNkS0tpbStyZFFGSmIvNTNvU2xaaApwMkVkT1ZNYU1mRjh1ZFhDdE44WjZnVHpPWkJxN1pmWjVpMlU1eFQ2aFNxRjFjT1ZuQS9idmc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBN0ZiSHNSSWs1dUw0bnNodzhqVVoxaVpyRzRUMVZwOWpSLyt2NlQzZ0RyMDVzZEIrCnhrYnlQN04rWVhxQTF5MnJWSDRmVmpsMXNPSTRYNWJobFFJckhaT25IM1NsMUZ0bXNzMEpXV01GU3p1MHo2b2MKbXJUN29vMEdqNzVnUGczRjlLMExIK3gwYVZJWEZOcmdOc1Y0V08vejJHazhQSFVna2k2a3ZueXQ4NGJDRGVkWgpHNjk3WjBtaWFtcTVkMmlEdXUwYUwzR0o1UXc2Rmp0YVNsVVVYVFZRdVk3cGd3VFNJRFk1MGlPdlFOYThPTFF5Ck5MKzlXRGhZWEc3dE9xbEx3akVrYkJxc254emNpRm94S0s4dkdUTlV5M3E3MmpUTDJVRWhjRU9qVVF5eG1ySHAKdEh1ZUZucDV6K1BXUnhrR2pmWG02enlRWi80endzN2VrQXRoL1FJREFRQUJBb0lCQVFEWkRaWVdsS0JaZ0Nodgp3NHlmbFk4bDgyQzVCSEpCM041VWVJbjVmejh3cWk2N2xNMXBraXpYdmlTYXArUitPczQ0S2lEamtwLzVGTHBMCmFBbkRUUnVGN1Y0MmNHNEFTdlZWenlMLytnWVpvenNhNFpPbHJnUFF0UTVLbzhCR0hXWXBvV2N2S1gxOFlNMGIKOVN5b2dORlhkUUNSUjR6dnhXNWxjdnNRaXZkRFNFTUJhbW00bFpEM0ZtUm5HVGlpaUVNSis2SFdlR1lBS1RMSgoxN0NnejZaWjg1bGtUZ0dxeEUrWkQwNDJGYWdJZlJORVI0QmZOMlp6NU5CU3RnMTJFdUpXWmRGcWpxSHlwbnNjCjNjbEd0U1Z5VStvWUFUWnV5Y2VMNVIwZUdzdTB6ZHhLT3ZzSm9yVWZ0dlMrUGovclJxWHVjOVdXSkFLU1FDVm0Ka1I1Y2M4ak5Bb0dCQU8wYkVrNTdtZWYwcXNKT0U3TFlFV1hRRFZiTmhnZ1E2eTlBZlNnVjZDMFFDdC93YkVGaQo0Rm41bTdhSHdqZUJ5OFJnMGhGbTdVNThCb3FyNnBQNFp6MEhwY1ZSTmVLeTF6R0wreFRJRXFnTXQxei9TYVE0CkIwWEZ4Ulg3d2pjeit2OC9GOVdsOElLbHhBWjhxNXd6aHNFUVVYcVIxTzF1T2FjRktSdXg3OU1UQW9HQkFQOHMKRVJBa1R3WEV3UU9ya2dQOW5tTHZLYXMwM0J6UXUrOFBtQWlsOGFmbVR5ZEFWdzJKaHBwOFlUQzl6NDM3VXU4Ngpta2lOVHpRL3MvQ1lCNEdJVVFCMEdlTDJtc2VjdWNaUHhTSW10dElSOWY4bjk2NEpuL3RtVUd4VXRFaWhWdER4ClZCdFBiWmNzc2E5VVVCRFVqRnZJSUdPTGlqSVdxbW8zM3htT0tJaXZBb0dCQU5HV2k0RWFtdnBCK1N1V3JxejUKZDYrQzBEZTVwcys4Zk5nZzdrRWYxRUw1R2xQSGh6bnBPQjN3bWFjb3JCSTZ4cTlKVW9lVmJ4RmdhcnZycVlpeApIRGtEYUpKWjdnTDlTV0YvdGlzeGkrUkdrVk5BU28xQ0JaTzBkVG13ZUlZcGlhWlUxREhENUN6b2NMVzNRRTdyCjhTTDUxTHcrNm5RU2FoM3NYdUVmVWJwSEFvR0JBTk1FNlROMUkxaDg1cldYVEJBNnk2RzdjTFVoNktsM3dRTW8KM1N6QnRyK0h5WXVIUExaNE5iVktDTUhiSm1xZkhXMnpBK1hkM2xNeUh5ZG5Ra1hQcWxUNnJuR3dTRDJ0RVVDNwp0U1hSNkR4L0YvVWpZME1zdUgyWmxnYVFZZXJ5YWE0dTlNUUZBbmNUUWZuaGVya0FYUGFGNEtzUnVYNUVtamR1Cjd2UGVTUTBIQW9HQUM0ZlJmZnFFM3RRdWxSeUJVeHhKNHl
PaWJiVlpCV1hxWHRzMU0wczdsZ1YxaGVrYis1VmMKVTZ3MFh2T0pTaEZPaGF6UVdseVZUejhmSVdSa1BXa2MzSzE1dWx6cmh6NWZVa0dYOEw0OGMrTHlaSzZ1M2ZRVgpyL1pRV3JsYlZSWlhRVGhuaGhOM1Jodm96SlZZV0lpckVyMGp3VmRaQWRUYW1XZEpTQ3J4WE1NPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
# Automatically mount a ServiceAccount's API credentials?
|
||||
automount: true
|
||||
# Annotations to add to the service account
|
||||
annotations: { }
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
|
||||
podAnnotations: { }
|
||||
podLabels:
|
||||
|
||||
podSecurityContext: { }
|
||||
# fsGroup: 2000
|
||||
|
||||
securityContext: { }
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsNonRoot: true
|
||||
# runAsUser: 1000
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port8422: 8422
|
||||
port9002: 9002
|
||||
port10800: 10800
|
||||
port80: 80
|
||||
port53: 53
|
||||
|
||||
resources:
|
||||
limits:
|
||||
cpu: "2"
|
||||
memory: 2Gi
|
||||
requests:
|
||||
cpu: 500m
|
||||
memory: 512Mi
|
||||
resourcesSmall:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: http
|
||||
|
||||
autoscaling:
|
||||
enabled: false
|
||||
minReplicas: 1
|
||||
maxReplicas: 1
|
||||
targetCPUUtilizationPercentage: 80
|
||||
# targetMemoryUtilizationPercentage: 80
|
||||
|
||||
# Additional volumes on the output Deployment definition.
|
||||
volumes:
|
||||
- configMap:
|
||||
defaultMode: 420
|
||||
items:
|
||||
- key: ENVOY_CONFIG
|
||||
path: envoy-config.yaml
|
||||
name: kubevpn-traffic-manager
|
||||
optional: false
|
||||
name: envoy-config
|
||||
|
||||
|
||||
# Additional volumeMounts on the output Deployment definition.
|
||||
volumeMounts: [ ]
|
||||
# - name: foo
|
||||
# mountPath: "/etc/foo"
|
||||
# readOnly: true
|
||||
|
||||
nodeSelector: { }
|
||||
|
||||
tolerations: [ ]
|
||||
|
||||
affinity: { }
|
||||
203
cmd/kubevpn/cmds/alias.go
Normal file
203
cmd/kubevpn/cmds/alias.go
Normal file
@@ -0,0 +1,203 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
yaml "sigs.k8s.io/yaml/goyaml.v3"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
// CmdAlias
|
||||
/**
|
||||
Name: test
|
||||
Description: this is a test environment
|
||||
Needs: test1
|
||||
Flags:
|
||||
- connect
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
- --lite
|
||||
---
|
||||
|
||||
Name: test1
|
||||
Description: this is another test environment
|
||||
Flags:
|
||||
- connect
|
||||
- --kubeconfig=~/.kube/jumper_config
|
||||
- --namespace=test
|
||||
- --extra-hosts=xxx.com
|
||||
*/
|
||||
func CmdAlias(f cmdutil.Factory) *cobra.Command {
|
||||
var localFile, remoteAddr string
|
||||
cmd := &cobra.Command{
|
||||
Use: "alias",
|
||||
Short: i18n.T("Config file alias to execute command simply"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Config file alias to execute command simply, just like ssh alias config
|
||||
|
||||
It will read ~/.kubevpn/config.yaml file as config, also support special file path
|
||||
by flag -f. It also supports depends relationship, like one cluster api server needs to
|
||||
access via another cluster, you can use syntax needs. it will do action to needs cluster first
|
||||
and then do action to target cluster
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
If you have following config in your ~/.kubevpn/config.yaml
|
||||
|
||||
Name: dev
|
||||
Needs: jumper
|
||||
Flags:
|
||||
- connect
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=default
|
||||
- --lite
|
||||
---
|
||||
|
||||
Name: jumper
|
||||
Flags:
|
||||
- connect
|
||||
- --kubeconfig=~/.kube/jumper_config
|
||||
- --namespace=test
|
||||
- --extra-hosts=xxx.com
|
||||
|
||||
Config file support three field: Name,Needs,Flags
|
||||
|
||||
# Use kubevpn alias config to simply execute command, connect to cluster network by order: jumper --> dev
|
||||
kubevpn alias dev
|
||||
|
||||
# kubevpn alias jumper, just connect to cluster jumper
|
||||
kubevpn alias jumper
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
if localFile != "" {
|
||||
_, err = os.Stat(localFile)
|
||||
}
|
||||
return err
|
||||
},
|
||||
Args: cobra.MatchAll(cobra.ExactArgs(1)),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
configs, err := ParseAndGet(localFile, remoteAddr, args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
name, err := os.Executable()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, config := range configs {
|
||||
c := exec.Command(name, config.Flags...)
|
||||
c.Stdout = os.Stdout
|
||||
c.Stdin = os.Stdin
|
||||
c.Stderr = os.Stderr
|
||||
fmt.Printf("Alias: %s\n", config.Name)
|
||||
if config.Description != "" {
|
||||
fmt.Printf("Description: %s\n", config.Description)
|
||||
}
|
||||
fmt.Printf("Command: %v\n", c.Args)
|
||||
err = c.Run()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVarP(&localFile, "file", "f", config.GetConfigFilePath(), "Config file location")
|
||||
cmd.Flags().StringVarP(&remoteAddr, "remote", "r", "", "Remote config file, eg: https://raw.githubusercontent.com/kubenetworks/kubevpn/master/pkg/config/config.yaml")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func ParseAndGet(localFile, remoteAddr string, aliasName string) ([]Config, error) {
|
||||
var content []byte
|
||||
var err error
|
||||
var path string
|
||||
if localFile != "" {
|
||||
path = localFile
|
||||
content, err = os.ReadFile(path)
|
||||
} else if remoteAddr != "" {
|
||||
path = remoteAddr
|
||||
content, err = util.DownloadFileStream(path)
|
||||
} else {
|
||||
path = config.GetConfigFilePath()
|
||||
content, err = os.ReadFile(path)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
list, err := ParseConfig(content)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
configs, err := GetConfigs(list, aliasName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(configs) == 0 {
|
||||
var names []string
|
||||
for _, c := range list {
|
||||
if c.Name != "" {
|
||||
names = append(names, c.Name)
|
||||
}
|
||||
}
|
||||
err = fmt.Errorf("failed to find any aliases for the name: '%s', avaliable: [%s], please verify your configuration file %s", aliasName, strings.Join(names, ", "), path)
|
||||
return nil, err
|
||||
}
|
||||
return configs, nil
|
||||
}
|
||||
|
||||
func ParseConfig(file []byte) ([]Config, error) {
|
||||
decoder := yaml.NewDecoder(strings.NewReader(string(file)))
|
||||
var configs []Config
|
||||
for {
|
||||
var cfg Config
|
||||
err := decoder.Decode(&cfg)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
configs = append(configs, cfg)
|
||||
}
|
||||
return configs, nil
|
||||
}
|
||||
|
||||
func GetConfigs(configs []Config, name string) ([]Config, error) {
|
||||
m := make(map[string]Config)
|
||||
for _, config := range configs {
|
||||
m[config.Name] = config
|
||||
}
|
||||
var result []Config
|
||||
var set []string
|
||||
for !sets.New[string](set...).Has(name) {
|
||||
config, ok := m[name]
|
||||
if ok {
|
||||
result = append([]Config{config}, result...)
|
||||
set = append(set, name)
|
||||
name = config.Needs
|
||||
if name == "" {
|
||||
return result, nil
|
||||
}
|
||||
} else {
|
||||
return result, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("loop jump detected: %s. verify your configuration", strings.Join(append(set, name), " -> "))
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
Name string `yaml:"Name"`
|
||||
Description string `yaml:"Description"`
|
||||
Needs string `yaml:"Needs,omitempty"`
|
||||
Flags []string `yaml:"Flags,omitempty"`
|
||||
}
|
||||
227
cmd/kubevpn/cmds/alias_test.go
Normal file
227
cmd/kubevpn/cmds/alias_test.go
Normal file
@@ -0,0 +1,227 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"log"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAlias(t *testing.T) {
|
||||
str := `Name: test
|
||||
Needs: test1
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
|
||||
---
|
||||
|
||||
Name: test1
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
- --extra-hosts=xxx.com`
|
||||
_, err := ParseConfig([]byte(str))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckLoop(t *testing.T) {
|
||||
str := `Name: test
|
||||
Needs: test1
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
|
||||
---
|
||||
|
||||
Name: test1
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
- --extra-hosts=xxx.com`
|
||||
_, err := ParseConfig([]byte(str))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoop(t *testing.T) {
|
||||
data := []struct {
|
||||
Config string
|
||||
Run string
|
||||
ExpectError bool
|
||||
ExpectOrder []string
|
||||
}{
|
||||
{
|
||||
Config: `
|
||||
Name: test
|
||||
Needs: test1
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
|
||||
---
|
||||
|
||||
Name: test1
|
||||
Needs: test
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
- --extra-hosts=xxx.com
|
||||
`,
|
||||
Run: "test",
|
||||
ExpectError: true,
|
||||
ExpectOrder: nil,
|
||||
},
|
||||
{
|
||||
Config: `
|
||||
Name: test
|
||||
Needs: test1
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
|
||||
---
|
||||
|
||||
Name: test1
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
- --extra-hosts=xxx.com
|
||||
`,
|
||||
Run: "test",
|
||||
ExpectError: false,
|
||||
ExpectOrder: []string{"test1", "test"},
|
||||
},
|
||||
{
|
||||
Config: `
|
||||
Name: test
|
||||
Needs: test1
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
|
||||
---
|
||||
|
||||
Name: test1
|
||||
Needs: test2
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
- --extra-hosts=xxx.com
|
||||
`,
|
||||
Run: "test",
|
||||
ExpectError: false,
|
||||
ExpectOrder: []string{"test1", "test"},
|
||||
},
|
||||
{
|
||||
Config: `
|
||||
Name: test
|
||||
Needs: test1
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
|
||||
---
|
||||
|
||||
Name: test1
|
||||
Needs: test2
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
- --extra-hosts=xxx.com
|
||||
|
||||
---
|
||||
|
||||
Name: test2
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
- --extra-hosts=xxx.com
|
||||
`,
|
||||
Run: "test",
|
||||
ExpectError: false,
|
||||
ExpectOrder: []string{"test2", "test1", "test"},
|
||||
},
|
||||
{
|
||||
Config: `
|
||||
Name: test
|
||||
Needs: test1
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
|
||||
---
|
||||
|
||||
Name: test1
|
||||
Needs: test2
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
- --extra-hosts=xxx.com
|
||||
|
||||
---
|
||||
|
||||
Name: test2
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
- --extra-hosts=xxx.com
|
||||
`,
|
||||
Run: "test2",
|
||||
ExpectError: false,
|
||||
ExpectOrder: []string{"test2"},
|
||||
},
|
||||
{
|
||||
Config: `
|
||||
Name: test
|
||||
Needs: test1
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
|
||||
---
|
||||
|
||||
Name: test1
|
||||
Needs: test2
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
- --extra-hosts=xxx.com
|
||||
|
||||
---
|
||||
|
||||
Name: test2
|
||||
Flags:
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=test
|
||||
- --extra-hosts=xxx.com
|
||||
`,
|
||||
Run: "test1",
|
||||
ExpectError: false,
|
||||
ExpectOrder: []string{"test2", "test1"},
|
||||
},
|
||||
}
|
||||
for _, datum := range data {
|
||||
configs, err := ParseConfig([]byte(datum.Config))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
getConfigs, err := GetConfigs(configs, datum.Run)
|
||||
if err != nil && !datum.ExpectError {
|
||||
log.Fatal(err)
|
||||
} else if err != nil {
|
||||
}
|
||||
if datum.ExpectError {
|
||||
continue
|
||||
}
|
||||
var c []string
|
||||
for _, config := range getConfigs {
|
||||
c = append(c, config.Name)
|
||||
}
|
||||
if !reflect.DeepEqual(c, datum.ExpectOrder) {
|
||||
log.Fatalf("Not match, expect: %v, real: %v", datum.ExpectOrder, c)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2,69 +2,76 @@ package cmds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
pkgerr "github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
utilcomp "k8s.io/kubectl/pkg/util/completion"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
// CmdClone multiple cluster operate, can start up one deployment to another cluster
|
||||
// kubectl exec POD_NAME -c CONTAINER_NAME /sbin/killall5 or ephemeralcontainers
|
||||
func CmdClone(f cmdutil.Factory) *cobra.Command {
|
||||
var options = handler.CloneOptions{}
|
||||
var sshConf = &util.SshConfig{}
|
||||
var sshConf = &pkgssh.SshConfig{}
|
||||
var extraRoute = &handler.ExtraRouteInfo{}
|
||||
var transferImage bool
|
||||
var syncDir string
|
||||
cmd := &cobra.Command{
|
||||
Use: "clone",
|
||||
Short: i18n.T("Clone workloads to target-kubeconfig cluster with same volume、env、and network"),
|
||||
Long: templates.LongDesc(i18n.T(`Clone workloads to target-kubeconfig cluster with same volume、env、and network`)),
|
||||
Short: i18n.T("Clone workloads to run in target-kubeconfig cluster with same volume、env、and network"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Clone workloads to run into target-kubeconfig cluster with same volume、env、and network
|
||||
|
||||
In this way, you can startup another deployment in same cluster or not, but with different image version,
|
||||
it also supports service mesh proxy. only traffic with special header will hit to cloned_resource.
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# clone
|
||||
- clone deployment in current cluster and current namespace
|
||||
- clone deployment run into current cluster and current namespace
|
||||
kubevpn clone deployment/productpage
|
||||
|
||||
- clone deployment in current cluster with different namespace
|
||||
- clone deployment run into current cluster with different namespace
|
||||
kubevpn clone deployment/productpage -n test
|
||||
|
||||
- clone deployment to another cluster
|
||||
- clone deployment run into another cluster
|
||||
kubevpn clone deployment/productpage --target-kubeconfig ~/.kube/other-kubeconfig
|
||||
|
||||
- clone multiple workloads
|
||||
- clone multiple workloads run into current cluster and current namespace
|
||||
kubevpn clone deployment/authors deployment/productpage
|
||||
or
|
||||
kubevpn clone deployment authors productpage
|
||||
|
||||
# clone with mesh, traffic with header a=1, will hit cloned workloads, otherwise hit origin workloads
|
||||
kubevpn clone deployment/productpage --headers a=1
|
||||
# clone with mesh, traffic with header foo=bar, will hit cloned workloads, otherwise hit origin workloads
|
||||
kubevpn clone deployment/productpage --headers foo=bar
|
||||
|
||||
# clone workloads which api-server behind of bastion host or ssh jump host
|
||||
kubevpn clone deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem --headers a=1
|
||||
kubevpn clone deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem --headers foo=bar
|
||||
|
||||
# it also support ProxyJump, like
|
||||
# It also supports ProxyJump, like
|
||||
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
|
||||
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
|
||||
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
|
||||
kubevpn clone service/productpage --ssh-alias <alias> --headers a=1
|
||||
kubevpn clone service/productpage --ssh-alias <alias> --headers foo=bar
|
||||
|
||||
`)),
|
||||
# Support ssh auth GSSAPI
|
||||
kubevpn clone service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
|
||||
kubevpn clone service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
|
||||
kubevpn clone service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
// not support temporally
|
||||
if options.Engine == config.EngineGvisor {
|
||||
return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
|
||||
}
|
||||
util.InitLoggerForClient(false)
|
||||
// startup daemon process and sudo process
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
@@ -81,19 +88,37 @@ func CmdClone(f cmdutil.Factory) *cobra.Command {
|
||||
// special empty string, eg: --target-registry ""
|
||||
options.IsChangeTargetRegistry = cmd.Flags().Changed("target-registry")
|
||||
|
||||
bytes, ns, err := util.ConvertToKubeconfigBytes(f)
|
||||
if syncDir != "" {
|
||||
local, remote, err := util.ParseDirMapping(syncDir)
|
||||
if err != nil {
|
||||
return pkgerr.Wrapf(err, "options 'sync' is invalid, %s", syncDir)
|
||||
}
|
||||
options.LocalDir = local
|
||||
options.RemoteDir = remote
|
||||
} else {
|
||||
options.RemoteDir = config.DefaultRemoteDir
|
||||
}
|
||||
|
||||
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !sshConf.IsEmpty() {
|
||||
if ip := util.GetAPIServerFromKubeConfigBytes(bytes); ip != nil {
|
||||
extraRoute.ExtraCIDR = append(extraRoute.ExtraCIDR, ip.String())
|
||||
}
|
||||
}
|
||||
logLevel := log.InfoLevel
|
||||
if config.Debug {
|
||||
logLevel = log.DebugLevel
|
||||
}
|
||||
req := &rpc.CloneRequest{
|
||||
KubeconfigBytes: string(bytes),
|
||||
Namespace: ns,
|
||||
Headers: options.Headers,
|
||||
Workloads: args,
|
||||
ExtraCIDR: options.ExtraCIDR,
|
||||
ExtraDomain: options.ExtraDomain,
|
||||
UseLocalDNS: options.UseLocalDNS,
|
||||
OriginKubeconfigPath: util.GetKubeconfigPath(f),
|
||||
ExtraRoute: extraRoute.ToRPC(),
|
||||
OriginKubeconfigPath: util.GetKubeConfigPath(f),
|
||||
Engine: string(options.Engine),
|
||||
SshJump: sshConf.ToRPC(),
|
||||
TargetKubeconfig: options.TargetKubeconfig,
|
||||
@@ -104,44 +129,38 @@ func CmdClone(f cmdutil.Factory) *cobra.Command {
|
||||
IsChangeTargetRegistry: options.IsChangeTargetRegistry,
|
||||
TransferImage: transferImage,
|
||||
Image: config.Image,
|
||||
Level: int32(log.DebugLevel),
|
||||
Level: int32(logLevel),
|
||||
LocalDir: options.LocalDir,
|
||||
RemoteDir: options.RemoteDir,
|
||||
}
|
||||
cli := daemon.GetClient(false)
|
||||
resp, err := cli.Clone(cmd.Context(), req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
recv, err := resp.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, recv.GetMessage())
|
||||
err = util.PrintGRPCStream[rpc.CloneResponse](resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
util.Print(os.Stdout, "Now clone workloads running successfully on other cluster, enjoy it :)")
|
||||
util.Print(os.Stdout, config.Slogan)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringToStringVarP(&options.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to clone workloads, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to clone workloads, format is k=v, like: k1=v1,k2=v2")
|
||||
cmd.Flags().StringToStringVarP(&options.Headers, "headers", "H", map[string]string{}, "Traffic with special headers (use `and` to match all headers) with reverse it to target cluster cloned workloads, If not special, redirect all traffic to target cluster cloned workloads. eg: --headers foo=bar --headers env=dev")
|
||||
cmd.Flags().BoolVar(&config.Debug, "debug", false, "Enable debug mode or not, true or false")
|
||||
cmd.Flags().StringVar(&config.Image, "image", config.Image, "Use this image to startup container")
|
||||
cmd.Flags().StringArrayVar(&options.ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
|
||||
cmd.Flags().StringArrayVar(&options.ExtraDomain, "extra-domain", []string{}, "Extra domain string, the resolved ip will add to route table, eg: --extra-domain test.abc.com --extra-domain foo.test.com")
|
||||
cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
|
||||
cmd.Flags().StringVar((*string)(&options.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
|
||||
cmd.Flags().BoolVar(&options.UseLocalDNS, "use-localdns", false, "if use-lcoaldns is true, kubevpn will start coredns listen at 53 to forward your dns queries. only support on linux now")
|
||||
cmd.Flags().StringVar((*string)(&options.Engine), "netstack", string(config.EngineSystem), fmt.Sprintf(`network stack ("%s"|"%s") %s: use gvisor (good compatibility), %s: use raw mode (best performance, relays on iptables SNAT)`, config.EngineGvisor, config.EngineSystem, config.EngineGvisor, config.EngineSystem))
|
||||
|
||||
cmd.Flags().StringVar(&options.TargetImage, "target-image", "", "Clone container use this image to startup container, if not special, use origin image")
|
||||
cmd.Flags().StringVar(&options.TargetContainer, "target-container", "", "Clone container use special image to startup this container, if not special, use origin image")
|
||||
cmd.Flags().StringVar(&options.TargetNamespace, "target-namespace", "", "Clone workloads in this namespace, if not special, use origin namespace")
|
||||
cmd.Flags().StringVar(&options.TargetKubeconfig, "target-kubeconfig", "", "Clone workloads will create in this cluster, if not special, use origin cluster")
|
||||
cmd.Flags().StringVar(&options.TargetRegistry, "target-registry", "", "Clone workloads will create this registry domain to replace origin registry, if not special, use origin registry")
|
||||
cmd.Flags().StringVar(&syncDir, "sync", "", "Sync local dir to remote pod dir. format: LOCAL_DIR:REMOTE_DIR, eg: ~/code:/app/code")
|
||||
|
||||
addSshFlags(cmd, sshConf)
|
||||
handler.AddExtraRoute(cmd.Flags(), extraRoute)
|
||||
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
|
||||
cmd.ValidArgsFunction = utilcomp.ResourceTypeAndNameCompletionFunc(f)
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -9,9 +9,10 @@ import (
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdConfig(f cmdutil.Factory) *cobra.Command {
|
||||
@@ -25,27 +26,27 @@ func CmdConfig(f cmdutil.Factory) *cobra.Command {
|
||||
}
|
||||
|
||||
func cmdConfigAdd(f cmdutil.Factory) *cobra.Command {
|
||||
var sshConf = &util.SshConfig{}
|
||||
var sshConf = &pkgssh.SshConfig{}
|
||||
cmd := &cobra.Command{
|
||||
Use: "add",
|
||||
Short: "Proxy kubeconfig",
|
||||
Short: i18n.T("Proxy kubeconfig"),
|
||||
Long: templates.LongDesc(i18n.T(`proxy kubeconfig which behind of ssh jump server`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# proxy api-server which api-server behind of bastion host or ssh jump host
|
||||
kubevpn config add --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
|
||||
|
||||
# it also support ProxyJump, like
|
||||
# It also supports ProxyJump, like
|
||||
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
|
||||
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
|
||||
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
|
||||
kubevpn config add --ssh-alias <alias>
|
||||
`)),
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
// startup daemon process and sudo process
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
bytes, ns, err := util.ConvertToKubeconfigBytes(f)
|
||||
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -59,23 +60,25 @@ func cmdConfigAdd(f cmdutil.Factory) *cobra.Command {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, resp.ClusterID)
|
||||
_, _ = fmt.Fprint(os.Stdout, resp.ClusterID)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
addSshFlags(cmd, sshConf)
|
||||
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func cmdConfigRemove(f cmdutil.Factory) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "remove",
|
||||
Short: "Remove proxy kubeconfig",
|
||||
Long: templates.LongDesc(i18n.T(`Remove proxy kubeconfig which behind of ssh jump server`)),
|
||||
Short: i18n.T("Remove proxy kubeconfig"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Remove proxy kubeconfig which behind of ssh jump server
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# remove proxy api-server which api-server behind of bastion host or ssh jump host
|
||||
kubevpn config remove --kubeconfig /var/folders/30/cmv9c_5j3mq_kthx63sb1t5c0000gn/T/947048961.kubeconfig
|
||||
`)),
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
// startup daemon process and sudo process
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
|
||||
@@ -1,33 +1,43 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdConnect(f cmdutil.Factory) *cobra.Command {
|
||||
var connect = &handler.ConnectOptions{}
|
||||
var sshConf = &util.SshConfig{}
|
||||
var extraRoute = &handler.ExtraRouteInfo{}
|
||||
var sshConf = &pkgssh.SshConfig{}
|
||||
var transferImage, foreground, lite bool
|
||||
cmd := &cobra.Command{
|
||||
Use: "connect",
|
||||
Short: i18n.T("Connect to kubernetes cluster network"),
|
||||
Long: templates.LongDesc(i18n.T(`Connect to kubernetes cluster network`)),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Connect to kubernetes cluster network
|
||||
|
||||
After connect to kubernetes cluster network, you can ping PodIP or
|
||||
curl ServiceIP in local PC, it also supports k8s DNS resolve.
|
||||
Like: curl authors/authors.default/authors.default.svc/authors.default.svc.cluster.local.
|
||||
So you can start up your application in local PC. depends on anything in
|
||||
k8s cluster is ok, connect to them just like in k8s cluster.
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# Connect to k8s cluster network
|
||||
kubevpn connect
|
||||
@@ -35,88 +45,100 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
|
||||
# Connect to api-server behind of bastion host or ssh jump host
|
||||
kubevpn connect --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
|
||||
|
||||
# it also support ProxyJump, like
|
||||
# It also supports ProxyJump, like
|
||||
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
|
||||
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
|
||||
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
|
||||
kubevpn connect --ssh-alias <alias>
|
||||
|
||||
`)),
|
||||
# Support ssh auth GSSAPI
|
||||
kubevpn connect --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
|
||||
kubevpn connect --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
|
||||
kubevpn connect --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
|
||||
|
||||
# Support ssh jump inline
|
||||
kubevpn connect --ssh-jump "--ssh-addr jump.naison.org --ssh-username naison --gssapi-password xxx" --ssh-username root --ssh-addr 127.0.0.1:22 --ssh-keyfile ~/.ssh/dst.pem
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
util.InitLoggerForClient(false)
|
||||
// startup daemon process and sudo process
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
bytes, ns, err := util.ConvertToKubeconfigBytes(f)
|
||||
err := daemon.StartupDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !sshConf.IsEmpty() {
|
||||
if ip := util.GetAPIServerFromKubeConfigBytes(bytes); ip != nil {
|
||||
extraRoute.ExtraCIDR = append(extraRoute.ExtraCIDR, ip.String())
|
||||
}
|
||||
}
|
||||
logLevel := log.InfoLevel
|
||||
if config.Debug {
|
||||
logLevel = log.DebugLevel
|
||||
}
|
||||
req := &rpc.ConnectRequest{
|
||||
KubeconfigBytes: string(bytes),
|
||||
Namespace: ns,
|
||||
ExtraCIDR: connect.ExtraCIDR,
|
||||
ExtraDomain: connect.ExtraDomain,
|
||||
UseLocalDNS: connect.UseLocalDNS,
|
||||
ExtraRoute: extraRoute.ToRPC(),
|
||||
Engine: string(connect.Engine),
|
||||
OriginKubeconfigPath: util.GetKubeconfigPath(f),
|
||||
OriginKubeconfigPath: util.GetKubeConfigPath(f),
|
||||
|
||||
SshJump: sshConf.ToRPC(),
|
||||
TransferImage: transferImage,
|
||||
Foreground: foreground,
|
||||
Image: config.Image,
|
||||
Level: int32(log.DebugLevel),
|
||||
Level: int32(logLevel),
|
||||
}
|
||||
// if is foreground, send to sudo daemon server
|
||||
cli := daemon.GetClient(false)
|
||||
var resp grpc.ClientStream
|
||||
if lite {
|
||||
resp, err := cli.ConnectFork(cmd.Context(), req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
recv, err := resp.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, recv.GetMessage())
|
||||
}
|
||||
resp, err = cli.ConnectFork(cmd.Context(), req)
|
||||
} else {
|
||||
resp, err := cli.Connect(cmd.Context(), req)
|
||||
resp, err = cli.Connect(cmd.Context(), req)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = util.PrintGRPCStream[rpc.ConnectResponse](resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !foreground {
|
||||
util.Print(os.Stdout, config.Slogan)
|
||||
} else {
|
||||
<-cmd.Context().Done()
|
||||
disconnect, err := cli.Disconnect(context.Background(), &rpc.DisconnectRequest{
|
||||
KubeconfigBytes: ptr.To(string(bytes)),
|
||||
Namespace: ptr.To(ns),
|
||||
SshJump: sshConf.ToRPC(),
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("Disconnect error: %v", err)
|
||||
return err
|
||||
}
|
||||
err = util.PrintGRPCStream[rpc.DisconnectResponse](disconnect)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
recv, err := resp.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, recv.GetMessage())
|
||||
}
|
||||
}
|
||||
if !req.Foreground {
|
||||
util.Print(os.Stdout, "Now you can access resources in the kubernetes cluster, enjoy it :)")
|
||||
_, _ = fmt.Fprint(os.Stdout, "Disconnect completed")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
cmd.Flags().BoolVar(&config.Debug, "debug", false, "enable debug mode or not, true or false")
|
||||
cmd.Flags().StringVar(&config.Image, "image", config.Image, "use this image to startup container")
|
||||
cmd.Flags().StringArrayVar(&connect.ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
|
||||
cmd.Flags().StringArrayVar(&connect.ExtraDomain, "extra-domain", []string{}, "Extra domain string, the resolved ip will add to route table, eg: --extra-domain test.abc.com --extra-domain foo.test.com")
|
||||
cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
|
||||
cmd.Flags().BoolVar(&connect.UseLocalDNS, "use-localdns", false, "if use-lcoaldns is true, kubevpn will start coredns listen at 53 to forward your dns queries. only support on linux now")
|
||||
cmd.Flags().StringVar((*string)(&connect.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
|
||||
cmd.Flags().StringVar((*string)(&connect.Engine), "netstack", string(config.EngineSystem), fmt.Sprintf(`network stack ("%s"|"%s") %s: use gvisor (good compatibility), %s: use raw mode (best performance, relays on iptables SNAT)`, config.EngineGvisor, config.EngineSystem, config.EngineGvisor, config.EngineSystem))
|
||||
cmd.Flags().BoolVar(&foreground, "foreground", false, "Hang up")
|
||||
cmd.Flags().BoolVar(&lite, "lite", false, "connect to multiple cluster in lite mode, you needs to special this options")
|
||||
cmd.Flags().BoolVar(&lite, "lite", false, "connect to multiple cluster in lite mode. mode \"lite\": design for only connecting to multiple cluster network. mode \"full\": not only connect to cluster network, it also supports proxy workloads inbound traffic to local PC.")
|
||||
|
||||
addSshFlags(cmd, sshConf)
|
||||
handler.AddExtraRoute(cmd.Flags(), extraRoute)
|
||||
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -1,13 +1,18 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/libnetwork/resolvconf"
|
||||
miekgdns "github.com/miekg/dns"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/controlplane"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/controlplane"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/dns"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdControlPlane(_ cmdutil.Factory) *cobra.Command {
|
||||
@@ -18,12 +23,22 @@ func CmdControlPlane(_ cmdutil.Factory) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "control-plane",
|
||||
Hidden: true,
|
||||
Short: "Control-plane is a envoy xds server",
|
||||
Long: `Control-plane is a envoy xds server, distribute envoy route configuration`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
util.InitLogger(config.Debug)
|
||||
go util.StartupPProf(0)
|
||||
controlplane.Main(watchDirectoryFilename, port, log.StandardLogger())
|
||||
Short: i18n.T("Control-plane is a envoy xds server"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Control-plane is a envoy xds server, distribute envoy route configuration
|
||||
`)),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
util.InitLoggerForServer(config.Debug)
|
||||
go util.StartupPProfForServer(0)
|
||||
go func() {
|
||||
conf, err := miekgdns.ClientConfigFromFile(resolvconf.Path())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Fatal(dns.ListenAndServe("udp", ":53", conf))
|
||||
}()
|
||||
err := controlplane.Main(cmd.Context(), watchDirectoryFilename, port, log.StandardLogger())
|
||||
return err
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVarP(&watchDirectoryFilename, "watchDirectoryFilename", "w", "/etc/envoy/envoy-config.yaml", "full path to directory to watch for files")
|
||||
|
||||
@@ -6,15 +6,14 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/cli-runtime/pkg/genericclioptions"
|
||||
"k8s.io/cli-runtime/pkg/genericiooptions"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/completion"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/cp"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/cp"
|
||||
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
)
|
||||
|
||||
var cpExample = templates.Examples(i18n.T(`
|
||||
@@ -46,29 +45,35 @@ var cpExample = templates.Examples(i18n.T(`
|
||||
# copy reverse proxy api-server behind of bastion host or ssh jump host
|
||||
kubevpn cp deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
|
||||
|
||||
# it also support ProxyJump, like
|
||||
# It also supports ProxyJump, like
|
||||
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
|
||||
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
|
||||
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
|
||||
kubevpn cp deployment/productpage --ssh-alias <alias>
|
||||
kubevpn cp deployment/productpage:/tmp/foo /tmp/bar --ssh-alias <alias>
|
||||
|
||||
# Support ssh auth GSSAPI
|
||||
kubevpn cp deployment/productpage:/tmp/foo /tmp/bar --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
|
||||
kubevpn cp deployment/productpage:/tmp/foo /tmp/bar --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
|
||||
kubevpn cp deployment/productpage:/tmp/foo /tmp/bar --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
|
||||
`,
|
||||
))
|
||||
|
||||
func CmdCp(f cmdutil.Factory) *cobra.Command {
|
||||
o := cp.NewCopyOptions(genericclioptions.IOStreams{
|
||||
o := cp.NewCopyOptions(genericiooptions.IOStreams{
|
||||
In: os.Stdin,
|
||||
Out: os.Stdout,
|
||||
ErrOut: os.Stderr,
|
||||
})
|
||||
var sshConf = &util.SshConfig{}
|
||||
var sshConf = &pkgssh.SshConfig{}
|
||||
cmd := &cobra.Command{
|
||||
Use: "cp <file-spec-src> <file-spec-dest>",
|
||||
DisableFlagsInUseLine: true,
|
||||
Hidden: true,
|
||||
Short: i18n.T("Copy files and directories to and from containers"),
|
||||
Long: i18n.T("Copy files and directories to and from containers. Different between kubectl cp is it will de-reference symbol link."),
|
||||
Example: cpExample,
|
||||
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
cmdutil.CheckErr(handler.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false))
|
||||
cmdutil.CheckErr(pkgssh.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false))
|
||||
|
||||
var comps []string
|
||||
if len(args) == 0 {
|
||||
@@ -80,14 +85,14 @@ func CmdCp(f cmdutil.Factory) *cobra.Command {
|
||||
// complete <namespace>/<pod>
|
||||
namespace := toComplete[:idx]
|
||||
template := "{{ range .items }}{{ .metadata.namespace }}/{{ .metadata.name }}: {{ end }}"
|
||||
comps = completion.CompGetFromTemplate(&template, f, namespace, cmd, []string{"pod"}, toComplete)
|
||||
comps = completion.CompGetFromTemplate(&template, f, namespace, []string{"pod"}, toComplete)
|
||||
} else {
|
||||
// Complete namespaces followed by a /
|
||||
for _, ns := range completion.CompGetResource(f, cmd, "namespace", toComplete) {
|
||||
for _, ns := range completion.CompGetResource(f, "namespace", toComplete) {
|
||||
comps = append(comps, fmt.Sprintf("%s/", ns))
|
||||
}
|
||||
// Complete pod names followed by a :
|
||||
for _, pod := range completion.CompGetResource(f, cmd, "pod", toComplete) {
|
||||
for _, pod := range completion.CompGetResource(f, "pod", toComplete) {
|
||||
comps = append(comps, fmt.Sprintf("%s:", pod))
|
||||
}
|
||||
|
||||
@@ -130,11 +135,6 @@ func CmdCp(f cmdutil.Factory) *cobra.Command {
|
||||
cmd.Flags().BoolVarP(&o.NoPreserve, "no-preserve", "", false, "The copied file/directory's ownership and permissions will not be preserved in the container")
|
||||
cmd.Flags().IntVarP(&o.MaxTries, "retries", "", 0, "Set number of retries to complete a copy operation from a container. Specify 0 to disable or any negative value for infinite retrying. The default is 0 (no retry).")
|
||||
|
||||
// for ssh jumper host
|
||||
cmd.Flags().StringVar(&sshConf.Addr, "ssh-addr", "", "Optional ssh jump server address to dial as <hostname>:<port>, eg: 127.0.0.1:22")
|
||||
cmd.Flags().StringVar(&sshConf.User, "ssh-username", "", "Optional username for ssh jump server")
|
||||
cmd.Flags().StringVar(&sshConf.Password, "ssh-password", "", "Optional password for ssh jump server")
|
||||
cmd.Flags().StringVar(&sshConf.Keyfile, "ssh-keyfile", "", "Optional file with private key for SSH authentication")
|
||||
cmd.Flags().StringVar(&sshConf.ConfigAlias, "ssh-alias", "", "Optional config alias with ~/.ssh/config for SSH authentication")
|
||||
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -1,15 +1,24 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"path/filepath"
|
||||
"runtime/pprof"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/action"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/dns"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdDaemon(_ cmdutil.Factory) *cobra.Command {
|
||||
@@ -17,39 +26,65 @@ func CmdDaemon(_ cmdutil.Factory) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "daemon",
|
||||
Short: i18n.T("Startup kubevpn daemon server"),
|
||||
Long: i18n.T(`Startup kubevpn daemon server`),
|
||||
Long: templates.LongDesc(i18n.T(`Startup kubevpn daemon server`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
sockPath := daemon.GetSockPath(opt.IsSudo)
|
||||
err := os.Remove(sockPath)
|
||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
b := make([]byte, 32)
|
||||
if _, err := rand.Read(b); err != nil {
|
||||
return err
|
||||
}
|
||||
pidPath := daemon.GetPidPath(opt.IsSudo)
|
||||
err = os.Remove(pidPath)
|
||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
return err
|
||||
opt.ID = base64.URLEncoding.EncodeToString(b)
|
||||
|
||||
if opt.IsSudo {
|
||||
go util.StartupPProf(config.SudoPProfPort)
|
||||
_ = os.RemoveAll("/etc/resolver")
|
||||
_ = dns.CleanupHosts()
|
||||
_ = util.CleanupTempKubeConfigFile()
|
||||
} else {
|
||||
go util.StartupPProf(config.PProfPort)
|
||||
}
|
||||
pid := os.Getpid()
|
||||
err = os.WriteFile(pidPath, []byte(strconv.Itoa(pid)), os.ModePerm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.Chmod(pidPath, os.ModePerm)
|
||||
return err
|
||||
return initLogfile(action.GetDaemonLogPath())
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
defer opt.Stop()
|
||||
defer func() {
|
||||
if errors.Is(err, http.ErrServerClosed) {
|
||||
err = nil
|
||||
}
|
||||
if opt.IsSudo {
|
||||
for _, profile := range pprof.Profiles() {
|
||||
func() {
|
||||
file, e := os.Create(filepath.Join(config.PprofPath, profile.Name()))
|
||||
if e != nil {
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
e = profile.WriteTo(file, 1)
|
||||
if e != nil {
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
}()
|
||||
return opt.Start(cmd.Context())
|
||||
},
|
||||
PostRun: func(cmd *cobra.Command, args []string) {
|
||||
sockPath := daemon.GetSockPath(opt.IsSudo)
|
||||
_ = os.Remove(sockPath)
|
||||
pidPath := daemon.GetPidPath(opt.IsSudo)
|
||||
_ = os.Remove(pidPath)
|
||||
},
|
||||
Hidden: true,
|
||||
DisableFlagsInUseLine: true,
|
||||
}
|
||||
cmd.Flags().BoolVar(&opt.IsSudo, "sudo", false, "is sudo or not")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func initLogfile(path string) error {
|
||||
_, err := os.Lstat(path)
|
||||
if os.IsNotExist(err) {
|
||||
var f *os.File
|
||||
f, err = os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_ = f.Close()
|
||||
return os.Chmod(path, 0644)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -4,8 +4,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
dockercomp "github.com/docker/cli/cli/command/completion"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
@@ -13,38 +12,32 @@ import (
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/dev"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/dev"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdDev(f cmdutil.Factory) *cobra.Command {
|
||||
cli, dockerCli, err := util.GetClient()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
var options = &dev.Options{
|
||||
NoProxy: false,
|
||||
ExtraRouteInfo: handler.ExtraRouteInfo{},
|
||||
}
|
||||
var devOptions = &dev.Options{
|
||||
Factory: f,
|
||||
NoProxy: false,
|
||||
ExtraCIDR: []string{},
|
||||
Cli: cli,
|
||||
DockerCli: dockerCli,
|
||||
}
|
||||
var sshConf = &util.SshConfig{}
|
||||
var sshConf = &pkgssh.SshConfig{}
|
||||
var transferImage bool
|
||||
cmd := &cobra.Command{
|
||||
Use: "dev TYPE/NAME [-c CONTAINER] [flags] -- [args...]",
|
||||
Short: i18n.T("Startup your kubernetes workloads in local Docker container"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Startup your kubernetes workloads in local Docker container with same volume、env、and network
|
||||
Startup your kubernetes workloads in local Docker container with same volume、env、and network
|
||||
|
||||
## What did i do:
|
||||
- Download volume which MountPath point to, mount to docker container
|
||||
- Connect to cluster network, set network to docker container
|
||||
- Get all environment with command (env), set env to docker container
|
||||
`)),
|
||||
## What did it do:
|
||||
- Download volume which MountPath point to, mount to docker container
|
||||
- Connect to cluster network, set network to docker container
|
||||
- Get all environment with command (env), set env to docker container
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# Develop workloads
|
||||
- develop deployment
|
||||
@@ -52,8 +45,8 @@ Startup your kubernetes workloads in local Docker container with same volume、e
|
||||
- develop service
|
||||
kubevpn dev service/productpage
|
||||
|
||||
# Develop workloads with mesh, traffic with header a=1, will hit local PC, otherwise no effect
|
||||
kubevpn dev service/productpage --headers a=1
|
||||
# Develop workloads with mesh, traffic with header foo=bar, will hit local PC, otherwise no effect
|
||||
kubevpn dev service/productpage --headers foo=bar
|
||||
|
||||
# Develop workloads without proxy traffic
|
||||
kubevpn dev service/productpage --no-proxy
|
||||
@@ -61,23 +54,28 @@ Startup your kubernetes workloads in local Docker container with same volume、e
|
||||
# Develop workloads which api-server behind of bastion host or ssh jump host
|
||||
kubevpn dev deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
|
||||
|
||||
# It also support ProxyJump, like
|
||||
# It also supports ProxyJump, like
|
||||
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
|
||||
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
|
||||
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
|
||||
kubevpn dev deployment/productpage --ssh-alias <alias>
|
||||
|
||||
# Switch to terminal mode; send stdin to 'bash' and sends stdout/stderror from 'bash' back to the client
|
||||
kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev -i -t --entrypoint /bin/bash
|
||||
kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev --entrypoint /bin/bash
|
||||
or
|
||||
kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev -it --entrypoint /bin/bash
|
||||
`)),
|
||||
kubevpn dev deployment/authors -n default --kubeconfig ~/.kube/config --ssh-alias dev --entrypoint /bin/bash
|
||||
|
||||
# Support ssh auth GSSAPI
|
||||
kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab --entrypoint /bin/bash
|
||||
kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache --entrypoint /bin/bash
|
||||
kubevpn dev deployment/authors -n default --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD> --entrypoint /bin/bash
|
||||
`)),
|
||||
ValidArgsFunction: completion.ResourceTypeAndNameCompletionFunc(f),
|
||||
Args: cobra.MatchAll(cobra.OnlyValidArgs),
|
||||
DisableFlagsInUseLine: true,
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 0 {
|
||||
fmt.Fprintf(os.Stdout, "You must specify the type of resource to proxy. %s\n\n", cmdutil.SuggestAPIResources("kubevpn"))
|
||||
_, _ = fmt.Fprintf(os.Stdout, "You must specify the type of resource to proxy. %s\n\n", cmdutil.SuggestAPIResources("kubevpn"))
|
||||
fullCmdName := cmd.Parent().CommandPath()
|
||||
usageString := "Required resource not specified."
|
||||
if len(fullCmdName) > 0 && cmdutil.IsSiblingCommandExists(cmd, "explain") {
|
||||
@@ -85,92 +83,87 @@ Startup your kubernetes workloads in local Docker container with same volume、e
|
||||
}
|
||||
return cmdutil.UsageErrorf(cmd, usageString)
|
||||
}
|
||||
err = cmd.Flags().Parse(args[1:])
|
||||
err := cmd.Flags().Parse(args[1:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
util.InitLogger(false)
|
||||
// not support temporally
|
||||
if devOptions.Engine == config.EngineGvisor {
|
||||
return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
|
||||
util.InitLoggerForClient(config.Debug)
|
||||
|
||||
if p := options.RunOptions.Platform; p != "" {
|
||||
if _, err = platforms.Parse(p); err != nil {
|
||||
return fmt.Errorf("error parsing specified platform: %v", err)
|
||||
}
|
||||
}
|
||||
if err = validatePullOpt(options.RunOptions.Pull); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = daemon.StartupDaemon(cmd.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return handler.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false)
|
||||
return pkgssh.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false)
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
devOptions.Workload = args[0]
|
||||
options.Workload = args[0]
|
||||
for i, arg := range args {
|
||||
if arg == "--" && i != len(args)-1 {
|
||||
devOptions.Copts.Args = args[i+1:]
|
||||
options.ContainerOptions.Args = args[i+1:]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
err = dev.DoDev(cmd.Context(), devOptions, sshConf, cmd.Flags(), f, transferImage)
|
||||
for _, fun := range devOptions.GetRollbackFuncList() {
|
||||
if fun != nil {
|
||||
if err = fun(); err != nil {
|
||||
log.Errorf("roll back failed, error: %s", err.Error())
|
||||
defer func() {
|
||||
for _, function := range options.GetRollbackFuncList() {
|
||||
if function != nil {
|
||||
if er := function(); er != nil {
|
||||
log.Errorf("Rollback failed, error: %s", er.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if err := options.InitClient(f); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := options.Main(cmd.Context(), sshConf, cmd.Flags(), transferImage)
|
||||
return err
|
||||
},
|
||||
}
|
||||
cmd.Flags().SortFlags = false
|
||||
cmd.Flags().StringToStringVarP(&devOptions.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to local PC, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to local PC, format is k=v, like: k1=v1,k2=v2")
|
||||
cmd.Flags().StringToStringVarP(&options.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to local PC, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to local PC, format is k=v, like: k1=v1,k2=v2")
|
||||
cmd.Flags().BoolVar(&config.Debug, "debug", false, "enable debug mode or not, true or false")
|
||||
cmd.Flags().StringVar(&config.Image, "image", config.Image, "use this image to startup container")
|
||||
cmd.Flags().BoolVar(&devOptions.NoProxy, "no-proxy", false, "Whether proxy remote workloads traffic into local or not, true: just startup container on local without inject containers to intercept traffic, false: intercept traffic and forward to local")
|
||||
cmdutil.AddContainerVarFlags(cmd, &devOptions.ContainerName, devOptions.ContainerName)
|
||||
cmd.Flags().BoolVar(&options.NoProxy, "no-proxy", false, "Whether proxy remote workloads traffic into local or not, true: just startup container on local without inject containers to intercept traffic, false: intercept traffic and forward to local")
|
||||
cmdutil.AddContainerVarFlags(cmd, &options.ContainerName, options.ContainerName)
|
||||
cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc("container", completion.ContainerCompletionFunc(f)))
|
||||
cmd.Flags().StringArrayVar(&devOptions.ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
|
||||
cmd.Flags().StringArrayVar(&devOptions.ExtraDomain, "extra-domain", []string{}, "Extra domain string, the resolved ip will add to route table, eg: --extra-domain test.abc.com --extra-domain foo.test.com")
|
||||
cmd.Flags().StringVar((*string)(&devOptions.ConnectMode), "connect-mode", string(dev.ConnectModeHost), "Connect to kubernetes network in container or in host, eg: ["+string(dev.ConnectModeContainer)+"|"+string(dev.ConnectModeHost)+"]")
|
||||
cmd.Flags().StringVar((*string)(&options.ConnectMode), "connect-mode", string(dev.ConnectModeHost), "Connect to kubernetes network in container or in host, eg: ["+string(dev.ConnectModeContainer)+"|"+string(dev.ConnectModeHost)+"]")
|
||||
cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
|
||||
cmd.Flags().StringVar((*string)(&devOptions.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
|
||||
cmd.Flags().StringVar((*string)(&options.Engine), "netstack", string(config.EngineSystem), fmt.Sprintf(`network stack ("%s"|"%s") %s: use gvisor (good compatibility), %s: use raw mode (best performance, relays on iptables SNAT)`, config.EngineGvisor, config.EngineSystem, config.EngineGvisor, config.EngineSystem))
|
||||
|
||||
// diy docker options
|
||||
cmd.Flags().StringVar(&devOptions.DockerImage, "docker-image", "", "Overwrite the default K8s pod of the image")
|
||||
cmd.Flags().StringVar(&options.DevImage, "dev-image", "", "Use to startup docker container, Default is pod image")
|
||||
// origin docker options
|
||||
flags := cmd.Flags()
|
||||
flags.SetInterspersed(false)
|
||||
dev.AddDockerFlags(options, cmd.Flags())
|
||||
|
||||
// These are flags not stored in Config/HostConfig
|
||||
flags.BoolVarP(&devOptions.Options.Detach, "detach", "d", false, "Run container in background and print container ID")
|
||||
flags.StringVar(&devOptions.Options.Name, "name", "", "Assign a name to the container")
|
||||
flags.StringVar(&devOptions.Options.Pull, "pull", dev.PullImageMissing, `Pull image before running ("`+dev.PullImageAlways+`"|"`+dev.PullImageMissing+`"|"`+dev.PullImageNever+`")`)
|
||||
flags.BoolVarP(&devOptions.Options.Quiet, "quiet", "q", false, "Suppress the pull output")
|
||||
|
||||
// Add an explicit help that doesn't have a `-h` to prevent the conflict
|
||||
// with hostname
|
||||
flags.Bool("help", false, "Print usage")
|
||||
|
||||
command.AddPlatformFlag(flags, &devOptions.Options.Platform)
|
||||
command.AddTrustVerificationFlags(flags, &devOptions.Options.Untrusted, dockerCli.ContentTrustEnabled())
|
||||
devOptions.Copts = dev.AddFlags(flags)
|
||||
|
||||
_ = cmd.RegisterFlagCompletionFunc(
|
||||
"env",
|
||||
func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return os.Environ(), cobra.ShellCompDirectiveNoFileComp
|
||||
},
|
||||
)
|
||||
_ = cmd.RegisterFlagCompletionFunc(
|
||||
"env-file",
|
||||
func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return nil, cobra.ShellCompDirectiveDefault
|
||||
},
|
||||
)
|
||||
_ = cmd.RegisterFlagCompletionFunc(
|
||||
"network",
|
||||
dockercomp.NetworkNames(nil),
|
||||
)
|
||||
|
||||
addSshFlags(cmd, sshConf)
|
||||
handler.AddExtraRoute(cmd.Flags(), &options.ExtraRouteInfo)
|
||||
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func validatePullOpt(val string) error {
|
||||
switch val {
|
||||
case dev.PullImageAlways, dev.PullImageMissing, dev.PullImageNever, "":
|
||||
// valid option, but nothing to do yet
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf(
|
||||
"invalid pull option: '%s': must be one of %q, %q or %q",
|
||||
val,
|
||||
dev.PullImageAlways,
|
||||
dev.PullImageMissing,
|
||||
dev.PullImageNever,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,43 +2,53 @@ package cmds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
"k8s.io/utils/pointer"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdDisconnect(f cmdutil.Factory) *cobra.Command {
|
||||
var all = false
|
||||
var clusterIDs []string
|
||||
cmd := &cobra.Command{
|
||||
Use: "disconnect",
|
||||
Short: i18n.T("Disconnect from kubernetes cluster network"),
|
||||
Long: templates.LongDesc(i18n.T(`Disconnect from kubernetes cluster network`)),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Disconnect from kubernetes cluster network
|
||||
|
||||
This command is to disconnect from cluster. after use command 'kubevpn connect',
|
||||
you can use this command to disconnect from a specific cluster.
|
||||
before disconnect, it will leave proxy resource and clone resource if resource depends on this cluster
|
||||
after disconnect it will also cleanup DNS and host
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# disconnect from cluster network and restore proxy resource
|
||||
kubevpn disconnect
|
||||
`)),
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
util.InitLoggerForClient(false)
|
||||
err = daemon.StartupDaemon(cmd.Context())
|
||||
return err
|
||||
},
|
||||
Args: cobra.MatchAll(cobra.OnlyValidArgs),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 && all {
|
||||
return fmt.Errorf("either specify --all or specific ID, not both")
|
||||
return fmt.Errorf("either specify --all or ID, not both")
|
||||
}
|
||||
if len(args) == 0 && !all {
|
||||
return fmt.Errorf("either specify --all or specific ID")
|
||||
if len(clusterIDs) > 0 && all {
|
||||
return fmt.Errorf("either specify --all or cluster-id, not both")
|
||||
}
|
||||
if len(args) == 0 && !all && len(clusterIDs) == 0 {
|
||||
return fmt.Errorf("either specify --all or ID or cluster-id")
|
||||
}
|
||||
var ids *int32
|
||||
if len(args) > 0 {
|
||||
@@ -51,27 +61,23 @@ func CmdDisconnect(f cmdutil.Factory) *cobra.Command {
|
||||
client, err := daemon.GetClient(false).Disconnect(
|
||||
cmd.Context(),
|
||||
&rpc.DisconnectRequest{
|
||||
ID: ids,
|
||||
All: pointer.Bool(all),
|
||||
ID: ids,
|
||||
ClusterIDs: clusterIDs,
|
||||
All: pointer.Bool(all),
|
||||
},
|
||||
)
|
||||
var resp *rpc.DisconnectResponse
|
||||
for {
|
||||
resp, err = client.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err == nil {
|
||||
fmt.Fprint(os.Stdout, resp.Message)
|
||||
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
|
||||
break
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, "disconnect successfully")
|
||||
err = util.PrintGRPCStream[rpc.DisconnectResponse](client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, _ = fmt.Fprint(os.Stdout, "Disconnect completed")
|
||||
return nil
|
||||
},
|
||||
}
|
||||
cmd.Flags().BoolVar(&all, "all", all, "Select all, disconnect from all cluster network")
|
||||
cmd.Flags().BoolVar(&all, "all", all, "Disconnect all cluster, disconnect from all cluster network")
|
||||
cmd.Flags().StringArrayVar(&clusterIDs, "cluster-id", []string{}, "Cluster id, command status -o yaml/json will show cluster-id")
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -1,20 +1,27 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"cmp"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/cli-runtime/pkg/printers"
|
||||
cmdget "k8s.io/kubectl/pkg/cmd/get"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/scheme"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
)
|
||||
|
||||
func CmdGet(f cmdutil.Factory) *cobra.Command {
|
||||
var printFlags = cmdget.NewGetPrintFlags()
|
||||
cmd := &cobra.Command{
|
||||
Use: "get",
|
||||
Hidden: true,
|
||||
@@ -27,38 +34,76 @@ func CmdGet(f cmdutil.Factory) *cobra.Command {
|
||||
# Get api-server behind of bastion host or ssh jump host
|
||||
kubevpn get deployment --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
|
||||
|
||||
# it also support ProxyJump, like
|
||||
# It also supports ProxyJump, like
|
||||
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
|
||||
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
|
||||
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
|
||||
kubevpn get service --ssh-alias <alias>
|
||||
`)),
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
// startup daemon process and sudo process
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
namespace, _, err := f.ToRawKubeConfigLoader().Namespace()
|
||||
ns, _, err := f.ToRawKubeConfigLoader().Namespace()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client, err := daemon.GetClient(false).Get(
|
||||
client, err := daemon.GetClient(true).Get(
|
||||
cmd.Context(),
|
||||
&rpc.GetRequest{
|
||||
Namespace: namespace,
|
||||
Namespace: ns,
|
||||
Resource: args[0],
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
marshal, err := yaml.Marshal(client.Metadata)
|
||||
if err != nil {
|
||||
return err
|
||||
w := printers.GetNewTabWriter(os.Stdout)
|
||||
var toPrinter = func() (printers.ResourcePrinterFunc, error) {
|
||||
var flags = printFlags.Copy()
|
||||
_ = flags.EnsureWithNamespace()
|
||||
printer, err := flags.ToPrinter()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
printer, err = printers.NewTypeSetter(scheme.Scheme).WrapToPrinter(printer, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
outputOption := cmd.Flags().Lookup("output").Value.String()
|
||||
if strings.Contains(outputOption, "custom-columns") || outputOption == "yaml" || strings.Contains(outputOption, "json") {
|
||||
} else {
|
||||
printer = &cmdget.TablePrinter{Delegate: printer}
|
||||
}
|
||||
return printer.PrintObj, nil
|
||||
}
|
||||
fmt.Fprint(os.Stdout, string(marshal))
|
||||
return nil
|
||||
var list []*v1.PartialObjectMetadata
|
||||
for _, m := range client.Metadata {
|
||||
var data v1.PartialObjectMetadata
|
||||
err = json.Unmarshal([]byte(m), &data)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
list = append(list, &data)
|
||||
}
|
||||
slices.SortStableFunc(list, func(a, b *v1.PartialObjectMetadata) int {
|
||||
compare := cmp.Compare(a.GetNamespace(), b.GetNamespace())
|
||||
if compare == 0 {
|
||||
return cmp.Compare(a.GetName(), b.GetName())
|
||||
}
|
||||
return compare
|
||||
})
|
||||
for _, m := range list {
|
||||
printer, err := toPrinter()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_ = printer.PrintObj(m, w)
|
||||
}
|
||||
return w.Flush()
|
||||
},
|
||||
}
|
||||
printFlags.AddFlags(cmd)
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -1,30 +1,33 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdLeave(f cmdutil.Factory) *cobra.Command {
|
||||
var leaveCmd = &cobra.Command{
|
||||
Use: "leave",
|
||||
Short: "Leave proxy resource",
|
||||
Long: `leave proxy resource and restore it to origin`,
|
||||
Short: i18n.T("Leave proxy resource"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Leave proxy resource and restore it to origin
|
||||
|
||||
This command is used to leave proxy resources. after use command 'kubevpn proxy xxx',
|
||||
you can use this command to leave proxy resources.
|
||||
you can just leave proxy resources which do proxy by yourself.
|
||||
and the last one leave proxy resource, it will also restore workloads container.
|
||||
otherwise it will keep containers [vpn, envoy-proxy] until last one to leave.
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# leave proxy resource and restore it to origin
|
||||
kubevpn leave deployment/authors
|
||||
`)),
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
@@ -35,17 +38,8 @@ func CmdLeave(f cmdutil.Factory) *cobra.Command {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
recv, err := leave.Recv()
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, recv.GetMessage())
|
||||
}
|
||||
err = util.PrintGRPCStream[rpc.LeaveResponse](leave)
|
||||
return err
|
||||
},
|
||||
}
|
||||
return leaveCmd
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
)
|
||||
|
||||
func CmdList(f cmdutil.Factory) *cobra.Command {
|
||||
@@ -20,7 +20,7 @@ func CmdList(f cmdutil.Factory) *cobra.Command {
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# list proxy resources
|
||||
kubevpn list
|
||||
`)),
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
@@ -35,6 +35,7 @@ func CmdList(f cmdutil.Factory) *cobra.Command {
|
||||
fmt.Println(client.GetMessage())
|
||||
return nil
|
||||
},
|
||||
Hidden: true,
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -1,34 +1,32 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdLogs(f cmdutil.Factory) *cobra.Command {
|
||||
req := &rpc.LogRequest{}
|
||||
cmd := &cobra.Command{
|
||||
Use: "logs",
|
||||
Short: i18n.T("Log kubevpn daemon server"),
|
||||
Long: templates.LongDesc(i18n.T(`Log kubevpn daemon server`)),
|
||||
Short: i18n.T("Log kubevpn daemon grpc server"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Print the logs for kubevpn daemon grpc server. it will show sudo daemon and daemon grpc server log in both
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# show log for kubevpn daemon server
|
||||
kubevpn logs
|
||||
# follow more log
|
||||
kubevpn logs -f
|
||||
`)),
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
util.InitLoggerForClient(false)
|
||||
// startup daemon process and sudo process
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
@@ -37,22 +35,11 @@ func CmdLogs(f cmdutil.Factory) *cobra.Command {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var resp *rpc.LogResponse
|
||||
for {
|
||||
resp, err = client.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err == nil {
|
||||
fmt.Fprintln(os.Stdout, resp.Message)
|
||||
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
|
||||
return nil
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
err = util.PrintGRPCStream[rpc.LogResponse](client)
|
||||
return err
|
||||
},
|
||||
}
|
||||
cmd.Flags().BoolVarP(&req.Follow, "follow", "f", false, "Specify if the logs should be streamed.")
|
||||
cmd.Flags().Int32VarP(&req.Lines, "number", "N", 10, "Lines of recent log file to display.")
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -12,14 +12,17 @@ import (
|
||||
var (
|
||||
optionsExample = templates.Examples(i18n.T(`
|
||||
# Print flags inherited by all commands
|
||||
kubevpn options`))
|
||||
kubevpn options
|
||||
`))
|
||||
)
|
||||
|
||||
func CmdOptions(cmdutil.Factory) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "options",
|
||||
Short: i18n.T("Print the list of flags inherited by all commands"),
|
||||
Long: i18n.T("Print the list of flags inherited by all commands"),
|
||||
Use: "options",
|
||||
Short: i18n.T("Print the list of flags inherited by all commands"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Print the list of flags inherited by all commands
|
||||
`)),
|
||||
Example: optionsExample,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
cmd.Usage()
|
||||
|
||||
@@ -3,33 +3,41 @@ package cmds
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
utilcomp "k8s.io/kubectl/pkg/util/completion"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdProxy(f cmdutil.Factory) *cobra.Command {
|
||||
var connect = handler.ConnectOptions{}
|
||||
var sshConf = &util.SshConfig{}
|
||||
var extraRoute = &handler.ExtraRouteInfo{}
|
||||
var sshConf = &pkgssh.SshConfig{}
|
||||
var transferImage, foreground bool
|
||||
cmd := &cobra.Command{
|
||||
Use: "proxy",
|
||||
Short: i18n.T("Proxy kubernetes workloads inbound traffic into local PC"),
|
||||
Long: templates.LongDesc(i18n.T(`Proxy kubernetes workloads inbound traffic into local PC`)),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Proxy kubernetes workloads inbound traffic into local PC
|
||||
|
||||
Proxy k8s workloads inbound traffic into local PC with/without service mesh.
|
||||
Without service mesh, it will proxy all inbound traffic into local PC, even traffic protocol is layer 4(Transport layer).
|
||||
With service mesh, it will proxy traffic which has special header to local PC, support protocol HTTP,GRPC,THRIFT, WebSocket...
|
||||
After proxy resource, it also connected to cluster network automatically. so just startup your app in local PC
|
||||
and waiting for inbound traffic, make debug more easier.
|
||||
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# Reverse proxy
|
||||
- proxy deployment
|
||||
@@ -43,27 +51,43 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
|
||||
or
|
||||
kubevpn proxy deployment authors productpage
|
||||
|
||||
# Reverse proxy with mesh, traffic with header a=1, will hit local PC, otherwise no effect
|
||||
kubevpn proxy service/productpage --headers a=1
|
||||
# Reverse proxy with mesh, traffic with header foo=bar, will hit local PC, otherwise no effect
|
||||
kubevpn proxy service/productpage --headers foo=bar
|
||||
|
||||
# Reverse proxy with mesh, traffic with header foo=bar and env=dev, will hit local PC, otherwise no effect
|
||||
kubevpn proxy service/productpage --headers foo=bar --headers env=dev
|
||||
|
||||
# Connect to api-server behind of bastion host or ssh jump host and proxy kubernetes resource traffic into local PC
|
||||
kubevpn proxy deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem --headers a=1
|
||||
kubevpn proxy deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem --headers foo=bar
|
||||
|
||||
# it also support ProxyJump, like
|
||||
# It also supports ProxyJump, like
|
||||
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
|
||||
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
|
||||
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
|
||||
kubevpn proxy service/productpage --ssh-alias <alias> --headers a=1
|
||||
kubevpn proxy service/productpage --ssh-alias <alias> --headers foo=bar
|
||||
|
||||
`)),
|
||||
# Support ssh auth GSSAPI
|
||||
kubevpn proxy service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
|
||||
kubevpn proxy service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
|
||||
kubevpn proxy service/productpage --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
|
||||
|
||||
# Support port map, you can proxy container port to local port by command:
|
||||
kubevpn proxy deployment/productpage --portmap 80:8080
|
||||
|
||||
# Proxy container port 9080 to local port 8080 of TCP protocol
|
||||
kubevpn proxy deployment/productpage --portmap 9080:8080
|
||||
|
||||
# Proxy container port 9080 to local port 5000 of UDP protocol
|
||||
kubevpn proxy deployment/productpage --portmap udp/9080:5000
|
||||
|
||||
# Auto proxy container port to same local port, and auto detect protocol
|
||||
kubevpn proxy deployment/productpage
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
util.InitLoggerForClient(false)
|
||||
if err = daemon.StartupDaemon(cmd.Context()); err != nil {
|
||||
return err
|
||||
}
|
||||
// not support temporally
|
||||
if connect.Engine == config.EngineGvisor {
|
||||
return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
|
||||
}
|
||||
return err
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
@@ -77,47 +101,46 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
|
||||
return cmdutil.UsageErrorf(cmd, usageString)
|
||||
}
|
||||
|
||||
bytes, ns, err := util.ConvertToKubeconfigBytes(f)
|
||||
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !sshConf.IsEmpty() {
|
||||
if ip := util.GetAPIServerFromKubeConfigBytes(bytes); ip != nil {
|
||||
extraRoute.ExtraCIDR = append(extraRoute.ExtraCIDR, ip.String())
|
||||
}
|
||||
}
|
||||
// todo 将 doConnect 方法封装?内部使用 client 发送到daemon?
|
||||
cli := daemon.GetClient(false)
|
||||
logLevel := log.InfoLevel
|
||||
if config.Debug {
|
||||
logLevel = log.DebugLevel
|
||||
}
|
||||
client, err := cli.Proxy(
|
||||
cmd.Context(),
|
||||
&rpc.ConnectRequest{
|
||||
KubeconfigBytes: string(bytes),
|
||||
Namespace: ns,
|
||||
Headers: connect.Headers,
|
||||
PortMap: connect.PortMap,
|
||||
Workloads: args,
|
||||
ExtraCIDR: connect.ExtraCIDR,
|
||||
ExtraDomain: connect.ExtraDomain,
|
||||
UseLocalDNS: connect.UseLocalDNS,
|
||||
ExtraRoute: extraRoute.ToRPC(),
|
||||
Engine: string(connect.Engine),
|
||||
SshJump: sshConf.ToRPC(),
|
||||
TransferImage: transferImage,
|
||||
Image: config.Image,
|
||||
Level: int32(log.DebugLevel),
|
||||
OriginKubeconfigPath: util.GetKubeconfigPath(f),
|
||||
Level: int32(logLevel),
|
||||
OriginKubeconfigPath: util.GetKubeConfigPath(f),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var resp *rpc.ConnectResponse
|
||||
for {
|
||||
resp, err = client.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err == nil {
|
||||
fmt.Fprint(os.Stdout, resp.Message)
|
||||
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
|
||||
return nil
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
err = util.PrintGRPCStream[rpc.ConnectResponse](client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
util.Print(os.Stdout, "Now you can access resources in the kubernetes cluster, enjoy it :)")
|
||||
util.Print(os.Stdout, config.Slogan)
|
||||
// hangup
|
||||
if foreground {
|
||||
// leave from cluster resources
|
||||
@@ -126,44 +149,27 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
|
||||
stream, err := cli.Leave(context.Background(), &rpc.LeaveRequest{
|
||||
Workloads: args,
|
||||
})
|
||||
var resp *rpc.LeaveResponse
|
||||
for {
|
||||
resp, err = stream.Recv()
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, resp.Message)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = util.PrintGRPCStream[rpc.LeaveResponse](stream)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringToStringVarP(&connect.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to local PC, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to local PC, format is k=v, like: k1=v1,k2=v2")
|
||||
cmd.Flags().StringToStringVarP(&connect.Headers, "headers", "H", map[string]string{}, "Traffic with special headers (use `and` to match all headers) with reverse it to local PC, If not special, redirect all traffic to local PC. format: <KEY>=<VALUE> eg: --headers foo=bar --headers env=dev")
|
||||
cmd.Flags().StringArrayVar(&connect.PortMap, "portmap", []string{}, "Port map, map container port to local port, format: [tcp/udp]/containerPort:localPort, If not special, localPort will use containerPort. eg: tcp/80:8080 or udp/5000:5001 or 80 or 80:8080")
|
||||
cmd.Flags().BoolVar(&config.Debug, "debug", false, "Enable debug mode or not, true or false")
|
||||
cmd.Flags().StringVar(&config.Image, "image", config.Image, "Use this image to startup container")
|
||||
cmd.Flags().StringArrayVar(&connect.ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
|
||||
cmd.Flags().StringArrayVar(&connect.ExtraDomain, "extra-domain", []string{}, "Extra domain string, the resolved ip will add to route table, eg: --extra-domain test.abc.com --extra-domain foo.test.com")
|
||||
cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
|
||||
cmd.Flags().StringVar((*string)(&connect.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
|
||||
cmd.Flags().StringVar((*string)(&connect.Engine), "netstack", string(config.EngineSystem), fmt.Sprintf(`network stack ("%s"|"%s") %s: use gvisor (good compatibility), %s: use raw mode (best performance, relays on iptables SNAT)`, config.EngineGvisor, config.EngineSystem, config.EngineGvisor, config.EngineSystem))
|
||||
cmd.Flags().BoolVar(&foreground, "foreground", false, "foreground hang up")
|
||||
|
||||
addSshFlags(cmd, sshConf)
|
||||
handler.AddExtraRoute(cmd.Flags(), extraRoute)
|
||||
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
|
||||
cmd.ValidArgsFunction = utilcomp.ResourceTypeAndNameCompletionFunc(f)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addSshFlags(cmd *cobra.Command, sshConf *util.SshConfig) {
|
||||
// for ssh jumper host
|
||||
cmd.Flags().StringVar(&sshConf.Addr, "ssh-addr", "", "Optional ssh jump server address to dial as <hostname>:<port>, eg: 127.0.0.1:22")
|
||||
cmd.Flags().StringVar(&sshConf.User, "ssh-username", "", "Optional username for ssh jump server")
|
||||
cmd.Flags().StringVar(&sshConf.Password, "ssh-password", "", "Optional password for ssh jump server")
|
||||
cmd.Flags().StringVar(&sshConf.Keyfile, "ssh-keyfile", "", "Optional file with private key for SSH authentication")
|
||||
cmd.Flags().StringVar(&sshConf.ConfigAlias, "ssh-alias", "", "Optional config alias with ~/.ssh/config for SSH authentication")
|
||||
cmd.Flags().StringVar(&sshConf.RemoteKubeconfig, "remote-kubeconfig", "", "Remote kubeconfig abstract path of ssh server, default is /$ssh-user/.kube/config")
|
||||
lookup := cmd.Flags().Lookup("remote-kubeconfig")
|
||||
lookup.NoOptDefVal = "~/.kube/config"
|
||||
}
|
||||
|
||||
@@ -3,33 +3,34 @@ package cmds
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdQuit(f cmdutil.Factory) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "quit",
|
||||
Short: i18n.T("Quit kubevpn daemon server"),
|
||||
Long: templates.LongDesc(i18n.T(`Disconnect from cluster, leave proxy resources, and quit daemon`)),
|
||||
Short: i18n.T("Quit kubevpn daemon grpc server"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Disconnect from cluster, leave proxy resources, quit daemon grpc server and cleanup dns/hosts
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# before quit kubevpn, it will leave proxy resources to origin and disconnect from cluster
|
||||
kubevpn quit
|
||||
`)),
|
||||
`)),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
_ = quit(cmd.Context(), true)
|
||||
_ = quit(cmd.Context(), false)
|
||||
fmt.Fprint(os.Stdout, "quit successfully")
|
||||
_ = quit(cmd.Context(), true)
|
||||
util.CleanExtensionLib()
|
||||
_, _ = fmt.Fprint(os.Stdout, "Exited")
|
||||
return nil
|
||||
},
|
||||
}
|
||||
@@ -45,18 +46,9 @@ func quit(ctx context.Context, isSudo bool) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var resp *rpc.QuitResponse
|
||||
for {
|
||||
resp, err = client.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err == nil {
|
||||
fmt.Fprint(os.Stdout, resp.Message)
|
||||
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
|
||||
return nil
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
err = util.PrintGRPCStream[rpc.QuitResponse](client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,30 +1,31 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdRemove(f cmdutil.Factory) *cobra.Command {
|
||||
var cmd = &cobra.Command{
|
||||
Use: "remove",
|
||||
Short: "Remove cloned resource",
|
||||
Long: `Remove cloned resource`,
|
||||
Short: "Remove clone resource",
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Remove clone resource
|
||||
|
||||
This command is design to remove clone resources, after use command 'kubevpn clone xxx',
|
||||
it will generate and create a new resource in target k8s cluster with format [resource_name]_clone_xxxxx,
|
||||
use this command to remove this created resources.
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# leave proxy resources to origin
|
||||
kubevpn remove deployment/authors
|
||||
`)),
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
@@ -35,17 +36,8 @@ func CmdRemove(f cmdutil.Factory) *cobra.Command {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
recv, err := leave.Recv()
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, recv.GetMessage())
|
||||
}
|
||||
err = util.PrintGRPCStream[rpc.RemoveResponse](leave)
|
||||
return err
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
|
||||
@@ -1,26 +1,31 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdReset(factory cmdutil.Factory) *cobra.Command {
|
||||
var connect = handler.ConnectOptions{}
|
||||
var sshConf = &util.SshConfig{}
|
||||
func CmdReset(f cmdutil.Factory) *cobra.Command {
|
||||
var sshConf = &pkgssh.SshConfig{}
|
||||
cmd := &cobra.Command{
|
||||
Use: "reset",
|
||||
Short: "Reset all changes made by KubeVPN",
|
||||
Long: `Reset all changes made by KubeVPN`,
|
||||
Short: "Reset all resource create by kubevpn in k8s cluster",
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Reset all resource create by kubevpn in k8s cluster
|
||||
|
||||
Reset will delete all resources create by kubevpn in k8s cluster, like deployment, service, serviceAccount...
|
||||
and it will also delete local develop docker containers, docker networks. delete hosts entry added by kubevpn,
|
||||
cleanup DNS settings.
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# Reset default namespace
|
||||
kubevpn reset
|
||||
@@ -31,34 +36,52 @@ func CmdReset(factory cmdutil.Factory) *cobra.Command {
|
||||
# Reset cluster api-server behind of bastion host or ssh jump host
|
||||
kubevpn reset --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
|
||||
|
||||
# it also support ProxyJump, like
|
||||
# It also supports ProxyJump, like
|
||||
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐
|
||||
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ api-server │
|
||||
└──────┘ └──────┘ └──────┘ └──────┘ └────────────┘
|
||||
kubevpn reset --ssh-alias <alias>
|
||||
|
||||
`)),
|
||||
# Support ssh auth GSSAPI
|
||||
kubevpn reset --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
|
||||
kubevpn reset --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
|
||||
kubevpn reset --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
return handler.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false)
|
||||
util.InitLoggerForClient(false)
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
util.InitLogger(false)
|
||||
if err := connect.InitClient(factory); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
err := connect.Reset(cmd.Context())
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, "done")
|
||||
cli := daemon.GetClient(false)
|
||||
disconnect, err := cli.Disconnect(cmd.Context(), &rpc.DisconnectRequest{
|
||||
KubeconfigBytes: ptr.To(string(bytes)),
|
||||
Namespace: ptr.To(ns),
|
||||
SshJump: sshConf.ToRPC(),
|
||||
})
|
||||
if err != nil {
|
||||
log.Warnf("Failed to disconnect from cluter: %v", err)
|
||||
} else {
|
||||
_ = util.PrintGRPCStream[rpc.DisconnectResponse](disconnect)
|
||||
}
|
||||
|
||||
req := &rpc.ResetRequest{
|
||||
KubeconfigBytes: string(bytes),
|
||||
Namespace: ns,
|
||||
SshJump: sshConf.ToRPC(),
|
||||
}
|
||||
resp, err := cli.Reset(cmd.Context(), req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = util.PrintGRPCStream[rpc.ResetResponse](resp)
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
// for ssh jumper host
|
||||
cmd.Flags().StringVar(&sshConf.Addr, "ssh-addr", "", "Optional ssh jump server address to dial as <hostname>:<port>, eg: 127.0.0.1:22")
|
||||
cmd.Flags().StringVar(&sshConf.User, "ssh-username", "", "Optional username for ssh jump server")
|
||||
cmd.Flags().StringVar(&sshConf.Password, "ssh-password", "", "Optional password for ssh jump server")
|
||||
cmd.Flags().StringVar(&sshConf.Keyfile, "ssh-keyfile", "", "Optional file with private key for SSH authentication")
|
||||
cmd.Flags().StringVar(&sshConf.ConfigAlias, "ssh-alias", "", "Optional config alias with ~/.ssh/config for SSH authentication")
|
||||
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
|
||||
return cmd
|
||||
}
|
||||
|
||||
@@ -2,26 +2,29 @@ package cmds
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/cli-runtime/pkg/genericclioptions"
|
||||
"k8s.io/client-go/rest"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/util/homedir"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
)
|
||||
|
||||
func NewKubeVPNCommand() *cobra.Command {
|
||||
var cmd = &cobra.Command{
|
||||
Use: "kubevpn",
|
||||
Short: i18n.T("kubevpn connect to Kubernetes cluster network"),
|
||||
Short: i18n.T("KubeVPN offers a Cloud-Native Dev Environment that seamlessly connects to your Kubernetes cluster network."),
|
||||
Long: templates.LongDesc(`
|
||||
kubevpn connect to Kubernetes cluster network.
|
||||
`),
|
||||
KubeVPN offers a Cloud-Native Dev Environment that seamlessly connects to your Kubernetes cluster network.
|
||||
`),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
cmd.Help()
|
||||
},
|
||||
@@ -41,7 +44,7 @@ func NewKubeVPNCommand() *cobra.Command {
|
||||
return c
|
||||
}
|
||||
configFlags.AddFlags(flags)
|
||||
matchVersionFlags := cmdutil.NewMatchVersionFlags(configFlags)
|
||||
matchVersionFlags := cmdutil.NewMatchVersionFlags(&warp{ConfigFlags: configFlags})
|
||||
matchVersionFlags.AddFlags(flags)
|
||||
factory := cmdutil.NewFactory(matchVersionFlags)
|
||||
|
||||
@@ -61,28 +64,30 @@ func NewKubeVPNCommand() *cobra.Command {
|
||||
CmdServe(factory),
|
||||
CmdDaemon(factory),
|
||||
CmdWebhook(factory),
|
||||
CmdSyncthing(factory),
|
||||
},
|
||||
},
|
||||
{
|
||||
Message: "Management commands",
|
||||
Message: "Management commands:",
|
||||
Commands: []*cobra.Command{
|
||||
CmdStatus(factory),
|
||||
CmdList(factory),
|
||||
CmdAlias(factory),
|
||||
CmdGet(factory),
|
||||
CmdConfig(factory),
|
||||
CmdCp(factory),
|
||||
CmdSSH(factory),
|
||||
CmdSSHDaemon(factory),
|
||||
CmdLogs(factory),
|
||||
CmdCp(factory),
|
||||
CmdReset(factory),
|
||||
CmdQuit(factory),
|
||||
},
|
||||
},
|
||||
{
|
||||
Message: "Other commands",
|
||||
Message: "Other commands:",
|
||||
Commands: []*cobra.Command{
|
||||
CmdStatus(factory),
|
||||
CmdVersion(factory),
|
||||
CmdUpgrade(factory),
|
||||
CmdVersion(factory),
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -91,3 +96,15 @@ func NewKubeVPNCommand() *cobra.Command {
|
||||
cmd.AddCommand(CmdOptions(factory))
|
||||
return cmd
|
||||
}
|
||||
|
||||
type warp struct {
|
||||
*genericclioptions.ConfigFlags
|
||||
}
|
||||
|
||||
func (f *warp) ToRawKubeConfigLoader() clientcmd.ClientConfig {
|
||||
if strings.HasPrefix(ptr.Deref[string](f.KubeConfig, ""), "~") {
|
||||
home := homedir.HomeDir()
|
||||
f.KubeConfig = ptr.To(strings.Replace(*f.KubeConfig, "~", home, 1))
|
||||
}
|
||||
return f.ConfigFlags.ToRawKubeConfigLoader()
|
||||
}
|
||||
|
||||
@@ -2,20 +2,22 @@ package cmds
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"go.uber.org/automaxprocs/maxprocs"
|
||||
glog "gvisor.dev/gvisor/pkg/log"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/core"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/core"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdServe(_ cmdutil.Factory) *cobra.Command {
|
||||
@@ -24,35 +26,32 @@ func CmdServe(_ cmdutil.Factory) *cobra.Command {
|
||||
Use: "serve",
|
||||
Hidden: true,
|
||||
Short: "Server side, startup traffic manager, forward inbound and outbound traffic",
|
||||
Long: templates.LongDesc(`Server side, startup traffic manager, forward inbound and outbound traffic.`),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Server side, startup traffic manager, forward inbound and outbound traffic.
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# serve node
|
||||
kubevpn serve -L "tcp://:10800" -L "tun://127.0.0.1:8422?net=223.254.0.123/32"
|
||||
`)),
|
||||
`)),
|
||||
PreRun: func(*cobra.Command, []string) {
|
||||
util.InitLogger(config.Debug)
|
||||
util.InitLoggerForServer(config.Debug)
|
||||
runtime.GOMAXPROCS(0)
|
||||
go util.StartupPProf(0)
|
||||
go util.StartupPProfForServer(config.PProfPort)
|
||||
glog.SetTarget(util.ServerEmitter{Writer: &glog.Writer{Next: os.Stderr}})
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
_, _ = maxprocs.Set(maxprocs.Logger(nil))
|
||||
err := handler.RentIPIfNeeded(route)
|
||||
ctx := cmd.Context()
|
||||
err := handler.Complete(ctx, route)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
err := handler.ReleaseIPIfNeeded()
|
||||
if err != nil {
|
||||
log.Errorf("release ip failed: %v", err)
|
||||
}
|
||||
}()
|
||||
servers, err := handler.Parse(*route)
|
||||
if err != nil {
|
||||
log.Errorf("parse server failed: %v", err)
|
||||
log.Errorf("Parse server failed: %v", err)
|
||||
return err
|
||||
}
|
||||
ctx := cmd.Context()
|
||||
return handler.Run(ctx, servers)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,41 +1,57 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
"golang.org/x/net/websocket"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
"k8s.io/kubectl/pkg/util/term"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
// CmdSSH
|
||||
// 设置本地的IP是223.254.0.1/32 ,记得一定是掩码 32位,
|
||||
// 这样别的路由不会走到这里来
|
||||
// Remember to use network mask 32, because ssh using unique network CIDR 223.255.0.0/16
|
||||
func CmdSSH(_ cmdutil.Factory) *cobra.Command {
|
||||
var sshConf = &util.SshConfig{}
|
||||
var sshConf = &pkgssh.SshConfig{}
|
||||
var ExtraCIDR []string
|
||||
cmd := &cobra.Command{
|
||||
Use: "ssh",
|
||||
Short: "Ssh to jump server",
|
||||
Long: `Ssh to jump server`,
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Ssh to jump server
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# Jump to server behind of bastion host or ssh jump host
|
||||
kubevpn ssh --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
|
||||
|
||||
# it also support ProxyJump, like
|
||||
# It also supports ProxyJump, like
|
||||
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────┐
|
||||
│ pc ├────►│ ssh1 ├────►│ ssh2 ├────►│ ssh3 ├─────►... ─────► │ server │
|
||||
└──────┘ └──────┘ └──────┘ └──────┘ └────────┘
|
||||
kubevpn ssh --ssh-alias <alias>
|
||||
`)),
|
||||
|
||||
# Support ssh auth GSSAPI
|
||||
kubevpn ssh --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-keytab /path/to/keytab
|
||||
kubevpn ssh --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-cache /path/to/cache
|
||||
kubevpn ssh --ssh-addr <HOST:PORT> --ssh-username <USERNAME> --gssapi-password <PASSWORD>
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
util.InitLoggerForClient(false)
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
@@ -43,23 +59,98 @@ func CmdSSH(_ cmdutil.Factory) *cobra.Command {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.Header.Set("ssh-addr", sshConf.Addr)
|
||||
config.Header.Set("ssh-username", sshConf.User)
|
||||
config.Header.Set("ssh-password", sshConf.Password)
|
||||
config.Header.Set("ssh-keyfile", sshConf.Keyfile)
|
||||
config.Header.Set("ssh-alias", sshConf.ConfigAlias)
|
||||
fd := int(os.Stdin.Fd())
|
||||
if !terminal.IsTerminal(fd) {
|
||||
return fmt.Errorf("stdin is not a terminal")
|
||||
}
|
||||
state, err := terminal.MakeRaw(fd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("terminal make raw: %s", err)
|
||||
}
|
||||
defer terminal.Restore(fd, state)
|
||||
width, height, err := terminal.GetSize(fd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("terminal get size: %s", err)
|
||||
}
|
||||
marshal, err := json.Marshal(sshConf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sessionID := uuid.NewString()
|
||||
config.Header.Set("ssh", string(marshal))
|
||||
config.Header.Set("extra-cidr", strings.Join(ExtraCIDR, ","))
|
||||
config.Header.Set("terminal-width", strconv.Itoa(width))
|
||||
config.Header.Set("terminal-height", strconv.Itoa(height))
|
||||
config.Header.Set("session-id", sessionID)
|
||||
client := daemon.GetTCPClient(true)
|
||||
conn, err := websocket.NewClient(config, client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go io.Copy(conn, os.Stdin)
|
||||
_, err = io.Copy(os.Stdout, conn)
|
||||
return err
|
||||
defer conn.Close()
|
||||
|
||||
errChan := make(chan error, 3)
|
||||
go func() {
|
||||
errChan <- monitorSize(cmd.Context(), sessionID)
|
||||
}()
|
||||
go func() {
|
||||
_, err := io.Copy(conn, os.Stdin)
|
||||
errChan <- err
|
||||
}()
|
||||
go func() {
|
||||
_, err := io.Copy(os.Stdout, conn)
|
||||
errChan <- err
|
||||
}()
|
||||
|
||||
select {
|
||||
case err = <-errChan:
|
||||
return err
|
||||
case <-cmd.Context().Done():
|
||||
return cmd.Context().Err()
|
||||
}
|
||||
},
|
||||
}
|
||||
addSshFlags(cmd, sshConf)
|
||||
cmd.Flags().StringArrayVar(&ExtraCIDR, "extra-cidr", []string{}, "Extra cidr string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
|
||||
pkgssh.AddSshFlags(cmd.Flags(), sshConf)
|
||||
cmd.Flags().StringArrayVar(&ExtraCIDR, "extra-cidr", []string{}, "Extra network CIDR string, eg: --extra-cidr 192.168.0.159/24 --extra-cidr 192.168.1.160/32")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func monitorSize(ctx context.Context, sessionID string) error {
|
||||
conn := daemon.GetTCPClient(true)
|
||||
if conn == nil {
|
||||
return fmt.Errorf("conn is nil")
|
||||
}
|
||||
var tt = term.TTY{
|
||||
In: os.Stdin,
|
||||
Out: os.Stdout,
|
||||
Raw: false,
|
||||
TryDev: false,
|
||||
Parent: nil,
|
||||
}
|
||||
sizeQueue := tt.MonitorSize(tt.GetSize())
|
||||
if sizeQueue == nil {
|
||||
return fmt.Errorf("sizeQueue is nil")
|
||||
}
|
||||
//defer runtime.HandleCrash()
|
||||
config, err := websocket.NewConfig("ws://test/resize", "http://test")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.Header.Set("session-id", sessionID)
|
||||
client, err := websocket.NewClient(config, conn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encoder := json.NewEncoder(client)
|
||||
for ctx.Err() == nil {
|
||||
size := sizeQueue.Next()
|
||||
if size == nil {
|
||||
return nil
|
||||
}
|
||||
if err = encoder.Encode(&size); err != nil {
|
||||
log.Errorf("Encode resize: %s", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -9,24 +9,23 @@ import (
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
)
|
||||
|
||||
// CmdSSHDaemon
|
||||
// 设置本地的IP是223.254.0.1/32 ,记得一定是掩码 32位,
|
||||
// 这样别的路由不会走到这里来
|
||||
// set local tun ip 223.254.0.1/32, remember to use mask 32
|
||||
func CmdSSHDaemon(_ cmdutil.Factory) *cobra.Command {
|
||||
var clientIP string
|
||||
cmd := &cobra.Command{
|
||||
Use: "ssh-daemon",
|
||||
Hidden: true,
|
||||
Short: "Ssh daemon server",
|
||||
Long: `Ssh daemon server`,
|
||||
Long: templates.LongDesc(i18n.T(`Ssh daemon server`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# SSH daemon server
|
||||
kubevpn ssh-daemon --client-ip 223.254.0.123/32
|
||||
`)),
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
err := daemon.StartupDaemon(cmd.Context())
|
||||
return err
|
||||
@@ -41,8 +40,8 @@ func CmdSSHDaemon(_ cmdutil.Factory) *cobra.Command {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, client.ServerIP)
|
||||
return nil
|
||||
_, err = fmt.Fprint(os.Stdout, client.ServerIP)
|
||||
return err
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVar(&clientIP, "client-ip", "", "Client cidr")
|
||||
|
||||
@@ -1,41 +1,282 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/liggitt/tabwriter"
|
||||
"github.com/spf13/cobra"
|
||||
flag "github.com/spf13/pflag"
|
||||
"k8s.io/cli-runtime/pkg/genericclioptions"
|
||||
"k8s.io/cli-runtime/pkg/printers"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
const (
|
||||
FormatJson = "json"
|
||||
FormatYaml = "yaml"
|
||||
FormatTable = "table"
|
||||
)
|
||||
|
||||
func CmdStatus(f cmdutil.Factory) *cobra.Command {
|
||||
var aliasName string
|
||||
var localFile string
|
||||
var remoteAddr string
|
||||
var format string
|
||||
cmd := &cobra.Command{
|
||||
Use: "status",
|
||||
Short: i18n.T("KubeVPN status"),
|
||||
Long: templates.LongDesc(i18n.T(`KubeVPN status`)),
|
||||
Short: i18n.T("Show connect status and list proxy/clone resource"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Show connect status and list proxy/clone resource
|
||||
|
||||
Show connect status and list proxy or clone resource, you can check connect status by filed status and netif.
|
||||
if netif is empty, means tun device closed, so it's unhealthy, it will also show route info, if proxy workloads,
|
||||
not only show myself proxy resource, another route info will also display.
|
||||
`)),
|
||||
Example: templates.Examples(i18n.T(`
|
||||
# show status for kubevpn status
|
||||
# show status for connect status and list proxy/clone resource
|
||||
kubevpn status
|
||||
`)),
|
||||
|
||||
# query status by alias config name dev_new
|
||||
kubevpn status --alias dev_new
|
||||
|
||||
# query status with output json format
|
||||
kubevpn status -o json
|
||||
|
||||
# query status with output yaml format
|
||||
kubevpn status -o yaml
|
||||
`)),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
util.InitLoggerForClient(false)
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
client, err := daemon.GetClient(false).Status(
|
||||
var clusterIDs []string
|
||||
if aliasName != "" {
|
||||
configs, err := ParseAndGet(localFile, remoteAddr, aliasName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, config := range configs {
|
||||
clusterID, err := GetClusterIDByConfig(cmd, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clusterIDs = append(clusterIDs, clusterID)
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := daemon.GetClient(false).Status(
|
||||
cmd.Context(),
|
||||
&rpc.StatusRequest{},
|
||||
&rpc.StatusRequest{
|
||||
ClusterIDs: clusterIDs,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, client.GetMessage())
|
||||
output, err := genOutput(resp, format)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, _ = fmt.Fprint(os.Stdout, output)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVar(&aliasName, "alias", "", "Alias name, query connect status by alias config name")
|
||||
cmd.Flags().StringVarP(&localFile, "file", "f", config.GetConfigFilePath(), "Config file location")
|
||||
cmd.Flags().StringVarP(&remoteAddr, "remote", "r", "", "Remote config file, eg: https://raw.githubusercontent.com/kubenetworks/kubevpn/master/pkg/config/config.yaml")
|
||||
cmd.Flags().StringVarP(&format, "output", "o", FormatTable, fmt.Sprintf("Output format. One of: (%s, %s, %s)", FormatJson, FormatYaml, FormatTable))
|
||||
return cmd
|
||||
}
|
||||
|
||||
func genOutput(status *rpc.StatusResponse, format string) (string, error) {
|
||||
switch format {
|
||||
case FormatJson:
|
||||
if len(status.List) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
marshal, err := json.Marshal(status.List)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(marshal), nil
|
||||
|
||||
case FormatYaml:
|
||||
if len(status.List) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
marshal, err := yaml.Marshal(status.List)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(marshal), nil
|
||||
default:
|
||||
var sb = new(bytes.Buffer)
|
||||
w := printers.GetNewTabWriter(sb)
|
||||
genConnectMsg(w, status.List)
|
||||
genProxyMsg(w, status.List)
|
||||
genCloneMsg(w, status.List)
|
||||
_ = w.Flush()
|
||||
return sb.String(), nil
|
||||
}
|
||||
}
|
||||
|
||||
func genConnectMsg(w *tabwriter.Writer, status []*rpc.Status) {
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", "ID", "Mode", "Cluster", "Kubeconfig", "Namespace", "Status", "Netif")
|
||||
for _, c := range status {
|
||||
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%s\t%s\n", c.ID, c.Mode, c.Cluster, c.Kubeconfig, c.Namespace, c.Status, c.Netif)
|
||||
}
|
||||
}
|
||||
|
||||
func genProxyMsg(w *tabwriter.Writer, list []*rpc.Status) {
|
||||
var needsPrint bool
|
||||
for _, status := range list {
|
||||
if len(status.ProxyList) != 0 {
|
||||
needsPrint = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !needsPrint {
|
||||
return
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintf(w, "\n")
|
||||
w.SetRememberedWidths(nil)
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", "ID", "Name", "Headers", "IP", "PortMap", "CurrentPC")
|
||||
for _, c := range list {
|
||||
for _, proxy := range c.ProxyList {
|
||||
for _, rule := range proxy.RuleList {
|
||||
var headers []string
|
||||
for k, v := range rule.Headers {
|
||||
headers = append(headers, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
if len(headers) == 0 {
|
||||
headers = []string{"*"}
|
||||
}
|
||||
var portmap []string
|
||||
for k, v := range rule.PortMap {
|
||||
portmap = append(portmap, fmt.Sprintf("%d->%d", k, v))
|
||||
}
|
||||
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%v\n",
|
||||
c.ID,
|
||||
proxy.Workload,
|
||||
strings.Join(headers, ","),
|
||||
rule.LocalTunIPv4,
|
||||
strings.Join(portmap, ","),
|
||||
rule.CurrentDevice,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func genCloneMsg(w *tabwriter.Writer, list []*rpc.Status) {
|
||||
var needsPrint bool
|
||||
for _, status := range list {
|
||||
if len(status.CloneList) != 0 {
|
||||
needsPrint = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !needsPrint {
|
||||
return
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprintf(w, "\n")
|
||||
w.SetRememberedWidths(nil)
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", "ID", "Name", "Headers", "ToName", "ToKubeconfig", "ToNamespace", "SyncthingGUI")
|
||||
for _, c := range list {
|
||||
for _, clone := range c.CloneList {
|
||||
//_, _ = fmt.Fprintf(w, "%s\n", clone.Workload)
|
||||
for _, rule := range clone.RuleList {
|
||||
var headers []string
|
||||
for k, v := range rule.Headers {
|
||||
headers = append(headers, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
if len(headers) == 0 {
|
||||
headers = []string{"*"}
|
||||
}
|
||||
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%s\t%s\n",
|
||||
c.ID,
|
||||
clone.Workload,
|
||||
strings.Join(headers, ","),
|
||||
rule.DstWorkload,
|
||||
rule.DstKubeconfig,
|
||||
rule.DstNamespace,
|
||||
clone.SyncthingGUIAddr,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func GetClusterIDByConfig(cmd *cobra.Command, config Config) (string, error) {
|
||||
flags := flag.NewFlagSet("", flag.ContinueOnError)
|
||||
var sshConf = &pkgssh.SshConfig{}
|
||||
pkgssh.AddSshFlags(flags, sshConf)
|
||||
handler.AddExtraRoute(flags, &handler.ExtraRouteInfo{})
|
||||
configFlags := genericclioptions.NewConfigFlags(false).WithDeprecatedPasswordFlag()
|
||||
configFlags.AddFlags(flags)
|
||||
matchVersionFlags := cmdutil.NewMatchVersionFlags(&warp{ConfigFlags: configFlags})
|
||||
matchVersionFlags.AddFlags(flags)
|
||||
factory := cmdutil.NewFactory(matchVersionFlags)
|
||||
|
||||
for _, command := range cmd.Parent().Commands() {
|
||||
command.Flags().VisitAll(func(f *flag.Flag) {
|
||||
if flags.Lookup(f.Name) == nil && flags.ShorthandLookup(f.Shorthand) == nil {
|
||||
flags.AddFlag(f)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
err := flags.ParseAll(config.Flags, func(flag *flag.Flag, value string) error {
|
||||
_ = flags.Set(flag.Name, value)
|
||||
return nil
|
||||
})
|
||||
bytes, ns, err := util.ConvertToKubeConfigBytes(factory)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
file, err := util.ConvertToTempKubeconfigFile(bytes)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
flags = flag.NewFlagSet("", flag.ContinueOnError)
|
||||
flags.AddFlag(&flag.Flag{
|
||||
Name: "kubeconfig",
|
||||
DefValue: file,
|
||||
})
|
||||
flags.AddFlag(&flag.Flag{
|
||||
Name: "namespace",
|
||||
DefValue: ns,
|
||||
})
|
||||
var path string
|
||||
path, err = pkgssh.SshJump(cmd.Context(), sshConf, flags, false)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var c = &handler.ConnectOptions{}
|
||||
err = c.InitClient(util.InitFactoryByPath(path, ns))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
err = c.InitDHCP(cmd.Context())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return c.GetClusterID(), nil
|
||||
}
|
||||
|
||||
218
cmd/kubevpn/cmds/status_test.go
Normal file
218
cmd/kubevpn/cmds/status_test.go
Normal file
@@ -0,0 +1,218 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
)
|
||||
|
||||
func TestPrintProxyAndClone(t *testing.T) {
|
||||
var status = &rpc.StatusResponse{
|
||||
List: []*rpc.Status{
|
||||
{
|
||||
ID: 0,
|
||||
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
Cluster: "ccm6epn7qvcplhs3o8p00",
|
||||
Mode: "full",
|
||||
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
Namespace: "vke-system",
|
||||
Status: "Connected",
|
||||
Netif: "utun4",
|
||||
ProxyList: []*rpc.Proxy{
|
||||
{
|
||||
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
Cluster: "ccm6epn7qvcplhs3o8p00",
|
||||
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
Namespace: "vke-system",
|
||||
Workload: "deployment.apps/authors",
|
||||
RuleList: []*rpc.ProxyRule{
|
||||
{
|
||||
Headers: map[string]string{"user": "naison"},
|
||||
LocalTunIPv4: "223.254.0.103",
|
||||
LocalTunIPv6: "efff:ffff:ffff:ffff:ffff:ffff:ffff:999d",
|
||||
CurrentDevice: false,
|
||||
PortMap: map[int32]int32{8910: 8910},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
CloneList: []*rpc.Clone{
|
||||
{
|
||||
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
Cluster: "ccm6epn7qvcplhs3o8p00",
|
||||
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
Namespace: "vke-system",
|
||||
Workload: "deployment.apps/ratings",
|
||||
RuleList: []*rpc.CloneRule{{
|
||||
Headers: map[string]string{"user": "naison"},
|
||||
DstClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
DstCluster: "ccm6epn7qvcplhs3o8p00",
|
||||
DstKubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
DstNamespace: "vke-system",
|
||||
DstWorkload: "deployment.apps/ratings-clone-5ngn6",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: 1,
|
||||
ClusterID: "c08cae70-0021-46c9-a1dc-38e6a2f11443",
|
||||
Cluster: "ccnepblsebp68ivej4a20",
|
||||
Mode: "full",
|
||||
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
|
||||
Namespace: "vke-system",
|
||||
Status: "Connected",
|
||||
Netif: "utun5",
|
||||
ProxyList: []*rpc.Proxy{},
|
||||
CloneList: []*rpc.Clone{},
|
||||
},
|
||||
},
|
||||
}
|
||||
output, err := genOutput(status, FormatTable)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fmt.Println(output)
|
||||
}
|
||||
|
||||
func TestPrintProxy(t *testing.T) {
|
||||
var status = &rpc.StatusResponse{
|
||||
List: []*rpc.Status{
|
||||
{
|
||||
ID: 0,
|
||||
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
Cluster: "ccm6epn7qvcplhs3o8p00",
|
||||
Mode: "full",
|
||||
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
Namespace: "vke-system",
|
||||
Status: "Connected",
|
||||
Netif: "utun4",
|
||||
ProxyList: []*rpc.Proxy{
|
||||
{
|
||||
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
Cluster: "ccm6epn7qvcplhs3o8p00",
|
||||
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
Namespace: "vke-system",
|
||||
Workload: "deployment.apps/authors",
|
||||
RuleList: []*rpc.ProxyRule{
|
||||
{
|
||||
Headers: map[string]string{"user": "naison"},
|
||||
LocalTunIPv4: "223.254.0.103",
|
||||
LocalTunIPv6: "efff:ffff:ffff:ffff:ffff:ffff:ffff:999d",
|
||||
CurrentDevice: false,
|
||||
PortMap: map[int32]int32{8910: 8910},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
CloneList: []*rpc.Clone{},
|
||||
},
|
||||
{
|
||||
ID: 1,
|
||||
ClusterID: "c08cae70-0021-46c9-a1dc-38e6a2f11443",
|
||||
Cluster: "ccnepblsebp68ivej4a20",
|
||||
Mode: "full",
|
||||
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
|
||||
Namespace: "vke-system",
|
||||
Status: "Connected",
|
||||
Netif: "utun5",
|
||||
ProxyList: []*rpc.Proxy{},
|
||||
CloneList: []*rpc.Clone{},
|
||||
},
|
||||
},
|
||||
}
|
||||
output, err := genOutput(status, FormatTable)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fmt.Println(output)
|
||||
}
|
||||
|
||||
func TestPrintClone(t *testing.T) {
|
||||
var status = &rpc.StatusResponse{
|
||||
List: []*rpc.Status{
|
||||
{
|
||||
ID: 0,
|
||||
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
Cluster: "ccm6epn7qvcplhs3o8p00",
|
||||
Mode: "full",
|
||||
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
Namespace: "vke-system",
|
||||
Status: "Connected",
|
||||
Netif: "utun4",
|
||||
ProxyList: []*rpc.Proxy{},
|
||||
CloneList: []*rpc.Clone{
|
||||
{
|
||||
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
Cluster: "ccm6epn7qvcplhs3o8p00",
|
||||
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
Namespace: "vke-system",
|
||||
Workload: "deployment.apps/ratings",
|
||||
RuleList: []*rpc.CloneRule{{
|
||||
Headers: map[string]string{"user": "naison"},
|
||||
DstClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
DstCluster: "ccm6epn7qvcplhs3o8p00",
|
||||
DstKubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
DstNamespace: "vke-system",
|
||||
DstWorkload: "deployment.apps/ratings-clone-5ngn6",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: 1,
|
||||
ClusterID: "c08cae70-0021-46c9-a1dc-38e6a2f11443",
|
||||
Cluster: "ccnepblsebp68ivej4a20",
|
||||
Mode: "full",
|
||||
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
|
||||
Namespace: "vke-system",
|
||||
Status: "Connected",
|
||||
Netif: "utun5",
|
||||
ProxyList: []*rpc.Proxy{},
|
||||
CloneList: []*rpc.Clone{},
|
||||
},
|
||||
},
|
||||
}
|
||||
output, err := genOutput(status, FormatTable)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fmt.Println(output)
|
||||
}
|
||||
|
||||
func TestPrint(t *testing.T) {
|
||||
var status = &rpc.StatusResponse{
|
||||
List: []*rpc.Status{
|
||||
{
|
||||
ID: 0,
|
||||
ClusterID: "ac6d8dfb-1d23-4f2a-b11e-9c775fd22b84",
|
||||
Cluster: "ccm6epn7qvcplhs3o8p00",
|
||||
Mode: "full",
|
||||
Kubeconfig: "/Users/bytedance/.kube/test-feiyan-config-private-new",
|
||||
Namespace: "vke-system",
|
||||
Status: "Connected",
|
||||
Netif: "utun4",
|
||||
ProxyList: []*rpc.Proxy{},
|
||||
CloneList: []*rpc.Clone{},
|
||||
},
|
||||
{
|
||||
ID: 1,
|
||||
ClusterID: "c08cae70-0021-46c9-a1dc-38e6a2f11443",
|
||||
Cluster: "ccnepblsebp68ivej4a20",
|
||||
Mode: "full",
|
||||
Kubeconfig: "/Users/bytedance/.kube/dev_fy_config_new",
|
||||
Namespace: "vke-system",
|
||||
Status: "Connected",
|
||||
Netif: "utun5",
|
||||
ProxyList: []*rpc.Proxy{},
|
||||
CloneList: []*rpc.Clone{},
|
||||
},
|
||||
},
|
||||
}
|
||||
output, err := genOutput(status, FormatTable)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fmt.Println(output)
|
||||
}
|
||||
30
cmd/kubevpn/cmds/syncthing.go
Normal file
30
cmd/kubevpn/cmds/syncthing.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/syncthing"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdSyncthing(_ cmdutil.Factory) *cobra.Command {
|
||||
var detach bool
|
||||
var dir string
|
||||
cmd := &cobra.Command{
|
||||
Use: "syncthing",
|
||||
Short: i18n.T("Syncthing"),
|
||||
Long: templates.LongDesc(i18n.T(`Syncthing`)),
|
||||
RunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
go util.StartupPProfForServer(0)
|
||||
return syncthing.StartServer(cmd.Context(), detach, dir)
|
||||
},
|
||||
Hidden: true,
|
||||
DisableFlagsInUseLine: true,
|
||||
}
|
||||
cmd.Flags().StringVar(&dir, "dir", "", "dir")
|
||||
cmd.Flags().BoolVarP(&detach, "detach", "d", false, "Run syncthing in background")
|
||||
return cmd
|
||||
}
|
||||
@@ -4,56 +4,55 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/oauth2"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/upgrade"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/upgrade"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func CmdUpgrade(_ cmdutil.Factory) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "upgrade",
|
||||
Short: "Upgrade KubeVPN version",
|
||||
Long: `Upgrade KubeVPN version, automatically download latest KubeVPN from GitHub`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
Short: i18n.T("Upgrade kubevpn client to latest version"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Upgrade kubevpn client to latest version, automatically download and install latest kubevpn from GitHub.
|
||||
disconnect all from k8s cluster, leave all resources, remove all clone resource, and then,
|
||||
upgrade local daemon grpc server to latest version.
|
||||
`)),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
const (
|
||||
envLatestUrl = "KUBEVPN_LATEST_VERSION_URL"
|
||||
)
|
||||
util.InitLoggerForClient(false)
|
||||
var client = http.DefaultClient
|
||||
if config.GitHubOAuthToken != "" {
|
||||
client = oauth2.NewClient(cmd.Context(), oauth2.StaticTokenSource(&oauth2.Token{AccessToken: config.GitHubOAuthToken, TokenType: "Bearer"}))
|
||||
}
|
||||
latestVersion, latestCommit, url, err := util.GetManifest(client, runtime.GOOS, runtime.GOARCH)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
err = upgrade.Main(cmd.Context(), client, latestVersion, latestCommit, url)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Fprint(os.Stdout, "Upgrade daemon...")
|
||||
for _, isSudo := range []bool{false, true} {
|
||||
cli := daemon.GetClient(isSudo)
|
||||
if cli != nil {
|
||||
var response *rpc.UpgradeResponse
|
||||
response, err = cli.Upgrade(cmd.Context(), &rpc.UpgradeRequest{
|
||||
ClientVersion: latestVersion,
|
||||
ClientCommitId: latestCommit,
|
||||
})
|
||||
if err == nil && !response.NeedUpgrade {
|
||||
// do nothing
|
||||
} else {
|
||||
_ = quit(cmd.Context(), isSudo)
|
||||
}
|
||||
var url = os.Getenv(envLatestUrl)
|
||||
if url == "" {
|
||||
var latestVersion string
|
||||
var needsUpgrade bool
|
||||
var err error
|
||||
url, latestVersion, needsUpgrade, err = upgrade.NeedsUpgrade(cmd.Context(), client, config.Version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !needsUpgrade {
|
||||
_, _ = fmt.Fprintf(os.Stdout, "Already up to date, don't needs to upgrade, version: %s", latestVersion)
|
||||
return nil
|
||||
}
|
||||
_, _ = fmt.Fprintf(os.Stdout, "Current version is: %s less than latest version: %s, needs to upgrade", config.Version, latestVersion)
|
||||
_ = os.Setenv(envLatestUrl, url)
|
||||
_ = quit(cmd.Context(), false)
|
||||
_ = quit(cmd.Context(), true)
|
||||
}
|
||||
err = daemon.StartupDaemon(cmd.Context())
|
||||
fmt.Fprint(os.Stdout, "done")
|
||||
return upgrade.Main(cmd.Context(), client, url)
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
|
||||
@@ -9,10 +9,12 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
)
|
||||
|
||||
// --ldflags -X
|
||||
@@ -33,12 +35,14 @@ func reformatDate(buildTime string) string {
|
||||
func CmdVersion(cmdutil.Factory) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Print the client version information",
|
||||
Long: `Print the client version information`,
|
||||
Short: i18n.T("Print the client version information"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Print the client version information
|
||||
`)),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
fmt.Printf("KubeVPN: CLI\n")
|
||||
fmt.Printf(" Version: %s\n", config.Version)
|
||||
fmt.Printf(" DaemonVersion: %s\n", getDaemonVersion())
|
||||
fmt.Printf(" Daemon: %s\n", getDaemonVersion())
|
||||
fmt.Printf(" Image: %s\n", config.Image)
|
||||
fmt.Printf(" Branch: %s\n", Branch)
|
||||
fmt.Printf(" Git commit: %s\n", config.GitCommit)
|
||||
|
||||
@@ -3,23 +3,27 @@ package cmds
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/webhook"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/webhook"
|
||||
)
|
||||
|
||||
func CmdWebhook(f cmdutil.Factory) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "webhook",
|
||||
Hidden: true,
|
||||
Short: "Starts a HTTP server, useful for creating MutatingAdmissionWebhook",
|
||||
Long: `Starts a HTTP server, useful for creating MutatingAdmissionWebhook.
|
||||
After deploying it to Kubernetes cluster, the Administrator needs to create a MutatingWebhookConfiguration
|
||||
in the Kubernetes cluster to register remote webhook admission controllers.`,
|
||||
Short: i18n.T("Starts a HTTP server, useful for creating MutatingAdmissionWebhook"),
|
||||
Long: templates.LongDesc(i18n.T(`
|
||||
Starts a HTTP server, useful for creating MutatingAdmissionWebhook.
|
||||
After deploying it to Kubernetes cluster, the Administrator needs to create a MutatingWebhookConfiguration
|
||||
in the Kubernetes cluster to register remote webhook admission controllers.
|
||||
`)),
|
||||
Args: cobra.MaximumNArgs(0),
|
||||
PreRun: func(cmd *cobra.Command, args []string) {
|
||||
util.InitLogger(true)
|
||||
go util.StartupPProf(0)
|
||||
util.InitLoggerForServer(true)
|
||||
go util.StartupPProfForServer(0)
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return webhook.Main(f)
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth"
|
||||
_ "net/http/pprof"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/cmd/kubevpn/cmds"
|
||||
"github.com/wencaiwulue/kubevpn/v2/cmd/kubevpn/cmds"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
35
docs/en/Architecture.md
Normal file
35
docs/en/Architecture.md
Normal file
@@ -0,0 +1,35 @@
|
||||
## Architecture
|
||||
### Connect mode
|
||||
create a tunnel with port-forward, add route to virtual interface, like tun0, forward traffic though tunnel to remote traffic manager.
|
||||

|
||||
|
||||
### Reverse mode
|
||||
base on connect mode, inject a container to controller, use iptables to block all inbound traffic and forward to local though tunnel.
|
||||
|
||||
```text
|
||||
┌──────────┐ ┌─────────┌──────────┐ ┌──────────┐
|
||||
│ ServiceA ├───►│ sidecar │ ServiceB │ ┌─►│ ServiceC │
|
||||
└──────────┘ └────┌────┘──────────┘ │ └──────────┘
|
||||
│ │
|
||||
│ │ cloud
|
||||
─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┘─ ─ ─ ─ ─ ─ ─ ─ ─┘ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─
|
||||
│ │ local
|
||||
┌───┘──────┐ │
|
||||
│ ServiceB'├──────────┘
|
||||
└──────────┘
|
||||
```
|
||||
|
||||
### Mesh mode
|
||||
base on reverse mode, using envoy as proxy, if headers have special key-value pair, it will route to local machine, if not, use origin service.
|
||||
```text
|
||||
┌──────────┐ ┌─────────┌────────────┐ ┌──────────┐
|
||||
│ ServiceA ├───►│ sidecar ├─► ServiceB │─►┌─►│ ServiceC │
|
||||
└──────────┘ └────┌────┘────────────┘ │ └──────────┘
|
||||
│ │ cloud
|
||||
─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─┘─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┘ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─
|
||||
│ │ local
|
||||
header: foo=bar │
|
||||
┌───┘──────┐ │
|
||||
│ ServiceB'├─────────────┘
|
||||
└──────────┘
|
||||
```
|
||||
4
docs/en/images/connect-mode.drawio.svg
Normal file
4
docs/en/images/connect-mode.drawio.svg
Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 106 KiB |
363
go.mod
363
go.mod
@@ -1,215 +1,298 @@
|
||||
module github.com/wencaiwulue/kubevpn
|
||||
module github.com/wencaiwulue/kubevpn/v2
|
||||
|
||||
go 1.20
|
||||
go 1.23.2
|
||||
|
||||
require (
|
||||
github.com/cilium/ipam v0.0.0-20220824141044-46ef3d556735
|
||||
github.com/docker/cli v23.0.1+incompatible
|
||||
github.com/docker/docker v23.0.1+incompatible
|
||||
github.com/docker/go-connections v0.4.0
|
||||
github.com/docker/libcontainer v2.2.1+incompatible
|
||||
github.com/envoyproxy/go-control-plane v0.10.3
|
||||
github.com/fsnotify/fsnotify v1.6.0
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/miekg/dns v1.1.50
|
||||
github.com/moby/term v0.0.0-20221205130635-1aeaba878587
|
||||
github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/sirupsen/logrus v1.9.0
|
||||
github.com/spf13/cobra v1.6.1
|
||||
golang.org/x/net v0.8.0
|
||||
golang.org/x/sys v0.6.0
|
||||
golang.zx2c4.com/wireguard v0.0.0-20220920152132-bb719d3a6e2c
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/grpc v1.53.0-dev.0.20230123225046-4075ef07c5d5
|
||||
google.golang.org/protobuf v1.30.0
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
k8s.io/api v0.26.3
|
||||
k8s.io/apimachinery v0.26.3
|
||||
k8s.io/cli-runtime v0.26.1
|
||||
k8s.io/client-go v0.26.3
|
||||
k8s.io/klog/v2 v2.90.1 // indirect
|
||||
k8s.io/kubectl v0.26.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/containerd/containerd v1.5.18
|
||||
github.com/cilium/ipam v0.0.0-20230509084518-fd66eae7909b
|
||||
github.com/containerd/containerd v1.7.14
|
||||
github.com/containernetworking/cni v1.1.2
|
||||
github.com/coredns/caddy v1.1.1
|
||||
github.com/coredns/coredns v1.10.1
|
||||
github.com/docker/distribution v2.8.1+incompatible
|
||||
github.com/coredns/coredns v1.11.2
|
||||
github.com/distribution/reference v0.6.0
|
||||
github.com/docker/cli v26.0.0+incompatible
|
||||
github.com/docker/docker v26.1.4+incompatible
|
||||
github.com/docker/go-connections v0.5.0
|
||||
github.com/docker/libcontainer v2.2.1+incompatible
|
||||
github.com/envoyproxy/go-control-plane v0.12.0
|
||||
github.com/fsnotify/fsnotify v1.7.0
|
||||
github.com/google/gopacket v1.1.19
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/hashicorp/go-version v1.6.0
|
||||
github.com/hpcloud/tail v1.0.0
|
||||
github.com/jcmturner/gofork v1.7.6
|
||||
github.com/jcmturner/gokrb5/v8 v8.4.4
|
||||
github.com/kevinburke/ssh_config v1.2.0
|
||||
github.com/libp2p/go-netroute v0.2.1
|
||||
github.com/mattbaird/jsonpatch v0.0.0-20200820163806-098863c1fc24
|
||||
github.com/prometheus-community/pro-bing v0.1.0
|
||||
github.com/schollz/progressbar/v3 v3.13.0
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de
|
||||
github.com/mattbaird/jsonpatch v0.0.0-20240118010651-0ba75a80ca38
|
||||
github.com/miekg/dns v1.1.58
|
||||
github.com/moby/sys/signal v0.7.0
|
||||
github.com/moby/term v0.5.0
|
||||
github.com/opencontainers/image-spec v1.1.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus-community/pro-bing v0.4.0
|
||||
github.com/schollz/progressbar/v3 v3.14.2
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
go.uber.org/automaxprocs v1.5.1
|
||||
golang.org/x/crypto v0.2.0
|
||||
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090
|
||||
golang.org/x/oauth2 v0.6.0
|
||||
golang.org/x/sync v0.1.0
|
||||
golang.org/x/text v0.8.0
|
||||
golang.org/x/time v0.3.0
|
||||
golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224
|
||||
gvisor.dev/gvisor v0.0.0-20230603040744-5c9219dedd33
|
||||
k8s.io/utils v0.0.0-20230313181309-38a27ef9d749
|
||||
sigs.k8s.io/controller-runtime v0.14.5
|
||||
sigs.k8s.io/kustomize/api v0.12.1
|
||||
sigs.k8s.io/yaml v1.3.0
|
||||
github.com/syncthing/syncthing v1.27.12
|
||||
github.com/thejerf/suture/v4 v4.0.5
|
||||
go.uber.org/automaxprocs v1.5.3
|
||||
golang.org/x/crypto v0.28.0
|
||||
golang.org/x/net v0.30.0
|
||||
golang.org/x/oauth2 v0.21.0
|
||||
golang.org/x/sync v0.8.0
|
||||
golang.org/x/sys v0.26.0
|
||||
golang.org/x/text v0.19.0
|
||||
golang.org/x/time v0.6.0
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
|
||||
golang.zx2c4.com/wireguard v0.0.0-20220920152132-bb719d3a6e2c
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3
|
||||
google.golang.org/grpc v1.62.1
|
||||
google.golang.org/protobuf v1.34.2
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||
gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987
|
||||
k8s.io/api v0.31.0-alpha.0
|
||||
k8s.io/apimachinery v0.31.0-alpha.0
|
||||
k8s.io/cli-runtime v0.29.3
|
||||
k8s.io/client-go v0.31.0-alpha.0
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
k8s.io/kubectl v0.29.3
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
|
||||
sigs.k8s.io/controller-runtime v0.18.4
|
||||
sigs.k8s.io/kustomize/api v0.16.0
|
||||
sigs.k8s.io/yaml v1.4.0
|
||||
tailscale.com v1.74.1
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/compute v1.15.1 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.2.3 // indirect
|
||||
cel.dev/expr v0.15.0 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.3.0 // indirect
|
||||
dario.cat/mergo v1.0.0 // indirect
|
||||
filippo.io/edwards25519 v1.1.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.28 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.29 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect
|
||||
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/obfuscate v0.0.0-20211129110424-6491aa3bf583 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0-rc.1 // indirect
|
||||
github.com/DataDog/datadog-go v4.8.2+incompatible // indirect
|
||||
github.com/DataDog/datadog-go/v5 v5.0.2 // indirect
|
||||
github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork // indirect
|
||||
github.com/DataDog/sketches-go v1.2.1 // indirect
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
|
||||
github.com/DataDog/appsec-internal-go v1.5.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.0 // indirect
|
||||
github.com/DataDog/datadog-go/v5 v5.5.0 // indirect
|
||||
github.com/DataDog/go-libddwaf/v2 v2.4.2 // indirect
|
||||
github.com/DataDog/go-sqllexer v0.0.11 // indirect
|
||||
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
|
||||
github.com/DataDog/sketches-go v1.4.4 // indirect
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.0 // indirect
|
||||
github.com/antonmedv/expr v1.12.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/Microsoft/hcsshim v0.12.2 // indirect
|
||||
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect
|
||||
github.com/antonmedv/expr v1.15.5 // indirect
|
||||
github.com/apparentlymart/go-cidr v1.1.0 // indirect
|
||||
github.com/aws/aws-sdk-go v1.44.194 // indirect
|
||||
github.com/aws/aws-sdk-go v1.51.12 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.13.0 // indirect
|
||||
github.com/calmh/incontainer v1.0.0 // indirect
|
||||
github.com/calmh/xdr v1.2.0 // indirect
|
||||
github.com/ccding/go-stun v0.1.5 // indirect
|
||||
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chai2010/gettext-go v1.0.2 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20230112175826-46e39c7b9b43 // indirect
|
||||
github.com/coreos/go-semver v0.3.0 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dgraph-io/ristretto v0.1.0 // indirect
|
||||
github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20240329184929-0c46c01016dc // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect
|
||||
github.com/coreos/go-semver v0.3.1 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa // indirect
|
||||
github.com/dimchansky/utfbom v1.1.1 // indirect
|
||||
github.com/dnstap/golang-dnstap v0.4.0 // indirect
|
||||
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.8.1 // indirect
|
||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.10.2 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/ebitengine/purego v0.7.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.0 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect
|
||||
github.com/evanphx/json-patch v5.9.0+incompatible // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
|
||||
github.com/farsightsec/golang-framestream v0.3.0 // indirect
|
||||
github.com/fatih/camelcase v1.0.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
|
||||
github.com/fvbommel/sortorder v1.0.2 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/fvbommel/sortorder v1.1.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.6.0 // indirect
|
||||
github.com/gaissmai/bart v0.11.1 // indirect
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
|
||||
github.com/go-errors/errors v1.5.1 // indirect
|
||||
github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 // indirect
|
||||
github.com/go-ldap/ldap/v3 v3.4.8 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
|
||||
github.com/golang/glog v1.0.0 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/google/btree v1.1.2 // indirect
|
||||
github.com/google/gnostic v0.6.9 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 // indirect
|
||||
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect
|
||||
github.com/google/s2a-go v0.1.7 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.7.0 // indirect
|
||||
github.com/gorilla/mux v1.8.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.12.3 // indirect
|
||||
github.com/gorilla/mux v1.8.1 // indirect
|
||||
github.com/gorilla/websocket v1.5.1 // indirect
|
||||
github.com/greatroar/blobloom v0.8.0 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
|
||||
github.com/imdario/mergo v0.3.14 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/go-uuid v1.0.3 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||
github.com/hdevalence/ed25519consensus v0.2.0 // indirect
|
||||
github.com/illarion/gonotify/v2 v2.0.3 // indirect
|
||||
github.com/imdario/mergo v0.3.16 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/infobloxopen/go-trees v0.0.0-20200715205103-96a057b8dfb9 // indirect
|
||||
github.com/infobloxopen/go-trees v0.0.0-20221216143356-66ceba885ebc // indirect
|
||||
github.com/jackpal/gateway v1.0.15 // indirect
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
|
||||
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
|
||||
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
|
||||
github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
|
||||
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 // indirect
|
||||
github.com/jsimonetti/rtnetlink v1.4.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.15.15 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/julienschmidt/httprouter v1.3.0 // indirect
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||
github.com/klauspost/compress v1.17.7 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.15 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mdlayher/netlink v1.7.2 // indirect
|
||||
github.com/mdlayher/socket v0.5.0 // indirect
|
||||
github.com/miekg/pkcs11 v1.1.1 // indirect
|
||||
github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75 // indirect
|
||||
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/mitchellh/mapstructure v1.4.2 // indirect
|
||||
github.com/moby/buildkit v0.9.0-rc1 // indirect
|
||||
github.com/moby/patternmatcher v0.5.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/patternmatcher v0.6.0 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/moby/sys/sequential v0.5.0 // indirect
|
||||
github.com/moby/sys/signal v0.7.0 // indirect
|
||||
github.com/moby/sys/symlink v0.2.0 // indirect
|
||||
github.com/moby/sys/user v0.1.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/onsi/ginkgo/v2 v2.20.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/runc v1.1.4 // indirect
|
||||
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
|
||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||
github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 // indirect
|
||||
github.com/openzipkin/zipkin-go v0.4.1 // indirect
|
||||
github.com/oschwald/geoip2-golang v1.8.0 // indirect
|
||||
github.com/oschwald/maxminddb-golang v1.10.0 // indirect
|
||||
github.com/openzipkin/zipkin-go v0.4.2 // indirect
|
||||
github.com/oschwald/geoip2-golang v1.11.0 // indirect
|
||||
github.com/oschwald/maxminddb-golang v1.13.1 // indirect
|
||||
github.com/outcaste-io/ristretto v0.2.3 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/philhofer/fwd v1.1.1 // indirect
|
||||
github.com/prometheus/client_golang v1.14.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/common v0.42.0 // indirect
|
||||
github.com/prometheus/procfs v0.9.0 // indirect
|
||||
github.com/rivo/uniseg v0.4.3 // indirect
|
||||
github.com/philhofer/fwd v1.1.2 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.21 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
|
||||
github.com/prometheus/client_golang v1.19.1 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.55.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/quic-go/quic-go v0.46.0 // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
|
||||
github.com/shirou/gopsutil/v4 v4.24.7 // indirect
|
||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/stretchr/testify v1.9.0 // indirect
|
||||
github.com/syncthing/notify v0.0.0-20210616190510-c6b7342338d2 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
|
||||
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 // indirect
|
||||
github.com/theupdateframework/notary v0.7.0 // indirect
|
||||
github.com/tinylib/msgp v1.1.6 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
|
||||
github.com/tinylib/msgp v1.1.9 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/vishvananda/netns v0.0.4 // indirect
|
||||
github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xlab/treeprint v1.1.0 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.7 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.7 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.13 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.13 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.13 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.starlark.net v0.0.0-20230112144946-fae38c8a6d89 // indirect
|
||||
go.uber.org/atomic v1.9.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
go.uber.org/zap v1.24.0 // indirect
|
||||
go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect
|
||||
go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect
|
||||
golang.org/x/mod v0.11.0 // indirect
|
||||
golang.org/x/term v0.6.0 // indirect
|
||||
golang.org/x/tools v0.6.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
|
||||
google.golang.org/api v0.109.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5 // indirect
|
||||
gopkg.in/DataDog/dd-trace-go.v1 v1.47.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
|
||||
go.opentelemetry.io/otel v1.24.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.24.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.22.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.24.0 // indirect
|
||||
go.starlark.net v0.0.0-20240329153429-e6e8e7ce1b7a // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/mock v0.4.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect
|
||||
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect
|
||||
golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect
|
||||
golang.org/x/mod v0.21.0 // indirect
|
||||
golang.org/x/term v0.25.0 // indirect
|
||||
golang.org/x/tools v0.26.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
||||
google.golang.org/api v0.172.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
|
||||
gopkg.in/DataDog/dd-trace-go.v1 v1.62.0 // indirect
|
||||
gopkg.in/evanphx/json-patch.v5 v5.9.0 // indirect
|
||||
gopkg.in/fsnotify.v1 v1.4.7 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
inet.af/netaddr v0.0.0-20220617031823-097006376321 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.26.3 // indirect
|
||||
k8s.io/component-base v0.26.3 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect
|
||||
k8s.io/apiextensions-apiserver v0.31.0-alpha.0 // indirect
|
||||
k8s.io/component-base v0.31.0-alpha.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20240322212309-b815d8309940 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.16.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
)
|
||||
|
||||
@@ -19,7 +19,6 @@ const (
|
||||
KeyDHCP6 = "DHCP6"
|
||||
KeyEnvoy = "ENVOY_CONFIG"
|
||||
KeyClusterIPv4POOLS = "IPv4_POOLS"
|
||||
KeyRefCount = "REF_COUNT"
|
||||
|
||||
// secret keys
|
||||
// TLSCertKey is the key for tls certificates in a TLS secret.
|
||||
@@ -31,8 +30,10 @@ const (
|
||||
ContainerSidecarEnvoyProxy = "envoy-proxy"
|
||||
ContainerSidecarControlPlane = "control-plane"
|
||||
ContainerSidecarVPN = "vpn"
|
||||
ContainerSidecarSyncthing = "syncthing"
|
||||
|
||||
VolumeEnvoyConfig = "envoy-config"
|
||||
VolumeSyncthing = "syncthing"
|
||||
|
||||
innerIPv4Pool = "223.254.0.100/16"
|
||||
// 原因:在docker环境中,设置docker的 gateway 和 subnet,不能 inner 的冲突,也不能和 docker的 172.17 冲突
|
||||
@@ -68,14 +69,8 @@ const (
|
||||
EnvPodNamespace = "POD_NAMESPACE"
|
||||
|
||||
// header name
|
||||
HeaderPodName = "POD_NAME"
|
||||
HeaderPodNamespace = "POD_NAMESPACE"
|
||||
HeaderIPv4 = "IPv4"
|
||||
HeaderIPv6 = "IPv6"
|
||||
|
||||
// api
|
||||
APIRentIP = "/rent/ip"
|
||||
APIReleaseIP = "/release/ip"
|
||||
HeaderIPv4 = "IPv4"
|
||||
HeaderIPv6 = "IPv6"
|
||||
|
||||
KUBECONFIG = "kubeconfig"
|
||||
|
||||
@@ -83,14 +78,12 @@ const (
|
||||
ManageBy = konfig.ManagedbyLabelKey
|
||||
|
||||
// pprof port
|
||||
PProfPort = 32345
|
||||
PProfPort = 32345
|
||||
SudoPProfPort = 33345
|
||||
PProfDir = "pprof"
|
||||
|
||||
// startup by KubeVPN
|
||||
EnvStartSudoKubeVPNByKubeVPN = "DEPTH_SIGNED_BY_NAISON"
|
||||
EnvSSHJump = "SSH_JUMP_BY_KUBEVPN"
|
||||
EnvSSHJump = "SSH_JUMP_BY_KUBEVPN"
|
||||
|
||||
// transport mode
|
||||
ConfigKubeVPNTransportEngine = "transport-engine"
|
||||
// hosts entry key word
|
||||
HostsKeyWord = "# Add by KubeVPN"
|
||||
)
|
||||
@@ -107,6 +100,8 @@ var (
|
||||
OriginImage = "docker.io/naison/kubevpn:" + Version
|
||||
|
||||
DaemonPath string
|
||||
HomePath string
|
||||
PprofPath string
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -126,14 +121,16 @@ func init() {
|
||||
DockerRouterIP, DockerCIDR, _ = net.ParseCIDR(dockerInnerIPv4Pool)
|
||||
dir, _ := os.UserHomeDir()
|
||||
DaemonPath = filepath.Join(dir, HOME, Daemon)
|
||||
HomePath = filepath.Join(dir, HOME)
|
||||
PprofPath = filepath.Join(dir, HOME, Daemon, PProfDir)
|
||||
}
|
||||
|
||||
var Debug bool
|
||||
|
||||
var (
|
||||
SmallBufferSize = (1 << 13) - 1 // 8KB small buffer
|
||||
MediumBufferSize = (1 << 15) - 1 // 32KB medium buffer
|
||||
LargeBufferSize = (1 << 16) - 1 // 64KB large buffer
|
||||
SmallBufferSize = 2 * 1024 // 2KB small buffer
|
||||
MediumBufferSize = 8 * 1024 // 8KB medium buffer
|
||||
LargeBufferSize = 32 * 1024 // 32KB large buffer
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -153,23 +150,28 @@ var (
|
||||
)
|
||||
|
||||
var (
|
||||
LPool = &sync.Pool{
|
||||
SPool = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return make([]byte, SmallBufferSize)
|
||||
},
|
||||
}
|
||||
MPool = sync.Pool{
|
||||
New: func() any {
|
||||
return make([]byte, MediumBufferSize)
|
||||
},
|
||||
}
|
||||
LPool = sync.Pool{
|
||||
New: func() any {
|
||||
return make([]byte, LargeBufferSize)
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
var SPool = sync.Pool{
|
||||
New: func() any {
|
||||
return make([]byte, 2)
|
||||
},
|
||||
}
|
||||
|
||||
type Engine string
|
||||
|
||||
const (
|
||||
EngineGvisor Engine = "gvisor"
|
||||
EngineMix Engine = "mix"
|
||||
EngineRaw Engine = "raw"
|
||||
EngineSystem Engine = "system"
|
||||
)
|
||||
|
||||
const Slogan = "Now you can access resources in the kubernetes cluster !"
|
||||
|
||||
20
pkg/config/config.yaml
Normal file
20
pkg/config/config.yaml
Normal file
@@ -0,0 +1,20 @@
|
||||
# Here is an example config kubevpn config file, please change it into your custom config.
|
||||
# Support three filed: Name,Needs,Flags
|
||||
# Exec command: kubevpn alias qa <===> kubevpn connect --kubeconfig=~/.kube/jumper_config --namespace=default
|
||||
# Simple is Good ~
|
||||
|
||||
Name: dev
|
||||
Needs: qa
|
||||
Flags:
|
||||
- connect
|
||||
- --kubeconfig=~/.kube/config
|
||||
- --namespace=default
|
||||
- --lite
|
||||
|
||||
---
|
||||
|
||||
Name: qa
|
||||
Flags:
|
||||
- connect
|
||||
- --kubeconfig=~/.kube/jumper_config
|
||||
- --namespace=default
|
||||
@@ -1,6 +1,12 @@
|
||||
package config
|
||||
|
||||
import "os"
|
||||
import (
|
||||
_ "embed"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
HOME = ".kubevpn"
|
||||
@@ -15,15 +21,65 @@ const (
|
||||
LogFile = "daemon.log"
|
||||
|
||||
KubeVPNRestorePatchKey = "kubevpn-probe-restore-patch"
|
||||
|
||||
ConfigFile = "config.yaml"
|
||||
)
|
||||
|
||||
//go:embed config.yaml
|
||||
var config []byte
|
||||
|
||||
func init() {
|
||||
err := os.MkdirAll(DaemonPath, os.ModePerm)
|
||||
err := os.MkdirAll(DaemonPath, 0755)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = os.Chmod(DaemonPath, os.ModePerm)
|
||||
err = os.Chmod(DaemonPath, 0755)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = os.MkdirAll(PprofPath, 0755)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = os.Chmod(PprofPath, 0755)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
path := filepath.Join(HomePath, ConfigFile)
|
||||
_, err = os.Stat(path)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
err = os.WriteFile(path, config, 0644)
|
||||
}
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func GetSockPath(isSudo bool) string {
|
||||
name := SockPath
|
||||
if isSudo {
|
||||
name = SudoSockPath
|
||||
}
|
||||
return filepath.Join(DaemonPath, name)
|
||||
}
|
||||
|
||||
func GetPidPath(isSudo bool) string {
|
||||
name := PidPath
|
||||
if isSudo {
|
||||
name = SudoPidPath
|
||||
}
|
||||
return filepath.Join(DaemonPath, name)
|
||||
}
|
||||
|
||||
func GetSyncthingPath() string {
|
||||
return filepath.Join(DaemonPath, SyncthingDir)
|
||||
}
|
||||
|
||||
func GetSyncthingGUIPath() string {
|
||||
return filepath.Join(DaemonPath, SyncthingDir, SyncthingGUIDir)
|
||||
}
|
||||
|
||||
func GetConfigFilePath() string {
|
||||
return filepath.Join(HomePath, ConfigFile)
|
||||
}
|
||||
|
||||
97
pkg/config/syncthing.go
Normal file
97
pkg/config/syncthing.go
Normal file
@@ -0,0 +1,97 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
|
||||
"github.com/syncthing/syncthing/lib/protocol"
|
||||
)
|
||||
|
||||
const (
|
||||
SyncthingDir = "syncthing"
|
||||
|
||||
SyncthingGUIDir = "gui"
|
||||
|
||||
DefaultRemoteDir = "/kubevpn-data"
|
||||
|
||||
// EnvDisableSyncthingLog disable syncthing log, because it can not set output writer, only write os.Stdout or io.Discard
|
||||
EnvDisableSyncthingLog = "LOGGER_DISCARD"
|
||||
|
||||
SyncthingAPIKey = "kubevpn"
|
||||
)
|
||||
|
||||
var LocalCert tls.Certificate
|
||||
var RemoteCert tls.Certificate
|
||||
var LocalDeviceID protocol.DeviceID
|
||||
var RemoteDeviceID protocol.DeviceID
|
||||
|
||||
const (
|
||||
SyncthingLocalDeviceID = "BSNCBRY-ZI5HLYC-YH6544V-SQ3IDKT-4JQKING-ZGSW463-UKYEYCA-WO7ZHA3"
|
||||
SyncthingLocalCert = `-----BEGIN CERTIFICATE-----
|
||||
MIICHjCCAaSgAwIBAgIIHY0CWDFbXYEwCgYIKoZIzj0EAwIwSjESMBAGA1UEChMJ
|
||||
U3luY3RoaW5nMSAwHgYDVQQLExdBdXRvbWF0aWNhbGx5IEdlbmVyYXRlZDESMBAG
|
||||
A1UEAxMJc3luY3RoaW5nMCAXDTI0MDYxOTAwMDAwMFoYDzE4NTQwOTExMDA1MDUy
|
||||
WjBKMRIwEAYDVQQKEwlTeW5jdGhpbmcxIDAeBgNVBAsTF0F1dG9tYXRpY2FsbHkg
|
||||
R2VuZXJhdGVkMRIwEAYDVQQDEwlzeW5jdGhpbmcwdjAQBgcqhkjOPQIBBgUrgQQA
|
||||
IgNiAAQj1ov1aM0902yssK+3LPiGM1e1pUcVRuQjxl0nDX0fpZp3kdeWeiBm9AlE
|
||||
uwhAll/8QjoWBlNiEXyGFN9lOaIGf7ZIk7owPT6LiJXc1n3E6iqHWeSXcZ9dJL7M
|
||||
+E4eleajVTBTMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYI
|
||||
KwYBBQUHAwIwDAYDVR0TAQH/BAIwADAUBgNVHREEDTALgglzeW5jdGhpbmcwCgYI
|
||||
KoZIzj0EAwIDaAAwZQIwJI4KA9JgFXWU4dWq6JnIr+lAuIJ5ON2lFPrX8JWi1Z3F
|
||||
UXrvm80w+uR+1rLt6AdkAjEA3dpoBnS7tV21krEVmfX2vabtkzZidhXwuvP+1VJN
|
||||
By4EwZnuTLX3TqQx2TERF9rV
|
||||
-----END CERTIFICATE-----
|
||||
`
|
||||
SyncthingLocalKey = `-----BEGIN EC PRIVATE KEY-----
|
||||
MIGkAgEBBDAltfhZ8YO4CrPsvFRpU6P8lOspm5VXFGvJghSaDr4D/ub66+4HpTk9
|
||||
3TdgtbUSMSmgBwYFK4EEACKhZANiAAQj1ov1aM0902yssK+3LPiGM1e1pUcVRuQj
|
||||
xl0nDX0fpZp3kdeWeiBm9AlEuwhAll/8QjoWBlNiEXyGFN9lOaIGf7ZIk7owPT6L
|
||||
iJXc1n3E6iqHWeSXcZ9dJL7M+E4eleY=
|
||||
-----END EC PRIVATE KEY-----
|
||||
`
|
||||
)
|
||||
|
||||
const (
|
||||
SyncthingRemoteDeviceID = "OELB2JL-MIOW652-6JPBYPZ-POV3EBV-XEOW2Z2-I45QUGZ-QF5TT4P-Z2AH7AU"
|
||||
SyncthingRemoteCert = `-----BEGIN CERTIFICATE-----
|
||||
MIICHzCCAaWgAwIBAgIJAOGCLdtwnUShMAoGCCqGSM49BAMCMEoxEjAQBgNVBAoT
|
||||
CVN5bmN0aGluZzEgMB4GA1UECxMXQXV0b21hdGljYWxseSBHZW5lcmF0ZWQxEjAQ
|
||||
BgNVBAMTCXN5bmN0aGluZzAgFw0yNDA2MTkwMDAwMDBaGA8xODU0MDkxMTAwNTA1
|
||||
MlowSjESMBAGA1UEChMJU3luY3RoaW5nMSAwHgYDVQQLExdBdXRvbWF0aWNhbGx5
|
||||
IEdlbmVyYXRlZDESMBAGA1UEAxMJc3luY3RoaW5nMHYwEAYHKoZIzj0CAQYFK4EE
|
||||
ACIDYgAETwaM3V92D499uMXWFgGxdTUAvtp1tN7ePuJxt8W+FO0izG1fa7oU29Hp
|
||||
FU0Ohh3xwnQfEHIWzlKJllZ2ZbbXGOvcfr0Yfiir6ToKuN6185EA8RHkA+5HRtu5
|
||||
nw5wyWL/o1UwUzAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEG
|
||||
CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwFAYDVR0RBA0wC4IJc3luY3RoaW5nMAoG
|
||||
CCqGSM49BAMCA2gAMGUCMGxR9q9vjzm4GynOkoRIC+BQJN0zpiNusYUD6iYJNGe1
|
||||
wNH8jhOJEG+rjGracDZ6bgIxAIpyHv/rOAjEX7/wcafRqGTFhwXdRq0l3493aERd
|
||||
RCwqD8rbzP0QStVOCAE7xYt/sQ==
|
||||
-----END CERTIFICATE-----
|
||||
`
|
||||
SyncthingRemoteKey = `-----BEGIN EC PRIVATE KEY-----
|
||||
MIGkAgEBBDAKabOokHf64xAsIQp5PA1zZ5vLjfcgKcuikx/D0CP6c2Cf48a6eADE
|
||||
GWrY1Ng8UzOgBwYFK4EEACKhZANiAARPBozdX3YPj324xdYWAbF1NQC+2nW03t4+
|
||||
4nG3xb4U7SLMbV9ruhTb0ekVTQ6GHfHCdB8QchbOUomWVnZlttcY69x+vRh+KKvp
|
||||
Ogq43rXzkQDxEeQD7kdG27mfDnDJYv8=
|
||||
-----END EC PRIVATE KEY-----
|
||||
`
|
||||
)
|
||||
|
||||
func init() {
|
||||
var err error
|
||||
LocalCert, err = tls.X509KeyPair([]byte(SyncthingLocalCert), []byte(SyncthingLocalKey))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
RemoteCert, err = tls.X509KeyPair([]byte(SyncthingRemoteCert), []byte(SyncthingRemoteKey))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
LocalDeviceID, err = protocol.DeviceIDFromString(SyncthingLocalDeviceID)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
RemoteDeviceID, err = protocol.DeviceIDFromString(SyncthingRemoteDeviceID)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
|
||||
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
|
||||
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
|
||||
corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
|
||||
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
|
||||
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
|
||||
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
|
||||
@@ -38,6 +39,7 @@ type Rule struct {
|
||||
Headers map[string]string
|
||||
LocalTunIPv4 string
|
||||
LocalTunIPv6 string
|
||||
PortMap map[int32]int32
|
||||
}
|
||||
|
||||
func (a *Virtual) To() (
|
||||
@@ -55,9 +57,9 @@ func (a *Virtual) To() (
|
||||
var rr []*route.Route
|
||||
for _, rule := range a.Rules {
|
||||
for _, ip := range []string{rule.LocalTunIPv4, rule.LocalTunIPv6} {
|
||||
clusterName := fmt.Sprintf("%s_%v", ip, port.ContainerPort)
|
||||
clusterName := fmt.Sprintf("%s_%v", ip, rule.PortMap[port.ContainerPort])
|
||||
clusters = append(clusters, ToCluster(clusterName))
|
||||
endpoints = append(endpoints, ToEndPoint(clusterName, ip, port.ContainerPort))
|
||||
endpoints = append(endpoints, ToEndPoint(clusterName, ip, rule.PortMap[port.ContainerPort]))
|
||||
rr = append(rr, ToRoute(clusterName, rule.Headers))
|
||||
}
|
||||
}
|
||||
@@ -122,6 +124,9 @@ func ToCluster(clusterName string) *cluster.Cluster {
|
||||
LbPolicy: cluster.Cluster_ROUND_ROBIN,
|
||||
TypedExtensionProtocolOptions: map[string]*anypb.Any{
|
||||
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": anyFunc(&httpv3.HttpProtocolOptions{
|
||||
CommonHttpProtocolOptions: &corev3.HttpProtocolOptions{
|
||||
IdleTimeout: durationpb.New(time.Second * 10),
|
||||
},
|
||||
UpstreamProtocolOptions: &httpv3.HttpProtocolOptions_UseDownstreamProtocolConfig{
|
||||
UseDownstreamProtocolConfig: &httpv3.HttpProtocolOptions_UseDownstreamHttpConfig{},
|
||||
},
|
||||
|
||||
@@ -10,14 +10,15 @@ import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func Main(filename string, port uint, logger *log.Logger) {
|
||||
func Main(ctx context.Context, filename string, port uint, logger *log.Logger) error {
|
||||
snapshotCache := cache.NewSnapshotCache(false, cache.IDHash{}, logger)
|
||||
proc := NewProcessor(snapshotCache, logger)
|
||||
|
||||
errChan := make(chan error, 2)
|
||||
|
||||
go func() {
|
||||
ctx := context.Background()
|
||||
server := serverv3.NewServer(ctx, snapshotCache, nil)
|
||||
RunServer(ctx, server, port)
|
||||
errChan <- RunServer(ctx, server, port)
|
||||
}()
|
||||
|
||||
notifyCh := make(chan NotifyMessage, 100)
|
||||
@@ -29,20 +30,29 @@ func Main(filename string, port uint, logger *log.Logger) {
|
||||
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
log.Fatal(fmt.Errorf("failed to create file watcher, err: %v", err))
|
||||
return fmt.Errorf("failed to create file watcher: %v", err)
|
||||
}
|
||||
defer watcher.Close()
|
||||
if err = watcher.Add(filename); err != nil {
|
||||
log.Fatal(fmt.Errorf("failed to add file: %s to wather, err: %v", filename, err))
|
||||
err = watcher.Add(filename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to add file: %s to wather: %v", filename, err)
|
||||
}
|
||||
go func() {
|
||||
log.Fatal(Watch(watcher, filename, notifyCh))
|
||||
errChan <- Watch(watcher, filename, notifyCh)
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case msg := <-notifyCh:
|
||||
proc.ProcessFile(msg)
|
||||
err = proc.ProcessFile(msg)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to process file: %v", err)
|
||||
return err
|
||||
}
|
||||
case err = <-errChan:
|
||||
return err
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,20 +14,20 @@ import (
|
||||
"github.com/envoyproxy/go-control-plane/pkg/cache/types"
|
||||
"github.com/envoyproxy/go-control-plane/pkg/cache/v3"
|
||||
"github.com/envoyproxy/go-control-plane/pkg/resource/v3"
|
||||
"github.com/sirupsen/logrus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
utilcache "k8s.io/apimachinery/pkg/util/cache"
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
type Processor struct {
|
||||
cache cache.SnapshotCache
|
||||
logger *logrus.Logger
|
||||
logger *log.Logger
|
||||
version int64
|
||||
|
||||
expireCache *utilcache.Expiring
|
||||
}
|
||||
|
||||
func NewProcessor(cache cache.SnapshotCache, log *logrus.Logger) *Processor {
|
||||
func NewProcessor(cache cache.SnapshotCache, log *log.Logger) *Processor {
|
||||
return &Processor{
|
||||
cache: cache,
|
||||
logger: log,
|
||||
@@ -44,11 +44,11 @@ func (p *Processor) newVersion() string {
|
||||
return strconv.FormatInt(p.version, 10)
|
||||
}
|
||||
|
||||
func (p *Processor) ProcessFile(file NotifyMessage) {
|
||||
func (p *Processor) ProcessFile(file NotifyMessage) error {
|
||||
configList, err := ParseYaml(file.FilePath)
|
||||
if err != nil {
|
||||
p.logger.Errorf("error parsing yaml file: %+v", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
for _, config := range configList {
|
||||
if len(config.Uid) == 0 {
|
||||
@@ -76,21 +76,22 @@ func (p *Processor) ProcessFile(file NotifyMessage) {
|
||||
|
||||
if err != nil {
|
||||
p.logger.Errorf("snapshot inconsistency: %v, err: %v", snapshot, err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
if err = snapshot.Consistent(); err != nil {
|
||||
p.logger.Errorf("snapshot inconsistency: %v, err: %v", snapshot, err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
p.logger.Debugf("will serve snapshot %+v, nodeID: %s", snapshot, config.Uid)
|
||||
if err = p.cache.SetSnapshot(context.Background(), config.Uid, snapshot); err != nil {
|
||||
p.logger.Errorf("snapshot error %q for %v", err, snapshot)
|
||||
p.logger.Fatal(err)
|
||||
return err
|
||||
}
|
||||
|
||||
p.expireCache.Set(config.Uid, config, time.Minute*5)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ParseYaml(file string) ([]*Virtual, error) {
|
||||
|
||||
@@ -21,13 +21,13 @@ const (
|
||||
grpcMaxConcurrentStreams = 1000000
|
||||
)
|
||||
|
||||
func RunServer(ctx context.Context, server serverv3.Server, port uint) {
|
||||
func RunServer(ctx context.Context, server serverv3.Server, port uint) error {
|
||||
grpcServer := grpc.NewServer(grpc.MaxConcurrentStreams(grpcMaxConcurrentStreams))
|
||||
|
||||
var lc net.ListenConfig
|
||||
listener, err := lc.Listen(ctx, "tcp", fmt.Sprintf(":%d", port))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return err
|
||||
}
|
||||
|
||||
discoverygrpc.RegisterAggregatedDiscoveryServiceServer(grpcServer, server)
|
||||
@@ -38,8 +38,6 @@ func RunServer(ctx context.Context, server serverv3.Server, port uint) {
|
||||
secretservice.RegisterSecretDiscoveryServiceServer(grpcServer, server)
|
||||
runtimeservice.RegisterRuntimeDiscoveryServiceServer(grpcServer, server)
|
||||
|
||||
log.Infof("management server listening on %d", port)
|
||||
if err = grpcServer.Serve(listener); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Infof("Management server listening on %d", port)
|
||||
return grpcServer.Serve(listener)
|
||||
}
|
||||
|
||||
@@ -13,12 +13,12 @@ var (
|
||||
)
|
||||
|
||||
type Chain struct {
|
||||
Retries int
|
||||
retries int
|
||||
node *Node
|
||||
}
|
||||
|
||||
func NewChain(retry int, node *Node) *Chain {
|
||||
return &Chain{Retries: retry, node: node}
|
||||
return &Chain{retries: retry, node: node}
|
||||
}
|
||||
|
||||
func (c *Chain) Node() *Node {
|
||||
@@ -30,7 +30,7 @@ func (c *Chain) IsEmpty() bool {
|
||||
}
|
||||
|
||||
func (c *Chain) DialContext(ctx context.Context) (conn net.Conn, err error) {
|
||||
for i := 0; i < int(math.Max(float64(1), float64(c.Retries))); i++ {
|
||||
for i := 0; i < int(math.Max(float64(1), float64(c.retries))); i++ {
|
||||
conn, err = c.dial(ctx)
|
||||
if err == nil {
|
||||
break
|
||||
|
||||
27
pkg/core/gvisoricmpforwarder.go
Normal file
27
pkg/core/gvisoricmpforwarder.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/stack"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func ICMPForwarder(s *stack.Stack, ctx context.Context) func(stack.TransportEndpointID, *stack.PacketBuffer) bool {
|
||||
return func(id stack.TransportEndpointID, buffer *stack.PacketBuffer) bool {
|
||||
log.Debugf("[TUN-ICMP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
|
||||
id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
|
||||
)
|
||||
ctx1, cancelFunc := context.WithCancel(ctx)
|
||||
defer cancelFunc()
|
||||
ok, err := util.PingOnce(ctx1, id.RemoteAddress.String(), id.LocalAddress.String())
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-ICMP] Failed to ping dst %s from src %s",
|
||||
id.LocalAddress.String(), id.RemoteAddress.String(),
|
||||
)
|
||||
}
|
||||
return ok
|
||||
}
|
||||
}
|
||||
@@ -15,16 +15,8 @@ import (
|
||||
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
|
||||
)
|
||||
|
||||
var _ stack.UniqueID = (*id)(nil)
|
||||
|
||||
type id struct {
|
||||
}
|
||||
|
||||
func (i id) UniqueID() uint64 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
|
||||
nicID := tcpip.NICID(1)
|
||||
s := stack.New(stack.Options{
|
||||
NetworkProtocols: []stack.NetworkProtocolFactory{
|
||||
ipv4.NewProtocol,
|
||||
@@ -40,29 +32,30 @@ func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
|
||||
// Enable raw sockets for users with sufficient
|
||||
// privileges.
|
||||
RawFactory: raw.EndpointFactory{},
|
||||
UniqueID: id{},
|
||||
})
|
||||
// set handler for TCP UDP ICMP
|
||||
s.SetTransportProtocolHandler(tcp.ProtocolNumber, TCPForwarder(s))
|
||||
s.SetTransportProtocolHandler(udp.ProtocolNumber, UDPForwarder(s))
|
||||
s.SetTransportProtocolHandler(tcp.ProtocolNumber, TCPForwarder(s, ctx))
|
||||
s.SetTransportProtocolHandler(udp.ProtocolNumber, UDPForwarder(s, ctx))
|
||||
s.SetTransportProtocolHandler(header.ICMPv4ProtocolNumber, ICMPForwarder(s, ctx))
|
||||
s.SetTransportProtocolHandler(header.ICMPv6ProtocolNumber, ICMPForwarder(s, ctx))
|
||||
|
||||
s.SetRouteTable([]tcpip.Route{
|
||||
{
|
||||
Destination: header.IPv4EmptySubnet,
|
||||
NIC: 1,
|
||||
NIC: nicID,
|
||||
},
|
||||
{
|
||||
Destination: header.IPv6EmptySubnet,
|
||||
NIC: 1,
|
||||
NIC: nicID,
|
||||
},
|
||||
})
|
||||
|
||||
s.CreateNICWithOptions(1, packetsocket.New(tun), stack.NICOptions{
|
||||
s.CreateNICWithOptions(nicID, packetsocket.New(tun), stack.NICOptions{
|
||||
Disabled: false,
|
||||
Context: ctx,
|
||||
})
|
||||
s.SetPromiscuousMode(1, true)
|
||||
s.SetSpoofing(1, true)
|
||||
s.SetPromiscuousMode(nicID, true)
|
||||
s.SetSpoofing(nicID, true)
|
||||
|
||||
// Enable SACK Recovery.
|
||||
{
|
||||
@@ -93,17 +86,17 @@ func NewStack(ctx context.Context, tun stack.LinkEndpoint) *stack.Stack {
|
||||
|
||||
{
|
||||
if err := s.SetForwardingDefaultAndAllNICs(ipv4.ProtocolNumber, true); err != nil {
|
||||
log.Fatalf("set ipv4 forwarding: %v", err)
|
||||
log.Fatalf("Set IPv4 forwarding: %v", err)
|
||||
}
|
||||
if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, true); err != nil {
|
||||
log.Fatalf("set ipv6 forwarding: %v", err)
|
||||
log.Fatalf("Set IPv6 forwarding: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
option := tcpip.TCPModerateReceiveBufferOption(true)
|
||||
if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &option); err != nil {
|
||||
log.Fatalf("set TCP moderate receive buffer: %v", err)
|
||||
log.Fatalf("Set TCP moderate receive buffer: %v", err)
|
||||
}
|
||||
}
|
||||
return s
|
||||
|
||||
@@ -4,10 +4,12 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gvisor.dev/gvisor/pkg/tcpip"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
|
||||
@@ -15,44 +17,32 @@ import (
|
||||
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
|
||||
"gvisor.dev/gvisor/pkg/waiter"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
)
|
||||
|
||||
var GvisorTCPForwardAddr string
|
||||
|
||||
func TCPForwarder(s *stack.Stack) func(stack.TransportEndpointID, *stack.PacketBuffer) bool {
|
||||
func TCPForwarder(s *stack.Stack, ctx context.Context) func(stack.TransportEndpointID, *stack.PacketBuffer) bool {
|
||||
return tcp.NewForwarder(s, 0, 100000, func(request *tcp.ForwarderRequest) {
|
||||
defer request.Complete(false)
|
||||
id := request.ID()
|
||||
log.Debugf("[TUN-TCP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
|
||||
log.Debugf("[TUN-TCP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
|
||||
id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
|
||||
)
|
||||
|
||||
node, err := ParseNode(GvisorTCPForwardAddr)
|
||||
// 2, dial proxy
|
||||
host := id.LocalAddress.String()
|
||||
port := fmt.Sprintf("%d", id.LocalPort)
|
||||
var remote net.Conn
|
||||
var d = net.Dialer{Timeout: time.Second * 5}
|
||||
remote, err := d.DialContext(ctx, "tcp", net.JoinHostPort(host, port))
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-TCP] Error: can not parse gvisor tcp forward addr %s: %v", GvisorTCPForwardAddr, err)
|
||||
return
|
||||
}
|
||||
node.Client = &Client{
|
||||
Connector: GvisorTCPTunnelConnector(),
|
||||
Transporter: TCPTransporter(),
|
||||
}
|
||||
forwardChain := NewChain(5, node)
|
||||
|
||||
remote, err := forwardChain.dial(context.Background())
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-TCP] Error: failed to dial remote conn: %v", err)
|
||||
return
|
||||
}
|
||||
if err = WriteProxyInfo(remote, id); err != nil {
|
||||
log.Debugf("[TUN-TCP] Error: failed to write proxy info: %v", err)
|
||||
log.Errorf("[TUN-TCP] Failed to connect addr %s: %v", net.JoinHostPort(host, port), err)
|
||||
return
|
||||
}
|
||||
|
||||
w := &waiter.Queue{}
|
||||
endpoint, tErr := request.CreateEndpoint(w)
|
||||
if tErr != nil {
|
||||
log.Debugf("[TUN-TCP] Error: can not create endpoint: %v", tErr)
|
||||
log.Debugf("[TUN-TCP] Failed to create endpoint: %v", tErr)
|
||||
return
|
||||
}
|
||||
conn := gonet.NewTCPConn(w, endpoint)
|
||||
@@ -61,30 +51,30 @@ func TCPForwarder(s *stack.Stack) func(stack.TransportEndpointID, *stack.PacketB
|
||||
defer remote.Close()
|
||||
errChan := make(chan error, 2)
|
||||
go func() {
|
||||
i := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(i[:])
|
||||
written, err2 := io.CopyBuffer(remote, conn, i)
|
||||
log.Debugf("[TUN-TCP] Debug: write length %d data to remote", written)
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(buf[:])
|
||||
written, err2 := io.CopyBuffer(remote, conn, buf)
|
||||
log.Debugf("[TUN-TCP] Write length %d data to remote", written)
|
||||
errChan <- err2
|
||||
}()
|
||||
go func() {
|
||||
i := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(i[:])
|
||||
written, err2 := io.CopyBuffer(conn, remote, i)
|
||||
log.Debugf("[TUN-TCP] Debug: read length %d data from remote", written)
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(buf[:])
|
||||
written, err2 := io.CopyBuffer(conn, remote, buf)
|
||||
log.Debugf("[TUN-TCP] Read length %d data from remote", written)
|
||||
errChan <- err2
|
||||
}()
|
||||
err = <-errChan
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
log.Debugf("[TUN-TCP] Error: dsiconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err)
|
||||
log.Debugf("[TUN-TCP] Disconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err)
|
||||
}
|
||||
}).HandlePacket
|
||||
}
|
||||
|
||||
func WriteProxyInfo(conn net.Conn, id stack.TransportEndpointID) error {
|
||||
var b bytes.Buffer
|
||||
i := config.SPool.Get().([]byte)[:]
|
||||
defer config.SPool.Put(i[:])
|
||||
i := config.MPool.Get().([]byte)[:]
|
||||
defer config.MPool.Put(i[:])
|
||||
binary.BigEndian.PutUint16(i, id.LocalPort)
|
||||
b.Write(i)
|
||||
binary.BigEndian.PutUint16(i, id.RemotePort)
|
||||
|
||||
@@ -2,94 +2,63 @@ package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"time"
|
||||
"sync"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gvisor.dev/gvisor/pkg/tcpip"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
type gvisorTCPTunnelConnector struct {
|
||||
type gvisorTCPHandler struct {
|
||||
// map[srcIP]net.Conn
|
||||
routeMapTCP *sync.Map
|
||||
packetChan chan *datagramPacket
|
||||
}
|
||||
|
||||
func GvisorTCPTunnelConnector() Connector {
|
||||
return &gvisorTCPTunnelConnector{}
|
||||
}
|
||||
|
||||
func (c *gvisorTCPTunnelConnector) ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error) {
|
||||
switch con := conn.(type) {
|
||||
case *net.TCPConn:
|
||||
err := con.SetNoDelay(true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = con.SetKeepAlive(true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = con.SetKeepAlivePeriod(15 * time.Second)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
type gvisorTCPHandler struct{}
|
||||
|
||||
func GvisorTCPHandler() Handler {
|
||||
return &gvisorTCPHandler{}
|
||||
return &gvisorTCPHandler{
|
||||
routeMapTCP: RouteMapTCP,
|
||||
packetChan: TCPPacketChan,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *gvisorTCPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
|
||||
defer tcpConn.Close()
|
||||
log.Debugf("[TUN-TCP] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
|
||||
// 1, get proxy info
|
||||
endpointID, err := ParseProxyInfo(tcpConn)
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-TCP] Error: failed to parse proxy info: %v", err)
|
||||
return
|
||||
}
|
||||
log.Debugf("[TUN-TCP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
|
||||
endpointID.LocalPort, endpointID.LocalAddress.String(), endpointID.RemotePort, endpointID.RemoteAddress.String(),
|
||||
)
|
||||
// 2, dial proxy
|
||||
host := endpointID.LocalAddress.String()
|
||||
port := fmt.Sprintf("%d", endpointID.LocalPort)
|
||||
var remote net.Conn
|
||||
remote, err = net.DialTimeout("tcp", net.JoinHostPort(host, port), time.Second*5)
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-TCP] Error: failed to connect addr %s: %v", net.JoinHostPort(host, port), err)
|
||||
return
|
||||
}
|
||||
cancel, cancelFunc := context.WithCancel(ctx)
|
||||
defer cancelFunc()
|
||||
log.Debugf("[TCP] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
|
||||
h.handle(cancel, tcpConn)
|
||||
}
|
||||
|
||||
func (h *gvisorTCPHandler) handle(ctx context.Context, tcpConn net.Conn) {
|
||||
endpoint := channel.New(tcp.DefaultReceiveBufferSize, uint32(config.DefaultMTU), tcpip.GetRandMacAddr())
|
||||
errChan := make(chan error, 2)
|
||||
go func() {
|
||||
i := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(i[:])
|
||||
written, err2 := io.CopyBuffer(remote, tcpConn, i)
|
||||
log.Debugf("[TUN-TCP] Debug: write length %d data to remote", written)
|
||||
errChan <- err2
|
||||
h.readFromTCPConnWriteToEndpoint(ctx, tcpConn, endpoint)
|
||||
util.SafeClose(errChan)
|
||||
}()
|
||||
go func() {
|
||||
i := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(i[:])
|
||||
written, err2 := io.CopyBuffer(tcpConn, remote, i)
|
||||
log.Debugf("[TUN-TCP] Debug: read length %d data from remote", written)
|
||||
errChan <- err2
|
||||
h.readFromEndpointWriteToTCPConn(ctx, tcpConn, endpoint)
|
||||
util.SafeClose(errChan)
|
||||
}()
|
||||
err = <-errChan
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
log.Debugf("[TUN-TCP] Error: dsiconnect: %s >-<: %s: %v", tcpConn.LocalAddr(), remote.RemoteAddr(), err)
|
||||
stack := NewStack(ctx, sniffer.NewWithPrefix(endpoint, "[gVISOR] "))
|
||||
defer stack.Destroy()
|
||||
select {
|
||||
case <-errChan:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func GvisorTCPListener(addr string) (net.Listener, error) {
|
||||
log.Debug("gvisor tcp listen addr", addr)
|
||||
log.Debugf("Gvisor TCP listening addr: %s", addr)
|
||||
laddr, err := net.ResolveTCPAddr("tcp", addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
132
pkg/core/gvisortunendpoint.go
Executable file
132
pkg/core/gvisortunendpoint.go
Executable file
@@ -0,0 +1,132 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
|
||||
"github.com/google/gopacket/layers"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/ipv4"
|
||||
"golang.org/x/net/ipv6"
|
||||
"gvisor.dev/gvisor/pkg/buffer"
|
||||
"gvisor.dev/gvisor/pkg/tcpip"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/header"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/stack"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func (h *gvisorTCPHandler) readFromEndpointWriteToTCPConn(ctx context.Context, conn net.Conn, endpoint *channel.Endpoint) {
|
||||
tcpConn, _ := newGvisorFakeUDPTunnelConnOverTCP(ctx, conn)
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
pktBuffer := endpoint.ReadContext(ctx)
|
||||
if pktBuffer != nil {
|
||||
sniffer.LogPacket("[gVISOR] ", sniffer.DirectionSend, pktBuffer.NetworkProtocolNumber, pktBuffer)
|
||||
buf := pktBuffer.ToView().AsSlice()
|
||||
_, err := tcpConn.Write(buf)
|
||||
if err != nil {
|
||||
log.Errorf("[TUN] Failed to write data to tun device: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// tun --> dispatcher
|
||||
func (h *gvisorTCPHandler) readFromTCPConnWriteToEndpoint(ctx context.Context, conn net.Conn, endpoint *channel.Endpoint) {
|
||||
tcpConn, _ := newGvisorFakeUDPTunnelConnOverTCP(ctx, conn)
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
buf := config.SPool.Get().([]byte)[:]
|
||||
read, err := tcpConn.Read(buf[:])
|
||||
if err != nil {
|
||||
log.Errorf("[TUN] Failed to read from tcp conn: %v", err)
|
||||
config.SPool.Put(buf[:])
|
||||
return
|
||||
}
|
||||
if read == 0 {
|
||||
log.Warnf("[TUN] Read from tcp conn length is %d", read)
|
||||
config.SPool.Put(buf[:])
|
||||
continue
|
||||
}
|
||||
// Try to determine network protocol number, default zero.
|
||||
var protocol tcpip.NetworkProtocolNumber
|
||||
var ipProtocol int
|
||||
var src, dst net.IP
|
||||
// TUN interface with IFF_NO_PI enabled, thus
|
||||
// we need to determine protocol from version field
|
||||
if util.IsIPv4(buf) {
|
||||
protocol = header.IPv4ProtocolNumber
|
||||
ipHeader, err := ipv4.ParseHeader(buf[:read])
|
||||
if err != nil {
|
||||
log.Errorf("Failed to parse IPv4 header: %v", err)
|
||||
config.SPool.Put(buf[:])
|
||||
continue
|
||||
}
|
||||
ipProtocol = ipHeader.Protocol
|
||||
src = ipHeader.Src
|
||||
dst = ipHeader.Dst
|
||||
} else if util.IsIPv6(buf) {
|
||||
protocol = header.IPv6ProtocolNumber
|
||||
ipHeader, err := ipv6.ParseHeader(buf[:read])
|
||||
if err != nil {
|
||||
log.Errorf("Failed to parse IPv6 header: %s", err.Error())
|
||||
config.SPool.Put(buf[:])
|
||||
continue
|
||||
}
|
||||
ipProtocol = ipHeader.NextHeader
|
||||
src = ipHeader.Src
|
||||
dst = ipHeader.Dst
|
||||
} else {
|
||||
log.Debugf("[TUN-GVISOR] Unknown packet")
|
||||
config.SPool.Put(buf[:])
|
||||
continue
|
||||
}
|
||||
|
||||
h.addRoute(src, conn)
|
||||
// inner ip like 223.254.0.100/102/103 connect each other
|
||||
if config.CIDR.Contains(dst) || config.CIDR6.Contains(dst) {
|
||||
log.Tracef("[TUN-RAW] Forward to TUN device, SRC: %s, DST: %s, Length: %d", src.String(), dst.String(), read)
|
||||
util.SafeWrite(h.packetChan, &datagramPacket{
|
||||
DataLength: uint16(read),
|
||||
Data: buf[:],
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
|
||||
ReserveHeaderBytes: 0,
|
||||
Payload: buffer.MakeWithData(buf[:read]),
|
||||
})
|
||||
config.SPool.Put(buf[:])
|
||||
sniffer.LogPacket("[gVISOR] ", sniffer.DirectionRecv, protocol, pkt)
|
||||
endpoint.InjectInbound(protocol, pkt)
|
||||
pkt.DecRef()
|
||||
log.Tracef("[TUN-%s] Write to Gvisor IP-Protocol: %s, SRC: %s, DST: %s, Length: %d", layers.IPProtocol(ipProtocol).String(), layers.IPProtocol(ipProtocol).String(), src.String(), dst, read)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *gvisorTCPHandler) addRoute(src net.IP, tcpConn net.Conn) {
|
||||
value, loaded := h.routeMapTCP.LoadOrStore(src.String(), tcpConn)
|
||||
if loaded {
|
||||
if tcpConn != value.(net.Conn) {
|
||||
h.routeMapTCP.Store(src.String(), tcpConn)
|
||||
log.Debugf("[TCP] Replace route map TCP: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
|
||||
}
|
||||
} else {
|
||||
log.Debugf("[TCP] Add new route map TCP: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
|
||||
}
|
||||
}
|
||||
@@ -2,81 +2,115 @@ package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/stack"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
|
||||
"gvisor.dev/gvisor/pkg/waiter"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
)
|
||||
|
||||
var GvisorUDPForwardAddr string
|
||||
|
||||
func UDPForwarder(s *stack.Stack) func(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool {
|
||||
func UDPForwarder(s *stack.Stack, ctx context.Context) func(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool {
|
||||
return udp.NewForwarder(s, func(request *udp.ForwarderRequest) {
|
||||
endpointID := request.ID()
|
||||
log.Debugf("[TUN-UDP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
|
||||
endpointID.LocalPort, endpointID.LocalAddress.String(), endpointID.RemotePort, endpointID.RemoteAddress.String(),
|
||||
id := request.ID()
|
||||
log.Debugf("[TUN-UDP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
|
||||
id.LocalPort, id.LocalAddress.String(), id.RemotePort, id.RemoteAddress.String(),
|
||||
)
|
||||
src := &net.UDPAddr{
|
||||
IP: id.RemoteAddress.AsSlice(),
|
||||
Port: int(id.RemotePort),
|
||||
}
|
||||
dst := &net.UDPAddr{
|
||||
IP: id.LocalAddress.AsSlice(),
|
||||
Port: int(id.LocalPort),
|
||||
}
|
||||
|
||||
w := &waiter.Queue{}
|
||||
endpoint, tErr := request.CreateEndpoint(w)
|
||||
if tErr != nil {
|
||||
log.Debugf("[TUN-UDP] Error: can not create endpoint: %v", tErr)
|
||||
log.Debugf("[TUN-UDP] Failed to create endpoint to dst: %s: %v", dst.String(), tErr)
|
||||
return
|
||||
}
|
||||
|
||||
node, err := ParseNode(GvisorUDPForwardAddr)
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-UDP] Error: parse gviosr udp forward addr %s: %v", GvisorUDPForwardAddr, err)
|
||||
// dial dst
|
||||
remote, err1 := net.DialUDP("udp", nil, dst)
|
||||
if err1 != nil {
|
||||
log.Errorf("[TUN-UDP] Failed to connect dst: %s: %v", dst.String(), err1)
|
||||
return
|
||||
}
|
||||
node.Client = &Client{
|
||||
Connector: GvisorUDPOverTCPTunnelConnector(endpointID),
|
||||
Transporter: TCPTransporter(),
|
||||
}
|
||||
forwardChain := NewChain(5, node)
|
||||
|
||||
ctx := context.Background()
|
||||
c, err := forwardChain.getConn(ctx)
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-UDP] Error: can not get conn: %v", err)
|
||||
return
|
||||
}
|
||||
if err = WriteProxyInfo(c, endpointID); err != nil {
|
||||
log.Debugf("[TUN-UDP] Error: can not write proxy info: %v", err)
|
||||
return
|
||||
}
|
||||
remote, err := node.Client.ConnectContext(ctx, c)
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-UDP] Error: can not connect: %v", err)
|
||||
return
|
||||
}
|
||||
conn := gonet.NewUDPConn(s, w, endpoint)
|
||||
conn := gonet.NewUDPConn(w, endpoint)
|
||||
go func() {
|
||||
defer conn.Close()
|
||||
defer remote.Close()
|
||||
errChan := make(chan error, 2)
|
||||
go func() {
|
||||
i := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(i[:])
|
||||
written, err2 := io.CopyBuffer(remote, conn, i)
|
||||
log.Debugf("[TUN-UDP] Debug: write length %d data to remote", written)
|
||||
errChan <- err2
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(buf[:])
|
||||
|
||||
var written int
|
||||
var err error
|
||||
for {
|
||||
err = conn.SetReadDeadline(time.Now().Add(time.Second * 120))
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
var read int
|
||||
read, _, err = conn.ReadFrom(buf[:])
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
written += read
|
||||
err = remote.SetWriteDeadline(time.Now().Add(time.Second * 120))
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
_, err = remote.Write(buf[:read])
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
log.Debugf("[TUN-UDP] Write length %d data from src: %s -> dst: %s", written, src.String(), dst.String())
|
||||
errChan <- err
|
||||
}()
|
||||
go func() {
|
||||
i := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(i[:])
|
||||
written, err2 := io.CopyBuffer(conn, remote, i)
|
||||
log.Debugf("[TUN-UDP] Debug: read length %d data from remote", written)
|
||||
errChan <- err2
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(buf[:])
|
||||
|
||||
var err error
|
||||
var written int
|
||||
for {
|
||||
err = remote.SetReadDeadline(time.Now().Add(time.Second * 120))
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
var n int
|
||||
n, _, err = remote.ReadFromUDP(buf[:])
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
written += n
|
||||
err = conn.SetWriteDeadline(time.Now().Add(time.Second * 120))
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
_, err = conn.Write(buf[:n])
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
log.Debugf("[TUN-UDP] Read length %d data from dst: %s -> src: %s", written, dst.String(), src.String())
|
||||
errChan <- err
|
||||
}()
|
||||
err = <-errChan
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
log.Debugf("[TUN-UDP] Error: dsiconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err)
|
||||
err1 = <-errChan
|
||||
if err1 != nil && !errors.Is(err1, io.EOF) {
|
||||
log.Debugf("[TUN-UDP] Disconnect: %s >-<: %s: %v", conn.LocalAddr(), remote.RemoteAddr(), err1)
|
||||
}
|
||||
}()
|
||||
}).HandlePacket
|
||||
|
||||
@@ -7,40 +7,9 @@ import (
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/stack"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
)
|
||||
|
||||
type gvisorUDPOverTCPTunnelConnector struct {
|
||||
Id stack.TransportEndpointID
|
||||
}
|
||||
|
||||
func GvisorUDPOverTCPTunnelConnector(endpointID stack.TransportEndpointID) Connector {
|
||||
return &gvisorUDPOverTCPTunnelConnector{
|
||||
Id: endpointID,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *gvisorUDPOverTCPTunnelConnector) ConnectContext(ctx context.Context, conn net.Conn) (net.Conn, error) {
|
||||
switch con := conn.(type) {
|
||||
case *net.TCPConn:
|
||||
err := con.SetNoDelay(true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = con.SetKeepAlive(true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = con.SetKeepAlivePeriod(15 * time.Second)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return newGvisorFakeUDPTunnelConnOverTCP(ctx, conn)
|
||||
}
|
||||
|
||||
type gvisorUDPHandler struct{}
|
||||
|
||||
func GvisorUDPHandler() Handler {
|
||||
@@ -53,10 +22,10 @@ func (h *gvisorUDPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
|
||||
// 1, get proxy info
|
||||
endpointID, err := ParseProxyInfo(tcpConn)
|
||||
if err != nil {
|
||||
log.Warningf("[TUN-UDP] Error: Failed to parse proxy info: %v", err)
|
||||
log.Errorf("[TUN-UDP] Failed to parse proxy info: %v", err)
|
||||
return
|
||||
}
|
||||
log.Debugf("[TUN-UDP] Debug: LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
|
||||
log.Debugf("[TUN-UDP] LocalPort: %d, LocalAddress: %s, RemotePort: %d, RemoteAddress %s",
|
||||
endpointID.LocalPort, endpointID.LocalAddress.String(), endpointID.RemotePort, endpointID.RemoteAddress.String(),
|
||||
)
|
||||
// 2, dial proxy
|
||||
@@ -67,7 +36,7 @@ func (h *gvisorUDPHandler) Handle(ctx context.Context, tcpConn net.Conn) {
|
||||
var remote *net.UDPConn
|
||||
remote, err = net.DialUDP("udp", nil, addr)
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-UDP] Error: failed to connect addr %s: %v", addr.String(), err)
|
||||
log.Errorf("[TUN-UDP] Failed to connect addr %s: %v", addr.String(), err)
|
||||
return
|
||||
}
|
||||
handle(ctx, tcpConn, remote)
|
||||
@@ -116,7 +85,7 @@ func (c *gvisorFakeUDPTunnelConn) Close() error {
|
||||
}
|
||||
|
||||
func GvisorUDPListener(addr string) (net.Listener, error) {
|
||||
log.Debug("gvisor UDP over TCP listen addr", addr)
|
||||
log.Debugf("Gvisor UDP over TCP listening addr: %s", addr)
|
||||
laddr, err := net.ResolveTCPAddr("tcp", addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -125,16 +94,16 @@ func GvisorUDPListener(addr string) (net.Listener, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &tcpKeepAliveListener{ln}, nil
|
||||
return &tcpKeepAliveListener{TCPListener: ln}, nil
|
||||
}
|
||||
|
||||
func handle(ctx context.Context, tcpConn net.Conn, udpConn *net.UDPConn) {
|
||||
defer udpConn.Close()
|
||||
log.Debugf("[TUN-UDP] Debug: %s <-> %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
|
||||
log.Debugf("[TUN-UDP] %s <-> %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
|
||||
errChan := make(chan error, 2)
|
||||
go func() {
|
||||
b := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(b[:])
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(buf[:])
|
||||
|
||||
for {
|
||||
select {
|
||||
@@ -143,30 +112,42 @@ func handle(ctx context.Context, tcpConn net.Conn, udpConn *net.UDPConn) {
|
||||
default:
|
||||
}
|
||||
|
||||
dgram, err := readDatagramPacket(tcpConn, b[:])
|
||||
err := tcpConn.SetReadDeadline(time.Now().Add(time.Second * 30))
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-UDP] Debug: %s -> 0 : %v", tcpConn.RemoteAddr(), err)
|
||||
log.Errorf("[TUN-UDP] Failed to set read deadline: %v", err)
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
dgram, err := readDatagramPacket(tcpConn, buf[:])
|
||||
if err != nil {
|
||||
log.Errorf("[TUN-UDP] %s -> %s: %v", tcpConn.RemoteAddr(), udpConn.LocalAddr(), err)
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
if dgram.DataLength == 0 {
|
||||
log.Debugf("[TUN-UDP] Error: length is zero")
|
||||
log.Errorf("[TUN-UDP] Length is zero")
|
||||
errChan <- fmt.Errorf("length of read packet is zero")
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = udpConn.Write(dgram.Data); err != nil {
|
||||
log.Debugf("[TUN-UDP] Error: %s -> %s : %s", tcpConn.RemoteAddr(), "localhost:8422", err)
|
||||
err = udpConn.SetWriteDeadline(time.Now().Add(time.Second * 30))
|
||||
if err != nil {
|
||||
log.Errorf("[TUN-UDP] Failed to set write deadline: %v", err)
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
log.Debugf("[TUN-UDP] Debug: %s >>> %s length: %d", tcpConn.RemoteAddr(), "localhost:8422", dgram.DataLength)
|
||||
if _, err = udpConn.Write(dgram.Data); err != nil {
|
||||
log.Errorf("[TUN-UDP] %s -> %s : %s", tcpConn.RemoteAddr(), "localhost:8422", err)
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
log.Debugf("[TUN-UDP] %s >>> %s length: %d", tcpConn.RemoteAddr(), "localhost:8422", dgram.DataLength)
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
b := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(b[:])
|
||||
buf := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(buf[:])
|
||||
|
||||
for {
|
||||
select {
|
||||
@@ -175,32 +156,44 @@ func handle(ctx context.Context, tcpConn net.Conn, udpConn *net.UDPConn) {
|
||||
default:
|
||||
}
|
||||
|
||||
n, _, err := udpConn.ReadFrom(b[:])
|
||||
err := udpConn.SetReadDeadline(time.Now().Add(time.Second * 30))
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-UDP] Error: %s : %s", tcpConn.RemoteAddr(), err)
|
||||
log.Errorf("[TUN-UDP] Failed to set read deadline failed: %v", err)
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
n, _, err := udpConn.ReadFrom(buf[:])
|
||||
if err != nil {
|
||||
log.Errorf("[TUN-UDP] %s : %s", tcpConn.RemoteAddr(), err)
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
if n == 0 {
|
||||
log.Debugf("[TUN-UDP] Error: length is zero")
|
||||
log.Errorf("[TUN-UDP] Length is zero")
|
||||
errChan <- fmt.Errorf("length of read packet is zero")
|
||||
return
|
||||
}
|
||||
|
||||
// pipe from peer to tunnel
|
||||
dgram := newDatagramPacket(b[:n])
|
||||
if err = dgram.Write(tcpConn); err != nil {
|
||||
log.Debugf("[TUN-UDP] Error: %s <- %s : %s", tcpConn.RemoteAddr(), dgram.Addr(), err)
|
||||
err = tcpConn.SetWriteDeadline(time.Now().Add(time.Second * 30))
|
||||
if err != nil {
|
||||
log.Errorf("[TUN-UDP] Error: set write deadline failed: %v", err)
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
log.Debugf("[TUN-UDP] Debug: %s <<< %s length: %d", tcpConn.RemoteAddr(), dgram.Addr(), len(dgram.Data))
|
||||
dgram := newDatagramPacket(buf[:n])
|
||||
if err = dgram.Write(tcpConn); err != nil {
|
||||
log.Errorf("[TUN-UDP] Error: %s <- %s : %s", tcpConn.RemoteAddr(), dgram.Addr(), err)
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
log.Debugf("[TUN-UDP] %s <<< %s length: %d", tcpConn.RemoteAddr(), dgram.Addr(), len(dgram.Data))
|
||||
}
|
||||
}()
|
||||
err := <-errChan
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-UDP] Error: %v", err)
|
||||
log.Errorf("[TUN-UDP] %v", err)
|
||||
}
|
||||
log.Debugf("[TUN-UDP] Debug: %s >-< %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
|
||||
log.Debugf("[TUN-UDP] %s >-< %s", tcpConn.RemoteAddr(), udpConn.LocalAddr())
|
||||
return
|
||||
}
|
||||
|
||||
@@ -7,9 +7,7 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrorInvalidNode = errors.New("invalid node")
|
||||
)
|
||||
var ErrorInvalidNode = errors.New("invalid node")
|
||||
|
||||
type Node struct {
|
||||
Addr string
|
||||
@@ -29,12 +27,13 @@ func ParseNode(s string) (*Node, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Node{
|
||||
node := &Node{
|
||||
Addr: u.Host,
|
||||
Remote: strings.Trim(u.EscapedPath(), "/"),
|
||||
Values: u.Query(),
|
||||
Protocol: u.Scheme,
|
||||
}, nil
|
||||
}
|
||||
return node, nil
|
||||
}
|
||||
|
||||
// Get returns node parameter specified by key.
|
||||
|
||||
@@ -11,17 +11,15 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/tun"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/tun"
|
||||
)
|
||||
|
||||
var (
|
||||
// RouteNAT Globe route table for inner ip
|
||||
RouteNAT = NewNAT()
|
||||
// RouteConnNAT map[srcIP]net.Conn
|
||||
RouteConnNAT = &sync.Map{}
|
||||
// Chan tcp connects
|
||||
Chan = make(chan *datagramPacket, MaxSize)
|
||||
// RouteMapTCP map[srcIP]net.Conn Globe route table for inner ip
|
||||
RouteMapTCP = &sync.Map{}
|
||||
// TCPPacketChan tcp connects
|
||||
TCPPacketChan = make(chan *datagramPacket, MaxSize)
|
||||
)
|
||||
|
||||
type TCPUDPacket struct {
|
||||
@@ -39,7 +37,6 @@ type Route struct {
|
||||
}
|
||||
|
||||
func (r *Route) parseChain() (*Chain, error) {
|
||||
// parse the base nodes
|
||||
node, err := parseChainNode(r.ChainNode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -50,7 +47,6 @@ func (r *Route) parseChain() (*Chain, error) {
|
||||
func parseChainNode(ns string) (*Node, error) {
|
||||
node, err := ParseNode(ns)
|
||||
if err != nil {
|
||||
log.Errorf("parse node error: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
node.Client = &Client{
|
||||
@@ -63,7 +59,7 @@ func parseChainNode(ns string) (*Node, error) {
|
||||
func (r *Route) GenerateServers() ([]Server, error) {
|
||||
chain, err := r.parseChain()
|
||||
if err != nil && !errors.Is(err, ErrorInvalidNode) {
|
||||
log.Errorf("parse chain error: %v", err)
|
||||
log.Errorf("Failed to parse chain: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -72,7 +68,7 @@ func (r *Route) GenerateServers() ([]Server, error) {
|
||||
var node *Node
|
||||
node, err = ParseNode(serveNode)
|
||||
if err != nil {
|
||||
log.Errorf("parse node %s error: %v", serveNode, err)
|
||||
log.Errorf("Failed to parse node %s: %v", serveNode, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -91,32 +87,32 @@ func (r *Route) GenerateServers() ([]Server, error) {
|
||||
Gateway: node.Get("gw"),
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("create tun listener error: %v", err)
|
||||
log.Errorf("Failed to create tun listener: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
case "tcp":
|
||||
handler = TCPHandler()
|
||||
ln, err = TCPListener(node.Addr)
|
||||
if err != nil {
|
||||
log.Errorf("create tcp listener error: %v", err)
|
||||
log.Errorf("Failed to create tcp listener: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
case "gtcp":
|
||||
handler = GvisorTCPHandler()
|
||||
ln, err = GvisorTCPListener(node.Addr)
|
||||
if err != nil {
|
||||
log.Errorf("create gvisor tcp listener error: %v", err)
|
||||
log.Errorf("Failed to create gvisor tcp listener: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
case "gudp":
|
||||
handler = GvisorUDPHandler()
|
||||
ln, err = GvisorUDPListener(node.Addr)
|
||||
if err != nil {
|
||||
log.Errorf("create gvisor udp listener error: %v", err)
|
||||
log.Errorf("Failed to create gvisor udp listener: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
log.Errorf("not support protocol %s", node.Protocol)
|
||||
log.Errorf("Not support protocol %s", node.Protocol)
|
||||
return nil, fmt.Errorf("not support protocol %s", node.Protocol)
|
||||
}
|
||||
servers = append(servers, Server{Listener: ln, Handler: handler})
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"context"
|
||||
"net"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
)
|
||||
|
||||
type tcpTransporter struct{}
|
||||
@@ -27,7 +27,7 @@ func TCPListener(addr string) (net.Listener, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &tcpKeepAliveListener{ln}, nil
|
||||
return &tcpKeepAliveListener{TCPListener: ln}, nil
|
||||
}
|
||||
|
||||
type tcpKeepAliveListener struct {
|
||||
|
||||
@@ -3,13 +3,14 @@ package core
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
type fakeUDPTunnelConnector struct {
|
||||
@@ -41,33 +42,33 @@ func (c *fakeUDPTunnelConnector) ConnectContext(ctx context.Context, conn net.Co
|
||||
|
||||
type fakeUdpHandler struct {
|
||||
// map[srcIP]net.Conn
|
||||
connNAT *sync.Map
|
||||
ch chan *datagramPacket
|
||||
routeMapTCP *sync.Map
|
||||
packetChan chan *datagramPacket
|
||||
}
|
||||
|
||||
func TCPHandler() Handler {
|
||||
return &fakeUdpHandler{
|
||||
connNAT: RouteConnNAT,
|
||||
ch: Chan,
|
||||
routeMapTCP: RouteMapTCP,
|
||||
packetChan: TCPPacketChan,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *fakeUdpHandler) Handle(ctx context.Context, tcpConn net.Conn) {
|
||||
defer tcpConn.Close()
|
||||
log.Debugf("[tcpserver] %s -> %s\n", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
|
||||
log.Debugf("[TCP] %s -> %s", tcpConn.RemoteAddr(), tcpConn.LocalAddr())
|
||||
|
||||
defer func(addr net.Addr) {
|
||||
var keys []string
|
||||
h.connNAT.Range(func(key, value any) bool {
|
||||
h.routeMapTCP.Range(func(key, value any) bool {
|
||||
if value.(net.Conn) == tcpConn {
|
||||
keys = append(keys, key.(string))
|
||||
}
|
||||
return true
|
||||
})
|
||||
for _, key := range keys {
|
||||
h.connNAT.Delete(key)
|
||||
h.routeMapTCP.Delete(key)
|
||||
}
|
||||
log.Debugf("[tcpserver] delete conn %s from globle routeConnNAT, deleted count %d", addr, len(keys))
|
||||
log.Debugf("[TCP] To %s by conn %s from globle route map TCP", strings.Join(keys, " "), addr)
|
||||
}(tcpConn.LocalAddr())
|
||||
|
||||
for {
|
||||
@@ -77,34 +78,31 @@ func (h *fakeUdpHandler) Handle(ctx context.Context, tcpConn net.Conn) {
|
||||
default:
|
||||
}
|
||||
|
||||
b := config.LPool.Get().([]byte)[:]
|
||||
dgram, err := readDatagramPacketServer(tcpConn, b[:])
|
||||
buf := config.SPool.Get().([]byte)[:]
|
||||
dgram, err := readDatagramPacketServer(tcpConn, buf[:])
|
||||
if err != nil {
|
||||
log.Debugf("[tcpserver] %s -> 0 : %v", tcpConn.RemoteAddr(), err)
|
||||
log.Errorf("[TCP] %s -> %s : %v", tcpConn.RemoteAddr(), tcpConn.LocalAddr(), err)
|
||||
config.SPool.Put(buf[:])
|
||||
return
|
||||
}
|
||||
|
||||
var src net.IP
|
||||
bb := dgram.Data[:dgram.DataLength]
|
||||
if util.IsIPv4(bb) {
|
||||
src = net.IPv4(bb[12], bb[13], bb[14], bb[15])
|
||||
} else if util.IsIPv6(bb) {
|
||||
src = bb[8:24]
|
||||
} else {
|
||||
log.Errorf("[tcpserver] unknown packet")
|
||||
src, _, err = util.ParseIP(dgram.Data[:dgram.DataLength])
|
||||
if err != nil {
|
||||
log.Errorf("[TCP] Unknown packet")
|
||||
config.SPool.Put(buf[:])
|
||||
continue
|
||||
}
|
||||
value, loaded := h.connNAT.LoadOrStore(src.String(), tcpConn)
|
||||
value, loaded := h.routeMapTCP.LoadOrStore(src.String(), tcpConn)
|
||||
if loaded {
|
||||
if tcpConn != value.(net.Conn) {
|
||||
h.connNAT.Store(src.String(), tcpConn)
|
||||
log.Debugf("[tcpserver] replace routeConnNAT: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
|
||||
h.routeMapTCP.Store(src.String(), tcpConn)
|
||||
log.Debugf("[TCP] Replace route map TCP: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
|
||||
}
|
||||
log.Debugf("[tcpserver] find routeConnNAT: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
|
||||
} else {
|
||||
log.Debugf("[tcpserver] new routeConnNAT: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
|
||||
log.Debugf("[TCP] Add new route map TCP: %s -> %s-%s", src, tcpConn.LocalAddr(), tcpConn.RemoteAddr())
|
||||
}
|
||||
h.ch <- dgram
|
||||
util.SafeWrite(h.packetChan, dgram)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,221 +0,0 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/google/gopacket/layers"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/ipv4"
|
||||
"golang.org/x/net/ipv6"
|
||||
"gvisor.dev/gvisor/pkg/buffer"
|
||||
"gvisor.dev/gvisor/pkg/tcpip"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/header"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/stack"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
)
|
||||
|
||||
var _ stack.LinkEndpoint = (*tunEndpoint)(nil)
|
||||
|
||||
// tunEndpoint /Users/naison/go/pkg/mod/gvisor.dev/gvisor@v0.0.0-20220422052705-39790bd3a15a/pkg/tcpip/link/tun/device.go:122
|
||||
type tunEndpoint struct {
|
||||
ctx context.Context
|
||||
tun net.Conn
|
||||
once sync.Once
|
||||
endpoint *channel.Endpoint
|
||||
engine config.Engine
|
||||
|
||||
in chan<- *DataElem
|
||||
out chan *DataElem
|
||||
}
|
||||
|
||||
// WritePackets writes packets. Must not be called with an empty list of
|
||||
// packet buffers.
|
||||
//
|
||||
// WritePackets may modify the packet buffers, and takes ownership of the PacketBufferList.
|
||||
// it is not safe to use the PacketBufferList after a call to WritePackets.
|
||||
func (e *tunEndpoint) WritePackets(p stack.PacketBufferList) (int, tcpip.Error) {
|
||||
return e.endpoint.WritePackets(p)
|
||||
}
|
||||
|
||||
// MTU is the maximum transmission unit for this endpoint. This is
|
||||
// usually dictated by the backing physical network; when such a
|
||||
// physical network doesn't exist, the limit is generally 64k, which
|
||||
// includes the maximum size of an IP packet.
|
||||
func (e *tunEndpoint) MTU() uint32 {
|
||||
return uint32(config.DefaultMTU)
|
||||
}
|
||||
|
||||
// MaxHeaderLength returns the maximum size the data link (and
|
||||
// lower level layers combined) headers can have. Higher levels use this
|
||||
// information to reserve space in the front of the packets they're
|
||||
// building.
|
||||
func (e *tunEndpoint) MaxHeaderLength() uint16 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// LinkAddress returns the link address (typically a MAC) of the
|
||||
// endpoint.
|
||||
func (e *tunEndpoint) LinkAddress() tcpip.LinkAddress {
|
||||
return e.endpoint.LinkAddress()
|
||||
}
|
||||
|
||||
// Capabilities returns the set of capabilities supported by the
|
||||
// endpoint.
|
||||
func (e *tunEndpoint) Capabilities() stack.LinkEndpointCapabilities {
|
||||
return e.endpoint.LinkEPCapabilities
|
||||
}
|
||||
|
||||
// Attach attaches the data link layer endpoint to the network-layer
|
||||
// dispatcher of the stack.
|
||||
//
|
||||
// Attach is called with a nil dispatcher when the endpoint's NIC is being
|
||||
// removed.
|
||||
func (e *tunEndpoint) Attach(dispatcher stack.NetworkDispatcher) {
|
||||
e.endpoint.Attach(dispatcher)
|
||||
// queue --> tun
|
||||
e.once.Do(func() {
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-e.ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
read := e.endpoint.ReadContext(e.ctx)
|
||||
if !read.IsNil() {
|
||||
bb := read.ToView().AsSlice()
|
||||
i := config.LPool.Get().([]byte)[:]
|
||||
n := copy(i, bb)
|
||||
bb = nil
|
||||
e.out <- NewDataElem(i[:], n, nil, nil)
|
||||
}
|
||||
}
|
||||
}()
|
||||
// tun --> dispatcher
|
||||
go func() {
|
||||
// full(all use gvisor), mix(cluster network use gvisor), raw(not use gvisor)
|
||||
for {
|
||||
bytes := config.LPool.Get().([]byte)[:]
|
||||
read, err := e.tun.Read(bytes[:])
|
||||
if err != nil {
|
||||
// if context is still going
|
||||
if e.ctx.Err() == nil {
|
||||
log.Fatalf("[TUN]: read from tun failed: %v", err)
|
||||
} else {
|
||||
log.Info("tun device closed")
|
||||
}
|
||||
return
|
||||
}
|
||||
if read == 0 {
|
||||
log.Warnf("[TUN]: read from tun length is %d", read)
|
||||
continue
|
||||
}
|
||||
// Try to determine network protocol number, default zero.
|
||||
var protocol tcpip.NetworkProtocolNumber
|
||||
var ipProtocol int
|
||||
var src, dst net.IP
|
||||
// TUN interface with IFF_NO_PI enabled, thus
|
||||
// we need to determine protocol from version field
|
||||
version := bytes[0] >> 4
|
||||
if version == 4 {
|
||||
protocol = header.IPv4ProtocolNumber
|
||||
ipHeader, err := ipv4.ParseHeader(bytes[:read])
|
||||
if err != nil {
|
||||
log.Errorf("parse ipv4 header failed: %s", err.Error())
|
||||
continue
|
||||
}
|
||||
ipProtocol = ipHeader.Protocol
|
||||
src = ipHeader.Src
|
||||
dst = ipHeader.Dst
|
||||
} else if version == 6 {
|
||||
protocol = header.IPv6ProtocolNumber
|
||||
ipHeader, err := ipv6.ParseHeader(bytes[:read])
|
||||
if err != nil {
|
||||
log.Errorf("parse ipv6 header failed: %s", err.Error())
|
||||
continue
|
||||
}
|
||||
ipProtocol = ipHeader.NextHeader
|
||||
src = ipHeader.Src
|
||||
dst = ipHeader.Dst
|
||||
} else {
|
||||
log.Debugf("[TUN-gvisor] unknown packet version %d", version)
|
||||
continue
|
||||
}
|
||||
// only tcp and udp needs to distinguish transport engine
|
||||
// gvisor: all network use gvisor
|
||||
// mix: cluster network use gvisor, diy network use raw
|
||||
// raw: all network use raw
|
||||
if (ipProtocol == int(layers.IPProtocolUDP) || ipProtocol == int(layers.IPProtocolUDPLite) || ipProtocol == int(layers.IPProtocolTCP)) &&
|
||||
(e.engine == config.EngineGvisor || (e.engine == config.EngineMix && (!config.CIDR.Contains(dst) && !config.CIDR6.Contains(dst)))) {
|
||||
pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
|
||||
ReserveHeaderBytes: 0,
|
||||
Payload: buffer.MakeWithData(bytes[:read]),
|
||||
})
|
||||
//defer pkt.DecRef()
|
||||
config.LPool.Put(bytes[:])
|
||||
e.endpoint.InjectInbound(protocol, pkt)
|
||||
log.Debugf("[TUN-%s] IP-Protocol: %s, SRC: %s, DST: %s, Length: %d", layers.IPProtocol(ipProtocol).String(), layers.IPProtocol(ipProtocol).String(), src.String(), dst, read)
|
||||
} else {
|
||||
log.Debugf("[TUN-RAW] IP-Protocol: %s, SRC: %s, DST: %s, Length: %d", layers.IPProtocol(ipProtocol).String(), src.String(), dst, read)
|
||||
e.in <- NewDataElem(bytes[:], read, src, dst)
|
||||
}
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
for elem := range e.out {
|
||||
_, err := e.tun.Write(elem.Data()[:elem.Length()])
|
||||
config.LPool.Put(elem.Data()[:])
|
||||
if err != nil {
|
||||
log.Fatalf("[TUN] Fatal: failed to write data to tun device: %v", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
})
|
||||
}
|
||||
|
||||
// IsAttached returns whether a NetworkDispatcher is attached to the
|
||||
// endpoint.
|
||||
func (e *tunEndpoint) IsAttached() bool {
|
||||
return e.endpoint.IsAttached()
|
||||
}
|
||||
|
||||
// Wait waits for any worker goroutines owned by the endpoint to stop.
|
||||
//
|
||||
// For now, requesting that an endpoint's worker goroutine(s) stop is
|
||||
// implementation specific.
|
||||
//
|
||||
// Wait will not block if the endpoint hasn't started any goroutines
|
||||
// yet, even if it might later.
|
||||
func (e *tunEndpoint) Wait() {
|
||||
return
|
||||
}
|
||||
|
||||
// ARPHardwareType returns the ARPHRD_TYPE of the link endpoint.
|
||||
//
|
||||
// See:
|
||||
// https://github.com/torvalds/linux/blob/aa0c9086b40c17a7ad94425b3b70dd1fdd7497bf/include/uapi/linux/if_arp.h#L30
|
||||
func (e *tunEndpoint) ARPHardwareType() header.ARPHardwareType {
|
||||
return header.ARPHardwareNone
|
||||
}
|
||||
|
||||
// AddHeader adds a link layer header to the packet if required.
|
||||
func (e *tunEndpoint) AddHeader(ptr stack.PacketBufferPtr) {
|
||||
return
|
||||
}
|
||||
|
||||
func NewTunEndpoint(ctx context.Context, tun net.Conn, mtu uint32, engine config.Engine, in chan<- *DataElem, out chan *DataElem) stack.LinkEndpoint {
|
||||
addr, _ := tcpip.ParseMACAddress("02:03:03:04:05:06")
|
||||
return &tunEndpoint{
|
||||
ctx: ctx,
|
||||
tun: tun,
|
||||
endpoint: channel.New(tcp.DefaultReceiveBufferSize, mtu, addr),
|
||||
engine: engine,
|
||||
in: in,
|
||||
out: out,
|
||||
}
|
||||
}
|
||||
@@ -2,135 +2,75 @@ package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/gopacket"
|
||||
"github.com/google/gopacket/layers"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
const (
|
||||
MaxSize = 1000
|
||||
MaxThread = 10
|
||||
MaxConn = 1
|
||||
MaxSize = 1000
|
||||
)
|
||||
|
||||
type tunHandler struct {
|
||||
chain *Chain
|
||||
node *Node
|
||||
routeNAT *NAT
|
||||
chain *Chain
|
||||
node *Node
|
||||
routeMapUDP *RouteMap
|
||||
// map[srcIP]net.Conn
|
||||
routeConnNAT *sync.Map
|
||||
chExit chan error
|
||||
routeMapTCP *sync.Map
|
||||
chExit chan error
|
||||
}
|
||||
|
||||
type NAT struct {
|
||||
type RouteMap struct {
|
||||
lock *sync.RWMutex
|
||||
routes map[string][]net.Addr
|
||||
routes map[string]net.Addr
|
||||
}
|
||||
|
||||
func NewNAT() *NAT {
|
||||
return &NAT{
|
||||
func NewRouteMap() *RouteMap {
|
||||
return &RouteMap{
|
||||
lock: &sync.RWMutex{},
|
||||
routes: map[string][]net.Addr{},
|
||||
routes: map[string]net.Addr{},
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NAT) RemoveAddr(addr net.Addr) (count int) {
|
||||
n.lock.Lock()
|
||||
defer n.lock.Unlock()
|
||||
for k, v := range n.routes {
|
||||
for i := 0; i < len(v); i++ {
|
||||
if v[i].String() == addr.String() {
|
||||
v = append(v[:i], v[i+1:]...)
|
||||
i--
|
||||
count++
|
||||
}
|
||||
}
|
||||
n.routes[k] = v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (n *NAT) LoadOrStore(to net.IP, addr net.Addr) (result net.Addr, load bool) {
|
||||
func (n *RouteMap) LoadOrStore(to net.IP, addr net.Addr) (net.Addr, bool) {
|
||||
n.lock.RLock()
|
||||
addrList := n.routes[to.String()]
|
||||
route, load := n.routes[to.String()]
|
||||
n.lock.RUnlock()
|
||||
for _, add := range addrList {
|
||||
if add.String() == addr.String() {
|
||||
load = true
|
||||
result = addr
|
||||
return
|
||||
}
|
||||
if load {
|
||||
return route, true
|
||||
}
|
||||
|
||||
n.lock.Lock()
|
||||
defer n.lock.Unlock()
|
||||
if addrList == nil {
|
||||
n.routes[to.String()] = []net.Addr{addr}
|
||||
result = addr
|
||||
return
|
||||
} else {
|
||||
n.routes[to.String()] = append(n.routes[to.String()], addr)
|
||||
result = addr
|
||||
return
|
||||
}
|
||||
n.routes[to.String()] = addr
|
||||
return addr, false
|
||||
}
|
||||
|
||||
func (n *NAT) RouteTo(ip net.IP) net.Addr {
|
||||
n.lock.RLock()
|
||||
defer n.lock.RUnlock()
|
||||
addrList := n.routes[ip.String()]
|
||||
if len(addrList) == 0 {
|
||||
return nil
|
||||
}
|
||||
// for load balance
|
||||
index := rand.Intn(len(n.routes[ip.String()]))
|
||||
return addrList[index]
|
||||
}
|
||||
|
||||
func (n *NAT) Remove(ip net.IP, addr net.Addr) {
|
||||
func (n *RouteMap) Store(to net.IP, addr net.Addr) {
|
||||
n.lock.Lock()
|
||||
defer n.lock.Unlock()
|
||||
|
||||
addrList, ok := n.routes[ip.String()]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
for i := 0; i < len(addrList); i++ {
|
||||
if addrList[i].String() == addr.String() {
|
||||
addrList = append(addrList[:i], addrList[i+1:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
n.routes[ip.String()] = addrList
|
||||
return
|
||||
n.routes[to.String()] = addr
|
||||
}
|
||||
|
||||
func (n *NAT) Range(f func(key string, v []net.Addr)) {
|
||||
func (n *RouteMap) RouteTo(ip net.IP) net.Addr {
|
||||
n.lock.RLock()
|
||||
defer n.lock.RUnlock()
|
||||
for k, v := range n.routes {
|
||||
f(k, v)
|
||||
}
|
||||
return n.routes[ip.String()]
|
||||
}
|
||||
|
||||
// TunHandler creates a handler for tun tunnel.
|
||||
func TunHandler(chain *Chain, node *Node) Handler {
|
||||
return &tunHandler{
|
||||
chain: chain,
|
||||
node: node,
|
||||
routeNAT: RouteNAT,
|
||||
routeConnNAT: RouteConnNAT,
|
||||
chExit: make(chan error, 1),
|
||||
chain: chain,
|
||||
node: node,
|
||||
routeMapUDP: NewRouteMap(),
|
||||
routeMapTCP: RouteMapTCP,
|
||||
chExit: make(chan error, 1),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -142,37 +82,11 @@ func (h *tunHandler) Handle(ctx context.Context, tun net.Conn) {
|
||||
}
|
||||
}
|
||||
|
||||
func (h tunHandler) printRoute() {
|
||||
for {
|
||||
select {
|
||||
case <-time.Tick(time.Second * 5):
|
||||
var i int
|
||||
var sb strings.Builder
|
||||
h.routeNAT.Range(func(key string, value []net.Addr) {
|
||||
i++
|
||||
var s []string
|
||||
for _, addr := range value {
|
||||
if addr != nil {
|
||||
s = append(s, addr.String())
|
||||
}
|
||||
}
|
||||
if len(s) != 0 {
|
||||
sb.WriteString(fmt.Sprintf("to: %s, route: %s\n", key, strings.Join(s, " ")))
|
||||
}
|
||||
})
|
||||
log.Debug(sb.String())
|
||||
log.Debug(i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type Device struct {
|
||||
tun net.Conn
|
||||
thread int
|
||||
tun net.Conn
|
||||
|
||||
tunInboundRaw chan *DataElem
|
||||
tunInbound chan *DataElem
|
||||
tunOutbound chan *DataElem
|
||||
tunInbound chan *DataElem
|
||||
tunOutbound chan *DataElem
|
||||
|
||||
// your main logic
|
||||
tunInboundHandler func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem)
|
||||
@@ -182,68 +96,62 @@ type Device struct {
|
||||
|
||||
func (d *Device) readFromTun() {
|
||||
for {
|
||||
b := config.LPool.Get().([]byte)[:]
|
||||
n, err := d.tun.Read(b[:])
|
||||
buf := config.SPool.Get().([]byte)[:]
|
||||
n, err := d.tun.Read(buf[:])
|
||||
if err != nil {
|
||||
select {
|
||||
case d.chExit <- err:
|
||||
default:
|
||||
}
|
||||
config.SPool.Put(buf[:])
|
||||
log.Errorf("[TUN] Failed to read from tun: %v", err)
|
||||
util.SafeWrite(d.chExit, err)
|
||||
return
|
||||
}
|
||||
d.tunInboundRaw <- &DataElem{
|
||||
data: b[:],
|
||||
length: n,
|
||||
if n == 0 {
|
||||
log.Errorf("[TUN] Read packet length 0")
|
||||
config.SPool.Put(buf[:])
|
||||
continue
|
||||
}
|
||||
|
||||
src, dst, err := util.ParseIP(buf[:n])
|
||||
if err != nil {
|
||||
log.Errorf("[TUN] Unknown packet")
|
||||
config.SPool.Put(buf[:])
|
||||
continue
|
||||
}
|
||||
|
||||
log.Debugf("[TUN] SRC: %s --> DST: %s, length: %d", src, dst, n)
|
||||
util.SafeWrite(d.tunInbound, &DataElem{
|
||||
data: buf[:],
|
||||
length: n,
|
||||
src: src,
|
||||
dst: dst,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Device) writeToTun() {
|
||||
for e := range d.tunOutbound {
|
||||
_, err := d.tun.Write(e.data[:e.length])
|
||||
config.LPool.Put(e.data[:])
|
||||
config.SPool.Put(e.data[:])
|
||||
if err != nil {
|
||||
select {
|
||||
case d.chExit <- err:
|
||||
default:
|
||||
}
|
||||
util.SafeWrite(d.chExit, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Device) parseIPHeader() {
|
||||
for e := range d.tunInboundRaw {
|
||||
if util.IsIPv4(e.data[:e.length]) {
|
||||
// ipv4.ParseHeader
|
||||
b := e.data[:e.length]
|
||||
e.src = net.IPv4(b[12], b[13], b[14], b[15])
|
||||
e.dst = net.IPv4(b[16], b[17], b[18], b[19])
|
||||
} else if util.IsIPv6(e.data[:e.length]) {
|
||||
// ipv6.ParseHeader
|
||||
e.src = e.data[:e.length][8:24]
|
||||
e.dst = e.data[:e.length][24:40]
|
||||
} else {
|
||||
log.Errorf("[tun-packet] unknown packet")
|
||||
continue
|
||||
}
|
||||
|
||||
log.Debugf("[tun] %s --> %s, length: %d", e.src, e.dst, e.length)
|
||||
d.tunInbound <- e
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Device) Close() {
|
||||
d.tun.Close()
|
||||
util.SafeClose(d.tunInbound)
|
||||
util.SafeClose(d.tunOutbound)
|
||||
util.SafeClose(TCPPacketChan)
|
||||
}
|
||||
|
||||
func heartbeats(tun net.Conn, in chan<- *DataElem) {
|
||||
conn, err := util.GetTunDeviceByConn(tun)
|
||||
func heartbeats(ctx context.Context, tun net.Conn) {
|
||||
tunIfi, err := util.GetTunDeviceByConn(tun)
|
||||
if err != nil {
|
||||
log.Errorf("get tun device error: %s", err.Error())
|
||||
log.Errorf("Failed to get tun device: %s", err.Error())
|
||||
return
|
||||
}
|
||||
srcIPv4, srcIPv6, err := util.GetLocalTunIP(conn.Name)
|
||||
srcIPv4, srcIPv6, err := util.GetTunDeviceIP(tunIfi.Name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -253,112 +161,51 @@ func heartbeats(tun net.Conn, in chan<- *DataElem) {
|
||||
if config.RouterIP6.To4().Equal(srcIPv6) {
|
||||
return
|
||||
}
|
||||
|
||||
var bytes []byte
|
||||
var bytes6 []byte
|
||||
if config.DockerRouterIP.To4().Equal(srcIPv4) {
|
||||
return
|
||||
}
|
||||
var dstIPv4, dstIPv6 = net.IPv4zero, net.IPv6zero
|
||||
if config.CIDR.Contains(srcIPv4) {
|
||||
dstIPv4 = config.RouterIP
|
||||
}
|
||||
if config.CIDR6.Contains(srcIPv6) {
|
||||
dstIPv6 = config.RouterIP6
|
||||
}
|
||||
if config.DockerCIDR.Contains(srcIPv4) {
|
||||
dstIPv4 = config.DockerRouterIP
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(time.Second * 5)
|
||||
defer ticker.Stop()
|
||||
|
||||
for ; true; <-ticker.C {
|
||||
for i := 0; i < 4; i++ {
|
||||
if bytes == nil {
|
||||
bytes, err = genICMPPacket(srcIPv4, config.RouterIP)
|
||||
if err != nil {
|
||||
log.Errorf("generate ipv4 packet error: %s", err.Error())
|
||||
continue
|
||||
}
|
||||
}
|
||||
if bytes6 == nil {
|
||||
bytes6, err = genICMPPacketIPv6(srcIPv6, config.RouterIP6)
|
||||
if err != nil {
|
||||
log.Errorf("generate ipv6 packet error: %s", err.Error())
|
||||
continue
|
||||
}
|
||||
}
|
||||
for index, i2 := range [][]byte{bytes, bytes6} {
|
||||
data := config.LPool.Get().([]byte)[:]
|
||||
length := copy(data, i2)
|
||||
var src, dst net.IP
|
||||
if index == 0 {
|
||||
src, dst = srcIPv4, config.RouterIP
|
||||
} else {
|
||||
src, dst = srcIPv6, config.RouterIP6
|
||||
}
|
||||
in <- &DataElem{
|
||||
data: data[:],
|
||||
length: length,
|
||||
src: src,
|
||||
dst: dst,
|
||||
}
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
var src, dst net.IP
|
||||
src, dst = srcIPv4, dstIPv4
|
||||
if !dst.IsUnspecified() {
|
||||
go util.Ping(ctx, src.String(), dst.String())
|
||||
}
|
||||
src, dst = srcIPv6, dstIPv6
|
||||
if !dst.IsUnspecified() {
|
||||
go util.Ping(ctx, src.String(), dst.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func genICMPPacket(src net.IP, dst net.IP) ([]byte, error) {
|
||||
buf := gopacket.NewSerializeBuffer()
|
||||
icmpLayer := layers.ICMPv4{
|
||||
TypeCode: layers.CreateICMPv4TypeCode(layers.ICMPv4TypeEchoRequest, 0),
|
||||
Id: 3842,
|
||||
Seq: 1,
|
||||
}
|
||||
ipLayer := layers.IPv4{
|
||||
Version: 4,
|
||||
SrcIP: src,
|
||||
DstIP: dst,
|
||||
Protocol: layers.IPProtocolICMPv4,
|
||||
Flags: layers.IPv4DontFragment,
|
||||
TTL: 64,
|
||||
IHL: 5,
|
||||
Id: 55664,
|
||||
}
|
||||
opts := gopacket.SerializeOptions{
|
||||
FixLengths: true,
|
||||
ComputeChecksums: true,
|
||||
}
|
||||
err := gopacket.SerializeLayers(buf, opts, &ipLayer, &icmpLayer)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to serialize icmp packet, err: %v", err)
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func genICMPPacketIPv6(src net.IP, dst net.IP) ([]byte, error) {
|
||||
buf := gopacket.NewSerializeBuffer()
|
||||
icmpLayer := layers.ICMPv6{
|
||||
TypeCode: layers.CreateICMPv6TypeCode(layers.ICMPv6TypeEchoRequest, 0),
|
||||
}
|
||||
ipLayer := layers.IPv6{
|
||||
Version: 6,
|
||||
SrcIP: src,
|
||||
DstIP: dst,
|
||||
NextHeader: layers.IPProtocolICMPv6,
|
||||
HopLimit: 255,
|
||||
}
|
||||
opts := gopacket.SerializeOptions{
|
||||
FixLengths: true,
|
||||
}
|
||||
err := gopacket.SerializeLayers(buf, opts, &ipLayer, &icmpLayer)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to serialize icmp6 packet, err: %v", err)
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func (d *Device) Start(ctx context.Context) {
|
||||
go d.readFromTun()
|
||||
for i := 0; i < d.thread; i++ {
|
||||
go d.parseIPHeader()
|
||||
}
|
||||
go d.tunInboundHandler(d.tunInbound, d.tunOutbound)
|
||||
go d.writeToTun()
|
||||
go heartbeats(d.tun, d.tunInbound)
|
||||
go heartbeats(ctx, d.tun)
|
||||
|
||||
select {
|
||||
case err := <-d.chExit:
|
||||
log.Errorf("device exit: %s", err.Error())
|
||||
log.Errorf("Device exit: %v", err)
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
@@ -370,26 +217,22 @@ func (d *Device) SetTunInboundHandler(handler func(tunInbound <-chan *DataElem,
|
||||
}
|
||||
|
||||
func (h *tunHandler) HandleServer(ctx context.Context, tun net.Conn) {
|
||||
go h.printRoute()
|
||||
|
||||
device := &Device{
|
||||
tun: tun,
|
||||
thread: MaxThread,
|
||||
tunInboundRaw: make(chan *DataElem, MaxSize),
|
||||
tunInbound: make(chan *DataElem, MaxSize),
|
||||
tunOutbound: make(chan *DataElem, MaxSize),
|
||||
chExit: h.chExit,
|
||||
tun: tun,
|
||||
tunInbound: make(chan *DataElem, MaxSize),
|
||||
tunOutbound: make(chan *DataElem, MaxSize),
|
||||
chExit: h.chExit,
|
||||
}
|
||||
device.SetTunInboundHandler(func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem) {
|
||||
for {
|
||||
for ctx.Err() == nil {
|
||||
packetConn, err := (&net.ListenConfig{}).ListenPacket(ctx, "udp", h.node.Addr)
|
||||
if err != nil {
|
||||
log.Debugf("[udp] can not listen %s, err: %v", h.node.Addr, err)
|
||||
log.Errorf("[UDP] Failed to listen %s: %v", h.node.Addr, err)
|
||||
return
|
||||
}
|
||||
err = transportTun(ctx, tunInbound, tunOutbound, packetConn, h.routeNAT, h.routeConnNAT)
|
||||
err = transportTunServer(ctx, tunInbound, tunOutbound, packetConn, h.routeMapUDP, h.routeMapTCP)
|
||||
if err != nil {
|
||||
log.Debugf("[tun] %s: %v", tun.LocalAddr(), err)
|
||||
log.Errorf("[TUN] %s: %v", tun.LocalAddr(), err)
|
||||
}
|
||||
}
|
||||
})
|
||||
@@ -431,19 +274,17 @@ type udpElem struct {
|
||||
}
|
||||
|
||||
type Peer struct {
|
||||
conn net.PacketConn
|
||||
thread int
|
||||
conn net.PacketConn
|
||||
|
||||
connInbound chan *udpElem
|
||||
parsedConnInfo chan *udpElem
|
||||
connInbound chan *udpElem
|
||||
|
||||
tunInbound <-chan *DataElem
|
||||
tunOutbound chan<- *DataElem
|
||||
|
||||
routeNAT *NAT
|
||||
// map[srcIP]net.Conn
|
||||
// routeConnNAT sync.Map
|
||||
routeConnNAT *sync.Map
|
||||
// map[srcIP.String()]net.Addr for udp
|
||||
routeMapUDP *RouteMap
|
||||
// map[srcIP.String()]net.Conn for tcp
|
||||
routeMapTCP *sync.Map
|
||||
|
||||
errChan chan error
|
||||
}
|
||||
@@ -457,96 +298,80 @@ func (p *Peer) sendErr(err error) {
|
||||
|
||||
func (p *Peer) readFromConn() {
|
||||
for {
|
||||
b := config.LPool.Get().([]byte)[:]
|
||||
n, srcAddr, err := p.conn.ReadFrom(b[:])
|
||||
buf := config.SPool.Get().([]byte)[:]
|
||||
n, from, err := p.conn.ReadFrom(buf[:])
|
||||
if err != nil {
|
||||
config.SPool.Put(buf[:])
|
||||
p.sendErr(err)
|
||||
return
|
||||
}
|
||||
|
||||
src, dst, err := util.ParseIP(buf[:n])
|
||||
if err != nil {
|
||||
config.SPool.Put(buf[:])
|
||||
log.Errorf("[TUN] Unknown packet: %v", err)
|
||||
continue
|
||||
}
|
||||
if addr, loaded := p.routeMapUDP.LoadOrStore(src, from); loaded {
|
||||
if addr.String() != from.String() {
|
||||
p.routeMapUDP.Store(src, from)
|
||||
log.Debugf("[TUN] Replace route map UDP: %s -> %s", src, from)
|
||||
}
|
||||
} else {
|
||||
log.Debugf("[TUN] Add new route map UDP: %s -> %s", src, from)
|
||||
}
|
||||
|
||||
p.connInbound <- &udpElem{
|
||||
from: srcAddr,
|
||||
data: b[:],
|
||||
from: from,
|
||||
data: buf[:],
|
||||
length: n,
|
||||
src: src,
|
||||
dst: dst,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Peer) readFromTCPConn() {
|
||||
for packet := range Chan {
|
||||
for packet := range TCPPacketChan {
|
||||
src, dst, err := util.ParseIP(packet.Data)
|
||||
if err != nil {
|
||||
log.Errorf("[TUN] Unknown packet")
|
||||
config.SPool.Put(packet.Data[:])
|
||||
continue
|
||||
}
|
||||
u := &udpElem{
|
||||
data: packet.Data[:],
|
||||
length: int(packet.DataLength),
|
||||
src: src,
|
||||
dst: dst,
|
||||
}
|
||||
b := packet.Data
|
||||
if util.IsIPv4(packet.Data) {
|
||||
// ipv4.ParseHeader
|
||||
u.src = net.IPv4(b[12], b[13], b[14], b[15])
|
||||
u.dst = net.IPv4(b[16], b[17], b[18], b[19])
|
||||
} else if util.IsIPv6(packet.Data) {
|
||||
// ipv6.ParseHeader
|
||||
u.src = b[8:24]
|
||||
u.dst = b[24:40]
|
||||
} else {
|
||||
log.Errorf("[tun-conn] unknown packet")
|
||||
continue
|
||||
}
|
||||
log.Debugf("[tcpserver] udp-tun %s >>> %s length: %d", u.src, u.dst, u.length)
|
||||
p.parsedConnInfo <- u
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Peer) parseHeader() {
|
||||
var firstIPv4, firstIPv6 = true, true
|
||||
for e := range p.connInbound {
|
||||
b := e.data[:e.length]
|
||||
if util.IsIPv4(e.data[:e.length]) {
|
||||
// ipv4.ParseHeader
|
||||
e.src = net.IPv4(b[12], b[13], b[14], b[15])
|
||||
e.dst = net.IPv4(b[16], b[17], b[18], b[19])
|
||||
} else if util.IsIPv6(e.data[:e.length]) {
|
||||
// ipv6.ParseHeader
|
||||
e.src = b[:e.length][8:24]
|
||||
e.dst = b[:e.length][24:40]
|
||||
} else {
|
||||
log.Errorf("[tun] unknown packet")
|
||||
continue
|
||||
}
|
||||
|
||||
if firstIPv4 || firstIPv6 {
|
||||
if util.IsIPv4(e.data[:e.length]) {
|
||||
firstIPv4 = false
|
||||
} else {
|
||||
firstIPv6 = false
|
||||
}
|
||||
if _, loaded := p.routeNAT.LoadOrStore(e.src, e.from); loaded {
|
||||
log.Debugf("[tun] find route: %s -> %s", e.src, e.from)
|
||||
} else {
|
||||
log.Debugf("[tun] new route: %s -> %s", e.src, e.from)
|
||||
}
|
||||
}
|
||||
p.parsedConnInfo <- e
|
||||
log.Debugf("[TCP] udp-tun %s >>> %s length: %d", u.src, u.dst, u.length)
|
||||
p.connInbound <- u
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Peer) routePeer() {
|
||||
for e := range p.parsedConnInfo {
|
||||
if routeToAddr := p.routeNAT.RouteTo(e.dst); routeToAddr != nil {
|
||||
log.Debugf("[tun] find route: %s -> %s", e.dst, routeToAddr)
|
||||
for e := range p.connInbound {
|
||||
if routeToAddr := p.routeMapUDP.RouteTo(e.dst); routeToAddr != nil {
|
||||
log.Debugf("[UDP] Find UDP route to dst: %s -> %s", e.dst, routeToAddr)
|
||||
_, err := p.conn.WriteTo(e.data[:e.length], routeToAddr)
|
||||
config.LPool.Put(e.data[:])
|
||||
config.SPool.Put(e.data[:])
|
||||
if err != nil {
|
||||
p.sendErr(err)
|
||||
return
|
||||
}
|
||||
} else if conn, ok := p.routeConnNAT.Load(e.dst.String()); ok {
|
||||
} else if conn, ok := p.routeMapTCP.Load(e.dst.String()); ok {
|
||||
log.Debugf("[TCP] Find TCP route to dst: %s -> %s", e.dst.String(), conn.(net.Conn).RemoteAddr())
|
||||
dgram := newDatagramPacket(e.data[:e.length])
|
||||
if err := dgram.Write(conn.(net.Conn)); err != nil {
|
||||
log.Debugf("[tcpserver] udp-tun %s <- %s : %s", conn.(net.Conn).RemoteAddr(), dgram.Addr(), err)
|
||||
err := dgram.Write(conn.(net.Conn))
|
||||
config.SPool.Put(e.data[:])
|
||||
if err != nil {
|
||||
log.Errorf("[TCP] udp-tun %s <- %s : %s", conn.(net.Conn).RemoteAddr(), dgram.Addr(), err)
|
||||
p.sendErr(err)
|
||||
return
|
||||
}
|
||||
config.LPool.Put(e.data[:])
|
||||
} else {
|
||||
log.Debugf("[TUN] Not found route to dst: %s, write to TUN device", e.dst.String())
|
||||
p.tunOutbound <- &DataElem{
|
||||
data: e.data,
|
||||
length: e.length,
|
||||
@@ -559,27 +384,28 @@ func (p *Peer) routePeer() {
|
||||
|
||||
func (p *Peer) routeTUN() {
|
||||
for e := range p.tunInbound {
|
||||
if addr := p.routeNAT.RouteTo(e.dst); addr != nil {
|
||||
log.Debugf("[tun] find route: %s -> %s", e.dst, addr)
|
||||
if addr := p.routeMapUDP.RouteTo(e.dst); addr != nil {
|
||||
log.Debugf("[TUN] Find UDP route to dst: %s -> %s", e.dst, addr)
|
||||
_, err := p.conn.WriteTo(e.data[:e.length], addr)
|
||||
config.LPool.Put(e.data[:])
|
||||
config.SPool.Put(e.data[:])
|
||||
if err != nil {
|
||||
log.Debugf("[tun] can not route: %s -> %s", e.dst, addr)
|
||||
log.Debugf("[TUN] Failed wirte to route dst: %s -> %s", e.dst, addr)
|
||||
p.sendErr(err)
|
||||
return
|
||||
}
|
||||
} else if conn, ok := p.routeConnNAT.Load(e.dst.String()); ok {
|
||||
} else if conn, ok := p.routeMapTCP.Load(e.dst.String()); ok {
|
||||
log.Debugf("[TUN] Find TCP route to dst: %s -> %s", e.dst.String(), conn.(net.Conn).RemoteAddr())
|
||||
dgram := newDatagramPacket(e.data[:e.length])
|
||||
err := dgram.Write(conn.(net.Conn))
|
||||
config.LPool.Put(e.data[:])
|
||||
config.SPool.Put(e.data[:])
|
||||
if err != nil {
|
||||
log.Debugf("[tcpserver] udp-tun %s <- %s : %s", conn.(net.Conn).RemoteAddr(), dgram.Addr(), err)
|
||||
log.Errorf("[TUN] Failed to write TCP %s <- %s : %s", conn.(net.Conn).RemoteAddr(), dgram.Addr(), err)
|
||||
p.sendErr(err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
config.LPool.Put(e.data[:])
|
||||
log.Debug(fmt.Errorf("[tun] no route for %s -> %s", e.src, e.dst))
|
||||
log.Errorf("[TUN] No route for src: %s -> dst: %s, drop it", e.src, e.dst)
|
||||
config.SPool.Put(e.data[:])
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -587,9 +413,6 @@ func (p *Peer) routeTUN() {
|
||||
func (p *Peer) Start() {
|
||||
go p.readFromConn()
|
||||
go p.readFromTCPConn()
|
||||
for i := 0; i < p.thread; i++ {
|
||||
go p.parseHeader()
|
||||
}
|
||||
go p.routePeer()
|
||||
go p.routeTUN()
|
||||
}
|
||||
@@ -598,17 +421,15 @@ func (p *Peer) Close() {
|
||||
p.conn.Close()
|
||||
}
|
||||
|
||||
func transportTun(ctx context.Context, tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem, packetConn net.PacketConn, nat *NAT, connNAT *sync.Map) error {
|
||||
func transportTunServer(ctx context.Context, tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem, packetConn net.PacketConn, routeMapUDP *RouteMap, routeMapTCP *sync.Map) error {
|
||||
p := &Peer{
|
||||
conn: packetConn,
|
||||
thread: MaxThread,
|
||||
connInbound: make(chan *udpElem, MaxSize),
|
||||
parsedConnInfo: make(chan *udpElem, MaxSize),
|
||||
tunInbound: tunInbound,
|
||||
tunOutbound: tunOutbound,
|
||||
routeNAT: nat,
|
||||
routeConnNAT: connNAT,
|
||||
errChan: make(chan error, 2),
|
||||
conn: packetConn,
|
||||
connInbound: make(chan *udpElem, MaxSize),
|
||||
tunInbound: tunInbound,
|
||||
tunOutbound: tunOutbound,
|
||||
routeMapUDP: routeMapUDP,
|
||||
routeMapTCP: routeMapTCP,
|
||||
errChan: make(chan error, 2),
|
||||
}
|
||||
|
||||
defer p.Close()
|
||||
|
||||
@@ -2,28 +2,28 @@ package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func (h *tunHandler) HandleClient(ctx context.Context, tun net.Conn) {
|
||||
defer tun.Close()
|
||||
remoteAddr, err := net.ResolveUDPAddr("udp", h.node.Remote)
|
||||
if err != nil {
|
||||
log.Errorf("[tun] %s: remote addr: %v", tun.LocalAddr(), err)
|
||||
log.Errorf("[TUN-CLIENT] Failed to resolve udp addr %s: %v", h.node.Remote, err)
|
||||
return
|
||||
}
|
||||
in := make(chan *DataElem, MaxSize)
|
||||
out := make(chan *DataElem, MaxSize)
|
||||
engine := h.node.Get(config.ConfigKubeVPNTransportEngine)
|
||||
endpoint := NewTunEndpoint(ctx, tun, uint32(config.DefaultMTU), config.Engine(engine), in, out)
|
||||
stack := NewStack(ctx, endpoint)
|
||||
go stack.Wait()
|
||||
defer util.SafeClose(in)
|
||||
defer util.SafeClose(out)
|
||||
|
||||
d := &ClientDevice{
|
||||
tun: tun,
|
||||
@@ -32,22 +32,16 @@ func (h *tunHandler) HandleClient(ctx context.Context, tun net.Conn) {
|
||||
chExit: h.chExit,
|
||||
}
|
||||
d.SetTunInboundHandler(func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
for ctx.Err() == nil {
|
||||
packetConn, err := getRemotePacketConn(ctx, h.chain)
|
||||
if err != nil {
|
||||
log.Debugf("[tun-client] %s - %s: %s", tun.LocalAddr(), remoteAddr, err)
|
||||
time.Sleep(time.Second * 2)
|
||||
log.Debugf("[TUN-CLIENT] Failed to get remote conn from %s -> %s: %s", tun.LocalAddr(), remoteAddr, err)
|
||||
time.Sleep(time.Millisecond * 200)
|
||||
continue
|
||||
}
|
||||
err = transportTunClient(ctx, tunInbound, tunOutbound, packetConn, remoteAddr)
|
||||
if err != nil {
|
||||
log.Debugf("[tun-client] %s: %v", tun.LocalAddr(), err)
|
||||
log.Debugf("[TUN-CLIENT] %s: %v", tun.LocalAddr(), err)
|
||||
}
|
||||
}
|
||||
})
|
||||
@@ -89,13 +83,13 @@ func transportTunClient(ctx context.Context, tunInbound <-chan *DataElem, tunOut
|
||||
go func() {
|
||||
for e := range tunInbound {
|
||||
if e.src.Equal(e.dst) {
|
||||
tunOutbound <- e
|
||||
util.SafeWrite(tunOutbound, e)
|
||||
continue
|
||||
}
|
||||
_, err := packetConn.WriteTo(e.data[:e.length], remoteAddr)
|
||||
config.LPool.Put(e.data[:])
|
||||
config.SPool.Put(e.data[:])
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
util.SafeWrite(errChan, errors.Wrap(err, fmt.Sprintf("failed to write packet to remote %s", remoteAddr)))
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -103,13 +97,14 @@ func transportTunClient(ctx context.Context, tunInbound <-chan *DataElem, tunOut
|
||||
|
||||
go func() {
|
||||
for {
|
||||
b := config.LPool.Get().([]byte)[:]
|
||||
n, _, err := packetConn.ReadFrom(b[:])
|
||||
buf := config.SPool.Get().([]byte)[:]
|
||||
n, _, err := packetConn.ReadFrom(buf[:])
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
config.SPool.Put(buf[:])
|
||||
util.SafeWrite(errChan, errors.Wrap(err, fmt.Sprintf("failed to read packet from remote %s", remoteAddr)))
|
||||
return
|
||||
}
|
||||
tunOutbound <- &DataElem{data: b[:], length: n}
|
||||
util.SafeWrite(tunOutbound, &DataElem{data: buf[:], length: n})
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -132,11 +127,13 @@ type ClientDevice struct {
|
||||
|
||||
func (d *ClientDevice) Start(ctx context.Context) {
|
||||
go d.tunInboundHandler(d.tunInbound, d.tunOutbound)
|
||||
go heartbeats(d.tun, d.tunInbound)
|
||||
go heartbeats(ctx, d.tun)
|
||||
go d.readFromTun()
|
||||
go d.writeToTun()
|
||||
|
||||
select {
|
||||
case err := <-d.chExit:
|
||||
log.Errorf("[tun-client]: %v", err)
|
||||
log.Errorf("[TUN-CLIENT]: %v", err)
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
@@ -146,3 +143,41 @@ func (d *ClientDevice) Start(ctx context.Context) {
|
||||
func (d *ClientDevice) SetTunInboundHandler(handler func(tunInbound <-chan *DataElem, tunOutbound chan<- *DataElem)) {
|
||||
d.tunInboundHandler = handler
|
||||
}
|
||||
|
||||
func (d *ClientDevice) readFromTun() {
|
||||
for {
|
||||
buf := config.SPool.Get().([]byte)[:]
|
||||
n, err := d.tun.Read(buf[:])
|
||||
if err != nil {
|
||||
util.SafeWrite(d.chExit, err)
|
||||
config.SPool.Put(buf[:])
|
||||
return
|
||||
}
|
||||
if n == 0 {
|
||||
config.SPool.Put(buf[:])
|
||||
continue
|
||||
}
|
||||
|
||||
// Try to determine network protocol number, default zero.
|
||||
var src, dst net.IP
|
||||
src, dst, err = util.ParseIP(buf[:n])
|
||||
if err != nil {
|
||||
log.Debugf("[TUN-GVISOR] Unknown packet: %v", err)
|
||||
config.SPool.Put(buf[:])
|
||||
continue
|
||||
}
|
||||
log.Tracef("[TUN-RAW] SRC: %s, DST: %s, Length: %d", src.String(), dst, n)
|
||||
util.SafeWrite(d.tunInbound, NewDataElem(buf[:], n, src, dst))
|
||||
}
|
||||
}
|
||||
|
||||
func (d *ClientDevice) writeToTun() {
|
||||
for e := range d.tunOutbound {
|
||||
_, err := d.tun.Write(e.data[:e.length])
|
||||
config.SPool.Put(e.data[:])
|
||||
if err != nil {
|
||||
util.SafeWrite(d.chExit, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"io"
|
||||
"net"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
)
|
||||
|
||||
type datagramPacket struct {
|
||||
@@ -61,10 +61,10 @@ func readDatagramPacketServer(r io.Reader, b []byte) (*datagramPacket, error) {
|
||||
}
|
||||
|
||||
func (addr *datagramPacket) Write(w io.Writer) error {
|
||||
b := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(b[:])
|
||||
binary.BigEndian.PutUint16(b[:2], uint16(len(addr.Data)))
|
||||
n := copy(b[2:], addr.Data)
|
||||
_, err := w.Write(b[:n+2])
|
||||
buf := config.MPool.Get().([]byte)[:]
|
||||
defer config.MPool.Put(buf[:])
|
||||
binary.BigEndian.PutUint16(buf[:2], uint16(len(addr.Data)))
|
||||
n := copy(buf[2:], addr.Data)
|
||||
_, err := w.Write(buf[:n+2])
|
||||
return err
|
||||
}
|
||||
|
||||
12
pkg/cp/cp.go
12
pkg/cp/cp.go
@@ -12,7 +12,7 @@ import (
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/cli-runtime/pkg/genericclioptions"
|
||||
"k8s.io/cli-runtime/pkg/genericiooptions"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/kubectl/pkg/cmd/exec"
|
||||
@@ -32,11 +32,11 @@ type CopyOptions struct {
|
||||
|
||||
args []string
|
||||
|
||||
genericclioptions.IOStreams
|
||||
genericiooptions.IOStreams
|
||||
}
|
||||
|
||||
// NewCopyOptions creates the options for copy
|
||||
func NewCopyOptions(ioStreams genericclioptions.IOStreams) *CopyOptions {
|
||||
func NewCopyOptions(ioStreams genericiooptions.IOStreams) *CopyOptions {
|
||||
return &CopyOptions{
|
||||
IOStreams: ioStreams,
|
||||
}
|
||||
@@ -149,7 +149,7 @@ func (o *CopyOptions) Run() error {
|
||||
func (o *CopyOptions) checkDestinationIsDir(dest fileSpec) error {
|
||||
options := &exec.ExecOptions{
|
||||
StreamOptions: exec.StreamOptions{
|
||||
IOStreams: genericclioptions.IOStreams{
|
||||
IOStreams: genericiooptions.IOStreams{
|
||||
Out: bytes.NewBuffer([]byte{}),
|
||||
ErrOut: bytes.NewBuffer([]byte{}),
|
||||
},
|
||||
@@ -199,7 +199,7 @@ func (o *CopyOptions) copyToPod(src, dest fileSpec, options *exec.ExecOptions) e
|
||||
}
|
||||
|
||||
options.StreamOptions = exec.StreamOptions{
|
||||
IOStreams: genericclioptions.IOStreams{
|
||||
IOStreams: genericiooptions.IOStreams{
|
||||
In: reader,
|
||||
Out: o.Out,
|
||||
ErrOut: o.ErrOut,
|
||||
@@ -246,7 +246,7 @@ func (t *TarPipe) initReadFrom(n uint64) {
|
||||
t.reader, t.outStream = io.Pipe()
|
||||
options := &exec.ExecOptions{
|
||||
StreamOptions: exec.StreamOptions{
|
||||
IOStreams: genericclioptions.IOStreams{
|
||||
IOStreams: genericiooptions.IOStreams{
|
||||
In: nil,
|
||||
Out: t.outStream,
|
||||
ErrOut: t.o.Out,
|
||||
|
||||
@@ -1,35 +1,34 @@
|
||||
package action
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"context"
|
||||
"io"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/pflag"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func (svr *Server) Clone(req *rpc.CloneRequest, resp rpc.Daemon_CloneServer) error {
|
||||
func (svr *Server) Clone(req *rpc.CloneRequest, resp rpc.Daemon_CloneServer) (err error) {
|
||||
defer func() {
|
||||
util.InitLoggerForServer(true)
|
||||
log.SetOutput(svr.LogFile)
|
||||
log.SetLevel(log.DebugLevel)
|
||||
config.Debug = false
|
||||
}()
|
||||
config.Debug = req.Level == int32(log.DebugLevel)
|
||||
out := io.MultiWriter(newCloneWarp(resp), svr.LogFile)
|
||||
util.InitLoggerForClient(config.Debug)
|
||||
log.SetOutput(out)
|
||||
log.SetLevel(log.InfoLevel)
|
||||
var sshConf = util.ParseSshFromRPC(req.SshJump)
|
||||
var sshConf = ssh.ParseSshFromRPC(req.SshJump)
|
||||
connReq := &rpc.ConnectRequest{
|
||||
KubeconfigBytes: req.KubeconfigBytes,
|
||||
Namespace: req.Namespace,
|
||||
ExtraCIDR: req.ExtraCIDR,
|
||||
ExtraDomain: req.ExtraDomain,
|
||||
UseLocalDNS: req.UseLocalDNS,
|
||||
ExtraRoute: req.ExtraRoute,
|
||||
Engine: req.Engine,
|
||||
SshJump: req.SshJump,
|
||||
TransferImage: req.TransferImage,
|
||||
@@ -42,29 +41,20 @@ func (svr *Server) Clone(req *rpc.CloneRequest, resp rpc.Daemon_CloneServer) err
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var msg *rpc.ConnectResponse
|
||||
for {
|
||||
msg, err = connResp.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err == nil {
|
||||
fmt.Fprint(out, msg.Message)
|
||||
} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
|
||||
return nil
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
err = util.PrintGRPCStream[rpc.ConnectResponse](connResp, out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
util.InitLoggerForClient(config.Debug)
|
||||
log.SetOutput(out)
|
||||
|
||||
options := &handler.CloneOptions{
|
||||
Namespace: req.Namespace,
|
||||
Headers: req.Headers,
|
||||
Workloads: req.Workloads,
|
||||
ExtraCIDR: req.ExtraCIDR,
|
||||
ExtraDomain: req.ExtraDomain,
|
||||
UseLocalDNS: req.UseLocalDNS,
|
||||
Engine: config.Engine(req.Engine),
|
||||
Namespace: req.Namespace,
|
||||
Headers: req.Headers,
|
||||
Workloads: req.Workloads,
|
||||
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
|
||||
Engine: config.Engine(req.Engine),
|
||||
OriginKubeconfigPath: req.OriginKubeconfigPath,
|
||||
|
||||
TargetKubeconfig: req.TargetKubeconfig,
|
||||
TargetNamespace: req.TargetNamespace,
|
||||
@@ -72,6 +62,9 @@ func (svr *Server) Clone(req *rpc.CloneRequest, resp rpc.Daemon_CloneServer) err
|
||||
TargetImage: req.TargetImage,
|
||||
TargetRegistry: req.TargetRegistry,
|
||||
IsChangeTargetRegistry: req.IsChangeTargetRegistry,
|
||||
TargetWorkloadNames: map[string]string{},
|
||||
LocalDir: req.LocalDir,
|
||||
RemoteDir: req.RemoteDir,
|
||||
}
|
||||
file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
|
||||
if err != nil {
|
||||
@@ -82,23 +75,34 @@ func (svr *Server) Clone(req *rpc.CloneRequest, resp rpc.Daemon_CloneServer) err
|
||||
Name: "kubeconfig",
|
||||
DefValue: file,
|
||||
})
|
||||
sshCtx, sshFunc := context.WithCancel(context.Background())
|
||||
defer func() {
|
||||
if err != nil {
|
||||
_ = options.Cleanup()
|
||||
sshFunc()
|
||||
}
|
||||
}()
|
||||
options.AddRollbackFunc(func() error {
|
||||
sshFunc()
|
||||
return nil
|
||||
})
|
||||
var path string
|
||||
path, err = handler.SshJump(resp.Context(), sshConf, flags, false)
|
||||
path, err = ssh.SshJump(sshCtx, sshConf, flags, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f := InitFactoryByPath(path, req.Namespace)
|
||||
f := util.InitFactoryByPath(path, req.Namespace)
|
||||
err = options.InitClient(f)
|
||||
if err != nil {
|
||||
log.Errorf("init client failed: %v", err)
|
||||
log.Errorf("Failed to init client: %v", err)
|
||||
return err
|
||||
}
|
||||
config.Image = req.Image
|
||||
log.Infof("clone workloads...")
|
||||
err = options.DoClone(resp.Context())
|
||||
log.Infof("Clone workloads...")
|
||||
options.SetContext(sshCtx)
|
||||
err = options.DoClone(resp.Context(), []byte(req.KubeconfigBytes))
|
||||
if err != nil {
|
||||
log.Errorf("clone workloads failed: %v", err)
|
||||
_ = options.Cleanup()
|
||||
log.Errorf("Clone workloads failed: %v", err)
|
||||
return err
|
||||
}
|
||||
svr.clone = options
|
||||
@@ -110,10 +114,10 @@ type cloneWarp struct {
|
||||
}
|
||||
|
||||
func (r *cloneWarp) Write(p []byte) (n int, err error) {
|
||||
err = r.server.Send(&rpc.CloneResponse{
|
||||
_ = r.server.Send(&rpc.CloneResponse{
|
||||
Message: string(p),
|
||||
})
|
||||
return len(p), err
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func newCloneWarp(server rpc.Daemon_CloneServer) io.Writer {
|
||||
|
||||
@@ -4,17 +4,17 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
var CancelFunc = make(map[string]context.CancelFunc)
|
||||
|
||||
func (svr *Server) ConfigAdd(ctx context.Context, req *rpc.ConfigAddRequest) (*rpc.ConfigAddResponse, error) {
|
||||
var sshConf = util.ParseSshFromRPC(req.SshJump)
|
||||
file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
|
||||
func (svr *Server) ConfigAdd(ctx context.Context, req *rpc.ConfigAddRequest) (resp *rpc.ConfigAddResponse, err error) {
|
||||
var file string
|
||||
file, err = util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -24,13 +24,19 @@ func (svr *Server) ConfigAdd(ctx context.Context, req *rpc.ConfigAddRequest) (*r
|
||||
DefValue: file,
|
||||
})
|
||||
sshCtx, sshCancel := context.WithCancel(context.Background())
|
||||
defer func() {
|
||||
if err != nil {
|
||||
sshCancel()
|
||||
}
|
||||
}()
|
||||
var path string
|
||||
path, err = handler.SshJump(sshCtx, sshConf, flags, true)
|
||||
CancelFunc[path] = sshCancel
|
||||
var sshConf = ssh.ParseSshFromRPC(req.SshJump)
|
||||
path, err = ssh.SshJump(sshCtx, sshConf, flags, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
CancelFunc[path] = sshCancel
|
||||
return &rpc.ConfigAddResponse{ClusterID: path}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -8,22 +8,24 @@ import (
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/pflag"
|
||||
"k8s.io/utils/pointer"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func (svr *Server) ConnectFork(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectForkServer) error {
|
||||
func (svr *Server) ConnectFork(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectForkServer) (err error) {
|
||||
defer func() {
|
||||
util.InitLoggerForServer(true)
|
||||
log.SetOutput(svr.LogFile)
|
||||
log.SetLevel(log.DebugLevel)
|
||||
config.Debug = false
|
||||
}()
|
||||
config.Debug = req.Level == int32(log.DebugLevel)
|
||||
out := io.MultiWriter(newConnectForkWarp(resp), svr.LogFile)
|
||||
util.InitLoggerForClient(config.Debug)
|
||||
log.SetOutput(out)
|
||||
log.SetLevel(log.InfoLevel)
|
||||
if !svr.IsSudo {
|
||||
return svr.redirectConnectForkToSudoDaemon(req, resp)
|
||||
}
|
||||
@@ -33,19 +35,17 @@ func (svr *Server) ConnectFork(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectF
|
||||
Namespace: req.Namespace,
|
||||
Headers: req.Headers,
|
||||
Workloads: req.Workloads,
|
||||
ExtraCIDR: req.ExtraCIDR,
|
||||
ExtraDomain: req.ExtraDomain,
|
||||
UseLocalDNS: req.UseLocalDNS,
|
||||
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
|
||||
Engine: config.Engine(req.Engine),
|
||||
OriginKubeconfigPath: req.OriginKubeconfigPath,
|
||||
Lock: &svr.Lock,
|
||||
}
|
||||
var sshConf = util.ParseSshFromRPC(req.SshJump)
|
||||
var sshConf = ssh.ParseSshFromRPC(req.SshJump)
|
||||
var transferImage = req.TransferImage
|
||||
|
||||
go util.StartupPProf(config.PProfPort)
|
||||
defaultlog.Default().SetOutput(io.Discard)
|
||||
if transferImage {
|
||||
err := util.TransferImage(ctx, sshConf, config.OriginImage, req.Image, out)
|
||||
err = ssh.TransferImage(ctx, sshConf, config.OriginImage, req.Image, out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -65,12 +65,19 @@ func (svr *Server) ConnectFork(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectF
|
||||
sshCancel()
|
||||
return nil
|
||||
})
|
||||
defer func() {
|
||||
if err != nil {
|
||||
connect.Cleanup()
|
||||
sshCancel()
|
||||
}
|
||||
}()
|
||||
|
||||
var path string
|
||||
path, err = handler.SshJump(sshCtx, sshConf, flags, false)
|
||||
path, err = ssh.SshJump(sshCtx, sshConf, flags, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = connect.InitClient(InitFactoryByPath(path, req.Namespace))
|
||||
err = connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -78,7 +85,7 @@ func (svr *Server) ConnectFork(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectF
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = connect.RentInnerIP(ctx)
|
||||
err = connect.GetIPFromContext(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -86,16 +93,18 @@ func (svr *Server) ConnectFork(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectF
|
||||
config.Image = req.Image
|
||||
err = connect.DoConnect(sshCtx, true)
|
||||
if err != nil {
|
||||
log.Errorf("do connect error: %v", err)
|
||||
connect.Cleanup()
|
||||
log.Errorf("Do connect error: %v", err)
|
||||
return err
|
||||
}
|
||||
svr.secondaryConnect = append(svr.secondaryConnect, connect)
|
||||
|
||||
if resp.Context().Err() != nil {
|
||||
return resp.Context().Err()
|
||||
}
|
||||
svr.secondaryConnect = append(svr.secondaryConnect, connect)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (svr *Server) redirectConnectForkToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServer) error {
|
||||
func (svr *Server) redirectConnectForkToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServer) (err error) {
|
||||
cli := svr.GetClient(true)
|
||||
if cli == nil {
|
||||
return fmt.Errorf("sudo daemon not start")
|
||||
@@ -104,13 +113,11 @@ func (svr *Server) redirectConnectForkToSudoDaemon(req *rpc.ConnectRequest, resp
|
||||
Namespace: req.Namespace,
|
||||
Headers: req.Headers,
|
||||
Workloads: req.Workloads,
|
||||
ExtraCIDR: req.ExtraCIDR,
|
||||
ExtraDomain: req.ExtraDomain,
|
||||
UseLocalDNS: req.UseLocalDNS,
|
||||
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
|
||||
Engine: config.Engine(req.Engine),
|
||||
OriginKubeconfigPath: req.OriginKubeconfigPath,
|
||||
}
|
||||
var sshConf = util.ParseSshFromRPC(req.SshJump)
|
||||
var sshConf = ssh.ParseSshFromRPC(req.SshJump)
|
||||
file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -125,12 +132,17 @@ func (svr *Server) redirectConnectForkToSudoDaemon(req *rpc.ConnectRequest, resp
|
||||
sshCancel()
|
||||
return nil
|
||||
})
|
||||
defer func() {
|
||||
if err != nil {
|
||||
sshCancel()
|
||||
}
|
||||
}()
|
||||
var path string
|
||||
path, err = handler.SshJump(sshCtx, sshConf, flags, true)
|
||||
path, err = ssh.SshJump(sshCtx, sshConf, flags, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = connect.InitClient(InitFactoryByPath(path, req.Namespace))
|
||||
err = connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -147,12 +159,12 @@ func (svr *Server) redirectConnectForkToSudoDaemon(req *rpc.ConnectRequest, resp
|
||||
)
|
||||
if err == nil && isSameCluster && options.Equal(connect) {
|
||||
// same cluster, do nothing
|
||||
log.Infof("already connect to cluster")
|
||||
log.Infof("Connected with cluster")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
ctx, err := connect.RentInnerIP(resp.Context())
|
||||
ctx, err := connect.RentIP(resp.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -161,50 +173,15 @@ func (svr *Server) redirectConnectForkToSudoDaemon(req *rpc.ConnectRequest, resp
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
recv, err := connResp.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
err = resp.Send(recv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = util.CopyGRPCStream[rpc.ConnectResponse](connResp, resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.Context().Err() != nil {
|
||||
return resp.Context().Err()
|
||||
}
|
||||
svr.secondaryConnect = append(svr.secondaryConnect, connect)
|
||||
|
||||
if req.Foreground {
|
||||
<-resp.Context().Done()
|
||||
for i := 0; i < len(svr.secondaryConnect); i++ {
|
||||
if svr.secondaryConnect[i] == connect {
|
||||
cli := svr.GetClient(false)
|
||||
if cli == nil {
|
||||
return fmt.Errorf("sudo daemon not start")
|
||||
}
|
||||
disconnect, err := cli.Disconnect(context.Background(), &rpc.DisconnectRequest{
|
||||
ID: pointer.Int32(int32(i)),
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("disconnect error: %v", err)
|
||||
return err
|
||||
}
|
||||
for {
|
||||
recv, err := disconnect.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Info(recv.Message)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -213,10 +190,10 @@ type connectForkWarp struct {
|
||||
}
|
||||
|
||||
func (r *connectForkWarp) Write(p []byte) (n int, err error) {
|
||||
err = r.server.Send(&rpc.ConnectResponse{
|
||||
_ = r.server.Send(&rpc.ConnectResponse{
|
||||
Message: string(p),
|
||||
})
|
||||
return len(p), err
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func newConnectForkWarp(server rpc.Daemon_ConnectForkServer) io.Writer {
|
||||
|
||||
@@ -5,62 +5,67 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
defaultlog "log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/pflag"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"k8s.io/cli-runtime/pkg/genericclioptions"
|
||||
"k8s.io/client-go/rest"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/utils/pointer"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func (svr *Server) Connect(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServer) error {
|
||||
func (svr *Server) Connect(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServer) (e error) {
|
||||
defer func() {
|
||||
util.InitLoggerForServer(true)
|
||||
log.SetOutput(svr.LogFile)
|
||||
log.SetLevel(log.DebugLevel)
|
||||
config.Debug = false
|
||||
}()
|
||||
config.Debug = req.Level == int32(log.DebugLevel)
|
||||
out := io.MultiWriter(newWarp(resp), svr.LogFile)
|
||||
util.InitLoggerForClient(config.Debug)
|
||||
log.SetOutput(out)
|
||||
log.SetLevel(log.InfoLevel)
|
||||
if !svr.IsSudo {
|
||||
return svr.redirectToSudoDaemon(req, resp)
|
||||
}
|
||||
|
||||
ctx := resp.Context()
|
||||
if !svr.t.IsZero() {
|
||||
log.Debugf("already connect to another cluster, you can disconnect this connect by command `kubevpn disconnect`")
|
||||
s := "Already connected to cluster in full mode, you can use options `--lite` to connect to another cluster"
|
||||
log.Debugf(s)
|
||||
// todo define already connect error?
|
||||
return status.Error(codes.AlreadyExists, "already connect to another cluster, you can disconnect this connect by command `kubevpn disconnect`")
|
||||
return status.Error(codes.AlreadyExists, s)
|
||||
}
|
||||
defer func() {
|
||||
if e != nil || ctx.Err() != nil {
|
||||
if svr.connect != nil {
|
||||
svr.connect.Cleanup()
|
||||
svr.connect = nil
|
||||
}
|
||||
svr.t = time.Time{}
|
||||
}
|
||||
}()
|
||||
svr.t = time.Now()
|
||||
svr.connect = &handler.ConnectOptions{
|
||||
Namespace: req.Namespace,
|
||||
Headers: req.Headers,
|
||||
PortMap: req.PortMap,
|
||||
Workloads: req.Workloads,
|
||||
ExtraCIDR: req.ExtraCIDR,
|
||||
ExtraDomain: req.ExtraDomain,
|
||||
UseLocalDNS: req.UseLocalDNS,
|
||||
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
|
||||
Engine: config.Engine(req.Engine),
|
||||
OriginKubeconfigPath: req.OriginKubeconfigPath,
|
||||
Lock: &svr.Lock,
|
||||
}
|
||||
var sshConf = util.ParseSshFromRPC(req.SshJump)
|
||||
var sshConf = ssh.ParseSshFromRPC(req.SshJump)
|
||||
var transferImage = req.TransferImage
|
||||
|
||||
go util.StartupPProf(config.PProfPort)
|
||||
defaultlog.Default().SetOutput(io.Discard)
|
||||
if transferImage {
|
||||
err := util.TransferImage(ctx, sshConf, config.OriginImage, req.Image, out)
|
||||
err := ssh.TransferImage(ctx, sshConf, config.OriginImage, req.Image, out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -80,12 +85,17 @@ func (svr *Server) Connect(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServe
|
||||
sshCancel()
|
||||
return nil
|
||||
})
|
||||
defer func() {
|
||||
if e != nil {
|
||||
sshCancel()
|
||||
}
|
||||
}()
|
||||
var path string
|
||||
path, err = handler.SshJump(sshCtx, sshConf, flags, false)
|
||||
path, err = ssh.SshJump(sshCtx, sshConf, flags, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = svr.connect.InitClient(InitFactoryByPath(path, req.Namespace))
|
||||
err = svr.connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -93,7 +103,7 @@ func (svr *Server) Connect(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServe
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = svr.connect.RentInnerIP(ctx)
|
||||
err = svr.connect.GetIPFromContext(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -101,7 +111,7 @@ func (svr *Server) Connect(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServe
|
||||
config.Image = req.Image
|
||||
err = svr.connect.DoConnect(sshCtx, false)
|
||||
if err != nil {
|
||||
log.Errorf("do connect error: %v", err)
|
||||
log.Errorf("Failed to connect: %v", err)
|
||||
svr.connect.Cleanup()
|
||||
svr.connect = nil
|
||||
svr.t = time.Time{}
|
||||
@@ -110,7 +120,7 @@ func (svr *Server) Connect(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServe
|
||||
return nil
|
||||
}
|
||||
|
||||
func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServer) error {
|
||||
func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServer) (e error) {
|
||||
cli := svr.GetClient(true)
|
||||
if cli == nil {
|
||||
return fmt.Errorf("sudo daemon not start")
|
||||
@@ -118,14 +128,13 @@ func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon
|
||||
connect := &handler.ConnectOptions{
|
||||
Namespace: req.Namespace,
|
||||
Headers: req.Headers,
|
||||
PortMap: req.PortMap,
|
||||
Workloads: req.Workloads,
|
||||
ExtraCIDR: req.ExtraCIDR,
|
||||
ExtraDomain: req.ExtraDomain,
|
||||
UseLocalDNS: req.UseLocalDNS,
|
||||
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
|
||||
Engine: config.Engine(req.Engine),
|
||||
OriginKubeconfigPath: req.OriginKubeconfigPath,
|
||||
}
|
||||
var sshConf = util.ParseSshFromRPC(req.SshJump)
|
||||
var sshConf = ssh.ParseSshFromRPC(req.SshJump)
|
||||
file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -140,12 +149,17 @@ func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon
|
||||
sshCancel()
|
||||
return nil
|
||||
})
|
||||
defer func() {
|
||||
if e != nil {
|
||||
sshCancel()
|
||||
}
|
||||
}()
|
||||
var path string
|
||||
path, err = handler.SshJump(sshCtx, sshConf, flags, true)
|
||||
path, err = ssh.SshJump(sshCtx, sshConf, flags, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = connect.InitClient(InitFactoryByPath(path, req.Namespace))
|
||||
err = connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -162,12 +176,12 @@ func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon
|
||||
)
|
||||
if err == nil && isSameCluster && svr.connect.Equal(connect) {
|
||||
// same cluster, do nothing
|
||||
log.Infof("already connect to cluster")
|
||||
log.Infof("Connected to cluster")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
ctx, err := connect.RentInnerIP(resp.Context())
|
||||
ctx, err := connect.RentIP(resp.Context())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -176,49 +190,17 @@ func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
recv, err := connResp.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
err = resp.Send(recv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = util.CopyGRPCStream[rpc.ConnectResponse](connResp, resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.Context().Err() != nil {
|
||||
return resp.Context().Err()
|
||||
}
|
||||
svr.t = time.Now()
|
||||
svr.connect = connect
|
||||
|
||||
// hangup
|
||||
if req.Foreground {
|
||||
<-resp.Context().Done()
|
||||
|
||||
client := svr.GetClient(false)
|
||||
if client == nil {
|
||||
return fmt.Errorf("daemon not start")
|
||||
}
|
||||
disconnect, err := client.Disconnect(context.Background(), &rpc.DisconnectRequest{
|
||||
ID: pointer.Int32(int32(0)),
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("disconnect error: %v", err)
|
||||
return err
|
||||
}
|
||||
for {
|
||||
recv, err := disconnect.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
log.Error(err)
|
||||
return err
|
||||
}
|
||||
log.Info(recv.Message)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -227,52 +209,12 @@ type warp struct {
|
||||
}
|
||||
|
||||
func (r *warp) Write(p []byte) (n int, err error) {
|
||||
err = r.server.Send(&rpc.ConnectResponse{
|
||||
_ = r.server.Send(&rpc.ConnectResponse{
|
||||
Message: string(p),
|
||||
})
|
||||
return len(p), err
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func newWarp(server rpc.Daemon_ConnectServer) io.Writer {
|
||||
return &warp{server: server}
|
||||
}
|
||||
|
||||
func InitFactory(kubeconfigBytes string, ns string) cmdutil.Factory {
|
||||
configFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
|
||||
configFlags.WrapConfigFn = func(c *rest.Config) *rest.Config {
|
||||
if path, ok := os.LookupEnv(config.EnvSSHJump); ok {
|
||||
bytes, err := os.ReadFile(path)
|
||||
cmdutil.CheckErr(err)
|
||||
var conf *restclient.Config
|
||||
conf, err = clientcmd.RESTConfigFromKubeConfig(bytes)
|
||||
cmdutil.CheckErr(err)
|
||||
return conf
|
||||
}
|
||||
return c
|
||||
}
|
||||
// todo optimize here
|
||||
temp, err := os.CreateTemp("", "*.json")
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
err = temp.Close()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
err = os.WriteFile(temp.Name(), []byte(kubeconfigBytes), os.ModePerm)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
configFlags.KubeConfig = pointer.String(temp.Name())
|
||||
configFlags.Namespace = pointer.String(ns)
|
||||
matchVersionFlags := cmdutil.NewMatchVersionFlags(configFlags)
|
||||
return cmdutil.NewFactory(matchVersionFlags)
|
||||
}
|
||||
|
||||
func InitFactoryByPath(kubeconfig string, ns string) cmdutil.Factory {
|
||||
configFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
|
||||
configFlags.KubeConfig = pointer.String(kubeconfig)
|
||||
configFlags.Namespace = pointer.String(ns)
|
||||
matchVersionFlags := cmdutil.NewMatchVersionFlags(configFlags)
|
||||
return cmdutil.NewFactory(matchVersionFlags)
|
||||
}
|
||||
|
||||
@@ -1,17 +1,114 @@
|
||||
package action
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/pflag"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/dns"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/dns"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func (svr *Server) Disconnect(req *rpc.DisconnectRequest, resp rpc.Daemon_DisconnectServer) error {
|
||||
defer func() {
|
||||
util.InitLoggerForServer(true)
|
||||
log.SetOutput(svr.LogFile)
|
||||
config.Debug = false
|
||||
}()
|
||||
out := io.MultiWriter(newDisconnectWarp(resp), svr.LogFile)
|
||||
util.InitLoggerForClient(config.Debug)
|
||||
log.SetOutput(out)
|
||||
switch {
|
||||
case req.GetAll():
|
||||
if svr.clone != nil {
|
||||
_ = svr.clone.Cleanup()
|
||||
}
|
||||
svr.clone = nil
|
||||
|
||||
connects := handler.Connects(svr.secondaryConnect).Append(svr.connect)
|
||||
for _, connect := range connects.Sort() {
|
||||
if connect != nil {
|
||||
connect.Cleanup()
|
||||
}
|
||||
}
|
||||
svr.secondaryConnect = nil
|
||||
svr.connect = nil
|
||||
svr.t = time.Time{}
|
||||
case req.ID != nil && req.GetID() == 0:
|
||||
if svr.connect != nil {
|
||||
svr.connect.Cleanup()
|
||||
}
|
||||
svr.connect = nil
|
||||
svr.t = time.Time{}
|
||||
|
||||
if svr.clone != nil {
|
||||
_ = svr.clone.Cleanup()
|
||||
}
|
||||
svr.clone = nil
|
||||
case req.ID != nil:
|
||||
index := req.GetID() - 1
|
||||
if index < int32(len(svr.secondaryConnect)) {
|
||||
svr.secondaryConnect[index].Cleanup()
|
||||
svr.secondaryConnect = append(svr.secondaryConnect[:index], svr.secondaryConnect[index+1:]...)
|
||||
} else {
|
||||
log.Errorf("Index %d out of range", req.GetID())
|
||||
}
|
||||
case req.KubeconfigBytes != nil && req.Namespace != nil:
|
||||
err := disconnectByKubeConfig(
|
||||
resp.Context(),
|
||||
svr,
|
||||
req.GetKubeconfigBytes(),
|
||||
req.GetNamespace(),
|
||||
req.GetSshJump(),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case len(req.ClusterIDs) != 0:
|
||||
s := sets.New(req.ClusterIDs...)
|
||||
var connects = *new(handler.Connects)
|
||||
var foundModeFull bool
|
||||
if s.Has(svr.connect.GetClusterID()) {
|
||||
connects = connects.Append(svr.connect)
|
||||
foundModeFull = true
|
||||
}
|
||||
for i := 0; i < len(svr.secondaryConnect); i++ {
|
||||
if s.Has(svr.secondaryConnect[i].GetClusterID()) {
|
||||
connects = connects.Append(svr.secondaryConnect[i])
|
||||
svr.secondaryConnect = append(svr.secondaryConnect[:i], svr.secondaryConnect[i+1:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
for _, connect := range connects.Sort() {
|
||||
if connect != nil {
|
||||
connect.Cleanup()
|
||||
}
|
||||
}
|
||||
if foundModeFull {
|
||||
svr.connect = nil
|
||||
svr.t = time.Time{}
|
||||
if svr.clone != nil {
|
||||
_ = svr.clone.Cleanup()
|
||||
}
|
||||
svr.clone = nil
|
||||
}
|
||||
}
|
||||
|
||||
if svr.connect == nil && len(svr.secondaryConnect) == 0 {
|
||||
if svr.IsSudo {
|
||||
_ = dns.CleanupHosts()
|
||||
}
|
||||
}
|
||||
|
||||
if !svr.IsSudo {
|
||||
cli := svr.GetClient(true)
|
||||
if cli == nil {
|
||||
@@ -21,79 +118,84 @@ func (svr *Server) Disconnect(req *rpc.DisconnectRequest, resp rpc.Daemon_Discon
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var recv *rpc.DisconnectResponse
|
||||
for {
|
||||
recv, err = connResp.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
err = resp.Send(recv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = util.CopyGRPCStream[rpc.DisconnectResponse](connResp, resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
defer func() {
|
||||
log.SetOutput(svr.LogFile)
|
||||
log.SetLevel(log.DebugLevel)
|
||||
}()
|
||||
out := io.MultiWriter(newDisconnectWarp(resp), svr.LogFile)
|
||||
log.SetOutput(out)
|
||||
log.SetLevel(log.InfoLevel)
|
||||
|
||||
if req.GetAll() {
|
||||
if svr.connect != nil {
|
||||
svr.connect.Cleanup()
|
||||
}
|
||||
if svr.clone != nil {
|
||||
_ = svr.clone.Cleanup()
|
||||
}
|
||||
svr.t = time.Time{}
|
||||
svr.connect = nil
|
||||
svr.clone = nil
|
||||
|
||||
for _, options := range svr.secondaryConnect {
|
||||
options.Cleanup()
|
||||
}
|
||||
svr.secondaryConnect = nil
|
||||
} else if req.ID != nil && req.GetID() == 0 {
|
||||
if svr.connect != nil {
|
||||
svr.connect.Cleanup()
|
||||
}
|
||||
if svr.clone != nil {
|
||||
_ = svr.clone.Cleanup()
|
||||
}
|
||||
svr.t = time.Time{}
|
||||
svr.connect = nil
|
||||
svr.clone = nil
|
||||
} else if req.ID != nil {
|
||||
index := req.GetID() - 1
|
||||
if index < int32(len(svr.secondaryConnect)) {
|
||||
svr.secondaryConnect[index].Cleanup()
|
||||
svr.secondaryConnect = append(svr.secondaryConnect[:index], svr.secondaryConnect[index+1:]...)
|
||||
} else {
|
||||
log.Errorf("index %d out of range", req.GetID())
|
||||
}
|
||||
}
|
||||
|
||||
if svr.connect == nil && len(svr.secondaryConnect) == 0 {
|
||||
dns.CleanupHosts()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func disconnectByKubeConfig(ctx context.Context, svr *Server, kubeconfigBytes string, ns string, jump *rpc.SshJump) error {
|
||||
file, err := util.ConvertToTempKubeconfigFile([]byte(kubeconfigBytes))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
flags := pflag.NewFlagSet("", pflag.ContinueOnError)
|
||||
flags.AddFlag(&pflag.Flag{
|
||||
Name: "kubeconfig",
|
||||
DefValue: file,
|
||||
})
|
||||
var sshConf = ssh.ParseSshFromRPC(jump)
|
||||
var path string
|
||||
path, err = ssh.SshJump(ctx, sshConf, flags, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
connect := &handler.ConnectOptions{
|
||||
Namespace: ns,
|
||||
}
|
||||
err = connect.InitClient(util.InitFactoryByPath(path, ns))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
disconnect(svr, connect)
|
||||
return nil
|
||||
}
|
||||
|
||||
func disconnect(svr *Server, connect *handler.ConnectOptions) {
|
||||
client := svr.GetClient(false)
|
||||
if client == nil {
|
||||
return
|
||||
}
|
||||
if svr.connect != nil {
|
||||
isSameCluster, err := util.IsSameCluster(
|
||||
svr.connect.GetClientset().CoreV1().ConfigMaps(svr.connect.Namespace), svr.connect.Namespace,
|
||||
connect.GetClientset().CoreV1().ConfigMaps(connect.Namespace), connect.Namespace,
|
||||
)
|
||||
if err == nil && isSameCluster {
|
||||
log.Infof("Disconnecting from the cluster...")
|
||||
svr.connect.Cleanup()
|
||||
svr.connect = nil
|
||||
svr.t = time.Time{}
|
||||
}
|
||||
}
|
||||
for i := 0; i < len(svr.secondaryConnect); i++ {
|
||||
options := svr.secondaryConnect[i]
|
||||
isSameCluster, err := util.IsSameCluster(
|
||||
options.GetClientset().CoreV1().ConfigMaps(options.Namespace), options.Namespace,
|
||||
connect.GetClientset().CoreV1().ConfigMaps(connect.Namespace), connect.Namespace,
|
||||
)
|
||||
if err == nil && isSameCluster {
|
||||
log.Infof("Disconnecting from the cluster...")
|
||||
options.Cleanup()
|
||||
svr.secondaryConnect = append(svr.secondaryConnect[:i], svr.secondaryConnect[i+1:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
type disconnectWarp struct {
|
||||
server rpc.Daemon_DisconnectServer
|
||||
}
|
||||
|
||||
func (r *disconnectWarp) Write(p []byte) (n int, err error) {
|
||||
err = r.server.Send(&rpc.DisconnectResponse{
|
||||
_ = r.server.Send(&rpc.DisconnectResponse{
|
||||
Message: string(p),
|
||||
})
|
||||
return len(p), err
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func newDisconnectWarp(server rpc.Daemon_DisconnectServer) io.Writer {
|
||||
|
||||
@@ -2,35 +2,40 @@ package action
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/discovery"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/metadata"
|
||||
"k8s.io/client-go/metadata/metadatainformer"
|
||||
"k8s.io/client-go/restmapper"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
)
|
||||
|
||||
func (svr *Server) Get(ctx context.Context, req *rpc.GetRequest) (*rpc.GetResponse, error) {
|
||||
if svr.connect == nil {
|
||||
if svr.connect == nil || svr.connect.Context() == nil {
|
||||
return nil, errors.New("not connected")
|
||||
}
|
||||
if svr.gr == nil {
|
||||
if svr.resourceLists == nil {
|
||||
restConfig, err := svr.connect.GetFactory().ToRESTConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
restConfig.WarningHandler = rest.NoWarnings{}
|
||||
config, err := discovery.NewDiscoveryClientForConfig(restConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
svr.gr, err = restmapper.GetAPIGroupResources(config)
|
||||
svr.resourceLists, err = discovery.ServerPreferredResources(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -42,73 +47,77 @@ func (svr *Server) Get(ctx context.Context, req *rpc.GetRequest) (*rpc.GetRespon
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
svr.informer = metadatainformer.NewSharedInformerFactory(forConfig, time.Second*5)
|
||||
for _, resources := range svr.gr {
|
||||
for _, apiResources := range resources.VersionedResources {
|
||||
for _, resource := range apiResources {
|
||||
have := sets.New[string](resource.Kind, resource.Name, resource.SingularName).Insert(resource.ShortNames...).Has(req.Resource)
|
||||
if have {
|
||||
resourcesFor, err := mapper.RESTMapping(schema.GroupKind{
|
||||
Group: resource.Group,
|
||||
Kind: resource.Kind,
|
||||
}, resource.Version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
svr.informer.ForResource(resourcesFor.Resource)
|
||||
}
|
||||
for _, resourceList := range svr.resourceLists {
|
||||
for _, resource := range resourceList.APIResources {
|
||||
var groupVersion schema.GroupVersion
|
||||
groupVersion, err = schema.ParseGroupVersion(resourceList.GroupVersion)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var mapping schema.GroupVersionResource
|
||||
mapping, err = mapper.ResourceFor(groupVersion.WithResource(resource.Name))
|
||||
if err != nil {
|
||||
if meta.IsNoMatchError(err) {
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
_ = svr.informer.ForResource(mapping).Informer().SetWatchErrorHandler(func(r *cache.Reflector, err error) {
|
||||
_, _ = svr.LogFile.Write([]byte(err.Error()))
|
||||
})
|
||||
}
|
||||
}
|
||||
go svr.informer.Start(svr.connect.Context().Done())
|
||||
go svr.informer.WaitForCacheSync(make(chan struct{}))
|
||||
svr.informer.Start(svr.connect.Context().Done())
|
||||
svr.informer.WaitForCacheSync(ctx.Done())
|
||||
}
|
||||
informer, err := svr.getInformer(req)
|
||||
informer, gvk, err := svr.getInformer(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var result []*rpc.Metadata
|
||||
for _, m := range informer.Informer().GetIndexer().List() {
|
||||
object, err := meta.Accessor(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
var result []string
|
||||
for _, m := range informer.Informer().GetStore().List() {
|
||||
objectMetadata, ok := m.(*v1.PartialObjectMetadata)
|
||||
if ok {
|
||||
deepCopy := objectMetadata.DeepCopy()
|
||||
deepCopy.SetGroupVersionKind(*gvk)
|
||||
deepCopy.ManagedFields = nil
|
||||
marshal, err := json.Marshal(deepCopy)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
result = append(result, string(marshal))
|
||||
}
|
||||
result = append(result, &rpc.Metadata{
|
||||
Name: object.GetName(),
|
||||
Namespace: object.GetNamespace(),
|
||||
})
|
||||
}
|
||||
|
||||
return &rpc.GetResponse{Metadata: result}, nil
|
||||
}
|
||||
|
||||
func (svr *Server) getInformer(req *rpc.GetRequest) (informers.GenericInformer, error) {
|
||||
func (svr *Server) getInformer(req *rpc.GetRequest) (informers.GenericInformer, *schema.GroupVersionKind, error) {
|
||||
mapper, err := svr.connect.GetFactory().ToRESTMapper()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
var resourcesFor *meta.RESTMapping
|
||||
out:
|
||||
for _, resources := range svr.gr {
|
||||
for _, apiResources := range resources.VersionedResources {
|
||||
for _, resource := range apiResources {
|
||||
have := sets.New[string](resource.Kind, resource.Name, resource.SingularName).Insert(resource.ShortNames...).Has(req.Resource)
|
||||
if have {
|
||||
resourcesFor, err = mapper.RESTMapping(schema.GroupKind{
|
||||
Group: resource.Group,
|
||||
Kind: resource.Kind,
|
||||
}, resource.Version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
break out
|
||||
for _, resources := range svr.resourceLists {
|
||||
for _, resource := range resources.APIResources {
|
||||
have := sets.New[string](resource.Kind, resource.Name, resource.SingularName).Insert(resource.ShortNames...).Has(req.Resource)
|
||||
if have {
|
||||
var groupVersion schema.GroupVersion
|
||||
groupVersion, err = schema.ParseGroupVersion(resources.GroupVersion)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var mapping schema.GroupVersionResource
|
||||
mapping, err = mapper.ResourceFor(groupVersion.WithResource(resource.Name))
|
||||
if err != nil {
|
||||
if meta.IsNoMatchError(err) {
|
||||
continue
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
return svr.informer.ForResource(mapping), ptr.To(groupVersion.WithKind(resource.Kind)), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if resourcesFor == nil {
|
||||
return nil, errors.New("ErrResourceNotFound")
|
||||
}
|
||||
|
||||
return svr.informer.ForResource(resourcesFor.Resource), nil
|
||||
return nil, nil, errors.New("ErrResourceNotFound")
|
||||
}
|
||||
|
||||
11
pkg/daemon/action/identify.go
Normal file
11
pkg/daemon/action/identify.go
Normal file
@@ -0,0 +1,11 @@
|
||||
package action
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
)
|
||||
|
||||
func (svr *Server) Identify(ctx context.Context, req *rpc.IdentifyRequest) (*rpc.IdentifyResponse, error) {
|
||||
return &rpc.IdentifyResponse{ID: svr.ID}, nil
|
||||
}
|
||||
@@ -3,38 +3,42 @@ package action
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/inject"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func (svr *Server) Leave(req *rpc.LeaveRequest, resp rpc.Daemon_LeaveServer) error {
|
||||
defer func() {
|
||||
util.InitLoggerForServer(true)
|
||||
log.SetOutput(svr.LogFile)
|
||||
log.SetLevel(log.DebugLevel)
|
||||
config.Debug = false
|
||||
}()
|
||||
out := io.MultiWriter(newLeaveWarp(resp), svr.LogFile)
|
||||
util.InitLoggerForClient(config.Debug)
|
||||
log.SetOutput(out)
|
||||
log.SetLevel(log.InfoLevel)
|
||||
if svr.connect == nil {
|
||||
log.Infof("not proxy any resource in cluster")
|
||||
log.Infof("Not proxy any resource in cluster")
|
||||
return fmt.Errorf("not proxy any resource in cluster")
|
||||
}
|
||||
|
||||
factory := svr.connect.GetFactory()
|
||||
namespace := svr.connect.Namespace
|
||||
maps := svr.connect.GetClientset().CoreV1().ConfigMaps(namespace)
|
||||
v4, _ := svr.connect.GetLocalTunIP()
|
||||
for _, workload := range req.GetWorkloads() {
|
||||
// add rollback func to remove envoy config
|
||||
log.Infof("leave workload %s", workload)
|
||||
err := handler.UnPatchContainer(factory, maps, namespace, workload, svr.connect.GetLocalTunIPv4())
|
||||
err := inject.UnPatchContainer(factory, maps, namespace, workload, v4)
|
||||
if err != nil {
|
||||
log.Errorf("leave workload %s failed: %v", workload, err)
|
||||
log.Errorf("Leaving workload %s failed: %v", workload, err)
|
||||
continue
|
||||
}
|
||||
log.Infof("leave workload %s successfully", workload)
|
||||
err = util.RolloutStatus(resp.Context(), factory, namespace, workload, time.Minute*60)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -44,10 +48,10 @@ type leaveWarp struct {
|
||||
}
|
||||
|
||||
func (r *leaveWarp) Write(p []byte) (n int, err error) {
|
||||
err = r.server.Send(&rpc.LeaveResponse{
|
||||
_ = r.server.Send(&rpc.LeaveResponse{
|
||||
Message: string(p),
|
||||
})
|
||||
return len(p), err
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func newLeaveWarp(server rpc.Daemon_LeaveServer) io.Writer {
|
||||
|
||||
@@ -9,9 +9,9 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
k8syaml "sigs.k8s.io/yaml"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/controlplane"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/controlplane"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
)
|
||||
|
||||
func (svr *Server) List(ctx context.Context, req *rpc.ListRequest) (*rpc.ListResponse, error) {
|
||||
|
||||
@@ -1,14 +1,32 @@
|
||||
package action
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/hpcloud/tail"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
)
|
||||
|
||||
func (svr *Server) Logs(req *rpc.LogRequest, resp rpc.Daemon_LogsServer) error {
|
||||
path := GetDaemonLogPath()
|
||||
config := tail.Config{Follow: req.Follow, ReOpen: true, MustExist: true}
|
||||
|
||||
lines, err2 := countLines(path)
|
||||
if err2 != nil {
|
||||
return err2
|
||||
}
|
||||
|
||||
// only show latest N lines
|
||||
if req.Lines < 0 {
|
||||
lines = -req.Lines
|
||||
} else {
|
||||
lines -= req.Lines
|
||||
}
|
||||
|
||||
config := tail.Config{Follow: req.Follow, ReOpen: false, MustExist: true, Logger: log.New(io.Discard, "", log.LstdFlags)}
|
||||
if !req.Follow {
|
||||
// FATAL -- cannot set ReOpen without Follow.
|
||||
config.ReOpen = false
|
||||
@@ -29,10 +47,36 @@ func (svr *Server) Logs(req *rpc.LogRequest, resp rpc.Daemon_LogsServer) error {
|
||||
if line.Err != nil {
|
||||
return err
|
||||
}
|
||||
err = resp.Send(&rpc.LogResponse{Message: line.Text})
|
||||
|
||||
if lines--; lines >= 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
err = resp.Send(&rpc.LogResponse{Message: line.Text + "\n"})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func countLines(filename string) (int32, error) {
|
||||
file, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
lineCount := int32(0)
|
||||
|
||||
for scanner.Scan() {
|
||||
lineCount++
|
||||
}
|
||||
|
||||
if err = scanner.Err(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return lineCount, nil
|
||||
}
|
||||
|
||||
@@ -1,17 +1,19 @@
|
||||
package action
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/pflag"
|
||||
"k8s.io/utils/pointer"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/pkg/util"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
// Proxy
|
||||
@@ -21,26 +23,28 @@ import (
|
||||
// 2. if already connect to cluster
|
||||
// 2.1 disconnect from cluster
|
||||
// 2.2 same as step 1
|
||||
func (svr *Server) Proxy(req *rpc.ConnectRequest, resp rpc.Daemon_ProxyServer) error {
|
||||
out := io.MultiWriter(newProxyWarp(resp), svr.LogFile)
|
||||
log.SetOutput(out)
|
||||
func (svr *Server) Proxy(req *rpc.ConnectRequest, resp rpc.Daemon_ProxyServer) (e error) {
|
||||
defer func() {
|
||||
util.InitLoggerForServer(true)
|
||||
log.SetOutput(svr.LogFile)
|
||||
log.SetLevel(log.DebugLevel)
|
||||
config.Debug = false
|
||||
}()
|
||||
log.SetLevel(log.InfoLevel)
|
||||
out := io.MultiWriter(newProxyWarp(resp), svr.LogFile)
|
||||
config.Image = req.Image
|
||||
config.Debug = req.Level == int32(log.DebugLevel)
|
||||
util.InitLoggerForClient(config.Debug)
|
||||
log.SetOutput(out)
|
||||
ctx := resp.Context()
|
||||
connect := &handler.ConnectOptions{
|
||||
Namespace: req.Namespace,
|
||||
Headers: req.Headers,
|
||||
PortMap: req.PortMap,
|
||||
Workloads: req.Workloads,
|
||||
ExtraCIDR: req.ExtraCIDR,
|
||||
ExtraDomain: req.ExtraDomain,
|
||||
UseLocalDNS: req.UseLocalDNS,
|
||||
ExtraRouteInfo: *handler.ParseExtraRouteFromRPC(req.ExtraRoute),
|
||||
Engine: config.Engine(req.Engine),
|
||||
OriginKubeconfigPath: req.OriginKubeconfigPath,
|
||||
}
|
||||
var sshConf = util.ParseSshFromRPC(req.SshJump)
|
||||
var sshConf = ssh.ParseSshFromRPC(req.SshJump)
|
||||
|
||||
file, err := util.ConvertToTempKubeconfigFile([]byte(req.KubeconfigBytes))
|
||||
if err != nil {
|
||||
@@ -52,11 +56,11 @@ func (svr *Server) Proxy(req *rpc.ConnectRequest, resp rpc.Daemon_ProxyServer) e
|
||||
DefValue: file,
|
||||
})
|
||||
var path string
|
||||
path, err = handler.SshJump(ctx, sshConf, flags, false)
|
||||
path, err = ssh.SshJump(ctx, sshConf, flags, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = connect.InitClient(InitFactoryByPath(path, req.Namespace))
|
||||
err = connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -65,6 +69,12 @@ func (svr *Server) Proxy(req *rpc.ConnectRequest, resp rpc.Daemon_ProxyServer) e
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if e != nil && svr.connect != nil {
|
||||
_ = svr.connect.LeaveProxyResources(context.Background())
|
||||
}
|
||||
}()
|
||||
|
||||
daemonClient := svr.GetClient(false)
|
||||
if daemonClient == nil {
|
||||
return fmt.Errorf("daemon is not avaliable")
|
||||
@@ -77,62 +87,54 @@ func (svr *Server) Proxy(req *rpc.ConnectRequest, resp rpc.Daemon_ProxyServer) e
|
||||
)
|
||||
if err == nil && isSameCluster && svr.connect.Equal(connect) {
|
||||
// same cluster, do nothing
|
||||
log.Infof("already connect to cluster")
|
||||
log.Infof("Connected to cluster")
|
||||
} else {
|
||||
log.Infof("try to disconnect from another cluster")
|
||||
var disconnect rpc.Daemon_DisconnectClient
|
||||
disconnect, err = daemonClient.Disconnect(ctx, &rpc.DisconnectRequest{
|
||||
ID: pointer.Int32(0),
|
||||
log.Infof("Disconnecting from another cluster...")
|
||||
var disconnectResp rpc.Daemon_DisconnectClient
|
||||
disconnectResp, err = daemonClient.Disconnect(ctx, &rpc.DisconnectRequest{
|
||||
KubeconfigBytes: ptr.To(req.KubeconfigBytes),
|
||||
Namespace: ptr.To(connect.Namespace),
|
||||
SshJump: sshConf.ToRPC(),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var recv *rpc.DisconnectResponse
|
||||
for {
|
||||
recv, err = disconnect.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
log.Errorf("recv from disconnect failed, %v", err)
|
||||
return err
|
||||
}
|
||||
err = resp.Send(&rpc.ConnectResponse{Message: recv.Message})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = util.CopyAndConvertGRPCStream[rpc.DisconnectResponse, rpc.ConnectResponse](
|
||||
disconnectResp,
|
||||
resp,
|
||||
func(response *rpc.DisconnectResponse) *rpc.ConnectResponse {
|
||||
return &rpc.ConnectResponse{Message: response.Message}
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
util.InitLoggerForClient(config.Debug)
|
||||
log.SetOutput(out)
|
||||
}
|
||||
}
|
||||
|
||||
if svr.connect == nil {
|
||||
log.Infof("connectting to cluster")
|
||||
log.Debugf("Connectting to cluster")
|
||||
var connResp rpc.Daemon_ConnectClient
|
||||
connResp, err = daemonClient.Connect(ctx, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var recv *rpc.ConnectResponse
|
||||
for {
|
||||
recv, err = connResp.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
err = resp.Send(recv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = util.CopyGRPCStream[rpc.ConnectResponse](connResp, resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
util.InitLoggerForClient(config.Debug)
|
||||
log.SetOutput(out)
|
||||
}
|
||||
|
||||
svr.connect.Workloads = req.Workloads
|
||||
svr.connect.Headers = req.Headers
|
||||
svr.connect.PortMap = req.PortMap
|
||||
err = svr.connect.CreateRemoteInboundPod(ctx)
|
||||
if err != nil {
|
||||
log.Errorf("create remote inbound pod failed: %s", err.Error())
|
||||
log.Errorf("Failed to inject inbound sidecar: %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@@ -143,10 +145,10 @@ type proxyWarp struct {
|
||||
}
|
||||
|
||||
func (r *proxyWarp) Write(p []byte) (n int, err error) {
|
||||
err = r.server.Send(&rpc.ConnectResponse{
|
||||
_ = r.server.Send(&rpc.ConnectResponse{
|
||||
Message: string(p),
|
||||
})
|
||||
return len(p), err
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func newProxyWarp(server rpc.Daemon_ProxyServer) io.Writer {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user