fix codacy issues

Signed-off-by: Steffen Vogel <post@steffenvogel.de>
Author: Steffen Vogel
Date:   2022-10-14 10:19:47 +02:00
Parent: d2b82e0bb7
Commit: 06ffcfe199

27 changed files with 128 additions and 99 deletions


@@ -34,7 +34,7 @@ cunīcu's documentation can be found here: [cunicu.li/docs](https://cunicu.li/do
 ## Authors
 - Steffen Vogel ([@stv0g](https://github.com/stv0g), Institute for Automation of Complex Power Systems, RWTH Aachen University)
 ## License


@@ -10,7 +10,7 @@ import (
 	"go.uber.org/zap/zapio"
 	"github.com/stv0g/cunicu/pkg/config"
-	d "github.com/stv0g/cunicu/pkg/daemon"
+	"github.com/stv0g/cunicu/pkg/daemon"
 	"github.com/stv0g/cunicu/pkg/rpc"
 	"github.com/stv0g/cunicu/pkg/util/terminal"
 )
@@ -19,8 +19,8 @@ var (
 	daemonCmd = &cobra.Command{
 		Use: "daemon [interface-names...]",
 		Short: "Start the daemon",
-		Example: `$ cunicu daemon -u -x mysecretpass wg0`,
-		Run: daemon,
+		Example: `$ cunicu daemon -U -x mysecretpass wg0`,
+		Run: daemonRun,
 		ValidArgsFunction: cobra.NoFileCompletions,
 	}
@@ -43,20 +43,22 @@ func init() {
 		panic(err)
 	}
-	if err := daemonCmd.MarkFlagFilename("config", "yaml", "json"); err != nil {
+	if err := daemonCmd.MarkPersistentFlagFilename("config", "yaml", "json"); err != nil {
 		panic(err)
 	}
 	pf.VisitAll(func(f *pflag.Flag) {
 		if f.Value.Type() == "bool" {
-			daemonCmd.RegisterFlagCompletionFunc(f.Name, BooleanCompletions)
+			if err := daemonCmd.RegisterFlagCompletionFunc(f.Name, BooleanCompletions); err != nil {
+				panic(err)
+			}
 		}
 	})
 	rootCmd.AddCommand(daemonCmd)
 }
-func daemon(cmd *cobra.Command, args []string) {
+func daemonRun(cmd *cobra.Command, args []string) {
 	io.WriteString(os.Stdout, Banner(color))
 	if err := cfg.Init(args); err != nil {
@@ -76,7 +78,7 @@ func daemon(cmd *cobra.Command, args []string) {
 	}
 	// Create daemon
-	d, err := d.New(cfg)
+	d, err := daemon.New(cfg)
 	if err != nil {
 		logger.Fatal("Failed to create daemon", zap.Error(err))
 	}
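For reference, a minimal, self-contained sketch of the completion pattern applied above: register a completion function for every boolean flag and handle the error returned by Cobra's `RegisterFlagCompletionFunc` instead of discarding it. `booleanCompletions` and `registerBoolCompletions` are hypothetical stand-ins, not the project's code.

```go
package main

import (
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

// booleanCompletions is a hypothetical stand-in for the project's BooleanCompletions helper.
func booleanCompletions(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	return []string{"true", "false"}, cobra.ShellCompDirectiveNoFileComp
}

// registerBoolCompletions wires a completion function to every boolean flag of cmd.
func registerBoolCompletions(cmd *cobra.Command) {
	cmd.PersistentFlags().VisitAll(func(f *pflag.Flag) {
		if f.Value.Type() == "bool" {
			// RegisterFlagCompletionFunc returns an error; silently ignoring it is what Codacy flagged.
			if err := cmd.RegisterFlagCompletionFunc(f.Name, booleanCompletions); err != nil {
				panic(err)
			}
		}
	})
}

func main() {
	cmd := &cobra.Command{Use: "example"}
	cmd.PersistentFlags().Bool("verbose", false, "enable verbose output")
	registerBoolCompletions(cmd)
	_ = cmd.Execute()
}
```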


@@ -39,7 +39,10 @@ func init() {
 func interfaceValidArg(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
 	// Establish RPC connection
-	rpcConnect(cmd, args)
+	if err := rpcConnect(cmd, args); err != nil {
+		return nil, cobra.ShellCompDirectiveError
+	}
 	defer rpcDisconnect(cmd, args)
 	p := &rpcproto.GetStatusParams{}
@@ -100,11 +103,15 @@ func invite(cmd *cobra.Command, args []string) {
 		if qrCode {
 			buf := &bytes.Buffer{}
-			cfg.Dump(buf)
+			if err := cfg.Dump(buf); err != nil {
+				logger.Fatal("Failed to dump config", zap.Error(err))
+			}
 			terminal.QRCode(buf.String())
 		} else {
-			cfg.Dump(os.Stdout)
+			if err := cfg.Dump(os.Stdout); err != nil {
+				logger.Fatal("Failed to dump config", zap.Error(err))
+			}
 		}
 	}
 }


@@ -30,7 +30,7 @@ func init() {
 	pf.VarP(&format, "format", "f", "Output `format` (one of: human, json)")
 	pf.BoolVarP(&indent, "indent", "i", true, "Format and indent JSON ouput")
-	if err := daemonCmd.RegisterFlagCompletionFunc("format", cobra.FixedCompletions([]string{"human", "json"}, cobra.ShellCompDirectiveNoFileComp)); err != nil {
+	if err := statusCmd.RegisterFlagCompletionFunc("format", cobra.FixedCompletions([]string{"human", "json"}, cobra.ShellCompDirectiveNoFileComp)); err != nil {
 		panic(err)
 	}
@@ -39,7 +39,9 @@ func init() {
 func statusValidArgs(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
 	// Establish RPC connection
-	rpcConnect(cmd, args)
+	if err := rpcConnect(cmd, args); err != nil {
+		return nil, cobra.ShellCompDirectiveError
+	}
 	defer rpcDisconnect(cmd, args)
 	p := &rpcproto.GetStatusParams{}


@@ -48,7 +48,9 @@ func wgShowValidArgs(cmd *cobra.Command, args []string, toComplete string) ([]st
 	if len(args) == 0 {
 		comps = []string{"all", "interfaces"}
-		rpcConnect(cmd, args)
+		if err := rpcConnect(cmd, args); err != nil {
+			return nil, cobra.ShellCompDirectiveError
+		}
 		defer rpcDisconnect(cmd, args)
 		sts, err := rpcClient.GetStatus(context.Background(), &rpcproto.GetStatusParams{})


@@ -7,13 +7,13 @@ sidebar_position: 99
 There exist a suite of other peer-to-peer VPN solutions comparable to cunīcu:
 - [Tinc-VPN](https://www.tinc-vpn.org/)
 - [weron](https://github.com/pojntfx/weron)
 - [Tailscale](https://tailscale.com/)
 - [Zerotier](https://www.zerotier.com/)
 - [netbird](https://netbird.io/) (formerly Wiretrustee)
 - [wg-dynamic](https://github.com/WireGuard/wg-dynamic/blob/master/docs/idea.md)
 - [Nebula](https://github.com/slackhq/nebula)
 - [Netmaker](https://www.netmaker.org/)
 You might also want to have a look at [@HarvsG](https://github.com/HarvsG) [WireGuardMeshes](https://github.com/HarvsG/WireGuardMeshes) comparison.


@@ -108,4 +108,3 @@ sidebar_position: 20
 [rfc7064]: https://datatracker.ietf.org/doc/html/rfc7064
 [rfc7065]: https://datatracker.ietf.org/doc/html/rfc7065


@@ -31,6 +31,6 @@ This addresses calculation can be performed with the [`cunicu addresses`](../usa
 The following settings are automatically assigned if they have not been set before:
 - **Private Key:** a new random private key will be generated.
 - **Listen port:** the next free port in the configured listen port range is used (see `wireguard.listen_port_range` setting).
 - **MTU:** is automatically determined from the endpoint addresses or the system default route.


@@ -9,11 +9,11 @@ E.g. we can use cunīcu for the post-quantum safe exchange of pre-shared keys wi
 Currently, the following features are implemented as separate modules:
 - [Auto-configuration of missing interface settings and link-local IP addresses](./autocfg.md) (`autocfg`)
 - [Config Synchronization](./cfgsync.md) (`cfgsync`)
 - [Peer Discovery](./pdisc.md) (`pdisc`)
 - [Endpoint Discovery](./epdisc.md) (`epdisc`)
 - [Hooks](./hooks.md) (`hooks`)
 - [Hosts-file Synchronization](./hsync.md) (`hsync`)
 - [Pre-shared Key Establishment](./pske.md) (`pske`)
 - [Route Synchronization](./rtsync.md) (`rtsync`)


@@ -7,7 +7,7 @@ title: Route Synchronization
 The route synchronization feature keeps the kernel routing table in sync with WireGuard's _AllowedIPs_ setting.
 This synchronization is bi-directional:
 - Networks with are found in a Peers AllowedIP list will be installed as a kernel route.
 - Kernel routes with the peers link-local IP address as next-hop will be added to the Peers _AllowedIPs_ list.
 This rather simple feature allows user to pair cunicu with a software routing daemon like [Bird2](https://bird.network.cz/) while using a single WireGuard interface with multiple peer-to-peer links.
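As context for this feature, here is a hedged sketch of the first direction only — installing one network from a peer's AllowedIPs as a kernel route via the peer's link-local address. It is not the project's implementation; it assumes Linux, the vishvananda/netlink package, and made-up interface name and addresses.

```go
package main

import (
	"log"
	"net"

	"github.com/vishvananda/netlink" // assumed dependency for this sketch (Linux only)
)

func main() {
	// Hypothetical values: a WireGuard interface, one AllowedIPs network and
	// the peer's link-local address acting as next-hop.
	link, err := netlink.LinkByName("wg0")
	if err != nil {
		log.Fatal(err)
	}

	_, dst, err := net.ParseCIDR("10.10.0.0/24")
	if err != nil {
		log.Fatal(err)
	}
	gw := net.ParseIP("169.254.1.1")

	// Install the AllowedIPs network as a kernel route pointing at the peer.
	route := &netlink.Route{
		LinkIndex: link.Attrs().Index,
		Dst:       dst,
		Gw:        gw,
	}
	if err := netlink.RouteAdd(route); err != nil {
		log.Fatal(err)
	}
}
```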


@@ -52,7 +52,7 @@ Once this has been done, the cunīcu logs should show a line `state=connected`.
 ## Authors
 - Steffen Vogel ([@stv0g](https://github.com/stv0g), Institute for Automation of Complex Power Systems, RWTH Aachen University)
 ## Join us
@@ -60,7 +60,7 @@ Please feel free to [join our Slack channel](https://join.slack.com/t/gophers/sh
 ## Name
-The project name _cunīcu_ [kʊˈniːkʊ] is derived from the [latin noun cunīculus](https://en.wiktionary.org/wiki/cuniculus#Latin) which means rabbit, a rabbit burrow or underground tunnel. We have choosen it as a name for this project as _cunīcu_ builds tunnels between otherwise hard to reach network locations.
+The project name _cunīcu_ \[kʊˈniː\] is derived from the [latin noun cunīculus](https://en.wiktionary.org/wiki/cuniculus#Latin) which means rabbit, a rabbit burrow or underground tunnel. We have choosen it as a name for this project as _cunīcu_ builds tunnels between otherwise hard to reach network locations.
 It has been changed from the former name _wice_ in order to broaden the scope of the project and avoid any potential trademark violations.
 ## License


@@ -13,24 +13,24 @@ import (
 	"github.com/stv0g/cunicu/pkg/util/buildinfo"
 )
-type remoteFileProvider struct {
+type RemoteFileProvider struct {
 	url *url.URL
 	etag string
 	lastModified time.Time
 	order []string
 }
-func RemoteFileProvider(u *url.URL) *remoteFileProvider {
-	return &remoteFileProvider{
+func NewRemoteFileProvider(u *url.URL) *RemoteFileProvider {
+	return &RemoteFileProvider{
 		url: u,
 	}
 }
-func (p *remoteFileProvider) Read() (map[string]interface{}, error) {
+func (p *RemoteFileProvider) Read() (map[string]interface{}, error) {
 	return nil, errors.New("this provider does not support parsers")
 }
-func (p *remoteFileProvider) ReadBytes() ([]byte, error) {
+func (p *RemoteFileProvider) ReadBytes() ([]byte, error) {
 	if p.url.Scheme != "https" {
 		host, _, err := net.SplitHostPort(p.url.Host)
 		if err != nil {
@@ -81,12 +81,14 @@ func (p *remoteFileProvider) ReadBytes() ([]byte, error) {
 	return buf, nil
 }
-func (p *remoteFileProvider) Order() []string {
+func (p *RemoteFileProvider) Order() []string {
 	return p.order
 }
-func (p *remoteFileProvider) Version() any {
-	p.hasChanged()
+func (p *RemoteFileProvider) Version() any {
+	if _, err := p.hasChanged(); err != nil {
+		return nil
+	}
 	if p.etag != "" {
 		return p.etag
@@ -99,7 +101,7 @@ func (p *remoteFileProvider) Version() any {
 	return nil
 }
-func (p *remoteFileProvider) hasChanged() (bool, error) {
+func (p *RemoteFileProvider) hasChanged() (bool, error) {
 	client := &http.Client{
 		Timeout: 5 * time.Second,
 	}
@@ -130,19 +132,19 @@ func (p *remoteFileProvider) hasChanged() (bool, error) {
 	return resp.StatusCode == 200, nil
 }
-type localFileProvider struct {
+type LocalFileProvider struct {
 	*file.File
 	order []string
 }
-func LocalFileProvider(u *url.URL) *localFileProvider {
-	return &localFileProvider{
+func NewLocalFileProvider(u *url.URL) *LocalFileProvider {
+	return &LocalFileProvider{
 		File: file.Provider(u.Path),
 	}
 }
-func (p *localFileProvider) ReadBytes() ([]byte, error) {
+func (p *LocalFileProvider) ReadBytes() ([]byte, error) {
 	buf, err := p.File.ReadBytes()
 	if err == nil {
@@ -152,6 +154,6 @@ func (p *localFileProvider) ReadBytes() ([]byte, error) {
 	return buf, err
 }
-func (p *localFileProvider) Order() []string {
+func (p *LocalFileProvider) Order() []string {
 	return p.order
 }
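Since this commit exports the providers and gives them `New…` constructors, a minimal usage sketch may help. It is an assumption about how the renamed API would be called from outside the package; the koanf and yaml parser imports are from knadh/koanf, and the URL is made up.

```go
package main

import (
	"log"
	"net/url"

	"github.com/knadh/koanf"
	"github.com/knadh/koanf/parsers/yaml"

	"github.com/stv0g/cunicu/pkg/config"
)

func main() {
	// Hypothetical remote configuration file; NewRemoteFileProvider replaces the
	// former unexported RemoteFileProvider constructor renamed in this commit.
	u, err := url.Parse("https://example.com/cunicu.yaml")
	if err != nil {
		log.Fatal(err)
	}

	k := koanf.New(".")
	if err := k.Load(config.NewRemoteFileProvider(u), yaml.Parser()); err != nil {
		log.Fatal(err)
	}

	log.Println(k.All())
}
```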


@@ -94,7 +94,7 @@ func (p *lookupProvider) SubProviders() []koanf.Provider {
 		if err != nil {
 			p.logger.Warn("failed to parse URL for configuration file", zap.Error(err))
 		} else {
-			ps = append(ps, RemoteFileProvider(u))
+			ps = append(ps, NewRemoteFileProvider(u))
 		}
 	}


@@ -83,7 +83,7 @@ type Provider struct {
 // - command line flags
 func (c *Config) GetProviders() ([]koanf.Provider, error) {
 	ps := []koanf.Provider{
-		StructsProvider(&DefaultSettings, "koanf"),
+		NewStructsProvider(&DefaultSettings, "koanf"),
 		WireGuardProvider(),
 	}
@@ -97,12 +97,13 @@ func (c *Config) GetProviders() ([]koanf.Provider, error) {
 	if len(c.Files) == 0 {
 		searchPath := []string{"/etc", "/etc/cunicu"}
-		if cwd, err := os.Getwd(); err != nil {
+		cwd, err := os.Getwd()
+		if err != nil {
 			return nil, fmt.Errorf("failed to get working directory")
-		} else {
-			searchPath = append(searchPath, cwd)
 		}
+		searchPath = append(searchPath, cwd)
 		if cfgDir := os.Getenv("CUNICU_CONFIG_DIR"); cfgDir != "" {
 			searchPath = append(searchPath, cfgDir)
 		}
@@ -125,9 +126,9 @@ func (c *Config) GetProviders() ([]koanf.Provider, error) {
 		var p koanf.Provider
 		switch u.Scheme {
 		case "http", "https":
-			p = RemoteFileProvider(u)
+			p = NewRemoteFileProvider(u)
 		case "":
-			p = LocalFileProvider(u)
+			p = NewLocalFileProvider(u)
 		default:
 			return nil, fmt.Errorf("unsupported scheme '%s' for config file", u.Scheme)
 		}
@@ -138,7 +139,7 @@ func (c *Config) GetProviders() ([]koanf.Provider, error) {
 	// Add a runtime configuration file if it exists
 	if fi, err := os.Stat(RuntimeConfigFile); err == nil && !fi.IsDir() {
 		ps = append(ps,
-			LocalFileProvider(&url.URL{
+			NewLocalFileProvider(&url.URL{
 				Path: RuntimeConfigFile,
 			}),
 		)


@@ -41,7 +41,7 @@ func (s *Source) Load() error {
 func load(p koanf.Provider) (*koanf.Koanf, []string, error) {
 	var q koanf.Parser
 	switch p.(type) {
-	case *remoteFileProvider, *localFileProvider:
+	case *RemoteFileProvider, *LocalFileProvider:
 		q = yaml.Parser()
 	default:
 		q = nil
@@ -68,15 +68,16 @@ func load(p koanf.Provider) (*koanf.Koanf, []string, error) {
 	if s, ok := p.(SubProvidable); ok {
 		for _, p := range s.SubProviders() {
-			if d, m, err := load(p); err != nil {
+			d, m, err := load(p)
+			if err != nil {
 				return nil, nil, err
-			} else {
-				if err := k.Merge(d); err != nil {
-					return nil, nil, fmt.Errorf("failed to merge config: %w", err)
-				}
-				o = append(o, m...)
 			}
+			if err := k.Merge(d); err != nil {
+				return nil, nil, fmt.Errorf("failed to merge config: %w", err)
+			}
+			o = append(o, m...)
 		}
 	}


@@ -4,24 +4,24 @@ import (
 	"errors"
 )
-type structsProvider struct {
+type StructsProvider struct {
 	value any
 	tag string
 }
 // StructsProvider is very similar koanf's struct provider
 // but slightly adjusted to our needs.
-func StructsProvider(v any, t string) *structsProvider {
-	return &structsProvider{
+func NewStructsProvider(v any, t string) *StructsProvider {
+	return &StructsProvider{
 		value: v,
 		tag: t,
 	}
 }
-func (p *structsProvider) ReadBytes() ([]byte, error) {
+func (p *StructsProvider) ReadBytes() ([]byte, error) {
 	return nil, errors.New("this provider requires no parser")
 }
-func (p *structsProvider) Read() (map[string]any, error) {
+func (p *StructsProvider) Read() (map[string]any, error) {
 	return Map(p.value, p.tag), nil
 }


@@ -93,7 +93,9 @@ func (d *Daemon) Run() error {
 	go d.watcher.Watch()
-	d.watcher.Sync()
+	if err := d.watcher.Sync(); err != nil {
+		return fmt.Errorf("initial sync failed: %w", err)
+	}
 out:
 	for {


@@ -1,4 +1,3 @@
-// Package feat contains several sub-packages each implementing a dedicated feature.
 package daemon
 import (


@@ -194,7 +194,7 @@ func (e *Interface) PeerByPublicKey(pk crypto.Key) *Peer {
 // Endpoint returns the best guess about our own endpoint
 func (e *Interface) Endpoint() (*net.UDPAddr, error) {
 	var ep *net.UDPAddr
-	var bestPrio uint32 = 0
+	var bestPrio uint32
 	for _, p := range e.Peers {
 		cs, err := p.agent.GetLocalCandidates()


@@ -400,14 +400,17 @@ func (p *Peer) Marshal() *protoepdisc.Peer {
 	}
 	for _, cps := range p.agent.GetCandidatePairsStats() {
+		cps := cps
 		q.CandidatePairStats = append(q.CandidatePairStats, protoepdisc.NewCandidatePairStats(&cps))
 	}
 	for _, cs := range p.agent.GetLocalCandidatesStats() {
+		cs := cs
 		q.LocalCandidateStats = append(q.LocalCandidateStats, protoepdisc.NewCandidateStats(&cs))
 	}
 	for _, cs := range p.agent.GetRemoteCandidatesStats() {
+		cs := cs
 		q.RemoteCandidateStats = append(q.RemoteCandidateStats, protoepdisc.NewCandidateStats(&cs))
 	}
 }
@@ -429,16 +432,16 @@ func (p *Peer) Reachability() protoepdisc.Reachability {
 	case ice.CandidateTypeServerReflexive:
 		if cp.Remote.NetworkType().IsTCP() {
 			return protoepdisc.Reachability_DIRECT_TCP
-		} else {
-			return protoepdisc.Reachability_DIRECT_UDP
 		}
+		return protoepdisc.Reachability_DIRECT_UDP
 	case ice.CandidateTypeRelay:
 		if cp.Remote.NetworkType().IsTCP() {
 			return protoepdisc.Reachability_RELAY_TCP
-		} else {
-			return protoepdisc.Reachability_RELAY_UDP
 		}
+		return protoepdisc.Reachability_RELAY_UDP
 	}
 }
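The `cps := cps` and `cs := cs` copies added in the first hunk above guard against Go's range-variable reuse when a pointer to the loop variable escapes the iteration (only Go 1.22 and later scope the variable per iteration). A minimal, self-contained illustration of the pitfall, with made-up data:

```go
package main

import "fmt"

func main() {
	src := []int{1, 2, 3}

	var ptrs []*int
	for _, v := range src {
		v := v                  // copy the range variable; without this, every &v aliases the same variable on Go < 1.22
		ptrs = append(ptrs, &v)
	}

	for _, p := range ptrs {
		fmt.Print(*p, " ") // prints "1 2 3"; without the copy it would print "3 3 3" on Go < 1.22
	}
}
```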


@@ -104,6 +104,8 @@ func (pd *Interface) sendPeerDescription(chg pdiscproto.PeerDescriptionChange, p
 	// Static addresses
 	for _, addr := range pd.Settings.Addresses {
+		addr := addr
 		_, bits := addr.Mask.Size()
 		addr.Mask = net.CIDRMask(bits, bits)
@@ -112,6 +114,8 @@ func (pd *Interface) sendPeerDescription(chg pdiscproto.PeerDescriptionChange, p
 	// Auto-generated prefixes
 	for _, pfx := range pd.Settings.Prefixes {
+		pfx := pfx
 		addr := pk.IPAddress(pfx)
 		_, bits := addr.Mask.Size()


@@ -25,7 +25,10 @@ func (rs *Interface) OnPeerAdded(p *core.Peer) {
 		rs.gwMap[gw] = p
 	}
-	rs.syncKernel() // Initial sync
+	// Initial sync
+	if err := rs.syncKernel(); err != nil {
+		rs.logger.Error("Failed to synchronize kernel routing table", zap.Error(err))
+	}
 	p.OnModified(rs)
 }


@@ -65,7 +65,7 @@ func (p *Peer) Dump(wr io.Writer, verbosity int) error {
 	}
 	if p.SelectedCandidatePair != nil {
-		if _, err := terminal.FprintKV(wr, "candidate-pair", p.SelectedCandidatePair.ToString()); err != nil {
+		if _, err := terminal.FprintKV(wr, "candidate pair", p.SelectedCandidatePair.ToString()); err != nil {
 			return err
 		}
 	}


@@ -84,7 +84,7 @@ func (s *EndpointDiscoveryServer) RestartPeer(ctx context.Context, params *rpcpr
 }
 func (s *EndpointDiscoveryServer) SendConnectionStates(stream rpcproto.Daemon_StreamEventsServer) {
-	s.daemon.ForEachInterface(func(di *daemon.Interface) error {
+	if err := s.daemon.ForEachInterface(func(di *daemon.Interface) error {
 		i := s.Interface(di)
 		for _, p := range i.Peers {
@@ -102,12 +102,14 @@ func (s *EndpointDiscoveryServer) SendConnectionStates(stream rpcproto.Daemon_St
 			if err := stream.Send(e); err == io.EOF {
 				continue
 			} else if err != nil {
-				s.logger.Error("Failed to send", zap.Error(err))
+				s.logger.Error("Failed to send connection states", zap.Error(err))
 			}
 		}
 		return nil
-	})
+	}); err != nil {
+		s.logger.Error("Failed to send connection states", zap.Error(err))
+	}
 }
 func (s *EndpointDiscoveryServer) OnConnectionStateChange(p *epdisc.Peer, new, prev icex.ConnectionState) {


@@ -14,7 +14,7 @@ function request() {
 }
 function undraft_release() {
-	request releases/$1 -X PATCH -d '{ "draft": false }' | \
+	request "releases/$1" -X PATCH -d '{ "draft": false }' | \
 	jq .
 }
@@ -28,7 +28,7 @@ function download_asset() {
 	curl --silent \
 		--location \
-		--output ${ASSET_NAME} \
+		--output "${ASSET_NAME}" \
 		--header "Authorization: Bearer ${GITHUB_TOKEN}" \
 		--header "Accept:application/octet-stream" \
 		"https://api.github.com/repos/${REPO}/releases/assets/${ASSET_ID}"
@@ -37,7 +37,7 @@ function download_asset() {
 function upload_asset() {
 	RELEASE_ID=$1
 	FILENAME=$2
-	MIME_TYPE=$(file -b --mime-type ${FILENAME})
+	MIME_TYPE=$(file -b --mime-type "${FILENAME}")
 	curl --silent \
 		--location \
@@ -45,7 +45,7 @@ function upload_asset() {
 		--header "Content-Type: ${MIME_TYPE}" \
 		--header "Accept: application/vnd.github+json" \
 		--header "Authorization: Bearer ${GITHUB_TOKEN}" \
-		--data-binary @${FILENAME} \
+		--data-binary "@${FILENAME}" \
 		"https://uploads.github.com/repos/${REPO}/releases/${RELEASE_ID}/assets?name=${FILENAME}" | \
 		jq .
 }


@@ -2,39 +2,39 @@
 This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator.
-### Installation
-```
+## Installation
+```bash
 $ yarn
 ```
-### Local Development
-```
+## Local Development
+```bash
 $ yarn start
 ```
 This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
-### Build
-```
+## Build
+```bash
 $ yarn build
 ```
 This command generates static content into the `build` directory and can be served using any static contents hosting service.
-### Deployment
+## Deployment
 Using SSH:
-```
+```bash
 $ USE_SSH=true yarn deploy
 ```
 Not using SSH:
-```
+```bash
 $ GIT_USER=<Your GitHub username> yarn deploy
 ```


@@ -23,7 +23,7 @@ The project has now its dedicated [website](https://cunicu.li), [GitHub organiza
 Feel free to follow us there for updates!
 You might also have realized that the project name has changed. We decided to rebrand from the previous name _wice_ to avoid any potential trademark issues with the WireGuard project as well as another small German company named _WICE_.
-The project name _cunīcu_ [kʊˈniːkʊ] is derived from the [latin noun cunīculus](https://en.wiktionary.org/wiki/cuniculus#Latin) which means rabbit, a rabbit burrow or underground tunnel. We have choosen it as a name for this project as _cunīcu_ builds tunnels between otherwise hard to reach network locations.
+The project name _cunīcu_ \[kʊˈniː\] is derived from the [latin noun cunīculus](https://en.wiktionary.org/wiki/cuniculus#Latin) which means rabbit, a rabbit burrow or underground tunnel. We have choosen it as a name for this project as _cunīcu_ builds tunnels between otherwise hard to reach network locations.
 This also gave us the opportunity to redesign the logo which you find further down in the sticker design.