mirror of https://github.com/yggdrasil-network/yggdrasil-go.git
synced 2024-11-23 18:15:24 +00:00
commit 562a7d1f19

CHANGELOG.md (19)
@@ -25,6 +25,25 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- in case of vulnerabilities.
-->

## [0.3.8] - 2019-08-21

### Changed

- Yggdrasil can now send multiple packets from the switch at once, which results in improved throughput with smaller packets or lower MTUs
- Performance has been slightly improved by not allocating cancellations where not necessary
- Crypto-key routing options have been renamed for clarity
  - `IPv4Sources` is now named `IPv4LocalSubnets`
  - `IPv6Sources` is now named `IPv6LocalSubnets`
  - `IPv4Destinations` is now named `IPv4RemoteSubnets`
  - `IPv6Destinations` is now named `IPv6RemoteSubnets`
  - The old option names will continue to be accepted by the configuration parser for now but may not be indefinitely
- When presented with multiple paths between two nodes, the switch now prefers the most recently used port when possible instead of the least recently used, helping to reduce packet reordering
- New nonce tracking should help to reduce the number of packets dropped as a result of multiple/aggregate paths or congestion control in the switch
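For illustration, a `TunnelRouting` section using the renamed options might look like the sketch below; the subnet values and `boxpubkey` are placeholders rather than anything taken from this commit:

```
TunnelRouting: {
  Enable: true
  # Ranges owned by remote nodes, keyed to the public key they should be tunnelled to
  IPv6RemoteSubnets: { "fd00:1234::/64": "boxpubkey" }
  # Ranges on this node's side of the tunnels that are allowed as source addresses
  IPv6LocalSubnets: [ "fd00:abcd::/64" ]
}
```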

### Fixed

- **Security vulnerability**: Address verification was not strict enough, which could result in a malicious session sending traffic with unexpected or spoofed source or destination addresses which Yggdrasil could fail to reject
  - Versions `0.3.6` and `0.3.7` are vulnerable - users of these versions should upgrade as soon as possible
  - Versions `0.3.5` and earlier are not affected
- A deadlock was fixed in the session code which could result in Yggdrasil failing to pass traffic after some time
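The stricter verification amounts to checking that the addresses inside each tunnelled packet actually belong to the session they arrived on. A minimal sketch of that kind of source check for IPv6 traffic is shown below; this is simplified standalone code with placeholder parameters, not the exact logic from this commit:

```go
package main

import (
	"bytes"
	"fmt"
)

// validSource reports whether an IPv6 packet's source address matches the
// address or routed /64 subnet we expect from a session. The parameters are
// placeholders: addr is the expected 16-byte node address, snet the expected
// 8-byte subnet prefix.
func validSource(packet []byte, addr [16]byte, snet [8]byte) bool {
	if len(packet) < 40 || packet[0]&0xf0 != 0x60 {
		return false // not an IPv6 header
	}
	src := packet[8:24]
	switch {
	case src[0] == 0x02 && bytes.Equal(src, addr[:]): // 200::/8 node address
		return true
	case src[0] == 0x03 && bytes.Equal(src[:8], snet[:]): // 300::/8 routed subnet
		return true
	}
	return false
}

func main() {
	var pkt [40]byte
	pkt[0] = 0x60
	fmt.Println(validSource(pkt[:], [16]byte{}, [8]byte{})) // false: all-zero source
}
```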

## [0.3.7] - 2019-08-14

### Changed

- The switch should now forward packets along a single path more consistently in cases where congestion is low and multiple equal-length paths exist, which should improve stability and result in fewer out-of-order packets
@@ -80,6 +80,24 @@ func readConfig(useconf *bool, useconffile *string, normaliseconf *bool) *config
	if listen, ok := dat["Listen"].(string); ok {
		dat["Listen"] = []string{listen}
	}
	if tunnelrouting, ok := dat["TunnelRouting"].(map[string]interface{}); ok {
		if c, ok := tunnelrouting["IPv4Sources"]; ok {
			delete(tunnelrouting, "IPv4Sources")
			tunnelrouting["IPv4LocalSubnets"] = c
		}
		if c, ok := tunnelrouting["IPv6Sources"]; ok {
			delete(tunnelrouting, "IPv6Sources")
			tunnelrouting["IPv6LocalSubnets"] = c
		}
		if c, ok := tunnelrouting["IPv4Destinations"]; ok {
			delete(tunnelrouting, "IPv4Destinations")
			tunnelrouting["IPv4RemoteSubnets"] = c
		}
		if c, ok := tunnelrouting["IPv6Destinations"]; ok {
			delete(tunnelrouting, "IPv6Destinations")
			tunnelrouting["IPv6RemoteSubnets"] = c
		}
	}
	// Sanitise the config
	confJson, err := json.Marshal(dat)
	if err != nil {
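To make the migration above concrete, a configuration parsed with a deprecated key is rewritten in memory before unmarshalling, so it behaves exactly as if the new key had been used. A minimal standalone sketch (hypothetical values, not from this commit):

```go
package main

import "fmt"

func main() {
	// Hypothetical decoded config, as readConfig would see it after HJSON parsing.
	tunnelrouting := map[string]interface{}{
		"Enable":      true,
		"IPv4Sources": []string{"192.168.10.0/24"},
	}
	// Same migration step as above: move the value under its new key.
	if c, ok := tunnelrouting["IPv4Sources"]; ok {
		delete(tunnelrouting, "IPv4Sources")
		tunnelrouting["IPv4LocalSubnets"] = c
	}
	fmt.Println(tunnelrouting) // map[Enable:true IPv4LocalSubnets:[192.168.10.0/24]]
}
```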
go.mod (5)
@@ -1,19 +1,18 @@
module github.com/yggdrasil-network/yggdrasil-go

require (
	github.com/docker/libcontainer v2.2.1+incompatible
	github.com/gologme/log v0.0.0-20181207131047-4e5d8ccb38e8
	github.com/hashicorp/go-syslog v1.0.0
	github.com/hjson/hjson-go v0.0.0-20181010104306-a25ecf6bd222
	github.com/kardianos/minwinsvc v0.0.0-20151122163309-cad6b2b879b0
	github.com/mitchellh/mapstructure v1.1.2
	github.com/songgao/packets v0.0.0-20160404182456-549a10cd4091
	github.com/songgao/water v0.0.0-20190725173103-fd331bda3f4b // indirect
	github.com/vishvananda/netlink v1.0.0
	github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f // indirect
	github.com/yggdrasil-network/water v0.0.0-20190812103929-c83fe40250f8
	golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
	golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
	golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7
	golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a
	golang.org/x/text v0.3.2
	golang.org/x/tools v0.0.0-20190814171936-5b18234b3ae0 // indirect
)
go.sum (44)
@ -1,5 +1,3 @@
|
||||
github.com/docker/libcontainer v2.2.1+incompatible h1:++SbbkCw+X8vAd4j2gOCzZ2Nn7s2xFALTf7LZKmM1/0=
|
||||
github.com/docker/libcontainer v2.2.1+incompatible/go.mod h1:osvj61pYsqhNCMLGX31xr7klUBhHb/ZBuXS0o1Fvwbw=
|
||||
github.com/gologme/log v0.0.0-20181207131047-4e5d8ccb38e8 h1:WD8iJ37bRNwvETMfVTusVSAi0WdXTpfNVGY2aHycNKY=
|
||||
github.com/gologme/log v0.0.0-20181207131047-4e5d8ccb38e8/go.mod h1:gq31gQ8wEHkR+WekdWsqDuf8pXTUZA9BnnzTuPz1Y9U=
|
||||
github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
|
||||
@ -12,48 +10,19 @@ github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQz
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/songgao/packets v0.0.0-20160404182456-549a10cd4091 h1:1zN6ImoqhSJhN8hGXFaJlSC8msLmIbX8bFqOfWLKw0w=
|
||||
github.com/songgao/packets v0.0.0-20160404182456-549a10cd4091/go.mod h1:N20Z5Y8oye9a7HmytmZ+tr8Q2vlP0tAHP13kTHzwvQY=
|
||||
github.com/songgao/water v0.0.0-20190725173103-fd331bda3f4b h1:+y4hCMc/WKsDbAPsOQZgBSaSZ26uh2afyaWeVg/3s/c=
|
||||
github.com/songgao/water v0.0.0-20190725173103-fd331bda3f4b/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E=
|
||||
github.com/vishvananda/netlink v1.0.0 h1:bqNY2lgheFIu1meHUFSH3d7vG93AFyqg3oGbJCOJgSM=
|
||||
github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
|
||||
github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f h1:nBX3nTcmxEtHSERBJaIo1Qa26VwRaopnZmfDQUXsF4I=
|
||||
github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
|
||||
github.com/yggdrasil-network/water v0.0.0-20180615095340-f732c88f34ae h1:MYCANF1kehCG6x6G+/9txLfq6n3lS5Vp0Mxn1hdiBAc=
|
||||
github.com/yggdrasil-network/water v0.0.0-20180615095340-f732c88f34ae/go.mod h1:R0SBCsugm+Sf1katgTb2t7GXMm+nRIv43tM4VDZbaOs=
|
||||
github.com/yggdrasil-network/water v0.0.0-20190719211521-a76871ea954b/go.mod h1:R0SBCsugm+Sf1katgTb2t7GXMm+nRIv43tM4VDZbaOs=
|
||||
github.com/yggdrasil-network/water v0.0.0-20190719213007-b160316e362e/go.mod h1:R0SBCsugm+Sf1katgTb2t7GXMm+nRIv43tM4VDZbaOs=
|
||||
github.com/yggdrasil-network/water v0.0.0-20190720101301-5db94379a5eb/go.mod h1:R0SBCsugm+Sf1katgTb2t7GXMm+nRIv43tM4VDZbaOs=
|
||||
github.com/yggdrasil-network/water v0.0.0-20190720145626-28ccb9101d55/go.mod h1:R0SBCsugm+Sf1katgTb2t7GXMm+nRIv43tM4VDZbaOs=
|
||||
github.com/yggdrasil-network/water v0.0.0-20190725073841-250edb919f8a h1:mQ0mPD+dyB/vaDPyVkCBiXUQu9Or7/cRSTjPlV8tXvw=
|
||||
github.com/yggdrasil-network/water v0.0.0-20190725073841-250edb919f8a/go.mod h1:R0SBCsugm+Sf1katgTb2t7GXMm+nRIv43tM4VDZbaOs=
|
||||
github.com/yggdrasil-network/water v0.0.0-20190725123504-a16161896c34 h1:Qh5FE+Q5iGqpmR/FPMYHuoZLN921au/nxAlmKe+Hdbo=
|
||||
github.com/yggdrasil-network/water v0.0.0-20190725123504-a16161896c34/go.mod h1:R0SBCsugm+Sf1katgTb2t7GXMm+nRIv43tM4VDZbaOs=
|
||||
github.com/yggdrasil-network/water v0.0.0-20190812103929-c83fe40250f8 h1:YY9Pg2BEp0jeUVU60svTOaDr+fs1ySC9RbdC1Qc6wOw=
|
||||
github.com/yggdrasil-network/water v0.0.0-20190812103929-c83fe40250f8/go.mod h1:R0SBCsugm+Sf1katgTb2t7GXMm+nRIv43tM4VDZbaOs=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 h1:mKdxBk7AujPs8kU4m80U72y/zjbZ3UcXC7dClwKbUI0=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/net v0.0.0-20181207154023-610586996380 h1:zPQexyRtNYBc7bcHmehl1dH6TB3qn8zytv8cBGLDNY0=
|
||||
golang.org/x/net v0.0.0-20181207154023-610586996380/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20181206074257-70b957f3b65e h1:njOxP/wVblhCLIUhjHXf6X+dzTt5OQ3vMQo9mkOIKIo=
|
||||
golang.org/x/sys v0.0.0-20181206074257-70b957f3b65e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7 h1:LepdCS8Gf/MVejFIt8lsiexZATdoGVyp5bcyS+rYoUI=
|
||||
golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0=
|
||||
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e h1:TsjK5I7fXk8f2FQrgu6NS7i5Qih3knl2FL1htyguLRE=
|
||||
golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
@ -61,10 +30,3 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
|
||||
golang.org/x/tools v0.0.0-20190724185037-8aa4eac1a7c1/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
|
||||
golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
|
||||
golang.org/x/tools v0.0.0-20190802003818-e9bb7d36c060/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
|
||||
golang.org/x/tools v0.0.0-20190809145639-6d4652c779c4/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190814171936-5b18234b3ae0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@@ -74,11 +74,11 @@ type SessionFirewall struct {

// TunnelRouting contains the crypto-key routing tables for tunneling
type TunnelRouting struct {
	Enable bool `comment:"Enable or disable tunnel routing."`
	IPv6Destinations map[string]string `comment:"IPv6 CIDR subnets, mapped to the EncryptionPublicKey to which they\nshould be routed, e.g. { \"aaaa:bbbb:cccc::/e\": \"boxpubkey\", ... }"`
	IPv6Sources []string `comment:"Optional IPv6 source subnets which are allowed to be tunnelled in\naddition to this node's Yggdrasil address/subnet. If not\nspecified, only traffic originating from this node's Yggdrasil\naddress or subnet will be tunnelled."`
	IPv4Destinations map[string]string `comment:"IPv4 CIDR subnets, mapped to the EncryptionPublicKey to which they\nshould be routed, e.g. { \"a.b.c.d/e\": \"boxpubkey\", ... }"`
	IPv4Sources []string `comment:"IPv4 source subnets which are allowed to be tunnelled. Unlike for\nIPv6, this option is required for bridging IPv4 traffic. Only\ntraffic with a source matching these subnets will be tunnelled."`
	Enable bool `comment:"Enable or disable tunnel routing."`
	IPv6RemoteSubnets map[string]string `comment:"IPv6 subnets belonging to remote nodes, mapped to the node's public\nkey, e.g. { \"aaaa:bbbb:cccc::/e\": \"boxpubkey\", ... }"`
	IPv6LocalSubnets []string `comment:"IPv6 subnets belonging to this node's end of the tunnels. Only traffic\nfrom these ranges (or the Yggdrasil node's IPv6 address/subnet)\nwill be tunnelled."`
	IPv4RemoteSubnets map[string]string `comment:"IPv4 subnets belonging to remote nodes, mapped to the node's public\nkey, e.g. { \"a.b.c.d/e\": \"boxpubkey\", ... }"`
	IPv4LocalSubnets []string `comment:"IPv4 subnets belonging to this node's end of the tunnels. Only traffic\nfrom these ranges will be tunnelled."`
}

// SwitchOptions contains tuning options for the switch
@ -66,15 +66,15 @@ func (t *TunAdapter) SetupAdminHandlers(a *admin.AdminSocket) {
|
||||
t.ckr.setEnabled(enabled)
|
||||
return admin.Info{"enabled": enabled}, nil
|
||||
})
|
||||
a.AddHandler("addSourceSubnet", []string{"subnet"}, func(in admin.Info) (admin.Info, error) {
|
||||
if err := t.ckr.addSourceSubnet(in["subnet"].(string)); err == nil {
|
||||
a.AddHandler("addLocalSubnet", []string{"subnet"}, func(in admin.Info) (admin.Info, error) {
|
||||
if err := t.ckr.addLocalSubnet(in["subnet"].(string)); err == nil {
|
||||
return admin.Info{"added": []string{in["subnet"].(string)}}, nil
|
||||
} else {
|
||||
return admin.Info{"not_added": []string{in["subnet"].(string)}}, errors.New("Failed to add source subnet")
|
||||
}
|
||||
})
|
||||
a.AddHandler("addRoute", []string{"subnet", "box_pub_key"}, func(in admin.Info) (admin.Info, error) {
|
||||
if err := t.ckr.addRoute(in["subnet"].(string), in["box_pub_key"].(string)); err == nil {
|
||||
a.AddHandler("addRemoteSubnet", []string{"subnet", "box_pub_key"}, func(in admin.Info) (admin.Info, error) {
|
||||
if err := t.ckr.addRemoteSubnet(in["subnet"].(string), in["box_pub_key"].(string)); err == nil {
|
||||
return admin.Info{"added": []string{fmt.Sprintf("%s via %s", in["subnet"].(string), in["box_pub_key"].(string))}}, nil
|
||||
} else {
|
||||
return admin.Info{"not_added": []string{fmt.Sprintf("%s via %s", in["subnet"].(string), in["box_pub_key"].(string))}}, errors.New("Failed to add route")
|
||||
@ -87,8 +87,8 @@ func (t *TunAdapter) SetupAdminHandlers(a *admin.AdminSocket) {
|
||||
subnets = append(subnets, subnet.String())
|
||||
}
|
||||
}
|
||||
getSourceSubnets(t.ckr.ipv4sources)
|
||||
getSourceSubnets(t.ckr.ipv6sources)
|
||||
getSourceSubnets(t.ckr.ipv4locals)
|
||||
getSourceSubnets(t.ckr.ipv6locals)
|
||||
return admin.Info{"source_subnets": subnets}, nil
|
||||
})
|
||||
a.AddHandler("getRoutes", []string{}, func(in admin.Info) (admin.Info, error) {
|
||||
@ -98,19 +98,19 @@ func (t *TunAdapter) SetupAdminHandlers(a *admin.AdminSocket) {
|
||||
routes[ckr.subnet.String()] = hex.EncodeToString(ckr.destination[:])
|
||||
}
|
||||
}
|
||||
getRoutes(t.ckr.ipv4routes)
|
||||
getRoutes(t.ckr.ipv6routes)
|
||||
getRoutes(t.ckr.ipv4remotes)
|
||||
getRoutes(t.ckr.ipv6remotes)
|
||||
return admin.Info{"routes": routes}, nil
|
||||
})
|
||||
a.AddHandler("removeSourceSubnet", []string{"subnet"}, func(in admin.Info) (admin.Info, error) {
|
||||
if err := t.ckr.removeSourceSubnet(in["subnet"].(string)); err == nil {
|
||||
a.AddHandler("removeLocalSubnet", []string{"subnet"}, func(in admin.Info) (admin.Info, error) {
|
||||
if err := t.ckr.removeLocalSubnet(in["subnet"].(string)); err == nil {
|
||||
return admin.Info{"removed": []string{in["subnet"].(string)}}, nil
|
||||
} else {
|
||||
return admin.Info{"not_removed": []string{in["subnet"].(string)}}, errors.New("Failed to remove source subnet")
|
||||
}
|
||||
})
|
||||
a.AddHandler("removeRoute", []string{"subnet", "box_pub_key"}, func(in admin.Info) (admin.Info, error) {
|
||||
if err := t.ckr.removeRoute(in["subnet"].(string), in["box_pub_key"].(string)); err == nil {
|
||||
a.AddHandler("removeRemoteSubnet", []string{"subnet", "box_pub_key"}, func(in admin.Info) (admin.Info, error) {
|
||||
if err := t.ckr.removeRemoteSubnet(in["subnet"].(string), in["box_pub_key"].(string)); err == nil {
|
||||
return admin.Info{"removed": []string{fmt.Sprintf("%s via %s", in["subnet"].(string), in["box_pub_key"].(string))}}, nil
|
||||
} else {
|
||||
return admin.Info{"not_removed": []string{fmt.Sprintf("%s via %s", in["subnet"].(string), in["box_pub_key"].(string))}}, errors.New("Failed to remove route")
|
||||
|
@@ -21,15 +21,15 @@ type cryptokey struct {
	tun          *TunAdapter
	enabled      atomic.Value // bool
	reconfigure  chan chan error
	ipv4routes   []cryptokey_route
	ipv6routes   []cryptokey_route
	ipv4remotes  []cryptokey_route
	ipv6remotes  []cryptokey_route
	ipv4cache    map[address.Address]cryptokey_route
	ipv6cache    map[address.Address]cryptokey_route
	ipv4sources  []net.IPNet
	ipv6sources  []net.IPNet
	mutexroutes  sync.RWMutex
	ipv4locals   []net.IPNet
	ipv6locals   []net.IPNet
	mutexremotes sync.RWMutex
	mutexcaches  sync.RWMutex
	mutexsources sync.RWMutex
	mutexlocals  sync.RWMutex
}

type cryptokey_route struct {
@ -65,43 +65,43 @@ func (c *cryptokey) configure() error {
|
||||
c.setEnabled(current.TunnelRouting.Enable)
|
||||
|
||||
// Clear out existing routes
|
||||
c.mutexroutes.Lock()
|
||||
c.ipv6routes = make([]cryptokey_route, 0)
|
||||
c.ipv4routes = make([]cryptokey_route, 0)
|
||||
c.mutexroutes.Unlock()
|
||||
c.mutexremotes.Lock()
|
||||
c.ipv6remotes = make([]cryptokey_route, 0)
|
||||
c.ipv4remotes = make([]cryptokey_route, 0)
|
||||
c.mutexremotes.Unlock()
|
||||
|
||||
// Add IPv6 routes
|
||||
for ipv6, pubkey := range current.TunnelRouting.IPv6Destinations {
|
||||
if err := c.addRoute(ipv6, pubkey); err != nil {
|
||||
for ipv6, pubkey := range current.TunnelRouting.IPv6RemoteSubnets {
|
||||
if err := c.addRemoteSubnet(ipv6, pubkey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Add IPv4 routes
|
||||
for ipv4, pubkey := range current.TunnelRouting.IPv4Destinations {
|
||||
if err := c.addRoute(ipv4, pubkey); err != nil {
|
||||
for ipv4, pubkey := range current.TunnelRouting.IPv4RemoteSubnets {
|
||||
if err := c.addRemoteSubnet(ipv4, pubkey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Clear out existing sources
|
||||
c.mutexsources.Lock()
|
||||
c.ipv6sources = make([]net.IPNet, 0)
|
||||
c.ipv4sources = make([]net.IPNet, 0)
|
||||
c.mutexsources.Unlock()
|
||||
c.mutexlocals.Lock()
|
||||
c.ipv6locals = make([]net.IPNet, 0)
|
||||
c.ipv4locals = make([]net.IPNet, 0)
|
||||
c.mutexlocals.Unlock()
|
||||
|
||||
// Add IPv6 sources
|
||||
c.ipv6sources = make([]net.IPNet, 0)
|
||||
for _, source := range current.TunnelRouting.IPv6Sources {
|
||||
if err := c.addSourceSubnet(source); err != nil {
|
||||
c.ipv6locals = make([]net.IPNet, 0)
|
||||
for _, source := range current.TunnelRouting.IPv6LocalSubnets {
|
||||
if err := c.addLocalSubnet(source); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Add IPv4 sources
|
||||
c.ipv4sources = make([]net.IPNet, 0)
|
||||
for _, source := range current.TunnelRouting.IPv4Sources {
|
||||
if err := c.addSourceSubnet(source); err != nil {
|
||||
c.ipv4locals = make([]net.IPNet, 0)
|
||||
for _, source := range current.TunnelRouting.IPv4LocalSubnets {
|
||||
if err := c.addLocalSubnet(source); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -128,35 +128,21 @@ func (c *cryptokey) isEnabled() bool {
|
||||
|
||||
// Check whether the given address (with the address length specified in bytes)
|
||||
// matches either the current node's address, the node's routed subnet or the
|
||||
// list of subnets specified in IPv4Sources/IPv6Sources.
|
||||
func (c *cryptokey) isValidSource(addr address.Address, addrlen int) bool {
|
||||
c.mutexsources.RLock()
|
||||
defer c.mutexsources.RUnlock()
|
||||
|
||||
ip := net.IP(addr[:addrlen])
|
||||
|
||||
if addrlen == net.IPv6len {
|
||||
// Does this match our node's address?
|
||||
if bytes.Equal(addr[:16], c.tun.addr[:16]) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Does this match our node's subnet?
|
||||
if bytes.Equal(addr[:8], c.tun.subnet[:8]) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// list of subnets specified in ipv4locals/ipv6locals.
|
||||
func (c *cryptokey) isValidLocalAddress(addr address.Address, addrlen int) bool {
|
||||
c.mutexlocals.RLock()
|
||||
defer c.mutexlocals.RUnlock()
|
||||
// Does it match a configured CKR source?
|
||||
if c.isEnabled() {
|
||||
ip := net.IP(addr[:addrlen])
|
||||
// Build our references to the routing sources
|
||||
var routingsources *[]net.IPNet
|
||||
|
||||
// Check if the prefix is IPv4 or IPv6
|
||||
if addrlen == net.IPv6len {
|
||||
routingsources = &c.ipv6sources
|
||||
routingsources = &c.ipv6locals
|
||||
} else if addrlen == net.IPv4len {
|
||||
routingsources = &c.ipv4sources
|
||||
routingsources = &c.ipv4locals
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
@ -174,9 +160,9 @@ func (c *cryptokey) isValidSource(addr address.Address, addrlen int) bool {
|
||||
|
||||
// Adds a source subnet, which allows traffic with these source addresses to
|
||||
// be tunnelled using crypto-key routing.
|
||||
func (c *cryptokey) addSourceSubnet(cidr string) error {
|
||||
c.mutexsources.Lock()
|
||||
defer c.mutexsources.Unlock()
|
||||
func (c *cryptokey) addLocalSubnet(cidr string) error {
|
||||
c.mutexlocals.Lock()
|
||||
defer c.mutexlocals.Unlock()
|
||||
|
||||
// Is the CIDR we've been given valid?
|
||||
_, ipnet, err := net.ParseCIDR(cidr)
|
||||
@ -192,9 +178,9 @@ func (c *cryptokey) addSourceSubnet(cidr string) error {
|
||||
|
||||
// Check if the prefix is IPv4 or IPv6
|
||||
if prefixsize == net.IPv6len*8 {
|
||||
routingsources = &c.ipv6sources
|
||||
routingsources = &c.ipv6locals
|
||||
} else if prefixsize == net.IPv4len*8 {
|
||||
routingsources = &c.ipv4sources
|
||||
routingsources = &c.ipv4locals
|
||||
} else {
|
||||
return errors.New("Unexpected prefix size")
|
||||
}
|
||||
@ -214,10 +200,10 @@ func (c *cryptokey) addSourceSubnet(cidr string) error {
|
||||
|
||||
// Adds a destination route for the given CIDR to be tunnelled to the node
|
||||
// with the given BoxPubKey.
|
||||
func (c *cryptokey) addRoute(cidr string, dest string) error {
|
||||
c.mutexroutes.Lock()
|
||||
func (c *cryptokey) addRemoteSubnet(cidr string, dest string) error {
|
||||
c.mutexremotes.Lock()
|
||||
c.mutexcaches.Lock()
|
||||
defer c.mutexroutes.Unlock()
|
||||
defer c.mutexremotes.Unlock()
|
||||
defer c.mutexcaches.Unlock()
|
||||
|
||||
// Is the CIDR we've been given valid?
|
||||
@ -235,10 +221,10 @@ func (c *cryptokey) addRoute(cidr string, dest string) error {
|
||||
|
||||
// Check if the prefix is IPv4 or IPv6
|
||||
if prefixsize == net.IPv6len*8 {
|
||||
routingtable = &c.ipv6routes
|
||||
routingtable = &c.ipv6remotes
|
||||
routingcache = &c.ipv6cache
|
||||
} else if prefixsize == net.IPv4len*8 {
|
||||
routingtable = &c.ipv4routes
|
||||
routingtable = &c.ipv4remotes
|
||||
routingcache = &c.ipv4cache
|
||||
} else {
|
||||
return errors.New("Unexpected prefix size")
|
||||
@ -323,14 +309,14 @@ func (c *cryptokey) getPublicKeyForAddress(addr address.Address, addrlen int) (c
|
||||
|
||||
c.mutexcaches.RUnlock()
|
||||
|
||||
c.mutexroutes.RLock()
|
||||
defer c.mutexroutes.RUnlock()
|
||||
c.mutexremotes.RLock()
|
||||
defer c.mutexremotes.RUnlock()
|
||||
|
||||
// Check if the prefix is IPv4 or IPv6
|
||||
if addrlen == net.IPv6len {
|
||||
routingtable = &c.ipv6routes
|
||||
routingtable = &c.ipv6remotes
|
||||
} else if addrlen == net.IPv4len {
|
||||
routingtable = &c.ipv4routes
|
||||
routingtable = &c.ipv4remotes
|
||||
} else {
|
||||
return crypto.BoxPubKey{}, errors.New("Unexpected prefix size")
|
||||
}
|
||||
@ -339,7 +325,7 @@ func (c *cryptokey) getPublicKeyForAddress(addr address.Address, addrlen int) (c
|
||||
ip := make(net.IP, addrlen)
|
||||
copy(ip[:addrlen], addr[:])
|
||||
|
||||
// Check if we have a route. At this point c.ipv6routes should be
|
||||
// Check if we have a route. At this point c.ipv6remotes should be
|
||||
// pre-sorted so that the most specific routes are first
|
||||
for _, route := range *routingtable {
|
||||
// Does this subnet match the given IP?
|
||||
@ -366,14 +352,14 @@ func (c *cryptokey) getPublicKeyForAddress(addr address.Address, addrlen int) (c
|
||||
}
|
||||
|
||||
// No route was found if we got to this point
|
||||
return crypto.BoxPubKey{}, errors.New(fmt.Sprintf("No route to %s", ip.String()))
|
||||
return crypto.BoxPubKey{}, fmt.Errorf("no route to %s", ip.String())
|
||||
}
|
||||
|
||||
// Removes a source subnet, which allows traffic with these source addresses to
|
||||
// be tunnelled using crypto-key routing.
|
||||
func (c *cryptokey) removeSourceSubnet(cidr string) error {
|
||||
c.mutexsources.Lock()
|
||||
defer c.mutexsources.Unlock()
|
||||
func (c *cryptokey) removeLocalSubnet(cidr string) error {
|
||||
c.mutexlocals.Lock()
|
||||
defer c.mutexlocals.Unlock()
|
||||
|
||||
// Is the CIDR we've been given valid?
|
||||
_, ipnet, err := net.ParseCIDR(cidr)
|
||||
@ -389,9 +375,9 @@ func (c *cryptokey) removeSourceSubnet(cidr string) error {
|
||||
|
||||
// Check if the prefix is IPv4 or IPv6
|
||||
if prefixsize == net.IPv6len*8 {
|
||||
routingsources = &c.ipv6sources
|
||||
routingsources = &c.ipv6locals
|
||||
} else if prefixsize == net.IPv4len*8 {
|
||||
routingsources = &c.ipv4sources
|
||||
routingsources = &c.ipv4locals
|
||||
} else {
|
||||
return errors.New("Unexpected prefix size")
|
||||
}
|
||||
@ -409,10 +395,10 @@ func (c *cryptokey) removeSourceSubnet(cidr string) error {
|
||||
|
||||
// Removes a destination route for the given CIDR to be tunnelled to the node
|
||||
// with the given BoxPubKey.
|
||||
func (c *cryptokey) removeRoute(cidr string, dest string) error {
|
||||
c.mutexroutes.Lock()
|
||||
func (c *cryptokey) removeRemoteSubnet(cidr string, dest string) error {
|
||||
c.mutexremotes.Lock()
|
||||
c.mutexcaches.Lock()
|
||||
defer c.mutexroutes.Unlock()
|
||||
defer c.mutexremotes.Unlock()
|
||||
defer c.mutexcaches.Unlock()
|
||||
|
||||
// Is the CIDR we've been given valid?
|
||||
@ -430,10 +416,10 @@ func (c *cryptokey) removeRoute(cidr string, dest string) error {
|
||||
|
||||
// Check if the prefix is IPv4 or IPv6
|
||||
if prefixsize == net.IPv6len*8 {
|
||||
routingtable = &c.ipv6routes
|
||||
routingtable = &c.ipv6remotes
|
||||
routingcache = &c.ipv6cache
|
||||
} else if prefixsize == net.IPv4len*8 {
|
||||
routingtable = &c.ipv4routes
|
||||
routingtable = &c.ipv4remotes
|
||||
routingcache = &c.ipv4cache
|
||||
} else {
|
||||
return errors.New("Unexpected prefix size")
|
||||
|
@ -1,10 +1,12 @@
|
||||
package tuntap
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/address"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/util"
|
||||
"github.com/yggdrasil-network/yggdrasil-go/src/yggdrasil"
|
||||
"golang.org/x/net/icmp"
|
||||
@ -70,6 +72,63 @@ func (s *tunConn) reader() (err error) {
|
||||
return e
|
||||
}
|
||||
} else if len(bs) > 0 {
|
||||
ipv4 := len(bs) > 20 && bs[0]&0xf0 == 0x40
|
||||
ipv6 := len(bs) > 40 && bs[0]&0xf0 == 0x60
|
||||
isCGA := true
|
||||
// Check source addresses
|
||||
switch {
|
||||
case ipv6 && bs[8] == 0x02 && bytes.Equal(s.addr[:16], bs[8:24]): // source
|
||||
case ipv6 && bs[8] == 0x03 && bytes.Equal(s.snet[:8], bs[8:16]): // source
|
||||
default:
|
||||
isCGA = false
|
||||
}
|
||||
// Check destination addresses
|
||||
switch {
|
||||
case ipv6 && bs[24] == 0x02 && bytes.Equal(s.tun.addr[:16], bs[24:40]): // destination
|
||||
case ipv6 && bs[24] == 0x03 && bytes.Equal(s.tun.subnet[:8], bs[24:32]): // destination
|
||||
default:
|
||||
isCGA = false
|
||||
}
|
||||
// Decide how to handle the packet
|
||||
var skip bool
|
||||
switch {
|
||||
case isCGA: // Allowed
|
||||
case s.tun.ckr.isEnabled() && (ipv4 || ipv6):
|
||||
var srcAddr address.Address
|
||||
var dstAddr address.Address
|
||||
var addrlen int
|
||||
if ipv4 {
|
||||
copy(srcAddr[:], bs[12:16])
|
||||
copy(dstAddr[:], bs[16:20])
|
||||
addrlen = 4
|
||||
}
|
||||
if ipv6 {
|
||||
copy(srcAddr[:], bs[8:24])
|
||||
copy(dstAddr[:], bs[24:40])
|
||||
addrlen = 16
|
||||
}
|
||||
if !s.tun.ckr.isValidLocalAddress(dstAddr, addrlen) {
|
||||
// The destination address isn't in our CKR allowed range
|
||||
skip = true
|
||||
} else if key, err := s.tun.ckr.getPublicKeyForAddress(srcAddr, addrlen); err == nil {
|
||||
srcNodeID := crypto.GetNodeID(&key)
|
||||
if s.conn.RemoteAddr() == *srcNodeID {
|
||||
// This is the one allowed CKR case, where source and destination addresses are both good
|
||||
} else {
|
||||
// The CKR key associated with this address doesn't match the sender's NodeID
|
||||
skip = true
|
||||
}
|
||||
} else {
|
||||
// We have no CKR route for this source address
|
||||
skip = true
|
||||
}
|
||||
default:
|
||||
skip = true
|
||||
}
|
||||
if skip {
|
||||
util.PutBytes(bs)
|
||||
continue
|
||||
}
|
||||
s.tun.send <- bs
|
||||
s.stillAlive()
|
||||
} else {
|
||||
@ -96,6 +155,63 @@ func (s *tunConn) writer() error {
|
||||
if !ok {
|
||||
return errors.New("send closed")
|
||||
}
|
||||
v4 := len(bs) > 20 && bs[0]&0xf0 == 0x40
|
||||
v6 := len(bs) > 40 && bs[0]&0xf0 == 0x60
|
||||
isCGA := true
|
||||
// Check source addresses
|
||||
switch {
|
||||
case v6 && bs[8] == 0x02 && bytes.Equal(s.tun.addr[:16], bs[8:24]): // source
|
||||
case v6 && bs[8] == 0x03 && bytes.Equal(s.tun.subnet[:8], bs[8:16]): // source
|
||||
default:
|
||||
isCGA = false
|
||||
}
|
||||
// Check destination addresses
|
||||
switch {
|
||||
case v6 && bs[24] == 0x02 && bytes.Equal(s.addr[:16], bs[24:40]): // destination
|
||||
case v6 && bs[24] == 0x03 && bytes.Equal(s.snet[:8], bs[24:32]): // destination
|
||||
default:
|
||||
isCGA = false
|
||||
}
|
||||
// Decide how to handle the packet
|
||||
var skip bool
|
||||
switch {
|
||||
case isCGA: // Allowed
|
||||
case s.tun.ckr.isEnabled() && (v4 || v6):
|
||||
var srcAddr address.Address
|
||||
var dstAddr address.Address
|
||||
var addrlen int
|
||||
if v4 {
|
||||
copy(srcAddr[:], bs[12:16])
|
||||
copy(dstAddr[:], bs[16:20])
|
||||
addrlen = 4
|
||||
}
|
||||
if v6 {
|
||||
copy(srcAddr[:], bs[8:24])
|
||||
copy(dstAddr[:], bs[24:40])
|
||||
addrlen = 16
|
||||
}
|
||||
if !s.tun.ckr.isValidLocalAddress(srcAddr, addrlen) {
|
||||
// The source address isn't in our CKR allowed range
|
||||
skip = true
|
||||
} else if key, err := s.tun.ckr.getPublicKeyForAddress(dstAddr, addrlen); err == nil {
|
||||
dstNodeID := crypto.GetNodeID(&key)
|
||||
if s.conn.RemoteAddr() == *dstNodeID {
|
||||
// This is the one allowed CKR case, where source and destination addresses are both good
|
||||
} else {
|
||||
// The CKR key associated with this address doesn't match the sender's NodeID
|
||||
skip = true
|
||||
}
|
||||
} else {
|
||||
// We have no CKR route for this destination address... why do we have the packet in the first place?
|
||||
skip = true
|
||||
}
|
||||
default:
|
||||
skip = true
|
||||
}
|
||||
if skip {
|
||||
util.PutBytes(bs)
|
||||
continue
|
||||
}
|
||||
msg := yggdrasil.FlowKeyMessage{
|
||||
FlowKey: util.GetFlowKey(bs),
|
||||
Message: bs,
|
||||
|
@ -2,7 +2,6 @@ package tuntap
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
@ -22,24 +21,6 @@ func (tun *TunAdapter) writer() error {
|
||||
continue
|
||||
}
|
||||
if tun.iface.IsTAP() {
|
||||
var dstAddr address.Address
|
||||
if b[0]&0xf0 == 0x60 {
|
||||
if len(b) < 40 {
|
||||
//panic("Tried to send a packet shorter than an IPv6 header...")
|
||||
util.PutBytes(b)
|
||||
continue
|
||||
}
|
||||
copy(dstAddr[:16], b[24:])
|
||||
} else if b[0]&0xf0 == 0x40 {
|
||||
if len(b) < 20 {
|
||||
//panic("Tried to send a packet shorter than an IPv4 header...")
|
||||
util.PutBytes(b)
|
||||
continue
|
||||
}
|
||||
copy(dstAddr[:4], b[16:])
|
||||
} else {
|
||||
return errors.New("Invalid address family")
|
||||
}
|
||||
sendndp := func(dstAddr address.Address) {
|
||||
neigh, known := tun.icmpv6.getNeighbor(dstAddr)
|
||||
known = known && (time.Since(neigh.lastsolicitation).Seconds() < 30)
|
||||
@ -48,6 +29,7 @@ func (tun *TunAdapter) writer() error {
|
||||
}
|
||||
}
|
||||
peermac := net.HardwareAddr{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
|
||||
var dstAddr address.Address
|
||||
var peerknown bool
|
||||
if b[0]&0xf0 == 0x40 {
|
||||
dstAddr = tun.addr
|
||||
@ -69,7 +51,6 @@ func (tun *TunAdapter) writer() error {
|
||||
} else {
|
||||
// Nothing has been discovered, try to discover the destination
|
||||
sendndp(tun.addr)
|
||||
|
||||
}
|
||||
if peerknown {
|
||||
var proto ethernet.Ethertype
|
||||
@ -146,10 +127,7 @@ func (tun *TunAdapter) readerPacketHandler(ch chan []byte) {
|
||||
// From the IP header, work out what our source and destination addresses
|
||||
// and node IDs are. We will need these in order to work out where to send
|
||||
// the packet
|
||||
var srcAddr address.Address
|
||||
var dstAddr address.Address
|
||||
var dstNodeID *crypto.NodeID
|
||||
var dstNodeIDMask *crypto.NodeID
|
||||
var dstSnet address.Subnet
|
||||
var addrlen int
|
||||
n := len(bs)
|
||||
@ -166,7 +144,6 @@ func (tun *TunAdapter) readerPacketHandler(ch chan []byte) {
|
||||
}
|
||||
// IPv6 address
|
||||
addrlen = 16
|
||||
copy(srcAddr[:addrlen], bs[8:])
|
||||
copy(dstAddr[:addrlen], bs[24:])
|
||||
copy(dstSnet[:addrlen/2], bs[24:])
|
||||
} else if bs[0]&0xf0 == 0x40 {
|
||||
@ -180,37 +157,29 @@ func (tun *TunAdapter) readerPacketHandler(ch chan []byte) {
|
||||
}
|
||||
// IPv4 address
|
||||
addrlen = 4
|
||||
copy(srcAddr[:addrlen], bs[12:])
|
||||
copy(dstAddr[:addrlen], bs[16:])
|
||||
} else {
|
||||
// Unknown address length or protocol, so drop the packet and ignore it
|
||||
tun.log.Traceln("Unknown packet type, dropping")
|
||||
continue
|
||||
}
|
||||
if tun.ckr.isEnabled() && !tun.ckr.isValidSource(srcAddr, addrlen) {
|
||||
// The packet had a source address that doesn't belong to us or our
|
||||
// configured crypto-key routing source subnets
|
||||
continue
|
||||
}
|
||||
if !dstAddr.IsValid() && !dstSnet.IsValid() {
|
||||
if key, err := tun.ckr.getPublicKeyForAddress(dstAddr, addrlen); err == nil {
|
||||
// A public key was found, get the node ID for the search
|
||||
dstNodeID = crypto.GetNodeID(&key)
|
||||
// Do a quick check to ensure that the node ID refers to a valid
|
||||
// Yggdrasil address or subnet - this might be superfluous
|
||||
addr := *address.AddrForNodeID(dstNodeID)
|
||||
copy(dstAddr[:], addr[:])
|
||||
copy(dstSnet[:], addr[:])
|
||||
// Are we certain we looked up a valid node?
|
||||
if !dstAddr.IsValid() && !dstSnet.IsValid() {
|
||||
continue
|
||||
if tun.ckr.isEnabled() {
|
||||
if addrlen != 16 || (!dstAddr.IsValid() && !dstSnet.IsValid()) {
|
||||
if key, err := tun.ckr.getPublicKeyForAddress(dstAddr, addrlen); err == nil {
|
||||
// A public key was found, get the node ID for the search
|
||||
dstNodeID := crypto.GetNodeID(&key)
|
||||
dstAddr = *address.AddrForNodeID(dstNodeID)
|
||||
dstSnet = *address.SubnetForNodeID(dstNodeID)
|
||||
addrlen = 16
|
||||
}
|
||||
} else {
|
||||
// No public key was found in the CKR table so we've exhausted our options
|
||||
continue
|
||||
}
|
||||
}
|
||||
if addrlen != 16 || (!dstAddr.IsValid() && !dstSnet.IsValid()) {
|
||||
// Couldn't find this node's ygg IP
|
||||
continue
|
||||
}
|
||||
// Do we have an active connection for this node address?
|
||||
var dstNodeID, dstNodeIDMask *crypto.NodeID
|
||||
tun.mutex.RLock()
|
||||
session, isIn := tun.addrToConn[dstAddr]
|
||||
if !isIn || session == nil {
|
||||
|
@ -137,20 +137,23 @@ func (c *Conn) doSearch() {
|
||||
go func() { c.core.router.admin <- routerWork }()
|
||||
}
|
||||
|
||||
func (c *Conn) getDeadlineCancellation(value *atomic.Value) util.Cancellation {
|
||||
func (c *Conn) getDeadlineCancellation(value *atomic.Value) (util.Cancellation, bool) {
|
||||
if deadline, ok := value.Load().(time.Time); ok {
|
||||
// A deadline is set, so return a Cancellation that uses it
|
||||
return util.CancellationWithDeadline(c.session.cancel, deadline)
|
||||
c := util.CancellationWithDeadline(c.session.cancel, deadline)
|
||||
return c, true
|
||||
} else {
|
||||
// No cancellation was set, so return a child cancellation with no timeout
|
||||
return util.CancellationChild(c.session.cancel)
|
||||
// No deadline was set, so just return the existing cancellation and a dummy value
|
||||
return c.session.cancel, false
|
||||
}
|
||||
}
|
||||
|
||||
// Used internally by Read, the caller is responsible for util.PutBytes when they're done.
|
||||
func (c *Conn) ReadNoCopy() ([]byte, error) {
|
||||
cancel := c.getDeadlineCancellation(&c.readDeadline)
|
||||
defer cancel.Cancel(nil)
|
||||
cancel, doCancel := c.getDeadlineCancellation(&c.readDeadline)
|
||||
if doCancel {
|
||||
defer cancel.Cancel(nil)
|
||||
}
|
||||
// Wait for some traffic to come through from the session
|
||||
select {
|
||||
case <-cancel.Finished():
|
||||
@ -207,8 +210,10 @@ func (c *Conn) WriteNoCopy(msg FlowKeyMessage) error {
|
||||
}
|
||||
c.session.doFunc(sessionFunc)
|
||||
if err == nil {
|
||||
cancel := c.getDeadlineCancellation(&c.writeDeadline)
|
||||
defer cancel.Cancel(nil)
|
||||
cancel, doCancel := c.getDeadlineCancellation(&c.writeDeadline)
|
||||
if doCancel {
|
||||
defer cancel.Cancel(nil)
|
||||
}
|
||||
select {
|
||||
case <-cancel.Finished():
|
||||
if cancel.Error() == util.CancellationTimeoutError {
|
||||
|
@ -556,8 +556,10 @@ func DEBUG_simLinkPeers(p, q *peer) {
|
||||
goWorkers := func(source, dest *peer) {
|
||||
source.linkOut = make(chan []byte, 1)
|
||||
send := make(chan []byte, 1)
|
||||
source.out = func(bs []byte) {
|
||||
send <- bs
|
||||
source.out = func(bss [][]byte) {
|
||||
for _, bs := range bss {
|
||||
send <- bs
|
||||
}
|
||||
}
|
||||
go source.linkLoop()
|
||||
go func() {
|
||||
|
@ -69,7 +69,6 @@ func (d *Dialer) DialByNodeIDandMask(nodeID, nodeMask *crypto.NodeID) (*Conn, er
|
||||
defer t.Stop()
|
||||
select {
|
||||
case <-conn.session.init:
|
||||
conn.session.startWorkers()
|
||||
return conn, nil
|
||||
case <-t.C:
|
||||
conn.Close()
|
||||
|
@ -37,7 +37,7 @@ type linkInfo struct {
|
||||
|
||||
type linkInterfaceMsgIO interface {
|
||||
readMsg() ([]byte, error)
|
||||
writeMsg([]byte) (int, error)
|
||||
writeMsgs([][]byte) (int, error)
|
||||
close() error
|
||||
// These are temporary workarounds to stream semantics
|
||||
_sendMetaBytes([]byte) error
|
||||
@ -207,11 +207,11 @@ func (intf *linkInterface) handler() error {
|
||||
intf.link.core.peers.removePeer(intf.peer.port)
|
||||
}()
|
||||
// Finish setting up the peer struct
|
||||
out := make(chan []byte, 1)
|
||||
out := make(chan [][]byte, 1)
|
||||
defer close(out)
|
||||
intf.peer.out = func(msg []byte) {
|
||||
intf.peer.out = func(msgs [][]byte) {
|
||||
defer func() { recover() }()
|
||||
out <- msg
|
||||
out <- msgs
|
||||
}
|
||||
intf.peer.linkOut = make(chan []byte, 1)
|
||||
themAddr := address.AddrForNodeID(crypto.GetNodeID(&intf.info.box))
|
||||
@ -234,12 +234,12 @@ func (intf *linkInterface) handler() error {
|
||||
interval := 4 * time.Second
|
||||
tcpTimer := time.NewTimer(interval) // used for backwards compat with old tcp
|
||||
defer util.TimerStop(tcpTimer)
|
||||
send := func(bs []byte) {
|
||||
send := func(bss [][]byte) {
|
||||
sendBlocked.Reset(time.Second)
|
||||
intf.msgIO.writeMsg(bs)
|
||||
size, _ := intf.msgIO.writeMsgs(bss)
|
||||
util.TimerStop(sendBlocked)
|
||||
select {
|
||||
case signalSent <- len(bs) > 0:
|
||||
case signalSent <- size > 0:
|
||||
default:
|
||||
}
|
||||
}
|
||||
@ -247,7 +247,7 @@ func (intf *linkInterface) handler() error {
|
||||
// First try to send any link protocol traffic
|
||||
select {
|
||||
case msg := <-intf.peer.linkOut:
|
||||
send(msg)
|
||||
send([][]byte{msg})
|
||||
continue
|
||||
default:
|
||||
}
|
||||
@ -259,19 +259,21 @@ func (intf *linkInterface) handler() error {
|
||||
case <-tcpTimer.C:
|
||||
intf.link.core.log.Tracef("Sending (legacy) keep-alive to %s: %s, source %s",
|
||||
strings.ToUpper(intf.info.linkType), themString, intf.info.local)
|
||||
send(nil)
|
||||
send([][]byte{nil})
|
||||
case <-sendAck:
|
||||
intf.link.core.log.Tracef("Sending ack to %s: %s, source %s",
|
||||
strings.ToUpper(intf.info.linkType), themString, intf.info.local)
|
||||
send(nil)
|
||||
send([][]byte{nil})
|
||||
case msg := <-intf.peer.linkOut:
|
||||
send(msg)
|
||||
case msg, ok := <-out:
|
||||
send([][]byte{msg})
|
||||
case msgs, ok := <-out:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
send(msg)
|
||||
util.PutBytes(msg)
|
||||
send(msgs)
|
||||
for _, msg := range msgs {
|
||||
util.PutBytes(msg)
|
||||
}
|
||||
select {
|
||||
case signalReady <- struct{}{}:
|
||||
default:
|
||||
|
@ -109,7 +109,7 @@ type peer struct {
|
||||
linkOut (chan []byte) // used for protocol traffic (to bypass queues)
|
||||
doSend (chan struct{}) // tell the linkLoop to send a switchMsg
|
||||
dinfo (chan *dhtInfo) // used to keep the DHT working
|
||||
out func([]byte) // Set up by whatever created the peers struct, used to send packets to other nodes
|
||||
out func([][]byte) // Set up by whatever created the peers struct, used to send packets to other nodes
|
||||
close func() // Called when a peer is removed, to close the underlying connection, or via admin api
|
||||
}
|
||||
|
||||
@ -250,11 +250,15 @@ func (p *peer) handleTraffic(packet []byte, pTypeLen int) {
|
||||
}
|
||||
|
||||
// This just calls p.out(packet) for now.
|
||||
func (p *peer) sendPacket(packet []byte) {
|
||||
func (p *peer) sendPackets(packets [][]byte) {
|
||||
// Is there ever a case where something more complicated is needed?
|
||||
// What if p.out blocks?
|
||||
atomic.AddUint64(&p.bytesSent, uint64(len(packet)))
|
||||
p.out(packet)
|
||||
var size int
|
||||
for _, packet := range packets {
|
||||
size += len(packet)
|
||||
}
|
||||
atomic.AddUint64(&p.bytesSent, uint64(size))
|
||||
p.out(packets)
|
||||
}
|
||||
|
||||
// This wraps the packet in the inner (ephemeral) and outer (permanent) crypto layers.
|
||||
|
@ -39,10 +39,10 @@ type router struct {
|
||||
reconfigure chan chan error
|
||||
addr address.Address
|
||||
subnet address.Subnet
|
||||
in <-chan []byte // packets we received from the network, link to peer's "out"
|
||||
out func([]byte) // packets we're sending to the network, link to peer's "in"
|
||||
reset chan struct{} // signal that coords changed (re-init sessions/dht)
|
||||
admin chan func() // pass a lambda for the admin socket to query stuff
|
||||
in <-chan [][]byte // packets we received from the network, link to peer's "out"
|
||||
out func([]byte) // packets we're sending to the network, link to peer's "in"
|
||||
reset chan struct{} // signal that coords changed (re-init sessions/dht)
|
||||
admin chan func() // pass a lambda for the admin socket to query stuff
|
||||
nodeinfo nodeinfo
|
||||
}
|
||||
|
||||
@ -52,7 +52,7 @@ func (r *router) init(core *Core) {
|
||||
r.reconfigure = make(chan chan error, 1)
|
||||
r.addr = *address.AddrForNodeID(&r.core.dht.nodeID)
|
||||
r.subnet = *address.SubnetForNodeID(&r.core.dht.nodeID)
|
||||
in := make(chan []byte, 1) // TODO something better than this...
|
||||
in := make(chan [][]byte, 1) // TODO something better than this...
|
||||
self := linkInterface{
|
||||
name: "(self)",
|
||||
info: linkInfo{
|
||||
@ -62,7 +62,7 @@ func (r *router) init(core *Core) {
|
||||
},
|
||||
}
|
||||
p := r.core.peers.newPeer(&r.core.boxPub, &r.core.sigPub, &crypto.BoxSharedKey{}, &self, nil)
|
||||
p.out = func(packet []byte) { in <- packet }
|
||||
p.out = func(packets [][]byte) { in <- packets }
|
||||
r.in = in
|
||||
out := make(chan []byte, 32)
|
||||
go func() {
|
||||
@ -114,8 +114,10 @@ func (r *router) mainLoop() {
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case p := <-r.in:
|
||||
r.handleIn(p)
|
||||
case ps := <-r.in:
|
||||
for _, p := range ps {
|
||||
r.handleIn(p)
|
||||
}
|
||||
case info := <-r.core.dht.peers:
|
||||
r.core.dht.insertPeer(info)
|
||||
case <-r.reset:
|
||||
|
@ -259,6 +259,7 @@ func (ss *sessions) createSession(theirPermKey *crypto.BoxPubKey) *sessionInfo {
|
||||
<-sinfo.cancel.Finished()
|
||||
sinfo.core.router.doAdmin(sinfo.close)
|
||||
}()
|
||||
go sinfo.startWorkers()
|
||||
return &sinfo
|
||||
}
|
||||
|
||||
@ -386,7 +387,6 @@ func (ss *sessions) handlePing(ping *sessionPing) {
|
||||
for i := range conn.nodeMask {
|
||||
conn.nodeMask[i] = 0xFF
|
||||
}
|
||||
conn.session.startWorkers()
|
||||
c := ss.listener.conn
|
||||
go func() { c <- conn }()
|
||||
}
|
||||
@ -568,6 +568,12 @@ func (sinfo *sessionInfo) recvWorker() {
|
||||
}
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-sinfo.cancel.Finished():
|
||||
return
|
||||
case <-sinfo.init:
|
||||
// Wait until the session has finished initializing before processing any packets
|
||||
}
|
||||
for {
|
||||
for len(callbacks) > 0 {
|
||||
select {
|
||||
@ -634,6 +640,12 @@ func (sinfo *sessionInfo) sendWorker() {
|
||||
util.WorkerGo(poolFunc)
|
||||
callbacks = append(callbacks, ch)
|
||||
}
|
||||
select {
|
||||
case <-sinfo.cancel.Finished():
|
||||
return
|
||||
case <-sinfo.init:
|
||||
// Wait until the session has finished initializing before processing any packets
|
||||
}
|
||||
for {
|
||||
for len(callbacks) > 0 {
|
||||
select {
|
||||
|
@ -35,29 +35,19 @@ func (s *stream) init(rwc io.ReadWriteCloser) {
|
||||
}
|
||||
|
||||
// writeMsg writes a message with stream padding, and is *not* thread safe.
|
||||
func (s *stream) writeMsg(bs []byte) (int, error) {
|
||||
func (s *stream) writeMsgs(bss [][]byte) (int, error) {
|
||||
buf := s.outputBuffer[:0]
|
||||
buf = append(buf, streamMsg[:])
|
||||
l := wire_put_uint64(uint64(len(bs)), util.GetBytes())
|
||||
defer util.PutBytes(l)
|
||||
buf = append(buf, l)
|
||||
padLen := len(buf[0]) + len(buf[1])
|
||||
buf = append(buf, bs)
|
||||
totalLen := padLen + len(bs)
|
||||
s.outputBuffer = buf[:0] // So we can reuse the same underlying array later
|
||||
var bn int
|
||||
for bn < totalLen {
|
||||
n, err := buf.WriteTo(s.rwc)
|
||||
bn += int(n)
|
||||
if err != nil {
|
||||
l := bn - padLen
|
||||
if l < 0 {
|
||||
l = 0
|
||||
}
|
||||
return l, err
|
||||
}
|
||||
var written int
|
||||
for _, bs := range bss {
|
||||
buf = append(buf, streamMsg[:])
|
||||
buf = append(buf, wire_encode_uint64(uint64(len(bs))))
|
||||
buf = append(buf, bs)
|
||||
written += len(bs)
|
||||
}
|
||||
return len(bs), nil
|
||||
s.outputBuffer = buf[:0] // So we can reuse the same underlying array later
|
||||
_, err := buf.WriteTo(s.rwc)
|
||||
// TODO only include number of bytes from bs *successfully* written?
|
||||
return written, err
|
||||
}
|
||||
|
||||
// readMsg reads a message from the stream, accounting for stream padding, and is *not* thread safe.
|
||||
|
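The `writeMsgs` change above batches several length-prefixed messages into one write instead of issuing a separate write per message. A minimal standalone sketch of the same idea, using `net.Buffers` and a simplified 4-byte length prefix rather than the project's own wire encoding, might look like this:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"net"
)

// writeMsgs frames each message with a 4-byte length prefix and flushes the
// whole batch with a single WriteTo call. (Simplified framing; the real
// stream code uses its own variable-length integer encoding and padding.)
func writeMsgs(w io.Writer, msgs [][]byte) (int, error) {
	var buf net.Buffers
	var written int
	for _, msg := range msgs {
		var hdr [4]byte
		binary.BigEndian.PutUint32(hdr[:], uint32(len(msg)))
		buf = append(buf, hdr[:], msg)
		written += len(msg)
	}
	_, err := buf.WriteTo(w)
	return written, err
}

func main() {
	var out bytes.Buffer
	n, err := writeMsgs(&out, [][]byte{[]byte("hello"), []byte("world")})
	fmt.Println(n, err, out.Len()) // 10 <nil> 18
}
```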
@ -709,7 +709,7 @@ func (t *switchTable) handleIn(packet []byte, idle map[switchPort]time.Time) boo
|
||||
if best != nil {
|
||||
// Send to the best idle next hop
|
||||
delete(idle, best.port)
|
||||
best.sendPacket(packet)
|
||||
best.sendPackets([][]byte{packet})
|
||||
return true
|
||||
}
|
||||
// Didn't find anyone idle to send it to
|
||||
@ -784,39 +784,49 @@ func (t *switchTable) handleIdle(port switchPort) bool {
|
||||
if to == nil {
|
||||
return true
|
||||
}
|
||||
var best string
|
||||
var bestPriority float64
|
||||
var packets [][]byte
|
||||
var psize int
|
||||
t.queues.cleanup(t)
|
||||
now := time.Now()
|
||||
for streamID, buf := range t.queues.bufs {
|
||||
// Filter over the streams that this node is closer to
|
||||
// Keep the one with the smallest queue
|
||||
packet := buf.packets[0]
|
||||
coords := switch_getPacketCoords(packet.bytes)
|
||||
priority := float64(now.Sub(packet.time)) / float64(buf.size)
|
||||
if priority > bestPriority && t.portIsCloser(coords, port) {
|
||||
best = streamID
|
||||
bestPriority = priority
|
||||
for psize < 65535 {
|
||||
var best string
|
||||
var bestPriority float64
|
||||
for streamID, buf := range t.queues.bufs {
|
||||
// Filter over the streams that this node is closer to
|
||||
// Keep the one with the smallest queue
|
||||
packet := buf.packets[0]
|
||||
coords := switch_getPacketCoords(packet.bytes)
|
||||
priority := float64(now.Sub(packet.time)) / float64(buf.size)
|
||||
if priority > bestPriority && t.portIsCloser(coords, port) {
|
||||
best = streamID
|
||||
bestPriority = priority
|
||||
}
|
||||
}
|
||||
}
|
||||
if bestPriority != 0 {
|
||||
buf := t.queues.bufs[best]
|
||||
var packet switch_packetInfo
|
||||
// TODO decide if this should be LIFO or FIFO
|
||||
packet, buf.packets = buf.packets[0], buf.packets[1:]
|
||||
buf.size -= uint64(len(packet.bytes))
|
||||
t.queues.size -= uint64(len(packet.bytes))
|
||||
if len(buf.packets) == 0 {
|
||||
delete(t.queues.bufs, best)
|
||||
if bestPriority != 0 {
|
||||
buf := t.queues.bufs[best]
|
||||
var packet switch_packetInfo
|
||||
// TODO decide if this should be LIFO or FIFO
|
||||
packet, buf.packets = buf.packets[0], buf.packets[1:]
|
||||
buf.size -= uint64(len(packet.bytes))
|
||||
t.queues.size -= uint64(len(packet.bytes))
|
||||
if len(buf.packets) == 0 {
|
||||
delete(t.queues.bufs, best)
|
||||
} else {
|
||||
// Need to update the map, since buf was retrieved by value
|
||||
t.queues.bufs[best] = buf
|
||||
}
|
||||
packets = append(packets, packet.bytes)
|
||||
psize += len(packet.bytes)
|
||||
} else {
|
||||
// Need to update the map, since buf was retrieved by value
|
||||
t.queues.bufs[best] = buf
|
||||
// Finished finding packets
|
||||
break
|
||||
}
|
||||
to.sendPacket(packet.bytes)
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
if len(packets) > 0 {
|
||||
to.sendPackets(packets)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// The switch worker does routing lookups and sends packets to where they need to be
|
||||
@ -826,7 +836,7 @@ func (t *switchTable) doWorker() {
|
||||
// Keep sending packets to the router
|
||||
self := t.core.peers.getPorts()[0]
|
||||
for bs := range sendingToRouter {
|
||||
self.sendPacket(bs)
|
||||
self.sendPackets([][]byte{bs})
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
|