Mirror of https://github.com/yggdrasil-network/yggdrasil-go.git (synced 2024-11-23 18:15:24 +00:00)

Commit f1c37f8440 (parent ace7b43b6d): (broken state) WIP rewriting core to use ironwood
go.mod (5 lines changed)

@@ -3,6 +3,7 @@ module github.com/yggdrasil-network/yggdrasil-go
 go 1.16
 
 require (
+    github.com/Arceliar/ironwood v0.0.0-20210508094446-74a68e4f5970 // indirect
     github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979
     github.com/cheggaaa/pb/v3 v3.0.6
     github.com/fatih/color v1.10.0 // indirect
@@ -15,10 +16,12 @@ require (
     github.com/rivo/uniseg v0.2.0 // indirect
     github.com/vishvananda/netlink v1.1.0
     github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
-    golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
+    golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b
     golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
     golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b
     golang.org/x/text v0.3.6-0.20210220033129-8f690f22cf1c
     golang.zx2c4.com/wireguard v0.0.0-20210306175010-7e3b8371a1bf
     golang.zx2c4.com/wireguard/windows v0.3.8
 )
+
+replace github.com/Arceliar/ironwood => ../ironwood
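A note on the dependency wiring above: the new require entry pulls in Arceliar/ironwood, and the replace directive added at the end of go.mod redirects that module path to a local checkout, so this WIP branch expects an ironwood working copy in a sibling ../ironwood directory. A minimal go.mod illustrating the same pattern (module path and version copied from the diff; everything unrelated omitted):

    module github.com/yggdrasil-network/yggdrasil-go

    go 1.16

    require github.com/Arceliar/ironwood v0.0.0-20210508094446-74a68e4f5970

    // Build against a local clone rather than the published module.
    replace github.com/Arceliar/ironwood => ../ironwood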
go.sum (4 lines changed)

@@ -1,3 +1,5 @@
+github.com/Arceliar/ironwood v0.0.0-20210508094446-74a68e4f5970 h1:sKiz18LynwInybi9BIhM8tdvZlSurnT6rM/ZUEqMgzU=
+github.com/Arceliar/ironwood v0.0.0-20210508094446-74a68e4f5970/go.mod h1:RP72rucOFm5udrnEzTmIWLRVGQiV/fSUAQXJ0RST/nk=
 github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979 h1:WndgpSW13S32VLQ3ugUxx2EnnWmgba1kCqPkd4Gk1yQ=
 github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979/go.mod h1:6Lkn+/zJilRMsKmbmG1RPoamiArC6HS73xbwRyp3UyI=
 github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
@@ -39,6 +41,8 @@ github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
 golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b h1:7mWr3k41Qtv8XlltBkDkl8LoP3mpSgBW8BUoxtEdbXg=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
@@ -46,7 +46,7 @@ func TimerStop(t *time.Timer) bool {
 
 // FuncTimeout runs the provided function in a separate goroutine, and returns true if the function finishes executing before the timeout passes, or false if the timeout passes.
 // It includes no mechanism to stop the function if the timeout fires, so the user is expected to do so on their own (such as with a Cancellation or a context).
-func FuncTimeout(f func(), timeout time.Duration) bool {
+func FuncTimeout(timeout time.Duration, f func()) bool {
     success := make(chan struct{})
     go func() {
         defer close(success)
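The only change in this hunk is the parameter order of the FuncTimeout helper: the timeout now comes first and the function second. A self-contained sketch of the same pattern is below; only the lines shown above (the success channel and the goroutine) are confirmed by the diff, while the timer/select half is an assumption based on the documented behaviour:

    package main

    import (
        "fmt"
        "time"
    )

    // funcTimeout mirrors the reordered signature FuncTimeout(timeout, f): it runs
    // f in its own goroutine and reports whether f finished before the timeout.
    // It never stops f itself; the caller is expected to cancel long-running work.
    func funcTimeout(timeout time.Duration, f func()) bool {
        success := make(chan struct{})
        go func() {
            defer close(success)
            f()
        }()
        timer := time.NewTimer(timeout)
        defer timer.Stop()
        select {
        case <-success:
            return true
        case <-timer.C:
            return false
        }
    }

    func main() {
        ok := funcTimeout(time.Second, func() { time.Sleep(10 * time.Millisecond) })
        fmt.Println("finished in time:", ok) // prints: finished in time: true
    }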
@@ -1,18 +1,17 @@
 package yggdrasil
 
 import (
-    "encoding/hex"
+    //"encoding/hex"
     "errors"
-    "fmt"
+    //"fmt"
     "net"
-    "sort"
+    //"sort"
     "time"
 
     "github.com/gologme/log"
-    //"github.com/yggdrasil-network/yggdrasil-go/src/address"
+    "github.com/yggdrasil-network/yggdrasil-go/src/address"
     "github.com/yggdrasil-network/yggdrasil-go/src/crypto"
-
-    "github.com/Arceliar/phony"
+    //"github.com/Arceliar/phony"
 )
 
 // Peer represents a single peer object. This contains information from the
@@ -95,13 +94,13 @@ type SwitchQueue struct {
 // Note that sessions will automatically be closed by Yggdrasil if no traffic is
 // exchanged for around two minutes.
 type Session struct {
     PublicKey crypto.BoxPubKey // The public key of the remote node
     Coords []uint64 // The coordinates of the remote node
     BytesSent uint64 // Bytes sent to the session
     BytesRecvd uint64 // Bytes received from the session
-    MTU MTU // The maximum supported message size of the session
+    //MTU MTU // The maximum supported message size of the session
     Uptime time.Duration // How long this session has been active for
     WasMTUFixed bool // This field is no longer used
 }
 
 // GetPeers returns one or more Peer objects containing information about active
@@ -109,6 +108,7 @@ type Session struct {
 // includes information about the current node (with a port number of 0). If
 // there is exactly one entry then this node is not connected to any other nodes
 // and is therefore isolated.
+/* TODO
 func (c *Core) GetPeers() []Peer {
     var ports map[switchPort]*peer
     phony.Block(&c.peers, func() { ports = c.peers.ports })
@@ -136,12 +136,14 @@ func (c *Core) GetPeers() []Peer {
     }
     return peers
 }
+*/
 
 // GetSwitchPeers returns zero or more SwitchPeer objects containing information
 // about switch port connections with other Yggdrasil nodes. Note that, unlike
 // GetPeers, GetSwitchPeers does not include information about the current node,
 // therefore it is possible for this to return zero elements if the node is
 // isolated or not connected to any peers.
+/* TODO
 func (c *Core) GetSwitchPeers() []SwitchPeer {
     var switchpeers []SwitchPeer
     var table *lookupTable
@@ -172,9 +174,11 @@ func (c *Core) GetSwitchPeers() []SwitchPeer {
     }
     return switchpeers
 }
+*/
 
 // GetDHT returns zero or more entries as stored in the DHT, cached primarily
 // from searches that have already taken place.
+/* TODO
 func (c *Core) GetDHT() []DHTEntry {
     var dhtentries []DHTEntry
     getDHT := func() {
@@ -198,8 +202,10 @@ func (c *Core) GetDHT() []DHTEntry {
     phony.Block(&c.router, getDHT)
     return dhtentries
 }
+*/
 
 // GetSessions returns a list of open sessions from this node to other nodes.
+/* TODO
 func (c *Core) GetSessions() []Session {
     var sessions []Session
     getSessions := func() {
@@ -224,11 +230,13 @@ func (c *Core) GetSessions() []Session {
     phony.Block(&c.router, getSessions)
     return sessions
 }
+*/
 
 // ConnListen returns a listener for Yggdrasil session connections. You can only
 // call this function once as each Yggdrasil node can only have a single
 // ConnListener. Make sure to keep the reference to this for as long as it is
 // needed.
+/* TODO?
 func (c *Core) ConnListen() (*Listener, error) {
     c.router.sessions.listenerMutex.Lock()
     defer c.router.sessions.listenerMutex.Unlock()
@@ -242,16 +250,19 @@ func (c *Core) ConnListen() (*Listener, error) {
     }
     return c.router.sessions.listener, nil
 }
+*/
 
 // ConnDialer returns a dialer for Yggdrasil session connections. Since
 // ConnDialers are stateless, you can request as many dialers as you like,
 // although ideally you should request only one and keep the reference to it for
 // as long as it is needed.
+/* TODO?
 func (c *Core) ConnDialer() (*Dialer, error) {
     return &Dialer{
         core: c,
     }, nil
 }
+*/
 
 // ListenTCP starts a new TCP listener. The input URI should match that of the
 // "Listen" configuration item, e.g.
@@ -270,26 +281,34 @@ func (c *Core) ListenTLS(uri string) (*TcpListener, error) {
 // NodeID gets the node ID. This is derived from your router encryption keys.
 // Remote nodes wanting to open connections to your node will need to know your
 // node ID.
+/* TODO?
 func (c *Core) NodeID() *crypto.NodeID {
     return crypto.GetNodeID(&c.boxPub)
 }
+*/
 
 // TreeID gets the tree ID. This is derived from your switch signing keys. There
 // is typically no need to share this key.
+/* TODO?
 func (c *Core) TreeID() *crypto.TreeID {
     return crypto.GetTreeID(&c.sigPub)
 }
+*/
 
 // SigningPublicKey gets the node's signing public key, as used by the switch.
+/* TODO?
 func (c *Core) SigningPublicKey() string {
     return hex.EncodeToString(c.sigPub[:])
 }
+*/
 
 // EncryptionPublicKey gets the node's encryption public key, as used by the
 // router.
+/* TODO?
 func (c *Core) EncryptionPublicKey() string {
     return hex.EncodeToString(c.boxPub[:])
 }
+*/
 
 // Coords returns the current coordinates of the node. Note that these can
 // change at any time for a number of reasons, not limited to but including
@@ -300,6 +319,7 @@ func (c *Core) EncryptionPublicKey() string {
 // you are the root of the network that you are connected to, or you are not
 // connected to any other nodes (effectively making you the root of a
 // single-node network).
+/* TODO?
 func (c *Core) Coords() []uint64 {
     var coords []byte
     phony.Block(&c.router, func() {
@@ -307,6 +327,7 @@ func (c *Core) Coords() []uint64 {
     })
     return wire_coordsBytestoUint64s(coords)
 }
+*/
 
 // Address gets the IPv6 address of the Yggdrasil node. This is always a /128
 // address. The IPv6 address is only relevant when the node is operating as an
@@ -314,10 +335,8 @@ func (c *Core) Coords() []uint64 {
 // that application also implements either VPN functionality or deals with IP
 // packets specifically.
 func (c *Core) Address() net.IP {
-    panic("TODO")
-    return nil
-    //address := net.IP(address.AddrForNodeID(c.NodeID())[:])
-    //return address
+    addr := net.IP(address.AddrForKey(c.public)[:])
+    return addr
 }
 
 // Subnet gets the routed IPv6 subnet of the Yggdrasil node. This is always a
@@ -326,28 +345,31 @@ func (c *Core) Address() net.IP {
 // that application also implements either VPN functionality or deals with IP
 // packets specifically.
 func (c *Core) Subnet() net.IPNet {
-    panic("TODO")
-    return net.IPNet{}
-    //subnet := address.SubnetForNodeID(c.NodeID())[:]
-    //subnet = append(subnet, 0, 0, 0, 0, 0, 0, 0, 0)
-    //return net.IPNet{IP: subnet, Mask: net.CIDRMask(64, 128)}
+    subnet := address.SubnetForKey(c.public)[:]
+    subnet = append(subnet, 0, 0, 0, 0, 0, 0, 0, 0)
+    return net.IPNet{IP: subnet, Mask: net.CIDRMask(64, 128)}
 }
 
 // MyNodeInfo gets the currently configured nodeinfo. NodeInfo is typically
 // specified through the "NodeInfo" option in the node configuration or using
 // the SetNodeInfo function, although it may also contain other built-in values
 // such as "buildname", "buildversion" etc.
+/* TODO?
 func (c *Core) MyNodeInfo() NodeInfoPayload {
     return c.router.nodeinfo.getNodeInfo()
 }
+*/
 
 // SetNodeInfo sets the local nodeinfo. Note that nodeinfo can be any value or
 // struct, it will be serialised into JSON automatically.
+/* TODO?
 func (c *Core) SetNodeInfo(nodeinfo interface{}, nodeinfoprivacy bool) {
     c.router.nodeinfo.setNodeInfo(nodeinfo, nodeinfoprivacy)
 }
+*/
 
 // GetMaximumSessionMTU returns the maximum allowed session MTU size.
+/* TODO?
 func (c *Core) GetMaximumSessionMTU() MTU {
     var mtu MTU
     phony.Block(&c.router, func() {
|
|||||||
})
|
})
|
||||||
return mtu
|
return mtu
|
||||||
}
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
// SetMaximumSessionMTU sets the maximum allowed session MTU size. The default
|
// SetMaximumSessionMTU sets the maximum allowed session MTU size. The default
|
||||||
// value is 65535 bytes. Session pings will be sent to update all open sessions
|
// value is 65535 bytes. Session pings will be sent to update all open sessions
|
||||||
// if the MTU has changed.
|
// if the MTU has changed.
|
||||||
|
/* TODO?
|
||||||
func (c *Core) SetMaximumSessionMTU(mtu MTU) {
|
func (c *Core) SetMaximumSessionMTU(mtu MTU) {
|
||||||
phony.Block(&c.router, func() {
|
phony.Block(&c.router, func() {
|
||||||
if c.router.sessions.myMaximumMTU != mtu {
|
if c.router.sessions.myMaximumMTU != mtu {
|
||||||
@ -367,11 +391,13 @@ func (c *Core) SetMaximumSessionMTU(mtu MTU) {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
// GetNodeInfo requests nodeinfo from a remote node, as specified by the public
|
// GetNodeInfo requests nodeinfo from a remote node, as specified by the public
|
||||||
// key and coordinates specified. The third parameter specifies whether a cached
|
// key and coordinates specified. The third parameter specifies whether a cached
|
||||||
// result is acceptable - this results in less traffic being generated than is
|
// result is acceptable - this results in less traffic being generated than is
|
||||||
// necessary when, e.g. crawling the network.
|
// necessary when, e.g. crawling the network.
|
||||||
|
/* TODO?
|
||||||
func (c *Core) GetNodeInfo(key crypto.BoxPubKey, coords []uint64, nocache bool) (NodeInfoPayload, error) {
|
func (c *Core) GetNodeInfo(key crypto.BoxPubKey, coords []uint64, nocache bool) (NodeInfoPayload, error) {
|
||||||
response := make(chan *NodeInfoPayload, 1)
|
response := make(chan *NodeInfoPayload, 1)
|
||||||
c.router.nodeinfo.addCallback(key, func(nodeinfo *NodeInfoPayload) {
|
c.router.nodeinfo.addCallback(key, func(nodeinfo *NodeInfoPayload) {
|
||||||
@ -390,6 +416,7 @@ func (c *Core) GetNodeInfo(key crypto.BoxPubKey, coords []uint64, nocache bool)
|
|||||||
}
|
}
|
||||||
return NodeInfoPayload{}, fmt.Errorf("getNodeInfo timeout: %s", hex.EncodeToString(key[:]))
|
return NodeInfoPayload{}, fmt.Errorf("getNodeInfo timeout: %s", hex.EncodeToString(key[:]))
|
||||||
}
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
// SetSessionGatekeeper allows you to configure a handler function for deciding
|
// SetSessionGatekeeper allows you to configure a handler function for deciding
|
||||||
// whether a session should be allowed or not. The default session firewall is
|
// whether a session should be allowed or not. The default session firewall is
|
||||||
@@ -397,12 +424,14 @@ func (c *Core) GetNodeInfo(key crypto.BoxPubKey, coords []uint64, nocache bool)
 // side and a boolean which is true if we initiated the session or false if we
 // received an incoming session request. The function should return true to
 // allow the session or false to reject it.
+/* TODO?
 func (c *Core) SetSessionGatekeeper(f func(pubkey *crypto.BoxPubKey, initiator bool) bool) {
     c.router.sessions.isAllowedMutex.Lock()
     defer c.router.sessions.isAllowedMutex.Unlock()
 
     c.router.sessions.isAllowedHandler = f
 }
+*/
 
 // SetLogger sets the output logger of the Yggdrasil node after startup. This
 // may be useful if you want to redirect the output later. Note that this
@@ -469,6 +498,8 @@ func (c *Core) RemovePeer(addr string, sintf string) error {
         }
     }
 
+    panic("TODO")
+    /* TODO?
     c.peers.Act(nil, func() {
         ports := c.peers.ports
         for _, peer := range ports {
@@ -477,6 +508,7 @@ func (c *Core) RemovePeer(addr string, sintf string) error {
             }
         }
     })
+    */
 
     return nil
 }
@@ -493,6 +525,7 @@ func (c *Core) CallPeer(addr string, sintf string) error {
 
 // DisconnectPeer disconnects a peer once. This should be specified as a port
 // number.
+/* TODO?
 func (c *Core) DisconnectPeer(port uint64) error {
     c.peers.Act(nil, func() {
         if p, isIn := c.peers.ports[switchPort(port)]; isIn {
@@ -501,34 +534,42 @@ func (c *Core) DisconnectPeer(port uint64) error {
     })
     return nil
 }
+*/
 
 // GetAllowedEncryptionPublicKeys returns the public keys permitted for incoming
 // peer connections. If this list is empty then all incoming peer connections
 // are accepted by default.
+/* TODO?
 func (c *Core) GetAllowedEncryptionPublicKeys() []string {
     return c.peers.getAllowedEncryptionPublicKeys()
 }
+*/
 
 // AddAllowedEncryptionPublicKey whitelists a key for incoming peer connections.
 // By default all incoming peer connections are accepted, but adding public keys
 // to the whitelist using this function enables strict checking from that point
 // forward. Once the whitelist is enabled, only peer connections from
 // whitelisted public keys will be accepted.
+/* TODO?
 func (c *Core) AddAllowedEncryptionPublicKey(bstr string) (err error) {
     c.peers.addAllowedEncryptionPublicKey(bstr)
     return nil
 }
+*/
 
 // RemoveAllowedEncryptionPublicKey removes a key from the whitelist for
 // incoming peer connections. If none are set, an empty list permits all
 // incoming connections.
+/* TODO?
 func (c *Core) RemoveAllowedEncryptionPublicKey(bstr string) (err error) {
     c.peers.removeAllowedEncryptionPublicKey(bstr)
     return nil
 }
+*/
 
 // DHTPing sends a DHT ping to the node with the provided key and coords,
 // optionally looking up the specified target NodeID.
+/* NOT TODO!!
 func (c *Core) DHTPing(key crypto.BoxPubKey, coords []uint64, target *crypto.NodeID) (DHTRes, error) {
     resCh := make(chan *dhtRes, 1)
     info := dhtInfo{
@@ -564,3 +605,4 @@ func (c *Core) DHTPing(key crypto.BoxPubKey, coords []uint64, target *crypto.Nod
     }
     return DHTRes{}, fmt.Errorf("DHT ping timeout: %s", hex.EncodeToString(key[:]))
 }
+*/
@@ -1,397 +0,0 @@ (entire file removed; the deleted source follows)
|
|||||||
package yggdrasil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
|
||||||
"github.com/yggdrasil-network/yggdrasil-go/src/types"
|
|
||||||
"github.com/yggdrasil-network/yggdrasil-go/src/util"
|
|
||||||
|
|
||||||
"github.com/Arceliar/phony"
|
|
||||||
)
|
|
||||||
|
|
||||||
type MTU = types.MTU
|
|
||||||
|
|
||||||
// ConnError implements the net.Error interface
|
|
||||||
type ConnError struct {
|
|
||||||
error
|
|
||||||
timeout bool
|
|
||||||
temporary bool
|
|
||||||
closed bool
|
|
||||||
maxsize int
|
|
||||||
}
|
|
||||||
|
|
||||||
// Timeout returns true if the error relates to a timeout condition on the
|
|
||||||
// connection.
|
|
||||||
func (e *ConnError) Timeout() bool {
|
|
||||||
return e.timeout
|
|
||||||
}
|
|
||||||
|
|
||||||
// Temporary return true if the error is temporary or false if it is a permanent
|
|
||||||
// error condition.
|
|
||||||
func (e *ConnError) Temporary() bool {
|
|
||||||
return e.temporary
|
|
||||||
}
|
|
||||||
|
|
||||||
// PacketTooBig returns in response to sending a packet that is too large, and
|
|
||||||
// if so, the maximum supported packet size that should be used for the
|
|
||||||
// connection.
|
|
||||||
func (e *ConnError) PacketTooBig() bool {
|
|
||||||
return e.maxsize > 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// PacketMaximumSize returns the maximum supported packet size. This will only
|
|
||||||
// return a non-zero value if ConnError.PacketTooBig() returns true.
|
|
||||||
func (e *ConnError) PacketMaximumSize() int {
|
|
||||||
if !e.PacketTooBig() {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return e.maxsize
|
|
||||||
}
|
|
||||||
|
|
||||||
// Closed returns if the session is already closed and is now unusable.
|
|
||||||
func (e *ConnError) Closed() bool {
|
|
||||||
return e.closed
|
|
||||||
}
|
|
||||||
|
|
||||||
// The Conn struct is a reference to an active connection session between the
|
|
||||||
// local node and a remote node. Conn implements the io.ReadWriteCloser
|
|
||||||
// interface and is used to send and receive traffic with a remote node.
|
|
||||||
type Conn struct {
|
|
||||||
phony.Inbox
|
|
||||||
core *Core
|
|
||||||
readDeadline *time.Time
|
|
||||||
writeDeadline *time.Time
|
|
||||||
nodeID *crypto.NodeID
|
|
||||||
nodeMask *crypto.NodeID
|
|
||||||
session *sessionInfo
|
|
||||||
mtu MTU
|
|
||||||
readCallback func([]byte)
|
|
||||||
readBuffer chan []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO func NewConn() that initializes additional fields as needed
|
|
||||||
func newConn(core *Core, nodeID *crypto.NodeID, nodeMask *crypto.NodeID, session *sessionInfo) *Conn {
|
|
||||||
conn := Conn{
|
|
||||||
core: core,
|
|
||||||
nodeID: nodeID,
|
|
||||||
nodeMask: nodeMask,
|
|
||||||
session: session,
|
|
||||||
readBuffer: make(chan []byte, 1024),
|
|
||||||
}
|
|
||||||
return &conn
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a string that uniquely identifies a connection. Currently this
|
|
||||||
// takes a form similar to "conn=0x0000000", which contains a memory reference
|
|
||||||
// to the Conn object. While this value should always be unique for each Conn
|
|
||||||
// object, the format of this is not strictly defined and may change in the
|
|
||||||
// future.
|
|
||||||
func (c *Conn) String() string {
|
|
||||||
var s string
|
|
||||||
phony.Block(c, func() { s = fmt.Sprintf("conn=%p", c) })
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) setMTU(from phony.Actor, mtu MTU) {
|
|
||||||
c.Act(from, func() { c.mtu = mtu })
|
|
||||||
}
|
|
||||||
|
|
||||||
// This should never be called from an actor, used in the dial functions
|
|
||||||
func (c *Conn) search() error {
|
|
||||||
var err error
|
|
||||||
done := make(chan struct{})
|
|
||||||
phony.Block(&c.core.router, func() {
|
|
||||||
_, isIn := c.core.router.searches.searches[*c.nodeID]
|
|
||||||
if !isIn {
|
|
||||||
searchCompleted := func(sinfo *sessionInfo, e error) {
|
|
||||||
select {
|
|
||||||
case <-done:
|
|
||||||
// Somehow this was called multiple times, TODO don't let that happen
|
|
||||||
if sinfo != nil {
|
|
||||||
// Need to clean up to avoid a session leak
|
|
||||||
sinfo.cancel.Cancel(nil)
|
|
||||||
sinfo.sessions.removeSession(sinfo)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
if sinfo != nil {
|
|
||||||
// Finish initializing the session
|
|
||||||
c.session = sinfo
|
|
||||||
c.session.setConn(nil, c)
|
|
||||||
c.nodeID = crypto.GetNodeID(&c.session.theirPermPub)
|
|
||||||
for i := range c.nodeMask {
|
|
||||||
c.nodeMask[i] = 0xFF
|
|
||||||
}
|
|
||||||
}
|
|
||||||
err = e
|
|
||||||
close(done)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sinfo := c.core.router.searches.newIterSearch(c.nodeID, c.nodeMask, searchCompleted)
|
|
||||||
sinfo.startSearch()
|
|
||||||
} else {
|
|
||||||
err = errors.New("search already exists")
|
|
||||||
close(done)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
<-done
|
|
||||||
if c.session == nil && err == nil {
|
|
||||||
panic("search failed but returned no error")
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Used in session keep-alive traffic
|
|
||||||
func (c *Conn) _doSearch() {
|
|
||||||
s := fmt.Sprintf("conn=%p", c)
|
|
||||||
routerWork := func() {
|
|
||||||
// Check to see if there is a search already matching the destination
|
|
||||||
sinfo, isIn := c.core.router.searches.searches[*c.nodeID]
|
|
||||||
if !isIn {
|
|
||||||
// Nothing was found, so create a new search
|
|
||||||
searchCompleted := func(sinfo *sessionInfo, e error) {}
|
|
||||||
sinfo = c.core.router.searches.newIterSearch(c.nodeID, c.nodeMask, searchCompleted)
|
|
||||||
c.core.log.Debugf("%s DHT search started: %p", s, sinfo)
|
|
||||||
// Start the search
|
|
||||||
sinfo.startSearch()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
c.core.router.Act(c.session, routerWork)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) _getDeadlineCancellation(t *time.Time) (util.Cancellation, bool) {
|
|
||||||
if t != nil {
|
|
||||||
// A deadline is set, so return a Cancellation that uses it
|
|
||||||
c := util.CancellationWithDeadline(c.session.cancel, *t)
|
|
||||||
return c, true
|
|
||||||
}
|
|
||||||
// No deadline was set, so just return the existing cancellation and a dummy value
|
|
||||||
return c.session.cancel, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetReadCallback allows you to specify a function that will be called whenever
|
|
||||||
// a packet is received. This should be used if you wish to implement
|
|
||||||
// asynchronous patterns for receiving data from the remote node.
|
|
||||||
//
|
|
||||||
// Note that if a read callback has been supplied, you should no longer attempt
|
|
||||||
// to use the synchronous Read function.
|
|
||||||
func (c *Conn) SetReadCallback(callback func([]byte)) {
|
|
||||||
c.Act(nil, func() {
|
|
||||||
c.readCallback = callback
|
|
||||||
c._drainReadBuffer()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) _drainReadBuffer() {
|
|
||||||
if c.readCallback == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case bs := <-c.readBuffer:
|
|
||||||
c.readCallback(bs)
|
|
||||||
c.Act(nil, c._drainReadBuffer) // In case there's more
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Called by the session to pass a new message to the Conn
|
|
||||||
func (c *Conn) recvMsg(from phony.Actor, msg []byte) {
|
|
||||||
c.Act(from, func() {
|
|
||||||
if c.readCallback != nil {
|
|
||||||
c.readCallback(msg)
|
|
||||||
} else {
|
|
||||||
select {
|
|
||||||
case c.readBuffer <- msg:
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Used internally by Read, the caller is responsible for util.PutBytes when they're done.
|
|
||||||
func (c *Conn) readNoCopy() ([]byte, error) {
|
|
||||||
var cancel util.Cancellation
|
|
||||||
var doCancel bool
|
|
||||||
phony.Block(c, func() { cancel, doCancel = c._getDeadlineCancellation(c.readDeadline) })
|
|
||||||
if doCancel {
|
|
||||||
defer cancel.Cancel(nil)
|
|
||||||
}
|
|
||||||
// Wait for some traffic to come through from the session
|
|
||||||
select {
|
|
||||||
case <-cancel.Finished():
|
|
||||||
if cancel.Error() == util.CancellationTimeoutError {
|
|
||||||
return nil, ConnError{errors.New("read timeout"), true, false, false, 0}
|
|
||||||
}
|
|
||||||
return nil, ConnError{errors.New("session closed"), false, false, true, 0}
|
|
||||||
case bs := <-c.readBuffer:
|
|
||||||
return bs, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read allows you to read from the connection in a synchronous fashion. The
|
|
||||||
// function will block up until the point that either new data is available, the
|
|
||||||
// connection has been closed or the read deadline has been reached. If the
|
|
||||||
// function succeeds, the number of bytes read from the connection will be
|
|
||||||
// returned. Otherwise, an error condition will be returned.
|
|
||||||
//
|
|
||||||
// Note that you can also implement asynchronous reads by using SetReadCallback.
|
|
||||||
// If you do that, you should no longer attempt to use the Read function.
|
|
||||||
func (c *Conn) Read(b []byte) (int, error) {
|
|
||||||
bs, err := c.readNoCopy()
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
n := len(bs)
|
|
||||||
if len(bs) > len(b) {
|
|
||||||
n = len(b)
|
|
||||||
err = ConnError{errors.New("read buffer too small for entire packet"), false, true, false, 0}
|
|
||||||
}
|
|
||||||
// Copy results to the output slice and clean up
|
|
||||||
copy(b, bs)
|
|
||||||
// Return the number of bytes copied to the slice, along with any error
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Conn) _write(msg FlowKeyMessage) error {
|
|
||||||
if len(msg.Message) > int(c.mtu) {
|
|
||||||
return ConnError{errors.New("packet too big"), true, false, false, int(c.mtu)}
|
|
||||||
}
|
|
||||||
c.session.Act(c, func() {
|
|
||||||
// Send the packet
|
|
||||||
c.session._send(msg)
|
|
||||||
// Session keep-alive, while we wait for the crypto workers from send
|
|
||||||
switch {
|
|
||||||
case time.Since(c.session.time) > 6*time.Second:
|
|
||||||
if c.session.time.Before(c.session.pingTime) && time.Since(c.session.pingTime) > 6*time.Second {
|
|
||||||
// TODO double check that the above condition is correct
|
|
||||||
c._doSearch()
|
|
||||||
} else {
|
|
||||||
c.session.ping(c.session) // TODO send from self if this becomes an actor
|
|
||||||
}
|
|
||||||
case c.session.reset && c.session.pingTime.Before(c.session.time):
|
|
||||||
c.session.ping(c.session) // TODO send from self if this becomes an actor
|
|
||||||
default: // Don't do anything, to keep traffic throttled
|
|
||||||
}
|
|
||||||
})
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteFrom should be called by a phony.Actor, and tells the Conn to send a
|
|
||||||
// message. This is used internally by Write. If the callback is called with a
|
|
||||||
// non-nil value, then it is safe to reuse the argument FlowKeyMessage.
|
|
||||||
func (c *Conn) WriteFrom(from phony.Actor, msg FlowKeyMessage, callback func(error)) {
|
|
||||||
c.Act(from, func() {
|
|
||||||
callback(c._write(msg))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeNoCopy is used internally by Write and makes use of WriteFrom under the hood.
|
|
||||||
// The caller must not reuse the argument FlowKeyMessage when a nil error is returned.
|
|
||||||
func (c *Conn) writeNoCopy(msg FlowKeyMessage) error {
|
|
||||||
var cancel util.Cancellation
|
|
||||||
var doCancel bool
|
|
||||||
phony.Block(c, func() { cancel, doCancel = c._getDeadlineCancellation(c.writeDeadline) })
|
|
||||||
if doCancel {
|
|
||||||
defer cancel.Cancel(nil)
|
|
||||||
}
|
|
||||||
var err error
|
|
||||||
select {
|
|
||||||
case <-cancel.Finished():
|
|
||||||
if cancel.Error() == util.CancellationTimeoutError {
|
|
||||||
err = ConnError{errors.New("write timeout"), true, false, false, 0}
|
|
||||||
} else {
|
|
||||||
err = ConnError{errors.New("session closed"), false, false, true, 0}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
done := make(chan struct{})
|
|
||||||
callback := func(e error) { err = e; close(done) }
|
|
||||||
c.WriteFrom(nil, msg, callback)
|
|
||||||
<-done
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write allows you to write to the connection in a synchronous fashion. This
|
|
||||||
// function may block until either the write has completed, the connection has
|
|
||||||
// been closed or the write deadline has been reached. If the function succeeds,
|
|
||||||
// the number of written bytes is returned. Otherwise, an error condition is
|
|
||||||
// returned.
|
|
||||||
func (c *Conn) Write(b []byte) (int, error) {
|
|
||||||
written := len(b)
|
|
||||||
bs := make([]byte, 0, len(b)+crypto.BoxOverhead)
|
|
||||||
bs = append(bs, b...)
|
|
||||||
msg := FlowKeyMessage{Message: bs}
|
|
||||||
err := c.writeNoCopy(msg)
|
|
||||||
if err != nil {
|
|
||||||
written = 0
|
|
||||||
}
|
|
||||||
return written, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close will close an open connection and any blocking operations on the
|
|
||||||
// connection will unblock and return. From this point forward, the connection
|
|
||||||
// can no longer be used and you should no longer attempt to Read or Write to
|
|
||||||
// the connection.
|
|
||||||
func (c *Conn) Close() (err error) {
|
|
||||||
phony.Block(c, func() {
|
|
||||||
if c.session != nil {
|
|
||||||
// Close the session, if it hasn't been closed already
|
|
||||||
if e := c.session.cancel.Cancel(errors.New("connection closed")); e != nil {
|
|
||||||
err = ConnError{errors.New("close failed, session already closed"), false, false, true, 0}
|
|
||||||
} else {
|
|
||||||
c.session.doRemove()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// LocalAddr returns the complete public key of the local side of the
|
|
||||||
// connection. This is always going to return your own node's public key.
|
|
||||||
func (c *Conn) LocalAddr() net.Addr {
|
|
||||||
return &c.core.boxPub
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoteAddr returns the complete public key of the remote side of the
|
|
||||||
// connection.
|
|
||||||
func (c *Conn) RemoteAddr() net.Addr {
|
|
||||||
if c.session != nil {
|
|
||||||
return &c.session.theirPermPub
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetDeadline is equivalent to calling both SetReadDeadline and
|
|
||||||
// SetWriteDeadline with the same value, configuring the maximum amount of time
|
|
||||||
// that synchronous Read and Write operations can block for. If no deadline is
|
|
||||||
// configured, Read and Write operations can potentially block indefinitely.
|
|
||||||
func (c *Conn) SetDeadline(t time.Time) error {
|
|
||||||
c.SetReadDeadline(t)
|
|
||||||
c.SetWriteDeadline(t)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetReadDeadline configures the maximum amount of time that a synchronous Read
|
|
||||||
// operation can block for. A Read operation will unblock at the point that the
|
|
||||||
// read deadline is reached if no other condition (such as data arrival or
|
|
||||||
// connection closure) happens first. If no deadline is configured, Read
|
|
||||||
// operations can potentially block indefinitely.
|
|
||||||
func (c *Conn) SetReadDeadline(t time.Time) error {
|
|
||||||
// TODO warn that this can block while waiting for the Conn actor to run, so don't call it from other actors...
|
|
||||||
phony.Block(c, func() { c.readDeadline = &t })
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetWriteDeadline configures the maximum amount of time that a synchronous
|
|
||||||
// Write operation can block for. A Write operation will unblock at the point
|
|
||||||
// that the read deadline is reached if no other condition (such as data sending
|
|
||||||
// or connection closure) happens first. If no deadline is configured, Write
|
|
||||||
// operations can potentially block indefinitely.
|
|
||||||
func (c *Conn) SetWriteDeadline(t time.Time) error {
|
|
||||||
// TODO warn that this can block while waiting for the Conn actor to run, so don't call it from other actors...
|
|
||||||
phony.Block(c, func() { c.writeDeadline = &t })
|
|
||||||
return nil
|
|
||||||
}
|
|
@@ -1,16 +1,18 @@
 package yggdrasil
 
 import (
+    "crypto/ed25519"
     "encoding/hex"
     "errors"
     "io/ioutil"
     "time"
 
+    iw "github.com/Arceliar/ironwood/encrypted"
     "github.com/Arceliar/phony"
     "github.com/gologme/log"
 
     "github.com/yggdrasil-network/yggdrasil-go/src/config"
-    "github.com/yggdrasil-network/yggdrasil-go/src/crypto"
+    //"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
     "github.com/yggdrasil-network/yggdrasil-go/src/version"
 )
 
@@ -21,14 +23,10 @@ type Core struct {
     // We're going to keep our own copy of the provided config - that way we can
     // guarantee that it will be covered by the mutex
     phony.Inbox
+    *iw.PacketConn
     config config.NodeState // Config
-    boxPub crypto.BoxPubKey
-    boxPriv crypto.BoxPrivKey
-    sigPub crypto.SigPubKey
-    sigPriv crypto.SigPrivKey
-    switchTable switchTable
-    peers peers
-    router router
+    secret ed25519.PrivateKey
+    public ed25519.PublicKey
     links links
     log *log.Logger
     addPeerTimer *time.Timer
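The struct above drops the NaCl box/sig key pairs and the peers, router, and switchTable subsystems, keeping a bare ed25519 key pair and embedding ironwood's *iw.PacketConn directly in Core. Because the field is embedded, the PacketConn's methods are promoted onto Core itself, so later code can call connection methods straight on the core object. A small illustration of that promotion with stand-in types (the names below are hypothetical; only the embedding pattern is taken from the diff):

    package main

    import "fmt"

    // fakePacketConn stands in for ironwood's encrypted.PacketConn.
    type fakePacketConn struct{}

    func (*fakePacketConn) WriteTo(p []byte, addr string) (int, error) { return len(p), nil }

    // core embeds the connection the way the new Core embeds *iw.PacketConn,
    // so the connection's methods can be called directly on core.
    type core struct {
        *fakePacketConn
        public []byte // stands in for the ed25519.PublicKey field
    }

    func main() {
        c := core{fakePacketConn: &fakePacketConn{}}
        n, _ := c.WriteTo([]byte("hello"), "peer") // promoted from the embedded conn
        fmt.Println(n)                             // 5
    }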
@@ -45,40 +43,23 @@ func (c *Core) _init() error {
 
     current := c.config.GetCurrent()
 
-    boxPrivHex, err := hex.DecodeString(current.EncryptionPrivateKey)
+    sigPriv, err := hex.DecodeString(current.SigningPrivateKey)
     if err != nil {
         return err
     }
-    if len(boxPrivHex) < crypto.BoxPrivKeyLen {
-        return errors.New("EncryptionPrivateKey is incorrect length")
-    }
-
-    sigPrivHex, err := hex.DecodeString(current.SigningPrivateKey)
-    if err != nil {
-        return err
-    }
-    if len(sigPrivHex) < crypto.SigPrivKeyLen {
+    if len(sigPriv) < ed25519.PrivateKeySize {
         return errors.New("SigningPrivateKey is incorrect length")
     }
 
-    copy(c.boxPriv[:], boxPrivHex)
-    copy(c.sigPriv[:], sigPrivHex)
+    c.secret = ed25519.PrivateKey(sigPriv)
+    sigPub := c.secret.Public()
+    c.public = sigPub.(ed25519.PublicKey)
 
-    boxPub, sigPub := c.boxPriv.Public(), c.sigPriv.Public()
-
-    copy(c.boxPub[:], boxPub[:])
-    copy(c.sigPub[:], sigPub[:])
-
-    if bp := hex.EncodeToString(c.boxPub[:]); current.EncryptionPublicKey != bp {
-        c.log.Warnln("EncryptionPublicKey in config is incorrect, should be", bp)
+    pc, err := iw.NewPacketConn(c.secret)
+    if err != nil {
+        return err
     }
-    if sp := hex.EncodeToString(c.sigPub[:]); current.SigningPublicKey != sp {
-        c.log.Warnln("SigningPublicKey in config is incorrect, should be", sp)
-    }
-
-    c.peers.init(c)
-    c.router.init(c)
-    c.switchTable.init(c) // TODO move before peers? before router?
+    c.PacketConn = pc
 
     return nil
 }
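The rewritten _init drops the encryption (box) key entirely: it hex-decodes SigningPrivateKey into an ed25519.PrivateKey, derives the public key from it, and hands the private key to ironwood's iw.NewPacketConn, storing the result in the embedded PacketConn field. A self-contained sketch of the key-handling half, using only the standard library (the ironwood call is left as a comment because it requires the ../ironwood checkout wired up in go.mod):

    package main

    import (
        "crypto/ed25519"
        "crypto/rand"
        "encoding/hex"
        "errors"
        "fmt"
    )

    // initKeys mirrors the key handling in the new Core._init: decode the hex
    // SigningPrivateKey from the config, then derive the public key from it.
    func initKeys(signingPrivateKey string) (ed25519.PrivateKey, ed25519.PublicKey, error) {
        sigPriv, err := hex.DecodeString(signingPrivateKey)
        if err != nil {
            return nil, nil, err
        }
        if len(sigPriv) < ed25519.PrivateKeySize {
            return nil, nil, errors.New("SigningPrivateKey is incorrect length")
        }
        secret := ed25519.PrivateKey(sigPriv)
        public := secret.Public().(ed25519.PublicKey)
        // The real _init continues with: pc, err := iw.NewPacketConn(secret)
        // and then sets c.PacketConn = pc.
        return secret, public, nil
    }

    func main() {
        // Generate a throwaway key pair just to exercise the helper.
        _, priv, _ := ed25519.GenerateKey(rand.Reader)
        secret, public, err := initKeys(hex.EncodeToString(priv))
        fmt.Println(len(secret), len(public), err) // 64 32 <nil>
    }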
@@ -126,8 +107,9 @@ func (c *Core) UpdateConfig(config *config.NodeConfig) {
         c.config.Replace(*config)
 
         // Notify the router and switch about the new configuration
-        c.router.Act(c, c.router.reconfigure)
-        c.switchTable.Act(c, c.switchTable.reconfigure)
+        panic("TODO")
+        //c.router.Act(c, c.router.reconfigure)
+        //c.switchTable.Act(c, c.switchTable.reconfigure)
     })
 }
 
@@ -170,15 +152,15 @@ func (c *Core) _start(nc *config.NodeConfig, log *log.Logger) (*config.NodeState
         return nil, err
     }
 
-    if err := c.switchTable.start(); err != nil {
-        c.log.Errorln("Failed to start switch")
-        return nil, err
-    }
+    //if err := c.switchTable.start(); err != nil {
+    //    c.log.Errorln("Failed to start switch")
+    //    return nil, err
+    //}
 
-    if err := c.router.start(); err != nil {
-        c.log.Errorln("Failed to start router")
-        return nil, err
-    }
+    //if err := c.router.start(); err != nil {
+    //    c.log.Errorln("Failed to start router")
+    //    return nil, err
+    //}
 
     c.Act(c, c._addPeerLoop)
 
@ -1,463 +0,0 @@
|
|||||||
package yggdrasil
|
|
||||||
|
|
||||||
// A chord-like Distributed Hash Table (DHT).
|
|
||||||
// Used to look up coords given a NodeID and bitmask (taken from an IPv6 address).
|
|
||||||
// Keeps track of immediate successor, predecessor, and all peers.
|
|
||||||
// Also keeps track of other nodes if they're closer in tree space than all other known nodes encountered when heading in either direction to that point, under the hypothesis that, for the kinds of networks we care about, this should probabilistically include the node needed to keep lookups to near O(logn) steps.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sort"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
dht_lookup_size = 16
|
|
||||||
dht_timeout = 6 * time.Minute
|
|
||||||
dht_max_delay = 5 * time.Minute
|
|
||||||
dht_max_delay_dirty = 30 * time.Second
|
|
||||||
)
|
|
||||||
|
|
||||||
// dhtInfo represents everything we know about a node in the DHT.
|
|
||||||
// This includes its key, a cache of its NodeID, coords, and timing/ping related info for deciding who/when to ping nodes for maintenance.
|
|
||||||
type dhtInfo struct {
|
|
||||||
nodeID_hidden *crypto.NodeID
|
|
||||||
key crypto.BoxPubKey
|
|
||||||
coords []byte
|
|
||||||
recv time.Time // When we last received a message
|
|
||||||
pings int // Time out if at least 3 consecutive maintenance pings drop
|
|
||||||
throttle time.Duration
|
|
||||||
path []byte // source route the destination, learned from response rpath
|
|
||||||
dirty bool // Set to true if we've used this node in ping responses (for queries about someone other than the person doing the asking, i.e. real searches) since the last time we heard from the node
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns the *NodeID associated with dhtInfo.key, calculating it on the fly the first time or from a cache all subsequent times.
|
|
||||||
func (info *dhtInfo) getNodeID() *crypto.NodeID {
|
|
||||||
if info.nodeID_hidden == nil {
|
|
||||||
info.nodeID_hidden = crypto.GetNodeID(&info.key)
|
|
||||||
}
|
|
||||||
return info.nodeID_hidden
|
|
||||||
}
|
|
||||||
|
|
||||||
// Request for a node to do a lookup.
|
|
||||||
// Includes our key and coords so they can send a response back, and the destination NodeID we want to ask about.
|
|
||||||
type dhtReq struct {
|
|
||||||
Key crypto.BoxPubKey // Key of whoever asked
|
|
||||||
Coords []byte // Coords of whoever asked
|
|
||||||
Dest crypto.NodeID // NodeID they're asking about
|
|
||||||
}
|
|
||||||
|
|
||||||
// Response to a DHT lookup.
|
|
||||||
// Includes the key and coords of the node that's responding, and the destination they were asked about.
|
|
||||||
// The main part is Infos []*dhtInfo, the lookup response.
|
|
||||||
type dhtRes struct {
|
|
||||||
Key crypto.BoxPubKey // key of the sender
|
|
||||||
Coords []byte // coords of the sender
|
|
||||||
Dest crypto.NodeID
|
|
||||||
Infos []*dhtInfo // response
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parts of a DHT req usable as a key in a map.
|
|
||||||
type dhtReqKey struct {
|
|
||||||
key crypto.BoxPubKey
|
|
||||||
dest crypto.NodeID
|
|
||||||
}
|
|
||||||
|
|
||||||
// The main DHT struct.
|
|
||||||
type dht struct {
|
|
||||||
router *router
|
|
||||||
nodeID crypto.NodeID
|
|
||||||
reqs map[dhtReqKey]time.Time // Keeps track of recent outstanding requests
|
|
||||||
callbacks map[dhtReqKey][]dht_callbackInfo // Search and admin lookup callbacks
|
|
||||||
// These next two could be replaced by a single linked list or similar...
|
|
||||||
table map[crypto.NodeID]*dhtInfo
|
|
||||||
imp []*dhtInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initializes the DHT.
|
|
||||||
func (t *dht) init(r *router) {
|
|
||||||
t.router = r
|
|
||||||
t.nodeID = *t.router.core.NodeID()
|
|
||||||
t.callbacks = make(map[dhtReqKey][]dht_callbackInfo)
|
|
||||||
t.reset()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *dht) reconfigure() {
|
|
||||||
// This is where reconfiguration would go, if we had anything to do
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resets the DHT in response to coord changes.
|
|
||||||
// This empties all info from the DHT and drops outstanding requests.
|
|
||||||
func (t *dht) reset() {
|
|
||||||
t.reqs = make(map[dhtReqKey]time.Time)
|
|
||||||
for _, info := range t.table {
|
|
||||||
if t.isImportant(info) {
|
|
||||||
t.ping(info, nil) // This will source route if a path is already known
|
|
||||||
if info.path != nil {
|
|
||||||
// In case the source route died, but the dest coords are still OK...
|
|
||||||
info.path = nil
|
|
||||||
t.ping(info, nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
t.table = make(map[crypto.NodeID]*dhtInfo)
|
|
||||||
t.imp = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Does a DHT lookup and returns up to dht_lookup_size results.
|
|
||||||
func (t *dht) lookup(nodeID *crypto.NodeID, everything bool) []*dhtInfo {
|
|
||||||
results := make([]*dhtInfo, 0, len(t.table))
|
|
||||||
for _, info := range t.table {
|
|
||||||
results = append(results, info)
|
|
||||||
}
|
|
||||||
if len(results) > dht_lookup_size {
|
|
||||||
// Drop the middle part, so we keep some nodes before and after.
|
|
||||||
// This should help to bootstrap / recover more quickly.
|
|
||||||
sort.SliceStable(results, func(i, j int) bool {
|
|
||||||
return dht_ordered(nodeID, results[i].getNodeID(), results[j].getNodeID())
|
|
||||||
})
|
|
||||||
newRes := make([]*dhtInfo, 0, len(results))
|
|
||||||
newRes = append(newRes, results[len(results)-dht_lookup_size/2:]...)
|
|
||||||
newRes = append(newRes, results[:len(results)-dht_lookup_size/2]...)
|
|
||||||
results = newRes
|
|
||||||
results = results[:dht_lookup_size]
|
|
||||||
}
|
|
||||||
for _, info := range results {
|
|
||||||
info.dirty = true
|
|
||||||
}
|
|
||||||
return results
|
|
||||||
}
|
|
||||||
|
|
||||||
// Insert into table, preserving the time we last sent a packet if the node was already in the table, otherwise setting that time to now.
|
|
||||||
func (t *dht) insert(info *dhtInfo) {
|
|
||||||
if *info.getNodeID() == t.nodeID {
|
|
||||||
// This shouldn't happen, but don't add it if it does
|
|
||||||
return
|
|
||||||
}
|
|
||||||
info.recv = time.Now()
|
|
||||||
if oldInfo, isIn := t.table[*info.getNodeID()]; isIn {
|
|
||||||
sameCoords := true
|
|
||||||
if len(info.coords) != len(oldInfo.coords) {
|
|
||||||
sameCoords = false
|
|
||||||
} else {
|
|
||||||
for idx := 0; idx < len(info.coords); idx++ {
|
|
||||||
if info.coords[idx] != oldInfo.coords[idx] {
|
|
||||||
sameCoords = false
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if sameCoords {
|
|
||||||
info.throttle = oldInfo.throttle
|
|
||||||
}
|
|
||||||
}
|
|
||||||
t.imp = nil // It needs to update to get a pointer to the new info
|
|
||||||
t.table[*info.getNodeID()] = info
|
|
||||||
}
|
|
||||||
|
|
||||||
// Insert a peer into the table if it hasn't been pinged lately, to keep peers from dropping
|
|
||||||
func (t *dht) insertPeer(info *dhtInfo) {
|
|
||||||
t.insert(info) // FIXME this resets timers / ping counts / etc, so it seems kind of dangerous
|
|
||||||
t.ping(info, nil) // This is a quick fix to the above, ping them immediately...
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return true if first/second/third are (partially) ordered correctly.
|
|
||||||
func dht_ordered(first, second, third *crypto.NodeID) bool {
|
|
||||||
lessOrEqual := func(first, second *crypto.NodeID) bool {
|
|
||||||
for idx := 0; idx < crypto.NodeIDLen; idx++ {
|
|
||||||
if first[idx] > second[idx] {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if first[idx] < second[idx] {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
firstLessThanSecond := lessOrEqual(first, second)
|
|
||||||
secondLessThanThird := lessOrEqual(second, third)
|
|
||||||
thirdLessThanFirst := lessOrEqual(third, first)
|
|
||||||
switch {
|
|
||||||
case firstLessThanSecond && secondLessThanThird:
|
|
||||||
// Nothing wrapped around 0, the easy case
|
|
||||||
return true
|
|
||||||
case thirdLessThanFirst && firstLessThanSecond:
|
|
||||||
// Third wrapped around 0
|
|
||||||
return true
|
|
||||||
case secondLessThanThird && thirdLessThanFirst:
|
|
||||||
// Second (and third) wrapped around 0
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reads a request, performs a lookup, and responds.
|
|
||||||
// Update info about the node that sent the request.
|
|
||||||
func (t *dht) handleReq(req *dhtReq, rpath []byte) {
|
|
||||||
// Send them what they asked for
|
|
||||||
res := dhtRes{
|
|
||||||
Key: t.router.core.boxPub,
|
|
||||||
Coords: t.router.table.self.getCoords(),
|
|
||||||
Dest: req.Dest,
|
|
||||||
Infos: t.lookup(&req.Dest, false),
|
|
||||||
}
|
|
||||||
t.sendRes(&res, req, rpath)
|
|
||||||
// Also add them to our DHT
|
|
||||||
info := dhtInfo{
|
|
||||||
key: req.Key,
|
|
||||||
coords: req.Coords,
|
|
||||||
}
|
|
||||||
if _, isIn := t.table[*info.getNodeID()]; !isIn && t.isImportant(&info) {
|
|
||||||
t.ping(&info, nil)
|
|
||||||
}
|
|
||||||
// Maybe mark nodes from lookup as dirty
|
|
||||||
if req.Dest != *info.getNodeID() {
|
|
||||||
// This node asked about someone other than themself, so this wasn't just idle traffic.
|
|
||||||
for _, info := range res.Infos {
|
|
||||||
// Mark nodes dirty so we're sure to check up on them again later
|
|
||||||
info.dirty = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sends a lookup response to the specified node.
|
|
||||||
func (t *dht) sendRes(res *dhtRes, req *dhtReq, rpath []byte) {
|
|
||||||
// Send a reply for a dhtReq
|
|
||||||
bs := res.encode()
|
|
||||||
shared := t.router.sessions.getSharedKey(&t.router.core.boxPriv, &req.Key)
|
|
||||||
payload, nonce := crypto.BoxSeal(shared, bs, nil)
|
|
||||||
path := append([]byte{0}, switch_reverseCoordBytes(rpath)...)
|
|
||||||
p := wire_protoTrafficPacket{
|
|
||||||
Offset: 1,
|
|
||||||
Coords: path,
|
|
||||||
ToKey: req.Key,
|
|
||||||
FromKey: t.router.core.boxPub,
|
|
||||||
Nonce: *nonce,
|
|
||||||
Payload: payload,
|
|
||||||
}
|
|
||||||
packet := p.encode()
|
|
||||||
t.router.out(packet)
|
|
||||||
}
|
|
||||||
|
|
||||||
type dht_callbackInfo struct {
|
|
||||||
f func(*dhtRes)
|
|
||||||
time time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// Adds a callback and removes it after some timeout.
|
|
||||||
func (t *dht) addCallback(rq *dhtReqKey, callback func(*dhtRes)) {
|
|
||||||
info := dht_callbackInfo{callback, time.Now().Add(6 * time.Second)}
|
|
||||||
t.callbacks[*rq] = append(t.callbacks[*rq], info)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reads a lookup response, checks that we had sent a matching request, and processes the response info.
|
|
||||||
// This mainly consists of updating the node we asked in our DHT (they responded, so we know they're still alive), and deciding if we want to do anything with their responses
|
|
||||||
func (t *dht) handleRes(res *dhtRes, rpath []byte) {
|
|
||||||
rq := dhtReqKey{res.Key, res.Dest}
|
|
||||||
if callbacks, isIn := t.callbacks[rq]; isIn {
|
|
||||||
for _, callback := range callbacks {
|
|
||||||
callback.f(res)
|
|
||||||
}
|
|
||||||
delete(t.callbacks, rq)
|
|
||||||
}
|
|
||||||
_, isIn := t.reqs[rq]
|
|
||||||
if !isIn {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
delete(t.reqs, rq)
|
|
||||||
rinfo := dhtInfo{
|
|
||||||
key: res.Key,
|
|
||||||
coords: res.Coords,
|
|
||||||
path: switch_reverseCoordBytes(rpath),
|
|
||||||
}
|
|
||||||
if t.isImportant(&rinfo) {
|
|
||||||
t.insert(&rinfo)
|
|
||||||
}
|
|
||||||
for _, info := range res.Infos {
|
|
||||||
if *info.getNodeID() == t.nodeID {
|
|
||||||
continue
|
|
||||||
} // Skip self
|
|
||||||
if _, isIn := t.table[*info.getNodeID()]; isIn {
|
|
||||||
// TODO? don't skip if coords are different?
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if t.isImportant(info) {
|
|
||||||
t.ping(info, nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sends a lookup request to the specified node.
|
|
||||||
func (t *dht) sendReq(req *dhtReq, dest *dhtInfo) {
|
|
||||||
// Send a dhtReq to the node in dhtInfo
|
|
||||||
bs := req.encode()
|
|
||||||
shared := t.router.sessions.getSharedKey(&t.router.core.boxPriv, &dest.key)
|
|
||||||
payload, nonce := crypto.BoxSeal(shared, bs, nil)
|
|
||||||
p := wire_protoTrafficPacket{
|
|
||||||
Coords: dest.coords,
|
|
||||||
ToKey: dest.key,
|
|
||||||
FromKey: t.router.core.boxPub,
|
|
||||||
Nonce: *nonce,
|
|
||||||
Payload: payload,
|
|
||||||
}
|
|
||||||
if dest.path != nil {
|
|
||||||
p.Coords = append([]byte{0}, dest.path...)
|
|
||||||
p.Offset += 1
|
|
||||||
}
|
|
||||||
packet := p.encode()
|
|
||||||
t.router.out(packet)
|
|
||||||
rq := dhtReqKey{dest.key, req.Dest}
|
|
||||||
t.reqs[rq] = time.Now()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sends a lookup to this info, looking for the target.
|
|
||||||
func (t *dht) ping(info *dhtInfo, target *crypto.NodeID) {
|
|
||||||
// Creates a req for the node at dhtInfo, asking them about the target (if one is given) or themself (if no target is given)
|
|
||||||
if target == nil {
|
|
||||||
target = &t.nodeID
|
|
||||||
}
|
|
||||||
req := dhtReq{
|
|
||||||
Key: t.router.core.boxPub,
|
|
||||||
Coords: t.router.table.self.getCoords(),
|
|
||||||
Dest: *target,
|
|
||||||
}
|
|
||||||
t.sendReq(&req, info)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Periodic maintenance work to keep important DHT nodes alive.
|
|
||||||
func (t *dht) doMaintenance() {
|
|
||||||
now := time.Now()
|
|
||||||
newReqs := make(map[dhtReqKey]time.Time, len(t.reqs))
|
|
||||||
for key, start := range t.reqs {
|
|
||||||
if now.Sub(start) < 6*time.Second {
|
|
||||||
newReqs[key] = start
|
|
||||||
}
|
|
||||||
}
|
|
||||||
t.reqs = newReqs
|
|
||||||
newCallbacks := make(map[dhtReqKey][]dht_callbackInfo, len(t.callbacks))
|
|
||||||
for key, cs := range t.callbacks {
|
|
||||||
for _, c := range cs {
|
|
||||||
if now.Before(c.time) {
|
|
||||||
newCallbacks[key] = append(newCallbacks[key], c)
|
|
||||||
} else {
|
|
||||||
// Signal failure
|
|
||||||
c.f(nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
t.callbacks = newCallbacks
|
|
||||||
for infoID, info := range t.table {
|
|
||||||
switch {
|
|
||||||
case info.pings > 6:
|
|
||||||
// It failed to respond to too many pings
|
|
||||||
fallthrough
|
|
||||||
case now.Sub(info.recv) > dht_timeout:
|
|
||||||
// It's too old
|
|
||||||
fallthrough
|
|
||||||
case info.dirty && now.Sub(info.recv) > dht_max_delay_dirty && !t.isImportant(info):
|
|
||||||
// We won't ping it to refresh it, so just drop it
|
|
||||||
delete(t.table, infoID)
|
|
||||||
t.imp = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, info := range t.getImportant() {
|
|
||||||
switch {
|
|
||||||
case now.Sub(info.recv) > info.throttle:
|
|
||||||
info.throttle *= 2
|
|
||||||
if info.throttle < time.Second {
|
|
||||||
info.throttle = time.Second
|
|
||||||
} else if info.throttle > dht_max_delay {
|
|
||||||
info.throttle = dht_max_delay
|
|
||||||
}
|
|
||||||
fallthrough
|
|
||||||
case info.dirty && now.Sub(info.recv) > dht_max_delay_dirty:
|
|
||||||
t.ping(info, nil)
|
|
||||||
info.pings++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Gets a list of important nodes, used by isImportant.
|
|
||||||
func (t *dht) getImportant() []*dhtInfo {
|
|
||||||
if t.imp == nil {
|
|
||||||
// Get a list of all known nodes
|
|
||||||
infos := make([]*dhtInfo, 0, len(t.table))
|
|
||||||
for _, info := range t.table {
|
|
||||||
infos = append(infos, info)
|
|
||||||
}
|
|
||||||
// Sort them by increasing order in distance along the ring
|
|
||||||
sort.SliceStable(infos, func(i, j int) bool {
|
|
||||||
// Sort in order of predecessors (!), reverse from chord normal, because it plays nicer with zero bits for unknown parts of target addresses
|
|
||||||
return dht_ordered(infos[j].getNodeID(), infos[i].getNodeID(), &t.nodeID)
|
|
||||||
})
|
|
||||||
// Keep the ones that are no further than the closest seen so far
|
|
||||||
minDist := ^uint64(0)
|
|
||||||
loc := t.router.table.self
|
|
||||||
important := infos[:0]
|
|
||||||
for _, info := range infos {
|
|
||||||
dist := uint64(loc.dist(info.coords))
|
|
||||||
if dist < minDist {
|
|
||||||
minDist = dist
|
|
||||||
important = append(important, info)
|
|
||||||
} else if len(important) < 2 {
|
|
||||||
important = append(important, info)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
var temp []*dhtInfo
|
|
||||||
minDist = ^uint64(0)
|
|
||||||
for idx := len(infos) - 1; idx >= 0; idx-- {
|
|
||||||
info := infos[idx]
|
|
||||||
dist := uint64(loc.dist(info.coords))
|
|
||||||
if dist < minDist {
|
|
||||||
minDist = dist
|
|
||||||
temp = append(temp, info)
|
|
||||||
} else if len(temp) < 2 {
|
|
||||||
temp = append(temp, info)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for idx := len(temp) - 1; idx >= 0; idx-- {
|
|
||||||
important = append(important, temp[idx])
|
|
||||||
}
|
|
||||||
t.imp = important
|
|
||||||
}
|
|
||||||
return t.imp
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns true if this is a node we need to keep track of for the DHT to work.
|
|
||||||
func (t *dht) isImportant(ninfo *dhtInfo) bool {
|
|
||||||
if ninfo.key == t.router.core.boxPub {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
important := t.getImportant()
|
|
||||||
// Check if ninfo is of equal or greater importance to what we already know
|
|
||||||
loc := t.router.table.self
|
|
||||||
ndist := uint64(loc.dist(ninfo.coords))
|
|
||||||
minDist := ^uint64(0)
|
|
||||||
for _, info := range important {
|
|
||||||
if (*info.getNodeID() == *ninfo.getNodeID()) ||
|
|
||||||
(ndist < minDist && dht_ordered(info.getNodeID(), ninfo.getNodeID(), &t.nodeID)) {
|
|
||||||
// Either the same node, or a better one
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
dist := uint64(loc.dist(info.coords))
|
|
||||||
if dist < minDist {
|
|
||||||
minDist = dist
|
|
||||||
}
|
|
||||||
}
|
|
||||||
minDist = ^uint64(0)
|
|
||||||
for idx := len(important) - 1; idx >= 0; idx-- {
|
|
||||||
info := important[idx]
|
|
||||||
if (*info.getNodeID() == *ninfo.getNodeID()) ||
|
|
||||||
(ndist < minDist && dht_ordered(&t.nodeID, ninfo.getNodeID(), info.getNodeID())) {
|
|
||||||
// Either the same node, or a better one
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
dist := uint64(loc.dist(info.coords))
|
|
||||||
if dist < minDist {
|
|
||||||
minDist = dist
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// We didn't find any important node that ninfo is better than
|
|
||||||
return false
|
|
||||||
}
|
|
@ -1,120 +0,0 @@
|
|||||||
package yggdrasil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"net"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Dialer represents an Yggdrasil connection dialer.
|
|
||||||
type Dialer struct {
|
|
||||||
core *Core
|
|
||||||
}
|
|
||||||
|
|
||||||
// Dial opens a session to the given node. The first parameter should be
|
|
||||||
// "curve25519" or "nodeid" and the second parameter should contain a
|
|
||||||
// hexadecimal representation of the target. It uses DialContext internally.
|
|
||||||
func (d *Dialer) Dial(network, address string) (net.Conn, error) {
|
|
||||||
return d.DialContext(nil, network, address)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DialContext is used internally by Dial, and should only be used with a
|
|
||||||
// context that includes a timeout. It uses DialByNodeIDandMask internally when
|
|
||||||
// the network is "nodeid", or DialByPublicKey when the network is "curve25519".
|
|
||||||
func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
|
|
||||||
var nodeID crypto.NodeID
|
|
||||||
var nodeMask crypto.NodeID
|
|
||||||
// Process
|
|
||||||
switch network {
|
|
||||||
case "curve25519":
|
|
||||||
dest, err := hex.DecodeString(address)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if len(dest) != crypto.BoxPubKeyLen {
|
|
||||||
return nil, errors.New("invalid key length supplied")
|
|
||||||
}
|
|
||||||
var pubKey crypto.BoxPubKey
|
|
||||||
copy(pubKey[:], dest)
|
|
||||||
return d.DialByPublicKey(ctx, &pubKey)
|
|
||||||
case "nodeid":
|
|
||||||
// A node ID was provided - we don't need to do anything special with it
|
|
||||||
if tokens := strings.Split(address, "/"); len(tokens) == 2 {
|
|
||||||
l, err := strconv.Atoi(tokens[1])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
dest, err := hex.DecodeString(tokens[0])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
copy(nodeID[:], dest)
|
|
||||||
for idx := 0; idx < l; idx++ {
|
|
||||||
nodeMask[idx/8] |= 0x80 >> byte(idx%8)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
dest, err := hex.DecodeString(tokens[0])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
copy(nodeID[:], dest)
|
|
||||||
for i := range nodeMask {
|
|
||||||
nodeMask[i] = 0xFF
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return d.DialByNodeIDandMask(ctx, &nodeID, &nodeMask)
|
|
||||||
default:
|
|
||||||
// An unexpected address type was given, so give up
|
|
||||||
return nil, errors.New("unexpected address type")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DialByNodeIDandMask opens a session to the given node based on raw NodeID
|
|
||||||
// parameters. If ctx is nil or has no timeout, then a default timeout of 6
|
|
||||||
// seconds will apply, beginning *after* the search finishes.
|
|
||||||
func (d *Dialer) DialByNodeIDandMask(ctx context.Context, nodeID, nodeMask *crypto.NodeID) (net.Conn, error) {
|
|
||||||
startDial := time.Now()
|
|
||||||
conn := newConn(d.core, nodeID, nodeMask, nil)
|
|
||||||
if err := conn.search(); err != nil {
|
|
||||||
// TODO: make searches take a context, so they can be cancelled early
|
|
||||||
conn.Close()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
endSearch := time.Now()
|
|
||||||
d.core.log.Debugln("Dial searched for:", nodeID, "in time:", endSearch.Sub(startDial))
|
|
||||||
conn.session.setConn(nil, conn)
|
|
||||||
var cancel context.CancelFunc
|
|
||||||
if ctx == nil {
|
|
||||||
ctx = context.Background()
|
|
||||||
}
|
|
||||||
ctx, cancel = context.WithTimeout(ctx, 6*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
select {
|
|
||||||
case <-conn.session.init:
|
|
||||||
endInit := time.Now()
|
|
||||||
d.core.log.Debugln("Dial initialized session for:", nodeID, "in time:", endInit.Sub(endSearch))
|
|
||||||
d.core.log.Debugln("Finished dial for:", nodeID, "in time:", endInit.Sub(startDial))
|
|
||||||
return conn, nil
|
|
||||||
case <-ctx.Done():
|
|
||||||
conn.Close()
|
|
||||||
return nil, errors.New("session handshake timeout")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DialByPublicKey opens a session to the given node based on the public key. If
|
|
||||||
// ctx is nil or has no timeout, then a default timeout of 6 seconds will apply,
|
|
||||||
// beginning *after* the search finishes.
|
|
||||||
func (d *Dialer) DialByPublicKey(ctx context.Context, pubKey *crypto.BoxPubKey) (net.Conn, error) {
|
|
||||||
nodeID := crypto.GetNodeID(pubKey)
|
|
||||||
var nodeMask crypto.NodeID
|
|
||||||
for i := range nodeMask {
|
|
||||||
nodeMask[i] = 0xFF
|
|
||||||
}
|
|
||||||
return d.DialByNodeIDandMask(ctx, nodeID, &nodeMask)
|
|
||||||
}
|
|
@ -1,6 +1,7 @@
|
|||||||
package yggdrasil
|
package yggdrasil
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"crypto/ed25519"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
@ -17,8 +18,7 @@ import (
|
|||||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
||||||
"github.com/yggdrasil-network/yggdrasil-go/src/util"
|
"github.com/yggdrasil-network/yggdrasil-go/src/util"
|
||||||
"golang.org/x/net/proxy"
|
"golang.org/x/net/proxy"
|
||||||
|
//"github.com/Arceliar/phony" // TODO? use instead of mutexes
|
||||||
"github.com/Arceliar/phony"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type links struct {
|
type links struct {
|
||||||
@ -30,48 +30,27 @@ type links struct {
|
|||||||
// TODO timeout (to remove from switch), read from config.ReadTimeout
|
// TODO timeout (to remove from switch), read from config.ReadTimeout
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// linkInfo is used as a map key
|
||||||
type linkInfo struct {
|
type linkInfo struct {
|
||||||
box crypto.BoxPubKey // Their encryption key
|
key crypto.SigPubKey
|
||||||
sig crypto.SigPubKey // Their signing key
|
linkType string // Type of link, e.g. TCP, AWDL
|
||||||
linkType string // Type of link, e.g. TCP, AWDL
|
local string // Local name or address
|
||||||
local string // Local name or address
|
remote string // Remote name or address
|
||||||
remote string // Remote name or address
|
|
||||||
}
|
|
||||||
|
|
||||||
type linkMsgIO interface {
|
|
||||||
readMsg() ([]byte, error)
|
|
||||||
writeMsgs([][]byte) (int, error)
|
|
||||||
close() error
|
|
||||||
// These are temporary workarounds to stream semantics
|
|
||||||
_sendMetaBytes([]byte) error
|
|
||||||
_recvMetaBytes() ([]byte, error)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type link struct {
|
type link struct {
|
||||||
lname string
|
lname string
|
||||||
links *links
|
links *links
|
||||||
peer *peer
|
conn net.Conn
|
||||||
options linkOptions
|
options linkOptions
|
||||||
msgIO linkMsgIO
|
info linkInfo
|
||||||
info linkInfo
|
incoming bool
|
||||||
incoming bool
|
force bool
|
||||||
force bool
|
closed chan struct{}
|
||||||
closed chan struct{}
|
|
||||||
reader linkReader // Reads packets, notifies this link, passes packets to switch
|
|
||||||
writer linkWriter // Writes packets, notifies this link
|
|
||||||
phony.Inbox // Protects the below
|
|
||||||
sendTimer *time.Timer // Fires to signal that sending is blocked
|
|
||||||
keepAliveTimer *time.Timer // Fires to send keep-alive traffic
|
|
||||||
stallTimer *time.Timer // Fires to signal that no incoming traffic (including keep-alive) has been seen
|
|
||||||
closeTimer *time.Timer // Fires when the link has been idle so long we need to close it
|
|
||||||
readUnblocked bool // True if we've sent a read message unblocking this peer in the switch
|
|
||||||
writeUnblocked bool // True if we've sent a write message unblocking this peer in the swithc
|
|
||||||
shutdown bool // True if we're shutting down, avoids sending some messages that could race with new peers being crated in the same port
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type linkOptions struct {
|
type linkOptions struct {
|
||||||
pinnedCurve25519Keys map[crypto.BoxPubKey]struct{}
|
pinnedEd25519Keys map[crypto.SigPubKey]struct{}
|
||||||
pinnedEd25519Keys map[crypto.SigPubKey]struct{}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *links) init(c *Core) error {
|
func (l *links) init(c *Core) error {
|
||||||
@ -100,16 +79,6 @@ func (l *links) call(uri string, sintf string) error {
|
|||||||
}
|
}
|
||||||
pathtokens := strings.Split(strings.Trim(u.Path, "/"), "/")
|
pathtokens := strings.Split(strings.Trim(u.Path, "/"), "/")
|
||||||
tcpOpts := tcpOptions{}
|
tcpOpts := tcpOptions{}
|
||||||
if pubkeys, ok := u.Query()["curve25519"]; ok && len(pubkeys) > 0 {
|
|
||||||
tcpOpts.pinnedCurve25519Keys = make(map[crypto.BoxPubKey]struct{})
|
|
||||||
for _, pubkey := range pubkeys {
|
|
||||||
if boxPub, err := hex.DecodeString(pubkey); err == nil {
|
|
||||||
var boxPubKey crypto.BoxPubKey
|
|
||||||
copy(boxPubKey[:], boxPub)
|
|
||||||
tcpOpts.pinnedCurve25519Keys[boxPubKey] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if pubkeys, ok := u.Query()["ed25519"]; ok && len(pubkeys) > 0 {
|
if pubkeys, ok := u.Query()["ed25519"]; ok && len(pubkeys) > 0 {
|
||||||
tcpOpts.pinnedEd25519Keys = make(map[crypto.SigPubKey]struct{})
|
tcpOpts.pinnedEd25519Keys = make(map[crypto.SigPubKey]struct{})
|
||||||
for _, pubkey := range pubkeys {
|
for _, pubkey := range pubkeys {
|
||||||
@ -157,13 +126,13 @@ func (l *links) listen(uri string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *links) create(msgIO linkMsgIO, name, linkType, local, remote string, incoming, force bool, options linkOptions) (*link, error) {
|
func (l *links) create(conn net.Conn, name, linkType, local, remote string, incoming, force bool, options linkOptions) (*link, error) {
|
||||||
// Technically anything unique would work for names, but let's pick something human readable, just for debugging
|
// Technically anything unique would work for names, but let's pick something human readable, just for debugging
|
||||||
intf := link{
|
intf := link{
|
||||||
|
conn: conn,
|
||||||
lname: name,
|
lname: name,
|
||||||
links: l,
|
links: l,
|
||||||
options: options,
|
options: options,
|
||||||
msgIO: msgIO,
|
|
||||||
info: linkInfo{
|
info: linkInfo{
|
||||||
linkType: linkType,
|
linkType: linkType,
|
||||||
local: local,
|
local: local,
|
||||||
@ -172,10 +141,6 @@ func (l *links) create(msgIO linkMsgIO, name, linkType, local, remote string, in
|
|||||||
incoming: incoming,
|
incoming: incoming,
|
||||||
force: force,
|
force: force,
|
||||||
}
|
}
|
||||||
intf.writer.intf = &intf
|
|
||||||
intf.writer.worker = make(chan [][]byte, 1)
|
|
||||||
intf.reader.intf = &intf
|
|
||||||
intf.reader.err = make(chan error)
|
|
||||||
return &intf, nil
|
return &intf, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -189,30 +154,31 @@ func (l *links) stop() error {
|
|||||||
|
|
||||||
func (intf *link) handler() (chan struct{}, error) {
|
func (intf *link) handler() (chan struct{}, error) {
|
||||||
// TODO split some of this into shorter functions, so it's easier to read, and for the FIXME duplicate peer issue mentioned later
|
// TODO split some of this into shorter functions, so it's easier to read, and for the FIXME duplicate peer issue mentioned later
|
||||||
go func() {
|
defer intf.conn.Close()
|
||||||
for bss := range intf.writer.worker {
|
|
||||||
intf.msgIO.writeMsgs(bss)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
defer intf.writer.Act(nil, func() {
|
|
||||||
intf.writer.closed = true
|
|
||||||
close(intf.writer.worker)
|
|
||||||
})
|
|
||||||
myLinkPub, myLinkPriv := crypto.NewBoxKeys()
|
|
||||||
meta := version_getBaseMetadata()
|
meta := version_getBaseMetadata()
|
||||||
meta.box = intf.links.core.boxPub
|
meta.key = intf.links.core.public
|
||||||
meta.sig = intf.links.core.sigPub
|
|
||||||
meta.link = *myLinkPub
|
|
||||||
metaBytes := meta.encode()
|
metaBytes := meta.encode()
|
||||||
// TODO timeouts on send/recv (goroutine for send/recv, channel select w/ timer)
|
// TODO timeouts on send/recv (goroutine for send/recv, channel select w/ timer)
|
||||||
var err error
|
var err error
|
||||||
if !util.FuncTimeout(func() { err = intf.msgIO._sendMetaBytes(metaBytes) }, 30*time.Second) {
|
if !util.FuncTimeout(30*time.Second, func() {
|
||||||
|
var n int
|
||||||
|
n, err = intf.conn.Write(metaBytes)
|
||||||
|
if err == nil && n != len(metaBytes) {
|
||||||
|
err = errors.New("incomplete metadata send")
|
||||||
|
}
|
||||||
|
}) {
|
||||||
return nil, errors.New("timeout on metadata send")
|
return nil, errors.New("timeout on metadata send")
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if !util.FuncTimeout(func() { metaBytes, err = intf.msgIO._recvMetaBytes() }, 30*time.Second) {
|
if !util.FuncTimeout(30*time.Second, func() {
|
||||||
|
var n int
|
||||||
|
n, err = io.ReadFull(intf.conn, metaBytes)
|
||||||
|
if err == nil && n != len(metaBytes) {
|
||||||
|
err = errors.New("incomplete metadata recv")
|
||||||
|
}
|
||||||
|
}) {
|
||||||
return nil, errors.New("timeout on metadata recv")
|
return nil, errors.New("timeout on metadata recv")
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -229,35 +195,31 @@ func (intf *link) handler() (chan struct{}, error) {
|
|||||||
}
|
}
|
||||||
// Check if the remote side matches the keys we expected. This is a bit of a weak
|
// Check if the remote side matches the keys we expected. This is a bit of a weak
|
||||||
// check - in future versions we really should check a signature or something like that.
|
// check - in future versions we really should check a signature or something like that.
|
||||||
if pinned := intf.options.pinnedCurve25519Keys; pinned != nil {
|
|
||||||
if _, allowed := pinned[meta.box]; !allowed {
|
|
||||||
intf.links.core.log.Errorf("Failed to connect to node: %q sent curve25519 key that does not match pinned keys", intf.name)
|
|
||||||
return nil, fmt.Errorf("failed to connect: host sent curve25519 key that does not match pinned keys")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if pinned := intf.options.pinnedEd25519Keys; pinned != nil {
|
if pinned := intf.options.pinnedEd25519Keys; pinned != nil {
|
||||||
if _, allowed := pinned[meta.sig]; !allowed {
|
var key crypto.SigPubKey
|
||||||
|
copy(key[:], meta.key)
|
||||||
|
if _, allowed := pinned[key]; !allowed {
|
||||||
intf.links.core.log.Errorf("Failed to connect to node: %q sent ed25519 key that does not match pinned keys", intf.name)
|
intf.links.core.log.Errorf("Failed to connect to node: %q sent ed25519 key that does not match pinned keys", intf.name)
|
||||||
return nil, fmt.Errorf("failed to connect: host sent ed25519 key that does not match pinned keys")
|
return nil, fmt.Errorf("failed to connect: host sent ed25519 key that does not match pinned keys")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Check if we're authorized to connect to this key / IP
|
// Check if we're authorized to connect to this key / IP
|
||||||
|
/* TODO check allowed public keys
|
||||||
if intf.incoming && !intf.force && !intf.links.core.peers.isAllowedEncryptionPublicKey(&meta.box) {
|
if intf.incoming && !intf.force && !intf.links.core.peers.isAllowedEncryptionPublicKey(&meta.box) {
|
||||||
intf.links.core.log.Warnf("%s connection from %s forbidden: AllowedEncryptionPublicKeys does not contain key %s",
|
intf.links.core.log.Warnf("%s connection from %s forbidden: AllowedEncryptionPublicKeys does not contain key %s",
|
||||||
strings.ToUpper(intf.info.linkType), intf.info.remote, hex.EncodeToString(meta.box[:]))
|
strings.ToUpper(intf.info.linkType), intf.info.remote, hex.EncodeToString(meta.box[:]))
|
||||||
intf.msgIO.close()
|
intf.msgIO.close()
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
*/
|
||||||
// Check if we already have a link to this node
|
// Check if we already have a link to this node
|
||||||
intf.info.box = meta.box
|
copy(intf.info.key[:], meta.key)
|
||||||
intf.info.sig = meta.sig
|
|
||||||
intf.links.mutex.Lock()
|
intf.links.mutex.Lock()
|
||||||
if oldIntf, isIn := intf.links.links[intf.info]; isIn {
|
if oldIntf, isIn := intf.links.links[intf.info]; isIn {
|
||||||
intf.links.mutex.Unlock()
|
intf.links.mutex.Unlock()
|
||||||
// FIXME we should really return an error and let the caller block instead
|
// FIXME we should really return an error and let the caller block instead
|
||||||
// That lets them do things like close connections on its own, avoid printing a connection message in the first place, etc.
|
// That lets them do things like close connections on its own, avoid printing a connection message in the first place, etc.
|
||||||
intf.links.core.log.Debugln("DEBUG: found existing interface for", intf.name)
|
intf.links.core.log.Debugln("DEBUG: found existing interface for", intf.name)
|
||||||
intf.msgIO.close()
|
|
||||||
return oldIntf.closed, nil
|
return oldIntf.closed, nil
|
||||||
} else {
|
} else {
|
||||||
intf.closed = make(chan struct{})
|
intf.closed = make(chan struct{})
|
||||||
@ -271,43 +233,13 @@ func (intf *link) handler() (chan struct{}, error) {
|
|||||||
intf.links.core.log.Debugln("DEBUG: registered interface for", intf.name)
|
intf.links.core.log.Debugln("DEBUG: registered interface for", intf.name)
|
||||||
}
|
}
|
||||||
intf.links.mutex.Unlock()
|
intf.links.mutex.Unlock()
|
||||||
// Create peer
|
|
||||||
shared := crypto.GetSharedKey(myLinkPriv, &meta.link)
|
|
||||||
phony.Block(&intf.links.core.peers, func() {
|
|
||||||
// FIXME don't use phony.Block, it's bad practice, even if it's safe here
|
|
||||||
intf.peer = intf.links.core.peers._newPeer(&meta.box, &meta.sig, shared, intf)
|
|
||||||
})
|
|
||||||
if intf.peer == nil {
|
|
||||||
return nil, errors.New("failed to create peer")
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
// More cleanup can go here
|
|
||||||
intf.Act(nil, func() {
|
|
||||||
intf.shutdown = true
|
|
||||||
intf.peer.Act(intf, intf.peer._removeSelf)
|
|
||||||
})
|
|
||||||
}()
|
|
||||||
themAddr := make([]byte, 16) // TODO address.AddrForNodeID(crypto.GetNodeID(&intf.info.box))
|
themAddr := make([]byte, 16) // TODO address.AddrForNodeID(crypto.GetNodeID(&intf.info.box))
|
||||||
themAddrString := net.IP(themAddr[:]).String()
|
themAddrString := net.IP(themAddr[:]).String()
|
||||||
themString := fmt.Sprintf("%s@%s", themAddrString, intf.info.remote)
|
themString := fmt.Sprintf("%s@%s", themAddrString, intf.info.remote)
|
||||||
intf.links.core.log.Infof("Connected %s: %s, source %s",
|
intf.links.core.log.Infof("Connected %s: %s, source %s",
|
||||||
strings.ToUpper(intf.info.linkType), themString, intf.info.local)
|
strings.ToUpper(intf.info.linkType), themString, intf.info.local)
|
||||||
// Start things
|
// Run the handler
|
||||||
go intf.peer.start()
|
err = intf.links.core.PacketConn.HandleConn(ed25519.PublicKey(intf.info.key[:]), intf.conn)
|
||||||
intf.Act(nil, intf._notifyIdle)
|
|
||||||
intf.reader.Act(nil, intf.reader._read)
|
|
||||||
// Wait for the reader to finish
|
|
||||||
// TODO find a way to do this without keeping live goroutines around
|
|
||||||
done := make(chan struct{})
|
|
||||||
defer close(done)
|
|
||||||
go func() {
|
|
||||||
select {
|
|
||||||
case <-intf.links.stopped:
|
|
||||||
intf.msgIO.close()
|
|
||||||
case <-done:
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
err = <-intf.reader.err
|
|
||||||
// TODO don't report an error if it's just a 'use of closed network connection'
|
// TODO don't report an error if it's just a 'use of closed network connection'
|
||||||
if err != nil {
|
if err != nil {
|
||||||
intf.links.core.log.Infof("Disconnected %s: %s, source %s; error: %s",
|
intf.links.core.log.Infof("Disconnected %s: %s, source %s; error: %s",
|
||||||
@ -319,43 +251,8 @@ func (intf *link) handler() (chan struct{}, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
// link needs to match the linkInterface type needed by the peers
|
|
||||||
|
|
||||||
type linkInterface interface {
|
|
||||||
out([][]byte)
|
|
||||||
linkOut([]byte)
|
|
||||||
close()
|
|
||||||
// These next ones are only used by the API
|
|
||||||
name() string
|
|
||||||
local() string
|
|
||||||
remote() string
|
|
||||||
interfaceType() string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (intf *link) out(bss [][]byte) {
|
|
||||||
intf.Act(nil, func() {
|
|
||||||
// nil to prevent it from blocking if the link is somehow frozen
|
|
||||||
// this is safe because another packet won't be sent until the link notifies
|
|
||||||
// the peer that it's ready for one
|
|
||||||
intf.writer.sendFrom(nil, bss)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (intf *link) linkOut(bs []byte) {
|
|
||||||
intf.Act(nil, func() {
|
|
||||||
// nil to prevent it from blocking if the link is somehow frozen
|
|
||||||
// FIXME this is hypothetically not safe, the peer shouldn't be sending
|
|
||||||
// additional packets until this one finishes, otherwise this could leak
|
|
||||||
// memory if writing happens slower than link packets are generated...
|
|
||||||
// that seems unlikely, so it's a lesser evil than deadlocking for now
|
|
||||||
intf.writer.sendFrom(nil, [][]byte{bs})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (intf *link) close() {
|
func (intf *link) close() {
|
||||||
intf.Act(nil, func() { intf.msgIO.close() })
|
intf.conn.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (intf *link) name() string {
|
func (intf *link) name() string {
|
||||||
@ -373,168 +270,3 @@ func (intf *link) remote() string {
|
|||||||
func (intf *link) interfaceType() string {
|
func (intf *link) interfaceType() string {
|
||||||
return intf.info.linkType
|
return intf.info.linkType
|
||||||
}
|
}
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
|
||||||
const (
|
|
||||||
sendTime = 1 * time.Second // How long to wait before deciding a send is blocked
|
|
||||||
keepAliveTime = 2 * time.Second // How long to wait before sending a keep-alive response if we have no real traffic to send
|
|
||||||
stallTime = 6 * time.Second // How long to wait for response traffic before deciding the connection has stalled
|
|
||||||
closeTime = 2 * switch_timeout // How long to wait before closing the link
|
|
||||||
)
|
|
||||||
|
|
||||||
// notify the intf that we're currently sending
|
|
||||||
func (intf *link) notifySending(size int) {
|
|
||||||
intf.Act(&intf.writer, func() {
|
|
||||||
intf.sendTimer = time.AfterFunc(sendTime, intf.notifyBlockedSend)
|
|
||||||
if intf.keepAliveTimer != nil {
|
|
||||||
intf.keepAliveTimer.Stop()
|
|
||||||
intf.keepAliveTimer = nil
|
|
||||||
}
|
|
||||||
intf.peer.notifyBlocked(intf)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// This gets called from a time.AfterFunc, and notifies the switch that we appear
|
|
||||||
// to have gotten blocked on a write, so the switch should start routing traffic
|
|
||||||
// through other links, if alternatives exist
|
|
||||||
func (intf *link) notifyBlockedSend() {
|
|
||||||
intf.Act(nil, func() {
|
|
||||||
if intf.sendTimer != nil {
|
|
||||||
//As far as we know, we're still trying to send, and the timer fired.
|
|
||||||
intf.sendTimer.Stop()
|
|
||||||
intf.sendTimer = nil
|
|
||||||
if !intf.shutdown && intf.writeUnblocked {
|
|
||||||
intf.writeUnblocked = false
|
|
||||||
intf.links.core.switchTable.blockPeer(intf, intf.peer.port, true)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// notify the intf that we've finished sending, returning the peer to the switch
|
|
||||||
func (intf *link) notifySent(size int) {
|
|
||||||
intf.Act(&intf.writer, func() {
|
|
||||||
if intf.sendTimer != nil {
|
|
||||||
intf.sendTimer.Stop()
|
|
||||||
intf.sendTimer = nil
|
|
||||||
}
|
|
||||||
if intf.keepAliveTimer != nil {
|
|
||||||
// TODO? unset this when we start sending, not when we finish...
|
|
||||||
intf.keepAliveTimer.Stop()
|
|
||||||
intf.keepAliveTimer = nil
|
|
||||||
}
|
|
||||||
intf._notifyIdle()
|
|
||||||
if size > 0 && intf.stallTimer == nil {
|
|
||||||
intf.stallTimer = time.AfterFunc(stallTime, intf.notifyStalled)
|
|
||||||
}
|
|
||||||
if !intf.shutdown && !intf.writeUnblocked {
|
|
||||||
intf.writeUnblocked = true
|
|
||||||
intf.links.core.switchTable.unblockPeer(intf, intf.peer.port, true)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Notify the peer that we're ready for more traffic
|
|
||||||
func (intf *link) _notifyIdle() {
|
|
||||||
intf.peer.Act(intf, intf.peer._handleIdle)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the peer as stalled, to prevent them from returning to the switch until a read succeeds
|
|
||||||
func (intf *link) notifyStalled() {
|
|
||||||
intf.Act(nil, func() { // Sent from a time.AfterFunc
|
|
||||||
if intf.stallTimer != nil {
|
|
||||||
intf.stallTimer.Stop()
|
|
||||||
intf.stallTimer = nil
|
|
||||||
if !intf.shutdown && intf.readUnblocked {
|
|
||||||
intf.readUnblocked = false
|
|
||||||
intf.links.core.switchTable.blockPeer(intf, intf.peer.port, false)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// reset the close timer
|
|
||||||
func (intf *link) notifyReading() {
|
|
||||||
intf.Act(&intf.reader, func() {
|
|
||||||
intf.closeTimer = time.AfterFunc(closeTime, func() { intf.msgIO.close() })
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// wake up the link if it was stalled, and (if size > 0) prepare to send keep-alive traffic
|
|
||||||
func (intf *link) notifyRead(size int) {
|
|
||||||
intf.Act(&intf.reader, func() {
|
|
||||||
intf.closeTimer.Stop()
|
|
||||||
if intf.stallTimer != nil {
|
|
||||||
intf.stallTimer.Stop()
|
|
||||||
intf.stallTimer = nil
|
|
||||||
}
|
|
||||||
if size > 0 && intf.keepAliveTimer == nil {
|
|
||||||
intf.keepAliveTimer = time.AfterFunc(keepAliveTime, intf.notifyDoKeepAlive)
|
|
||||||
}
|
|
||||||
if !intf.shutdown && !intf.readUnblocked {
|
|
||||||
intf.readUnblocked = true
|
|
||||||
intf.links.core.switchTable.unblockPeer(intf, intf.peer.port, false)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// We need to send keep-alive traffic now
|
|
||||||
func (intf *link) notifyDoKeepAlive() {
|
|
||||||
intf.Act(nil, func() { // Sent from a time.AfterFunc
|
|
||||||
if intf.keepAliveTimer != nil {
|
|
||||||
intf.keepAliveTimer.Stop()
|
|
||||||
intf.keepAliveTimer = nil
|
|
||||||
intf.writer.sendFrom(nil, [][]byte{nil}) // Empty keep-alive traffic
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
type linkWriter struct {
|
|
||||||
phony.Inbox
|
|
||||||
intf *link
|
|
||||||
worker chan [][]byte
|
|
||||||
closed bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *linkWriter) sendFrom(from phony.Actor, bss [][]byte) {
|
|
||||||
w.Act(from, func() {
|
|
||||||
if w.closed {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
var size int
|
|
||||||
for _, bs := range bss {
|
|
||||||
size += len(bs)
|
|
||||||
}
|
|
||||||
w.intf.notifySending(size)
|
|
||||||
w.worker <- bss
|
|
||||||
w.intf.notifySent(size)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
type linkReader struct {
|
|
||||||
phony.Inbox
|
|
||||||
intf *link
|
|
||||||
err chan error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *linkReader) _read() {
|
|
||||||
r.intf.notifyReading()
|
|
||||||
msg, err := r.intf.msgIO.readMsg()
|
|
||||||
r.intf.notifyRead(len(msg))
|
|
||||||
if len(msg) > 0 {
|
|
||||||
r.intf.peer.handlePacketFrom(r, msg)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
if err != io.EOF {
|
|
||||||
r.err <- err
|
|
||||||
}
|
|
||||||
close(r.err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Now try to read again
|
|
||||||
r.Act(nil, r._read)
|
|
||||||
}
|
|
||||||
|
@ -1,45 +0,0 @@
|
|||||||
package yggdrasil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"net"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Listener waits for incoming sessions
|
|
||||||
type Listener struct {
|
|
||||||
core *Core
|
|
||||||
conn chan *Conn
|
|
||||||
close chan interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Accept blocks until a new incoming session is received
|
|
||||||
func (l *Listener) Accept() (net.Conn, error) {
|
|
||||||
select {
|
|
||||||
case c, ok := <-l.conn:
|
|
||||||
if !ok {
|
|
||||||
return nil, errors.New("listener closed")
|
|
||||||
}
|
|
||||||
return c, nil
|
|
||||||
case <-l.close:
|
|
||||||
return nil, errors.New("listener closed")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close will stop the listener
|
|
||||||
func (l *Listener) Close() (err error) {
|
|
||||||
defer func() {
|
|
||||||
recover()
|
|
||||||
err = errors.New("already closed")
|
|
||||||
}()
|
|
||||||
if l.core.router.sessions.listener == l {
|
|
||||||
l.core.router.sessions.listener = nil
|
|
||||||
}
|
|
||||||
close(l.close)
|
|
||||||
close(l.conn)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Addr returns the address of the listener
|
|
||||||
func (l *Listener) Addr() net.Addr {
|
|
||||||
return &l.core.boxPub
|
|
||||||
}
|
|
@ -1,209 +0,0 @@
|
|||||||
package yggdrasil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Arceliar/phony"
|
|
||||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
|
||||||
"github.com/yggdrasil-network/yggdrasil-go/src/version"
|
|
||||||
)
|
|
||||||
|
|
||||||
type nodeinfo struct {
|
|
||||||
phony.Inbox
|
|
||||||
core *Core
|
|
||||||
myNodeInfo NodeInfoPayload
|
|
||||||
callbacks map[crypto.BoxPubKey]nodeinfoCallback
|
|
||||||
cache map[crypto.BoxPubKey]nodeinfoCached
|
|
||||||
table *lookupTable
|
|
||||||
}
|
|
||||||
|
|
||||||
type nodeinfoCached struct {
|
|
||||||
payload NodeInfoPayload
|
|
||||||
created time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
type nodeinfoCallback struct {
|
|
||||||
call func(nodeinfo *NodeInfoPayload)
|
|
||||||
created time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// Represents a session nodeinfo packet.
|
|
||||||
type nodeinfoReqRes struct {
|
|
||||||
SendPermPub crypto.BoxPubKey // Sender's permanent key
|
|
||||||
SendCoords []byte // Sender's coords
|
|
||||||
IsResponse bool
|
|
||||||
NodeInfo NodeInfoPayload
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialises the nodeinfo cache/callback maps, and starts a goroutine to keep
|
|
||||||
// the cache/callback maps clean of stale entries
|
|
||||||
func (m *nodeinfo) init(core *Core) {
|
|
||||||
m.Act(nil, func() {
|
|
||||||
m._init(core)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *nodeinfo) _init(core *Core) {
|
|
||||||
m.core = core
|
|
||||||
m.callbacks = make(map[crypto.BoxPubKey]nodeinfoCallback)
|
|
||||||
m.cache = make(map[crypto.BoxPubKey]nodeinfoCached)
|
|
||||||
|
|
||||||
m._cleanup()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *nodeinfo) _cleanup() {
|
|
||||||
for boxPubKey, callback := range m.callbacks {
|
|
||||||
if time.Since(callback.created) > time.Minute {
|
|
||||||
delete(m.callbacks, boxPubKey)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for boxPubKey, cache := range m.cache {
|
|
||||||
if time.Since(cache.created) > time.Hour {
|
|
||||||
delete(m.cache, boxPubKey)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
time.AfterFunc(time.Second*30, func() {
|
|
||||||
m.Act(nil, m._cleanup)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add a callback for a nodeinfo lookup
|
|
||||||
func (m *nodeinfo) addCallback(sender crypto.BoxPubKey, call func(nodeinfo *NodeInfoPayload)) {
|
|
||||||
m.Act(nil, func() {
|
|
||||||
m._addCallback(sender, call)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *nodeinfo) _addCallback(sender crypto.BoxPubKey, call func(nodeinfo *NodeInfoPayload)) {
|
|
||||||
m.callbacks[sender] = nodeinfoCallback{
|
|
||||||
created: time.Now(),
|
|
||||||
call: call,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handles the callback, if there is one
|
|
||||||
func (m *nodeinfo) _callback(sender crypto.BoxPubKey, nodeinfo NodeInfoPayload) {
|
|
||||||
if callback, ok := m.callbacks[sender]; ok {
|
|
||||||
callback.call(&nodeinfo)
|
|
||||||
delete(m.callbacks, sender)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the current node's nodeinfo
|
|
||||||
func (m *nodeinfo) getNodeInfo() (p NodeInfoPayload) {
|
|
||||||
phony.Block(m, func() {
|
|
||||||
p = m._getNodeInfo()
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *nodeinfo) _getNodeInfo() NodeInfoPayload {
|
|
||||||
return m.myNodeInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the current node's nodeinfo
|
|
||||||
func (m *nodeinfo) setNodeInfo(given interface{}, privacy bool) (err error) {
|
|
||||||
phony.Block(m, func() {
|
|
||||||
err = m._setNodeInfo(given, privacy)
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *nodeinfo) _setNodeInfo(given interface{}, privacy bool) error {
|
|
||||||
defaults := map[string]interface{}{
|
|
||||||
"buildname": version.BuildName(),
|
|
||||||
"buildversion": version.BuildVersion(),
|
|
||||||
"buildplatform": runtime.GOOS,
|
|
||||||
"buildarch": runtime.GOARCH,
|
|
||||||
}
|
|
||||||
newnodeinfo := make(map[string]interface{})
|
|
||||||
if !privacy {
|
|
||||||
for k, v := range defaults {
|
|
||||||
newnodeinfo[k] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if nodeinfomap, ok := given.(map[string]interface{}); ok {
|
|
||||||
for key, value := range nodeinfomap {
|
|
||||||
if _, ok := defaults[key]; ok {
|
|
||||||
if strvalue, strok := value.(string); strok && strings.EqualFold(strvalue, "null") || value == nil {
|
|
||||||
delete(newnodeinfo, key)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
newnodeinfo[key] = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
newjson, err := json.Marshal(newnodeinfo)
|
|
||||||
if err == nil {
|
|
||||||
if len(newjson) > 16384 {
|
|
||||||
return errors.New("NodeInfo exceeds max length of 16384 bytes")
|
|
||||||
}
|
|
||||||
m.myNodeInfo = newjson
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add nodeinfo into the cache for a node
|
|
||||||
func (m *nodeinfo) _addCachedNodeInfo(key crypto.BoxPubKey, payload NodeInfoPayload) {
|
|
||||||
m.cache[key] = nodeinfoCached{
|
|
||||||
created: time.Now(),
|
|
||||||
payload: payload,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get a nodeinfo entry from the cache
|
|
||||||
func (m *nodeinfo) _getCachedNodeInfo(key crypto.BoxPubKey) (NodeInfoPayload, error) {
|
|
||||||
if nodeinfo, ok := m.cache[key]; ok {
|
|
||||||
return nodeinfo.payload, nil
|
|
||||||
}
|
|
||||||
return NodeInfoPayload{}, errors.New("No cache entry found")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handles a nodeinfo request/response - called from the router
|
|
||||||
func (m *nodeinfo) handleNodeInfo(from phony.Actor, nodeinfo *nodeinfoReqRes) {
|
|
||||||
m.Act(from, func() {
|
|
||||||
m._handleNodeInfo(nodeinfo)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *nodeinfo) _handleNodeInfo(nodeinfo *nodeinfoReqRes) {
|
|
||||||
if nodeinfo.IsResponse {
|
|
||||||
m._callback(nodeinfo.SendPermPub, nodeinfo.NodeInfo)
|
|
||||||
m._addCachedNodeInfo(nodeinfo.SendPermPub, nodeinfo.NodeInfo)
|
|
||||||
} else {
|
|
||||||
m._sendNodeInfo(nodeinfo.SendPermPub, nodeinfo.SendCoords, true)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send nodeinfo request or response - called from the router
|
|
||||||
func (m *nodeinfo) sendNodeInfo(key crypto.BoxPubKey, coords []byte, isResponse bool) {
|
|
||||||
m.Act(nil, func() {
|
|
||||||
m._sendNodeInfo(key, coords, isResponse)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *nodeinfo) _sendNodeInfo(key crypto.BoxPubKey, coords []byte, isResponse bool) {
|
|
||||||
loc := m.table.self
|
|
||||||
nodeinfo := nodeinfoReqRes{
|
|
||||||
SendCoords: loc.getCoords(),
|
|
||||||
IsResponse: isResponse,
|
|
||||||
NodeInfo: m._getNodeInfo(),
|
|
||||||
}
|
|
||||||
bs := nodeinfo.encode()
|
|
||||||
shared := m.core.router.sessions.getSharedKey(&m.core.boxPriv, &key)
|
|
||||||
payload, nonce := crypto.BoxSeal(shared, bs, nil)
|
|
||||||
p := wire_protoTrafficPacket{
|
|
||||||
Coords: coords,
|
|
||||||
ToKey: key,
|
|
||||||
FromKey: m.core.boxPub,
|
|
||||||
Nonce: *nonce,
|
|
||||||
Payload: payload,
|
|
||||||
}
|
|
||||||
packet := p.encode()
|
|
||||||
m.core.router.out(packet)
|
|
||||||
}
|
|
@ -1,119 +0,0 @@
|
|||||||
package yggdrasil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"container/heap"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TODO separate queues per e.g. traffic flow
|
|
||||||
// For now, we put everything in queue
|
|
||||||
|
|
||||||
type pqStreamID string
|
|
||||||
|
|
||||||
type pqPacketInfo struct {
|
|
||||||
packet []byte
|
|
||||||
time time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
type pqStream struct {
|
|
||||||
id pqStreamID
|
|
||||||
infos []pqPacketInfo
|
|
||||||
size uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
type packetQueue struct {
|
|
||||||
streams []pqStream
|
|
||||||
size uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
// drop will remove a packet from the queue, returning it to the pool
|
|
||||||
// returns true if a packet was removed, false otherwise
|
|
||||||
func (q *packetQueue) drop() bool {
|
|
||||||
if q.size == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
var longestIdx int
|
|
||||||
for idx := range q.streams {
|
|
||||||
if q.streams[idx].size > q.streams[longestIdx].size {
|
|
||||||
longestIdx = idx
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stream := q.streams[longestIdx]
|
|
||||||
info := stream.infos[0]
|
|
||||||
if len(stream.infos) > 1 {
|
|
||||||
stream.infos = stream.infos[1:]
|
|
||||||
stream.size -= uint64(len(info.packet))
|
|
||||||
q.streams[longestIdx] = stream
|
|
||||||
q.size -= uint64(len(info.packet))
|
|
||||||
heap.Fix(q, longestIdx)
|
|
||||||
} else {
|
|
||||||
heap.Remove(q, longestIdx)
|
|
||||||
}
|
|
||||||
pool_putBytes(info.packet)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *packetQueue) push(packet []byte) {
|
|
||||||
_, coords := wire_getTrafficOffsetAndCoords(packet)
|
|
||||||
id := pqStreamID(coords) // just coords for now
|
|
||||||
info := pqPacketInfo{packet: packet, time: time.Now()}
|
|
||||||
for idx := range q.streams {
|
|
||||||
if q.streams[idx].id == id {
|
|
||||||
q.streams[idx].infos = append(q.streams[idx].infos, info)
|
|
||||||
q.streams[idx].size += uint64(len(packet))
|
|
||||||
q.size += uint64(len(packet))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stream := pqStream{id: id, size: uint64(len(packet))}
|
|
||||||
stream.infos = append(stream.infos, info)
|
|
||||||
heap.Push(q, stream)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *packetQueue) pop() ([]byte, bool) {
|
|
||||||
if q.size > 0 {
|
|
||||||
stream := q.streams[0]
|
|
||||||
info := stream.infos[0]
|
|
||||||
if len(stream.infos) > 1 {
|
|
||||||
stream.infos = stream.infos[1:]
|
|
||||||
stream.size -= uint64(len(info.packet))
|
|
||||||
q.streams[0] = stream
|
|
||||||
q.size -= uint64(len(info.packet))
|
|
||||||
heap.Fix(q, 0)
|
|
||||||
} else {
|
|
||||||
heap.Remove(q, 0)
|
|
||||||
}
|
|
||||||
return info.packet, true
|
|
||||||
}
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
// Interface methods for packetQueue to satisfy heap.Interface
|
|
||||||
|
|
||||||
func (q *packetQueue) Len() int {
|
|
||||||
return len(q.streams)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *packetQueue) Less(i, j int) bool {
|
|
||||||
return q.streams[i].infos[0].time.Before(q.streams[j].infos[0].time)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *packetQueue) Swap(i, j int) {
|
|
||||||
q.streams[i], q.streams[j] = q.streams[j], q.streams[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *packetQueue) Push(x interface{}) {
|
|
||||||
stream := x.(pqStream)
|
|
||||||
q.streams = append(q.streams, stream)
|
|
||||||
q.size += stream.size
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *packetQueue) Pop() interface{} {
|
|
||||||
idx := len(q.streams) - 1
|
|
||||||
stream := q.streams[idx]
|
|
||||||
q.streams = q.streams[:idx]
|
|
||||||
q.size -= stream.size
|
|
||||||
return stream
|
|
||||||
}
|
|
@@ -1,447 +0,0 @@
package yggdrasil

// TODO cleanup, this file is kind of a mess
// Commented code should be removed
// Live code should be better commented

import (
	"encoding/hex"
	"time"

	"github.com/yggdrasil-network/yggdrasil-go/src/crypto"

	"github.com/Arceliar/phony"
)

// The peers struct represents peers with an active connection.
// Incoming packets are passed to the corresponding peer, which handles them somehow.
// In most cases, this involves passing the packet to the handler for outgoing traffic to another peer.
// In other cases, the packet is link protocol traffic used to build the spanning tree, in which case this checks signatures and passes the message along to the switch.
type peers struct {
	phony.Inbox
	core  *Core
	ports map[switchPort]*peer // use CoW semantics, share updated version with each peer
	table *lookupTable         // Sent from switch, share updated version with each peer
}

// Initializes the peers struct.
func (ps *peers) init(c *Core) {
	ps.core = c
	ps.ports = make(map[switchPort]*peer)
	ps.table = new(lookupTable)
}

func (ps *peers) reconfigure() {
	// This is where reconfiguration would go, if we had anything to do
}

// Returns true if an incoming peer connection to a key is allowed, either
// because the key is in the whitelist or because the whitelist is empty.
func (ps *peers) isAllowedEncryptionPublicKey(box *crypto.BoxPubKey) bool {
	boxstr := hex.EncodeToString(box[:])
	ps.core.config.Mutex.RLock()
	defer ps.core.config.Mutex.RUnlock()
	for _, v := range ps.core.config.Current.AllowedEncryptionPublicKeys {
		if v == boxstr {
			return true
		}
	}
	return len(ps.core.config.Current.AllowedEncryptionPublicKeys) == 0
}

// Adds a key to the whitelist.
func (ps *peers) addAllowedEncryptionPublicKey(box string) {
	ps.core.config.Mutex.RLock()
	defer ps.core.config.Mutex.RUnlock()
	ps.core.config.Current.AllowedEncryptionPublicKeys =
		append(ps.core.config.Current.AllowedEncryptionPublicKeys, box)
}

// Removes a key from the whitelist.
func (ps *peers) removeAllowedEncryptionPublicKey(box string) {
	ps.core.config.Mutex.RLock()
	defer ps.core.config.Mutex.RUnlock()
	for k, v := range ps.core.config.Current.AllowedEncryptionPublicKeys {
		if v == box {
			ps.core.config.Current.AllowedEncryptionPublicKeys =
				append(ps.core.config.Current.AllowedEncryptionPublicKeys[:k],
					ps.core.config.Current.AllowedEncryptionPublicKeys[k+1:]...)
		}
	}
}

// Gets the whitelist of allowed keys for incoming connections.
func (ps *peers) getAllowedEncryptionPublicKeys() []string {
	ps.core.config.Mutex.RLock()
	defer ps.core.config.Mutex.RUnlock()
	return ps.core.config.Current.AllowedEncryptionPublicKeys
}

// Information known about a peer, including their box/sig keys, precomputed shared keys (static and ephemeral) and a handler for their outgoing traffic
type peer struct {
	phony.Inbox
	core       *Core
	intf       linkInterface
	port       switchPort
	box        crypto.BoxPubKey
	sig        crypto.SigPubKey
	shared     crypto.BoxSharedKey
	linkShared crypto.BoxSharedKey
	endpoint   string
	firstSeen  time.Time // To track uptime for getPeers
	dinfo      *dhtInfo  // used to keep the DHT working
	// The below aren't actually useful internally, they're just gathered for getPeers statistics
	bytesSent  uint64
	bytesRecvd uint64
	ports      map[switchPort]*peer
	table      *lookupTable
	queue      packetQueue
	max        uint64
	seq        uint64 // this and idle are used to detect when to drop packets from queue
	idle       bool
	drop       bool // set to true if we're dropping packets from the queue
}

func (ps *peers) updateTables(from phony.Actor, table *lookupTable) {
	ps.Act(from, func() {
		ps.table = table
		ps._updatePeers()
	})
}

func (ps *peers) _updatePeers() {
	ports := ps.ports
	table := ps.table
	for _, peer := range ps.ports {
		p := peer // peer is mutated during iteration
		p.Act(ps, func() {
			p.ports = ports
			p.table = table
		})
	}
}

// Creates a new peer with the specified box, sig, and linkShared keys, using the lowest unoccupied port number.
func (ps *peers) _newPeer(box *crypto.BoxPubKey, sig *crypto.SigPubKey, linkShared *crypto.BoxSharedKey, intf linkInterface) *peer {
	now := time.Now()
	p := peer{box: *box,
		core:       ps.core,
		intf:       intf,
		sig:        *sig,
		shared:     *crypto.GetSharedKey(&ps.core.boxPriv, box),
		linkShared: *linkShared,
		firstSeen:  now,
	}
	oldPorts := ps.ports
	newPorts := make(map[switchPort]*peer)
	for k, v := range oldPorts {
		newPorts[k] = v
	}
	for idx := switchPort(0); true; idx++ {
		if _, isIn := newPorts[idx]; !isIn {
			p.port = switchPort(idx)
			newPorts[p.port] = &p
			break
		}
	}
	ps.ports = newPorts
	ps._updatePeers()
	return &p
}

func (p *peer) _removeSelf() {
	p.core.peers.Act(p, func() {
		p.core.peers._removePeer(p)
	})
}

// Removes a peer for a given port, if one exists.
func (ps *peers) _removePeer(p *peer) {
	if q := ps.ports[p.port]; p.port == 0 || q != p {
		return
	} // Can't remove self peer or nonexistent peer
	ps.core.switchTable.forgetPeer(ps, p.port)
	oldPorts := ps.ports
	newPorts := make(map[switchPort]*peer)
	for k, v := range oldPorts {
		newPorts[k] = v
	}
	delete(newPorts, p.port)
	p.intf.close()
	ps.ports = newPorts
	ps._updatePeers()
}

// If called, sends a notification to each peer that they should send a new switch message.
// Mainly called by the switch after an update.
func (ps *peers) sendSwitchMsgs(from phony.Actor) {
	ps.Act(from, func() {
		for _, peer := range ps.ports {
			p := peer
			if p.port == 0 {
				continue
			}
			p.Act(ps, p._sendSwitchMsg)
		}
	})
}

func (ps *peers) updateDHT(from phony.Actor) {
	ps.Act(from, func() {
		for _, peer := range ps.ports {
			p := peer
			if p.port == 0 {
				continue
			}
			p.Act(ps, p._updateDHT)
		}
	})
}

// This must be launched in a separate goroutine by whatever sets up the peer struct.
func (p *peer) start() {
	// Just for good measure, immediately send a switch message to this peer when we start
	p.Act(nil, p._sendSwitchMsg)
}

func (p *peer) _updateDHT() {
	if p.dinfo != nil {
		p.core.router.insertPeer(p, p.dinfo)
	}
}

func (p *peer) handlePacketFrom(from phony.Actor, packet []byte) {
	p.Act(from, func() {
		p._handlePacket(packet)
	})
}

// Called to handle incoming packets.
// Passes the packet to a handler for that packet type.
func (p *peer) _handlePacket(packet []byte) {
	// FIXME this is off by stream padding and msg length overhead, should be done in tcp.go
	p.bytesRecvd += uint64(len(packet))
	pType, pTypeLen := wire_decode_uint64(packet)
	if pTypeLen == 0 {
		return
	}
	switch pType {
	case wire_Traffic:
		p._handleTraffic(packet)
	case wire_ProtocolTraffic:
		p._handleTraffic(packet)
	case wire_LinkProtocolTraffic:
		p._handleLinkTraffic(packet)
	default:
	}
}

// Called to handle traffic or protocolTraffic packets.
// In either case, this reads the coords from the packet header, does a switch lookup, and forwards to the next node.
func (p *peer) _handleTraffic(packet []byte) {
	if _, isIn := p.table.elems[p.port]; !isIn && p.port != 0 {
		// Drop traffic if the peer isn't in the switch
		return
	}
	obs, coords := wire_getTrafficOffsetAndCoords(packet)
	offset, _ := wire_decode_uint64(obs)
	ports := switch_getPorts(coords)
	if offset == 0 {
		offset = p.table.getOffset(ports)
	}
	var next switchPort
	if offset == 0 {
		// Greedy routing, find the best next hop
		next = p.table.lookup(ports)
	} else {
		// Source routing, read next hop from coords and update offset/obs
		if int(offset) < len(ports) {
			next = ports[offset]
			offset += 1
			// FIXME this breaks if offset is > 127, it's just for testing
			wire_put_uint64(offset, obs[:0])
		}
	}
	packet = wire_put_uint64(uint64(p.port), packet)
	if nPeer, isIn := p.ports[next]; isIn {
		nPeer.sendPacketFrom(p, packet)
	}
	//p.core.switchTable.packetInFrom(p, packet)
}

func (p *peer) sendPacketFrom(from phony.Actor, packet []byte) {
	p.Act(from, func() {
		p._sendPacket(packet)
	})
}

func (p *peer) _sendPacket(packet []byte) {
	p.queue.push(packet)
	if p.idle {
		p.idle = false
		p._handleIdle()
	} else if p.drop {
		for p.queue.size > p.max {
			p.queue.drop()
		}
	}
}

func (p *peer) _handleIdle() {
	var packets [][]byte
	var size uint64
	for {
		if packet, success := p.queue.pop(); success {
			packets = append(packets, packet)
			size += uint64(len(packet))
		} else {
			break
		}
	}
	p.seq++
	if len(packets) > 0 {
		p.bytesSent += uint64(size)
		p.intf.out(packets)
		p.max = p.queue.size
	} else {
		p.idle = true
	}
	p.drop = false
}

func (p *peer) notifyBlocked(from phony.Actor) {
	p.Act(from, func() {
		seq := p.seq
		p.Act(nil, func() {
			if seq == p.seq {
				p.drop = true
				p.max = 2*p.queue.size + streamMsgSize
			}
		})
	})
}

// This wraps the packet in the inner (ephemeral) and outer (permanent) crypto layers.
// It sends it to p.linkOut, which bypasses the usual packet queues.
func (p *peer) _sendLinkPacket(packet []byte) {
	innerPayload, innerNonce := crypto.BoxSeal(&p.linkShared, packet, nil)
	innerLinkPacket := wire_linkProtoTrafficPacket{
		Nonce:   *innerNonce,
		Payload: innerPayload,
	}
	outerPayload := innerLinkPacket.encode()
	bs, nonce := crypto.BoxSeal(&p.shared, outerPayload, nil)
	linkPacket := wire_linkProtoTrafficPacket{
		Nonce:   *nonce,
		Payload: bs,
	}
	packet = linkPacket.encode()
	p.intf.linkOut(packet)
}

// Decrypts the outer (permanent) and inner (ephemeral) crypto layers on link traffic.
// Identifies the link traffic type and calls the appropriate handler.
func (p *peer) _handleLinkTraffic(bs []byte) {
	packet := wire_linkProtoTrafficPacket{}
	if !packet.decode(bs) {
		return
	}
	outerPayload, isOK := crypto.BoxOpen(&p.shared, packet.Payload, &packet.Nonce)
	if !isOK {
		return
	}
	innerPacket := wire_linkProtoTrafficPacket{}
	if !innerPacket.decode(outerPayload) {
		return
	}
	payload, isOK := crypto.BoxOpen(&p.linkShared, innerPacket.Payload, &innerPacket.Nonce)
	if !isOK {
		return
	}
	pType, pTypeLen := wire_decode_uint64(payload)
	if pTypeLen == 0 {
		return
	}
	switch pType {
	case wire_SwitchMsg:
		p._handleSwitchMsg(payload)
	default:
	}
}

// Gets a switchMsg from the switch, adds signed next-hop info for this peer, and sends it to them.
func (p *peer) _sendSwitchMsg() {
	msg := p.table.getMsg()
	if msg == nil {
		return
	}
	bs := getBytesForSig(&p.sig, msg)
	msg.Hops = append(msg.Hops, switchMsgHop{
		Port: p.port,
		Next: p.sig,
		Sig:  *crypto.Sign(&p.core.sigPriv, bs),
	})
	packet := msg.encode()
	p._sendLinkPacket(packet)
}

// Handles a switchMsg from the peer, checking signatures and passing good messages to the switch.
// Also creates a dhtInfo struct and arranges for it to be added to the dht (this is how dht bootstrapping begins).
func (p *peer) _handleSwitchMsg(packet []byte) {
	var msg switchMsg
	if !msg.decode(packet) {
		return
	}
	if len(msg.Hops) < 1 {
		p._removeSelf()
		return
	}
	var loc switchLocator
	prevKey := msg.Root
	for idx, hop := range msg.Hops {
		// Check signatures and collect coords for dht
		sigMsg := msg
		sigMsg.Hops = msg.Hops[:idx]
		loc.coords = append(loc.coords, hop.Port)
		bs := getBytesForSig(&hop.Next, &sigMsg)
		if !crypto.Verify(&prevKey, bs, &hop.Sig) {
			p._removeSelf()
			return
		}
		prevKey = hop.Next
	}
	p.core.switchTable.Act(p, func() {
		if !p.core.switchTable._checkRoot(&msg) {
			// Bad switch message
			p.Act(&p.core.switchTable, func() {
				p.dinfo = nil
			})
		} else {
			// handle the message
			p.core.switchTable._handleMsg(&msg, p.port, false)
			p.Act(&p.core.switchTable, func() {
				// Pass a message to the dht informing it that this peer (still) exists
				loc.coords = loc.coords[:len(loc.coords)-1]
				p.dinfo = &dhtInfo{
					key:    p.box,
					coords: loc.getCoords(),
				}
				p._updateDHT()
			})
		}
	})
}

// This generates the bytes that we sign or check the signature of for a switchMsg.
// It begins with the next node's key, followed by the root and the timestamp, followed by coords being advertised to the next node.
func getBytesForSig(next *crypto.SigPubKey, msg *switchMsg) []byte {
	var loc switchLocator
	for _, hop := range msg.Hops {
		loc.coords = append(loc.coords, hop.Port)
	}
	bs := append([]byte(nil), next[:]...)
	bs = append(bs, msg.Root[:]...)
	bs = append(bs, wire_encode_uint64(wire_intToUint(msg.TStamp))...)
	bs = append(bs, wire_encode_coords(loc.getCoords())...)
	return bs
}
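The ports map above is shared copy-on-write: _newPeer and _removePeer copy the old map, modify the copy, and then publish the new version to every peer actor via _updatePeers, so readers never observe a map while it is being mutated. A small standalone sketch of that pattern, with hypothetical names that are not part of this codebase:

package main

import "fmt"

// registry shares its map by copy-on-write: readers keep using the old map,
// writers build a fresh copy and swap in the new map with one assignment.
type registry struct {
	ports map[uint64]string
}

// add replaces r.ports with an updated copy, mirroring how _newPeer copies
// oldPorts into newPorts before publishing it.
func (r *registry) add(port uint64, name string) {
	newPorts := make(map[uint64]string, len(r.ports)+1)
	for k, v := range r.ports {
		newPorts[k] = v
	}
	newPorts[port] = name
	r.ports = newPorts // old map is never mutated, only replaced
}

func main() {
	r := &registry{ports: map[uint64]string{}}
	old := r.ports // a reader holding the old version
	r.add(1, "peer-one")
	fmt.Println(len(old), len(r.ports)) // prints "0 1": the old snapshot is untouched
}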
@@ -1,20 +0,0 @@
package yggdrasil

import "sync"

// Used internally to reduce allocations in the hot loop
// I.e. packets being switched or between the crypto and the switch
// For safety reasons, these must not escape this package
var pool = sync.Pool{New: func() interface{} { return []byte(nil) }}

func pool_getBytes(size int) []byte {
	bs := pool.Get().([]byte)
	if cap(bs) < size {
		bs = make([]byte, size)
	}
	return bs[:size]
}

func pool_putBytes(bs []byte) {
	pool.Put(bs)
}
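For reference, a short self-contained sketch of how such a sync.Pool is typically used around a hot path; the buffer size and workload below are arbitrary and not taken from the code above:

package main

import (
	"fmt"
	"sync"
)

// bufPool hands out reusable byte slices so the hot path avoids per-packet allocations.
var bufPool = sync.Pool{New: func() interface{} { return []byte(nil) }}

func getBytes(size int) []byte {
	bs := bufPool.Get().([]byte)
	if cap(bs) < size {
		bs = make([]byte, size)
	}
	return bs[:size]
}

func putBytes(bs []byte) { bufPool.Put(bs) }

func main() {
	buf := getBytes(1280)      // borrow a buffer big enough for one packet
	copy(buf, []byte("hello")) // ... do some work with it ...
	fmt.Println(len(buf), cap(buf) >= 1280)
	putBytes(buf) // return it so later calls can reuse the allocation
}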
@@ -1,289 +0,0 @@
package yggdrasil

// This part does most of the work to handle packets to/from yourself
// It also manages crypto and dht info
// TODO clean up old/unused code, maybe improve comments on whatever is left

// Send:
//  Receive a packet from the adapter
//  Look up session (if none exists, trigger a search)
//  Hand off to session (which encrypts, etc)
//  Session will pass it back to router.out, which hands it off to the self peer
//  The self peer triggers a lookup to find which peer to send to next
//  And then passes it to that peer's peer.out function
//  The peer.out function sends it over the wire to the matching peer

// Recv:
//  A packet comes in off the wire, and goes to a peer.handlePacket
//  The peer does a lookup, sees no better peer than the self
//  Hands it to the self peer.out, which passes it to router.in
//  If it's dht/search/etc. traffic, the router passes it to that part
//  If it's an encapsulated IPv6 packet, the router looks up the session for it
//  The packet is passed to the session, which decrypts it and passes it to router.recvPacket
//  The router then runs some sanity checks before passing it to the adapter

import (
	//"bytes"

	"time"

	"github.com/yggdrasil-network/yggdrasil-go/src/address"
	"github.com/yggdrasil-network/yggdrasil-go/src/crypto"

	"github.com/Arceliar/phony"
)

// The router struct has channels to/from the adapter device and a self peer (0), which is how messages are passed between this node and the peers/switch layer.
// The router's phony.Inbox goroutine is responsible for managing all information related to the dht, searches, and crypto sessions.
type router struct {
	phony.Inbox
	core     *Core
	addr     address.Address
	subnet   address.Subnet
	out      func([]byte) // packets we're sending to the network, link to peer's "in"
	dht      dht
	nodeinfo nodeinfo
	searches searches
	sessions sessions
	intf     routerInterface
	peer     *peer
	table    *lookupTable // has a copy of our locator
}

// Initializes the router struct, which includes setting up channels to/from the adapter.
func (r *router) init(core *Core) {
	r.core = core
	// TODO r.addr = *address.AddrForNodeID(&r.dht.nodeID)
	// TODO r.subnet = *address.SubnetForNodeID(&r.dht.nodeID)
	r.intf.router = r
	phony.Block(&r.core.peers, func() {
		// FIXME don't block here!
		r.peer = r.core.peers._newPeer(&r.core.boxPub, &r.core.sigPub, &crypto.BoxSharedKey{}, &r.intf)
	})
	r.peer.Act(r, r.peer._handleIdle)
	r.out = func(bs []byte) {
		r.peer.handlePacketFrom(r, bs)
	}
	r.nodeinfo.init(r.core)
	r.core.config.Mutex.RLock()
	r.nodeinfo.setNodeInfo(r.core.config.Current.NodeInfo, r.core.config.Current.NodeInfoPrivacy)
	r.core.config.Mutex.RUnlock()
	r.dht.init(r)
	r.searches.init(r)
	r.sessions.init(r)
}

func (r *router) updateTable(from phony.Actor, table *lookupTable) {
	r.Act(from, func() {
		r.table = table
		r.nodeinfo.Act(r, func() {
			r.nodeinfo.table = table
		})
		for _, ses := range r.sessions.sinfos {
			sinfo := ses
			sinfo.Act(r, func() {
				sinfo.table = table
			})
		}
	})
}

// Reconfigures the router and any child modules. This should only ever be run
// by the router actor.
func (r *router) reconfigure() {
	// Reconfigure the router
	current := r.core.config.GetCurrent()
	r.core.log.Println("Reloading NodeInfo...")
	if err := r.nodeinfo.setNodeInfo(current.NodeInfo, current.NodeInfoPrivacy); err != nil {
		r.core.log.Errorln("Error reloading NodeInfo:", err)
	} else {
		r.core.log.Infoln("NodeInfo updated")
	}
	// Reconfigure children
	r.dht.reconfigure()
	r.searches.reconfigure()
	r.sessions.reconfigure()
}

// Starts the doMaintenance goroutine.
func (r *router) start() error {
	r.core.log.Infoln("Starting router")
	go r.doMaintenance()
	return nil
}

// Insert a peer info into the dht, TODO? make the dht a separate actor
func (r *router) insertPeer(from phony.Actor, info *dhtInfo) {
	r.Act(from, func() {
		r.dht.insertPeer(info)
	})
}

// Reset sessions and DHT after the switch sees our coords change
func (r *router) reset(from phony.Actor) {
	r.Act(from, func() {
		r.sessions.reset()
		r.dht.reset()
	})
}

// TODO remove reconfigure so this is just a ticker loop
// and then find something better than a ticker loop to schedule things...
func (r *router) doMaintenance() {
	phony.Block(r, func() {
		// Any periodic maintenance stuff goes here
		r.core.switchTable.doMaintenance(r)
		r.dht.doMaintenance()
		r.sessions.cleanup()
	})
	time.AfterFunc(time.Second, r.doMaintenance)
}

// Checks incoming traffic type and passes it to the appropriate handler.
func (r *router) _handlePacket(packet []byte) {
	pType, pTypeLen := wire_decode_uint64(packet)
	if pTypeLen == 0 {
		return
	}
	switch pType {
	case wire_Traffic:
		r._handleTraffic(packet)
	case wire_ProtocolTraffic:
		r._handleProto(packet)
	default:
	}
}

// Handles incoming traffic, i.e. encapsulated ordinary IPv6 packets.
// Passes them to the crypto session worker to be decrypted and sent to the adapter.
func (r *router) _handleTraffic(packet []byte) {
	p := wire_trafficPacket{}
	if !p.decode(packet) {
		return
	}
	sinfo, isIn := r.sessions.getSessionForHandle(&p.Handle)
	if !isIn {
		return
	}
	sinfo.recv(r, &p)
}

// Handles protocol traffic by decrypting it, checking its type, and passing it to the appropriate handler for that traffic type.
func (r *router) _handleProto(packet []byte) {
	// First parse the packet
	p := wire_protoTrafficPacket{}
	if !p.decode(packet) {
		return
	}
	// Now try to open the payload
	var sharedKey *crypto.BoxSharedKey
	if p.ToKey == r.core.boxPub {
		// Try to open using our permanent key
		sharedKey = r.sessions.getSharedKey(&r.core.boxPriv, &p.FromKey)
	} else {
		return
	}
	bs, isOK := crypto.BoxOpen(sharedKey, p.Payload, &p.Nonce)
	if !isOK {
		return
	}
	// Now do something with the bytes in bs...
	// send dht messages to dht, sessionRefresh to sessions, data to adapter...
	// For data, should check that key and IP match...
	bsType, bsTypeLen := wire_decode_uint64(bs)
	if bsTypeLen == 0 {
		return
	}
	switch bsType {
	case wire_SessionPing:
		r._handlePing(bs, &p.FromKey, p.RPath)
	case wire_SessionPong:
		r._handlePong(bs, &p.FromKey, p.RPath)
	case wire_NodeInfoRequest:
		fallthrough
	case wire_NodeInfoResponse:
		r._handleNodeInfo(bs, &p.FromKey)
	case wire_DHTLookupRequest:
		r._handleDHTReq(bs, &p.FromKey, p.RPath)
	case wire_DHTLookupResponse:
		r._handleDHTRes(bs, &p.FromKey, p.RPath)
	default:
	}
}

// Decodes session pings from wire format and passes them to sessions.handlePing where they either create or update a session.
func (r *router) _handlePing(bs []byte, fromKey *crypto.BoxPubKey, rpath []byte) {
	ping := sessionPing{}
	if !ping.decode(bs) {
		return
	}
	ping.SendPermPub = *fromKey
	r.sessions.handlePing(&ping, rpath)
}

// Handles session pongs (which are really pings with an extra flag to prevent acknowledgement).
func (r *router) _handlePong(bs []byte, fromKey *crypto.BoxPubKey, rpath []byte) {
	r._handlePing(bs, fromKey, rpath)
}

// Decodes dht requests and passes them to dht.handleReq to trigger a lookup/response.
func (r *router) _handleDHTReq(bs []byte, fromKey *crypto.BoxPubKey, rpath []byte) {
	req := dhtReq{}
	if !req.decode(bs) {
		return
	}
	req.Key = *fromKey
	r.dht.handleReq(&req, rpath)
}

// Decodes dht responses and passes them to dht.handleRes to update the DHT table and further pass them to the search code (if applicable).
func (r *router) _handleDHTRes(bs []byte, fromKey *crypto.BoxPubKey, rpath []byte) {
	res := dhtRes{}
	if !res.decode(bs) {
		return
	}
	res.Key = *fromKey
	r.dht.handleRes(&res, rpath)
}

// Decodes nodeinfo requests and responses, then passes them to the nodeinfo handler.
func (r *router) _handleNodeInfo(bs []byte, fromKey *crypto.BoxPubKey) {
	req := nodeinfoReqRes{}
	if !req.decode(bs) {
		return
	}
	req.SendPermPub = *fromKey
	r.nodeinfo.handleNodeInfo(r, &req)
}

////////////////////////////////////////////////////////////////////////////////

// routerInterface is a helper that implements linkInterface
type routerInterface struct {
	router *router
}

func (intf *routerInterface) out(bss [][]byte) {
	// Note that this is run in the peer's goroutine
	intf.router.Act(intf.router.peer, func() {
		for _, bs := range bss {
			intf.router._handlePacket(bs)
		}
	})
	// This should now immediately make the peer idle again
	// So the self-peer shouldn't end up buffering anything
	// We let backpressure act as a throttle instead
	intf.router.peer._handleIdle()
}

func (intf *routerInterface) linkOut(_ []byte) {}

func (intf *routerInterface) close() {}

func (intf *routerInterface) name() string { return "(self)" }

func (intf *routerInterface) local() string { return "(self)" }

func (intf *routerInterface) remote() string { return "(self)" }

func (intf *routerInterface) interfaceType() string { return "self" }
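Most of the types in this commit embed phony.Inbox and funnel state changes through Act, so each struct behaves as a single-goroutine actor without explicit locks. A minimal standalone sketch of that pattern using the same phony calls seen above (the counter type is illustrative, not part of yggdrasil):

package main

import (
	"fmt"

	"github.com/Arceliar/phony"
)

// counter is an actor: all access to n happens on its Inbox goroutine,
// so no mutex is needed, just like router, peers, and sessionInfo above.
type counter struct {
	phony.Inbox
	n int
}

// increment asks the counter actor to update its own state asynchronously.
func (c *counter) increment(from phony.Actor) {
	c.Act(from, func() { c.n++ })
}

func main() {
	c := &counter{}
	c.increment(nil)
	c.increment(nil)
	// phony.Block waits for the actor to process the enclosed function,
	// which is how router.init waits on peers._newPeer above.
	var n int
	phony.Block(c, func() { n = c.n })
	fmt.Println(n) // prints 2
}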
@@ -1,271 +0,0 @@
package yggdrasil

// This thing manages search packets

// The basic idea is as follows:
//  We may know a NodeID (with a mask) and want to connect
//  We begin a search by sending a dht lookup to ourself
//  Each time a node responds, we sort the results and filter to only include useful nodes
//  We then periodically send a packet to the first node from the list (after re-filtering)
//  This happens in parallel for each node that replies
//  Meanwhile, we keep a list of the (up to) 16 closest nodes to the destination that we've visited
//  We only consider an unvisited node useful if either the list isn't full or the unvisited node is closer to the destination than the furthest node on the list
//  That gives the search some chance to recover if it hits a dead end where a node doesn't know everyone it should

import (
	"errors"
	"sort"
	"time"

	"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
)

// This defines the time after which we time out a search (so it can restart).
const search_RETRY_TIME = 3 * time.Second
const search_STEP_TIME = time.Second
const search_MAX_RESULTS = dht_lookup_size

// Information about an ongoing search.
// Includes the target NodeID, the bitmask to match it to an IP, and the list of nodes to visit / already visited.
type searchInfo struct {
	searches *searches
	dest     crypto.NodeID
	mask     crypto.NodeID
	time     time.Time
	visited  []*crypto.NodeID // Closest addresses visited so far
	callback func(*sessionInfo, error)
	// TODO context.Context for timeout and cancellation
	send uint64 // log number of requests sent
	recv uint64 // log number of responses received
}

// This stores a map of active searches.
type searches struct {
	router   *router
	searches map[crypto.NodeID]*searchInfo
}

// Initializes the searches struct.
func (s *searches) init(r *router) {
	s.router = r
	s.searches = make(map[crypto.NodeID]*searchInfo)
}

func (s *searches) reconfigure() {
	// This is where reconfiguration would go, if we had anything to do
}

// Creates a new search info, adds it to the searches struct, and returns a pointer to the info.
func (s *searches) createSearch(dest *crypto.NodeID, mask *crypto.NodeID, callback func(*sessionInfo, error)) *searchInfo {
	info := searchInfo{
		searches: s,
		dest:     *dest,
		mask:     *mask,
		time:     time.Now(),
		callback: callback,
	}
	s.searches[*dest] = &info
	return &info
}

////////////////////////////////////////////////////////////////////////////////

// Checks if there's an ongoing search related to a dhtRes.
// If there is, it adds the response info to the search and triggers a new search step.
// If there's no ongoing search, or if the dhtRes finished the search (it was from the target node), then don't do anything more.
func (sinfo *searchInfo) handleDHTRes(res *dhtRes) {
	if nfo := sinfo.searches.searches[sinfo.dest]; nfo != sinfo {
		return // already done
	}
	if res != nil {
		sinfo.recv++
		if sinfo.checkDHTRes(res) {
			return // Search finished successfully
		}
		// Use results to start an additional search thread
		infos := append([]*dhtInfo(nil), res.Infos...)
		infos = sinfo.getAllowedInfos(infos)
		if len(infos) > 0 {
			sinfo.continueSearch(infos)
		}
	}
}

// If there has been no response in too long, then this cleans up the search.
// Otherwise, it pops the closest node to the destination (in keyspace) off of the toVisit list and sends a dht ping.
func (sinfo *searchInfo) doSearchStep(infos []*dhtInfo) {
	if len(infos) > 0 {
		// Send to the next search target
		next := infos[0]
		rq := dhtReqKey{next.key, sinfo.dest}
		sinfo.searches.router.dht.addCallback(&rq, sinfo.handleDHTRes)
		sinfo.searches.router.dht.ping(next, &sinfo.dest)
		sinfo.send++
	}
}

// Get a list of search targets that are close enough to the destination to try
// Requires an initial list as input
func (sinfo *searchInfo) getAllowedInfos(infos []*dhtInfo) []*dhtInfo {
	var temp []*dhtInfo
	for _, info := range infos {
		if false && len(sinfo.visited) < search_MAX_RESULTS {
			// We're not full on results yet, so don't block anything yet
		} else if !dht_ordered(&sinfo.dest, info.getNodeID(), sinfo.visited[len(sinfo.visited)-1]) {
			// Too far away
			continue
		}
		var known bool
		for _, nfo := range sinfo.visited {
			if *nfo == *info.getNodeID() {
				known = true
				break
			}
		}
		if !known {
			temp = append(temp, info)
		}
	}
	infos = append(infos[:0], temp...) // restrict to only the allowed infos
	sort.SliceStable(infos, func(i, j int) bool {
		// Should return true if i is closer to the destination than j
		return dht_ordered(&sinfo.dest, infos[i].getNodeID(), infos[j].getNodeID())
	}) // Sort infos to start with the closest
	if len(infos) > search_MAX_RESULTS {
		infos = infos[:search_MAX_RESULTS] // Limit max number of infos
	}
	return infos
}

// Run doSearchStep and schedule another continueSearch to happen after search_STEP_TIME.
// Must not be called with an empty list of infos
func (sinfo *searchInfo) continueSearch(infos []*dhtInfo) {
	sinfo.doSearchStep(infos)
	infos = infos[1:] // Remove the node we just tried
	// In case there's no response, try the next node in infos later
	time.AfterFunc(search_STEP_TIME, func() {
		sinfo.searches.router.Act(nil, func() {
			// FIXME this keeps the search alive forever if not for the searches map, fix that
			newSearchInfo := sinfo.searches.searches[sinfo.dest]
			if newSearchInfo != sinfo {
				return
			}
			// Get good infos here instead of at the top, to make sure we can always start things off with a continueSearch call to ourself
			infos = sinfo.getAllowedInfos(infos)
			if len(infos) > 0 {
				sinfo.continueSearch(infos)
			}
		})
	})
}

// Initially start a search
func (sinfo *searchInfo) startSearch() {
	var infos []*dhtInfo
	infos = append(infos, &dhtInfo{
		key:    sinfo.searches.router.core.boxPub,
		coords: sinfo.searches.router.table.self.getCoords(),
	})
	// Start the search by asking ourself, useful if we're the destination
	sinfo.continueSearch(infos)
	// Start a timer to clean up the search if everything times out
	var cleanupFunc func()
	cleanupFunc = func() {
		sinfo.searches.router.Act(nil, func() {
			// FIXME this keeps the search alive forever if not for the searches map, fix that
			newSearchInfo := sinfo.searches.searches[sinfo.dest]
			if newSearchInfo != sinfo {
				return
			}
			elapsed := time.Since(sinfo.time)
			if elapsed > search_RETRY_TIME {
				// cleanup
				delete(sinfo.searches.searches, sinfo.dest)
				sinfo.searches.router.core.log.Debugln("search timeout:", &sinfo.dest, sinfo.send, sinfo.recv)
				sinfo.callback(nil, errors.New("search reached dead end"))
				return
			}
			time.AfterFunc(search_RETRY_TIME-elapsed, cleanupFunc)
		})
	}
	time.AfterFunc(search_RETRY_TIME, cleanupFunc)
}

// Calls createSearch, and initializes the iterative search parts of the struct before returning it.
func (s *searches) newIterSearch(dest *crypto.NodeID, mask *crypto.NodeID, callback func(*sessionInfo, error)) *searchInfo {
	sinfo := s.createSearch(dest, mask, callback)
	sinfo.visited = append(sinfo.visited, &s.router.dht.nodeID)
	return sinfo
}

// Checks if a dhtRes is good (called by handleDHTRes).
// If the response is from the target, get/create a session, trigger a session ping, and return true.
// Otherwise return false.
func (sinfo *searchInfo) checkDHTRes(res *dhtRes) bool {
	from := dhtInfo{key: res.Key, coords: res.Coords}
	them := from.getNodeID()
	var known bool
	for _, v := range sinfo.visited {
		if *v == *them {
			known = true
			break
		}
	}
	if !known {
		if len(sinfo.visited) < search_MAX_RESULTS || dht_ordered(&sinfo.dest, them, sinfo.visited[len(sinfo.visited)-1]) {
			// Closer to the destination than the threshold, so update visited
			sinfo.searches.router.core.log.Debugln("Updating search:", &sinfo.dest, them, sinfo.send, sinfo.recv)
			sinfo.visited = append(sinfo.visited, them)
			sort.SliceStable(sinfo.visited, func(i, j int) bool {
				// Should return true if i is closer to the destination than j
				return dht_ordered(&sinfo.dest, sinfo.visited[i], sinfo.visited[j])
			}) // Sort infos to start with the closest
			if len(sinfo.visited) > search_MAX_RESULTS {
				sinfo.visited = sinfo.visited[:search_MAX_RESULTS]
			}
			sinfo.time = time.Now()
		}
	}
	var destMasked crypto.NodeID
	var themMasked crypto.NodeID
	for idx := 0; idx < crypto.NodeIDLen; idx++ {
		destMasked[idx] = sinfo.dest[idx] & sinfo.mask[idx]
		themMasked[idx] = them[idx] & sinfo.mask[idx]
	}
	if themMasked != destMasked {
		return false
	}
	finishSearch := func(sess *sessionInfo, err error) {
		if sess != nil {
			// FIXME (!) replay attacks could mess with coords? Give it a handle (tstamp)?
			sess.Act(sinfo.searches.router, func() { sess.coords = res.Coords })
			sess.ping(sinfo.searches.router)
		}
		if err != nil {
			sinfo.callback(nil, err)
		} else {
			sinfo.callback(sess, nil)
		}
		// Cleanup
		if _, isIn := sinfo.searches.searches[sinfo.dest]; isIn {
			sinfo.searches.router.core.log.Debugln("Finished search:", &sinfo.dest, sinfo.send, sinfo.recv)
			delete(sinfo.searches.searches, res.Dest)
		}
	}
	// They match, so create a session and send a sessionRequest
	var err error
	sess, isIn := sinfo.searches.router.sessions.getByTheirPerm(&res.Key)
	if !isIn {
		// Don't already have a session
		sess = sinfo.searches.router.sessions.createSession(&res.Key)
		if sess == nil {
			err = errors.New("session not allowed")
		} else if _, isIn := sinfo.searches.router.sessions.getByTheirPerm(&res.Key); !isIn {
			panic("This should never happen")
		}
	} else {
		err = errors.New("session already exists")
	}
	finishSearch(sess, err)
	return true
}
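The filtering above relies on dht_ordered, a circular keyspace ordering check defined elsewhere in the package. A toy stand-in for that idea, using one-byte keys; the real helper operates on full NodeIDs and its exact semantics may differ:

package main

import "fmt"

// ordered reports whether b lies on the path from a to c when walking the
// key space as a ring. This is an illustrative stand-in for the dht_ordered
// check used above, not the project's implementation.
func ordered(a, b, c uint8) bool {
	if a <= c {
		return a <= b && b <= c
	}
	// The interval wraps around the end of the ring.
	return a <= b || b <= c
}

func main() {
	const dest, furthestVisited = 10, 200
	// A candidate at 150 sits between the destination and the furthest visited
	// node, so a search like the one above would keep it as a useful target.
	fmt.Println(ordered(dest, 150, furthestVisited)) // true
	// A candidate at 250 is beyond the furthest visited node, so it's filtered out.
	fmt.Println(ordered(dest, 250, furthestVisited)) // false
}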
@@ -1,551 +0,0 @@
package yggdrasil

// This is the session manager
// It's responsible for keeping track of open sessions to other nodes
// The session information consists of crypto keys and coords

import (
	"bytes"
	"sync"
	"time"

	"github.com/yggdrasil-network/yggdrasil-go/src/address"
	"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
	"github.com/yggdrasil-network/yggdrasil-go/src/util"

	"github.com/Arceliar/phony"
)

// All the information we know about an active session.
// This includes coords, permanent and ephemeral keys, handles and nonces, various sorts of timing information for timeout and maintenance, and some metadata for the admin API.
type sessionInfo struct {
	phony.Inbox   // Protects all of the below, use it any time you read/change the contents of a session
	sessions      *sessions           //
	theirAddr     address.Address     //
	theirSubnet   address.Subnet      //
	theirPermPub  crypto.BoxPubKey    //
	theirSesPub   crypto.BoxPubKey    //
	mySesPub      crypto.BoxPubKey    //
	mySesPriv     crypto.BoxPrivKey   //
	sharedPermKey crypto.BoxSharedKey // used for session pings
	sharedSesKey  crypto.BoxSharedKey // derived from session keys
	theirHandle   crypto.Handle       //
	myHandle      crypto.Handle       //
	theirNonce    crypto.BoxNonce     //
	myNonce       crypto.BoxNonce     //
	theirMTU      MTU                 //
	myMTU         MTU                 //
	wasMTUFixed   bool                // Was the MTU fixed by a receive error?
	timeOpened    time.Time           // Time the session was opened
	time          time.Time           // Time we last received a packet
	mtuTime       time.Time           // time myMTU was last changed
	pingTime      time.Time           // time the first ping was sent since the last received packet
	coords        []byte              // coords of destination
	reset         bool                // reset if coords change
	tstamp        int64               // ATOMIC - tstamp from their last session ping, replay attack mitigation
	bytesSent     uint64              // Bytes of real traffic sent in this session
	bytesRecvd    uint64              // Bytes of real traffic received in this session
	init          chan struct{}       // Closed when the first session pong arrives, used to signal that the session is ready for initial use
	cancel        util.Cancellation   // Used to terminate workers
	conn          *Conn               // The associated Conn object
	callbacks     []chan func()       // Finished work from crypto workers
	table         *lookupTable        // table.self is a locator where we get our coords
	path          []byte              // Path from self to destination
}

// Represents a session ping/pong packet, and includes information like public keys, a session handle, coords, a timestamp to prevent replays, and the tun/tap MTU.
type sessionPing struct {
	SendPermPub crypto.BoxPubKey // Sender's permanent key
	Handle      crypto.Handle    // Random number to ID session
	SendSesPub  crypto.BoxPubKey // Session key to use
	Coords      []byte           //
	Tstamp      int64            // unix time, but the only real requirement is that it increases
	IsPong      bool             //
	MTU         MTU              //
}

// Updates session info in response to a ping, after checking that the ping is OK.
// Returns true if the session was updated, or false otherwise.
func (sinfo *sessionInfo) _update(p *sessionPing, rpath []byte) bool {
	if !(p.Tstamp > sinfo.tstamp) {
		// To protect against replay attacks
		return false
	}
	if p.SendPermPub != sinfo.theirPermPub {
		// Should only happen if two sessions got the same handle
		// That shouldn't be allowed anyway, but if it happens then let one time out
		return false
	}
	if p.SendSesPub != sinfo.theirSesPub {
		sinfo.path = nil
		sinfo.theirSesPub = p.SendSesPub
		sinfo.theirHandle = p.Handle
		sinfo.sharedSesKey = *crypto.GetSharedKey(&sinfo.mySesPriv, &sinfo.theirSesPub)
		sinfo.theirNonce = crypto.BoxNonce{}
	}
	if p.MTU >= 1280 || p.MTU == 0 {
		sinfo.theirMTU = p.MTU
		if sinfo.conn != nil {
			sinfo.conn.setMTU(sinfo, sinfo._getMTU())
		}
	}
	if !bytes.Equal(sinfo.coords, p.Coords) {
		// allocate enough space for additional coords
		sinfo.coords = append(make([]byte, 0, len(p.Coords)+11), p.Coords...)
		path := switch_reverseCoordBytes(rpath)
		sinfo.path = append(sinfo.path[:0], path...)
		defer sinfo._sendPingPong(false, nil)
	} else if p.IsPong {
		path := switch_reverseCoordBytes(rpath)
		sinfo.path = append(sinfo.path[:0], path...)
	}
	sinfo.time = time.Now()
	sinfo.tstamp = p.Tstamp
	sinfo.reset = false
	defer func() { recover() }() // Recover if the below panics
	select {
	case <-sinfo.init:
	default:
		// Unblock anything waiting for the session to initialize
		close(sinfo.init)
	}
	return true
}

// Struct of all active sessions.
// Sessions are indexed by handle.
// Additionally, stores maps of address/subnet onto keys, and keys onto handles.
type sessions struct {
	router           *router
	listener         *Listener
	listenerMutex    sync.Mutex
	lastCleanup      time.Time
	isAllowedHandler func(pubkey *crypto.BoxPubKey, initiator bool) bool // Returns true or false if session setup is allowed
	isAllowedMutex   sync.RWMutex                                        // Protects the above
	myMaximumMTU     MTU                                                 // Maximum allowed session MTU
	permShared       map[crypto.BoxPubKey]*crypto.BoxSharedKey           // Maps known permanent keys to their shared key, used by DHT a lot
	sinfos           map[crypto.Handle]*sessionInfo                      // Maps handle onto session info
	byTheirPerm      map[crypto.BoxPubKey]*crypto.Handle                 // Maps theirPermPub onto handle
}

// Initializes the session struct.
func (ss *sessions) init(r *router) {
	ss.router = r
	ss.permShared = make(map[crypto.BoxPubKey]*crypto.BoxSharedKey)
	ss.sinfos = make(map[crypto.Handle]*sessionInfo)
	ss.byTheirPerm = make(map[crypto.BoxPubKey]*crypto.Handle)
	ss.lastCleanup = time.Now()
	ss.myMaximumMTU = 65535
}

func (ss *sessions) reconfigure() {
	ss.router.Act(nil, func() {
		for _, session := range ss.sinfos {
			sinfo, mtu := session, ss.myMaximumMTU
			sinfo.Act(ss.router, func() {
				sinfo.myMTU = mtu
			})
			session.ping(ss.router)
		}
	})
}

// Determines whether the session with a given publickey is allowed based on
// session firewall rules.
func (ss *sessions) isSessionAllowed(pubkey *crypto.BoxPubKey, initiator bool) bool {
	ss.isAllowedMutex.RLock()
	defer ss.isAllowedMutex.RUnlock()

	if ss.isAllowedHandler == nil {
		return true
	}

	return ss.isAllowedHandler(pubkey, initiator)
}

// Gets the session corresponding to a given handle.
func (ss *sessions) getSessionForHandle(handle *crypto.Handle) (*sessionInfo, bool) {
	sinfo, isIn := ss.sinfos[*handle]
	return sinfo, isIn
}

// Gets a session corresponding to a permanent key used by the remote node.
func (ss *sessions) getByTheirPerm(key *crypto.BoxPubKey) (*sessionInfo, bool) {
	h, isIn := ss.byTheirPerm[*key]
	if !isIn {
		return nil, false
	}
	sinfo, isIn := ss.getSessionForHandle(h)
	return sinfo, isIn
}

// Creates a new session and lazily cleans up old existing sessions. This
// includes initializing session info to sane defaults (e.g. lowest supported
// MTU).
func (ss *sessions) createSession(theirPermKey *crypto.BoxPubKey) *sessionInfo {
	// TODO: this check definitely needs to be moved
	if !ss.isSessionAllowed(theirPermKey, true) {
		return nil
	}
	sinfo := sessionInfo{}
	sinfo.sessions = ss
	sinfo.theirPermPub = *theirPermKey
	sinfo.sharedPermKey = *ss.getSharedKey(&ss.router.core.boxPriv, &sinfo.theirPermPub)
	pub, priv := crypto.NewBoxKeys()
	sinfo.mySesPub = *pub
	sinfo.mySesPriv = *priv
	sinfo.myNonce = *crypto.NewBoxNonce()
	sinfo.theirMTU = 1280
	sinfo.myMTU = ss.myMaximumMTU
	now := time.Now()
	sinfo.timeOpened = now
	sinfo.time = now
	sinfo.mtuTime = now
	sinfo.pingTime = now
	sinfo.init = make(chan struct{})
	sinfo.cancel = util.NewCancellation()
	higher := false
	for idx := range ss.router.core.boxPub {
		if ss.router.core.boxPub[idx] > sinfo.theirPermPub[idx] {
			higher = true
			break
		} else if ss.router.core.boxPub[idx] < sinfo.theirPermPub[idx] {
			break
		}
	}
	if higher {
		// higher => odd nonce
		sinfo.myNonce[len(sinfo.myNonce)-1] |= 0x01
	} else {
		// lower => even nonce
		sinfo.myNonce[len(sinfo.myNonce)-1] &= 0xfe
	}
	sinfo.myHandle = *crypto.NewHandle()
	// TODO sinfo.theirAddr = *address.AddrForNodeID(crypto.GetNodeID(&sinfo.theirPermPub))
	// TODO sinfo.theirSubnet = *address.SubnetForNodeID(crypto.GetNodeID(&sinfo.theirPermPub))
	sinfo.table = ss.router.table
	ss.sinfos[sinfo.myHandle] = &sinfo
	ss.byTheirPerm[sinfo.theirPermPub] = &sinfo.myHandle
	return &sinfo
}

func (ss *sessions) cleanup() {
	// Time thresholds almost certainly could use some adjusting
	for k := range ss.permShared {
		// Delete a key, to make sure this eventually shrinks to 0
		delete(ss.permShared, k)
		break
	}
	if time.Since(ss.lastCleanup) < time.Minute {
		return
	}
	permShared := make(map[crypto.BoxPubKey]*crypto.BoxSharedKey, len(ss.permShared))
	for k, v := range ss.permShared {
		permShared[k] = v
	}
	ss.permShared = permShared
	sinfos := make(map[crypto.Handle]*sessionInfo, len(ss.sinfos))
	for k, v := range ss.sinfos {
		sinfos[k] = v
	}
	ss.sinfos = sinfos
	byTheirPerm := make(map[crypto.BoxPubKey]*crypto.Handle, len(ss.byTheirPerm))
	for k, v := range ss.byTheirPerm {
		byTheirPerm[k] = v
	}
	ss.byTheirPerm = byTheirPerm
	ss.lastCleanup = time.Now()
}

func (sinfo *sessionInfo) doRemove() {
	sinfo.sessions.router.Act(nil, func() {
		sinfo.sessions.removeSession(sinfo)
	})
}

// Closes a session, removing it from sessions maps.
func (ss *sessions) removeSession(sinfo *sessionInfo) {
	if s := sinfo.sessions.sinfos[sinfo.myHandle]; s == sinfo {
		delete(sinfo.sessions.sinfos, sinfo.myHandle)
		delete(sinfo.sessions.byTheirPerm, sinfo.theirPermPub)
	}
}

// Returns a session ping appropriate for the given session info.
func (sinfo *sessionInfo) _getPing() sessionPing {
	coords := sinfo.table.self.getCoords()
	ping := sessionPing{
		SendPermPub: sinfo.sessions.router.core.boxPub,
		Handle:      sinfo.myHandle,
		SendSesPub:  sinfo.mySesPub,
		Tstamp:      time.Now().Unix(),
		Coords:      coords,
		MTU:         sinfo.myMTU,
	}
	sinfo.myNonce.Increment()
	return ping
}

// Gets the shared key for a pair of box keys.
// Used to cache recently used shared keys for protocol traffic.
// This comes up with dht req/res and session ping/pong traffic.
func (ss *sessions) getSharedKey(myPriv *crypto.BoxPrivKey,
	theirPub *crypto.BoxPubKey) *crypto.BoxSharedKey {
	return crypto.GetSharedKey(myPriv, theirPub)
	// FIXME concurrency issues with the below, so for now we just burn the CPU every time
	if skey, isIn := ss.permShared[*theirPub]; isIn {
		return skey
	}
	// First do some cleanup
	const maxKeys = 1024
	for key := range ss.permShared {
		// Remove a random key until the store is small enough
		if len(ss.permShared) < maxKeys {
			break
		}
		delete(ss.permShared, key)
	}
	ss.permShared[*theirPub] = crypto.GetSharedKey(myPriv, theirPub)
	return ss.permShared[*theirPub]
}

// Sends a session ping by calling sendPingPong in ping mode.
func (sinfo *sessionInfo) ping(from phony.Actor) {
	sinfo.Act(from, func() {
		sinfo._sendPingPong(false, nil)
	})
}

// Calls getPing, sets the appropriate ping/pong flag, encodes to wire format, and sends it.
// Updates the time the last ping was sent in the session info.
func (sinfo *sessionInfo) _sendPingPong(isPong bool, path []byte) {
	ping := sinfo._getPing()
	ping.IsPong = isPong
	bs := ping.encode()
	payload, nonce := crypto.BoxSeal(&sinfo.sharedPermKey, bs, nil)
	p := wire_protoTrafficPacket{
		Coords:  sinfo.coords,
		ToKey:   sinfo.theirPermPub,
		FromKey: sinfo.sessions.router.core.boxPub,
		Nonce:   *nonce,
		Payload: payload,
	}
	if path != nil {
		p.Coords = append([]byte{0}, path...)
		p.Offset += 1
	}
	packet := p.encode()
	// TODO rewrite the below if/when the peer struct becomes an actor, to not go through the router first
	sinfo.sessions.router.Act(sinfo, func() { sinfo.sessions.router.out(packet) })
	if !isPong {
		sinfo.pingTime = time.Now()
	}
}

func (sinfo *sessionInfo) setConn(from phony.Actor, conn *Conn) {
	sinfo.Act(from, func() {
		sinfo.conn = conn
		sinfo.conn.setMTU(sinfo, sinfo._getMTU())
	})
}

// Handles a session ping, creating a session if needed and calling update, then possibly responding with a pong if the ping was in ping mode and the update was successful.
// If the session has a packet cached (common when first setting up a session), it will be sent.
func (ss *sessions) handlePing(ping *sessionPing, rpath []byte) {
	// Get the corresponding session (or create a new session)
	sinfo, isIn := ss.getByTheirPerm(&ping.SendPermPub)
	switch {
	case ping.IsPong: // This is a response, not an initial ping, so ignore it.
	case isIn: // Session already exists
	case !ss.isSessionAllowed(&ping.SendPermPub, false): // Session is not allowed
	default:
		ss.listenerMutex.Lock()
		if ss.listener != nil {
			// This is a ping from an allowed node for which no session exists, and we have a listener ready to handle sessions.
			// We need to create a session and pass it to the listener.
			sinfo = ss.createSession(&ping.SendPermPub)
			if s, _ := ss.getByTheirPerm(&ping.SendPermPub); s != sinfo {
				panic("This should not happen")
			}
			conn := newConn(ss.router.core, crypto.GetNodeID(&sinfo.theirPermPub), &crypto.NodeID{}, sinfo)
			for i := range conn.nodeMask {
				conn.nodeMask[i] = 0xFF
			}
			sinfo.setConn(ss.router, conn)
			c := ss.listener.conn
			go func() { c <- conn }()
		}
		ss.listenerMutex.Unlock()
	}
	if sinfo != nil {
		sinfo.Act(ss.router, func() {
			// Update the session
			if !sinfo._update(ping, rpath) { /*panic("Should not happen in testing")*/
				return
			}
			if !ping.IsPong {
				sinfo._sendPingPong(true, switch_reverseCoordBytes(rpath))
			}
		})
	}
}

// Get the MTU of the session.
// Will be equal to the smaller of this node's MTU or the remote node's MTU.
// If sending over links with a maximum message size (this was a thing with the old UDP code), it could be further lowered, to a minimum of 1280.
func (sinfo *sessionInfo) _getMTU() MTU {
	if sinfo.theirMTU == 0 || sinfo.myMTU == 0 {
		return 0
	}
	if sinfo.theirMTU < sinfo.myMTU {
		return sinfo.theirMTU
	}
	return sinfo.myMTU
}
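The negotiated session MTU above is simply the smaller of the two advertised values, with zero meaning that an endpoint has not advertised one yet. A tiny standalone illustration; MTU is modeled as a plain uint16 here, which may differ from the package's MTU type:

package main

import "fmt"

// negotiatedMTU mirrors the logic of _getMTU above: zero means an endpoint
// hasn't advertised an MTU yet, otherwise the smaller of the two values wins.
func negotiatedMTU(mine, theirs uint16) uint16 {
	if mine == 0 || theirs == 0 {
		return 0
	}
	if theirs < mine {
		return theirs
	}
	return mine
}

func main() {
	fmt.Println(negotiatedMTU(65535, 1280)) // 1280: limited by the remote side
	fmt.Println(negotiatedMTU(9000, 0))     // 0: the remote MTU isn't known yet
}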

// Checks if a packet's nonce is newer than any previously received
func (sinfo *sessionInfo) _nonceIsOK(theirNonce *crypto.BoxNonce) bool {
	return theirNonce.Minus(&sinfo.theirNonce) > 0
}

// Updates the most recently seen nonce, if the given nonce is newer than anything seen before
func (sinfo *sessionInfo) _updateNonce(theirNonce *crypto.BoxNonce) {
	if theirNonce.Minus(&sinfo.theirNonce) > 0 {
		// This nonce is the newest we've seen, so make a note of that
		sinfo.theirNonce = *theirNonce
		sinfo.time = time.Now()
	}
}

// Resets all sessions to an uninitialized state.
// Called after coord changes, so attempts to use a session will trigger a new ping and notify the remote end of the coord change.
// Only call this from the router actor.
func (ss *sessions) reset() {
	for _, _sinfo := range ss.sinfos {
		sinfo := _sinfo // So we can safely put it in a closure
		sinfo.Act(ss.router, func() {
			sinfo.reset = true
			sinfo._sendPingPong(false, sinfo.path)
			sinfo._sendPingPong(false, nil)
		})
	}
}

////////////////////////////////////////////////////////////////////////////////
//////////////////////////// Worker Functions Below ////////////////////////////
////////////////////////////////////////////////////////////////////////////////

type sessionCryptoManager struct {
	phony.Inbox
}

func (m *sessionCryptoManager) workerGo(from phony.Actor, f func()) {
	m.Act(from, func() {
		util.WorkerGo(f)
	})
}

var manager = sessionCryptoManager{}

type FlowKeyMessage struct {
	FlowKey uint64
	Message []byte
}

func (sinfo *sessionInfo) recv(from phony.Actor, packet *wire_trafficPacket) {
	sinfo.Act(from, func() {
		sinfo._recvPacket(packet)
	})
}

func (sinfo *sessionInfo) _recvPacket(p *wire_trafficPacket) {
	select {
	case <-sinfo.init:
	default:
		return
	}
	if !sinfo._nonceIsOK(&p.Nonce) {
		return
	}
	k := sinfo.sharedSesKey
	var isOK bool
	var bs []byte
	ch := make(chan func(), 1)
	poolFunc := func() {
		bs, isOK = crypto.BoxOpen(&k, p.Payload, &p.Nonce)
		callback := func() {
			if !isOK || k != sinfo.sharedSesKey || !sinfo._nonceIsOK(&p.Nonce) {
				// Either we failed to decrypt, or the session was updated, or we
				// received this packet in the mean time
				return
			}
			sinfo._updateNonce(&p.Nonce)
			sinfo.bytesRecvd += uint64(len(bs))
			sinfo.conn.recvMsg(sinfo, bs)
		}
		ch <- callback
		sinfo.checkCallbacks()
	}
	sinfo.callbacks = append(sinfo.callbacks, ch)
	manager.workerGo(sinfo, poolFunc)
}

func (sinfo *sessionInfo) _send(msg FlowKeyMessage) {
	select {
	case <-sinfo.init:
	default:
		return
	}
	sinfo.bytesSent += uint64(len(msg.Message))
	var coords []byte
	var offset uint64
	if len(sinfo.path) > 0 {
		coords = append([]byte{0}, sinfo.path...)
		offset += 1
	} else {
		coords = append([]byte(nil), sinfo.coords...)
	}
	if msg.FlowKey != 0 {
		coords = append(coords, 0)
		coords = append(coords, wire_encode_uint64(msg.FlowKey)...)
	}
	p := wire_trafficPacket{
		Offset: offset,
		Coords: coords,
		Handle: sinfo.theirHandle,
		Nonce:  sinfo.myNonce,
	}
	sinfo.myNonce.Increment()
	k := sinfo.sharedSesKey
	ch := make(chan func(), 1)
|
|
||||||
poolFunc := func() {
|
|
||||||
p.Payload, _ = crypto.BoxSeal(&k, msg.Message, &p.Nonce)
|
|
||||||
packet := p.encode()
|
|
||||||
callback := func() {
|
|
||||||
sinfo.sessions.router.Act(sinfo, func() {
|
|
||||||
sinfo.sessions.router.out(packet)
|
|
||||||
})
|
|
||||||
if time.Since(sinfo.pingTime) > 3*time.Second {
|
|
||||||
sinfo._sendPingPong(false, nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ch <- callback
|
|
||||||
sinfo.checkCallbacks()
|
|
||||||
}
|
|
||||||
sinfo.callbacks = append(sinfo.callbacks, ch)
|
|
||||||
manager.workerGo(sinfo, poolFunc)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sinfo *sessionInfo) checkCallbacks() {
|
|
||||||
sinfo.Act(nil, func() {
|
|
||||||
if len(sinfo.callbacks) > 0 {
|
|
||||||
select {
|
|
||||||
case callback := <-sinfo.callbacks[0]:
|
|
||||||
sinfo.callbacks = sinfo.callbacks[1:]
|
|
||||||
callback()
|
|
||||||
sinfo.checkCallbacks()
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
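// The recv and send paths above share one pattern: each packet gets its own
// one-element channel appended to sinfo.callbacks, the expensive crypto runs
// on the worker pool, and checkCallbacks only ever pops the head of the queue,
// so results are applied in arrival order even though workers finish in any
// order. A minimal sketch of that ordering trick on its own, with plain
// goroutines standing in for util.WorkerGo and with the actor serialization
// omitted; the orderedQueue name is illustrative and not part of this package:
type orderedQueue struct {
	pending []chan func()
}

// submit starts the work concurrently, but records its slot in the queue first.
func (q *orderedQueue) submit(work func() func()) {
	ch := make(chan func(), 1)
	q.pending = append(q.pending, ch)
	go func() { ch <- work() }()
}

// drain applies finished callbacks strictly from the head of the queue.
func (q *orderedQueue) drain() {
	for len(q.pending) > 0 {
		select {
		case cb := <-q.pending[0]:
			q.pending = q.pending[1:]
			cb()
		default:
			return // the oldest job isn't done yet, so later results must wait
		}
	}
}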
@@ -1,91 +0,0 @@
package yggdrasil

import (
	"errors"

	"github.com/Arceliar/phony"
)

type Simlink struct {
	phony.Inbox
	rch     chan []byte
	dest    *Simlink
	link    *link
	started bool
}

func (s *Simlink) readMsg() ([]byte, error) {
	bs, ok := <-s.rch
	if !ok {
		return nil, errors.New("read from closed Simlink")
	}
	return bs, nil
}

func (s *Simlink) _recvMetaBytes() ([]byte, error) {
	return s.readMsg()
}

func (s *Simlink) _sendMetaBytes(bs []byte) error {
	_, err := s.writeMsgs([][]byte{bs})
	return err
}

func (s *Simlink) close() error {
	defer func() { recover() }()
	close(s.rch)
	return nil
}

func (s *Simlink) writeMsgs(msgs [][]byte) (int, error) {
	if s.dest == nil {
		return 0, errors.New("write to unpaired Simlink")
	}
	var size int
	for _, msg := range msgs {
		size += len(msg)
		bs := append([]byte(nil), msg...)
		phony.Block(s, func() {
			s.dest.Act(s, func() {
				defer func() { recover() }()
				s.dest.rch <- bs
			})
		})
	}
	return size, nil
}

func (c *Core) NewSimlink() *Simlink {
	s := &Simlink{rch: make(chan []byte, 1)}
	n := "Simlink"
	var err error
	s.link, err = c.links.create(s, n, n, n, n, false, true, linkOptions{})
	if err != nil {
		panic(err)
	}
	return s
}

func (s *Simlink) SetDestination(dest *Simlink) error {
	var err error
	phony.Block(s, func() {
		if s.dest != nil {
			err = errors.New("destination already set")
		} else {
			s.dest = dest
		}
	})
	return err
}

func (s *Simlink) Start() error {
	var err error
	phony.Block(s, func() {
		if s.started {
			err = errors.New("already started")
		} else {
			s.started = true
			go s.link.handler()
		}
	})
	return err
}
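// A rough sketch of how two nodes might be joined over a Simlink pair in a
// test, assuming both Core values are already configured and started; the
// pairSimlinks name is illustrative and not part of the original file:
func pairSimlinks(a, b *Core) error {
	la, lb := a.NewSimlink(), b.NewSimlink()
	// Point each end at the other before starting the link handlers.
	if err := la.SetDestination(lb); err != nil {
		return err
	}
	if err := lb.SetDestination(la); err != nil {
		return err
	}
	if err := la.Start(); err != nil {
		return err
	}
	return lb.Start()
}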
@@ -1,120 +0,0 @@
package yggdrasil

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"net"
)

// Test that this matches the interface we expect
var _ = linkMsgIO(&stream{})

type stream struct {
	rwc          io.ReadWriteCloser
	inputBuffer  *bufio.Reader
	outputBuffer net.Buffers
}

func (s *stream) close() error {
	return s.rwc.Close()
}

const streamMsgSize = 2048 + 65535

var streamMsg = [...]byte{0xde, 0xad, 0xb1, 0x75} // "dead bits"

func (s *stream) init(rwc io.ReadWriteCloser) {
	// TODO have this also do the metadata handshake and create the peer struct
	s.rwc = rwc
	// TODO call something to do the metadata exchange
	s.inputBuffer = bufio.NewReaderSize(s.rwc, 2*streamMsgSize)
}

// writeMsgs writes messages with stream padding, and is *not* thread safe.
func (s *stream) writeMsgs(bss [][]byte) (int, error) {
	buf := s.outputBuffer[:0]
	var written int
	for _, bs := range bss {
		buf = append(buf, streamMsg[:])
		buf = append(buf, wire_encode_uint64(uint64(len(bs))))
		buf = append(buf, bs)
		written += len(bs)
	}
	s.outputBuffer = buf[:0] // So we can reuse the same underlying array later
	_, err := buf.WriteTo(s.rwc)
	for _, bs := range bss {
		pool_putBytes(bs)
	}
	// TODO only include number of bytes from bs *successfully* written?
	return written, err
}

// readMsg reads a message from the stream, accounting for stream padding, and is *not* thread safe.
func (s *stream) readMsg() ([]byte, error) {
	for {
		bs, err := s.readMsgFromBuffer()
		if err != nil {
			return nil, fmt.Errorf("message error: %v", err)
		}
		return bs, err
	}
}

// Writes metadata bytes without stream padding, meant to be temporary
func (s *stream) _sendMetaBytes(metaBytes []byte) error {
	var written int
	for written < len(metaBytes) {
		n, err := s.rwc.Write(metaBytes)
		written += n
		if err != nil {
			return err
		}
	}
	return nil
}

// Reads metadata bytes without stream padding, meant to be temporary
func (s *stream) _recvMetaBytes() ([]byte, error) {
	var meta version_metadata
	frag := meta.encode()
	metaBytes := make([]byte, 0, len(frag))
	for len(metaBytes) < len(frag) {
		n, err := s.rwc.Read(frag)
		if err != nil {
			return nil, err
		}
		metaBytes = append(metaBytes, frag[:n]...)
	}
	return metaBytes, nil
}

// Reads bytes from the underlying rwc and returns 1 full message
func (s *stream) readMsgFromBuffer() ([]byte, error) {
	pad := streamMsg // Copy
	_, err := io.ReadFull(s.inputBuffer, pad[:])
	if err != nil {
		return nil, err
	} else if pad != streamMsg {
		return nil, errors.New("bad message")
	}
	lenSlice := make([]byte, 0, 10)
	// FIXME this nextByte stuff depends on wire.go format, kind of ugly to have it here
	nextByte := byte(0xff)
	for nextByte > 127 {
		nextByte, err = s.inputBuffer.ReadByte()
		if err != nil {
			return nil, err
		}
		lenSlice = append(lenSlice, nextByte)
	}
	msgLen, _ := wire_decode_uint64(lenSlice)
	if msgLen > streamMsgSize {
		return nil, errors.New("oversized message")
	}
	msg := pool_getBytes(int(msgLen + 10)) // Extra padding for up to 1 more switchPort
	msg = msg[:msgLen]
	_, err = io.ReadFull(s.inputBuffer, msg)
	return msg, err
}
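// For reference, each message on the wire is framed as the 4 "dead bits"
// above, then the payload length as a wire-format varint, then the payload
// itself; e.g. a 300-byte message starts 0xde 0xad 0xb1 0x75 0x82 0x2c. A
// small sketch of framing a single message into one buffer; frameStreamMsg is
// an illustrative helper, not part of the original file:
func frameStreamMsg(payload []byte) []byte {
	frame := make([]byte, 0, len(streamMsg)+10+len(payload))
	frame = append(frame, streamMsg[:]...)                             // padding / resync marker
	frame = append(frame, wire_encode_uint64(uint64(len(payload)))...) // length prefix
	return append(frame, payload...)                                   // message body
}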
@@ -1,647 +0,0 @@
package yggdrasil
|
|
||||||
|
|
||||||
// This part constructs a spanning tree of the network
|
|
||||||
// It routes packets based on distance on the spanning tree
|
|
||||||
// In general, this is *not* equivalent to routing on the tree
|
|
||||||
// It falls back to the tree in the worst case, but it can take shortcuts too
|
|
||||||
// This is the part that makes routing reasonably efficient on scale-free graphs
|
|
||||||
|
|
||||||
// TODO document/comment everything in a lot more detail
|
|
||||||
|
|
||||||
// TODO? use a pre-computed lookup table (python version had this)
|
|
||||||
// A little annoying to do with constant changes from backpressure
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
|
||||||
|
|
||||||
"github.com/Arceliar/phony"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
switch_timeout = time.Minute
|
|
||||||
switch_updateInterval = switch_timeout / 2
|
|
||||||
switch_throttle = switch_updateInterval / 2
|
|
||||||
)
|
|
||||||
|
|
||||||
// The switch locator represents the topology and network state dependent info about a node, minus the signatures that go with it.
|
|
||||||
// Nodes will pick the best root they see, provided that the root continues to push out updates with new timestamps.
|
|
||||||
// The coords represent a path from the root to a node.
|
|
||||||
// This path is generally part of a spanning tree, except possibly the last hop (it can loop when sending coords to your parent, but they see this and know not to use a looping path).
|
|
||||||
type switchLocator struct {
|
|
||||||
root crypto.SigPubKey
|
|
||||||
tstamp int64
|
|
||||||
coords []switchPort
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns true if the first sigPubKey has a higher TreeID.
|
|
||||||
func firstIsBetter(first, second *crypto.SigPubKey) bool {
|
|
||||||
// Higher TreeID is better
|
|
||||||
ftid := crypto.GetTreeID(first)
|
|
||||||
stid := crypto.GetTreeID(second)
|
|
||||||
for idx := 0; idx < len(ftid); idx++ {
|
|
||||||
if ftid[idx] == stid[idx] {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return ftid[idx] > stid[idx]
|
|
||||||
}
|
|
||||||
// Edge case, when comparing identical IDs
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns a copy of the locator which can safely be mutated.
|
|
||||||
func (l *switchLocator) clone() switchLocator {
|
|
||||||
// Used to create a deep copy for use in messages
|
|
||||||
// Copy required because we need to mutate coords before sending
|
|
||||||
// (By appending the port from us to the destination)
|
|
||||||
loc := *l
|
|
||||||
loc.coords = make([]switchPort, len(l.coords), len(l.coords)+1)
|
|
||||||
copy(loc.coords, l.coords)
|
|
||||||
return loc
|
|
||||||
}
|
|
||||||
|
|
||||||
// Gets the distance a locator is from the provided destination coords, with the coords provided in []byte format (used to compress integers sent over the wire).
|
|
||||||
func (l *switchLocator) dist(dest []byte) int {
|
|
||||||
// Returns distance (on the tree) from these coords
|
|
||||||
offset := 0
|
|
||||||
fdc := 0
|
|
||||||
for {
|
|
||||||
if fdc >= len(l.coords) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
coord, length := wire_decode_uint64(dest[offset:])
|
|
||||||
if length == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if l.coords[fdc] != switchPort(coord) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
fdc++
|
|
||||||
offset += length
|
|
||||||
}
|
|
||||||
dist := len(l.coords[fdc:])
|
|
||||||
for {
|
|
||||||
_, length := wire_decode_uint64(dest[offset:])
|
|
||||||
if length == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
dist++
|
|
||||||
offset += length
|
|
||||||
}
|
|
||||||
return dist
|
|
||||||
}
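// Worked example of the distance metric above: if our locator's coords are
// [1 2 3] and the destination's coords decode to [1 2 4 5], the shared prefix
// is [1 2], so the distance is (3-2)+(4-2) = 3 hops over the tree. A minimal
// stand-alone version of the same arithmetic as dist above (and ldist below),
// operating on already-decoded ports; treeDist is an illustrative helper, not
// part of the original file:
func treeDist(a, b []switchPort) int {
	common := 0
	for common < len(a) && common < len(b) && a[common] == b[common] {
		common++ // length of the shared prefix, i.e. the closest common ancestor
	}
	return (len(a) - common) + (len(b) - common)
}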
|
|
||||||
|
|
||||||
func (l *switchLocator) ldist(sl *switchLocator) int {
|
|
||||||
lca := -1
|
|
||||||
for idx := 0; idx < len(l.coords); idx++ {
|
|
||||||
if idx >= len(sl.coords) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if l.coords[idx] != sl.coords[idx] {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
lca = idx
|
|
||||||
}
|
|
||||||
return len(l.coords) + len(sl.coords) - 2*(lca+1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Gets coords in wire encoded format, with *no* length prefix.
|
|
||||||
func (l *switchLocator) getCoords() []byte {
|
|
||||||
bs := make([]byte, 0, len(l.coords))
|
|
||||||
for _, coord := range l.coords {
|
|
||||||
c := wire_encode_uint64(uint64(coord))
|
|
||||||
bs = append(bs, c...)
|
|
||||||
}
|
|
||||||
return bs
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns true if this locator represents an ancestor of the locator given as an argument.
|
|
||||||
// Ancestor means that it's the parent node, or the parent of parent, and so on...
|
|
||||||
func (x *switchLocator) isAncestorOf(y *switchLocator) bool {
|
|
||||||
if x.root != y.root {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if len(x.coords) > len(y.coords) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for idx := range x.coords {
|
|
||||||
if x.coords[idx] != y.coords[idx] {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Information about a peer, used by the switch to build the tree and eventually make routing decisions.
|
|
||||||
type peerInfo struct {
|
|
||||||
key crypto.SigPubKey // ID of this peer
|
|
||||||
locator switchLocator // Should be able to respond with signatures upon request
|
|
||||||
degree uint64 // Self-reported degree
|
|
||||||
time time.Time // Time this node was last seen
|
|
||||||
port switchPort // Interface number of this peer
|
|
||||||
msg switchMsg // The wire switchMsg used
|
|
||||||
readBlock bool // True if the link notified us of a read that blocked too long
|
|
||||||
writeBlock bool // True if the link notified us of a write that blocked too long
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pinfo *peerInfo) blocked() bool {
|
|
||||||
return pinfo.readBlock || pinfo.writeBlock
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is just a uint64 with a named type for clarity reasons.
|
|
||||||
type switchPort uint64
|
|
||||||
|
|
||||||
// This is the subset of the information about a peer needed to make routing decisions, and it is stored separately in an atomically accessed table, which gets hammered in the "hot loop" of the routing logic (see: peer.handleTraffic in peers.go).
|
|
||||||
type tableElem struct {
|
|
||||||
port switchPort
|
|
||||||
locator switchLocator
|
|
||||||
time time.Time
|
|
||||||
next map[switchPort]*tableElem
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is the subset of the information about all peers needed to make routing decisions, and it is stored separately in an atomically accessed table, which gets hammered in the "hot loop" of the routing logic (see: peer.handleTraffic in peers.go).
|
|
||||||
type lookupTable struct {
|
|
||||||
self switchLocator
|
|
||||||
elems map[switchPort]tableElem // all switch peers, just for sanity checks + API/debugging
|
|
||||||
_start tableElem // used for lookups
|
|
||||||
_msg switchMsg
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is switch information which is mutable and needs to be modified by other goroutines, but is not accessed atomically.
|
|
||||||
// Use the switchTable functions to access it safely using the RWMutex for synchronization.
|
|
||||||
type switchData struct {
|
|
||||||
// All data that's mutable and used by exported Table methods
|
|
||||||
// To be read/written with atomic.Value Store/Load calls
|
|
||||||
locator switchLocator
|
|
||||||
peers map[switchPort]peerInfo
|
|
||||||
msg *switchMsg
|
|
||||||
}
|
|
||||||
|
|
||||||
// All the information stored by the switch.
|
|
||||||
type switchTable struct {
|
|
||||||
core *Core
|
|
||||||
key crypto.SigPubKey // Our own key
|
|
||||||
phony.Inbox // Owns the below
|
|
||||||
time time.Time // Time when locator.tstamp was last updated
|
|
||||||
drop map[crypto.SigPubKey]int64 // Tstamp associated with a dropped root
|
|
||||||
parent switchPort // Port of whatever peer is our parent, or self if we're root
|
|
||||||
data switchData //
|
|
||||||
}
|
|
||||||
|
|
||||||
// Minimum allowed total size of switch queues.
|
|
||||||
const SwitchQueueTotalMinSize = 4 * 1024 * 1024
|
|
||||||
|
|
||||||
// Initializes the switchTable struct.
|
|
||||||
func (t *switchTable) init(core *Core) {
|
|
||||||
now := time.Now()
|
|
||||||
t.core = core
|
|
||||||
t.key = t.core.sigPub
|
|
||||||
locator := switchLocator{root: t.key, tstamp: now.Unix()}
|
|
||||||
peers := make(map[switchPort]peerInfo)
|
|
||||||
t.data = switchData{locator: locator, peers: peers}
|
|
||||||
t.drop = make(map[crypto.SigPubKey]int64)
|
|
||||||
phony.Block(t, t._updateTable)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *switchTable) reconfigure() {
|
|
||||||
// This is where reconfiguration would go, if we had anything useful to do.
|
|
||||||
t.core.links.reconfigure()
|
|
||||||
t.core.peers.reconfigure()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Regular maintenance to possibly timeout/reset the root and similar.
|
|
||||||
func (t *switchTable) doMaintenance(from phony.Actor) {
|
|
||||||
t.Act(from, func() {
|
|
||||||
// Periodic maintenance work to keep things internally consistent
|
|
||||||
t._cleanRoot()
|
|
||||||
t._cleanDropped()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Updates the root periodically if it is ourself, or promotes ourself to root if we're better than the current root or if the current root has timed out.
|
|
||||||
func (t *switchTable) _cleanRoot() {
|
|
||||||
// TODO rethink how this is done?...
|
|
||||||
// Get rid of the root if it looks like it's timed out
|
|
||||||
now := time.Now()
|
|
||||||
doUpdate := false
|
|
||||||
if now.Sub(t.time) > switch_timeout {
|
|
||||||
dropped := t.data.peers[t.parent]
|
|
||||||
dropped.time = t.time
|
|
||||||
t.drop[t.data.locator.root] = t.data.locator.tstamp
|
|
||||||
doUpdate = true
|
|
||||||
}
|
|
||||||
// Or, if we're better than our root, root ourself
|
|
||||||
if firstIsBetter(&t.key, &t.data.locator.root) {
|
|
||||||
doUpdate = true
|
|
||||||
}
|
|
||||||
// Or, if we are the root, possibly update our timestamp
|
|
||||||
if t.data.locator.root == t.key &&
|
|
||||||
now.Sub(t.time) > switch_updateInterval {
|
|
||||||
doUpdate = true
|
|
||||||
}
|
|
||||||
if doUpdate {
|
|
||||||
t.parent = switchPort(0)
|
|
||||||
t.time = now
|
|
||||||
if t.data.locator.root != t.key {
|
|
||||||
defer t.core.router.reset(nil)
|
|
||||||
}
|
|
||||||
t.data.locator = switchLocator{root: t.key, tstamp: now.Unix()}
|
|
||||||
t._updateTable() // updates base copy of switch msg in lookupTable
|
|
||||||
t.core.peers.sendSwitchMsgs(t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Blocks and, if possible, unparents a peer
|
|
||||||
func (t *switchTable) blockPeer(from phony.Actor, port switchPort, isWrite bool) {
|
|
||||||
t.Act(from, func() {
|
|
||||||
peer, isIn := t.data.peers[port]
|
|
||||||
switch {
|
|
||||||
case isIn && !isWrite && !peer.readBlock:
|
|
||||||
peer.readBlock = true
|
|
||||||
case isIn && isWrite && !peer.writeBlock:
|
|
||||||
peer.writeBlock = true
|
|
||||||
default:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t.data.peers[port] = peer
|
|
||||||
defer t._updateTable()
|
|
||||||
if port != t.parent {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t.parent = 0
|
|
||||||
for _, info := range t.data.peers {
|
|
||||||
if info.port == port {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
t._handleMsg(&info.msg, info.port, true)
|
|
||||||
}
|
|
||||||
t._handleMsg(&peer.msg, peer.port, true)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *switchTable) unblockPeer(from phony.Actor, port switchPort, isWrite bool) {
|
|
||||||
t.Act(from, func() {
|
|
||||||
peer, isIn := t.data.peers[port]
|
|
||||||
switch {
|
|
||||||
case isIn && !isWrite && peer.readBlock:
|
|
||||||
peer.readBlock = false
|
|
||||||
case isIn && isWrite && peer.writeBlock:
|
|
||||||
peer.writeBlock = false
|
|
||||||
default:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t.data.peers[port] = peer
|
|
||||||
t._updateTable()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Removes a peer.
|
|
||||||
// Must be called by the router actor with a lambda that calls this.
|
|
||||||
// If the removed peer was this node's parent, it immediately tries to find a new parent.
|
|
||||||
func (t *switchTable) forgetPeer(from phony.Actor, port switchPort) {
|
|
||||||
t.Act(from, func() {
|
|
||||||
delete(t.data.peers, port)
|
|
||||||
defer t._updateTable()
|
|
||||||
if port != t.parent {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
t.parent = 0
|
|
||||||
for _, info := range t.data.peers {
|
|
||||||
t._handleMsg(&info.msg, info.port, true)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Dropped is a list of roots that are better than the current root, but stopped sending new timestamps.
|
|
||||||
// If we switch to a new root, and that root is better than an old root that previously timed out, then we can clean up the old dropped root infos.
|
|
||||||
// This function is called periodically to do that cleanup.
|
|
||||||
func (t *switchTable) _cleanDropped() {
|
|
||||||
// TODO? only call this after root changes, not periodically
|
|
||||||
for root := range t.drop {
|
|
||||||
if !firstIsBetter(&root, &t.data.locator.root) {
|
|
||||||
delete(t.drop, root)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A switchMsg contains the root node's sig key, timestamp, and signed per-hop information about a path from the root node to some other node in the network.
|
|
||||||
// This is exchanged with peers to construct the spanning tree.
|
|
||||||
// A subset of this information, excluding the signatures, is used to construct locators that are used elsewhere in the code.
|
|
||||||
type switchMsg struct {
|
|
||||||
Root crypto.SigPubKey
|
|
||||||
TStamp int64
|
|
||||||
Hops []switchMsgHop
|
|
||||||
}
|
|
||||||
|
|
||||||
// This represents the signed information about the path leading from the root to the Next node, via the Port specified here.
|
|
||||||
type switchMsgHop struct {
|
|
||||||
Port switchPort
|
|
||||||
Next crypto.SigPubKey
|
|
||||||
Sig crypto.SigBytes
|
|
||||||
}
|
|
||||||
|
|
||||||
// This returns a *switchMsg that points to a copy of this node's current switchMsg, which can safely have additional information appended to Hops and sent to a peer.
|
|
||||||
func (t *switchTable) _getMsg() *switchMsg {
|
|
||||||
if t.parent == 0 {
|
|
||||||
return &switchMsg{Root: t.key, TStamp: t.data.locator.tstamp}
|
|
||||||
} else if parent, isIn := t.data.peers[t.parent]; isIn {
|
|
||||||
msg := parent.msg
|
|
||||||
msg.Hops = append([]switchMsgHop(nil), msg.Hops...)
|
|
||||||
return &msg
|
|
||||||
} else {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *lookupTable) getMsg() *switchMsg {
|
|
||||||
msg := t._msg
|
|
||||||
msg.Hops = append([]switchMsgHop(nil), t._msg.Hops...)
|
|
||||||
return &msg
|
|
||||||
}
|
|
||||||
|
|
||||||
// This function checks that the root information in a switchMsg is OK.
|
|
||||||
// In particular, that the root is better, or else the same as the current root but with a good timestamp, and that this root+timestamp haven't been dropped due to timeout.
|
|
||||||
func (t *switchTable) _checkRoot(msg *switchMsg) bool {
|
|
||||||
// returns false if it's a dropped root, not a better root, or has an older timestamp
|
|
||||||
// returns true otherwise
|
|
||||||
// used elsewhere to keep inserting peers into the dht only if root info is OK
|
|
||||||
dropTstamp, isIn := t.drop[msg.Root]
|
|
||||||
switch {
|
|
||||||
case isIn && dropTstamp >= msg.TStamp:
|
|
||||||
return false
|
|
||||||
case firstIsBetter(&msg.Root, &t.data.locator.root):
|
|
||||||
return true
|
|
||||||
case t.data.locator.root != msg.Root:
|
|
||||||
return false
|
|
||||||
case t.data.locator.tstamp > msg.TStamp:
|
|
||||||
return false
|
|
||||||
default:
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// This updates the switch with information about a peer.
|
|
||||||
// Then the tricky part, it decides if it should update our own locator as a result.
|
|
||||||
// That happens if this node is already our parent, or is advertising a better root, or is advertising a better path to the same root, etc...
|
|
||||||
// There are a lot of very delicate order-sensitive checks here, so it's best to just read the code if you need to understand what it's doing.
// It's very important not to change the order of the statements in the switch cases below unless you're absolutely sure that it's safe, including safe if used alongside nodes that used the previous order.
|
|
||||||
// Set the third arg to true if you're reprocessing an old message, e.g. to find a new parent after one disconnects, to avoid updating some timing related things.
|
|
||||||
func (t *switchTable) _handleMsg(msg *switchMsg, fromPort switchPort, reprocessing bool) {
|
|
||||||
// TODO directly use a switchMsg instead of switchMessage + sigs
|
|
||||||
now := time.Now()
|
|
||||||
// Set up the sender peerInfo
|
|
||||||
var sender peerInfo
|
|
||||||
sender.locator.root = msg.Root
|
|
||||||
sender.locator.tstamp = msg.TStamp
|
|
||||||
prevKey := msg.Root
|
|
||||||
for _, hop := range msg.Hops {
|
|
||||||
// Build locator
|
|
||||||
sender.locator.coords = append(sender.locator.coords, hop.Port)
|
|
||||||
sender.key = prevKey
|
|
||||||
prevKey = hop.Next
|
|
||||||
}
|
|
||||||
if sender.key == t.key {
|
|
||||||
return // Don't peer with ourself via different interfaces
|
|
||||||
}
|
|
||||||
sender.msg = *msg
|
|
||||||
sender.port = fromPort
|
|
||||||
sender.time = now
|
|
||||||
// Decide what to do
|
|
||||||
equiv := func(x *switchLocator, y *switchLocator) bool {
|
|
||||||
if x.root != y.root {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if len(x.coords) != len(y.coords) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for idx := range x.coords {
|
|
||||||
if x.coords[idx] != y.coords[idx] {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
doUpdate := false
|
|
||||||
oldSender := t.data.peers[fromPort]
|
|
||||||
if !equiv(&sender.locator, &oldSender.locator) {
|
|
||||||
doUpdate = true
|
|
||||||
}
|
|
||||||
if reprocessing {
|
|
||||||
sender.time = oldSender.time
|
|
||||||
sender.readBlock = oldSender.readBlock
|
|
||||||
sender.writeBlock = oldSender.writeBlock
|
|
||||||
}
|
|
||||||
if sender.blocked() != oldSender.blocked() {
|
|
||||||
doUpdate = true
|
|
||||||
}
|
|
||||||
// Update sender
|
|
||||||
t.data.peers[fromPort] = sender
|
|
||||||
// Decide if we should also update our root info to make the sender our parent
|
|
||||||
updateRoot := false
|
|
||||||
oldParent, isIn := t.data.peers[t.parent]
|
|
||||||
noParent := !isIn
|
|
||||||
noLoop := func() bool {
|
|
||||||
for idx := 0; idx < len(msg.Hops)-1; idx++ {
|
|
||||||
if msg.Hops[idx].Next == t.core.sigPub {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if sender.locator.root == t.core.sigPub {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}()
|
|
||||||
dropTstamp, isIn := t.drop[sender.locator.root]
|
|
||||||
// Decide if we need to update info about the root or change parents.
|
|
||||||
switch {
|
|
||||||
case !noLoop:
|
|
||||||
// This route loops, so we can't use the sender as our parent.
|
|
||||||
case isIn && dropTstamp >= sender.locator.tstamp:
|
|
||||||
// This is a known root with a timestamp older than a known timeout, so we can't trust it to be a new announcement.
|
|
||||||
case firstIsBetter(&sender.locator.root, &t.data.locator.root):
|
|
||||||
// This is a better root than what we're currently using, so we should update.
|
|
||||||
updateRoot = true
|
|
||||||
case t.data.locator.root != sender.locator.root:
|
|
||||||
// This is not the same root, and it's apparently not better (from the above), so we should ignore it.
|
|
||||||
case t.data.locator.tstamp > sender.locator.tstamp:
|
|
||||||
// This timestamp is older than the most recently seen one from this root, so we should ignore it.
|
|
||||||
case noParent:
|
|
||||||
// We currently have no working parent, and at this point in the switch statement, anything is better than nothing.
|
|
||||||
updateRoot = true
|
|
||||||
case !sender.blocked() && oldParent.blocked():
|
|
||||||
// Replace a blocked parent
|
|
||||||
updateRoot = true
|
|
||||||
case reprocessing && sender.blocked() && !oldParent.blocked():
|
|
||||||
// Don't replace an unblocked parent when reprocessing
|
|
||||||
case sender.locator.tstamp > t.data.locator.tstamp:
|
|
||||||
// The timestamp was updated, so we need to update locally and send to our peers.
|
|
||||||
updateRoot = true
|
|
||||||
}
|
|
||||||
// Note that we depend on the LIFO order of the stack of defers here...
|
|
||||||
if updateRoot {
|
|
||||||
doUpdate = true
|
|
||||||
if !equiv(&sender.locator, &t.data.locator) {
|
|
||||||
defer t.core.router.reset(t)
|
|
||||||
}
|
|
||||||
if t.data.locator.tstamp != sender.locator.tstamp {
|
|
||||||
t.time = now
|
|
||||||
}
|
|
||||||
t.data.locator = sender.locator
|
|
||||||
t.parent = sender.port
|
|
||||||
defer t.core.peers.sendSwitchMsgs(t)
|
|
||||||
}
|
|
||||||
if doUpdate {
|
|
||||||
t._updateTable()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
// The rest of these are related to the switch lookup table
|
|
||||||
|
|
||||||
func (t *switchTable) _updateTable() {
|
|
||||||
newTable := lookupTable{
|
|
||||||
self: t.data.locator.clone(),
|
|
||||||
elems: make(map[switchPort]tableElem, len(t.data.peers)),
|
|
||||||
_msg: *t._getMsg(),
|
|
||||||
}
|
|
||||||
newTable._init()
|
|
||||||
for _, pinfo := range t.data.peers {
|
|
||||||
if pinfo.blocked() || pinfo.locator.root != newTable.self.root {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
loc := pinfo.locator.clone()
|
|
||||||
loc.coords = loc.coords[:len(loc.coords)-1] // Remove the them->self link
|
|
||||||
elem := tableElem{
|
|
||||||
locator: loc,
|
|
||||||
port: pinfo.port,
|
|
||||||
time: pinfo.time,
|
|
||||||
}
|
|
||||||
newTable._insert(&elem)
|
|
||||||
newTable.elems[pinfo.port] = elem
|
|
||||||
}
|
|
||||||
t.core.peers.updateTables(t, &newTable)
|
|
||||||
t.core.router.updateTable(t, &newTable)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *lookupTable) _init() {
|
|
||||||
// WARNING: this relies on the convention that the self port is 0
|
|
||||||
self := tableElem{locator: t.self} // create self elem
|
|
||||||
t._start = self // initialize _start to self
|
|
||||||
t._insert(&self) // insert self into table
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *lookupTable) _insert(elem *tableElem) {
|
|
||||||
// This is a helper that should only be run during _updateTable
|
|
||||||
here := &t._start
|
|
||||||
for idx := 0; idx <= len(elem.locator.coords); idx++ {
|
|
||||||
refLoc := here.locator
|
|
||||||
refLoc.coords = refLoc.coords[:idx] // Note that this is length idx (starts at length 0)
|
|
||||||
oldDist := refLoc.ldist(&here.locator)
|
|
||||||
newDist := refLoc.ldist(&elem.locator)
|
|
||||||
var update bool
|
|
||||||
switch {
|
|
||||||
case newDist < oldDist: // new elem is closer to this point in the tree
|
|
||||||
update = true
|
|
||||||
case newDist > oldDist: // new elem is too far
|
|
||||||
case elem.locator.tstamp > refLoc.tstamp: // new elem has a closer timestamp
|
|
||||||
update = true
|
|
||||||
case elem.locator.tstamp < refLoc.tstamp: // new elem's timestamp is too old
|
|
||||||
case elem.time.Before(here.time): // same dist+timestamp, but new elem delivered it faster
|
|
||||||
update = true
|
|
||||||
}
|
|
||||||
if update {
|
|
||||||
here.port = elem.port
|
|
||||||
here.locator = elem.locator
|
|
||||||
here.time = elem.time
|
|
||||||
// Problem: here is a value, so this doesn't actually update anything...
|
|
||||||
}
|
|
||||||
if idx < len(elem.locator.coords) {
|
|
||||||
if here.next == nil {
|
|
||||||
here.next = make(map[switchPort]*tableElem)
|
|
||||||
}
|
|
||||||
var next *tableElem
|
|
||||||
var ok bool
|
|
||||||
if next, ok = here.next[elem.locator.coords[idx]]; !ok {
|
|
||||||
nextVal := *elem
|
|
||||||
next = &nextVal
|
|
||||||
here.next[next.locator.coords[idx]] = next
|
|
||||||
}
|
|
||||||
here = next
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Starts the switch worker
|
|
||||||
func (t *switchTable) start() error {
|
|
||||||
t.core.log.Infoln("Starting switch")
|
|
||||||
// There's actually nothing to do to start it...
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *lookupTable) lookup(ports []switchPort) switchPort {
|
|
||||||
here := &t._start
|
|
||||||
for idx := range ports {
|
|
||||||
port := ports[idx]
|
|
||||||
if next, ok := here.next[port]; ok {
|
|
||||||
here = next
|
|
||||||
} else {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return here.port
|
|
||||||
}
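// In other words, the lookup table is a trie keyed on coords: lookup walks
// the next maps for as long as the destination's ports keep matching, then
// forwards via the port stored at the deepest node it reached. For example,
// with destination ports [1 2 5] and entries only for [1] and [1 2], the walk
// stops at [1 2] and the packet leaves through that entry's port. A small
// sketch tying this to wire-format coords; exampleLookup is an illustrative
// helper, not part of the original file:
func exampleLookup(t *lookupTable, coords []byte) switchPort {
	// Decode the wire-format coords into ports, then walk the trie.
	return t.lookup(switch_getPorts(coords))
}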
|
|
||||||
|
|
||||||
func switch_getPorts(coords []byte) []switchPort {
|
|
||||||
var ports []switchPort
|
|
||||||
var offset int
|
|
||||||
for offset < len(coords) {
|
|
||||||
port, l := wire_decode_uint64(coords[offset:])
|
|
||||||
offset += l
|
|
||||||
ports = append(ports, switchPort(port))
|
|
||||||
}
|
|
||||||
return ports
|
|
||||||
}
|
|
||||||
|
|
||||||
func switch_reverseCoordBytes(coords []byte) []byte {
|
|
||||||
a := switch_getPorts(coords)
|
|
||||||
for i := len(a)/2 - 1; i >= 0; i-- {
|
|
||||||
opp := len(a) - 1 - i
|
|
||||||
a[i], a[opp] = a[opp], a[i]
|
|
||||||
}
|
|
||||||
var reversed []byte
|
|
||||||
for _, sPort := range a {
|
|
||||||
reversed = wire_put_uint64(uint64(sPort), reversed)
|
|
||||||
}
|
|
||||||
return reversed
|
|
||||||
}
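// Worked example for the helpers above: the ports [1 300] encode to the bytes
// 0x01 0x82 0x2c, and switch_reverseCoordBytes turns that into 0x82 0x2c 0x01,
// i.e. the ports [300 1]; the reversal happens on whole ports rather than raw
// bytes, which is what makes an rpath usable as a return route. A tiny
// sanity-check sketch; exampleReversal is illustrative, not part of the file:
func exampleReversal() bool {
	forward := wire_put_uint64(300, wire_encode_uint64(1)) // ports [1 300]
	reversed := switch_reverseCoordBytes(forward)          // ports [300 1]
	ports := switch_getPorts(reversed)
	return len(ports) == 2 && ports[0] == 300 && ports[1] == 1
}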
|
|
||||||
|
|
||||||
func (t *lookupTable) isDescendant(ports []switchPort) bool {
|
|
||||||
// Note that this returns true for anyone in the subtree that starts at us
|
|
||||||
// That includes ourself, so we are our own descendant by this logic...
|
|
||||||
if len(t.self.coords) >= len(ports) {
|
|
||||||
// Our coords are longer, so they can't be our descendant
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for idx := range t.self.coords {
|
|
||||||
if ports[idx] != t.self.coords[idx] {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *lookupTable) getOffset(ports []switchPort) uint64 {
|
|
||||||
// If they're our descendant, this returns the length of our coords, used as an offset for source routing
|
|
||||||
// If they're not our descendant, this returns 0
|
|
||||||
var offset uint64
|
|
||||||
for idx := range t.self.coords {
|
|
||||||
if idx < len(ports) && ports[idx] == t.self.coords[idx] {
|
|
||||||
offset += 1
|
|
||||||
} else {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return offset
|
|
||||||
}
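// Worked example: if our own coords are [1 2] and the destination's ports are
// [1 2 5 3], then the destination is in our subtree, isDescendant returns
// true, and getOffset returns 2, i.e. the sender can skip the first two ports
// and source-route starting from port 5 at this node. A small sketch tying the
// two together; routeFromHere is illustrative, not part of the original file:
func routeFromHere(t *lookupTable, ports []switchPort) []switchPort {
	if !t.isDescendant(ports) {
		return nil // not below us in the tree, fall back to normal forwarding
	}
	return ports[t.getOffset(ports):] // remaining hops, starting with our next hop
}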
|
|
@@ -387,8 +387,6 @@ func (t *tcp) handler(sock net.Conn, incoming bool, options tcpOptions) chan str
 			}
 			upgraded = true
 		}
 	}
-	stream := stream{}
-	stream.init(sock)
 	var name, proto, local, remote string
 	if options.socksProxyAddr != "" {
 		name = "socks://" + sock.RemoteAddr().String() + "/" + options.socksPeerAddr
@@ -423,7 +421,7 @@ func (t *tcp) handler(sock net.Conn, incoming bool, options tcpOptions) chan str
 		}
 	}
 	force := net.ParseIP(strings.Split(remote, "%")[0]).IsLinkLocalUnicast()
-	link, err := t.links.create(&stream, name, proto, local, remote, incoming, force, options.linkOptions)
+	link, err := t.links.create(sock, name, proto, local, remote, incoming, force, options.linkOptions)
 	if err != nil {
 		t.links.core.log.Println(err)
 		panic(err)
@@ -34,7 +34,7 @@ func (t *tcptls) init(tcp *tcp) {
 	}

 	edpriv := make(ed25519.PrivateKey, ed25519.PrivateKeySize)
-	copy(edpriv[:], tcp.links.core.sigPriv[:])
+	copy(edpriv[:], tcp.links.core.secret[:])

 	certBuf := &bytes.Buffer{}

@@ -42,7 +42,7 @@ func (t *tcptls) init(tcp *tcp) {
 	pubtemp := x509.Certificate{
 		SerialNumber: big.NewInt(1),
 		Subject: pkix.Name{
-			CommonName: hex.EncodeToString(tcp.links.core.sigPub[:]),
+			CommonName: hex.EncodeToString(tcp.links.core.public[:]),
 		},
 		NotBefore: time.Now(),
 		NotAfter:  time.Now().Add(time.Hour * 24 * 365),
@@ -4,19 +4,17 @@ package yggdrasil
 // Used in the initial connection setup and key exchange
 // Some of this could arguably go in wire.go instead

-import "github.com/yggdrasil-network/yggdrasil-go/src/crypto"
+import "crypto/ed25519"

 // This is the version-specific metadata exchanged at the start of a connection.
 // It must always begin with the 4 bytes "meta" and a wire formatted uint64 major version number.
 // The current version also includes a minor version number, and the box/sig/link keys that need to be exchanged to open a connection.
 type version_metadata struct {
 	meta [4]byte
-	ver  uint64 // 1 byte in this version
+	ver  uint8 // 1 byte in this version
 	// Everything after this point potentially depends on the version number, and is subject to change in future versions
-	minorVer uint64 // 1 byte in this version
-	box      crypto.BoxPubKey
-	sig      crypto.SigPubKey
-	link     crypto.BoxPubKey
+	minorVer uint8 // 1 byte in this version
+	key      ed25519.PublicKey
 }

 // Gets a base metadata with no keys set, but with the correct version numbers.
@@ -30,12 +28,10 @@ func version_getBaseMetadata() version_metadata {

 // Gets the length of the metadata for this version, used to know how many bytes to read from the start of a connection.
 func version_getMetaLength() (mlen int) {
 	mlen += 4 // meta
 	mlen++    // ver, as long as it's < 127, which it is in this version
 	mlen++    // minorVer, as long as it's < 127, which it is in this version
-	mlen += crypto.BoxPubKeyLen // box
-	mlen += crypto.SigPubKeyLen // sig
-	mlen += crypto.BoxPubKeyLen // link
+	mlen += ed25519.PublicKeySize // key
 	return
 }

@@ -43,11 +39,9 @@ func version_getMetaLength() (mlen int) {
 func (m *version_metadata) encode() []byte {
 	bs := make([]byte, 0, version_getMetaLength())
 	bs = append(bs, m.meta[:]...)
-	bs = append(bs, wire_encode_uint64(m.ver)...)
-	bs = append(bs, wire_encode_uint64(m.minorVer)...)
-	bs = append(bs, m.box[:]...)
-	bs = append(bs, m.sig[:]...)
-	bs = append(bs, m.link[:]...)
+	bs = append(bs, m.ver)
+	bs = append(bs, m.minorVer)
+	bs = append(bs, m.key[:]...)
 	if len(bs) != version_getMetaLength() {
 		panic("Inconsistent metadata length")
 	}
@@ -56,20 +50,14 @@ func (m *version_metadata) encode() []byte {

 // Decodes version metadata from its wire format into the struct.
 func (m *version_metadata) decode(bs []byte) bool {
-	switch {
-	case !wire_chop_slice(m.meta[:], &bs):
-		return false
-	case !wire_chop_uint64(&m.ver, &bs):
-		return false
-	case !wire_chop_uint64(&m.minorVer, &bs):
-		return false
-	case !wire_chop_slice(m.box[:], &bs):
-		return false
-	case !wire_chop_slice(m.sig[:], &bs):
-		return false
-	case !wire_chop_slice(m.link[:], &bs):
+	if len(bs) != version_getMetaLength() {
 		return false
 	}
+	offset := 0
+	offset += copy(m.meta[:], bs[offset:])
+	m.ver, offset = bs[offset], offset+1
+	m.minorVer, offset = bs[offset], offset+1
+	m.key = append([]byte(nil), bs[offset:]...)
 	return true
 }
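// With the ed25519 rewrite the metadata becomes a fixed 38 bytes: the 4-byte
// "meta" magic, one major and one minor version byte, then the 32-byte public
// key. A small round-trip sketch, assuming version_getBaseMetadata still fills
// in the magic and version numbers as before; the roundTripMeta name is
// illustrative, not part of this change:
func roundTripMeta(key ed25519.PublicKey) bool {
	m := version_getBaseMetadata()
	m.key = key // 32 bytes, so encode() produces exactly version_getMetaLength() bytes
	var decoded version_metadata
	return decoded.decode(m.encode()) && decoded.ver == m.ver
}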
@@ -1,521 +0,0 @@
package yggdrasil
|
|
||||||
|
|
||||||
// Wire formatting tools
|
|
||||||
// These are all ugly and probably not very secure
|
|
||||||
|
|
||||||
// TODO clean up unused/commented code, and add better comments to whatever is left
|
|
||||||
|
|
||||||
// Packet types, as wire_encode_uint64(type) at the start of each packet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
wire_Traffic = iota // data being routed somewhere, handle for crypto
|
|
||||||
wire_ProtocolTraffic // protocol traffic, pub keys for crypto
|
|
||||||
wire_LinkProtocolTraffic // link proto traffic, pub keys for crypto
|
|
||||||
wire_SwitchMsg // inside link protocol traffic header
|
|
||||||
wire_SessionPing // inside protocol traffic header
|
|
||||||
wire_SessionPong // inside protocol traffic header
|
|
||||||
wire_DHTLookupRequest // inside protocol traffic header
|
|
||||||
wire_DHTLookupResponse // inside protocol traffic header
|
|
||||||
wire_NodeInfoRequest // inside protocol traffic header
|
|
||||||
wire_NodeInfoResponse // inside protocol traffic header
|
|
||||||
)
|
|
||||||
|
|
||||||
// Calls wire_put_uint64 on a nil slice.
|
|
||||||
func wire_encode_uint64(elem uint64) []byte {
|
|
||||||
return wire_put_uint64(elem, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode uint64 using a variable length scheme.
|
|
||||||
// Similar to binary.Uvarint, but big-endian.
|
|
||||||
func wire_put_uint64(e uint64, out []byte) []byte {
|
|
||||||
var b [10]byte
|
|
||||||
i := len(b) - 1
|
|
||||||
b[i] = byte(e & 0x7f)
|
|
||||||
for e >>= 7; e != 0; e >>= 7 {
|
|
||||||
i--
|
|
||||||
b[i] = byte(e | 0x80)
|
|
||||||
}
|
|
||||||
return append(out, b[i:]...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns the length of a wire encoded uint64 of this value.
|
|
||||||
func wire_uint64_len(elem uint64) int {
|
|
||||||
l := 1
|
|
||||||
for e := elem >> 7; e > 0; e >>= 7 {
|
|
||||||
l++
|
|
||||||
}
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode uint64 from a []byte slice.
|
|
||||||
// Returns the decoded uint64 and the number of bytes used.
|
|
||||||
func wire_decode_uint64(bs []byte) (uint64, int) {
|
|
||||||
length := 0
|
|
||||||
elem := uint64(0)
|
|
||||||
for _, b := range bs {
|
|
||||||
elem <<= 7
|
|
||||||
elem |= uint64(b & 0x7f)
|
|
||||||
length++
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return elem, length
|
|
||||||
}
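// Worked examples of the encoding above: values 0..127 fit in one byte, 128
// encodes as [0x81 0x00] (the high bit marks "more bytes follow"), and 300
// encodes as [0x82 0x2c]. A tiny round-trip check; varintRoundTrip is an
// illustrative helper, not part of the original file:
func varintRoundTrip(v uint64) bool {
	bs := wire_encode_uint64(v)
	got, n := wire_decode_uint64(bs)
	return got == v && n == len(bs)
}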
|
|
||||||
|
|
||||||
// Converts an int64 into uint64 so it can be written to the wire.
|
|
||||||
// Non-negative integers are mapped to even integers: 0 -> 0, 1 -> 2, etc.
|
|
||||||
// Negative integers are mapped to odd integers: -1 -> 1, -2 -> 3, etc.
|
|
||||||
// This means the least significant bit is a sign bit.
|
|
||||||
// This is known as zigzag encoding.
|
|
||||||
func wire_intToUint(i int64) uint64 {
|
|
||||||
// signed arithmetic shift
|
|
||||||
return uint64((i >> 63) ^ (i << 1))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Converts uint64 back to int64, generally when being read from the wire.
|
|
||||||
func wire_intFromUint(u uint64) int64 {
|
|
||||||
// non-arithmetic shift
|
|
||||||
return int64((u >> 1) ^ -(u & 1))
|
|
||||||
}
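// Worked examples of the zigzag mapping: 0 <-> 0, -1 <-> 1, 1 <-> 2, -2 <-> 3,
// and so on, so small magnitudes stay small on the wire regardless of sign. A
// tiny round-trip check; zigzagRoundTrip is an illustrative helper, not part
// of the original file:
func zigzagRoundTrip(i int64) bool {
	return wire_intFromUint(wire_intToUint(i)) == i
}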
|
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
// Takes coords, returns coords prefixed with encoded coord length.
|
|
||||||
func wire_encode_coords(coords []byte) []byte {
|
|
||||||
coordLen := wire_encode_uint64(uint64(len(coords)))
|
|
||||||
bs := make([]byte, 0, len(coordLen)+len(coords))
|
|
||||||
bs = append(bs, coordLen...)
|
|
||||||
bs = append(bs, coords...)
|
|
||||||
return bs
|
|
||||||
}
|
|
||||||
|
|
||||||
// Puts a length prefix and the coords into bs, returns the wire formatted coords.
|
|
||||||
// Useful in hot loops where we don't want to allocate and we know the rest of the later parts of the slice are safe to overwrite.
|
|
||||||
func wire_put_vslice(slice []byte, bs []byte) []byte {
|
|
||||||
bs = wire_put_uint64(uint64(len(slice)), bs)
|
|
||||||
bs = append(bs, slice...)
|
|
||||||
return bs
|
|
||||||
}
|
|
||||||
|
|
||||||
// Takes a slice that begins with coords (starting with coord length).
|
|
||||||
// Returns a slice of coords and the number of bytes read.
|
|
||||||
// Used as part of various decode() functions for structs.
|
|
||||||
func wire_decode_coords(packet []byte) ([]byte, int) {
|
|
||||||
coordLen, coordBegin := wire_decode_uint64(packet)
|
|
||||||
coordEnd := coordBegin + int(coordLen)
|
|
||||||
if coordBegin == 0 || coordEnd > len(packet) {
|
|
||||||
return nil, 0
|
|
||||||
}
|
|
||||||
return packet[coordBegin:coordEnd], coordEnd
|
|
||||||
}
|
|
||||||
|
|
||||||
// Converts a []uint64 set of coords to a []byte set of coords.
|
|
||||||
func wire_coordsUint64stoBytes(in []uint64) (out []byte) {
|
|
||||||
for _, coord := range in {
|
|
||||||
c := wire_encode_uint64(coord)
|
|
||||||
out = append(out, c...)
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// Converts a []byte set of coords to a []uint64 set of coords.
|
|
||||||
func wire_coordsBytestoUint64s(in []byte) (out []uint64) {
|
|
||||||
offset := 0
|
|
||||||
for {
|
|
||||||
coord, length := wire_decode_uint64(in[offset:])
|
|
||||||
if length == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
out = append(out, coord)
|
|
||||||
offset += length
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
// Encodes a switchMsg into its wire format.
|
|
||||||
func (m *switchMsg) encode() []byte {
|
|
||||||
bs := wire_encode_uint64(wire_SwitchMsg)
|
|
||||||
bs = append(bs, m.Root[:]...)
|
|
||||||
bs = append(bs, wire_encode_uint64(wire_intToUint(m.TStamp))...)
|
|
||||||
for _, hop := range m.Hops {
|
|
||||||
bs = append(bs, wire_encode_uint64(uint64(hop.Port))...)
|
|
||||||
bs = append(bs, hop.Next[:]...)
|
|
||||||
bs = append(bs, hop.Sig[:]...)
|
|
||||||
}
|
|
||||||
return bs
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decodes a wire formatted switchMsg into the struct, returns true if successful.
|
|
||||||
func (m *switchMsg) decode(bs []byte) bool {
|
|
||||||
var pType uint64
|
|
||||||
var tstamp uint64
|
|
||||||
switch {
|
|
||||||
case !wire_chop_uint64(&pType, &bs):
|
|
||||||
return false
|
|
||||||
case pType != wire_SwitchMsg:
|
|
||||||
return false
|
|
||||||
case !wire_chop_slice(m.Root[:], &bs):
|
|
||||||
return false
|
|
||||||
case !wire_chop_uint64(&tstamp, &bs):
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
m.TStamp = wire_intFromUint(tstamp)
|
|
||||||
for len(bs) > 0 {
|
|
||||||
var hop switchMsgHop
|
|
||||||
switch {
|
|
||||||
case !wire_chop_uint64((*uint64)(&hop.Port), &bs):
|
|
||||||
return false
|
|
||||||
case !wire_chop_slice(hop.Next[:], &bs):
|
|
||||||
return false
|
|
||||||
case !wire_chop_slice(hop.Sig[:], &bs):
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
m.Hops = append(m.Hops, hop)
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
// A utility function used to copy bytes into a slice and advance the beginning of the source slice, returns true if successful.
|
|
||||||
func wire_chop_slice(toSlice []byte, fromSlice *[]byte) bool {
|
|
||||||
if len(*fromSlice) < len(toSlice) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
copy(toSlice, *fromSlice)
|
|
||||||
*fromSlice = (*fromSlice)[len(toSlice):]
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// A utility function to extract a length-prefixed slice (such as coords) from a slice and advance the source slices, returning true if successful.
|
|
||||||
func wire_chop_vslice(toSlice *[]byte, fromSlice *[]byte) bool {
|
|
||||||
slice, sliceLen := wire_decode_coords(*fromSlice)
|
|
||||||
if sliceLen == 0 { // sliceLen is length-prefix size + slice size, in bytes
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
*toSlice = append((*toSlice)[:0], slice...)
|
|
||||||
*fromSlice = (*fromSlice)[sliceLen:]
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// A utility function to extract a wire encoded uint64 into the provided pointer while advancing the start of the source slice, returning true if successful.
|
|
||||||
func wire_chop_uint64(toUInt64 *uint64, fromSlice *[]byte) bool {
|
|
||||||
dec, decLen := wire_decode_uint64(*fromSlice)
|
|
||||||
if decLen == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
*toUInt64 = dec
|
|
||||||
*fromSlice = (*fromSlice)[decLen:]
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
// Wire traffic packets
|
|
||||||
|
|
||||||
// The wire format for ordinary IPv6 traffic encapsulated by the network.
|
|
||||||
type wire_trafficPacket struct {
|
|
||||||
Offset uint64
|
|
||||||
Coords []byte
|
|
||||||
Handle crypto.Handle
|
|
||||||
Nonce crypto.BoxNonce
|
|
||||||
Payload []byte
|
|
||||||
RPath []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encodes a wire_trafficPacket into its wire format.
|
|
||||||
// The returned slice was taken from the pool.
|
|
||||||
func (p *wire_trafficPacket) encode() []byte {
|
|
||||||
bs := pool_getBytes(0)
|
|
||||||
bs = wire_put_uint64(wire_Traffic, bs)
|
|
||||||
bs = wire_put_uint64(p.Offset, bs)
|
|
||||||
bs = wire_put_vslice(p.Coords, bs)
|
|
||||||
bs = append(bs, p.Handle[:]...)
|
|
||||||
bs = append(bs, p.Nonce[:]...)
|
|
||||||
bs = wire_put_vslice(p.Payload, bs)
|
|
||||||
bs = append(bs, p.RPath...)
|
|
||||||
return bs
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decodes an encoded wire_trafficPacket into the struct, returning true if successful.
|
|
||||||
// Either way, the argument slice is added to the pool.
|
|
||||||
func (p *wire_trafficPacket) decode(bs []byte) bool {
|
|
||||||
defer pool_putBytes(bs)
|
|
||||||
var pType uint64
|
|
||||||
switch {
|
|
||||||
case !wire_chop_uint64(&pType, &bs):
|
|
||||||
return false
|
|
||||||
case pType != wire_Traffic:
|
|
||||||
return false
|
|
||||||
case !wire_chop_uint64(&p.Offset, &bs):
|
|
||||||
return false
|
|
||||||
case !wire_chop_vslice(&p.Coords, &bs):
|
|
||||||
return false
|
|
||||||
case !wire_chop_slice(p.Handle[:], &bs):
|
|
||||||
return false
|
|
||||||
case !wire_chop_slice(p.Nonce[:], &bs):
|
|
||||||
return false
|
|
||||||
case !wire_chop_vslice(&p.Payload, &bs):
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
p.RPath = append(p.RPath[:0], bs...)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// The wire format for protocol traffic, such as dht req/res or session ping/pong packets.
|
|
||||||
type wire_protoTrafficPacket struct {
|
|
||||||
Offset uint64
|
|
||||||
Coords []byte
|
|
||||||
ToKey crypto.BoxPubKey
|
|
||||||
FromKey crypto.BoxPubKey
|
|
||||||
Nonce crypto.BoxNonce
|
|
||||||
Payload []byte
|
|
||||||
RPath []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encodes a wire_protoTrafficPacket into its wire format.
func (p *wire_protoTrafficPacket) encode() []byte {
	bs := wire_encode_uint64(wire_ProtocolTraffic)
	bs = wire_put_uint64(p.Offset, bs)
	bs = wire_put_vslice(p.Coords, bs)
	bs = append(bs, p.ToKey[:]...)
	bs = append(bs, p.FromKey[:]...)
	bs = append(bs, p.Nonce[:]...)
	bs = wire_put_vslice(p.Payload, bs)
	bs = append(bs, p.RPath...)
	return bs
}

// Decodes an encoded wire_protoTrafficPacket into the struct, returning true if successful.
func (p *wire_protoTrafficPacket) decode(bs []byte) bool {
	var pType uint64
	switch {
	case !wire_chop_uint64(&pType, &bs):
		return false
	case pType != wire_ProtocolTraffic:
		return false
	case !wire_chop_uint64(&p.Offset, &bs):
		return false
	case !wire_chop_vslice(&p.Coords, &bs):
		return false
	case !wire_chop_slice(p.ToKey[:], &bs):
		return false
	case !wire_chop_slice(p.FromKey[:], &bs):
		return false
	case !wire_chop_slice(p.Nonce[:], &bs):
		return false
	case !wire_chop_vslice(&p.Payload, &bs):
		return false
	}
	p.RPath = append(p.RPath[:0], bs...)
	return true
}

// Gets the offset and coords slices of a (protocol) traffic packet without fully decoding it.
func wire_getTrafficOffsetAndCoords(packet []byte) ([]byte, []byte) {
	_, offsetBegin := wire_decode_uint64(packet)
	_, offsetLen := wire_decode_uint64(packet[offsetBegin:])
	offsetEnd := offsetBegin + offsetLen
	offset := packet[offsetBegin:offsetEnd]
	coords, _ := wire_decode_coords(packet[offsetEnd:])
	return offset, coords
}

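// Sketch of peeking at an encoded (protocol) traffic packet without a full
// decode, e.g. before forwarding; packetBytes is a hypothetical encoded
// wire_trafficPacket or wire_protoTrafficPacket.
//
//	offset, coords := wire_getTrafficOffsetAndCoords(packetBytes)
//	_ = offset // the offset field, still in its wire-encoded (varint) form
//	_ = coords // the coords bytes, with their length prefix stripped
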
// The wire format for link protocol traffic, namely switchMsg.
// There are really two layers of this, with the outer layer using permanent keys and the inner layer using ephemeral keys.
// The keys themselves are exchanged as part of the connection setup, and then omitted from the packets.
// The two-layer logic is handled in peers.go, but it's kind of ugly.
type wire_linkProtoTrafficPacket struct {
	Nonce   crypto.BoxNonce
	Payload []byte
}

// Encodes a wire_linkProtoTrafficPacket into its wire format.
func (p *wire_linkProtoTrafficPacket) encode() []byte {
	bs := wire_encode_uint64(wire_LinkProtocolTraffic)
	bs = append(bs, p.Nonce[:]...)
	bs = append(bs, p.Payload...)
	return bs
}

// Decodes an encoded wire_linkProtoTrafficPacket into the struct, returning true if successful.
func (p *wire_linkProtoTrafficPacket) decode(bs []byte) bool {
	var pType uint64
	switch {
	case !wire_chop_uint64(&pType, &bs):
		return false
	case pType != wire_LinkProtocolTraffic:
		return false
	case !wire_chop_slice(p.Nonce[:], &bs):
		return false
	}
	p.Payload = bs
	return true
}

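// Structural sketch of the two layers described above. boxedSwitchMsg and
// boxedInner are hypothetical stand-ins for the boxing done in peers.go (with
// ephemeral and permanent keys respectively); only the packet nesting is
// shown here, with the nonces omitted.
//
//	inner := wire_linkProtoTrafficPacket{Payload: boxedSwitchMsg} // switchMsg, boxed with ephemeral keys
//	outer := wire_linkProtoTrafficPacket{Payload: boxedInner}     // inner.encode(), boxed with permanent keys
//	wireBytes := outer.encode()                                   // what actually goes over the link
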
////////////////////////////////////////////////////////////////////////////////

// Encodes a sessionPing into its wire format.
func (p *sessionPing) encode() []byte {
	var pTypeVal uint64
	if p.IsPong {
		pTypeVal = wire_SessionPong
	} else {
		pTypeVal = wire_SessionPing
	}
	bs := wire_encode_uint64(pTypeVal)
	// p.SendPermPub is used at the top level (crypto), so it's skipped here
	bs = append(bs, p.Handle[:]...)
	bs = append(bs, p.SendSesPub[:]...)
	bs = append(bs, wire_encode_uint64(wire_intToUint(p.Tstamp))...)
	coords := wire_encode_coords(p.Coords)
	bs = append(bs, coords...)
	bs = append(bs, wire_encode_uint64(uint64(p.MTU))...)
	return bs
}

// Decodes an encoded sessionPing into the struct, returning true if successful.
func (p *sessionPing) decode(bs []byte) bool {
	var pType uint64
	var tstamp uint64
	var mtu uint64
	switch {
	case !wire_chop_uint64(&pType, &bs):
		return false
	case pType != wire_SessionPing && pType != wire_SessionPong:
		return false
	// p.SendPermPub is used at the top level (crypto), so it's skipped here
	case !wire_chop_slice(p.Handle[:], &bs):
		return false
	case !wire_chop_slice(p.SendSesPub[:], &bs):
		return false
	case !wire_chop_uint64(&tstamp, &bs):
		return false
	case !wire_chop_vslice(&p.Coords, &bs):
		return false
	case !wire_chop_uint64(&mtu, &bs):
		mtu = 1280
	}
	p.Tstamp = wire_intFromUint(tstamp)
	if pType == wire_SessionPong {
		p.IsPong = true
	}
	p.MTU = MTU(mtu)
	return true
}

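// Decode sketch: a ping that omits the trailing MTU field still decodes, with
// mtu falling back to 1280 (the IPv6 minimum MTU) in the switch above.
// pingBytes is a hypothetical encoded sessionPing or sessionPong.
//
//	var ping sessionPing
//	if ping.decode(pingBytes) {
//		// ping.MTU is the advertised value, or 1280 if the field was absent
//		// ping.IsPong is true iff the type tag was wire_SessionPong
//	}
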
////////////////////////////////////////////////////////////////////////////////

// Encodes a nodeinfoReqRes into its wire format.
func (p *nodeinfoReqRes) encode() []byte {
	var pTypeVal uint64
	if p.IsResponse {
		pTypeVal = wire_NodeInfoResponse
	} else {
		pTypeVal = wire_NodeInfoRequest
	}
	bs := wire_encode_uint64(pTypeVal)
	bs = wire_put_vslice(p.SendCoords, bs)
	if pTypeVal == wire_NodeInfoResponse {
		bs = append(bs, p.NodeInfo...)
	}
	return bs
}

// Decodes an encoded nodeinfoReqRes into the struct, returning true if successful.
func (p *nodeinfoReqRes) decode(bs []byte) bool {
	var pType uint64
	switch {
	case !wire_chop_uint64(&pType, &bs):
		return false
	case pType != wire_NodeInfoRequest && pType != wire_NodeInfoResponse:
		return false
	case !wire_chop_vslice(&p.SendCoords, &bs):
		return false
	}
	if p.IsResponse = pType == wire_NodeInfoResponse; p.IsResponse {
		if len(bs) == 0 {
			return false
		}
		p.NodeInfo = make(NodeInfoPayload, len(bs))
		if !wire_chop_slice(p.NodeInfo[:], &bs) {
			return false
		}
	}
	return true
}

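// Request/response sketch: only responses carry a NodeInfo body, and a
// response with an empty body fails to decode. someCoords and someNodeInfo
// are hypothetical placeholder values.
//
//	req := nodeinfoReqRes{SendCoords: someCoords} // IsResponse is false, so this is a request
//	res := nodeinfoReqRes{SendCoords: someCoords, IsResponse: true, NodeInfo: someNodeInfo}
//	_ = req.encode() // type tag wire_NodeInfoRequest, no NodeInfo appended
//	_ = res.encode() // type tag wire_NodeInfoResponse followed by the raw NodeInfo bytes
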
////////////////////////////////////////////////////////////////////////////////

// Encodes a dhtReq into its wire format.
func (r *dhtReq) encode() []byte {
	coords := wire_encode_coords(r.Coords)
	bs := wire_encode_uint64(wire_DHTLookupRequest)
	bs = append(bs, coords...)
	bs = append(bs, r.Dest[:]...)
	return bs
}

// Decodes an encoded dhtReq into the struct, returning true if successful.
func (r *dhtReq) decode(bs []byte) bool {
	var pType uint64
	switch {
	case !wire_chop_uint64(&pType, &bs):
		return false
	case pType != wire_DHTLookupRequest:
		return false
	case !wire_chop_vslice(&r.Coords, &bs):
		return false
	case !wire_chop_slice(r.Dest[:], &bs):
		return false
	default:
		return true
	}
}

// Encodes a dhtRes into its wire format.
func (r *dhtRes) encode() []byte {
	coords := wire_encode_coords(r.Coords)
	bs := wire_encode_uint64(wire_DHTLookupResponse)
	bs = append(bs, coords...)
	bs = append(bs, r.Dest[:]...)
	for _, info := range r.Infos {
		coords = wire_encode_coords(info.coords)
		bs = append(bs, info.key[:]...)
		bs = append(bs, coords...)
	}
	return bs
}

// Decodes an encoded dhtRes into the struct, returning true if successful.
func (r *dhtRes) decode(bs []byte) bool {
	var pType uint64
	switch {
	case !wire_chop_uint64(&pType, &bs):
		return false
	case pType != wire_DHTLookupResponse:
		return false
	case !wire_chop_vslice(&r.Coords, &bs):
		return false
	case !wire_chop_slice(r.Dest[:], &bs):
		return false
	}
	for len(bs) > 0 {
		info := dhtInfo{}
		switch {
		case !wire_chop_slice(info.key[:], &bs):
			return false
		case !wire_chop_vslice(&info.coords, &bs):
			return false
		}
		r.Infos = append(r.Infos, &info)
	}
	return true
}
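
// Wire-layout sketch for the response body above: after the type tag, coords,
// and dest, each entry in Infos is a fixed-size key followed by
// length-prefixed coords, repeated until the packet ends, which is why decode
// loops with `for len(bs) > 0`. A made-up example:
//
//	info := dhtInfo{coords: []byte{1, 2, 3}} // key left zeroed for the sketch
//	res := dhtRes{Infos: []*dhtInfo{&info}}
//	bs := res.encode()
//	var out dhtRes
//	_ = out.decode(bs) // out.Infos gets one entry per key/coords pair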