package yggdrasil

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/yggdrasil-network/yggdrasil-go/src/crypto"
	"github.com/yggdrasil-network/yggdrasil-go/src/util"
)
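
// Conn is a connection to a remote Yggdrasil node. It wraps a session, which
// is created on demand by a DHT search, and exposes Read, Write, Close and
// deadline methods in the style of net.Conn (LocalAddr and RemoteAddr return
// a crypto.NodeID rather than a net.Addr).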
type Conn struct {
	core          *Core
	nodeID        *crypto.NodeID
	nodeMask      *crypto.NodeID
	recv          chan *wire_trafficPacket // Eventually gets attached to session.recv
	mutex         *sync.RWMutex
	session       *sessionInfo
	readDeadline  atomic.Value // time.Time // TODO timer
	writeDeadline atomic.Value // time.Time // TODO timer
	searching     atomic.Value // bool
}
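
// String returns a string representation of the connection, currently just
// the pointer value, which is useful for identifying a Conn in log output.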
func (c *Conn) String() string {
	return fmt.Sprintf("conn=%p", c)
}

// This method should only be called from the router goroutine
func (c *Conn) startSearch() {
	// The searchCompleted callback is given to the search
	searchCompleted := func(sinfo *sessionInfo, err error) {
		// Update the connection with the fact that the search completed, which
		// allows another search to be triggered if necessary
		c.searching.Store(false)
		// If the search failed for some reason, e.g. it hit a dead end or timed
		// out, then do nothing
		if err != nil {
			c.core.log.Debugln(c.String(), "DHT search failed:", err)
			return
		}
		// Take the connection mutex
		c.mutex.Lock()
		defer c.mutex.Unlock()
		// Were we successfully given a sessionInfo pointer?
		if sinfo != nil {
			// Store it, and update the nodeID and nodeMask (which may have been
			// wildcarded before now) with their complete counterparts
			c.core.log.Debugln(c.String(), "DHT search completed")
			c.session = sinfo
			c.nodeID = crypto.GetNodeID(&sinfo.theirPermPub)
			for i := range c.nodeMask {
				c.nodeMask[i] = 0xFF
			}
		} else {
			// No session was returned - this shouldn't really happen because we
			// should always return an error reason if we don't return a session
			panic("DHT search didn't return an error or a sessionInfo")
		}
	}
	// doSearch will be called below in response to one or more conditions
	doSearch := func() {
		// Store the fact that we're searching, so that we don't start additional
		// searches until this one has completed
		c.searching.Store(true)
		// Check to see if there is a search already matching the destination
		sinfo, isIn := c.core.searches.searches[*c.nodeID]
		if !isIn {
			// Nothing was found, so create a new search
			sinfo = c.core.searches.newIterSearch(c.nodeID, c.nodeMask, searchCompleted)
			c.core.log.Debugf("%s DHT search started: %p", c.String(), sinfo)
		}
		// Continue the search
		c.core.searches.continueSearch(sinfo)
	}
	// Take a copy of the session object, in case it changes later
	c.mutex.RLock()
	sinfo := c.session
	c.mutex.RUnlock()
	if sinfo == nil {
		// No session object is present so previous searches, if we ran any, have
		// not yielded a useful result (dead end, remote host not found)
		doSearch()
	} else {
		sinfo.worker <- func() {
			switch {
			case !sinfo.init:
				doSearch()
			case time.Since(sinfo.time) > 6*time.Second:
				if sinfo.time.Before(sinfo.pingTime) && time.Since(sinfo.pingTime) > 6*time.Second {
					// TODO double check that the above condition is correct
					doSearch()
				} else {
					c.core.sessions.ping(sinfo)
				}
			default: // Don't do anything, to keep traffic throttled
			}
		}
	}
}
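
// Read waits for a packet to arrive from the session, decrypts it into b and
// returns the number of bytes read. It returns an error if the session is
// closed, or if the packet fails the nonce check or cannot be decrypted.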
func (c *Conn) Read(b []byte) (int, error) {
	// Take a copy of the session object
	c.mutex.RLock()
	sinfo := c.session
	c.mutex.RUnlock()
	// If the session is not initialised then do nothing. A Write would trigger
	// a new session in this case, but it doesn't make sense for a Read to block
	// forever waiting for a session that may never open.
	// TODO: should this return an error or just a zero-length buffer?
	if sinfo == nil || !sinfo.init {
		return 0, errors.New("session is closed")
	}
	// Wait for some traffic to come through from the session
	select {
	// TODO...
	case p, ok := <-c.recv:
		// If the session is closed then do nothing
		if !ok {
			return 0, errors.New("session is closed")
		}
		defer util.PutBytes(p.Payload)
		var err error
		// Hand over to the session worker
		sinfo.doWorker(func() {
			// If the nonce is bad then drop the packet and return an error
			if !sinfo.nonceIsOK(&p.Nonce) {
				err = errors.New("packet dropped due to invalid nonce")
				return
			}
			// Decrypt the packet
			bs, isOK := crypto.BoxOpen(&sinfo.sharedSesKey, p.Payload, &p.Nonce)
			// Check if we were unable to decrypt the packet for some reason and
			// return an error if we couldn't
			if !isOK {
				util.PutBytes(bs)
				err = errors.New("packet dropped due to decryption failure")
				return
			}
			// Copy the newly decrypted data back into the slice we were given
			copy(b, bs)
			// Trim the slice down to size based on the data we received
			if len(bs) < len(b) {
				b = b[:len(bs)]
			}
			// Update the session
			sinfo.updateNonce(&p.Nonce)
			sinfo.time = time.Now()
			sinfo.bytesRecvd += uint64(len(b))
		})
		// Something went wrong in the session worker so abort
		if err != nil {
			return 0, err
		}
		// If we've reached this point then everything went to plan, return the
		// number of bytes we populated back into the given slice
		return len(b), nil
		//case <-c.recvTimeout:
		//case <-c.session.closed:
		//	c.expired = true
		//	return len(b), errors.New("session is closed")
	}
}
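
// Write encrypts b and hands the resulting packet to the router to send to
// the remote node. If no session is established yet then a DHT search is
// started (or waited for) instead, and an error is returned without sending
// anything.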
func (c *Conn) Write(b []byte) (bytesWritten int, err error) {
	c.mutex.RLock()
	sinfo := c.session
	c.mutex.RUnlock()
	// If the session doesn't exist, or isn't initialised (which probably means
	// that the search didn't complete successfully) then try to search again
	if sinfo == nil || !sinfo.init {
		// Is a search already taking place?
		if searching, sok := c.searching.Load().(bool); !sok || !searching {
			// No search was already taking place so start a new one
			c.core.router.doAdmin(func() {
				c.startSearch()
			})
			return 0, errors.New("starting search")
		}
		// A search is already taking place so wait for it to finish
		return 0, errors.New("waiting for search to complete")
	}
	// defer util.PutBytes(b)
	var packet []byte
	// Hand over to the session worker
	sinfo.doWorker(func() {
		// Encrypt the packet
		payload, nonce := crypto.BoxSeal(&sinfo.sharedSesKey, b, &sinfo.myNonce)
		defer util.PutBytes(payload)
		// Construct the wire packet to send to the router
		p := wire_trafficPacket{
			Coords:  sinfo.coords,
			Handle:  sinfo.theirHandle,
			Nonce:   *nonce,
			Payload: payload,
		}
		packet = p.encode()
		sinfo.bytesSent += uint64(len(b))
	})
	// Give the packet to the router
	sinfo.core.router.out(packet)
	// Finally return the number of bytes we wrote
	return len(b), nil
}
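
// Close closes the underlying session, if one is open. It currently always
// returns nil.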
func (c *Conn) Close() error {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	// Close the session, if it hasn't been closed already
	if c.session != nil {
		c.session.close()
		c.session = nil
	}
	// This can't fail yet - TODO?
	return nil
}
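
// LocalAddr returns the NodeID of the local node.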
func (c *Conn) LocalAddr() crypto.NodeID {
	return *crypto.GetNodeID(&c.core.boxPub)
}
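
// RemoteAddr returns the NodeID of the remote node.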
func (c *Conn) RemoteAddr() crypto.NodeID {
	c.mutex.RLock()
	defer c.mutex.RUnlock()
	return *c.nodeID
}
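
// SetDeadline sets both the read and write deadlines. The deadlines are
// currently stored but not yet enforced (see the TODOs on the deadline fields).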
func (c *Conn) SetDeadline(t time.Time) error {
	c.SetReadDeadline(t)
	c.SetWriteDeadline(t)
	return nil
}
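
// SetReadDeadline stores the read deadline. It is not yet enforced by Read (TODO).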
func (c *Conn) SetReadDeadline(t time.Time) error {
	c.readDeadline.Store(t)
	return nil
}
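
// SetWriteDeadline stores the write deadline. It is not yet enforced by Write (TODO).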
func (c *Conn) SetWriteDeadline(t time.Time) error {
	c.writeDeadline.Store(t)
	return nil
}