From a09a83530f80b26a946c3296dbbd7193f037c63f Mon Sep 17 00:00:00 2001 From: Arceliar Date: Sun, 22 Mar 2020 18:42:42 -0500 Subject: [PATCH] update search description in comments --- src/yggdrasil/search.go | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/yggdrasil/search.go b/src/yggdrasil/search.go index bfec4e16..e963babc 100644 --- a/src/yggdrasil/search.go +++ b/src/yggdrasil/search.go @@ -4,15 +4,13 @@ package yggdrasil // The basic idea is as follows: // We may know a NodeID (with a mask) and want to connect -// We begin a search by initializing a list of all nodes in our DHT, sorted by closest to the destination -// We then iteratively ping nodes from the search, marking each pinged node as visited -// We add any unvisited nodes from ping responses to the search, truncating to some maximum search size -// This stops when we either run out of nodes to ping (we hit a dead end where we can't make progress without going back), or we reach the destination -// A new search packet is sent immediately after receiving a response -// A new search packet is sent periodically, once per second, in case a packet was dropped (this slowly causes the search to become parallel if the search doesn't timeout but also doesn't finish within 1 second for whatever reason) - -// TODO? -// Some kind of max search steps, in case the node is offline, so we don't crawl through too much of the network looking for a destination that isn't there? 
+// We begin a search by sending a DHT lookup to ourself
+// Each time a node responds, we sort the results and filter to only include useful nodes
+// We then periodically send a packet to the first node from the list (after re-filtering)
+// This happens in parallel for each node that replies
+// Meanwhile, we keep a list of the (up to) 16 closest nodes to the destination that we've visited
+// We only consider an unvisited node useful if either the list isn't full or the unvisited node is closer to the destination than the furthest node on the list
+// That gives the search some chance to recover if it hits a dead end where a node doesn't know everyone it should

 import (
 	"errors"