Compare commits

...

293 Commits

Author SHA1 Message Date
Arceliar
8c29f4b6dc Merge pull request #115 from yggdrasil-network/develop
v0.2 changes
2018-06-13 12:53:56 -05:00
Arceliar
ad1ba2c8ca Merge pull request #117 from Arceliar/panicfix
closed channel send bugfix
2018-06-13 00:33:48 -05:00
Arceliar
cd514799da recover if p.doSend is closed due to a race between peers.doSendSwitchMsgs and peers.removePeer 2018-06-13 00:24:12 -05:00
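A send on a closed channel panics in Go, so the usual defense when a close can race with a send is to wrap the send in a deferred recover. A minimal sketch of that pattern, with hypothetical names rather than the repository's actual code:

```
package main

import "fmt"

// trySend sends msg on ch, but survives the case where another goroutine
// (e.g. a peer-removal path) closed ch first: a send on a closed channel
// panics, and the deferred recover converts that panic into a false return.
func trySend(ch chan []byte, msg []byte) (ok bool) {
	defer func() {
		if recover() != nil {
			ok = false // channel was closed out from under us
		}
	}()
	ch <- msg
	return true
}

func main() {
	ch := make(chan []byte, 1)
	fmt.Println(trySend(ch, []byte("msg"))) // true
	close(ch)
	fmt.Println(trySend(ch, []byte("msg"))) // false, but no crash
}
```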
Arceliar
f0c249a4b7 Merge pull request #116 from yggdrasil-network/Arceliar-patch-1
Update README.md
2018-06-12 19:35:06 -05:00
Arceliar
98ee657447 Update README.md
Replace some links to the old documentation with links to the .io site
2018-06-12 19:31:36 -05:00
Neil Alexander
cfcdf91444 Merge pull request #114 from Arceliar/cleanup
Code cleanup
2018-06-13 00:15:33 +01:00
Arceliar
b006748da4 code cleanup 2018-06-12 17:50:08 -05:00
Arceliar
9ce428af66 Merge pull request #112 from neilalexander/documentation
Adds more comments to ICMPv6 and TUN/TAP.
2018-06-12 17:11:24 -05:00
Neil Alexander
8e2c2aa977 Document ICMPv6 and TUN/TAP 2018-06-12 22:45:53 +01:00
Neil Alexander
54cf6b0794 Merge pull request #110 from Arceliar/config
Config cleanup
2018-06-12 20:25:31 +01:00
Arceliar
24228bd381 disable unused configuration Net field and clean up comments to remove mention of UDP where no longer applicable 2018-06-12 14:00:04 -05:00
Neil Alexander
bc82d035db Merge pull request #109 from Arceliar/dhtbackoff
Exponential DHT backoff
2018-06-12 14:04:39 +01:00
Arceliar
359af66d0d exponential dht throttle backoff, and make it based on when packets were sent as part of bootstrapping/maintenance, not when arbitrary packets were received 2018-06-12 03:16:10 -05:00
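The throttle here (introduced in fe12e1509a, further down this list) sets the minimum wait before a node is reused for bootstrapping, and this commit makes it grow exponentially from the time a ping was sent rather than from when arbitrary packets were received. A hypothetical sketch of that bookkeeping; the names, starting interval, and cap are illustrative assumptions:

```
package main

import (
	"fmt"
	"time"
)

// dhtInfo is a trimmed-down, hypothetical stand-in for the real struct.
// throttle is the minimum wait before this node is used again for
// bootstrapping, measured from when we last *sent* a ping to it.
type dhtInfo struct {
	throttle time.Duration
	pingSent time.Time
}

// canPing reports whether enough time has passed to use this node again.
func (info *dhtInfo) canPing(now time.Time) bool {
	return now.Sub(info.pingSent) >= info.throttle
}

// sentPing records the send time and doubles the throttle, up to an
// arbitrary illustrative cap of one minute.
func (info *dhtInfo) sentPing(now time.Time) {
	info.pingSent = now
	switch {
	case info.throttle == 0:
		info.throttle = time.Second
	case info.throttle < time.Minute:
		info.throttle *= 2
	}
}

func main() {
	info := &dhtInfo{}
	now := time.Now()
	for i := 0; i < 5; i++ {
		if info.canPing(now) {
			info.sentPing(now)
			fmt.Println("pinged; next allowed in", info.throttle)
		}
		now = now.Add(info.throttle)
	}
}
```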
Neil Alexander
909b48f2f2 Merge pull request #108 from Arceliar/doc
Documentation, code comments, and a couple bugfixes
2018-06-11 07:05:57 +01:00
Arceliar
56802d569e minor documentation updates, code comments, and a couple of bugfixes that I noticed when going through the code to comment it 2018-06-10 18:03:28 -05:00
Neil Alexander
b0acc19e3d Merge pull request #107 from Arceliar/wire
Add version information to connection setup
2018-06-10 10:11:56 +01:00
Arceliar
038a51fd13 it helps to add new files 2018-06-09 18:44:59 -05:00
Arceliar
72cca4ea43 version check/warning adjustments 2018-06-09 18:38:30 -05:00
Arceliar
076350f963 remove old tcp key exchange code 2018-06-09 17:49:02 -05:00
Arceliar
8733099516 add version metadata to key exchange at the start of connections 2018-06-09 17:46:19 -05:00
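For context, a handshake prefixed with version metadata might look roughly like the sketch below, so both sides can detect an incompatible peer before completing the key exchange. The magic bytes and field layout here are illustrative assumptions, not the actual wire format:

```
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// versionMetadata is a hypothetical header sent at the start of each
// connection, ahead of the key exchange proper.
type versionMetadata struct {
	Major, Minor uint16
}

var metaPrefix = []byte("meta") // assumed magic bytes

func (m versionMetadata) encode() []byte {
	buf := make([]byte, len(metaPrefix)+4)
	copy(buf, metaPrefix)
	binary.BigEndian.PutUint16(buf[len(metaPrefix):], m.Major)
	binary.BigEndian.PutUint16(buf[len(metaPrefix)+2:], m.Minor)
	return buf
}

func decodeVersion(data []byte) (versionMetadata, bool) {
	if len(data) < len(metaPrefix)+4 || !bytes.HasPrefix(data, metaPrefix) {
		return versionMetadata{}, false // not a recognizable header
	}
	rest := data[len(metaPrefix):]
	return versionMetadata{
		Major: binary.BigEndian.Uint16(rest[0:2]),
		Minor: binary.BigEndian.Uint16(rest[2:4]),
	}, true
}

func main() {
	wire := versionMetadata{Major: 0, Minor: 2}.encode()
	meta, ok := decodeVersion(wire)
	fmt.Println(meta, ok) // {0 2} true
}
```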
Arceliar
f5c850f098 better way to do wire signed ints (no negative zero, remove conditionals) 2018-06-09 16:36:13 -05:00
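Zigzag-style encoding has exactly the properties named here: it maps signed to unsigned integers with no negative zero and no conditionals. Whether the wire format uses precisely this mapping is an assumption; the sketch below just illustrates the technique:

```
package main

import "fmt"

// wireEncodeInt maps a signed value to an unsigned one by interleaving
// negatives and positives: 0->0, -1->1, 1->2, -2->3, ... The arithmetic
// shift spreads the sign bit, so no branches are needed.
func wireEncodeInt(v int64) uint64 {
	return uint64((v << 1) ^ (v >> 63))
}

func wireDecodeInt(u uint64) int64 {
	return int64(u>>1) ^ -int64(u&1)
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2} {
		u := wireEncodeInt(v)
		fmt.Println(v, "->", u, "->", wireDecodeInt(u))
	}
}
```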
Arceliar
b7e4ff5d5a Merge pull request #106 from Arceliar/peerauth
Peer authentication improvements
2018-06-09 16:24:27 -05:00
Arceliar
e5eb6de1f6 add inner crypto to linkProtoTraffic, using ephemeral keys, to prevent replay attacks from spoofing peer connections 2018-06-08 18:42:56 -05:00
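The primitive involved is box encryption under per-session ephemeral keys: once a session's ephemeral keys are discarded, captured packets can no longer be replayed usefully. An illustration of that primitive using NaCl box, not the project's actual handshake:

```
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/box"
)

func main() {
	// Each side generates a fresh ephemeral key pair per session.
	ourPub, ourPriv, _ := box.GenerateKey(rand.Reader)
	theirPub, theirPriv, _ := box.GenerateKey(rand.Reader)

	var nonce [24]byte
	rand.Read(nonce[:])

	// Seal a link protocol message to the peer; replaying this later is
	// useless once the ephemeral keys are gone.
	sealed := box.Seal(nil, []byte("link proto msg"), &nonce, theirPub, ourPriv)
	opened, ok := box.Open(nil, sealed, &nonce, ourPub, theirPriv)
	fmt.Println(string(opened), ok)
}
```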
Arceliar
1dcc60f054 check root before accepting that a message is good and unblocking a new peer 2018-06-08 17:33:16 -05:00
Arceliar
ad6ea59049 Merge pull request #105 from yggdrasil-network/localhost
Use "localhost:9001" for the default admin socket
2018-06-08 16:42:18 -05:00
Arceliar
2e1456902c Update yggdrasil.go 2018-06-08 16:38:11 -05:00
Arceliar
f30d040366 Merge branch 'master' of https://github.com/yggdrasil-network/yggdrasil-go into develop 2018-06-08 16:17:24 -05:00
Arceliar
dde7653bf4 Merge pull request #103 from Arceliar/switchMsg
Use new switchMsg format
2018-06-08 16:16:39 -05:00
Neil Alexander
89757ab5ec Merge pull request #104 from Arceliar/multicast
Enable multicast on interfaces that come up after startup
2018-06-08 22:01:26 +01:00
Arceliar
495891d9e8 remove testing panics 2018-06-07 22:32:01 -05:00
Arceliar
e29f700dd6 enable multicast on interfaces that come up after startup 2018-06-07 22:07:19 -05:00
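One straightforward way to do this is to rescan the interface list periodically and start announcing on any multicast-capable interface that newly appears. A hypothetical sketch; the period, names, and announce hook are illustrative:

```
package main

import (
	"fmt"
	"net"
	"time"
)

// watchInterfaces polls the OS interface list and calls announce once for
// each up, multicast-capable interface it hasn't seen before.
func watchInterfaces(announce func(net.Interface)) {
	known := map[string]bool{}
	for {
		ifaces, err := net.Interfaces()
		if err == nil {
			for _, iface := range ifaces {
				up := iface.Flags&net.FlagUp != 0
				mc := iface.Flags&net.FlagMulticast != 0
				if up && mc && !known[iface.Name] {
					known[iface.Name] = true
					announce(iface)
				}
			}
		}
		time.Sleep(15 * time.Second) // arbitrary rescan interval
	}
}

func main() {
	go watchInterfaces(func(iface net.Interface) {
		fmt.Println("multicast enabled on", iface.Name)
	})
	time.Sleep(time.Second)
}
```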
Arceliar
ea1d21f7e5 don't change dhtInfo.pings when sending a search, to prevent multiple different searches from evicting a node 2018-06-07 21:28:08 -05:00
Arceliar
6bdc9a7eb6 fix the sim, part of it bypasses queues so it's expected to see loops in those cases while things are in the middle of updating 2018-06-07 21:06:30 -05:00
Arceliar
bced15b138 remove TTL from traffic packets 2018-06-07 20:29:22 -05:00
Arceliar
84c13fac90 don't use TTL 2018-06-07 20:18:13 -05:00
Arceliar
fe12e1509a add a throttle to nodes in the dht. the throttle is gradually increased each time the node is pinged. it determines the minimum amount of time to wait between using the node in a bootstrapping search 2018-06-07 17:55:43 -05:00
Arceliar
ec1c173ca5 it helps to check that messages decoded correctly 2018-06-07 16:53:39 -05:00
Arceliar
bcfeb22915 more tcp debugging 2018-06-07 16:49:51 -05:00
Arceliar
63feed8dc3 adjust tcp timeout and add shadow queues to track dropped packets 2018-06-07 15:04:17 -05:00
Arceliar
c1f8baf9b5 update dht.reset() to possibly play better with coord changes 2018-06-07 14:39:43 -05:00
Arceliar
d468882147 cleanup 2018-06-07 14:24:02 -05:00
Arceliar
f8ba80e7d8 remove old switchMessage and clean up related code 2018-06-07 14:13:31 -05:00
Arceliar
00e4da28c7 use/store switchMsg in the switch instead of going through the old switchMessage 2018-06-07 13:56:11 -05:00
Arceliar
3dab94be9f keep dht peers alive 2018-06-07 10:58:24 -05:00
Arceliar
deb755e3e9 remove peer.linkIn channel and related logic 2018-06-07 00:49:06 -05:00
Arceliar
ecf37cae8a make the switch react to peer coord changes immediately, and send out updates immediately 2018-06-07 00:16:47 -05:00
Arceliar
85afe187ff remove peer timeout logic from the switch, so switch peer entries are only removed when the peer struct is removed 2018-06-06 23:23:16 -05:00
Arceliar
3b783fbf97 move periodic switch maintenance into the router instead of its own goroutine 2018-06-06 23:10:33 -05:00
Arceliar
5dc0cb5544 move wire_encode_locator logic into getBytesForSig, since that's the only place it's used 2018-06-06 23:00:17 -05:00
Arceliar
1e7d34492d fix signature checks and add some TODO reminder comments 2018-06-06 22:39:22 -05:00
Arceliar
5fb33da3a2 remove old switch anc/hop/res code 2018-06-06 21:18:21 -05:00
Arceliar
6811759fc9 add new switchMsg struct to replace old anc/hop/res approach 2018-06-06 21:11:10 -05:00
Arceliar
690d29435d adjust link packet logic so they bypass the lifo stack and are delivered first 2018-06-06 17:44:10 -05:00
Arceliar
bbae9ff8e8 Merge pull request #92 from Arceliar/backpressure
Use backpressure instead of estimated bandwidth
2018-06-06 16:58:48 -05:00
Arceliar
fad6f6b50e remove udp.go 2018-06-06 16:57:36 -05:00
Arceliar
da928af361 fix sim and run gofmt 2018-06-06 16:49:23 -05:00
Arceliar
240841eb38 remove unused recursive search packets 2018-06-06 16:46:15 -05:00
Arceliar
1b89892610 remove UDP, to be replaced with a better implementation later 2018-06-06 16:40:35 -05:00
Neil Alexander
2f8aaa5c20 Merge pull request #102 from neilalexander/ifname
Fix IfName 'none'
2018-06-02 23:37:24 +01:00
Neil Alexander
6d9d01dae1 Fix IfName='none' 2018-06-02 23:29:06 +01:00
Neil Alexander
2e188917d8 Merge pull request #99 from Arceliar/iterdht
Switch to iterative DHT searches
2018-06-02 23:10:51 +01:00
Arceliar
b9ea5350c6 update search.go comments to describe the iterative approach 2018-06-02 16:45:45 -05:00
Arceliar
3e1ac81854 allow searches to start with nodes further from the destination than ourself 2018-06-02 16:39:34 -05:00
Arceliar
45abfafbba value instead of pointer types for search dest/mask 2018-06-02 16:33:58 -05:00
Arceliar
34939d4b18 update for wire format changes 2018-06-02 16:30:05 -05:00
Arceliar
b3b729804c Merge pull request #101 from neilalexander/wire
Export fields of wire structs
2018-06-02 16:24:06 -05:00
Neil Alexander
bbdcee1015 Export fields of sessionPing, dhtReq, dhtRes 2018-06-02 22:19:42 +01:00
Neil Alexander
49af65296d Export fields of wire structs 2018-06-02 21:21:05 +01:00
Arceliar
28e6c738e2 Merge branch 'master' into iterdht 2018-06-02 15:00:24 -05:00
Arceliar
09baad48e3 retry failed iterative searches, possibly becoming parallel if things are just slow, and keep track of / skip nodes that were already visited in the search 2018-06-02 14:57:06 -05:00
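The bookkeeping this describes is a visited set plus a deduplicated candidate queue, so retries (which may effectively run in parallel when replies are merely slow) never revisit a node. A sketch under assumed names and types; the real searchInfo differs:

```
package main

import "fmt"

type nodeID string

// searchInfo tracks which nodes a search has already visited and which
// candidates remain to be tried.
type searchInfo struct {
	visited map[nodeID]bool
	toVisit []nodeID
}

// addCandidates merges DHT responses into the queue, skipping anything
// already queued or already visited.
func (s *searchInfo) addCandidates(nodes []nodeID) {
	queued := make(map[nodeID]bool)
	for _, n := range s.toVisit {
		queued[n] = true
	}
	for _, n := range nodes {
		if !s.visited[n] && !queued[n] {
			s.toVisit = append(s.toVisit, n)
			queued[n] = true
		}
	}
}

// next pops the next candidate and marks it visited.
func (s *searchInfo) next() (nodeID, bool) {
	if len(s.toVisit) == 0 {
		return "", false
	}
	n := s.toVisit[0]
	s.toVisit = s.toVisit[1:]
	s.visited[n] = true
	return n, true
}

func main() {
	s := &searchInfo{visited: map[nodeID]bool{}}
	s.addCandidates([]nodeID{"a", "b", "a"}) // duplicate "a" is dropped
	for {
		n, ok := s.next()
		if !ok {
			break
		}
		fmt.Println("visiting", n)
		if n == "a" {
			s.addCandidates([]nodeID{"b", "c"}) // simulated response
		}
	}
}
```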
Arceliar
0f96fea3fc Merge pull request #100 from neilalexander/dotstyle2
Improve formatting of dot graph
2018-06-02 13:25:01 -05:00
Neil Alexander
bc37d4fb76 Improve formatting of dot: dashed grey for unknown nodes, sans-serif text 2018-06-02 14:24:06 +01:00
Arceliar
ed6c9c2a54 deduplicate dht responses when adding them to the search, limit the search toVisit size 2018-06-02 00:29:36 -05:00
Arceliar
10a72444e3 get itersearch to run in the sim 2018-06-02 00:16:47 -05:00
Arceliar
ecf7e490d7 start of iterative dht searches 2018-06-01 23:34:21 -05:00
Arceliar
dc841c550b Merge pull request #98 from Arceliar/dotstyle
Admin "dot" style changes
2018-06-01 20:57:24 -05:00
Neil Alexander
ec80a81ed5 Mark extrapolated nodes with dashed outline 2018-06-01 23:33:02 +01:00
Neil Alexander
e4082f218f Add text to dot graph explaining how a node is known 2018-06-01 23:23:24 +01:00
Neil Alexander
a2df5107f0 Change colour scheme: green for self, yellow for peer, blue for open session, white for DHT 2018-06-01 14:20:47 +01:00
Arceliar
ca15bbea57 try color coding dot output 2018-05-31 20:28:09 -05:00
Arceliar
887b463306 Merge pull request #97 from neilalexander/master
Set allowed encryption keys after start
2018-05-29 16:20:32 -05:00
Neil Alexander
027944a14a Set allowed encryption keys after start, as otherwise it hits a nil pointer exception 2018-05-29 22:15:54 +01:00
Arceliar
742eded4ff Merge pull request #96 from neilalexander/dedebug
Create Core API, remove DEBUG function calls
2018-05-29 16:01:23 -05:00
Neil Alexander
bfa281c0ae Add -tags debug to run-sim 2018-05-27 23:50:22 +01:00
Neil Alexander
ccf71af6b7 Move Init from core.go to debug.go as function is only for simulator 2018-05-27 23:37:57 +01:00
Neil Alexander
460a22c063 Clean up some exported constants 2018-05-27 23:31:34 +01:00
Neil Alexander
a9e61d0d37 Add support for building "release" builds that don't contain pprof which are substantially smaller. To build a "debug" build, use "-tags debug" with "go build" 2018-05-27 23:22:50 +01:00
Neil Alexander
e9b1006dda Remove final DEBUG calls from yggdrasil.go 2018-05-27 22:36:36 +01:00
Neil Alexander
35173e2388 Fix starting TUN read/write 2018-05-27 22:35:30 +01:00
Neil Alexander
4c115de633 De-debug 2018-05-27 22:13:37 +01:00
Arceliar
38e7704161 use backpressure instead of estimated bandwidth, sorted by uptime to break ties 2018-05-27 13:37:35 -05:00
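Backpressure here means choosing the next hop with the least queued data instead of the highest estimated bandwidth, with uptime breaking ties. A sketch of that selection rule, with a hypothetical portInfo type:

```
package main

import (
	"fmt"
	"time"
)

// portInfo is a hypothetical stand-in for a switch peer entry.
type portInfo struct {
	queueSize int64     // bytes queued toward this peer (backpressure signal)
	firstSeen time.Time // when the peer came up (earlier = longer uptime)
}

// bestPort picks the preferred next hop among candidates assumed to
// already satisfy the "distance must decrease" requirement: least queued
// data first, longest uptime as the tiebreaker.
func bestPort(ports []portInfo) int {
	best := 0
	for i, p := range ports[1:] {
		idx := i + 1
		if p.queueSize < ports[best].queueSize ||
			(p.queueSize == ports[best].queueSize &&
				p.firstSeen.Before(ports[best].firstSeen)) {
			best = idx
		}
	}
	return best
}

func main() {
	now := time.Now()
	ports := []portInfo{
		{queueSize: 4096, firstSeen: now.Add(-time.Hour)},
		{queueSize: 512, firstSeen: now.Add(-time.Minute)},
		{queueSize: 512, firstSeen: now.Add(-24 * time.Hour)},
	}
	fmt.Println("forward via port", bestPort(ports)) // 2: same queue, up longer
}
```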
Arceliar
707e23d392 Merge pull request #95 from Arceliar/tcpfix
fix npe, these don't need to be pointers
2018-05-27 13:28:51 -05:00
Arceliar
02ba9dc97c fix npe, these don't need to be pointers 2018-05-27 13:22:21 -05:00
Neil Alexander
5131d854e5 Merge pull request #94 from Arceliar/main-fixes
hjson bugfixes
2018-05-27 19:06:16 +01:00
Arceliar
7b12493417 panic if tcp startup fails, since otherwise a nil pointer occurs in multicast. make udp do the same thing. 2018-05-27 12:56:33 -05:00
Arceliar
e62cfa8c84 revert removal of useconffile 2018-05-27 12:31:35 -05:00
Arceliar
fcaabe4aac Fix some issues with hjson by converting to json then parsing to go struct, and remove useconffile option since it doesn't seem to add anything over stdin and increases the attack surface 2018-05-26 20:40:19 -05:00
Neil Alexander
71d3a2b187 Merge pull request #93 from neilalexander/bugfixes
Fix MulticastInterfaces and accidental truncation of addresses on macOS/BSD
2018-05-26 21:55:36 +01:00
Neil Alexander
a0f547cc1b Fix being able to enable/disable multicast 2018-05-26 21:50:47 +01:00
Neil Alexander
bc899c395a Fix accidental truncation of addresses ending in 8 on macOS and BSD 2018-05-26 21:47:54 +01:00
Arceliar
fae00e962f Merge pull request #90 from neilalexander/admin
Add getMulticastInterfaces and getSelf to yggdrasilctl
2018-05-23 17:52:06 -05:00
Neil Alexander
cae589d2d3 More nice 2018-05-23 22:21:37 +01:00
Neil Alexander
5d87601118 Add getMulticastInterfaces and getSelf to yggdrasilctl 2018-05-23 22:13:52 +01:00
Neil Alexander
9b5965fd9a Merge pull request #89 from neilalexander/config
Fix debian normalisation in postinst
2018-05-23 21:35:56 +01:00
Neil Alexander
77d67a1569 Fix debian normalisation in postinst 2018-05-23 21:31:52 +01:00
Arceliar
9e5964dcd4 Merge pull request #87 from neilalexander/config
Update configuration names and update multicast behaviour
2018-05-23 13:08:34 -05:00
Neil Alexander
8456b2e1bc Fix substitution 2018-05-23 18:53:44 +01:00
Neil Alexander
1fd53bcb7d Update comments in configuration 2018-05-23 14:02:13 +01:00
Neil Alexander
8a46253c1f Normalise existing yggdrasil.conf on debian package install/upgrade 2018-05-23 13:26:08 +01:00
Neil Alexander
34f7b7ca14 Fix AllowedBoxPubs 2018-05-23 12:32:26 +01:00
Neil Alexander
ce854a76bd Add -normaliseconf option and temporarily correct old config item names in running config 2018-05-23 12:04:27 +01:00
Neil Alexander
9d9083e373 Update configuration names, fix multicast interface selection 2018-05-23 11:28:20 +01:00
Neil Alexander
6f79184c9b Move multicasting into yggdrasil package 2018-05-23 11:13:53 +01:00
Neil Alexander
388ae09fca Rename some config items 2018-05-22 23:43:29 +01:00
Arceliar
b10ae51565 Merge pull request #85 from neilalexander/yggdrasilctl
Human-readable formatting in yggdrasilctl
2018-05-21 19:21:00 -05:00
Arceliar
14b2dd0c61 Merge pull request #86 from neilalexander/youraddresses
Show IPv6 address and subnet on startup
2018-05-21 19:18:08 -05:00
Neil Alexander
f6ea6fcc85 Add addAllowedBoxPub, removeAllowedBoxPub, getAllowedBoxPubs 2018-05-21 18:12:36 +01:00
Neil Alexander
04b3e84023 Show your IPv6 address and subnet when starting 2018-05-21 16:15:31 +01:00
Neil Alexander
9567446f50 Offload formatting to yggdrasilctl and keep precision in JSON 2018-05-21 15:21:23 +01:00
Neil Alexander
59688dcab3 Fix formatting of bytes_sent, bytes_recvd, last_seen, uptime 2018-05-21 15:05:01 +01:00
Neil Alexander
cc4ee91279 Allow JSON output using -json argument 2018-05-21 14:29:27 +01:00
Neil Alexander
61c640dbde Add addPeer and removePeer 2018-05-21 14:25:11 +01:00
Neil Alexander
ca3e541d3b Friendly formats for getDHT, getSessions, setTunTap etc 2018-05-21 14:14:57 +01:00
Neil Alexander
201701ae4a Add some pretty printing to yggdrasilctl, small modifications to JSON formatting in admin socket 2018-05-21 13:54:51 +01:00
Neil Alexander
bd32f5890b Run gofmt -s -w 2018-05-21 07:28:03 +01:00
Arceliar
cc6fb8bd98 Merge pull request #84 from neilalexander/admin
JSON support for admin socket
2018-05-20 20:31:24 -05:00
Neil Alexander
ab3eb9877a Fix builds 2018-05-20 23:59:42 +01:00
Neil Alexander
cda7a2abcc Builds for yggdrasilctl plus usage info and fixes 2018-05-20 23:46:02 +01:00
Neil Alexander
ee99ae008d Add yggdrasilctl 2018-05-20 23:32:17 +01:00
Neil Alexander
79131bb959 Minor tweaks 2018-05-20 23:25:07 +01:00
Neil Alexander
aae570de2a Optional parameters are now square instead of pointed brackets, avoid nils for help 2018-05-20 21:57:05 +01:00
Neil Alexander
c765e0566f Convert rest of functions, fix setTunTap 2018-05-20 21:54:15 +01:00
Neil Alexander
c75566d5ac Various fixes and error catching, add setTunTap 2018-05-20 21:44:30 +01:00
Neil Alexander
4ecc7ce860 Add getTunTap to JSON admin socket 2018-05-20 19:42:37 +01:00
Neil Alexander
c3ca5c64b1 Add some more functions to JSON admin socket 2018-05-20 19:23:43 +01:00
Neil Alexander
9713e73969 Fix dot 2018-05-20 17:25:13 +01:00
Neil Alexander
a6ef7166ea Start JSON-ifying the admin socket 2018-05-20 17:21:14 +01:00
Neil Alexander
89b7be1409 Merge pull request #83 from Arceliar/peerInfo
Add uptime and bytes sent/recvd to the `peer` struct and getPeers results
2018-05-19 10:09:42 +01:00
Arceliar
5c0636eb3d add uptime and bytes sent/recvd to peer struct and getPeers 2018-05-18 20:41:02 -05:00
Arceliar
fdb826578f Merge pull request #82 from neilalexander/admin
Track TX/RX bytes over session and if MTU was adjusted, adjust MTU exchange behaviour, send session pings on TUN/TAP change
2018-05-18 17:26:33 -05:00
Neil Alexander
ca96bbf014 Accept exchanging an MTU of 0 to signify that TUN/TAP is disabled, don't send traffic to a node in that case 2018-05-18 18:56:33 +01:00
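In other words, MTU 0 acts as an in-band "no TUN/TAP" flag. A sketch of the resulting session-MTU rule; the names are illustrative:

```
package main

import "fmt"

// effectiveMTU treats a remote MTU of 0 as "the far side has TUN/TAP
// disabled", so no tunnelled traffic should be sent to it; otherwise the
// session MTU is the minimum of both ends.
func effectiveMTU(local, remote uint16) (mtu uint16, canSend bool) {
	if remote == 0 {
		return 0, false
	}
	if remote < local {
		return remote, true
	}
	return local, true
}

func main() {
	fmt.Println(effectiveMTU(65535, 1280)) // 1280 true
	fmt.Println(effectiveMTU(65535, 0))    // 0 false
}
```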
Neil Alexander
ec371af84f Track TX/RX bytes over session and if MTU was adjusted, add to admin socket getSession 2018-05-18 17:59:29 +01:00
Neil Alexander
546c5f1412 Merge pull request #81 from Arceliar/dht
More DHT updates
2018-05-18 08:00:22 +01:00
Arceliar
ec8fe338d5 more insertIfNew bugfixes, and add peerOnly to getDHT output (true if a node is in the bucket.peers slice instead of bucket.others--it means they're not regularly pinged, they're only there to make sure DHT lookups include them as a result, for bootstrapping reasons) 2018-05-17 21:43:26 -05:00
Arceliar
fe518f4e3f bugfixes related to peer timeouts in the DHT, significantly improve DHT bootstrap speed 2018-05-17 21:20:31 -05:00
Arceliar
8d9887294c add dht time since last ping to admin socket, some DHT code cleanup, bugfix to insertIfNew 2018-05-17 19:32:29 -05:00
Neil Alexander
edf8f2e239 Merge pull request #79 from Arceliar/dht
DHT and Switch updates
2018-05-17 13:59:38 +01:00
Arceliar
d98640fd59 switch cleanup 2018-05-16 17:48:53 -05:00
Arceliar
6b51b44cbf slightly better variable naming 2018-05-16 17:36:37 -05:00
Arceliar
800ccaa3d4 cleanup 2018-05-16 17:32:26 -05:00
Arceliar
fe712d24f8 don't allow buckets to overflow when the next bucket isn't full 2018-05-16 17:24:38 -05:00
Arceliar
8228242eed misc dht and tree changes to stabilize coords and bootstrap the dht faster 2018-05-15 23:57:00 -05:00
Arceliar
96c55da987 Merge pull request #76 from neilalexander/hjson
Use HJSON for config instead of JSON
2018-05-13 15:09:28 -05:00
Neil Alexander
85cae4501e Merge pull request #78 from neilalexander/admin
Add getTunTap to admin socket
2018-05-10 09:52:49 +01:00
Neil Alexander
5299783e35 Add getTunTap to admin socket 2018-05-10 09:48:12 +01:00
Neil Alexander
6d944d8ec8 Merge pull request #77 from neilalexander/windows
Add ability to select specific interface by friendly name on Windows
2018-05-09 16:47:01 +01:00
Neil Alexander
e381808704 Add ability to select interface by friendly name on Windows 2018-05-09 16:42:24 +01:00
Neil Alexander
8ea3070a02 Small tweaks/clean up 2018-05-09 14:03:28 +01:00
Neil Alexander
45b106168e Use HJSON instead of JSON 2018-05-09 13:54:31 +01:00
Neil Alexander
fcf7fe71af Merge pull request #74 from Arceliar/peerauth
Partial support for authenticated peers
2018-05-07 23:29:36 +01:00
Arceliar
5dac273a3d rename to 'AllowedBoxPubs' and similar 2018-05-07 17:05:54 -05:00
Arceliar
6ce16d8192 debug admin socket 2018-05-06 19:48:26 -05:00
Arceliar
94dd231e13 add (not working) admin functions for auth keys, needs debugging 2018-05-06 19:31:19 -05:00
Arceliar
0b391b6e3a debugging and cleanup 2018-05-06 19:01:52 -05:00
Arceliar
80f893aac3 let the peer's linkLoop call close if the peer receives no announcements for too long 2018-05-06 17:17:12 -05:00
Arceliar
6026e0a014 Optional peer authentication, if non-empty then incoming TCP and all UDP peers must match one of these box keys 2018-05-06 16:32:34 -05:00
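The rule is simple: an empty allow-list admits everyone, while a non-empty one requires membership. A sketch with string keys standing in for the real box-key type:

```
package main

import "fmt"

// isAllowed applies the optional-authentication rule: no configured keys
// means authentication is disabled; otherwise the peer's box key must be
// in the allow-list.
func isAllowed(allowed map[string]bool, boxPub string) bool {
	if len(allowed) == 0 {
		return true
	}
	return allowed[boxPub]
}

func main() {
	fmt.Println(isAllowed(map[string]bool{}, "key1"))             // true
	fmt.Println(isAllowed(map[string]bool{"key1": true}, "key1")) // true
	fmt.Println(isAllowed(map[string]bool{"key1": true}, "key2")) // false
}
```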
Neil Alexander
5962d009a5 Merge pull request #73 from Arceliar/removePeer
make removePeers work for TCP connections and minor admin cleanup
2018-05-06 21:14:27 +01:00
Arceliar
3a1b67da8c Update admin.go 2018-05-05 20:20:38 -05:00
Arceliar
cdedd304af make removePeers work for TCP connections and minor admin cleanup 2018-05-05 17:14:03 -05:00
Arceliar
d34e0f92c8 Merge pull request #72 from Arceliar/netnsfix
netns fix
2018-04-28 17:04:25 -05:00
Arceliar
f6cba4efc1 fix loopback devices in s channel test 2018-04-28 16:45:37 -05:00
Arceliar
8b9a91be57 possibly fix weird issue I was having with netns, still not sure I understand why it was a problem 2018-04-28 16:39:58 -05:00
Arceliar
ccb23b6a56 Merge pull request #71 from Arceliar/docs
Docs
2018-04-28 12:09:37 -05:00
Arceliar
44cd1f0cda Merge branch 'master' of https://github.com/yggdrasil-network/yggdrasil-go 2018-04-28 12:02:58 -05:00
Arceliar
893101f232 use url syntax for tcp/udp and mention socks support 2018-04-28 11:57:14 -05:00
Neil Alexander
cf2e112ae8 Merge pull request #70 from yggdrasil-network/urlfix
Default to previous scheme when url.Parse returns an error
2018-04-27 10:26:04 +01:00
Neil Alexander
bdf9e45082 Default to previous scheme when url.Parse returns an error
In response to:
```
panic: parse x.x.x.x:xxx: first path segment in URL cannot contain colon

goroutine 33 [running]:
yggdrasil.(*Core).DEBUG_addPeer(0x8e58000, 0x8e34080, 0x12)
        /go/src/github.com/{{ORG_NAME}}/{{REPO_NAME}}/src/yggdrasil/debug.go:317 +0x420
main.(*node).init.func1(0x8e48000, 0x8e58000)
        /go/src/github.com/{{ORG_NAME}}/{{REPO_NAME}}/yggdrasil.go:75 +0x70
created by main.(*node).init
        /go/src/github.com/{{ORG_NAME}}/{{REPO_NAME}}/yggdrasil.go:69 +0x410
```
2018-04-27 09:53:31 +01:00
Arceliar
76a5d69211 Merge pull request #68 from majestrate/tor-auto
Tor socks proxy support
2018-04-26 20:37:03 -05:00
Jeff Becker
fe13fea1e5 remove hard coded tor config options 2018-04-26 10:26:10 -04:00
Jeff Becker
7756891510 support socks proxy in peer url and decouple explicit tor/i2p routing 2018-04-26 10:23:21 -04:00
Jeff Becker
769b058004 more docs 2018-04-25 10:11:11 -04:00
Jeff Becker
1be8bbed1c more docs 2018-04-25 10:09:09 -04:00
Jeff Becker
de69860e9b * only enable tor proxy dialer when tor is enabled
* add docstrings
2018-04-25 10:06:13 -04:00
Jeff Becker
c16e354a44 Merge remote-tracking branch 'origin/master' into tor-auto 2018-04-25 10:01:10 -04:00
Neil Alexander
275345f3c6 Merge pull request #67 from Arceliar/sessionping
Try using session pings before searches for active sessions
2018-04-22 22:04:03 +01:00
Arceliar
8ef2e55de6 try sending pings before sending searches for an unresponsive session 2018-04-22 15:31:30 -05:00
Jeff Becker
5e23185ddc use address only not port 2018-04-20 08:41:09 -04:00
Jeff Becker
3c4fee0492 tor auto config 2018-04-19 10:30:40 -04:00
Neil Alexander
48ced483d6 Merge pull request #64 from Arceliar/dht
Limit DHT pings to 1/min per node
2018-03-17 20:46:47 +00:00
Neil Alexander
0f8aab7609 Merge pull request #62 from Arceliar/udpclose
UDP Close packet, admin removePeer
2018-03-17 20:43:57 +00:00
Arceliar
fe5ef4e867 don't add things to the dht rumorMill if they've been pinged in the last minute (arbitrary time, needs investigating) 2018-03-17 15:28:22 -05:00
Arceliar
0459f88b92 add a udp close packet, and partial support for a removePeer admin call (sends close to UDP peers, but doesn't close TCP connections yet) 2018-03-16 18:24:28 -05:00
Neil Alexander
96399d586d Merge pull request #61 from neilalexander/armhf
Add armhf target for CircleCI
2018-03-16 10:19:58 +00:00
Neil Alexander
b3117e6fe0 Fix armmhf into armhf for PKGARCH 2018-03-16 10:16:05 +00:00
Neil Alexander
0dc6e24451 Add armhf target for CircleCI 2018-03-16 10:10:44 +00:00
Neil Alexander
9f5b5ac00b Merge pull request #59 from neilalexander/launchd
Add launchd script for macOS to contrib
2018-03-11 22:57:49 +00:00
Neil Alexander
55e4c33169 Add launchd script for macOS to contrib 2018-03-11 22:53:55 +00:00
Neil Alexander
a385d5579c Merge pull request #58 from neilalexander/travisci
Remove TravisCI
2018-03-10 23:52:42 +00:00
Neil Alexander
72e20412d0 Remove TravisCI 2018-03-10 23:48:14 +00:00
Arceliar
1aa35beb55 Merge pull request #57 from yggdrasil-network/readme
Update README.md (#47)
2018-03-10 17:43:50 -06:00
Neil Alexander
eb481bc7c4 Update README.md (#47)
This adds OpenBSD-specific notes as per issue #47
2018-03-10 23:40:03 +00:00
Arceliar
225ca74a9d Merge pull request #56 from neilalexander/icmpv6
Modify ICMPv6 source address for "Packet Too Big" messages
2018-03-10 16:55:12 -06:00
Neil Alexander
bf0413a0c4 Modify ICMPv6 source address for "Packet Too Big" messages to a routable address so that it correctly routes to nodes in a yggdrasil /64 subnet 2018-03-10 22:31:36 +00:00
Neil Alexander
0dc1dd6292 Merge pull request #55 from Arceliar/dht
DHT peers/other partitioning
2018-03-10 21:32:30 +00:00
Arceliar
b34345229a remove any peers from the dht if a reset is triggered (from coord changes) 2018-03-10 15:16:39 -06:00
Arceliar
9ce0b7fbea keep peers separate from other nodes in dht 2018-03-10 13:58:48 -06:00
Arceliar
e04ab7cfe6 Merge pull request #53 from neilalexander/genconf
Generate a random port number in -genconf instead of :0
2018-03-07 22:46:45 -06:00
Neil Alexander
6388b9b99d Increase the randomly generated port in -genconf to values above 32768 2018-03-07 22:36:16 +00:00
Neil Alexander
95a6cfff10 Generate a random port number in -genconf instead of :0 (#51) 2018-03-07 19:41:56 +00:00
Arceliar
a5fcf14824 Merge pull request #52 from neilalexander/udptcp
Show both UDP and TCP listen addresses in stdout (#51)
2018-03-07 12:11:30 -06:00
Neil Alexander
1155816df8 Show both UDP and TCP listen addresses in stdout (#51) 2018-03-07 09:41:04 +00:00
Arceliar
7489704788 Merge pull request #49 from Arceliar/misc-cleanup
Misc cleanup
2018-03-05 23:04:34 -06:00
Arceliar
b6dd0c5248 try to fix the pull/N issue with circleci again 2018-03-05 22:59:48 -06:00
Arceliar
9553b1ef8f apparently PRs get a slash in the branch name, which causes problems for the circleci builds 2018-03-05 22:47:50 -06:00
Arceliar
206570770a re-add a couple files 2018-03-05 22:12:54 -06:00
Arceliar
c74ec0e32f cleanup of misc files 2018-03-05 22:06:32 -06:00
Arceliar
c1fe7d271e Merge pull request #46 from yggdrasil-network/semver
Use semantic versioning (fixes #45)
2018-03-05 16:45:09 -06:00
Neil Alexander
b1380baa9f Update versioning to v1.2.3 where derived from the tag and 3 is commits since last tag 2018-03-05 22:14:36 +00:00
Neil Alexander
c57cf73219 Try symbolic-ref to get branch name 2018-03-05 21:54:30 +00:00
Neil Alexander
d58c971559 Fix bugs in CircleCI naming 2018-03-05 20:26:55 +00:00
Neil Alexander
6366558258 Use new semver versioning for CircleCI builds 2018-03-05 20:20:17 +00:00
Neil Alexander
a75ddff9f3 Ignore non-version tags 2018-03-05 20:06:38 +00:00
Neil Alexander
f4aa4f1848 Match v* tags only 2018-03-05 19:54:18 +00:00
Neil Alexander
3e6530c813 Propose semver version script for #45 2018-03-05 19:34:23 +00:00
Arceliar
4f32d4e780 Merge pull request #40 from neilalexander/netbsd
Attempt to support NetBSD
2018-03-04 18:06:59 -06:00
Neil Alexander
64d4bbbb53 Update CircleCI config.yml to build for BSDs 2018-03-05 00:03:02 +00:00
Neil Alexander
cce10cdb8a Update README.md 2018-03-05 00:00:01 +00:00
Neil Alexander
166d25619d Attempt to support NetBSD
This code actually consolidates a lot of the BSD code together, and even setting the interface MTU with SIOCSIFMTU seems to work fine.

What doesn't work though is setting the interface address using SIOCSIFADDR_IN6, which I attempted to plagiarise from the Darwin code.

As a fallback, ifconfig is used, which solves the problem enough to get it working.
2018-03-04 23:47:01 +00:00
Arceliar
b30b6022a8 Merge pull request #39 from Arceliar/fd
Don't use water.Interface.FD()
2018-03-04 14:17:01 -06:00
Arceliar
cd0d1a1d26 get the fd instead of depending on water to do it 2018-03-04 13:57:34 -06:00
Neil Alexander
3eccca62cc Merge pull request #38 from Arceliar/bsdedupe
Deduplicate some BSD code
2018-03-04 19:23:27 +00:00
Arceliar
039c4a5a35 only try to build on openbsd and freebsd for now, darwin needs work too 2018-03-04 11:02:54 -06:00
Arceliar
478b80a07a cleanup 2018-03-04 10:56:46 -06:00
Arceliar
e7726cfb00 deduplicate some bsd tuntap code, still untested 2018-03-04 10:55:32 -06:00
Neil Alexander
91b08bf474 Update README.md 2018-03-04 16:44:59 +00:00
Arceliar
a5f053448b Merge pull request #36 from neilalexander/freebsd
Adds support for FreeBSD
2018-03-04 10:30:07 -06:00
Neil Alexander
c30792245a Add FreeBSD support in TAP mode 2018-03-04 16:24:50 +00:00
Neil Alexander
57777b6152 Merge pull request #34 from neilalexander/master
Tag CircleCI build artifacts with version and branch
2018-03-04 11:17:47 +00:00
Neil Alexander
e43776f90e Add version file 2018-03-04 11:09:17 +00:00
Neil Alexander
54a742d576 Set explicit version in CIBUILD 2018-03-04 10:58:10 +00:00
Neil Alexander
a92e6c2588 Merge pull request #29 from Arceliar/netlink
Linux: use netlink instead of ip
2018-03-04 10:47:34 +00:00
Neil Alexander
3deadee42e Tag the build artifacts with version and branch 2018-03-04 01:29:00 +00:00
Neil Alexander
6424b07ad8 Merge pull request #28 from Arceliar/panic
tun panic if from debug
2018-03-03 23:36:18 +00:00
Arceliar
5ec6265a70 use netlink instead of ip commands to set address/mtu and bring up the tuntap device on linux 2018-03-03 16:41:36 -06:00
Arceliar
0460fdd301 panic if tun.write or tun.read returns an error and was launched from debug.go, since there's no way to recover in these scenarios 2018-03-03 13:56:26 -06:00
Arceliar
56fd7bd4d4 Merge pull request #27 from Arceliar/doc
update doc to remove statements with a first person perspective
2018-03-03 13:52:09 -06:00
Arceliar
3d0eac21bb update doc to remove statements with a first person perspective 2018-03-03 13:49:26 -06:00
Arceliar
b1a4951725 Update README.md
add circleci status
2018-03-03 12:29:47 -06:00
Arceliar
233b01bedc Update README.md 2018-03-03 12:12:31 -06:00
Arceliar
a367f2ca17 gofmt 2018-03-03 11:49:24 -06:00
Arceliar
55ee8f1afa Merge pull request #26 from Arceliar/nodrop
Nodrop
2018-03-03 11:25:22 -06:00
Arceliar
0578a9f273 manual merge 2018-03-03 11:07:08 -06:00
Arceliar
606fb59c07 Merge pull request #25 from neilalexander/contrib
Add a script to generate .deb files for Debian amd64/i386
2018-03-03 11:02:02 -06:00
Arceliar
99d0740eaa Merge pull request #23 from neilalexander/bsd
Add support for running OpenBSD
2018-03-03 11:01:24 -06:00
Arceliar
3f85c7a9a5 Merge pull request #24 from neilalexander/adminfunctions
Add admin functions for adding peers and modifying TUN/TAP
2018-03-03 10:57:19 -06:00
Neil Alexander
4e5627f933 Update default interface MTU to use per-platform instead of global value 2018-03-03 12:43:39 +00:00
Neil Alexander
4917ea3dd2 Per-platform TUN defaults 2018-03-03 12:30:54 +00:00
Neil Alexander
bec898a326 Don't allow exceeding maximum MTU for a given platform 2018-03-03 11:47:14 +00:00
Neil Alexander
674830799d Add branch name into repo name 2018-03-02 22:36:22 +00:00
Neil Alexander
3c1e00f23f Add CircleCI config.yml
This is an alternative to the TravisCI build file. CircleCI can automatically collect build artifacts which is nice!
2018-03-02 19:13:20 +00:00
Neil Alexander
390f7527f6 Update generate.sh 2018-03-01 18:28:37 +00:00
Neil Alexander
9285e0fe25 Mismatching GOARCH and debian arch 2018-03-01 18:06:53 +00:00
Neil Alexander
3089f2326e Add generate script for Debian .debs into contrib 2018-03-01 18:01:18 +00:00
Neil Alexander
7c0102e43d Be a little bit less verbose on OpenBSD 2018-03-01 15:31:49 +00:00
Neil Alexander
6640b33334 Fix using 'auto' as device name on OpenBSD - default to /dev/tap0 2018-03-01 15:19:20 +00:00
Neil Alexander
24be3f1d67 Turns out FreeBSD is a bit different so restrict this to OpenBSD for now 2018-03-01 15:11:12 +00:00
Neil Alexander
90393ae03b Set interface flags properly on OpenBSD 2018-03-01 15:02:53 +00:00
Neil Alexander
9e4d169208 Set interface IP and MTU on BSD 2018-03-01 13:37:05 +00:00
Neil Alexander
f8dda26dba Add BSD support (openbsd, freebsd, solaris) 2018-03-01 11:49:49 +00:00
Neil Alexander
ebc4eacee4 Allow sane defaults on setTunTap 2018-02-28 15:27:58 +00:00
Neil Alexander
fdd32b9571 Add setTunTap 2018-02-28 15:15:57 +00:00
Neil Alexander
2b48fd1fce Add addPeer to admin socket 2018-02-28 13:43:06 +00:00
Arceliar
ddc4773b19 don't drop send/recv traffic going through the crypto workers, just block until they're free (TODO run further tests to make sure this doesn't cycle/deadlock) 2018-02-26 18:12:28 -06:00
Arceliar
e113b8d530 increase max MTU 2018-02-25 21:56:09 -06:00
Arceliar
33c9f74f48 Merge pull request #22 from Arceliar/udp_mtu_fix
Mostly working PMTU discovery when going over UDP links
2018-02-25 20:43:10 -06:00
Arceliar
63b55cda62 Mostly working PMTU discovery when going over UDP links 2018-02-25 20:24:36 -06:00
Arceliar
0fae932512 Merge pull request #21 from Arceliar/testing
Breaking wire format cleanup and other updates
2018-02-23 17:10:45 -06:00
Arceliar
0470f6f1c1 cleanup 2018-02-23 13:04:52 -06:00
Arceliar
bb3ae8b39b temporarily limit PMTU to 2048, as a workaround to some pathological behavior where a TCP stream carried over a UDP peer can throttle down to 0 in the presence of significant packet loss 2018-02-23 12:46:22 -06:00
Arceliar
b4ea98862b Merge pull request #20 from neilalexander/multithreadgenkeys
Multithread misc/genkeys.go
2018-02-21 12:03:28 -06:00
Neil Alexander
7101e147f4 Multithread misc/genkeys.go 2018-02-21 15:57:03 +00:00
Arceliar
d3dc7765f2 trying to debug UDP+large MTU issues 2018-02-20 17:31:12 -06:00
Arceliar
a81c361484 tcp reconnect bugfix, test with bufio, and switch back to tcp auto-peering by default to continue testing 2018-02-19 23:22:36 -06:00
Arceliar
a21a039b57 remove unused field from peer 2018-02-19 19:47:11 -06:00
Arceliar
4045597516 Use larger UDP chunks for link-local IP and let the OS fragment it. Switch to UDP for link-local peers. Minor code cleanup for TCP. 2018-02-19 19:34:51 -06:00
Arceliar
8c7d514032 Merge branch 'master' into testing 2018-02-19 18:36:06 -06:00
Arceliar
4f710ac2da bash -> sh 2018-02-18 16:07:27 -06:00
Arceliar
a1afebc0df Merge pull request #19 from neilalexander/sigterm
Catch SIGTERM and Windows service termination
2018-02-18 15:35:47 -06:00
Neil Alexander
aef62afb1b gofmt -s -w yggdrasil.go 2018-02-18 21:32:55 +00:00
Neil Alexander
0176d25235 Catch SIGTERM instead of (impossible to capture) SIGKILL, also capture Windows service termination 2018-02-18 21:16:47 +00:00
Arceliar
71150fcb86 ignore root tstamp updates if we just updated, to throttle the rate at which updates (and new signatures) can propagate 2018-02-17 23:57:24 -06:00
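This is a minimum-interval throttle on accepting new root timestamps. A sketch with an arbitrary illustrative interval:

```
package main

import (
	"fmt"
	"time"
)

// rootState sketches the throttle: a new root timestamp is only accepted
// if enough time has passed since the last accepted update, bounding how
// fast (re-)signed updates can propagate through the network.
type rootState struct {
	lastUpdate time.Time
}

func (r *rootState) acceptTimestamp(now time.Time) bool {
	if now.Sub(r.lastUpdate) < 30*time.Second { // illustrative interval
		return false // just updated; ignore to throttle propagation
	}
	r.lastUpdate = now
	return true
}

func main() {
	var r rootState
	now := time.Now()
	fmt.Println(r.acceptTimestamp(now))                       // true
	fmt.Println(r.acceptTimestamp(now.Add(5 * time.Second)))  // false
	fmt.Println(r.acceptTimestamp(now.Add(45 * time.Second))) // true
}
```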
Arceliar
a66a29779a Slightly nicer way to throttle peer announcements 2018-02-17 23:41:42 -06:00
Arceliar
0fc7401635 check root before adding peers to the switch lookupTable, instead of during each lookup 2018-02-17 23:14:23 -06:00
82 changed files with 3849 additions and 6167 deletions

.circleci/config.yml (new file, 72 lines)

@@ -0,0 +1,72 @@
# Golang CircleCI 2.0 configuration file
#
# Check https://circleci.com/docs/2.0/language-go/ for more details
version: 2
jobs:
  build:
    docker:
      - image: circleci/golang:1.9
    working_directory: /go/src/github.com/{{ORG_NAME}}/{{REPO_NAME}}
    steps:
      - checkout
      - run:
          name: Create artifact upload directory and set variables
          command: |
            mkdir /tmp/upload
            echo 'export CINAME=$(sh contrib/semver/name.sh)' >> $BASH_ENV
            echo 'export CIVERSION=$(sh contrib/semver/version.sh | cut -c 2-)' >> $BASH_ENV
      - run:
          name: Build for Linux (including Debian packages)
          command: |
            PKGARCH=amd64 sh contrib/deb/generate.sh && mv yggdrasil /tmp/upload/$CINAME-$CIVERSION-linux-amd64;
            PKGARCH=i386 sh contrib/deb/generate.sh && mv yggdrasil /tmp/upload/$CINAME-$CIVERSION-linux-i386;
            PKGARCH=mipsel sh contrib/deb/generate.sh && mv yggdrasil /tmp/upload/$CINAME-$CIVERSION-linux-mipsel;
            PKGARCH=mips sh contrib/deb/generate.sh && mv yggdrasil /tmp/upload/$CINAME-$CIVERSION-linux-mips;
            PKGARCH=armhf sh contrib/deb/generate.sh && mv yggdrasil /tmp/upload/$CINAME-$CIVERSION-linux-armhf;
            mv *.deb /tmp/upload/
      - run:
          name: Build for macOS
          command: |
            GOOS=darwin GOARCH=amd64 ./build && mv yggdrasil /tmp/upload/$CINAME-$CIVERSION-darwin-amd64;
            GOOS=darwin GOARCH=386 ./build && mv yggdrasil /tmp/upload/$CINAME-$CIVERSION-darwin-i386;
      - run:
          name: Build for OpenBSD
          command: |
            GOOS=openbsd GOARCH=amd64 ./build && mv yggdrasil /tmp/upload/$CINAME-$CIVERSION-openbsd-amd64;
            GOOS=openbsd GOARCH=386 ./build && mv yggdrasil /tmp/upload/$CINAME-$CIVERSION-openbsd-i386;
      - run:
          name: Build for FreeBSD
          command: |
            GOOS=freebsd GOARCH=amd64 ./build && mv yggdrasil /tmp/upload/$CINAME-$CIVERSION-freebsd-amd64;
            GOOS=freebsd GOARCH=386 ./build && mv yggdrasil /tmp/upload/$CINAME-$CIVERSION-freebsd-i386;
      - run:
          name: Build for NetBSD
          command: |
            GOOS=netbsd GOARCH=amd64 ./build && mv yggdrasil /tmp/upload/$CINAME-$CIVERSION-netbsd-amd64;
            GOOS=netbsd GOARCH=386 ./build && mv yggdrasil /tmp/upload/$CINAME-$CIVERSION-netbsd-i386;
      - run:
          name: Build for Windows
          command: |
            GOOS=windows GOARCH=amd64 ./build && mv yggdrasil.exe /tmp/upload/$CINAME-$CIVERSION-windows-amd64.exe;
            GOOS=windows GOARCH=386 ./build && mv yggdrasil.exe /tmp/upload/$CINAME-$CIVERSION-windows-i386.exe;
      - run:
          name: Build for EdgeRouter
          command: |
            git clone https://github.com/neilalexander/vyatta-yggdrasil /tmp/vyatta-yggdrasil;
            cd /tmp/vyatta-yggdrasil;
            BUILDDIR_YGG=$CIRCLE_WORKING_DIRECTORY ./build-edgerouter-x $CIRCLE_BRANCH;
            BUILDDIR_YGG=$CIRCLE_WORKING_DIRECTORY ./build-edgerouter-lite $CIRCLE_BRANCH;
            mv *.deb /tmp/upload;
      - store_artifacts:
          path: /tmp/upload
          destination: /

.travis.yml (deleted, 17 lines)

@@ -1,17 +0,0 @@
language: go
go:
- 1.9.x
os:
- linux
- osx
install:
- export GOPATH="$TRAVIS_BUILD_DIR:$GOPATH"
- go get -d -v
- go get -d -v yggdrasil
- for file in *.go ; do go build -v $file; done
script:
- ./yggdrasil -genconf

README.md

@@ -1,21 +1,21 @@
# Yggdrasil
[![Build Status](https://api.travis-ci.org/Arceliar/yggdrasil-go.svg?branch=master)](https://travis-ci.org/Arceliar/yggdrasil-go)
[![CircleCI](https://circleci.com/gh/yggdrasil-network/yggdrasil-go.svg?style=shield&circle-token=:circle-token
)](https://circleci.com/gh/yggdrasil-network/yggdrasil-go)
## What is it?
This is a toy implementation of an encrypted IPv6 network, with many good ideas stolen from [cjdns](https://github.com/cjdelisle/cjdns), which was written to test a particular routing scheme that I cobbled together one random Wednesday afternoon.
This is a toy implementation of an encrypted IPv6 network, with many good ideas stolen from [cjdns](https://github.com/cjdelisle/cjdns), which was written to test a particular routing scheme that was cobbled together one random afternoon.
It's notably not a shortest path routing scheme, with the goal of scalable name-independent routing on dynamic networks with an internet-like topology.
It's named Yggdrasil after the world tree from Norse mythology, because that seemed like the obvious name given how it works.
For a longer, rambling version of this readme with more information, see: [doc](doc/README.md).
A very early incomplete draft of a [whitepaper](doc/Whitepaper.md) describing the protocol is also available.
More information is available at <https://yggdrasil-network.github.io/>.
This is a toy / proof-of-principle, so it's not even alpha quality software--any nontrivial update is likely to break backwards compatibility with no possibility for a clean upgrade path.
You're encouraged to play with it, but I strongly advise against using it for anything mission critical.
This is a toy / proof-of-principle, and considered alpha quality by the developers. It's not expected to be feature complete, and future updates may not be backwards compatible, though it should warn you if it sees a connection attempt with a node running a newer version.
You're encouraged to play with it, but it is strongly advised not to use it for anything mission critical.
## Building
1. Install Go (tested on 1.9, I use [godeb](https://github.com/niemeyer/godeb)).
1. Install Go (tested on 1.9+, [godeb](https://github.com/niemeyer/godeb) is recommended for debian-based linux distributions).
2. Clone this repository.
2. `./build`
@@ -43,9 +43,9 @@ In practice, you probably want to run this instead:
This keeps a persistent set of keys (and by extension, IP address) and gives you the option of editing the configuration file.
If you want to use it as an overlay network on top of e.g. the internet, then you can do so by adding the remote devices domain/address and port (as a string, e.g. `"1.2.3.4:5678"`) to the list of `Peers` in the configuration file.
You can control whether or not it peers over TCP or UDP by adding `tcp:` or `udp:` to the start of the string, i.e. `"udp:1.2.3.4:5678"`.
It is currently configured to accept incoming TCP and UDP connections.
In the interest of testing the TCP machinery, it's set to create TCP connections for auto-peering (over link-local IPv6), and to use TCP by default if no transport is specified for a manually configured peer.
By default, it peers over TCP (which can be forced with `"tcp://1.2.3.4:5678"` syntax), but it's also possible to connect over a socks proxy (`"socks://socksHost:socksPort/1.2.3.4:5678"`).
The socks proxy approach is useful for e.g. [peering over tor hidden services](https://github.com/yggdrasil-network/public-peers/blob/master/other/tor.md).
UDP support was removed as part of v0.2, and may be replaced by a better implementation at a later date.
### Platforms
@@ -67,12 +67,26 @@ journalctl -u yggdrasil
- Tested and working out of the box on macOS 10.13 High Sierra.
- May work in theory on any macOS version with `utun` support (which was added in macOS 10.7 Lion), although this is untested at present.
- TAP mode is not supported on macOS.
#### FreeBSD, NetBSD
- Works in TAP mode, but currently doesn't work in TUN mode.
- You may need to create the TAP adapter first if it doesn't already exist, i.e. `ifconfig tap0 create`.
#### OpenBSD
- Works in TAP mode, but currently doesn't work in TUN mode.
- You may need to create the TAP adapter first if it doesn't already exist, i.e. `ifconfig tap0 create`.
- OpenBSD is not capable of listening on both IPv4 and IPv6 at the same time on the same socket (unlike FreeBSD and NetBSD). This affects the `Listen` and `AdminListen` configuration options. You will need to set `Listen` and `AdminListen` to use either an IPv4 or an IPv6 address.
- You may consider using [relayd](https://man.openbsd.org/relayd.8) to allow incoming Yggdrasil connections on both IPv4 and IPv6 simultaneously.
#### Windows
- Tested and working on Windows 7 and Windows 10, and should work on any recent versions of Windows, but it depends on the [OpenVPN TAP driver](https://openvpn.net/index.php/open-source/downloads.html) being installed first.
- Has been proven to work with both the [NDIS 5](https://swupdate.openvpn.org/community/releases/tap-windows-9.9.2_3.exe) (`tap-windows-9.9.2_3`) driver and the [NDIS 6](https://swupdate.openvpn.org/community/releases/tap-windows-9.21.2.exe) (`tap-windows-9.21.2`) driver, however there are substantial performance issues with the NDIS 6 driver therefore it is recommended to use the NDIS 5 driver instead.
- Be aware that connectivity issues can occur on Windows if multiple IPv6 addresses from the `fd00::/8` prefix are assigned to the TAP interface. If this happens, then you may need to manually remove the old/unused addresses from the interface (though the code has a workaround in place to do this automatically in some cases).
- TUN mode is not supported on Windows.
- Yggdrasil can be installed as a Windows service so that it runs automatically in the background. From an Administrator Command Prompt:
```
sc create yggdrasil binpath= "\"C:\path\to\yggdrasil.exe\" -useconffile \"C:\path\to\yggdrasil.conf\""
@@ -95,7 +109,7 @@ Suppose a node has generated the address: `fd00:1111:2222:3333:4444:5555:6666:77
Then the node may also use addresses from the prefix: `fd80:1111:2222:3333::/64` (note the `fd00` changed to `fd80`, a separate `/9` is used for prefixes, but the rest of the first 64 bits are the same).
To advertise this prefix and a route to `fd00::/8`, the following seems to work for me:
To advertise this prefix and a route to `fd00::/8`, the following seems to work on the developers' networks:
1. Enable IPv6 forwarding (e.g. `sysctl -w net.ipv6.conf.all.forwarding=1` or add it to sysctl.conf).
@@ -114,21 +128,21 @@ interface eth0
};
```
This is enough to give unsupported devices on my LAN access to the network, with a few security and performance cautions outlined in the [doc](doc/README.md) file.
This is enough to give unsupported devices on the LAN access to the yggdrasil network. See the [configuration](https://yggdrasil-network.github.io/configuration.html) page for more info.
## How does it work?
I'd rather not try to explain in the readme, but I describe it further in the [doc](doc/README.md) file or the very draft of a [whitepaper](doc/Whitepaper.md), so you can check there if you're interested.
I'd rather not try to explain in the readme, but it is described further on the [about](https://yggdrasil-network.github.io/about.html) page, so you can check there if you're interested.
Be warned that it's still not a very good explanation, but it at least gives a high-level overview and links to some relevant work by other people.
## Obligatory performance propaganda
A [simplified model](misc/sim/treesim-forward.py) of this routing scheme has been tested in simulation on the 9204-node [skitter](https://www.caida.org/tools/measurement/skitter/) network topology dataset from [caida](https://www.caida.org/), and compared with results in [arxiv:0708.2309](https://arxiv.org/abs/0708.2309).
Using the routing scheme as implemented in this code, I observe an average multiplicative stretch of 1.08, with an average routing table size of 6 for a name-dependent scheme, and approximately 30 additional (but smaller) entries needed for the name-independent routing table.
The number of name-dependent routing table entries needed is proportional to node degree, so that 6 is the mean of a distribution with a long tail, but I believe this is an acceptable tradeoff.
The size of name-dependent routing table entries is relatively large, due to cryptographic signatures associated with routing table updates, but in the absence of cryptographic overhead I believe each entry is otherwise comparable to the BC routing scheme described in the above paper.
Using the routing scheme as implemented in this code, the average multiplicative stretch is observed to be about 1.08, with an average routing table size of 6 for a name-dependent scheme, and approximately 30 additional (but smaller) entries needed for the name-independent routing table.
The number of name-dependent routing table entries needed is proportional to node degree, so that 6 is the mean of a distribution with a long tail, but this may be an acceptable tradeoff (it's at least worth trying, hence this code).
The size of name-dependent routing table entries is relatively large, due to cryptographic signatures associated with routing table updates, but in the absence of cryptographic overhead, each entry should otherwise be comparable in size to the BC routing scheme described in the above paper.
A modified version of this scheme, with the same resource requirements, achieves a multiplicative stretch of 1.02, which drops to 1.01 if source routing is used.
Both of these optimizations are not present in the current implementation, as the former depends on network state information that I haven't found a way to cryptographically secure, and the latter optimization is both tedious to implement and would make debugging other aspects of the implementation more difficult.
Both of these optimizations are not present in the current implementation, as the former depends on network state information that appears difficult to cryptographically secure, and the latter optimization is both tedious to implement and would make debugging other aspects of the implementation more difficult.
## License
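As an aside on the subnet text in the README excerpt above (`fd00` changed to `fd80`, same first 64 bits otherwise): the derivation can be sketched as below. This illustrates the addressing rule described in the README, not the project's actual address code:

```
package main

import (
	"fmt"
	"net"
)

// subnetForAddress derives the /64 a node may advertise from its own
// address: keep the first 64 bits but set the 9th bit, moving from the
// fd00::/9 address range into the fd80::/9 prefix range.
func subnetForAddress(addr net.IP) *net.IPNet {
	prefix := make(net.IP, net.IPv6len)
	copy(prefix, addr.To16()[:8]) // keep the first 64 bits
	prefix[1] |= 0x80             // fd00... becomes fd80...
	return &net.IPNet{IP: prefix, Mask: net.CIDRMask(64, 128)}
}

func main() {
	addr := net.ParseIP("fd00:1111:2222:3333:4444:5555:6666:7777")
	fmt.Println(subnetForAddress(addr)) // fd80:1111:2222:3333::/64
}
```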

VERSION (new file, 1 line)

@@ -0,0 +1 @@
0.2

build (4 lines changed)

@@ -1,11 +1,11 @@
#!/bin/bash
#!/bin/sh
export GOPATH=$PWD
echo "Downloading..."
go get -d -v
go get -d -v yggdrasil
for file in *.go ; do
echo "Building: $file"
go build -v $file
go build $@ $file
#go build -ldflags="-s -w" -v $file
#upx --brute ${file/.go/}
done

contrib/deb/generate.sh (new file, 97 lines)

@@ -0,0 +1,97 @@
#!/bin/sh
# This is a lazy script to create a .deb for Debian/Ubuntu. It installs
# yggdrasil and enables it in systemd. You can give it the PKGARCH= argument
# i.e. PKGARCH=i386 sh contrib/deb/generate.sh
if [ `pwd` != `git rev-parse --show-toplevel` ]
then
echo "You should run this script from the top-level directory of the git repo"
exit -1
fi
PKGBRANCH=$(basename `git name-rev --name-only HEAD`)
PKGNAME=$(sh contrib/semver/name.sh)
PKGVERSION=$(sh contrib/semver/version.sh | cut -c 2-)
PKGARCH=${PKGARCH-amd64}
PKGFILE=$PKGNAME-$PKGVERSION-$PKGARCH.deb
if [ $PKGARCH = "amd64" ]; then GOARCH=amd64 GOOS=linux ./build
elif [ $PKGARCH = "i386" ]; then GOARCH=386 GOOS=linux ./build
elif [ $PKGARCH = "mipsel" ]; then GOARCH=mipsle GOOS=linux ./build
elif [ $PKGARCH = "mips" ]; then GOARCH=mips64 GOOS=linux ./build
elif [ $PKGARCH = "armhf" ]; then GOARCH=arm GOOS=linux GOARM=7 ./build
else
echo "Specify PKGARCH=amd64,i386,mips,mipsel,armhf"
exit -1
fi
echo "Building $PKGFILE"
mkdir -p /tmp/$PKGNAME/
mkdir -p /tmp/$PKGNAME/debian/
mkdir -p /tmp/$PKGNAME/usr/bin/
mkdir -p /tmp/$PKGNAME/etc/systemd/system/
cat > /tmp/$PKGNAME/debian/changelog << EOF
Please see https://github.com/Arceliar/yggdrasil-go/
EOF
echo 9 > /tmp/$PKGNAME/debian/compat
cat > /tmp/$PKGNAME/debian/control << EOF
Package: $PKGNAME
Version: $PKGVERSION
Section: contrib/net
Priority: extra
Architecture: $PKGARCH
Maintainer: Neil Alexander <neilalexander@users.noreply.github.com>
Description: Debian yggdrasil package
Binary yggdrasil package for Debian and Ubuntu
EOF
cat > /tmp/$PKGNAME/debian/copyright << EOF
Please see https://github.com/Arceliar/yggdrasil-go/
EOF
cat > /tmp/$PKGNAME/debian/docs << EOF
Please see https://github.com/Arceliar/yggdrasil-go/
EOF
cat > /tmp/$PKGNAME/debian/install << EOF
usr/bin/yggdrasil usr/bin
usr/bin/yggdrasilctl usr/bin
etc/systemd/system/*.service etc/systemd/system
EOF
cat > /tmp/$PKGNAME/debian/postinst << EOF
#!/bin/sh
if [ -f /etc/yggdrasil.conf ];
then
mkdir -p /var/backups
echo "Backing up configuration file to /var/backups/yggdrasil.conf.`date +%Y%m%d`"
cp /etc/yggdrasil.conf /var/backups/yggdrasil.conf.`date +%Y%m%d`
echo "Normalising /etc/yggdrasil.conf"
/usr/bin/yggdrasil -useconffile /var/backups/yggdrasil.conf.`date +%Y%m%d` -normaliseconf > /etc/yggdrasil.conf
fi
systemctl enable yggdrasil
systemctl start yggdrasil
EOF
cat > /tmp/$PKGNAME/debian/prerm << EOF
#!/bin/sh
systemctl disable yggdrasil
systemctl stop yggdrasil
EOF
cp yggdrasil /tmp/$PKGNAME/usr/bin/
cp yggdrasilctl /tmp/$PKGNAME/usr/bin/
cp contrib/systemd/yggdrasil.service /tmp/$PKGNAME/etc/systemd/system/
cp contrib/systemd/yggdrasil-resume.service /tmp/$PKGNAME/etc/systemd/system/
tar -czvf /tmp/$PKGNAME/data.tar.gz -C /tmp/$PKGNAME/ \
usr/bin/yggdrasil usr/bin/yggdrasilctl \
etc/systemd/system/yggdrasil.service \
etc/systemd/system/yggdrasil-resume.service
tar -czvf /tmp/$PKGNAME/control.tar.gz -C /tmp/$PKGNAME/debian .
echo 2.0 > /tmp/$PKGNAME/debian-binary
ar -r $PKGFILE \
/tmp/$PKGNAME/debian-binary \
/tmp/$PKGNAME/control.tar.gz \
/tmp/$PKGNAME/data.tar.gz
rm -rf /tmp/$PKGNAME

launchd plist for macOS under contrib (new file, 24 lines)

@@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>yggdrasil</string>
<key>ProgramArguments</key>
<array>
<string>sh</string>
<string>-c</string>
<string>/usr/bin/yggdrasil -useconf &lt; /etc/yggdrasil.conf</string>
</array>
<key>KeepAlive</key>
<true/>
<key>RunAtLoad</key>
<true/>
<key>ProcessType</key>
<string>Interactive</string>
<key>StandardOutPath</key>
<string>/tmp/yggdrasil.stdout.log</string>
<key>StandardErrorPath</key>
<string>/tmp/yggdrasil.stderr.log</string>
</dict>
</plist>

contrib/semver/name.sh (new file, 13 lines)

@@ -0,0 +1,13 @@
#!/bin/sh
# Get the branch name, removing any "/" characters from pull requests
BRANCH=$(git symbolic-ref --short HEAD | tr -d "/" 2>/dev/null)
# Check if the branch name is not master
if [ "$BRANCH" = "master" ]; then
printf "yggdrasil"
exit 0
fi
# If it is something other than master, append it
printf "yggdrasil-%s" "$BRANCH"

contrib/semver/version.sh (new file, 26 lines)

@@ -0,0 +1,26 @@
#!/bin/sh
# Get the last tag
TAG=$(git describe --abbrev=0 --tags --match="v[0-9]*\.[0-9]*" 2>/dev/null)
# Get the number of commits from the last tag
COUNT=$(git rev-list $TAG..HEAD --count 2>/dev/null)
# If it fails then there's no last tag - go from the first commit
if [ $? != 0 ]; then
COUNT=$(git rev-list HEAD --count 2>/dev/null)
printf 'v0.0.%d' "$COUNT"
exit -1
fi
# Split out into major, minor and patch numbers
MAJOR=$(echo $TAG | cut -c 2- | cut -d "." -f 1)
MINOR=$(echo $TAG | cut -c 2- | cut -d "." -f 2)
# Output in the desired format
if [ $COUNT = 0 ]; then
printf 'v%d.%d' "$MAJOR" "$MINOR"
else
printf 'v%d.%d.%d' "$MAJOR" "$MINOR" "$COUNT"
fi

doc/README.md

@@ -47,11 +47,11 @@ Running `genkeys.go` will do this by default.
A distributed hash table is used to facilitate the lookup of a node's name-dependent routing `coords` from a `NodeID`.
A kademlia-like peer structure and xor metric are used in the DHT layout, but only peering info is used--there is no key:value store.
In contrast with standard kademlia, instead of using iterative parallel lookups, a recursive lookup strategy is used.
This is an intentional design decision to make the DHT more fragile--I explicitly want DHT inconsistencies to lead to lookup failures, because of concerns that the iterative parallel approach may hide DHT bugs.
This is an intentional design decision to make the DHT more fragile--the intent is for DHT inconsistencies to lead to lookup failures, because of concerns that the iterative parallel approach may hide DHT bugs.
In particular, the DHT is bootstrapped off of a node's one-hop neighbors, and I've observed that this causes a standard kademlia implementation to diverge in the general case.
To get around this, buckets are updated more aggressively, and the least recently pinged node from each bucket is flushed to make room for new nodes as soon as a response is heard from them.
This appears to fix the bootstrapping issues on all networks where I have previously found them, but recursive lookups are kept for the time being to continue monitoring the issue.
This appears to fix the bootstrapping issues on all networks where they had been observed in testing, but recursive lookups are kept for the time being to continue monitoring the issue.
However, recursive lookups require fewer round trips, so they are expected to be lower latency.
As such, even if a switch to iterative parallel lookups was made, the recursive lookup functionality may be kept and used optimistically to minimize handshake time in stable networks.
@@ -77,11 +77,11 @@ The name dependent scheme is implemented in roughly the following way:
7. The first hop, from the root, includes a signed sequence number which must increase (implemented as a unix timestamp, for convenience), which is used to detect root timeouts and prevent replays.
The highest `TreeID` approach to root selection is just to ensure that nodes select the same root, otherwise distance calculations wouldn't work.
Root selection has a minor effect on the stretch of the paths selected by the network, but this effect was seen to be small compared to the minimum stretch, for nearly all choices of root, so it seems like an OK approach to me, or at least better than any alternatives I could come up with.
Root selection has a minor effect on the stretch of the paths selected by the network, but this effect was seen to be small compared to the minimum stretch, for nearly all choices of root.
The current implementation tracks how long a neighbor has been advertising a locator for the same path, and it prefers to select a parent with a stable locator and a short distance to the root (maximize uptime/distance).
When forwarding traffic, the next hop is selected taking bandwidth to the next hop and distance to the destination into account (maximize bandwidth/distance), subject to the requirement that distance must always decrease.
The bandwidth estimation isn't very good, but it correlates well enough that e.g. when I have a slow wifi and a fast ethernet link to the same node, it typically uses the ethernet link.
The bandwidth estimation isn't very good, but it correlates well enough that e.g. when a slow wifi and a fast ethernet link to the same node are available, it typically uses the ethernet link.
However, if the ethernet link comes up while the wifi link is under heavy use, then it tends to keep using the wifi link until things settle down, and only switches to ethernet after the wireless link is no longer overloaded.
A better approach to bandwidth estimation could probably switch to the new link faster.
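The parent-selection metric described above (maximize uptime/distance) can be read as a simple score. The sketch below is an illustrative rendering of the prose, with assumed units and scaling, not the implementation's actual arithmetic:

```
package main

import (
	"fmt"
	"time"
)

// parentScore prefers a neighbor that has advertised a stable locator for
// a long time and is close to the root: higher is better.
func parentScore(stableFor time.Duration, distToRoot int) float64 {
	return stableFor.Seconds() / float64(distToRoot+1) // +1 avoids division by zero
}

func main() {
	fmt.Println(parentScore(2*time.Hour, 3))   // long-stable, mid distance
	fmt.Println(parentScore(5*time.Minute, 1)) // close, but recently changed
}
```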
@@ -90,11 +90,11 @@ Note that this forwarding procedure generalizes to nodes that are not one-hop ne
## Other implementation details
In case you hadn't noticed, this implementation is written in Go.
That decision was made because I felt like learning Go, and the language seemed like an OK choice for prototyping a network application.
That decision was made because the designer and initial author (@Arceliar) felt like learning a new language when the implementation was started, and the Go language seemed like an OK choice for prototyping a network application.
While Go's GC pauses are small, they do exist, so this implementation probably isn't suited to applications that require very low latency and jitter.
Aside from that, I also tried to write each part of it to be as "bad" (i.e. fragile) as I could manage while still being technically correct.
That's a decision made for debugging purposes: I want my bugs to be obvious, so I can more easily find them and fix them.
Aside from that, an effort was made to write each part of it to be as "bad" (i.e. fragile) as could be managed while still being technically correct.
That's a decision made for debugging purposes: the intent is to make any bugs as obvious as possible, so they can more easily be found and fixed in a small or simulated network.
This implementation runs as an overlay network on top of regular IPv4 or IPv6 traffic.
It uses link-local IPv6 multicast traffic to automatically connect to devices on the same network, but it can also be fed a list of address:port pairs to connect to.
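For a flavor of how link-local multicast discovery of this kind can work, here is a minimal standalone Go sketch using only the standard library; the group address, port, and message handling are assumptions for illustration, not the protocol's actual values:

```go
// Minimal sketch of listening for link-local multicast announcements.
package main

import (
	"fmt"
	"net"
)

func main() {
	// ff02::/16 is the link-local multicast scope; this particular group
	// and port are made up for the example.
	group, err := net.ResolveUDPAddr("udp6", "[ff02::114]:9001")
	if err != nil {
		panic(err)
	}
	// A real implementation would listen on each candidate interface
	// explicitly instead of passing nil here.
	conn, err := net.ListenMulticastUDP("udp6", nil, group)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	buf := make([]byte, 2048)
	for {
		n, from, err := conn.ReadFromUDP(buf)
		if err != nil {
			panic(err)
		}
		// Here a real node would parse the announcement and attempt to peer.
		fmt.Printf("heard %d bytes from %v\n", n, from)
	}
}
```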
@@ -109,22 +109,22 @@ This version includes only the name-dependent part of the routing scheme, but th
In summary:
1. Multiplicative stretch is approximately 1.08 with Yggdrasil, using unweighted, undirected links, as in the paper.
2. A modified version can get this as low as 1.01, but it depends on knowing the degree of each one-hop neighbor, which I can think of no way to cryptographically secure, and it requires using source routing to find a path from A to B and from B to A, and then have both nodes use whichever path was observed to be shorter.
2. A modified version can get this as low as 1.01, but it depends on knowing the degree of each one-hop neighbor, which is not obviously possible to secure cryptographically, and it requires using source routing to find a path from A to B and from B to A, and then having both nodes use whichever path was observed to be shorter.
3. In either case, approximately 6 routing table entries are needed, on average, for the name-dependent routing scheme, where each node needs one routing table entry per one-hop neighbor.
4. Approximately 30 DHT entries are needed to facilitate name-independent routing.
This requires a lookup, the results of which are cached, so old information needs to time out for this to work on dynamic networks.
The schemes it's being compared to only work on static networks, where a similar approach would be fine, so I think it's not a terrible comparison.
The stretch of that initial lookup can be *very* high, but it's only for a couple of round trips to look up keys and then do the ephemeral key exchange, so I don't think it's likely to be a major issue (but probably still a little more expensive than a DNS lookup).
The schemes it's being compared to only work on static networks, where a similar approach would be fine, so this seems like a reasonably fair comparison.
The stretch of that initial lookup can be *very* high, but it's only for a couple of round trips to look up keys and then do the ephemeral key exchange, so this may be an acceptable tradeoff (it's probably more expensive than a DNS lookup, but is similar in principle and effect).
5. Both the name-dependent and name-independent routing table entries are of a size proportional to the length of the path between the root and the node, which is at most the diameter of the network after things have fully converged. Name-dependent routing table entries tend to be much larger in practice due to the size of cryptographic signatures (64 bytes for a signature + 32 for the signing key); a rough size estimate is sketched after this list.
6. The name-dependent routing scheme only sends messages about one-hop neighbors on the link between those neighbors, so if you measure things by per *link* overhead instead of per *node*, then this doesn't seem so bad to me.
7. The name-independent routing scheme scales like a DHT running as an overlay on top of the router-level topology, so the per-link and per-node overhead are going to be topology dependent.
I haven't studied them in a lot of detail, but for realistic topologies, I don't see an obvious reason to think this is a problem.
This hasn't been studied in a lot of detail, but for realistic topologies, where Yggdrasil seems to approximate shortest-path routing, academic research suggests that shortest-path routing does not lead to congestion.
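As a back-of-the-envelope illustration of point 5, the sketch below computes the cryptographic payload of a name-dependent entry from the two constants given in the text; the linear model and all names are assumptions for illustration:

```go
// The constants are the ones given in the text; everything else is made up.
package sizesketch

const (
	sigBytes = 64 // one signature per hop on the root-to-node path
	keyBytes = 32 // plus the corresponding signing key
)

// entrySize estimates the cryptographic payload of a name-dependent routing
// table entry for a node whose path from the root has pathLen hops.
func entrySize(pathLen int) int {
	return pathLen * (sigBytes + keyBytes)
}

// For example, entrySize(6) == 576 bytes for a node six hops from the root.
```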
I think the main reason Yggdrasil performs so well is because it stores information about all one-hop neighbors.
Consider that, if Yggdrasil did not maintain state about all one-hop neighbors, but the protocol still had the ability to forward to all of them, then the OS still needs a way to forward traffic to them.
In most cases, I think this would require some form of per-neighbor state to be stored by the OS, either because there's one dedicated interface per peer or because there are entries in an arp/NDP table to reach multiple devices over a shared switch.
So while compact routing schemes have nice theoretical limits that don't even require one entry per one-hop neighbor, I don't think current real machines can benefit from that property if the routing scheme is used at the router level.
As such, I don't think keeping one entry per neighbor is a problem, especially if nodes with a high degree have proportionally more resources available to them, but I could be overlooking something.
The designer (@Arceliar) believes that the main reason Yggdrasil performs so well is because it stores information about all one-hop neighbors.
Consider that, if Yggdrasil did not maintain state about all one-hop neighbors, but the protocol still had the ability to forward to all of them through some mechanism (i.e. source routing), then the OS still needs a way to forward traffic to them.
In most cases, this would require some form of per-neighbor state to be stored by the OS, either because there's one dedicated interface per peer or because there are entries in an arp/NDP table to reach multiple devices over a shared switch.
So while compact routing schemes have nice theoretical limits, which do not require even as much state as one entry per one-hop neighbor, that property does not seem realistic if the implementation is running at the router level (as opposed to the AS level).
As such, keeping one entry per neighbor may be reasonable, especially if nodes with a high degree have proportionally more resources available to them, but it is possible that something may have been overlooked in the design.
## Disclaimer

View File

@@ -15,17 +15,57 @@ package main
import "encoding/hex"
import "flag"
import "fmt"
import "runtime"
import . "yggdrasil"
var doSig = flag.Bool("sig", false, "generate new signing keys instead")
type keySet struct {
priv []byte
pub []byte
id []byte
ip string
}
func main() {
threads := runtime.GOMAXPROCS(0)
var threadChannels []chan []byte
var currentBest []byte
newKeys := make(chan keySet, threads)
flag.Parse()
switch {
case *doSig:
doSigKeys()
default:
doBoxKeys()
for i := 0; i < threads; i++ {
threadChannels = append(threadChannels, make(chan []byte, threads))
switch {
case *doSig:
go doSigKeys(newKeys, threadChannels[i])
default:
go doBoxKeys(newKeys, threadChannels[i])
}
}
for {
newKey := <-newKeys
if isBetter(currentBest[:], newKey.id[:]) || len(currentBest) == 0 {
currentBest = newKey.id
for _, channel := range threadChannels {
select {
case channel <- newKey.id:
}
}
fmt.Println("--------------------------------------------------------------------------------")
switch {
case *doSig:
fmt.Println("sigPriv:", hex.EncodeToString(newKey.priv[:]))
fmt.Println("sigPub:", hex.EncodeToString(newKey.pub[:]))
fmt.Println("TreeID:", hex.EncodeToString(newKey.id[:]))
default:
fmt.Println("boxPriv:", hex.EncodeToString(newKey.priv[:]))
fmt.Println("boxPub:", hex.EncodeToString(newKey.pub[:]))
fmt.Println("NodeID:", hex.EncodeToString(newKey.id[:]))
fmt.Println("IP:", newKey.ip)
}
}
}
}
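The new main loop above fans candidate keys in from worker goroutines and broadcasts each new best back out to them. A minimal standalone sketch of that pattern is below; the integer "score" stands in for the real key/NodeID comparison, which this sketch does not reproduce:

```go
// Workers mine candidates, report improvements to main, and main broadcasts
// each new best back so every worker can raise its own bar.
package main

import (
	"fmt"
	"math/rand"
	"runtime"
)

// worker generates random candidates and reports any that beat its local best.
func worker(out chan<- int, in <-chan int) {
	best := 0
	for {
		select {
		case newBest := <-in: // another worker found something better
			if newBest > best {
				best = newBest
			}
		default: // otherwise keep mining
			candidate := rand.Intn(1 << 30) // stand-in for generating a keypair
			if candidate > best {
				best = candidate
				out <- candidate
			}
		}
	}
}

func main() {
	threads := runtime.GOMAXPROCS(0)
	results := make(chan int, threads)
	var feedback []chan int
	for i := 0; i < threads; i++ {
		ch := make(chan int, 1)
		feedback = append(feedback, ch)
		go worker(results, ch)
	}
	best := 0
	for count := 0; count < 5; count++ { // stop after a few improvements, for the sketch
		r := <-results
		if r <= best {
			continue // a worker raced us with a stale result
		}
		best = r
		fmt.Println("new best:", best)
		for _, ch := range feedback {
			select {
			case ch <- best:
			default: // non-blocking: skip a worker that hasn't drained yet
			}
		}
	}
}
```

The non-blocking send back to the workers is a deliberate choice in this sketch: a full feedback channel just means that worker is one update behind, which is harmless, whereas blocking would stall the fan-in loop.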
@@ -41,7 +81,7 @@ func isBetter(oldID, newID []byte) bool {
return false
}
func doBoxKeys() {
func doBoxKeys(out chan<- keySet, in <-chan []byte) {
c := Core{}
pub, _ := c.DEBUG_newBoxKeys()
bestID := c.DEBUG_getNodeID(pub)
@@ -49,22 +89,25 @@ func doBoxKeys() {
bestID[idx] = 0
}
for {
pub, priv := c.DEBUG_newBoxKeys()
id := c.DEBUG_getNodeID(pub)
if !isBetter(bestID[:], id[:]) {
continue
select {
case newBestID := <-in:
if isBetter(bestID[:], newBestID) {
copy(bestID[:], newBestID)
}
default:
pub, priv := c.DEBUG_newBoxKeys()
id := c.DEBUG_getNodeID(pub)
if !isBetter(bestID[:], id[:]) {
continue
}
bestID = id
ip := c.DEBUG_addrForNodeID(id)
out <- keySet{priv[:], pub[:], id[:], ip}
}
bestID = id
ip := c.DEBUG_addrForNodeID(id)
fmt.Println("--------------------------------------------------------------------------------")
fmt.Println("boxPriv:", hex.EncodeToString(priv[:]))
fmt.Println("boxPub:", hex.EncodeToString(pub[:]))
fmt.Println("NodeID:", hex.EncodeToString(id[:]))
fmt.Println("IP:", ip)
}
}
func doSigKeys() {
func doSigKeys(out chan<- keySet, in <-chan []byte) {
c := Core{}
pub, _ := c.DEBUG_newSigKeys()
bestID := c.DEBUG_getTreeID(pub)
@@ -72,15 +115,19 @@ func doSigKeys() {
bestID[idx] = 0
}
for {
select {
case newBestID := <-in:
if isBetter(bestID[:], newBestID) {
copy(bestID[:], newBestID)
}
default:
}
pub, priv := c.DEBUG_newSigKeys()
id := c.DEBUG_getTreeID(pub)
if !isBetter(bestID[:], id[:]) {
continue
}
bestID = id
fmt.Println("--------------------------------------------------------------------------------")
fmt.Println("sigPriv:", hex.EncodeToString(priv[:]))
fmt.Println("sigPub:", hex.EncodeToString(pub[:]))
fmt.Println("TreeID:", hex.EncodeToString(id[:]))
out <- keySet{priv[:], pub[:], id[:], ""}
}
}

View File

@@ -1,23 +0,0 @@
#!/bin/sh
ip netns add peerns
ip link add veth0 type veth peer name veth1
ifconfig veth0 192.168.2.1/24 up
echo "1"
#tc qdisc add dev veth0 root tbf rate 8mbit burst 8192 latency 1ms
#tc qdisc add dev veth0 root netem delay 50ms 5ms distribution normal
echo "2"
ip link set veth1 netns peerns
ip netns exec peerns ifconfig veth1 192.168.2.2/24 up
echo "3"
#ip netns exec peerns tc qdisc add dev veth1 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns tc qdisc add dev veth1 root netem delay 50ms 5ms distribution normal
echo "4"
ip netns exec peerns ip addr list
#ip netns exec peerns ./run -useconf=conf2.json
ip netns exec peerns ip link set dev lo up
ip netns exec peerns ./run -autoconf -pprof
#GODEBUG=gctrace=1 ip netns exec peerns ./run -autoconf
#ip netns exec peerns ./run -useconf=conf2.json -cpuprofile=cpu2.prof -memprofile=mem2.prof
#ip netns delete peerns

View File

@@ -1,29 +0,0 @@
#!/bin/sh
ip netns add peerns3
ip link add veth23 type veth peer name veth32
ip link set veth23 netns peerns
ip netns exec peerns ifconfig veth23 192.168.3.1/24 up
#ip netns exec peerns tc qdisc add dev veth23 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns tc qdisc add dev veth23 root netem delay 50ms 5ms distribution normal
ip link set veth32 netns peerns3
ip netns exec peerns3 ifconfig veth32 192.168.3.2/24 up
#ip netns exec peerns3 tc qdisc add dev veth32 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns3 tc qdisc add dev veth32 root netem delay 50ms 5ms distribution normal
ip netns exec peerns3 ip route add 192.168.2.0/24 via 192.168.3.1
#ip link add veth13 type veth peer name veth31
#ifconfig veth13 192.168.4.1/24 up
#ip netns exec peerns tc qdisc add dev veth23 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns tc qdisc add dev veth23 root netem delay 50ms 5ms distribution normal
#ip link set veth31 netns peerns3
#ip netns exec peerns3 ifconfig veth32 192.168.4.3/24 up
#ip netns exec peerns3 tc qdisc add dev veth32 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns3 tc qdisc add dev veth32 root netem delay 50ms 5ms distribution normal
#ip netns exec peerns3 ip route add 192.168.2.0/24 via 192.168.3.1
ip netns exec peerns3 ip addr list
#ip netns exec peerns3 ./run -useconf=conf3.json
ip netns exec peerns3 ifconfig lo up
ip netns exec peerns3 ./run -autoconf
#ip netns delete peerns3

View File

@@ -1,28 +0,0 @@
#!/bin/sh
ip netns add peerns4
ip link add veth34 type veth peer name veth43
ip link set veth34 netns peerns3
ip netns exec peerns3 ifconfig veth34 192.168.4.3/24 up
#ip netns exec peerns tc qdisc add dev veth23 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns tc qdisc add dev veth23 root netem delay 50ms 5ms distribution normal
ip link set veth43 netns peerns4
ip netns exec peerns4 ifconfig veth43 192.168.4.4/24 up
#ip netns exec peerns3 tc qdisc add dev veth32 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns3 tc qdisc add dev veth32 root netem delay 50ms 5ms distribution normal
#ip netns exec peerns4 ip route add 192.168.3.0/24 via 192.168.4.3
#ip link add veth13 type veth peer name veth31
#ifconfig veth13 192.168.4.1/24 up
#ip netns exec peerns tc qdisc add dev veth23 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns tc qdisc add dev veth23 root netem delay 50ms 5ms distribution normal
#ip link set veth31 netns peerns3
#ip netns exec peerns3 ifconfig veth32 192.168.4.3/24 up
#ip netns exec peerns3 tc qdisc add dev veth32 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns3 tc qdisc add dev veth32 root netem delay 50ms 5ms distribution normal
#ip netns exec peerns3 ip route add 192.168.2.0/24 via 192.168.3.1
ip netns exec peerns4 ip addr list
#ip netns exec peerns3 ./run -useconf=conf3.json
ip netns exec peerns4 ./run -autoconf
#ip netns delete peerns3

View File

@@ -44,6 +44,13 @@ ip netns exec node5 tc qdisc add dev veth54 root tbf rate 100mbit burst 8192 lat
ip netns exec node4 tc qdisc add dev veth46 root tbf rate 10mbit burst 8192 latency 1ms
ip netns exec node6 tc qdisc add dev veth64 root tbf rate 10mbit burst 8192 latency 1ms
ip netns exec node1 ip link set lo up
ip netns exec node2 ip link set lo up
ip netns exec node3 ip link set lo up
ip netns exec node4 ip link set lo up
ip netns exec node5 ip link set lo up
ip netns exec node6 ip link set lo up
ip netns exec node1 ./run --autoconf --pprof &> /dev/null &
ip netns exec node2 ./run --autoconf --pprof &> /dev/null &
ip netns exec node3 ./run --autoconf --pprof &> /dev/null &

View File

@@ -1,60 +0,0 @@
import glob
inputDirPath = "out-skitter"
inputFilePaths = glob.glob(inputDirPath+"/*")
inputFilePaths.sort()
merged = dict()
stretches = []
total = 0
for inputFilePath in inputFilePaths:
print "Processing file {}".format(inputFilePath)
with open(inputFilePath, 'r') as f:
inData = f.readlines()
pathsChecked = 0.
avgStretch = 0.
for line in inData:
dat = line.rstrip('\n').split(' ')
eHops = int(dat[0])
nHops = int(dat[1])
count = int(dat[2])
if eHops not in merged: merged[eHops] = dict()
if nHops not in merged[eHops]: merged[eHops][nHops] = 0
merged[eHops][nHops] += count
total += count
pathsChecked += count
stretch = float(nHops)/eHops
avgStretch += stretch*count
finStretch = avgStretch / max(1, pathsChecked)
stretches.append(str(finStretch))
hopsUsed = 0.
hopsNeeded = 0.
avgStretch = 0.
results = []
for eHops in sorted(merged.keys()):
for nHops in sorted(merged[eHops].keys()):
count = merged[eHops][nHops]
result = "{} {} {}".format(eHops, nHops, count)
results.append(result)
hopsUsed += nHops*count
hopsNeeded += eHops*count
stretch = float(nHops)/eHops
avgStretch += stretch*count
print result
bandwidthUsage = hopsUsed/max(1, hopsNeeded)
avgStretch /= max(1, total)
with open("results.txt", "w") as f:
f.write('\n'.join(results))
with open("stretches.txt", "w") as f:
f.write('\n'.join(stretches))
print "Total files processed: {}".format(len(inputFilePaths))
print "Total paths found: {}".format(total)
print "Bandwidth usage: {}".format(bandwidthUsage)
print "Average stretch: {}".format(avgStretch)

View File

@@ -59,3 +59,4 @@ print "Total paths found: {}".format(total)
print "Bandwidth usage: {}".format(bandwidthUsage)
print "Average stretch: {}".format(avgStretch)

View File

@@ -1,4 +1,4 @@
#!/bin/bash
export GOPATH=$PWD
go get -d yggdrasil
go run misc/sim/treesim.go
go run -tags debug misc/sim/treesim.go
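This change runs the simulator with the `debug` build tag. As a hedged sketch of how a tag-guarded file works (the file contents and names here are hypothetical, not the project's actual debug hooks), symbols in a file like this only exist when building with `-tags debug`:

```go
// +build debug

// Hypothetical debug-only file: everything here is illustrative.
package yggdrasil

import "fmt"

// DEBUG_sketchOnly is a made-up helper that only exists in debug builds.
func DEBUG_sketchOnly() {
	fmt.Println("built with -tags debug")
}
```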

View File

@@ -1,197 +0,0 @@
package main
import "fmt"
import "bufio"
import "os"
import "strings"
import "strconv"
import "time"
import "runtime/pprof"
import "flag"
import "router"
////////////////////////////////////////////////////////////////////////////////
type Node struct {
nodeID router.NodeID
table router.Table
links []*Node
}
func (n *Node) init(nodeID router.NodeID) {
n.nodeID = nodeID
n.table.Init(nodeID)
n.links = append(n.links, n)
}
func linkNodes(m, n *Node) {
for _, o := range m.links {
if o.nodeID == n.nodeID {
// Don't allow duplicates
return
}
}
m.links = append(m.links, n)
n.links = append(n.links, m)
}
func makeStoreSquareGrid(sideLength int) map[router.NodeID]*Node {
store := make(map[router.NodeID]*Node)
nNodes := sideLength * sideLength
nodeIDs := make([]router.NodeID, 0, nNodes)
// TODO shuffle nodeIDs
for nodeID := 1; nodeID <= nNodes; nodeID++ {
nodeIDs = append(nodeIDs, router.NodeID(nodeID))
}
for _, nodeID := range nodeIDs {
node := &Node{}
node.init(nodeID)
store[nodeID] = node
}
for idx := 0; idx < nNodes; idx++ {
if (idx % sideLength) != 0 {
linkNodes(store[nodeIDs[idx]], store[nodeIDs[idx-1]])
}
if idx >= sideLength {
linkNodes(store[nodeIDs[idx]], store[nodeIDs[idx-sideLength]])
}
}
return store
}
func loadGraph(path string) map[router.NodeID]*Node {
f, err := os.Open(path)
if err != nil {
panic(err)
}
defer f.Close()
store := make(map[router.NodeID]*Node)
s := bufio.NewScanner(f)
for s.Scan() {
line := s.Text()
nodeIDstrs := strings.Split(line, " ")
nodeIDi0, _ := strconv.Atoi(nodeIDstrs[0])
nodeIDi1, _ := strconv.Atoi(nodeIDstrs[1])
nodeID0 := router.NodeID(nodeIDi0)
nodeID1 := router.NodeID(nodeIDi1)
if store[nodeID0] == nil {
node := &Node{}
node.init(nodeID0)
store[nodeID0] = node
}
if store[nodeID1] == nil {
node := &Node{}
node.init(nodeID1)
store[nodeID1] = node
}
linkNodes(store[nodeID0], store[nodeID1])
}
return store
}
////////////////////////////////////////////////////////////////////////////////
func idleUntilConverged(store map[router.NodeID]*Node) {
timeOfLastChange := 0
step := 0
// Idle until the network has converged
for step-timeOfLastChange < 4*router.TIMEOUT {
step++
fmt.Println("Step:", step, "--", "last change:", timeOfLastChange)
for _, node := range store {
node.table.Tick()
for idx, link := range node.links[1:] {
msg := node.table.CreateMessage(router.Iface(idx))
for idx, fromNode := range link.links {
if fromNode == node {
//fmt.Println("Sending from node", node.nodeID, "to", link.nodeID)
link.table.HandleMessage(msg, router.Iface(idx))
break
}
}
}
}
//for _, node := range store {
// if node.table.DEBUG_isDirty() { timeOfLastChange = step }
//}
//time.Sleep(10*time.Millisecond)
}
}
func testPaths(store map[router.NodeID]*Node) {
nNodes := len(store)
nodeIDs := make([]router.NodeID, 0, nNodes)
for nodeID := range store {
nodeIDs = append(nodeIDs, nodeID)
}
lookups := 0
count := 0
start := time.Now()
for _, source := range store {
count++
fmt.Printf("Testing paths from node %d / %d (%d)\n", count, nNodes, source.nodeID)
for _, dest := range store {
//if source == dest { continue }
destLoc := dest.table.GetLocator()
temp := 0
for here := source; here != dest; {
temp++
if temp > 16 {
panic("Loop?")
}
next := here.links[here.table.Lookup(destLoc)]
if next == here {
//for idx, link := range here.links {
// fmt.Println("DUMP:", idx, link.nodeID)
//}
panic(fmt.Sprintln("Routing Loop:",
source.nodeID,
here.nodeID,
dest.nodeID))
}
//fmt.Println("DEBUG:", source.nodeID, here.nodeID, dest.nodeID)
here = next
lookups++
}
}
}
timed := time.Since(start)
fmt.Printf("%f lookups per second\n", float64(lookups)/timed.Seconds())
}
func dumpStore(store map[router.NodeID]*Node) {
for _, node := range store {
fmt.Println("DUMPSTORE:", node.nodeID, node.table.GetLocator())
node.table.DEBUG_dumpTable()
}
}
////////////////////////////////////////////////////////////////////////////////
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
func main() {
flag.Parse()
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err))
}
if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err))
}
defer pprof.StopCPUProfile()
}
fmt.Println("Test")
store := makeStoreSquareGrid(4)
idleUntilConverged(store)
dumpStore(store)
testPaths(store)
//panic("DYING")
store = loadGraph("hype-2016-09-19.list")
idleUntilConverged(store)
dumpStore(store)
testPaths(store)
}

View File

@@ -894,9 +894,7 @@ if __name__ == "__main__":
args = sys.argv
if len(args) == 2:
job_number = int(sys.argv[1])
#rootNodeASTest("fc00-2017-08-12.txt", "fc00", None, job_number)
#rootNodeASTest("skitter", "out-skitter", None, job_number)
rootNodeASTest("walk-1517414401.txt.map", "out-walk", None, job_number)
rootNodeASTest("fc00-2017-08-12.txt", "fc00", None, job_number)
else:
print "Usage: {} job_number".format(args[0])
print "job_number = which job set to run on this node (1-indexed)"

View File

@@ -1,907 +0,0 @@
# Tree routing scheme (named Yggdrasil, after the world tree from Norse mythology)
# Steps:
# 1: Pick any node, here I'm using highest nodeID
# 2: Build spanning tree, each node stores path back to root
# Optionally with weights for each hop
# Ties broken by preferring a parent with higher degree
# 3: Distance metric: self->peer + (via tree) peer->dest
# 4: Perform (modified) greedy lookup via this metric for each direction (A->B and B->A)
# 5: Source-route traffic using the better of those two paths
# Note: This makes no attempt to simulate a dynamic network
# E.g. A node's peers cannot be disconnected
# TODO:
# Make better use of drop?
# In particular, we should be ignoring *all* recently dropped *paths* to the root
# To minimize route flapping
# Not really an issue in the sim, but probably needed for a real network
import array
import gc
import glob
import gzip
import heapq
import os
import random
import time
#############
# Constants #
#############
# Reminder of where link cost comes in
LINK_COST = 1
# Timeout before dropping something, in simulated seconds
TIMEOUT = 60
###########
# Classes #
###########
class PathInfo:
def __init__(self, nodeID):
self.nodeID = nodeID # e.g. IP
self.coords = [] # Position in tree
self.tstamp = 0 # Timestamp from sender, to keep track of old vs new info
self.degree = 0 # Number of peers the sender has, used to break ties
# The above should be signed
self.path = [nodeID] # Path to node (in path-vector route)
self.time = 0 # Time info was updated, to keep track of e.g. timeouts
self.treeID = nodeID # Hack, let tree use different ID than IP, used so we can dijkstra once and test many roots
def clone(self):
# Return a deep-enough copy of the path
clone = PathInfo(None)
clone.nodeID = self.nodeID
clone.coords = self.coords[:]
clone.tstamp = self.tstamp
clone.degree = self.degree
clone.path = self.path[:]
clone.time = self.time
clone.treeID = self.treeID
return clone
# End class PathInfo
class Node:
def __init__(self, nodeID):
self.info = PathInfo(nodeID) # Self NodeInfo
self.root = None # PathInfo to node at root of tree
self.drop = dict() # PathInfo to nodes from the cluster that have timed out
self.peers = dict() # PathInfo to peers
self.links = dict() # Links to peers (to pass messages)
self.msgs = [] # Said messages
self.table = dict() # Pre-computed lookup table of peer info
def tick(self):
# Do periodic maintenance stuff, including push updates
self.info.time += 1
if self.info.time > self.info.tstamp + TIMEOUT/4:
# Update timestamp at least once every 1/4 timeout period
# This should probably be randomized in a real implementation
self.info.tstamp = self.info.time
self.info.degree = len(self.peers)
#self.info.degree = 0# TODO decide if degree should be used
changed = False # Used to track when the network has converged
changed |= self.cleanRoot()
self.cleanDropped()
# Should probably send messages infrequently if there's nothing new to report
if self.info.tstamp == self.info.time:
msg = self.createMessage()
self.sendMessage(msg)
return changed
def cleanRoot(self):
changed = False
if self.root and self.info.time - self.root.time > TIMEOUT:
print "DEBUG: clean root,", self.root.path
self.drop[self.root.treeID] = self.root
self.root = None
changed = True
if not self.root or self.root.treeID < self.info.treeID:
# No need to drop someone who's worse than us
self.info.coords = [self.info.nodeID]
self.root = self.info.clone()
changed = True
elif self.root.treeID == self.info.treeID:
self.root = self.info.clone()
return changed
def cleanDropped(self):
# May actually be a treeID... better to iterate over keys explicitly
nodeIDs = sorted(self.drop.keys())
for nodeID in nodeIDs:
node = self.drop[nodeID]
if self.info.time - node.time > 4*TIMEOUT:
del self.drop[nodeID]
return None
def createMessage(self):
# Message is just a tuple
# First element is the sender
# Second element is the root
# We will .clone() everything during the send operation
msg = (self.info, self.root)
return msg
def sendMessage(self, msg):
for link in self.links.values():
newMsg = (msg[0].clone(), msg[1].clone())
link.msgs.append(newMsg)
return None
def handleMessages(self):
changed = False
while self.msgs:
changed |= self.handleMessage(self.msgs.pop())
return changed
def handleMessage(self, msg):
changed = False
for node in msg:
# Update the path and timestamp for the sender and root info
node.path.append(self.info.nodeID)
node.time = self.info.time
# Update the sender's info in our list of peers
sender = msg[0]
self.peers[sender.nodeID] = sender
# Decide if we want to update the root
root = msg[1]
updateRoot = False
isSameParent = False
isBetterParent = False
if len(self.root.path) > 1 and len(root.path) > 1:
parent = self.peers[self.root.path[-2]]
if parent.nodeID == sender.nodeID: isSameParent = True
if sender.degree > parent.degree:
# This would also be where you check path uptime/reliability/whatever
# All else being equal, we prefer parents with high degree
# We are trusting peers to report degree correctly in this case
# So expect some performance reduction if your peers aren't trustworthy
# (Lies can increase average stretch by a few %)
isBetterParent = True
if self.info.nodeID in root.path[:-1]: pass # No loopy routes allowed
elif root.treeID in self.drop and self.drop[root.treeID].tstamp >= root.tstamp: pass
elif not self.root: updateRoot = True
elif self.root.treeID < root.treeID: updateRoot = True
elif self.root.treeID != root.treeID: pass
elif self.root.tstamp > root.tstamp: pass
elif len(root.path) < len(self.root.path): updateRoot = True
elif isBetterParent and len(root.path) == len(self.root.path): updateRoot = True
elif isSameParent and self.root.tstamp < root.tstamp: updateRoot = True
if updateRoot:
if not self.root or self.root.path != root.path: changed = True
self.root = root
self.info.coords = self.root.path
return changed
def lookup(self, dest):
# Note: Can loop in an unconverged network
# The person looking up the route is responsible for checking for loops
best = None
bestDist = 0
bestDeg = 0
for node in self.peers.itervalues():
# dist = distance to node + dist (on tree) from node to dest
dist = len(node.path)-1 + treeDist(node.coords, dest.coords)
deg = node.degree
if not best or dist < bestDist or (dist == bestDist and deg > bestDeg):
best = node
bestDist = dist
bestDeg = deg
if best:
next = best.path[-2]
assert next in self.peers
return next
else:
# We failed to look something up
# TODO some way to signal this which doesn't crash
assert False
def initTable(self):
# Pre-computes a lookup table for destination coords
# Insert parent first so you prefer them as a next-hop
self.table.clear()
parent = self.info.nodeID
if len(self.info.coords) >= 2: parent = self.info.coords[-2]
for peer in self.peers.itervalues():
current = self.table
for coord in peer.coords:
if coord not in current: current[coord] = (peer.nodeID, dict())
old = current[coord]
next = old[1]
oldPeer = self.peers[old[0]]
oldDist = len(oldPeer.coords)
oldDeg = oldPeer.degree
newDist = len(peer.coords)
newDeg = peer.degree
# Prefer parent
# Else prefer short distance from root
# If equal distance, prefer high degree
if peer.nodeID == parent: current[coord] = (peer.nodeID, next)
elif newDist < oldDist: current[coord] = (peer.nodeID, next)
elif newDist == oldDist and newDeg > oldDeg: current[coord] = (peer.nodeID, next)
current = next
return None
def lookup_new(self, dest):
# Use pre-computed lookup table to look up next hop for dest coords
assert self.table
if len(self.info.coords) >= 2: parent = self.info.coords[-2]
else: parent = None
current = (parent, self.table)
c = None
for coord in dest.coords:
c = coord
if coord not in current[1]: break
current = current[1][coord]
next = current[0]
if c in self.peers: next = c
if next not in self.peers:
assert next == None
# You're the root of a different connected component
# You'd drop the packet in this case
# To make the path cache not die, need to return a valid next hop...
# Returning self for that reason
next = self.info.nodeID
return next
# End class Node
####################
# Helper Functions #
####################
def getIndexOfLCA(source, dest):
# Return index of last common ancestor in source/dest coords
# -1 if no common ancestor (e.g. different roots)
lcaIdx = -1
minLen = min(len(source), len(dest))
for idx in xrange(minLen):
if source[idx] == dest[idx]: lcaIdx = idx
else: break
return lcaIdx
def treePath(source, dest):
# Return path with source at head and dest at tail
lastMatch = getIndexOfLCA(source, dest)
path = dest[-1:lastMatch:-1] + source[lastMatch:]
assert path[0] == dest[-1]
assert path[-1] == source[-1]
return path
def treeDist(source, dest):
dist = len(source) + len(dest)
lcaIdx = getIndexOfLCA(source, dest)
dist -= 2*(lcaIdx+1)
return dist
def dijkstra(nodestore, startingNodeID):
# Idea to use heapq and basic implementation taken from stackexchange post
# http://codereview.stackexchange.com/questions/79025/dijkstras-algorithm-in-python
nodeIDs = sorted(nodestore.keys())
nNodes = len(nodeIDs)
idxs = dict()
for nodeIdx in xrange(nNodes):
nodeID = nodeIDs[nodeIdx]
idxs[nodeID] = nodeIdx
dists = array.array("H", [0]*nNodes)
queue = [(0, startingNodeID)]
while queue:
dist, nodeID = heapq.heappop(queue)
idx = idxs[nodeID]
if not dists[idx]: # Unvisited, otherwise we skip it
dists[idx] = dist
for peer in nodestore[nodeID].links:
if not dists[idxs[peer]]:
# Peer is also unvisited, so add to queue
heapq.heappush(queue, (dist+LINK_COST, peer))
return dists
def dijkstrall(nodestore):
# Idea to use heapq and basic implementation taken from stackexchange post
# http://codereview.stackexchange.com/questions/79025/dijkstras-algorithm-in-python
nodeIDs = sorted(nodestore.keys())
nNodes = len(nodeIDs)
idxs = dict()
for nodeIdx in xrange(nNodes):
nodeID = nodeIDs[nodeIdx]
idxs[nodeID] = nodeIdx
dists = array.array("H", [0]*nNodes*nNodes) # use GetCacheIndex(nNodes, start, end)
for sourceIdx in xrange(nNodes):
print "Finding shortest paths for node {} / {} ({})".format(sourceIdx+1, nNodes, nodeIDs[sourceIdx])
queue = [(0, sourceIdx)]
while queue:
dist, nodeIdx = heapq.heappop(queue)
distIdx = getCacheIndex(nNodes, sourceIdx, nodeIdx)
if not dists[distIdx]: # Unvisited, otherwise we skip it
dists[distIdx] = dist
for peer in nodestore[nodeIDs[nodeIdx]].links:
pIdx = idxs[peer]
pdIdx = getCacheIndex(nNodes, sourceIdx, pIdx)
if not dists[pdIdx]:
# Peer is also unvisited, so add to queue
heapq.heappush(queue, (dist+LINK_COST, pIdx))
return dists
def linkNodes(node1, node2):
node1.links[node2.info.nodeID] = node2
node2.links[node1.info.nodeID] = node1
############################
# Store topology functions #
############################
def makeStoreSquareGrid(sideLength, randomize=True):
# Simple grid in a sideLength*sideLength square
# Just used to validate that the code runs
store = dict()
nodeIDs = list(range(sideLength*sideLength))
if randomize: random.shuffle(nodeIDs)
for nodeID in nodeIDs:
store[nodeID] = Node(nodeID)
for index in xrange(len(nodeIDs)):
if (index % sideLength != 0): linkNodes(store[nodeIDs[index]], store[nodeIDs[index-1]])
if (index >= sideLength): linkNodes(store[nodeIDs[index]], store[nodeIDs[index-sideLength]])
print "Grid store created, size {}".format(len(store))
return store
def makeStoreASRelGraph(pathToGraph):
#Existing network graphs, in caida.org's asrel format (ASx|ASy|z per line, z denotes relationship type)
with open(pathToGraph, "r") as f:
inData = f.readlines()
store = dict()
for line in inData:
if line.strip()[0] == "#": continue # Skip comment lines
line = line.replace('|'," ")
nodes = map(int, line.split()[0:2])
if nodes[0] not in store: store[nodes[0]] = Node(nodes[0])
if nodes[1] not in store: store[nodes[1]] = Node(nodes[1])
linkNodes(store[nodes[0]], store[nodes[1]])
print "CAIDA AS-relation graph successfully imported, size {}".format(len(store))
return store
def makeStoreASRelGraphMaxDeg(pathToGraph, degIdx=0):
with open(pathToGraph, "r") as f:
inData = f.readlines()
store = dict()
nodeDeg = dict()
for line in inData:
if line.strip()[0] == "#": continue # Skip comment lines
line = line.replace('|'," ")
nodes = map(int, line.split()[0:2])
if nodes[0] not in nodeDeg: nodeDeg[nodes[0]] = 0
if nodes[1] not in nodeDeg: nodeDeg[nodes[1]] = 0
nodeDeg[nodes[0]] += 1
nodeDeg[nodes[1]] += 1
sortedNodes = sorted(nodeDeg.keys(), \
key=lambda x: (nodeDeg[x], x), \
reverse=True)
maxDegNodeID = sortedNodes[degIdx]
return makeStoreASRelGraphFixedRoot(pathToGraph, maxDegNodeID)
def makeStoreASRelGraphFixedRoot(pathToGraph, rootNodeID):
with open(pathToGraph, "r") as f:
inData = f.readlines()
store = dict()
for line in inData:
if line.strip()[0] == "#": continue # Skip comment lines
line = line.replace('|'," ")
nodes = map(int, line.split()[0:2])
if nodes[0] not in store:
store[nodes[0]] = Node(nodes[0])
if nodes[0] == rootNodeID: store[nodes[0]].info.treeID += 1000000000
if nodes[1] not in store:
store[nodes[1]] = Node(nodes[1])
if nodes[1] == rootNodeID: store[nodes[1]].info.treeID += 1000000000
linkNodes(store[nodes[0]], store[nodes[1]])
print "CAIDA AS-relation graph successfully imported, size {}".format(len(store))
return store
def makeStoreDimesEdges(pathToGraph, rootNodeID=None):
# Read from a DIMES csv-formatted graph from a gzip file
store = dict()
with gzip.open(pathToGraph, "r") as f:
inData = f.readlines()
size = len(inData)
index = 0
for edge in inData:
if not index % 1000:
pct = 100.0*index/size
print "Processing edge {}, {:.2f}%".format(index, pct)
index += 1
dat = edge.rstrip().split(',')
node1 = "N" + str(dat[0].strip())
node2 = "N" + str(dat[1].strip())
if '?' in node1 or '?' in node2: continue #Unknown node
if node1 == rootNodeID: node1 = "R" + str(dat[0].strip())
if node2 == rootNodeID: node2 = "R" + str(dat[1].strip())
if node1 not in store: store[node1] = Node(node1)
if node2 not in store: store[node2] = Node(node2)
if node1 != node2: linkNodes(store[node1], store[node2])
print "DIMES graph successfully imported, size {}".format(len(store))
return store
def makeStoreGeneratedGraph(pathToGraph, root=None):
with open(pathToGraph, "r") as f:
inData = f.readlines()
store = dict()
for line in inData:
if line.strip()[0] == "#": continue # Skip comment lines
nodes = map(int, line.strip().split(' ')[0:2])
node1 = nodes[0]
node2 = nodes[1]
if node1 == root: node1 += 1000000
if node2 == root: node2 += 1000000
if node1 not in store: store[node1] = Node(node1)
if node2 not in store: store[node2] = Node(node2)
linkNodes(store[node1], store[node2])
print "Generated graph successfully imported, size {}".format(len(store))
return store
############################################
# Functions used as parts of network tests #
############################################
def idleUntilConverged(store):
nodeIDs = sorted(store.keys())
timeOfLastChange = 0
step = 0
# Idle until the network has converged
while step - timeOfLastChange < 4*TIMEOUT:
step += 1
print "Step: {}, last change: {}".format(step, timeOfLastChange)
changed = False
for nodeID in nodeIDs:
# Update node status, send messages
changed |= store[nodeID].tick()
for nodeID in nodeIDs:
# Process messages
changed |= store[nodeID].handleMessages()
if changed: timeOfLastChange = step
initTables(store)
return store
def getCacheIndex(nodes, sourceIndex, destIndex):
return sourceIndex*nodes + destIndex
def initTables(store):
nodeIDs = sorted(store.keys())
nNodes = len(nodeIDs)
print "Initializing routing tables for {} nodes".format(nNodes)
for idx in xrange(nNodes):
nodeID = nodeIDs[idx]
store[nodeID].initTable()
print "Routing tables initialized"
return None
def getCache(store):
nodeIDs = sorted(store.keys())
nNodes = len(nodeIDs)
nodeIdxs = dict()
for nodeIdx in xrange(nNodes):
nodeIdxs[nodeIDs[nodeIdx]] = nodeIdx
cache = array.array("H", [0]*nNodes*nNodes)
for sourceIdx in xrange(nNodes):
sourceID = nodeIDs[sourceIdx]
print "Building fast lookup table for node {} / {} ({})".format(sourceIdx+1, nNodes, sourceID)
for destIdx in xrange(nNodes):
destID = nodeIDs[destIdx]
if sourceID == destID: nextHop = destID # lookup would fail
else: nextHop = store[sourceID].lookup(store[destID].info)
nextHopIdx = nodeIdxs[nextHop]
cache[getCacheIndex(nNodes, sourceIdx, destIdx)] = nextHopIdx
return cache
def testPaths(store, dists):
cache = getCache(store)
nodeIDs = sorted(store.keys())
nNodes = len(nodeIDs)
idxs = dict()
for nodeIdx in xrange(nNodes):
nodeID = nodeIDs[nodeIdx]
idxs[nodeID] = nodeIdx
results = dict()
for sourceIdx in xrange(nNodes):
sourceID = nodeIDs[sourceIdx]
print "Testing paths from node {} / {} ({})".format(sourceIdx+1, len(nodeIDs), sourceID)
#dists = dijkstra(store, sourceID)
for destIdx in xrange(nNodes):
destID = nodeIDs[destIdx]
if destID == sourceID: continue # Skip self
distIdx = getCacheIndex(nNodes, sourceIdx, destIdx)
eHops = dists[distIdx]
if not eHops: continue # The network is split, no path exists
hops = 0
for pair in ((sourceIdx, destIdx), (destIdx, sourceIdx)): # Either direction because source routing
nHops = 0
locIdx = pair[0]
dIdx = pair[1]
while locIdx != dIdx:
locIdx = cache[getCacheIndex(nNodes, locIdx, dIdx)]
nHops += 1
if not hops or nHops < hops: hops = nHops
if eHops not in results: results[eHops] = dict()
if hops not in results[eHops]: results[eHops][hops] = 0
results[eHops][hops] += 1
return results
def getAvgStretch(pathMatrix):
avgStretch = 0.
checked = 0.
for eHops in sorted(pathMatrix.keys()):
for nHops in sorted(pathMatrix[eHops].keys()):
count = pathMatrix[eHops][nHops]
stretch = float(nHops)/float(max(1, eHops))
avgStretch += stretch*count
checked += count
avgStretch /= max(1, checked)
return avgStretch
def getMaxStretch(pathMatrix):
maxStretch = 0.
for eHops in sorted(pathMatrix.keys()):
for nHops in sorted(pathMatrix[eHops].keys()):
stretch = float(nHops)/float(max(1, eHops))
maxStretch = max(maxStretch, stretch)
return maxStretch
def getCertSizes(store):
# Returns nCerts frequency distribution
# De-duplicates common certs (for shared prefixes in the path)
sizes = dict()
for node in store.values():
certs = set()
for peer in node.peers.values():
pCerts = set()
assert len(peer.path) == 2
assert peer.coords[-1] == peer.path[0]
hops = peer.coords + peer.path[1:]
for hopIdx in xrange(len(hops)-1):
send = hops[hopIdx]
if send == node.info.nodeID: continue # We created it, already have it
path = hops[0:hopIdx+2]
# Each cert is signed by the sender
# Includes information about the path from the sender to the next hop
# Next hop is at hopIdx+1, so the path to next hop is hops[0:hopIdx+2]
cert = "{}:{}".format(send, path)
certs.add(cert)
size = len(certs)
if size not in sizes: sizes[size] = 0
sizes[size] += 1
return sizes
def getMinLinkCertSizes(store):
# Returns nCerts frequency distribution
# De-duplicates common certs (for shared prefixes in the path)
# Based on the minimum number of certs that must be traded through a particular link
# Handled per link
sizes = dict()
for node in store.values():
peerCerts = dict()
for peer in node.peers.values():
pCerts = set()
assert len(peer.path) == 2
assert peer.coords[-1] == peer.path[0]
hops = peer.coords + peer.path[1:]
for hopIdx in xrange(len(hops)-1):
send = hops[hopIdx]
if send == node.info.nodeID: continue # We created it, already have it
path = hops[0:hopIdx+2]
# Each cert is signed by the sender
# Includes information about the path from the sender to the next hop
# Next hop is at hopIdx+1, so the path to next hop is hops[0:hopIdx+2]
cert = "{}:{}".format(send, path)
pCerts.add(cert)
peerCerts[peer.nodeID] = pCerts
for peer in peerCerts:
size = 0
pCerts = peerCerts[peer]
for cert in pCerts:
required = True
for p2 in peerCerts:
if p2 == peer: continue
p2Certs = peerCerts[p2]
if cert in p2Certs: required = False
if required: size += 1
if size not in sizes: sizes[size] = 0
sizes[size] += 1
return sizes
def getPathSizes(store):
# Returns frequency distribution of the total number of hops in the routing table
# I.e. a node with 3 peers, each with 5 hop coord+path, would count as 3x5=15
sizes = dict()
for node in store.values():
size = 0
for peer in node.peers.values():
assert len(peer.path) == 2
assert peer.coords[-1] == peer.path[0]
peerSize = len(peer.coords) + len(peer.path) - 1 # double-counts peer, -1
size += peerSize
if size not in sizes: sizes[size] = 0
sizes[size] += 1
return sizes
def getPeerSizes(store):
# Returns frequency distribution of the number of peers each node has
sizes = dict()
for node in store.values():
nPeers = len(node.peers)
if nPeers not in sizes: sizes[nPeers] = 0
sizes[nPeers] += 1
return sizes
def getAvgSize(sizes):
sumSizes = 0
nNodes = 0
for size in sizes:
count = sizes[size]
sumSizes += size*count
nNodes += count
avgSize = float(sumSizes)/max(1, nNodes)
return avgSize
def getMaxSize(sizes):
return max(sizes.keys())
def getMinSize(sizes):
return min(sizes.keys())
def getResults(pathMatrix):
results = []
for eHops in sorted(pathMatrix.keys()):
for nHops in sorted(pathMatrix[eHops].keys()):
count = pathMatrix[eHops][nHops]
results.append("{} {} {}".format(eHops, nHops, count))
return '\n'.join(results)
####################################
# Functions to run different tests #
####################################
def runTest(store):
# Runs the usual set of tests on the store
# Does not save results, so only meant for quick tests
# To e.g. check the code works, maybe warm up the pypy jit
for node in store.values():
node.info.time = random.randint(0, TIMEOUT)
node.info.tstamp = TIMEOUT
print "Begin testing network"
dists = None
if not dists: dists = dijkstrall(store)
idleUntilConverged(store)
pathMatrix = testPaths(store, dists)
avgStretch = getAvgStretch(pathMatrix)
maxStretch = getMaxStretch(pathMatrix)
peers = getPeerSizes(store)
certs = getCertSizes(store)
paths = getPathSizes(store)
linkCerts = getMinLinkCertSizes(store)
avgPeerSize = getAvgSize(peers)
maxPeerSize = getMaxSize(peers)
avgCertSize = getAvgSize(certs)
maxCertSize = getMaxSize(certs)
avgPathSize = getAvgSize(paths)
maxPathSize = getMaxSize(paths)
avgLinkCert = getAvgSize(linkCerts)
maxLinkCert = getMaxSize(linkCerts)
totalCerts = sum(map(lambda x: x*certs[x], certs.keys()))
totalLinks = sum(map(lambda x: x*peers[x], peers.keys())) # one-way links
avgCertsPerLink = float(totalCerts)/max(1, totalLinks)
print "Finished testing network"
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
print "Avg / Max nPeers size: {} / {}".format(avgPeerSize, maxPeerSize)
print "Avg / Max nCerts size: {} / {}".format(avgCertSize, maxCertSize)
print "Avg / Max total hops in any node's routing table: {} / {}".format(avgPathSize, maxPathSize)
print "Avg / Max lower bound cert requests per link (one-way): {} / {}".format(avgLinkCert, maxLinkCert)
print "Avg certs per link (one-way): {}".format(avgCertsPerLink)
return # End of function
def rootNodeASTest(path, outDir="output-treesim-AS", dists=None, proc = 1):
# Checks performance for every possible choice of root node
# Saves output for each root node to a separate file on disk
# path = input path to some caida.org formatted AS-relationship graph
if not os.path.exists(outDir): os.makedirs(outDir)
assert os.path.exists(outDir)
store = makeStoreASRelGraph(path)
nodes = sorted(store.keys())
for nodeIdx in xrange(len(nodes)):
if nodeIdx % proc != 0: continue # Work belongs to someone else
rootNodeID = nodes[nodeIdx]
outpath = outDir+"/{}".format(rootNodeID)
if os.path.exists(outpath):
print "Skipping {}, already processed".format(rootNodeID)
continue
store = makeStoreASRelGraphFixedRoot(path, rootNodeID)
for node in store.values():
node.info.time = random.randint(0, TIMEOUT)
node.info.tstamp = TIMEOUT
print "Beginning {}, size {}".format(nodeIdx, len(store))
if not dists: dists = dijkstrall(store)
idleUntilConverged(store)
pathMatrix = testPaths(store, dists)
avgStretch = getAvgStretch(pathMatrix)
maxStretch = getMaxStretch(pathMatrix)
results = getResults(pathMatrix)
with open(outpath, "w") as f:
f.write(results)
print "Finished test for root AS {} ({} / {})".format(rootNodeID, nodeIdx+1, len(store))
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
#break # Stop after 1, because they can take forever
return # End of function
def timelineASTest():
# Meant to study the performance of the network as a function of network size
# Loops over a set of AS-relationship graphs
# Runs a test on each graph, selecting highest-degree node as the root
# Saves results for each graph to a separate file on disk
outDir = "output-treesim-timeline-AS"
if not os.path.exists(outDir): os.makedirs(outDir)
assert os.path.exists(outDir)
paths = sorted(glob.glob("asrel/datasets/*"))
for path in paths:
date = os.path.basename(path).split(".")[0]
outpath = outDir+"/{}".format(date)
if os.path.exists(outpath):
print "Skipping {}, already processed".format(date)
continue
store = makeStoreASRelGraphMaxDeg(path)
dists = None
for node in store.values():
node.info.time = random.randint(0, TIMEOUT)
node.info.tstamp = TIMEOUT
print "Beginning {}, size {}".format(date, len(store))
if not dists: dists = dijkstrall(store)
idleUntilConverged(store)
pathMatrix = testPaths(store, dists)
avgStretch = getAvgStretch(pathMatrix)
maxStretch = getMaxStretch(pathMatrix)
results = getResults(pathMatrix)
with open(outpath, "w") as f:
f.write(results)
print "Finished {} with {} nodes".format(date, len(store))
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
#break # Stop after 1, because they can take forever
return # End of function
def timelineDimesTest():
# Meant to study the performance of the network as a function of network size
# Loops over a set of AS-relationship graphs
# Runs a test on each graph, selecting highest-degree node as the root
# Saves results for each graph to a separate file on disk
outDir = "output-treesim-timeline-dimes"
if not os.path.exists(outDir): os.makedirs(outDir)
assert os.path.exists(outDir)
# Input files are named ASEdgesX_Y where X = month (no leading 0), Y = year
paths = sorted(glob.glob("DIMES/ASEdges/*.gz"))
exists = set(glob.glob(outDir+"/*"))
for path in paths:
date = os.path.basename(path).split(".")[0]
outpath = outDir+"/{}".format(date)
if outpath in exists:
print "Skipping {}, already processed".format(date)
continue
store = makeStoreDimesEdges(path)
# Get the highest degree node and make it root
# Sorted by nodeID just to make it stable in the event of a tie
nodeIDs = sorted(store.keys())
bestRoot = ""
bestDeg = 0
for nodeID in nodeIDs:
node = store[nodeID]
if len(node.links) > bestDeg:
bestRoot = nodeID
bestDeg = len(node.links)
assert bestRoot
store = makeStoreDimesEdges(path, bestRoot)
rootID = "R" + bestRoot[1:]
assert rootID in store
# Don't forget to set random seed before setting times
# To make results reproducible
nodeIDs = sorted(store.keys())
random.seed(12345)
for nodeID in nodeIDs:
node = store[nodeID]
node.info.time = random.randint(0, TIMEOUT)
node.info.tstamp = TIMEOUT
print "Beginning {}, size {}".format(date, len(store))
dists = None
if not dists: dists = dijkstrall(store)
idleUntilConverged(store)
pathMatrix = testPaths(store, dists)
avgStretch = getAvgStretch(pathMatrix)
maxStretch = getMaxStretch(pathMatrix)
results = getResults(pathMatrix)
with open(outpath, "w") as f:
f.write(results)
print "Finished {} with {} nodes".format(date, len(store))
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
break # Stop after 1, because they can take forever
return # End of function
def scalingTest(maxTests=None, inputDir="graphs"):
# Meant to study the performance of the network as a function of network size
# Loops over a set of nodes in a previously generated graph
# Runs a test on each graph, testing each node as the root
# if maxTests is set, tests only that number of roots (highest degree first)
# Saves results for each graph to a separate file on disk
outDir = "output-treesim-{}".format(inputDir)
if not os.path.exists(outDir): os.makedirs(outDir)
assert os.path.exists(outDir)
paths = sorted(glob.glob("{}/*".format(inputDir)))
exists = set(glob.glob(outDir+"/*"))
for path in paths:
gc.collect() # pypy waits for gc to close files
graph = os.path.basename(path).split(".")[0]
store = makeStoreGeneratedGraph(path)
# Get the highest degree node and make it root
# Sorted by nodeID just to make it stable in the event of a tie
nodeIDs = sorted(store.keys(), key=lambda x: len(store[x].links), reverse=True)
dists = None
if maxTests: nodeIDs = nodeIDs[:maxTests]
for nodeID in nodeIDs:
nodeIDStr = str(nodeID).zfill(len(str(len(store)-1)))
outpath = outDir+"/{}-{}".format(graph, nodeIDStr)
if outpath in exists:
print "Skipping {}-{}, already processed".format(graph, nodeIDStr)
continue
store = makeStoreGeneratedGraph(path, nodeID)
# Don't forget to set random seed before setting times
random.seed(12345) # To make results reproducible
nIDs = sorted(store.keys())
for nID in nIDs:
node = store[nID]
node.info.time = random.randint(0, TIMEOUT)
node.info.tstamp = TIMEOUT
print "Beginning {}, size {}".format(graph, len(store))
if not dists: dists = dijkstrall(store)
idleUntilConverged(store)
pathMatrix = testPaths(store, dists)
avgStretch = getAvgStretch(pathMatrix)
maxStretch = getMaxStretch(pathMatrix)
results = getResults(pathMatrix)
with open(outpath, "w") as f:
f.write(results)
print "Finished {} with {} nodes for root {}".format(graph, len(store), nodeID)
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
return # End of function
##################
# Main Execution #
##################
if __name__ == "__main__":
if True: # Run a quick test
random.seed(12345) # DEBUG
store = makeStoreSquareGrid(4)
runTest(store) # Quick test
store = None
# Do some real work
#runTest(makeStoreDimesEdges("DIMES/ASEdges/ASEdges1_2007.csv.gz"))
#timelineDimesTest()
#rootNodeASTest("asrel/datasets/19980101.as-rel.txt")
#timelineASTest()
#rootNodeASTest("hype-2016-09-19.list", "output-treesim-hype")
#scalingTest(None, "graphs-20") # First argument 1 to only test 1 root per graph
#store = makeStoreGeneratedGraph("bgp_tables")
#store = makeStoreGeneratedGraph("skitter")
#store = makeStoreASRelGraphMaxDeg("hype-2016-09-19.list") #http://hia.cjdns.ca/watchlist/c/walk.peers.20160919
#store = makeStoreGeneratedGraph("fc00-2017-08-12.txt")
if store: runTest(store)
#rootNodeASTest("skitter", "output-treesim-skitter", None, 0, 1)
#scalingTest(1, "graphs-20") # First argument 1 to only test 1 root per graph
#scalingTest(1, "graphs-21") # First argument 1 to only test 1 root per graph
#scalingTest(1, "graphs-22") # First argument 1 to only test 1 root per graph
#scalingTest(1, "graphs-23") # First argument 1 to only test 1 root per graph
if not store:
import sys
args = sys.argv
if len(args) == 2:
job_number = int(sys.argv[1])
#rootNodeASTest("fc00-2017-08-12.txt", "fc00", None, job_number)
#rootNodeASTest("skitter", "out-skitter", None, job_number)
rootNodeASTest("walk-1517414401.txt.map", "out-walk", None, job_number)
else:
print "Usage: {} job_number".format(args[0])
print "job_number = which job set to run on this node (1-indexed)"

View File

@@ -1,907 +0,0 @@
# Tree routing scheme (named Yggdrasil, after the world tree from Norse mythology)
# Steps:
# 1: Pick any node, here I'm using highest nodeID
# 2: Build spanning tree, each node stores path back to root
# Optionally with weights for each hop
# Ties broken by preferring a parent with higher degree
# 3: Distance metric: self->peer + (via tree) peer->dest
# 4: Perform (modified) greedy lookup via this metric for each direction (A->B and B->A)
# 5: Source-route traffic using the better of those two paths
# Note: This makes no attempt to simulate a dynamic network
# E.g. A node's peers cannot be disconnected
# TODO:
# Make better use of drop?
# In particular, we should be ignoring *all* recently dropped *paths* to the root
# To minimize route flapping
# Not really an issue in the sim, but probably needed for a real network
import array
import gc
import glob
import gzip
import heapq
import os
import random
import time
#############
# Constants #
#############
# Reminder of where link cost comes in
LINK_COST = 1
# Timeout before dropping something, in simulated seconds
TIMEOUT = 60
###########
# Classes #
###########
class PathInfo:
def __init__(self, nodeID):
self.nodeID = nodeID # e.g. IP
self.coords = [] # Position in tree
self.tstamp = 0 # Timestamp from sender, to keep track of old vs new info
self.degree = 0 # Number of peers the sender has, used to break ties
# The above should be signed
self.path = [nodeID] # Path to node (in path-vector route)
self.time = 0 # Time info was updated, to keep track of e.g. timeouts
self.treeID = nodeID # Hack, let tree use different ID than IP, used so we can dijkstra once and test many roots
def clone(self):
# Return a deep-enough copy of the path
clone = PathInfo(None)
clone.nodeID = self.nodeID
clone.coords = self.coords[:]
clone.tstamp = self.tstamp
clone.degree = self.degree
clone.path = self.path[:]
clone.time = self.time
clone.treeID = self.treeID
return clone
# End class PathInfo
class Node:
def __init__(self, nodeID):
self.info = PathInfo(nodeID) # Self NodeInfo
self.root = None # PathInfo to node at root of tree
self.drop = dict() # PathInfo to nodes from the cluster that have timed out
self.peers = dict() # PathInfo to peers
self.links = dict() # Links to peers (to pass messages)
self.msgs = [] # Said messages
self.table = dict() # Pre-computed lookup table of peer info
def tick(self):
# Do periodic maintenance stuff, including push updates
self.info.time += 1
if self.info.time > self.info.tstamp + TIMEOUT/4:
# Update timestamp at least once every 1/4 timeout period
# This should probably be randomized in a real implementation
self.info.tstamp = self.info.time
self.info.degree = len(self.peers)
self.info.degree = 0# TODO decide if degree should be used
changed = False # Used to track when the network has converged
changed |= self.cleanRoot()
self.cleanDropped()
# Should probably send messages infrequently if there's nothing new to report
if self.info.tstamp == self.info.time:
msg = self.createMessage()
self.sendMessage(msg)
return changed
def cleanRoot(self):
changed = False
if self.root and self.info.time - self.root.time > TIMEOUT:
print "DEBUG: clean root,", self.root.path
self.drop[self.root.treeID] = self.root
self.root = None
changed = True
if not self.root or self.root.treeID < self.info.treeID:
# No need to drop someone who's worse than us
self.info.coords = [self.info.nodeID]
self.root = self.info.clone()
changed = True
elif self.root.treeID == self.info.treeID:
self.root = self.info.clone()
return changed
def cleanDropped(self):
# May actually be a treeID... better to iterate over keys explicitly
nodeIDs = sorted(self.drop.keys())
for nodeID in nodeIDs:
node = self.drop[nodeID]
if self.info.time - node.time > 4*TIMEOUT:
del self.drop[nodeID]
return None
def createMessage(self):
# Message is just a tuple
# First element is the sender
# Second element is the root
# We will .clone() everything during the send operation
msg = (self.info, self.root)
return msg
def sendMessage(self, msg):
for link in self.links.values():
newMsg = (msg[0].clone(), msg[1].clone())
link.msgs.append(newMsg)
return None
def handleMessages(self):
changed = False
while self.msgs:
changed |= self.handleMessage(self.msgs.pop())
return changed
def handleMessage(self, msg):
changed = False
for node in msg:
# Update the path and timestamp for the sender and root info
node.path.append(self.info.nodeID)
node.time = self.info.time
# Update the sender's info in our list of peers
sender = msg[0]
self.peers[sender.nodeID] = sender
# Decide if we want to update the root
root = msg[1]
updateRoot = False
isSameParent = False
isBetterParent = False
if len(self.root.path) > 1 and len(root.path) > 1:
parent = self.peers[self.root.path[-2]]
if parent.nodeID == sender.nodeID: isSameParent = True
if sender.degree > parent.degree:
# This would also be where you check path uptime/reliability/whatever
# All else being equal, we prefer parents with high degree
# We are trusting peers to report degree correctly in this case
# So expect some performance reduction if your peers aren't trustworthy
# (Lies can increase average stretch by a few %)
isBetterParent = True
if self.info.nodeID in root.path[:-1]: pass # No loopy routes allowed
elif root.treeID in self.drop and self.drop[root.treeID].tstamp >= root.tstamp: pass
elif not self.root: updateRoot = True
elif self.root.treeID < root.treeID: updateRoot = True
elif self.root.treeID != root.treeID: pass
elif self.root.tstamp > root.tstamp: pass
elif len(root.path) < len(self.root.path): updateRoot = True
elif isBetterParent and len(root.path) == len(self.root.path): updateRoot = True
elif isSameParent and self.root.tstamp < root.tstamp: updateRoot = True
if updateRoot:
if not self.root or self.root.path != root.path: changed = True
self.root = root
self.info.coords = self.root.path
return changed
def lookup(self, dest):
# Note: Can loop in an unconverged network
# The person looking up the route is responsible for checking for loops
best = None
bestDist = 0
bestDeg = 0
for node in self.peers.itervalues():
# dist = distance to node + dist (on tree) from node to dest
dist = len(node.path)-1 + treeDist(node.coords, dest.coords)
deg = node.degree
if not best or dist < bestDist or (dist == bestDist and deg > bestDeg):
best = node
bestDist = dist
bestDeg = deg
if best:
next = best.path[-2]
assert next in self.peers
return next
else:
# We failed to look something up
# TODO some way to signal this which doesn't crash
assert False
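# Worked example (hypothetical IDs): a peer 1 hop away with coords [r, a],
# looking up a dest with coords [r, a, b]:
#   dist = 1 + treeDist([r, a], [r, a, b]) = 1 + 1 = 2
# i.e. one hop to the peer plus one hop down the tree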
def initTable(self):
# Pre-computes a lookup table for destination coords
# Insert parent first so you prefer them as a next-hop
self.table.clear()
parent = self.info.nodeID
if len(self.info.coords) >= 2: parent = self.info.coords[-2]
for peer in self.peers.itervalues():
current = self.table
for coord in peer.coords:
if coord not in current: current[coord] = (peer.nodeID, dict())
old = current[coord]
next = old[1]
oldPeer = self.peers[old[0]]
oldDist = len(oldPeer.coords)
oldDeg = oldPeer.degree
newDist = len(peer.coords)
newDeg = peer.degree
# Prefer parent
# Else prefer short distance from root
# If equal distance, prefer high degree
if peer.nodeID == parent: current[coord] = (peer.nodeID, next)
elif newDist < oldDist: current[coord] = (peer.nodeID, next)
elif newDist == oldDist and newDeg > oldDeg: current[coord] = (peer.nodeID, next)
current = next
return None
def lookup_new(self, dest):
# Use pre-computed lookup table to look up next hop for dest coords
assert self.table
if len(self.info.coords) >= 2: parent = self.info.coords[-2]
else: parent = None
current = (parent, self.table)
c = None
for coord in dest.coords:
c = coord
if coord not in current[1]: break
current = current[1][coord]
next = current[0]
if c in self.peers: next = c
if next not in self.peers:
assert next is None
# You're the root of a different connected component
# You'd drop the packet in this case
# To make the path cache not die, need to return a valid next hop...
# Returning self for that reason
next = self.info.nodeID
return next
# End class Node
####################
# Helper Functions #
####################
def getIndexOfLCA(source, dest):
# Return index of last common ancestor in source/dest coords
# -1 if no common ancestor (e.g. different roots)
lcaIdx = -1
minLen = min(len(source), len(dest))
for idx in xrange(minLen):
if source[idx] == dest[idx]: lcaIdx = idx
else: break
return lcaIdx
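# e.g. with hypothetical coords [r, a, b] and [r, a, c]:
#   getIndexOfLCA([r, a, b], [r, a, c]) == 1, since the shared prefix is [r, a]
# Disjoint coords (different roots) give -1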
def treePath(source, dest):
# Return path with dest at head and source at tail (see the asserts below)
lastMatch = getIndexOfLCA(source, dest)
path = dest[-1:lastMatch:-1] + source[lastMatch:]
assert path[0] == dest[-1]
assert path[-1] == source[-1]
return path
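# e.g. treePath([r, a, b], [r, a, c]) == [c, a, b]: up from c to the LCA a,
# then down to b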
def treeDist(source, dest):
dist = len(source) + len(dest)
lcaIdx = getIndexOfLCA(source, dest)
dist -= 2*(lcaIdx+1)
return dist
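# e.g. treeDist([r, a, b], [r, a, c]) == 2: the LCA index is 1, so
#   dist = 3 + 3 - 2*(1+1) = 2 (one hop up to a, one hop down to c)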
def dijkstra(nodestore, startingNodeID):
# Idea to use heapq and basic implementation taken from stackexchange post
# http://codereview.stackexchange.com/questions/79025/dijkstras-algorithm-in-python
nodeIDs = sorted(nodestore.keys())
nNodes = len(nodeIDs)
idxs = dict()
for nodeIdx in xrange(nNodes):
nodeID = nodeIDs[nodeIdx]
idxs[nodeID] = nodeIdx
dists = array.array("H", [0]*nNodes)
queue = [(0, startingNodeID)]
while queue:
dist, nodeID = heapq.heappop(queue)
idx = idxs[nodeID]
if not dists[idx]: # Unvisited, otherwise we skip it
dists[idx] = dist
for peer in nodestore[nodeID].links:
if not dists[idxs[peer]]:
# Peer is also unvisited, so add to queue
heapq.heappush(queue, (dist+LINK_COST, peer))
return dists
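# Note: 0 doubles as the "unvisited" marker in dists, so the starting node's
# own entry stays 0 and is indistinguishable from "unreachable"; callers
# (e.g. testPaths) skip zero entries, which also skips source == dest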
def dijkstrall(nodestore):
# Idea to use heapq and basic implementation taken from stackexchange post
# http://codereview.stackexchange.com/questions/79025/dijkstras-algorithm-in-python
nodeIDs = sorted(nodestore.keys())
nNodes = len(nodeIDs)
idxs = dict()
for nodeIdx in xrange(nNodes):
nodeID = nodeIDs[nodeIdx]
idxs[nodeID] = nodeIdx
dists = array.array("H", [0]*nNodes*nNodes) # use getCacheIndex(nNodes, start, end)
for sourceIdx in xrange(nNodes):
print "Finding shortest paths for node {} / {} ({})".format(sourceIdx+1, nNodes, nodeIDs[sourceIdx])
queue = [(0, sourceIdx)]
while queue:
dist, nodeIdx = heapq.heappop(queue)
distIdx = getCacheIndex(nNodes, sourceIdx, nodeIdx)
if not dists[distIdx]: # Unvisited, otherwise we skip it
dists[distIdx] = dist
for peer in nodestore[nodeIDs[nodeIdx]].links:
pIdx = idxs[peer]
pdIdx = getCacheIndex(nNodes, sourceIdx, pIdx)
if not dists[pdIdx]:
# Peer is also unvisited, so add to queue
heapq.heappush(queue, (dist+LINK_COST, pIdx))
return dists
def linkNodes(node1, node2):
node1.links[node2.info.nodeID] = node2
node2.links[node1.info.nodeID] = node1
############################
# Store topology functions #
############################
def makeStoreSquareGrid(sideLength, randomize=True):
# Simple grid in a sideLength*sideLength square
# Just used to validate that the code runs
store = dict()
nodeIDs = list(range(sideLength*sideLength))
if randomize: random.shuffle(nodeIDs)
for nodeID in nodeIDs:
store[nodeID] = Node(nodeID)
for index in xrange(len(nodeIDs)):
if (index % sideLength != 0): linkNodes(store[nodeIDs[index]], store[nodeIDs[index-1]])
if (index >= sideLength): linkNodes(store[nodeIDs[index]], store[nodeIDs[index-sideLength]])
print "Grid store created, size {}".format(len(store))
return store
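# Minimal usage sketch (hypothetical, assumes the rest of this file's definitions):
#   store = makeStoreSquareGrid(3)
#   idleUntilConverged(store)
#   print store[0].lookup(store[8].info) # Next hop from node 0 toward node 8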
def makeStoreASRelGraph(pathToGraph):
# Existing network graphs, in caida.org's asrel format (ASx|ASy|z per line, where z denotes the relationship type)
with open(pathToGraph, "r") as f:
inData = f.readlines()
store = dict()
for line in inData:
if not line.strip() or line.strip()[0] == "#": continue # Skip blank/comment lines
line = line.replace('|'," ")
nodes = map(int, line.split()[0:2])
if nodes[0] not in store: store[nodes[0]] = Node(nodes[0])
if nodes[1] not in store: store[nodes[1]] = Node(nodes[1])
linkNodes(store[nodes[0]], store[nodes[1]])
print "CAIDA AS-relation graph successfully imported, size {}".format(len(store))
return store
def makeStoreASRelGraphMaxDeg(pathToGraph, degIdx=0):
with open(pathToGraph, "r") as f:
inData = f.readlines()
store = dict()
nodeDeg = dict()
for line in inData:
if not line.strip() or line.strip()[0] == "#": continue # Skip blank/comment lines
line = line.replace('|'," ")
nodes = map(int, line.split()[0:2])
if nodes[0] not in nodeDeg: nodeDeg[nodes[0]] = 0
if nodes[1] not in nodeDeg: nodeDeg[nodes[1]] = 0
nodeDeg[nodes[0]] += 1
nodeDeg[nodes[1]] += 1
sortedNodes = sorted(nodeDeg.keys(), key=lambda x: (nodeDeg[x], x), reverse=True)
maxDegNodeID = sortedNodes[degIdx]
return makeStoreASRelGraphFixedRoot(pathToGraph, maxDegNodeID)
def makeStoreASRelGraphFixedRoot(pathToGraph, rootNodeID):
with open(pathToGraph, "r") as f:
inData = f.readlines()
store = dict()
for line in inData:
if not line.strip() or line.strip()[0] == "#": continue # Skip blank/comment lines
line = line.replace('|'," ")
nodes = map(int, line.split()[0:2])
if nodes[0] not in store:
store[nodes[0]] = Node(nodes[0])
if nodes[0] == rootNodeID: store[nodes[0]].info.treeID += 1000000000
if nodes[1] not in store:
store[nodes[1]] = Node(nodes[1])
if nodes[1] == rootNodeID: store[nodes[1]].info.treeID += 1000000000
linkNodes(store[nodes[0]], store[nodes[1]])
print "CAIDA AS-relation graph successfully imported, size {}".format(len(store))
return store
def makeStoreDimesEdges(pathToGraph, rootNodeID=None):
# Read a DIMES CSV-formatted graph from a gzip file
store = dict()
with gzip.open(pathToGraph, "r") as f:
inData = f.readlines()
size = len(inData)
index = 0
for edge in inData:
if not index % 1000:
pct = 100.0*index/size
print "Processing edge {}, {:.2f}%".format(index, pct)
index += 1
dat = edge.rstrip().split(',')
node1 = "N" + str(dat[0].strip())
node2 = "N" + str(dat[1].strip())
if '?' in node1 or '?' in node2: continue # Unknown node
if node1 == rootNodeID: node1 = "R" + str(dat[0].strip())
if node2 == rootNodeID: node2 = "R" + str(dat[1].strip())
if node1 not in store: store[node1] = Node(node1)
if node2 not in store: store[node2] = Node(node2)
if node1 != node2: linkNodes(store[node1], store[node2])
print "DIMES graph successfully imported, size {}".format(len(store))
return store
def makeStoreGeneratedGraph(pathToGraph, root=None):
with open(pathToGraph, "r") as f:
inData = f.readlines()
store = dict()
for line in inData:
if not line.strip() or line.strip()[0] == "#": continue # Skip blank/comment lines
nodes = map(int, line.strip().split(' ')[0:2])
node1 = nodes[0]
node2 = nodes[1]
if node1 == root: node1 += 1000000
if node2 == root: node2 += 1000000
if node1 not in store: store[node1] = Node(node1)
if node2 not in store: store[node2] = Node(node2)
linkNodes(store[node1], store[node2])
print "Generated graph successfully imported, size {}".format(len(store))
return store
############################################
# Functions used as parts of network tests #
############################################
def idleUntilConverged(store):
nodeIDs = sorted(store.keys())
timeOfLastChange = 0
step = 0
# Idle until the network has converged
while step - timeOfLastChange < 4*TIMEOUT:
step += 1
print "Step: {}, last change: {}".format(step, timeOfLastChange)
changed = False
for nodeID in nodeIDs:
# Update node status, send messages
changed |= store[nodeID].tick()
for nodeID in nodeIDs:
# Process messages
changed |= store[nodeID].handleMessages()
if changed: timeOfLastChange = step
initTables(store)
return store
def getCacheIndex(nodes, sourceIndex, destIndex):
return sourceIndex*nodes + destIndex
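# Row-major flattening of a (source, dest) pair, e.g. with nodes=4,
#   getCacheIndex(4, 2, 3) == 2*4 + 3 == 11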
def initTables(store):
nodeIDs = sorted(store.keys())
nNodes = len(nodeIDs)
print "Initializing routing tables for {} nodes".format(nNodes)
for idx in xrange(nNodes):
nodeID = nodeIDs[idx]
store[nodeID].initTable()
print "Routing tables initialized"
return None
def getCache(store):
nodeIDs = sorted(store.keys())
nNodes = len(nodeIDs)
nodeIdxs = dict()
for nodeIdx in xrange(nNodes):
nodeIdxs[nodeIDs[nodeIdx]] = nodeIdx
cache = array.array("H", [0]*nNodes*nNodes)
for sourceIdx in xrange(nNodes):
sourceID = nodeIDs[sourceIdx]
print "Building fast lookup table for node {} / {} ({})".format(sourceIdx+1, nNodes, sourceID)
for destIdx in xrange(nNodes):
destID = nodeIDs[destIdx]
if sourceID == destID: nextHop = destID # lookup would fail
else: nextHop = store[sourceID].lookup(store[destID].info)
nextHopIdx = nodeIdxs[nextHop]
cache[getCacheIndex(nNodes, sourceIdx, destIdx)] = nextHopIdx
return cache
def testPaths(store, dists):
cache = getCache(store)
nodeIDs = sorted(store.keys())
nNodes = len(nodeIDs)
idxs = dict()
for nodeIdx in xrange(nNodes):
nodeID = nodeIDs[nodeIdx]
idxs[nodeID] = nodeIdx
results = dict()
for sourceIdx in xrange(nNodes):
sourceID = nodeIDs[sourceIdx]
print "Testing paths from node {} / {} ({})".format(sourceIdx+1, len(nodeIDs), sourceID)
#dists = dijkstra(store, sourceID)
for destIdx in xrange(nNodes):
destID = nodeIDs[destIdx]
if destID == sourceID: continue # Skip self
distIdx = getCacheIndex(nNodes, sourceIdx, destIdx)
eHops = dists[distIdx]
if not eHops: continue # The network is split, no path exists
hops = 0
for pair in ((sourceIdx, destIdx), (destIdx, sourceIdx)): # Either direction because source routing
nHops = 0
locIdx = pair[0]
dIdx = pair[1]
while locIdx != dIdx:
locIdx = cache[getCacheIndex(nNodes, locIdx, dIdx)]
nHops += 1
if not hops or nHops < hops: hops = nHops
if eHops not in results: results[eHops] = dict()
if hops not in results[eHops]: results[eHops][hops] = 0
results[eHops][hops] += 1
return results
def getAvgStretch(pathMatrix):
avgStretch = 0.
checked = 0.
for eHops in sorted(pathMatrix.keys()):
for nHops in sorted(pathMatrix[eHops].keys()):
count = pathMatrix[eHops][nHops]
stretch = float(nHops)/float(max(1, eHops))
avgStretch += stretch*count
checked += count
avgStretch /= max(1, checked)
return avgStretch
def getMaxStretch(pathMatrix):
maxStretch = 0.
for eHops in sorted(pathMatrix.keys()):
for nHops in sorted(pathMatrix[eHops].keys()):
stretch = float(nHops)/float(max(1, eHops))
maxStretch = max(maxStretch, stretch)
return maxStretch
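# Stretch = hops actually taken / shortest-path hops, e.g. taking 6 hops when
# the optimal route is 4 hops gives a stretch of 1.5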
def getCertSizes(store):
# Returns nCerts frequency distribution
# De-duplicates common certs (for shared prefixes in the path)
sizes = dict()
for node in store.values():
certs = set()
for peer in node.peers.values():
pCerts = set()
assert len(peer.path) == 2
assert peer.coords[-1] == peer.path[0]
hops = peer.coords + peer.path[1:]
for hopIdx in xrange(len(hops)-1):
send = hops[hopIdx]
if send == node.info.nodeID: continue # We created it, already have it
path = hops[0:hopIdx+2]
# Each cert is signed by the sender
# Includes information about the path from the sender to the next hop
# Next hop is at hopIdx+1, so the path to next hop is hops[0:hopIdx+2]
cert = "{}:{}".format(send, path)
certs.add(cert)
size = len(certs)
if size not in sizes: sizes[size] = 0
sizes[size] += 1
return sizes
def getMinLinkCertSizes(store):
# Returns nCerts frequency distribution
# De-duplicates common certs (for shared prefixes in the path)
# Based on the minimum number of certs that must be traded through a particular link
# Handled per link
sizes = dict()
for node in store.values():
peerCerts = dict()
for peer in node.peers.values():
pCerts = set()
assert len(peer.path) == 2
assert peer.coords[-1] == peer.path[0]
hops = peer.coords + peer.path[1:]
for hopIdx in xrange(len(hops)-1):
send = hops[hopIdx]
if send == node.info.nodeID: continue # We created it, already have it
path = hops[0:hopIdx+2]
# Each cert is signed by the sender
# Includes information about the path from the sender to the next hop
# Next hop is at hopIdx+1, so the path to next hop is hops[0:hopIdx+2]
cert = "{}:{}".format(send, path)
pCerts.add(cert)
peerCerts[peer.nodeID] = pCerts
for peer in peerCerts:
size = 0
pCerts = peerCerts[peer]
for cert in pCerts:
required = True
for p2 in peerCerts:
if p2 == peer: continue
p2Certs = peerCerts[p2]
if cert in p2Certs: required = False
if required: size += 1
if size not in sizes: sizes[size] = 0
sizes[size] += 1
return sizes
def getPathSizes(store):
# Returns frequency distribution of the total number of hops in the routing table
# I.e. a node with 3 peers, each with a 5-hop coords+path, would count as 3x5=15
sizes = dict()
for node in store.values():
size = 0
for peer in node.peers.values():
assert len(peer.path) == 2
assert peer.coords[-1] == peer.path[0]
peerSize = len(peer.coords) + len(peer.path) - 1 # double-counts peer, -1
size += peerSize
if size not in sizes: sizes[size] = 0
sizes[size] += 1
return sizes
def getPeerSizes(store):
# Returns frequency distribution of the number of peers each node has
sizes = dict()
for node in store.values():
nPeers = len(node.peers)
if nPeers not in sizes: sizes[nPeers] = 0
sizes[nPeers] += 1
return sizes
def getAvgSize(sizes):
sumSizes = 0
nNodes = 0
for size in sizes:
count = sizes[size]
sumSizes += size*count
nNodes += count
avgSize = float(sumSizes)/max(1, nNodes)
return avgSize
def getMaxSize(sizes):
return max(sizes.keys())
def getMinSize(sizes):
return min(sizes.keys())
def getResults(pathMatrix):
results = []
for eHops in sorted(pathMatrix.keys()):
for nHops in sorted(pathMatrix[eHops].keys()):
count = pathMatrix[eHops][nHops]
results.append("{} {} {}".format(eHops, nHops, count))
return '\n'.join(results)
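# Each output line is "eHops nHops count", e.g. "4 6 12" would mean 12
# source/dest pairs with shortest-path length 4 were routed in 6 hops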
####################################
# Functions to run different tests #
####################################
def runTest(store):
# Runs the usual set of tests on the store
# Does not save results, so only meant for quick tests
# To e.g. check the code works, maybe warm up the pypy jit
for node in store.values():
node.info.time = random.randint(0, TIMEOUT)
node.info.tstamp = TIMEOUT
print "Begin testing network"
dists = None
if not dists: dists = dijkstrall(store)
idleUntilConverged(store)
pathMatrix = testPaths(store, dists)
avgStretch = getAvgStretch(pathMatrix)
maxStretch = getMaxStretch(pathMatrix)
peers = getPeerSizes(store)
certs = getCertSizes(store)
paths = getPathSizes(store)
linkCerts = getMinLinkCertSizes(store)
avgPeerSize = getAvgSize(peers)
maxPeerSize = getMaxSize(peers)
avgCertSize = getAvgSize(certs)
maxCertSize = getMaxSize(certs)
avgPathSize = getAvgSize(paths)
maxPathSize = getMaxSize(paths)
avgLinkCert = getAvgSize(linkCerts)
maxLinkCert = getMaxSize(linkCerts)
totalCerts = sum(map(lambda x: x*certs[x], certs.keys()))
totalLinks = sum(map(lambda x: x*peers[x], peers.keys())) # one-way links
avgCertsPerLink = float(totalCerts)/max(1, totalLinks)
print "Finished testing network"
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
print "Avg / Max nPeers size: {} / {}".format(avgPeerSize, maxPeerSize)
print "Avg / Max nCerts size: {} / {}".format(avgCertSize, maxCertSize)
print "Avg / Max total hops in any node's routing table: {} / {}".format(avgPathSize, maxPathSize)
print "Avg / Max lower bound cert requests per link (one-way): {} / {}".format(avgLinkCert, maxLinkCert)
print "Avg certs per link (one-way): {}".format(avgCertsPerLink)
return # End of function
def rootNodeASTest(path, outDir="output-treesim-AS", dists=None, proc=1):
# Checks performance for every possible choice of root node
# Saves output for each root node to a separate file on disk
# path = input path to some caida.org formatted AS-relationship graph
if not os.path.exists(outDir): os.makedirs(outDir)
assert os.path.exists(outDir)
store = makeStoreASRelGraph(path)
nodes = sorted(store.keys())
for nodeIdx in xrange(len(nodes)):
if nodeIdx % proc != 0: continue # Work belongs to someone else
rootNodeID = nodes[nodeIdx]
outpath = outDir+"/{}".format(rootNodeID)
if os.path.exists(outpath):
print "Skipping {}, already processed".format(rootNodeID)
continue
store = makeStoreASRelGraphFixedRoot(path, rootNodeID)
for node in store.values():
node.info.time = random.randint(0, TIMEOUT)
node.info.tstamp = TIMEOUT
print "Beginning {}, size {}".format(nodeIdx, len(store))
if not dists: dists = dijkstrall(store)
idleUntilConverged(store)
pathMatrix = testPaths(store, dists)
avgStretch = getAvgStretch(pathMatrix)
maxStretch = getMaxStretch(pathMatrix)
results = getResults(pathMatrix)
with open(outpath, "w") as f:
f.write(results)
print "Finished test for root AS {} ({} / {})".format(rootNodeID, nodeIdx+1, len(store))
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
#break # Stop after 1, because they can take forever
return # End of function
def timelineASTest():
# Meant to study the performance of the network as a function of network size
# Loops over a set of AS-relationship graphs
# Runs a test on each graph, selecting highest-degree node as the root
# Saves results for each graph to a separate file on disk
outDir = "output-treesim-timeline-AS"
if not os.path.exists(outDir): os.makedirs(outDir)
assert os.path.exists(outDir)
paths = sorted(glob.glob("asrel/datasets/*"))
for path in paths:
date = os.path.basename(path).split(".")[0]
outpath = outDir+"/{}".format(date)
if os.path.exists(outpath):
print "Skipping {}, already processed".format(date)
continue
store = makeStoreASRelGraphMaxDeg(path)
dists = None
for node in store.values():
node.info.time = random.randint(0, TIMEOUT)
node.info.tstamp = TIMEOUT
print "Beginning {}, size {}".format(date, len(store))
if not dists: dists = dijkstrall(store)
idleUntilConverged(store)
pathMatrix = testPaths(store, dists)
avgStretch = getAvgStretch(pathMatrix)
maxStretch = getMaxStretch(pathMatrix)
results = getResults(pathMatrix)
with open(outpath, "w") as f:
f.write(results)
print "Finished {} with {} nodes".format(date, len(store))
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
#break # Stop after 1, because they can take forever
return # End of function
def timelineDimesTest():
# Meant to study the performance of the network as a function of network size
# Loops over a set of AS-relationship graphs
# Runs a test on each graph, selecting highest-degree node as the root
# Saves results for each graph to a separate file on disk
outDir = "output-treesim-timeline-dimes"
if not os.path.exists(outDir): os.makedirs(outDir)
assert os.path.exists(outDir)
# Input files are named ASEdgesX_Y where X = month (no leading 0), Y = year
paths = sorted(glob.glob("DIMES/ASEdges/*.gz"))
exists = set(glob.glob(outDir+"/*"))
for path in paths:
date = os.path.basename(path).split(".")[0]
outpath = outDir+"/{}".format(date)
if outpath in exists:
print "Skipping {}, already processed".format(date)
continue
store = makeStoreDimesEdges(path)
# Get the highest degree node and make it root
# Sorted by nodeID just to make it stable in the event of a tie
nodeIDs = sorted(store.keys())
bestRoot = ""
bestDeg = 0
for nodeID in nodeIDs:
node = store[nodeID]
if len(node.links) > bestDeg:
bestRoot = nodeID
bestDeg = len(node.links)
assert bestRoot
store = makeStoreDimesEdges(path, bestRoot)
rootID = "R" + bestRoot[1:]
assert rootID in store
dists = None
# Don't forget to set random seed before setting times
# To make results reproducible
nodeIDs = sorted(store.keys())
random.seed(12345)
for nodeID in nodeIDs:
node = store[nodeID]
node.info.time = random.randint(0, TIMEOUT)
node.info.tstamp = TIMEOUT
print "Beginning {}, size {}".format(date, len(store))
if not dists: dists = dijkstrall(store)
idleUntilConverged(store)
pathMatrix = testPaths(store, dists)
avgStretch = getAvgStretch(pathMatrix)
maxStretch = getMaxStretch(pathMatrix)
results = getResults(pathMatrix)
with open(outpath, "w") as f:
f.write(results)
print "Finished {} with {} nodes".format(date, len(store))
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
break # Stop after 1, because they can take forever
return # End of function
def scalingTest(maxTests=None, inputDir="graphs"):
# Meant to study the performance of the network as a function of network size
# Loops over a set of nodes in a previously generated graph
# Runs a test on each graph, testing each node as the root
# if maxTests is set, tests only that number of roots (highest degree first)
# Saves results for each graph to a separate file on disk
outDir = "output-treesim-{}".format(inputDir)
if not os.path.exists(outDir): os.makedirs(outDir)
assert os.path.exists(outDir)
paths = sorted(glob.glob("{}/*".format(inputDir)))
exists = set(glob.glob(outDir+"/*"))
for path in paths:
gc.collect() # pypy waits for gc to close files
graph = os.path.basename(path).split(".")[0]
store = makeStoreGeneratedGraph(path)
# Get the highest degree node and make it root
# Sorted by nodeID just to make it stable in the event of a tie
nodeIDs = sorted(store.keys(), key=lambda x: len(store[x].links), reverse=True)
dists = None
if maxTests: nodeIDs = nodeIDs[:maxTests]
for nodeID in nodeIDs:
nodeIDStr = str(nodeID).zfill(len(str(len(store)-1)))
outpath = outDir+"/{}-{}".format(graph, nodeIDStr)
if outpath in exists:
print "Skipping {}-{}, already processed".format(graph, nodeIDStr)
continue
store = makeStoreGeneratedGraph(path, nodeID)
# Don't forget to set random seed before setting times
random.seed(12345) # To make results reproducible
nIDs = sorted(store.keys())
for nID in nIDs:
node = store[nID]
node.info.time = random.randint(0, TIMEOUT)
node.info.tstamp = TIMEOUT
print "Beginning {}, size {}".format(graph, len(store))
if not dists: dists = dijkstrall(store)
idleUntilConverged(store)
pathMatrix = testPaths(store, dists)
avgStretch = getAvgStretch(pathMatrix)
maxStretch = getMaxStretch(pathMatrix)
results = getResults(pathMatrix)
with open(outpath, "w") as f:
f.write(results)
print "Finished {} with {} nodes for root {}".format(graph, len(store), nodeID)
print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
return # End of function
##################
# Main Execution #
##################
if __name__ == "__main__":
if True: # Run a quick test
random.seed(12345) # DEBUG
store = makeStoreSquareGrid(4)
runTest(store) # Quick test
store = None
# Do some real work
#runTest(makeStoreDimesEdges("DIMES/ASEdges/ASEdges1_2007.csv.gz"))
#timelineDimesTest()
#rootNodeASTest("asrel/datasets/19980101.as-rel.txt")
#timelineASTest()
#rootNodeASTest("hype-2016-09-19.list", "output-treesim-hype")
#scalingTest(None, "graphs-20") # First argument 1 to only test 1 root per graph
#store = makeStoreGeneratedGraph("bgp_tables")
#store = makeStoreGeneratedGraph("skitter")
#store = makeStoreASRelGraphMaxDeg("hype-2016-09-19.list") #http://hia.cjdns.ca/watchlist/c/walk.peers.20160919
#store = makeStoreGeneratedGraph("fc00-2017-08-12.txt")
if store: runTest(store)
#rootNodeASTest("skitter", "output-treesim-skitter", None, 0, 1)
#scalingTest(1, "graphs-20") # First argument 1 to only test 1 root per graph
#scalingTest(1, "graphs-21") # First argument 1 to only test 1 root per graph
#scalingTest(1, "graphs-22") # First argument 1 to only test 1 root per graph
#scalingTest(1, "graphs-23") # First argument 1 to only test 1 root per graph
if not store:
import sys
args = sys.argv
if len(args) == 2:
job_number = int(sys.argv[1])
#rootNodeASTest("fc00-2017-08-12.txt", "fc00", None, job_number)
#rootNodeASTest("skitter", "out-skitter", None, job_number)
rootNodeASTest("walk-1517414401.txt.map", "out-walk", None, job_number)
else:
print "Usage: {} job_number".format(args[0])
print "job_number = which job set to run on this node (1-indexed)"

View File

@@ -6,6 +6,7 @@ import "os"
import "strings"
import "strconv"
import "time"
+import "log"
import "runtime/pprof"
import "flag"
@@ -47,16 +48,18 @@ func (n *Node) startPeers() {
func linkNodes(m, n *Node) {
// Don't allow duplicates
-if m.core.DEBUG_getPeers().DEBUG_hasPeer(n.core.DEBUG_getSigPub()) {
+if m.core.DEBUG_getPeers().DEBUG_hasPeer(n.core.DEBUG_getSigningPublicKey()) {
return
}
// Create peers
// Buffering reduces packet loss in the sim
// This slightly speeds up testing (fewer delays before retrying a ping)
-p := m.core.DEBUG_getPeers().DEBUG_newPeer(n.core.DEBUG_getBoxPub(),
-n.core.DEBUG_getSigPub())
-q := n.core.DEBUG_getPeers().DEBUG_newPeer(m.core.DEBUG_getBoxPub(),
-m.core.DEBUG_getSigPub())
+pLinkPub, pLinkPriv := m.core.DEBUG_newBoxKeys()
+qLinkPub, qLinkPriv := m.core.DEBUG_newBoxKeys()
+p := m.core.DEBUG_getPeers().DEBUG_newPeer(n.core.DEBUG_getEncryptionPublicKey(),
+n.core.DEBUG_getSigningPublicKey(), *m.core.DEBUG_getSharedKey(pLinkPriv, qLinkPub))
+q := n.core.DEBUG_getPeers().DEBUG_newPeer(m.core.DEBUG_getEncryptionPublicKey(),
+m.core.DEBUG_getSigningPublicKey(), *n.core.DEBUG_getSharedKey(qLinkPriv, pLinkPub))
DEBUG_simLinkPeers(p, q)
return
}
@@ -140,7 +143,7 @@ func startNetwork(store map[[32]byte]*Node) {
func getKeyedStore(store map[int]*Node) map[[32]byte]*Node {
newStore := make(map[[32]byte]*Node)
for _, node := range store {
-newStore[node.core.DEBUG_getSigPub()] = node
+newStore[node.core.DEBUG_getSigningPublicKey()] = node
}
return newStore
}
@@ -159,17 +162,13 @@ func testPaths(store map[[32]byte]*Node) bool {
ttl := ^uint64(0)
oldTTL := ttl
for here := source; here != dest; {
-if ttl == 0 {
-fmt.Println("Drop:", source.index, here.index, dest.index, oldTTL)
-return false
-}
temp++
if temp > 4096 {
-panic("Loop?")
+fmt.Println("Loop?")
+time.Sleep(time.Second)
+return false
}
-oldTTL = ttl
-nextPort, newTTL := here.core.DEBUG_switchLookup(coords, ttl)
-ttl = newTTL
+nextPort := here.core.DEBUG_switchLookup(coords)
// First check if "here" is accepting packets from the previous node
// TODO explain how this works
ports := here.core.DEBUG_getPeers().DEBUG_getPorts()
@@ -200,12 +199,16 @@ func testPaths(store map[[32]byte]*Node) bool {
source.index, source.core.DEBUG_getLocator(),
here.index, here.core.DEBUG_getLocator(),
dest.index, dest.core.DEBUG_getLocator())
-here.core.DEBUG_getSwitchTable().DEBUG_dumpTable()
+//here.core.DEBUG_getSwitchTable().DEBUG_dumpTable()
}
if here != source {
// This is sufficient to check for routing loops or blackholes
//break
}
+if here == next {
+fmt.Println("Drop:", source.index, here.index, dest.index, oldTTL)
+return false
+}
here = next
}
}
@@ -226,7 +229,7 @@ func stressTest(store map[[32]byte]*Node) {
start := time.Now()
for _, source := range store {
for _, coords := range dests {
-source.core.DEBUG_switchLookup(coords, ^uint64(0))
+source.core.DEBUG_switchLookup(coords)
lookups++
}
}
@@ -256,7 +259,7 @@ func pingNodes(store map[[32]byte]*Node) {
count++
//if count > 16 { break }
fmt.Printf("Sending packets from node %d/%d (%d)\n", count, nNodes, source.index)
-sourceKey := source.core.DEBUG_getBoxPub()
+sourceKey := source.core.DEBUG_getEncryptionPublicKey()
payload := sourceKey[:]
sourceAddr := source.core.DEBUG_getAddr()[:]
sendTo := func(bs []byte, destAddr []byte) {
@@ -328,7 +331,7 @@ func pingBench(store map[[32]byte]*Node) {
return packet
}
for _, dest := range store {
-key := dest.core.DEBUG_getBoxPub()
+key := dest.core.DEBUG_getEncryptionPublicKey()
loc := dest.core.DEBUG_getLocator()
coords := loc.DEBUG_getCoords()
ping := getPing(key, coords)
@@ -378,12 +381,12 @@ func dumpDHTSize(store map[[32]byte]*Node) {
fmt.Printf("DHT min %d / avg %f / max %d\n", min, avg, max)
}
-func (n *Node) startUDP(listen string) {
-n.core.DEBUG_setupAndStartGlobalUDPInterface(listen)
+func (n *Node) startTCP(listen string) {
+n.core.DEBUG_setupAndStartGlobalTCPInterface(listen)
}
-func (n *Node) connectUDP(remoteAddr string) {
-n.core.DEBUG_maybeSendUDPKeys(remoteAddr)
+func (n *Node) connectTCP(remoteAddr string) {
+n.core.AddPeer(remoteAddr)
}
////////////////////////////////////////////////////////////////////////////////
@@ -418,12 +421,12 @@ func main() {
//idxstore := loadGraph("misc/sim/fc00-2017-08-12.txt")
//idxstore := loadGraph("skitter")
kstore := getKeyedStore(idxstore)
-/*
-for _, n := range kstore {
-log := n.core.DEBUG_getLogger()
-log.SetOutput(os.Stderr)
-}
-*/
+//*
+logger := log.New(os.Stderr, "", log.Flags())
+for _, n := range kstore {
+n.core.DEBUG_setLogger(logger)
+}
+//*/
startNetwork(kstore)
//time.Sleep(10*time.Second)
// Note that testPaths only works if pressure is turned off
@@ -439,8 +442,8 @@
if false {
// This connects the sim to the local network
for _, node := range kstore {
-node.startUDP("localhost:0")
-node.connectUDP("localhost:12345")
+node.startTCP("localhost:0")
+node.connectTCP("localhost:12345")
break // just 1
}
for _, node := range kstore {

View File

@@ -1,35 +0,0 @@
#!/usr/bin/env python2
def main():
import sys
args = sys.argv
if len(args) != 2:
print "Usage:", args[0], "path/to/walk.txt"
return
import glob
files = glob.glob(args[1])
if len(files) == 0:
print "File not found:", args[1]
return
for inFile in files:
with open(inFile, 'r') as f: lines = f.readlines()
out = []
nodes = dict()
for line in lines:
words = line.strip().strip('[').strip(']').split(',')
if len(words) < 5: continue
if words[0].strip('"') != "link": continue
first, second = words[3], words[4]
if first not in nodes: nodes[first] = len(nodes)
if second not in nodes: nodes[second] = len(nodes)
for line in lines:
words = line.strip().strip('[').strip(']').split(',')
if len(words) < 5: continue
if words[0].strip('"') != "link": continue
first, second = nodes[words[3]], nodes[words[4]]
out.append("{0} {1}".format(first, second))
with open(inFile+".map", "w") as f: f.write("\n".join(out))
# End loop over files
# End main
if __name__ == "__main__": main()

View File

@@ -1,22 +0,0 @@
package main
import "fmt"
import "time"
import "sync/atomic"
import "runtime"
func main() {
var ops uint64 = 0
for i := 0; i < 4; i++ {
go func() {
for {
atomic.AddUint64(&ops, 1)
runtime.Gosched()
}
}()
}
time.Sleep(1 * time.Second)
opsFinal := atomic.LoadUint64(&ops)
fmt.Println("ops:", opsFinal)
}

View File

@@ -1,53 +0,0 @@
package main
import "fmt"
import "net"
import "time"
func main() {
addr, err := net.ResolveTCPAddr("tcp", "[::1]:9001")
if err != nil {
panic(err)
}
listener, err := net.ListenTCP("tcp", addr)
if err != nil {
panic(err)
}
defer listener.Close()
packetSize := 65535
numPackets := 65535
go func() {
send, err := net.DialTCP("tcp", nil, addr)
if err != nil {
panic(err)
}
defer send.Close()
msg := make([]byte, packetSize)
for idx := 0; idx < numPackets; idx++ {
send.Write(msg)
}
}()
start := time.Now()
//msg := make([]byte, 1280)
sock, err := listener.AcceptTCP()
if err != nil {
panic(err)
}
defer sock.Close()
read := 0
buf := make([]byte, packetSize)
for {
n, err := sock.Read(buf)
read += n
if err != nil {
break
}
}
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
fmt.Printf("%f bits/sec\n", 8*float64(read)/timed.Seconds())
}

View File

@@ -1,36 +0,0 @@
package main
import "time"
import "fmt"
import "sync"
func main() {
fmt.Println("Testing speed of recv+send loop")
const count = 10000000
c := make(chan []byte, 1)
c <- []byte{}
var wg sync.WaitGroup
worker := func() {
for idx := 0; idx < count; idx++ {
p := <-c
select {
case c <- p:
default:
}
}
wg.Done()
}
nIter := 0
start := time.Now()
for idx := 0; idx < 1; idx++ {
go worker()
nIter += count
wg.Add(1)
}
wg.Wait()
stop := time.Now()
timed := stop.Sub(start)
fmt.Printf("%d iterations in %s\n", nIter, timed)
fmt.Printf("%f iterations per second\n", float64(nIter)/timed.Seconds())
fmt.Printf("%s per iteration\n", timed/time.Duration(nIter))
}

View File

@@ -1,56 +0,0 @@
package main
import "bytes"
import "encoding/gob"
import "time"
import "fmt"
type testStruct struct {
First uint64
Second float64
Third []byte
}
func testFunc(tickerDuration time.Duration) {
chn := make(chan []byte)
ticker := time.NewTicker(tickerDuration)
defer ticker.Stop()
send := testStruct{First: 1, Second: 2, Third: []byte{3, 4, 5}}
buf := bytes.NewBuffer(nil)
enc := gob.NewEncoder(buf)
dec := gob.NewDecoder(buf)
sendCall := func() {
err := enc.EncodeValue(&send)
if err != nil {
panic(err)
}
bs := make([]byte, buf.Len())
buf.Read(bs)
fmt.Println("send:", bs)
go func() { chn <- bs }()
}
recvCall := func(bs []byte) {
buf.Write(bs)
recv := testStruct{}
err := dec.DecodeValue(&recv)
fmt.Println("recv:", bs)
if err != nil {
panic(err)
}
}
for {
select {
case bs := <-chn:
recvCall(bs)
case <-ticker.C:
sendCall()
}
}
}
func main() {
go testFunc(100 * time.Millisecond) // Does not crash
time.Sleep(time.Second)
go testFunc(time.Nanosecond) // Does crash
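// Likely because the nanosecond ticker queues many sends before the receiver
// drains them; the spawned goroutines can then deliver chunks out of order,
// and a gob stream is stateful, so out-of-order chunks break the decoder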
time.Sleep(time.Second)
}

View File

@@ -1,22 +0,0 @@
package main
import "sync"
import "time"
import "fmt"
func main() {
const reqs = 1000000
var wg sync.WaitGroup
start := time.Now()
for idx := 0; idx < reqs; idx++ {
wg.Add(1)
go func() { wg.Done() }()
}
wg.Wait()
stop := time.Now()
timed := stop.Sub(start)
fmt.Printf("%d goroutines in %s (%f per second)\n",
reqs,
timed,
reqs/timed.Seconds())
}

View File

@@ -1,57 +0,0 @@
package main
import "fmt"
import "net"
import "time"
// TODO look into netmap + libpcap to bypass the kernel as much as possible
func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface!
//addr, err := net.ResolveUDPAddr("udp", "[ff02::1%veth0]:9001")
addr, err := net.ResolveUDPAddr("udp", "[ff02::1]:9001")
if err != nil {
panic(err)
}
sock, err := net.ListenMulticastUDP("udp", nil, addr)
if err != nil {
panic(err)
}
defer sock.Close()
go func() {
saddr, err := net.ResolveUDPAddr("udp", "[::]:0")
if err != nil {
panic(err)
}
send, err := net.ListenUDP("udp", saddr)
if err != nil {
panic(err)
}
defer send.Close()
msg := make([]byte, 1280)
for {
//fmt.Println("Sending...")
send.WriteTo(msg, addr)
}
}()
numPackets := 1000
start := time.Now()
msg := make([]byte, 2000)
for i := 0; i < numPackets; i++ {
//fmt.Println("Reading:", i)
sock.ReadFromUDP(msg)
}
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
}
func main() {
basic_test()
}

View File

@@ -1,92 +0,0 @@
package main
import "fmt"
import "net"
import "time"
// TODO look into netmap + libpcap to bypass the kernel as much as possible
func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface!
var ip *net.IP
ifaces, err := net.Interfaces()
if err != nil {
panic(err)
}
var zone string
for _, iface := range ifaces {
addrs, err := iface.Addrs()
if err != nil {
panic(err)
}
for _, addr := range addrs {
addrIP, _, _ := net.ParseCIDR(addr.String())
if addrIP.To4() != nil {
continue
} // IPv6 only
if !addrIP.IsLinkLocalUnicast() {
continue
}
zone = iface.Name
ip = &addrIP
}
addrs, err = iface.MulticastAddrs()
if err != nil {
panic(err)
}
for _, addr := range addrs {
fmt.Println(addr.String())
}
}
if ip == nil {
panic("No link-local IPv6 found")
}
fmt.Println("Using address:", *ip)
addr := net.UDPAddr{IP: *ip, Port: 9001, Zone: zone}
saddr := net.UDPAddr{IP: *ip, Port: 9002, Zone: zone}
send, err := net.ListenUDP("udp", &saddr)
if err != nil {
panic(err)
}
defer send.Close()
sock, err := net.ListenUDP("udp", &addr)
if err != nil {
panic(err)
}
defer sock.Close()
const buffSize = 1048576 * 100
send.SetWriteBuffer(buffSize)
sock.SetReadBuffer(buffSize)
sock.SetWriteBuffer(buffSize)
go func() {
msg := make([]byte, 1280)
for {
send.WriteTo(msg, &addr)
}
}()
numPackets := 100000
start := time.Now()
msg := make([]byte, 2000)
for i := 0; i < numPackets; i++ {
_, addr, _ := sock.ReadFrom(msg)
sock.WriteTo(msg, addr)
}
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
}
func main() {
basic_test()
}

View File

@@ -1,89 +0,0 @@
package main
import "fmt"
//import "net"
import "time"
import "runtime"
import "sync/atomic"
func poolbench() {
nWorkers := runtime.GOMAXPROCS(0)
work := make(chan func(), 1)
workers := make(chan chan<- func(), nWorkers)
makeWorker := func() chan<- func() {
ch := make(chan func())
go func() {
for {
f := <-ch
f()
select {
case workers <- (ch):
default:
return
}
}
}()
return ch
}
getWorker := func() chan<- func() {
select {
case ch := <-workers:
return ch
default:
return makeWorker()
}
}
dispatcher := func() {
for {
w := <-work
ch := getWorker()
ch <- w
}
}
go dispatcher()
var count uint64
const nCounts = 1000000
for idx := 0; idx < nCounts; idx++ {
f := func() { atomic.AddUint64(&count, 1) }
work <- f
}
for atomic.LoadUint64(&count) < nCounts {
}
}
func normalbench() {
var count uint64
const nCounts = 1000000
ch := make(chan struct{}, 1)
ch <- struct{}{}
for idx := 0; idx < nCounts; idx++ {
f := func() { atomic.AddUint64(&count, 1) }
f()
<-ch
ch <- struct{}{}
}
}
func gobench() {
var count uint64
const nCounts = 1000000
for idx := 0; idx < nCounts; idx++ {
f := func() { atomic.AddUint64(&count, 1) }
go f()
}
for atomic.LoadUint64(&count) < nCounts {
}
}
func main() {
start := time.Now()
poolbench()
fmt.Println(time.Since(start))
start = time.Now()
normalbench()
fmt.Println(time.Since(start))
start = time.Now()
gobench()
fmt.Println(time.Since(start))
}

View File

@@ -1,95 +0,0 @@
package main
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt"
quic "github.com/lucas-clemente/quic-go"
"math/big"
"sync"
"time"
)
const addr = "[::1]:9001"
func main() {
go run_server()
run_client()
}
func run_server() {
listener, err := quic.ListenAddr(addr, generateTLSConfig(), nil)
if err != nil {
panic(err)
}
ses, err := listener.Accept()
if err != nil {
panic(err)
}
for {
stream, err := ses.AcceptStream()
if err != nil {
panic(err)
}
go func() {
defer stream.Close()
bs := bytes.Buffer{}
_, err := bs.ReadFrom(stream)
if err != nil {
panic(err)
} //<-- TooManyOpenStreams
}()
}
}
func run_client() {
msgSize := 1048576
msgCount := 128
ses, err := quic.DialAddr(addr, &tls.Config{InsecureSkipVerify: true}, nil)
if err != nil {
panic(err)
}
bs := make([]byte, msgSize)
wg := sync.WaitGroup{}
start := time.Now()
for idx := 0; idx < msgCount; idx++ {
wg.Add(1)
go func() {
defer wg.Done()
stream, err := ses.OpenStreamSync()
if err != nil {
panic(err)
}
defer stream.Close()
stream.Write(bs)
}() // "go" this later
}
wg.Wait()
timed := time.Since(start)
fmt.Println("Client finished", timed, fmt.Sprintf("%f Bits/sec", 8*float64(msgSize*msgCount)/timed.Seconds()))
}
// Setup a bare-bones TLS config for the server
func generateTLSConfig() *tls.Config {
key, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
panic(err)
}
template := x509.Certificate{SerialNumber: big.NewInt(1)}
certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key)
if err != nil {
panic(err)
}
keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})
tlsCert, err := tls.X509KeyPair(certPEM, keyPEM)
if err != nil {
panic(err)
}
return &tls.Config{Certificates: []tls.Certificate{tlsCert}}
}

View File

@@ -1,74 +0,0 @@
package main
import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"
// TODO look into netmap + libpcap to bypass the kernel as much as possible
func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface!
addr, err := net.ResolveUDPAddr("udp", "[::1]:9001")
if err != nil {
panic(err)
}
sock, err := net.ListenUDP("udp", addr)
if err != nil {
panic(err)
}
defer sock.Close()
go func() {
send, err := net.DialUDP("udp", nil, addr)
if err != nil {
panic(err)
}
defer send.Close()
msg := make([]byte, 1280)
for {
send.Write(msg)
}
}()
numPackets := 1000000
start := time.Now()
msg := make([]byte, 2000)
for i := 0; i < numPackets; i++ {
sock.ReadFrom(msg)
}
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
}
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main() {
flag.Parse()
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err))
}
if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err))
}
defer pprof.StopCPUProfile()
}
if *memprofile != "" {
f, err := os.Create(*memprofile)
if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err))
}
defer func() { pprof.WriteHeapProfile(f); f.Close() }()
}
basic_test()
}

View File

@@ -1,84 +0,0 @@
package main
import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"
// TODO look into netmap + libpcap to bypass the kernel as much as possible
func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface!
addr, err := net.ResolveUDPAddr("udp", "[::1]:9001")
if err != nil {
panic(err)
}
sock, err := net.ListenUDP("udp", addr)
if err != nil {
panic(err)
}
defer sock.Close()
go func() {
send, err := net.DialUDP("udp", nil, addr)
if err != nil {
panic(err)
}
defer send.Close()
msg := make([]byte, 1280)
bss := make(net.Buffers, 0, 1024)
for {
for len(bss) < 1024 {
bss = append(bss, msg)
}
bss.WriteTo(send)
//bss = bss[:0]
//send.Write(msg)
}
}()
numPackets := 1000
start := time.Now()
msg := make([]byte, 2000)
for i := 0; i < numPackets; i++ {
n, err := sock.Read(msg)
if err != nil {
panic(err)
}
fmt.Println(n)
}
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
}
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main() {
flag.Parse()
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err))
}
if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err))
}
defer pprof.StopCPUProfile()
}
if *memprofile != "" {
f, err := os.Create(*memprofile)
if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err))
}
defer func() { pprof.WriteHeapProfile(f); f.Close() }()
}
basic_test()
}

View File

@@ -1,116 +0,0 @@
package main
import "flag"
import "fmt"
import "net"
import "os"
import "runtime/pprof"
import "time"
// TODO look into netmap + libpcap to bypass the kernel as much as possible
func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface!
var ip *net.IP
ifaces, err := net.Interfaces()
if err != nil {
panic(err)
}
var zone string
for _, iface := range ifaces {
addrs, err := iface.Addrs()
if err != nil {
panic(err)
}
for _, addr := range addrs {
addrIP, _, _ := net.ParseCIDR(addr.String())
if addrIP.To4() != nil {
continue
} // IPv6 only
if !addrIP.IsLinkLocalUnicast() {
continue
}
fmt.Println(iface.Name, addrIP)
zone = iface.Name
ip = &addrIP
}
if ip != nil {
break
}
/*
addrs, err = iface.MulticastAddrs()
if err != nil { panic(err) }
for _, addr := range addrs {
fmt.Println(addr.String())
}
*/
}
if ip == nil {
panic("No link-local IPv6 found")
}
fmt.Println("Using address:", *ip)
addr := net.UDPAddr{IP: *ip, Port: 9001, Zone: zone}
laddr, err := net.ResolveUDPAddr("udp", "[::]:9001")
if err != nil {
panic(err)
}
sock, err := net.ListenUDP("udp", laddr)
if err != nil {
panic(err)
}
defer sock.Close()
go func() {
send, err := net.DialUDP("udp", nil, &addr)
//send, err := net.ListenUDP("udp", nil)
if err != nil {
panic(err)
}
defer send.Close()
msg := make([]byte, 1280)
for {
send.Write(msg)
//send.WriteToUDP(msg, &addr)
}
}()
numPackets := 1000000
start := time.Now()
msg := make([]byte, 2000)
for i := 0; i < numPackets; i++ {
sock.ReadFromUDP(msg)
}
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
}
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main() {
flag.Parse()
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err))
}
if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err))
}
defer pprof.StopCPUProfile()
}
if *memprofile != "" {
f, err := os.Create(*memprofile)
if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err))
}
defer func() { pprof.WriteHeapProfile(f); f.Close() }()
}
basic_test()
}

View File

@@ -1,103 +0,0 @@
package main
import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"
// TODO look into netmap + libpcap to bypass the kernel as much as possible?
const buffSize = 32
func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface!
addr, err := net.ResolveTCPAddr("tcp", "[::1]:9001")
if err != nil {
panic(err)
}
listener, err := net.ListenTCP("tcp", addr)
if err != nil {
panic(err)
}
defer listener.Close()
go func() {
send, err := net.DialTCP("tcp", nil, addr)
if err != nil {
panic(err)
}
defer send.Close()
msg := make([]byte, 1280)
bss := make(net.Buffers, 0, 1024)
for {
for len(bss) < 1 { //buffSize {
bss = append(bss, msg)
}
bss := net.Buffers{[]byte{0, 1, 2, 3}, []byte{0, 1}, msg}
bss.WriteTo(send)
//send.Write(msg)
}
}()
numPackets := 1000000
start := time.Now()
//msg := make([]byte, 1280)
sock, err := listener.AcceptTCP()
if err != nil {
panic(err)
}
defer sock.Close()
for i := 0; i < numPackets; i++ {
msg := make([]byte, 1280*buffSize)
n, err := sock.Read(msg)
if err != nil {
panic(err)
}
msg = msg[:n]
for len(msg) > 1286 {
// handle message
i++
msg = msg[1286:]
}
// handle remaining fragment of message
//fmt.Println(n)
}
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
_ = func(in chan<- int) {
close(in)
}
}
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main() {
flag.Parse()
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err))
}
if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err))
}
defer pprof.StopCPUProfile()
}
if *memprofile != "" {
f, err := os.Create(*memprofile)
if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err))
}
defer func() { pprof.WriteHeapProfile(f); f.Close() }()
}
basic_test()
}

View File

@@ -1,77 +0,0 @@
package main
import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"
// TODO look into netmap + libpcap to bypass the kernel as much as possible
func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface!
addr, err := net.ResolveUDPAddr("udp", "[::1]:0")
if err != nil {
panic(err)
}
sock, err := net.ListenUDP("udp", addr)
if err != nil {
panic(err)
}
defer sock.Close()
go func() {
raddr := sock.LocalAddr().(*net.UDPAddr)
send, err := net.DialUDP("udp", nil, raddr)
//send, err := net.ListenUDP("udp", addr)
if err != nil {
panic(err)
}
defer send.Close()
msg := make([]byte, 1280)
for {
send.Write(msg)
//send.WriteToUDP(msg, raddr)
}
}()
numPackets := 1000000
start := time.Now()
msg := make([]byte, 2000)
for i := 0; i < numPackets; i++ {
sock.ReadFromUDP(msg)
}
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
}
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main() {
flag.Parse()
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err))
}
if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err))
}
defer pprof.StopCPUProfile()
}
if *memprofile != "" {
f, err := os.Create(*memprofile)
if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err))
}
defer func() { pprof.WriteHeapProfile(f); f.Close() }()
}
basic_test()
}

View File

@@ -1,79 +0,0 @@
package main
import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"
// TODO look into netmap + libpcap to bypass the kernel as much as possible
func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface!
saddr, err := net.ResolveUDPAddr("udp", "[::1]:9001")
if err != nil {
panic(err)
}
raddr, err := net.ResolveUDPAddr("udp", "[::1]:9002")
if err != nil {
panic(err)
}
send, err := net.DialUDP("udp", saddr, raddr)
if err != nil {
panic(err)
}
defer send.Close()
recv, err := net.DialUDP("udp", raddr, saddr)
if err != nil {
panic(err)
}
defer recv.Close()
go func() {
msg := make([]byte, 1280)
for {
send.Write(msg)
}
}()
numPackets := 1000000
start := time.Now()
msg := make([]byte, 2000)
for i := 0; i < numPackets; i++ {
recv.Read(msg)
}
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
}
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main() {
flag.Parse()
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err))
}
if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err))
}
defer pprof.StopCPUProfile()
}
if *memprofile != "" {
f, err := os.Create(*memprofile)
if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err))
}
defer func() { pprof.WriteHeapProfile(f); f.Close() }()
}
basic_test()
}

View File

@@ -1,92 +0,0 @@
package main
import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"
// TODO look into netmap + libpcap to bypass the kernel as much as possible
func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface!
sock, err := net.ListenUDP("udp", nil)
if err != nil {
panic(err)
}
defer sock.Close()
ch := make(chan []byte, 1)
writer := func() {
raddr := sock.LocalAddr().(*net.UDPAddr)
//send, err := net.ListenUDP("udp", nil)
//if err != nil { panic(err) }
//defer send.Close()
for {
select {
case <-ch:
default:
}
msg := make([]byte, 1280)
sock.WriteToUDP(msg, raddr)
//send.WriteToUDP(msg, raddr)
}
}
go writer()
//go writer()
//go writer()
//go writer()
numPackets := 65536
size := 0
start := time.Now()
success := 0
for i := 0; i < numPackets; i++ {
msg := make([]byte, 2048)
n, _, err := sock.ReadFromUDP(msg)
if err != nil {
panic(err)
}
size += n
select {
case ch <- msg:
success += 1
default:
}
}
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
fmt.Printf("%f bits per second\n", 8*float64(size)/timed.Seconds())
fmt.Println("Success:", success, "/", numPackets)
}
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main() {
flag.Parse()
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err))
}
if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err))
}
defer pprof.StopCPUProfile()
}
if *memprofile != "" {
f, err := os.Create(*memprofile)
if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err))
}
defer func() { pprof.WriteHeapProfile(f); f.Close() }()
}
basic_test()
}

View File

@@ -1,124 +0,0 @@
package main
import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"
import "golang.org/x/net/ipv6"
// TODO look into netmap + libpcap to bypass the kernel as much as possible
func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface!
udpAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
if err != nil {
panic(err)
}
sock, err := net.ListenUDP("udp", udpAddr)
if err != nil {
panic(err)
}
defer sock.Close()
writer := func() {
raddr := sock.LocalAddr().(*net.UDPAddr)
send, err := net.ListenUDP("udp", nil)
if err != nil {
panic(err)
}
defer send.Close()
conn := ipv6.NewPacketConn(send)
defer conn.Close()
var msgs []ipv6.Message
for idx := 0; idx < 1024; idx++ {
msg := ipv6.Message{Addr: raddr, Buffers: [][]byte{make([]byte, 1280)}}
msgs = append(msgs, msg)
}
for {
/*
var msgs []ipv6.Message
for idx := 0 ; idx < 1024 ; idx++ {
msg := ipv6.Message{Addr: raddr, Buffers: [][]byte{make([]byte, 1280)}}
msgs = append(msgs, msg)
}
*/
conn.WriteBatch(msgs, 0)
}
}
go writer()
//go writer()
//go writer()
//go writer()
numPackets := 65536
size := 0
count := 0
start := time.Now()
/*
conn := ipv6.NewPacketConn(sock)
defer conn.Close()
for ; count < numPackets ; count++ {
msgs := make([]ipv6.Message, 1024)
for _, msg := range msgs {
msg.Buffers = append(msg.Buffers, make([]byte, 2048))
}
n, err := conn.ReadBatch(msgs, 0)
if err != nil { panic(err) }
fmt.Println("DEBUG: n", n)
for _, msg := range msgs[:n] {
fmt.Println("DEBUG: msg", msg)
size += msg.N
//for _, bs := range msg.Buffers {
// size += len(bs)
//}
count++
}
}
//*/
//*
for ; count < numPackets; count++ {
msg := make([]byte, 2048)
n, _, err := sock.ReadFromUDP(msg)
if err != nil {
panic(err)
}
size += n
}
//*/
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(count)/timed.Seconds())
fmt.Printf("%f bits/second\n", float64(8*size)/timed.Seconds())
}
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main() {
flag.Parse()
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err))
}
if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err))
}
defer pprof.StopCPUProfile()
}
if *memprofile != "" {
f, err := os.Create(*memprofile)
if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err))
}
defer func() { pprof.WriteHeapProfile(f); f.Close() }()
}
basic_test()
}

View File

@@ -1,105 +0,0 @@
package main
import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"
// TODO look into netmap + libpcap to bypass the kernel as much as possible?
const buffSize = 32
func basic_test() {
// TODO need a way to look up who our link-local neighbors are for each iface!
addr, err := net.ResolveTCPAddr("tcp", "[::1]:9001")
if err != nil {
panic(err)
}
listener, err := net.ListenTCP("tcp", addr)
if err != nil {
panic(err)
}
defer listener.Close()
go func() {
send, err := net.DialTCP("tcp", nil, addr)
if err != nil {
panic(err)
}
defer send.Close()
msg := make([]byte, 1280)
bss := make(net.Buffers, 0, 1024)
count := 0
for {
time.Sleep(100 * time.Millisecond)
for len(bss) < count {
bss = append(bss, msg)
}
bss.WriteTo(send)
count++
//send.Write(msg)
}
}()
numPackets := 1000000
start := time.Now()
//msg := make([]byte, 1280)
sock, err := listener.AcceptTCP()
if err != nil {
panic(err)
}
defer sock.Close()
for {
msg := make([]byte, 1280*buffSize)
n, err := sock.Read(msg)
if err != nil {
panic(err)
}
msg = msg[:n]
fmt.Println("Read:", n)
for len(msg) > 1280 {
// handle message
msg = msg[1280:]
}
// handle remaining fragment of message
//fmt.Println(n)
}
timed := time.Since(start)
fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
_ = func(in chan<- int) {
close(in)
}
}
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")
func main() {
flag.Parse()
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
panic(fmt.Sprintf("could not create CPU profile: ", err))
}
if err := pprof.StartCPUProfile(f); err != nil {
panic(fmt.Sprintf("could not start CPU profile: ", err))
}
defer pprof.StopCPUProfile()
}
if *memprofile != "" {
f, err := os.Create(*memprofile)
if err != nil {
panic(fmt.Sprintf("could not create memory profile: ", err))
}
defer func() { pprof.WriteHeapProfile(f); f.Close() }()
}
basic_test()
}

View File

@@ -1,83 +0,0 @@
package main
import (
"fmt"
"log"
"net"
"os/exec"
"time"
"github.com/songgao/water"
)
const mtu = 65535
func setup_dev() *water.Interface {
ifce, err := water.New(water.Config{
DeviceType: water.TUN,
})
if err != nil {
panic(err)
}
return ifce
}
func setup_dev1() *water.Interface {
ifce := setup_dev()
cmd := exec.Command("ip", "-f", "inet6",
"addr", "add", "fc00::2/8",
"dev", ifce.Name())
out, err := cmd.CombinedOutput()
if err != nil {
fmt.Println(string(out))
panic("Failed to assign address")
}
cmd = exec.Command("ip", "link", "set",
"dev", ifce.Name(),
"mtu", fmt.Sprintf("%d", mtu),
"up")
out, err = cmd.CombinedOutput()
if err != nil {
fmt.Println(string(out))
panic("Failed to bring up interface")
}
return ifce
}
func connect(ifce *water.Interface) {
conn, err := net.DialTimeout("tcp", "192.168.2.2:9001", time.Second)
if err != nil {
panic(err)
}
sock := conn.(*net.TCPConn)
_ = sock // silence "declared but not used"; TODO: start a worker to move packets to/from the tun
}
func bench() {
}
func main() {
ifce := setup_dev1()
connect(ifce)
bench()
fmt.Println("Done?")
return
ifce, err := water.New(water.Config{
DeviceType: water.TUN,
})
if err != nil {
panic(err)
}
log.Printf("Interface Name: %s\n", ifce.Name())
packet := make([]byte, 2000)
for {
n, err := ifce.Read(packet)
if err != nil {
panic(err)
}
log.Printf("Packet Received: % x\n", packet[:n])
}
}

View File

@@ -1,126 +0,0 @@
package main
import (
"fmt"
"log"
"net"
"os/exec"
"github.com/songgao/water"
)
const mtu = 65535
const netnsName = "tunbenchns"
func setup_dev() *water.Interface {
ifce, err := water.New(water.Config{
DeviceType: water.TUN,
})
if err != nil {
panic(err)
}
return ifce
}
func setup_dev1() *water.Interface {
ifce := setup_dev()
cmd := exec.Command("ip", "-f", "inet6",
"addr", "add", "fc00::1/8",
"dev", ifce.Name())
out, err := cmd.CombinedOutput()
if err != nil {
fmt.Println(string(out))
fmt.Println(err)
panic("Failed to assign address")
}
cmd = exec.Command("ip", "link", "set",
"dev", tun.name,
"mtu", fmt.Sprintf("%d", mtu),
"up")
out, err = cmd.CombinedOutput()
if err != nil {
fmt.Println(string(out))
panic("Failed to bring up interface")
}
return ifce
}
func addNS(name string) {
cmd := exec.COmmand("ip", "netns", "add", name)
out, err := cmd.CombinedOutput()
if err != nil {
fmt.Println(string(out))
panic("Failed to setup netns")
}
}
func delNS(name string) {
cmd := exec.COmmand("ip", "netns", "delete", name)
out, err := cmd.CombinedOutput()
if err != nil {
fmt.Println(string(out))
panic("Failed to setup netns")
}
}
func doInNetNS(comm ...string) *exec.Cmd {
return exec.Command("ip", "netns", "exec", netnsName, comm...)
}
func setup_dev2() *water.Interface {
ifce := setup_dev()
addNS(netnsName)
cmd := exec.Command("ip", "link", "set", ifce.Name(), "netns", netnsName)
out, err := cmd.CombinedOutput()
if err != nil {
fmt.Println(string(out))
panic("Failed to move tun to netns")
}
cmd = doInNetNS("ip", "-f", "inet6",
"addr", "add", "fc00::2/8",
"dev", ifce.Name())
out, err = cmd.CombinedOutput()
if err != nil {
fmt.Println(string(out))
panic("Failed to assign address")
}
cmd = doInNetNS("ip", "link", "set",
"dev", tun.name,
"mtu", fmt.Sprintf("%d", mtu),
"up")
out, err = cmd.CombinedOutput()
if err != nil {
fmt.Println(string(out))
fmt.Println(err)
panic("Failed to bring up interface")
}
return ifce
}
func connect() {
}
func bench() {
}
func main() {
ifce, err := water.New(water.Config{
DeviceType: water.TUN,
})
if err != nil {
panic(err)
}
log.Printf("Interface Name: %s\n", ifce.Name())
packet := make([]byte, 2000)
for {
n, err := ifce.Read(packet)
if err != nil {
panic(err)
}
log.Printf("Packet Received: % x\n", packet[:n])
}
}

View File

@@ -1,128 +0,0 @@
package main
import (
"fmt"
"log"
"net"
"os/exec"
"github.com/songgao/water"
)
const mtu = 65535
const netnsName = "tunbenchns"
func setup_dev() *water.Interface {
ifce, err := water.New(water.Config{
DeviceType: water.TUN,
})
if err != nil {
panic(err)
}
return ifce
}
func setup_dev1() *water.Interface {
ifce := setup_dev()
cmd := exec.Command("ip", "-f", "inet6",
"addr", "add", "fc00::1/8",
"dev", ifce.Name())
out, err := cmd.CombinedOutput()
if err != nil {
fmt.Println(string(out))
fmt.Println(err)
panic("Failed to assign address")
}
cmd = exec.Command("ip", "link", "set",
"dev", tun.name,
"mtu", fmt.Sprintf("%d", mtu),
"up")
out, err = cmd.CombinedOutput()
if err != nil {
fmt.Println(string(out))
panic("Failed to bring up interface")
}
return ifce
}
func addNS(name string) {
cmd := exec.COmmand("ip", "netns", "add", name)
out, err := cmd.CombinedOutput()
if err != nil {
fmt.Println(string(out))
panic("Failed to setup netns")
}
}
func delNS(name string) {
cmd := exec.COmmand("ip", "netns", "delete", name)
out, err := cmd.CombinedOutput()
if err != nil {
fmt.Println(string(out))
panic("Failed to setup netns")
}
}
func doInNetNS(comm ...string) *exec.Cmd {
return exec.Command("ip", "netns", "exec", netnsName, comm...)
}
func setup_dev2() *water.Interface {
ifce := setup_dev()
addNS(netnsName)
cmd := exec.Command("ip", "link", "set", ifce.Name(), "netns", netnsName)
out, err := cmd.CombinedOutput()
if err != nil {
fmt.Println(string(out))
panic("Failed to move tun to netns")
}
cmd = exec.Command(
"ip", "-f", "inet6",
"addr", "add", "fc00::2/8",
"dev", ifce.Name())
out, err = cmd.CombinedOutput()
if err != nil {
fmt.Println(string(out))
panic("Failed to assign address")
}
cmd = exec.Command(
"ip", "link", "set",
"dev", tun.name,
"mtu", fmt.Sprintf("%d", mtu),
"up")
out, err = cmd.CombinedOutput()
if err != nil {
fmt.Println(string(out))
fmt.Println(err)
panic("Failed to bring up interface")
}
return ifce
}
func connect() {
}
func bench() {
}
func main() {
ifce, err := water.New(water.Config{
DeviceType: water.TUN,
})
if err != nil {
panic(err)
}
log.Printf("Interface Name: %s\n", ifce.Name())
packet := make([]byte, 2000)
for {
n, err := ifce.Read(packet)
if err != nil {
panic(err)
}
log.Printf("Packet Received: % x\n", packet[:n])
}
}

View File

@@ -1,45 +0,0 @@
package main
import (
"log"
"net"
"sync"
"github.com/FlexibleBroadband/tun-go"
)
// First start the tun server.
func main() {
wg := sync.WaitGroup{}
// local tun interface read and write channel.
rCh := make(chan []byte, 1024)
// read from local tun interface channel, and write into remote udp channel.
wg.Add(1)
go func() {
defer wg.Done()
for {
data := <-rCh
// if data[0]&0xf0 == 0x40
// write into udp conn.
log.Println("tun->conn:", len(data))
log.Println("read!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
log.Println("src:", net.IP(data[8:24]), "dst:", net.IP(data[24:40]))
}
}()
address := net.ParseIP("fc00::1")
tuntap, err := tun.OpenTun(address)
if err != nil {
panic(err)
}
defer tuntap.Close()
// read data from tun into rCh channel.
wg.Add(1)
go func() {
if err := tuntap.Read(rCh); err != nil {
panic(err)
}
wg.Done()
}()
wg.Wait()
}

View File

@@ -1,40 +0,0 @@
package main
import "wire"
import "fmt"
import "time"
func main() {
for idx := 0; idx < 64; idx++ {
num := uint64(1) << uint(idx)
encoded := make([]byte, 10)
length := wire.Encode_uint64(num, encoded)
decoded, _ := wire.Decode_uint64(encoded[:length])
if decoded != num {
panic(fmt.Sprintf("%d != %d", decoded, num))
}
}
const count = 1000000
start := time.Now()
encoded := make([]byte, 10)
//num := ^uint64(0) // Longest possible value for full uint64 range
num := ^uint64(0) >> 1 // Largest positive int64 (real use case)
//num := uint64(0) // Shortest possible value, most will be of this length
length := wire.Encode_uint64(num, encoded)
for idx := 0; idx < count; idx++ {
wire.Encode_uint64(num, encoded)
}
timed := time.Since(start)
fmt.Println("Ops:", count/timed.Seconds())
fmt.Println("Time:", timed.Nanoseconds()/count)
encoded = encoded[:length]
start = time.Now()
for idx := 0; idx < count; idx++ {
wire.Decode_uint64(encoded)
}
timed = time.Since(start)
fmt.Println("Ops:", count/timed.Seconds())
fmt.Println("Time:", timed.Nanoseconds()/count)
}
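The benchmark above exercises wire.Encode_uint64 and wire.Decode_uint64, whose bodies are not shown in this changeset. For orientation only, a generic base-128 varint in the protobuf style is sketched below; the repo's actual byte layout may differ (the commit log mentions a signed-int scheme with no negative zero), so treat the names and layout here as illustrative assumptions.

// Illustrative only: little-endian base-128 varint, 7 payload bits per byte,
// high bit set on every byte except the last. Not necessarily wire's layout.
func varintEncode(num uint64, out []byte) int {
	idx := 0
	for {
		b := byte(num & 0x7f)
		num >>= 7
		if num != 0 {
			b |= 0x80 // continuation bit: more bytes follow
		}
		out[idx] = b
		idx++
		if num == 0 {
			return idx // number of bytes written
		}
	}
}

func varintDecode(in []byte) (uint64, int) {
	var num uint64
	for idx, b := range in {
		num |= uint64(b&0x7f) << uint(7*idx)
		if b&0x80 == 0 {
			return num, idx + 1 // value and bytes consumed
		}
	}
	return 0, 0 // truncated input
}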

View File

@@ -1,10 +1,17 @@
package yggdrasil
type address [16]byte // IPv6 address within the network
type subnet [8]byte // It's a /64
// address represents an IPv6 address in the yggdrasil address range.
type address [16]byte
var address_prefix = [...]byte{0xfd} // For node addresses + local subnets
// subnet represents an IPv6 /64 subnet in the yggdrasil subnet range.
type subnet [8]byte
// address_prefix is the prefix used for all addresses and subnets in the network.
// The current implementation requires this to be a multiple of 8 bits.
// Nodes that configure this differently will be unable to communicate with each other, though routing and the DHT machinery *should* still work.
var address_prefix = [...]byte{0xfd}
// isValid returns true if an address falls within the range used by nodes in the network.
func (a *address) isValid() bool {
for idx := range address_prefix {
if (*a)[idx] != address_prefix[idx] {
@@ -14,6 +21,7 @@ func (a *address) isValid() bool {
return (*a)[len(address_prefix)]&0x80 == 0
}
// isValid returns true if a prefix falls within the range usable by the network.
func (s *subnet) isValid() bool {
for idx := range address_prefix {
if (*s)[idx] != address_prefix[idx] {
@@ -23,6 +31,11 @@ func (s *subnet) isValid() bool {
return (*s)[len(address_prefix)]&0x80 != 0
}
// address_addrForNodeID takes a *NodeID as an argument and returns an *address.
// This address begins with the address prefix.
// The next bit is 0 for an address, and 1 for a subnet.
// The following 7 bits are set to the number of leading 1 bits in the NodeID.
// The NodeID, excluding the leading 1 bits and the first leading 0 bit, is truncated to the appropriate length and makes up the remainder of the address.
func address_addrForNodeID(nid *NodeID) *address {
// 128 bit address
// Begins with prefix
@@ -59,6 +72,11 @@ func address_addrForNodeID(nid *NodeID) *address {
return &addr
}
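To make the comments above concrete, here is a hedged sketch of the encoding they describe; the real function body is elided from this diff, so the helper name and details below are illustrative rather than the repo's exact code.

// Sketch (assumed, not the elided body): build an address from a NodeID by
// packing the prefix, a type bit, the leading-ones count, and the remaining bits.
func sketch_addrForNodeID(nid *NodeID) *address {
	var addr address
	copy(addr[:], address_prefix[:])
	// Count the leading 1 bits in the NodeID
	ones := 0
	for ones < 8*NodeIDLen && nid[ones/8]&(0x80>>uint(ones%8)) != 0 {
		ones++
	}
	// One type bit (0 = address) followed by a 7 bit count of leading ones;
	// this assumes ones <= 127, as the comments above imply
	addr[len(address_prefix)] = byte(ones)
	// Copy the NodeID bits that follow the leading 1s and the first 0 bit
	for src, dst := ones+1, 8*(len(address_prefix)+1); src < 8*NodeIDLen && dst < 8*len(addr); src, dst = src+1, dst+1 {
		if nid[src/8]&(0x80>>uint(src%8)) != 0 {
			addr[dst/8] |= 0x80 >> uint(dst%8)
		}
	}
	return &addr
}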
// address_subnetForNodeID takes a *NodeID as an argument and returns a *subnet.
// This subnet begins with the address prefix.
// The next bit is 0 for an address, and 1 for a subnet.
// The following 7 bits are set to the number of leading 1 bits in the NodeID.
// The NodeID, excluding the leading 1 bits and the first leading 0 bit, is truncated to the appropriate length and makes up the remainder of the subnet.
func address_subnetForNodeID(nid *NodeID) *subnet {
// Exactly as the address version, with two exceptions:
// 1) The first bit after the fixed prefix is a 1 instead of a 0
@@ -70,6 +88,10 @@ func address_subnetForNodeID(nid *NodeID) *subnet {
return &snet
}
// getNodeIDandMask returns two *NodeID.
// The first is a NodeID with all the bits known from the address set to their correct values.
// The second is a bitmask with 1 bit set for each bit that was known from the address.
// This is used to look up NodeIDs in the DHT and tell if they match an address.
func (a *address) getNodeIDandMask() (*NodeID, *NodeID) {
// Mask is a bitmask to mark the bits visible from the address
// This means truncated leading 1s, first leading 0, and visible part of addr
@@ -95,6 +117,10 @@ func (a *address) getNodeIDandMask() (*NodeID, *NodeID) {
return &nid, &mask
}
// getNodeIDandMask returns two *NodeID.
// The first is a NodeID with all the bits known from the address set to their correct values.
// The second is a bitmask with 1 bit set for each bit that was known from the subnet.
// This is used to look up NodeIDs in the DHT and tell if they match a subnet.
func (s *subnet) getNodeIDandMask() (*NodeID, *NodeID) {
// As with the address version, but visible parts of the subnet prefix instead
var nid NodeID

View File

@@ -1,13 +1,20 @@
package yggdrasil
import "net"
import "os"
import "bytes"
import "fmt"
import "sort"
import "strings"
import (
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"net"
"net/url"
"os"
"sort"
"strconv"
"strings"
"sync/atomic"
"time"
)
// TODO? Make all of this JSON
// TODO: Add authentication
type admin struct {
@@ -16,48 +23,211 @@ type admin struct {
handlers []admin_handlerInfo
}
type admin_info map[string]interface{}
type admin_handlerInfo struct {
name string // Checked against the first word of the api call
args []string // List of human-readable argument names
handler func(*[]byte, ...string) // First arg is pointer to the out slice, rest is args
name string // Checked against the first word of the api call
args []string // List of human-readable argument names
handler func(admin_info) (admin_info, error) // First is input map, second is output
}
func (a *admin) addHandler(name string, args []string, handler func(*[]byte, ...string)) {
// admin_pair maps things like "IP", "port", "bucket", or "coords" onto values.
type admin_pair struct {
key string
val interface{}
}
// admin_nodeInfo represents the information we know about a node for an admin response.
type admin_nodeInfo []admin_pair
// addHandler is called for each admin function to add the handler and help documentation to the API.
func (a *admin) addHandler(name string, args []string, handler func(admin_info) (admin_info, error)) {
a.handlers = append(a.handlers, admin_handlerInfo{name, args, handler})
}
// init runs the initial admin setup.
func (a *admin) init(c *Core, listenaddr string) {
a.core = c
a.listenaddr = listenaddr
a.addHandler("help", nil, func(out *[]byte, _ ...string) {
a.addHandler("help", nil, func(in admin_info) (admin_info, error) {
handlers := make(map[string]interface{})
for _, handler := range a.handlers {
tmp := append([]string{handler.name}, handler.args...)
*out = append(*out, []byte(strings.Join(tmp, " "))...)
*out = append(*out, "\n"...)
handlers[handler.name] = admin_info{"fields": handler.args}
}
return admin_info{"help": handlers}, nil
})
a.addHandler("dot", []string{}, func(in admin_info) (admin_info, error) {
return admin_info{"dot": string(a.getResponse_dot())}, nil
})
a.addHandler("getSelf", []string{}, func(in admin_info) (admin_info, error) {
self := a.getData_getSelf().asMap()
ip := fmt.Sprint(self["ip"])
delete(self, "ip")
return admin_info{"self": admin_info{ip: self}}, nil
})
a.addHandler("getPeers", []string{}, func(in admin_info) (admin_info, error) {
sort := "ip"
peers := make(admin_info)
for _, peerdata := range a.getData_getPeers() {
p := peerdata.asMap()
so := fmt.Sprint(p[sort])
peers[so] = p
delete(peers[so].(map[string]interface{}), sort)
}
return admin_info{"peers": peers}, nil
})
a.addHandler("getSwitchPeers", []string{}, func(in admin_info) (admin_info, error) {
sort := "port"
switchpeers := make(admin_info)
for _, s := range a.getData_getSwitchPeers() {
p := s.asMap()
so := fmt.Sprint(p[sort])
switchpeers[so] = p
delete(switchpeers[so].(map[string]interface{}), sort)
}
return admin_info{"switchpeers": switchpeers}, nil
})
a.addHandler("getDHT", []string{}, func(in admin_info) (admin_info, error) {
sort := "ip"
dht := make(admin_info)
for _, d := range a.getData_getDHT() {
p := d.asMap()
so := fmt.Sprint(p[sort])
dht[so] = p
delete(dht[so].(map[string]interface{}), sort)
}
return admin_info{"dht": dht}, nil
})
a.addHandler("getSessions", []string{}, func(in admin_info) (admin_info, error) {
sort := "ip"
sessions := make(admin_info)
for _, s := range a.getData_getSessions() {
p := s.asMap()
so := fmt.Sprint(p[sort])
sessions[so] = p
delete(sessions[so].(map[string]interface{}), sort)
}
return admin_info{"sessions": sessions}, nil
})
a.addHandler("addPeer", []string{"uri"}, func(in admin_info) (admin_info, error) {
if a.addPeer(in["uri"].(string)) == nil {
return admin_info{
"added": []string{
in["uri"].(string),
},
}, nil
} else {
return admin_info{
"not_added": []string{
in["uri"].(string),
},
}, errors.New("Failed to add peer")
}
})
// TODO? have other parts of the program call to add their own handlers
a.addHandler("dot", nil, func(out *[]byte, _ ...string) {
*out = a.getResponse_dot()
a.addHandler("removePeer", []string{"port"}, func(in admin_info) (admin_info, error) {
if a.removePeer(fmt.Sprint(in["port"])) == nil {
return admin_info{
"removed": []string{
fmt.Sprint(in["port"]),
},
}, nil
} else {
return admin_info{
"not_removed": []string{
fmt.Sprint(in["port"]),
},
}, errors.New("Failed to remove peer")
}
})
a.addHandler("getSelf", nil, func(out *[]byte, _ ...string) {
*out = []byte(a.printInfos([]admin_nodeInfo{*a.getData_getSelf()}))
a.addHandler("getTunTap", []string{}, func(in admin_info) (r admin_info, e error) {
defer func() {
// Only fall back to "none" if the handler panicked (e.g. no TUN/TAP is open)
if recover() != nil {
r = admin_info{"none": admin_info{}}
e = nil
}
}()
return admin_info{
a.core.tun.iface.Name(): admin_info{
"tap_mode": a.core.tun.iface.IsTAP(),
"mtu": a.core.tun.mtu,
},
}, nil
})
a.addHandler("getPeers", nil, func(out *[]byte, _ ...string) {
*out = []byte(a.printInfos(a.getData_getPeers()))
a.addHandler("setTunTap", []string{"name", "[tap_mode]", "[mtu]"}, func(in admin_info) (admin_info, error) {
// Set sane defaults
iftapmode := getDefaults().defaultIfTAPMode
ifmtu := getDefaults().defaultIfMTU
// Has TAP mode been specified?
if tap, ok := in["tap_mode"]; ok {
iftapmode = tap.(bool)
}
// Check we have enough params for MTU
if mtu, ok := in["mtu"]; ok {
if mtu.(float64) >= 1280 && int(mtu.(float64)) <= getDefaults().maximumIfMTU {
ifmtu = int(in["mtu"].(float64))
}
}
// Start the TUN adapter
if err := a.startTunWithMTU(in["name"].(string), iftapmode, ifmtu); err != nil {
return admin_info{}, errors.New("Failed to configure adapter")
} else {
return admin_info{
a.core.tun.iface.Name(): admin_info{
"tap_mode": a.core.tun.iface.IsTAP(),
"mtu": ifmtu,
},
}, nil
}
})
a.addHandler("getSwitchPeers", nil, func(out *[]byte, _ ...string) {
*out = []byte(a.printInfos(a.getData_getSwitchPeers()))
a.addHandler("getMulticastInterfaces", []string{}, func(in admin_info) (admin_info, error) {
var intfs []string
for _, v := range a.core.multicast.interfaces() {
intfs = append(intfs, v.Name)
}
return admin_info{"multicast_interfaces": intfs}, nil
})
a.addHandler("getDHT", nil, func(out *[]byte, _ ...string) {
*out = []byte(a.printInfos(a.getData_getDHT()))
a.addHandler("getAllowedEncryptionPublicKeys", []string{}, func(in admin_info) (admin_info, error) {
return admin_info{"allowed_box_pubs": a.getAllowedEncryptionPublicKeys()}, nil
})
a.addHandler("getSessions", nil, func(out *[]byte, _ ...string) {
*out = []byte(a.printInfos(a.getData_getSessions()))
a.addHandler("addAllowedEncryptionPublicKey", []string{"box_pub_key"}, func(in admin_info) (admin_info, error) {
if a.addAllowedEncryptionPublicKey(in["box_pub_key"].(string)) == nil {
return admin_info{
"added": []string{
in["box_pub_key"].(string),
},
}, nil
} else {
return admin_info{
"not_added": []string{
in["box_pub_key"].(string),
},
}, errors.New("Failed to add allowed box pub key")
}
})
a.addHandler("removeAllowedEncryptionPublicKey", []string{"box_pub_key"}, func(in admin_info) (admin_info, error) {
if a.removeAllowedEncryptionPublicKey(in["box_pub_key"].(string)) == nil {
return admin_info{
"removed": []string{
in["box_pub_key"].(string),
},
}, nil
} else {
return admin_info{
"not_removed": []string{
in["box_pub_key"].(string),
},
}, errors.New("Failed to remove allowed box pub key")
}
})
go a.listen()
}
// start runs the admin API socket to listen for / respond to admin API calls.
func (a *admin) start() error {
go a.listen()
return nil
}
// listen is run by start and manages API connections.
func (a *admin) listen() {
l, err := net.Listen("tcp", a.listenaddr)
if err != nil {
@@ -74,56 +244,108 @@ func (a *admin) listen() {
}
}
// handleRequest calls the request handler for each request sent to the admin API.
func (a *admin) handleRequest(conn net.Conn) {
buf := make([]byte, 1024)
_, err := conn.Read(buf)
if err != nil {
a.core.log.Printf("Admin socket failed to read: %v", err)
conn.Close()
return
}
var out []byte
buf = bytes.Trim(buf, "\x00\r\n\t")
call := strings.Split(string(buf), " ")
var cmd string
var args []string
if len(call) > 0 {
cmd = call[0]
args = call[1:]
}
done := false
for _, handler := range a.handlers {
if cmd == handler.name {
handler.handler(&out, args...)
done = true
break
decoder := json.NewDecoder(conn)
encoder := json.NewEncoder(conn)
encoder.SetIndent("", " ")
recv := make(admin_info)
send := make(admin_info)
defer func() {
r := recover()
if r != nil {
send = admin_info{
"status": "error",
"error": "Unrecoverable error, possibly as a result of invalid input types or malformed syntax",
}
fmt.Println("Admin socket error:", r)
if err := encoder.Encode(&send); err != nil {
fmt.Println("Admin socket JSON encode error:", err)
}
conn.Close()
}
}()
for {
// Start with a clean slate on each request
recv = admin_info{}
send = admin_info{}
// Decode the input
if err := decoder.Decode(&recv); err != nil {
// fmt.Println("Admin socket JSON decode error:", err)
return
}
// Send the request back with the response, and default to "error"
// unless the status is changed below by one of the handlers
send["request"] = recv
send["status"] = "error"
handlers:
for _, handler := range a.handlers {
// We've found the handler that matches the request
if recv["request"] == handler.name {
// Check that we have all the required arguments
for _, arg := range handler.args {
// An argument in [square brackets] is optional and not required,
// so we can safely ignore those
if strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") {
continue
}
// Check if the field is missing
if _, ok := recv[arg]; !ok {
send = admin_info{
"status": "error",
"error": "Expected field missing: " + arg,
"expecting": arg,
}
break handlers
}
}
// By this point we should have all the fields we need, so call
// the handler
response, err := handler.handler(recv)
if err != nil {
send["error"] = err.Error()
if response != nil {
send["response"] = response
}
} else {
send["status"] = "success"
if response != nil {
send["response"] = response
}
}
break
}
}
// Send the response back
if err := encoder.Encode(&send); err != nil {
return
}
// If "keepalive" isn't true then close the connection
if keepalive, ok := recv["keepalive"]; !ok || !keepalive.(bool) {
conn.Close()
}
}
if !done {
out = []byte("I didn't understand that!\n")
}
_, err = conn.Write(out)
if err != nil {
a.core.log.Printf("Admin socket error: %v", err)
}
conn.Close()
}
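Since the handler loop above defines the whole wire protocol (one JSON object in, one JSON object out, with an optional keepalive flag), a minimal client sketch may help; the address is the local TCP port 9001 default mentioned in this changeset, and everything else here is illustrative.

package main

import (
	"encoding/json"
	"fmt"
	"net"
)

func main() {
	// Dial the admin socket (localhost:9001 is the default in this changeset)
	conn, err := net.Dial("tcp", "localhost:9001")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// Each request is a JSON object whose "request" field names a handler;
	// "keepalive": true would hold the connection open for further requests.
	req := map[string]interface{}{"request": "getSelf"}
	if err := json.NewEncoder(conn).Encode(&req); err != nil {
		panic(err)
	}
	// The reply echoes the request and adds "status" plus a "response" object.
	var res map[string]interface{}
	if err := json.NewDecoder(conn).Decode(&res); err != nil {
		panic(err)
	}
	fmt.Println("status:", res["status"])
	fmt.Println("response:", res["response"])
}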
// Maps things like "IP", "port", "bucket", or "coords" onto strings
type admin_pair struct {
key string
val string
}
type admin_nodeInfo []admin_pair
func (n *admin_nodeInfo) asMap() map[string]string {
m := make(map[string]string, len(*n))
// asMap converts an admin_nodeInfo into a map of key/value pairs.
func (n *admin_nodeInfo) asMap() map[string]interface{} {
m := make(map[string]interface{}, len(*n))
for _, p := range *n {
m[p.key] = p.val
}
return m
}
// toString creates a printable string representation of an admin_nodeInfo.
func (n *admin_nodeInfo) toString() string {
// TODO return something nicer looking than this
var out []string
@@ -134,6 +356,7 @@ func (n *admin_nodeInfo) toString() string {
return fmt.Sprint(*n)
}
// printInfos returns a newline separated list of strings from admin_nodeInfos, e.g. a printable string of info about all peers.
func (a *admin) printInfos(infos []admin_nodeInfo) string {
var out []string
for _, info := range infos {
@@ -143,17 +366,82 @@ func (a *admin) printInfos(infos []admin_nodeInfo) string {
return strings.Join(out, "\n")
}
// addPeer triggers a connection attempt to a node.
func (a *admin) addPeer(addr string) error {
u, err := url.Parse(addr)
if err == nil {
switch strings.ToLower(u.Scheme) {
case "tcp":
a.core.tcp.connect(u.Host)
case "socks":
a.core.tcp.connectSOCKS(u.Host, u.Path[1:])
default:
return errors.New("invalid peer: " + addr)
}
} else {
// no url scheme provided
addr = strings.ToLower(addr)
if strings.HasPrefix(addr, "tcp:") {
addr = addr[4:]
}
a.core.tcp.connect(addr)
return nil
}
return nil
}
// removePeer disconnects an existing node (given by the node's port number).
func (a *admin) removePeer(p string) error {
iport, err := strconv.Atoi(p)
if err != nil {
return err
}
a.core.peers.removePeer(switchPort(iport))
return nil
}
// startTunWithMTU creates the tun/tap device, sets its address, and sets the MTU to the provided value.
func (a *admin) startTunWithMTU(ifname string, iftapmode bool, ifmtu int) error {
// Close the TUN first if open
_ = a.core.tun.close()
// Then reconfigure and start it
addr := a.core.router.addr
straddr := fmt.Sprintf("%s/%v", net.IP(addr[:]).String(), 8*len(address_prefix))
if ifname != "none" {
err := a.core.tun.setup(ifname, iftapmode, straddr, ifmtu)
if err != nil {
return err
}
// If we have open sessions then we need to notify them
// that our MTU has now changed
for _, sinfo := range a.core.sessions.sinfos {
if ifname == "none" {
sinfo.myMTU = 0
} else {
sinfo.myMTU = uint16(ifmtu)
}
a.core.sessions.sendPingPong(sinfo, false)
}
// Aaaaand... go!
go a.core.tun.read()
}
go a.core.tun.write()
return nil
}
// getData_getSelf returns the self node's info for admin responses.
func (a *admin) getData_getSelf() *admin_nodeInfo {
table := a.core.switchTable.table.Load().(lookupTable)
addr := a.core.router.addr
coords := table.self.getCoords()
self := admin_nodeInfo{
{"IP", net.IP(addr[:]).String()},
{"ip", a.core.GetAddress().String()},
{"subnet", a.core.GetSubnet().String()},
{"coords", fmt.Sprint(coords)},
}
return &self
}
// getData_getPeers returns info from Core.peers for an admin response.
func (a *admin) getData_getPeers() []admin_nodeInfo {
ports := a.core.peers.ports.Load().(map[switchPort]*peer)
var peerInfos []admin_nodeInfo
@@ -166,14 +454,18 @@ func (a *admin) getData_getPeers() []admin_nodeInfo {
p := ports[port]
addr := *address_addrForNodeID(getNodeID(&p.box))
info := admin_nodeInfo{
{"IP", net.IP(addr[:]).String()},
{"port", fmt.Sprint(port)},
{"ip", net.IP(addr[:]).String()},
{"port", port},
{"uptime", int(time.Since(p.firstSeen).Seconds())},
{"bytes_sent", atomic.LoadUint64(&p.bytesSent)},
{"bytes_recvd", atomic.LoadUint64(&p.bytesRecvd)},
}
peerInfos = append(peerInfos, info)
}
return peerInfos
}
// getData_getSwitchPeers returns info from Core.switchTable for an admin response.
func (a *admin) getData_getSwitchPeers() []admin_nodeInfo {
var peerInfos []admin_nodeInfo
table := a.core.switchTable.table.Load().(lookupTable)
@@ -186,44 +478,56 @@ func (a *admin) getData_getSwitchPeers() []admin_nodeInfo {
addr := *address_addrForNodeID(getNodeID(&peer.box))
coords := elem.locator.getCoords()
info := admin_nodeInfo{
{"IP", net.IP(addr[:]).String()},
{"ip", net.IP(addr[:]).String()},
{"coords", fmt.Sprint(coords)},
{"port", fmt.Sprint(elem.port)},
{"port", elem.port},
}
peerInfos = append(peerInfos, info)
}
return peerInfos
}
// getData_getDHT returns info from Core.dht for an admin response.
func (a *admin) getData_getDHT() []admin_nodeInfo {
var infos []admin_nodeInfo
now := time.Now()
getDHT := func() {
for i := 0; i < a.core.dht.nBuckets(); i++ {
b := a.core.dht.getBucket(i)
for _, v := range b.infos {
addr := *address_addrForNodeID(v.getNodeID())
info := admin_nodeInfo{
{"IP", net.IP(addr[:]).String()},
{"coords", fmt.Sprint(v.coords)},
{"bucket", fmt.Sprint(i)},
getInfo := func(vs []*dhtInfo, isPeer bool) {
for _, v := range vs {
addr := *address_addrForNodeID(v.getNodeID())
info := admin_nodeInfo{
{"ip", net.IP(addr[:]).String()},
{"coords", fmt.Sprint(v.coords)},
{"bucket", i},
{"peer_only", isPeer},
{"last_seen", int(now.Sub(v.recv).Seconds())},
}
infos = append(infos, info)
}
infos = append(infos, info)
}
getInfo(b.other, false)
getInfo(b.peers, true)
}
}
a.core.router.doAdmin(getDHT)
return infos
}
// getData_getSessions returns info from Core.sessions for an admin response.
func (a *admin) getData_getSessions() []admin_nodeInfo {
var infos []admin_nodeInfo
getSessions := func() {
for _, sinfo := range a.core.sessions.sinfos {
// TODO? skipped known but timed out sessions?
info := admin_nodeInfo{
{"IP", net.IP(sinfo.theirAddr[:]).String()},
{"ip", net.IP(sinfo.theirAddr[:]).String()},
{"coords", fmt.Sprint(sinfo.coords)},
{"MTU", fmt.Sprint(sinfo.getMTU())},
{"mtu", sinfo.getMTU()},
{"was_mtu_fixed", sinfo.wasMTUFixed},
{"bytes_sent", sinfo.bytesSent},
{"bytes_recvd", sinfo.bytesRecvd},
}
infos = append(infos, info)
}
@@ -232,42 +536,75 @@ func (a *admin) getData_getSessions() []admin_nodeInfo {
return infos
}
// getAllowedEncryptionPublicKeys returns the public keys permitted for incoming peer connections.
func (a *admin) getAllowedEncryptionPublicKeys() []string {
pubs := a.core.peers.getAllowedEncryptionPublicKeys()
var out []string
for _, pub := range pubs {
out = append(out, hex.EncodeToString(pub[:]))
}
return out
}
// addAllowedEncryptionPublicKey whitelists a key for incoming peer connections.
func (a *admin) addAllowedEncryptionPublicKey(bstr string) (err error) {
boxBytes, err := hex.DecodeString(bstr)
if err == nil {
var box boxPubKey
copy(box[:], boxBytes)
a.core.peers.addAllowedEncryptionPublicKey(&box)
}
return
}
// removeAllowedEncryptionPublicKey removes a key from the whitelist for incoming peer connections.
// If none are set, an empty list permits all incoming connections.
func (a *admin) removeAllowedEncryptionPublicKey(bstr string) (err error) {
boxBytes, err := hex.DecodeString(bstr)
if err == nil {
var box boxPubKey
copy(box[:], boxBytes)
a.core.peers.removeAllowedEncryptionPublicKey(&box)
}
return
}
// getResponse_dot returns a response for a graphviz dot formatted representation of the known parts of the network.
// This is color-coded and labeled, and includes the self node, switch peers, nodes known to the DHT, and nodes with open sessions.
// The graph is structured as a tree with directed links leading away from the root.
func (a *admin) getResponse_dot() []byte {
self := a.getData_getSelf().asMap()
myAddr := self["IP"]
self := a.getData_getSelf()
peers := a.getData_getSwitchPeers()
dht := a.getData_getDHT()
sessions := a.getData_getSessions()
// Map of coords onto IP
m := make(map[string]string)
m[self["coords"]] = self["IP"]
for _, peer := range peers {
p := peer.asMap()
m[p["coords"]] = p["IP"]
}
for _, node := range dht {
n := node.asMap()
m[n["coords"]] = n["IP"]
}
for _, node := range sessions {
n := node.asMap()
m[n["coords"]] = n["IP"]
}
// Start building a tree from all known nodes
type nodeInfo struct {
name string
key string
parent string
name string
key string
parent string
options string
}
infos := make(map[string]nodeInfo)
// First fill the tree with all known nodes, no parents
for k, n := range m {
infos[k] = nodeInfo{
name: n,
key: k,
addInfo := func(nodes []admin_nodeInfo, options string, tag string) {
for _, node := range nodes {
n := node.asMap()
info := nodeInfo{
key: n["coords"].(string),
options: options,
}
if len(tag) > 0 {
info.name = fmt.Sprintf("%s\n%s", n["ip"].(string), tag)
} else {
info.name = n["ip"].(string)
}
infos[info.key] = info
}
}
addInfo(dht, "fillcolor=\"#ffffff\" style=filled fontname=\"sans serif\"", "Known in DHT") // white
addInfo(sessions, "fillcolor=\"#acf3fd\" style=filled fontname=\"sans serif\"", "Open session") // blue
addInfo(peers, "fillcolor=\"#ffffb5\" style=filled fontname=\"sans serif\"", "Connected peer") // yellow
addInfo(append([]admin_nodeInfo(nil), *self), "fillcolor=\"#a5ff8a\" style=filled fontname=\"sans serif\"", "This node") // green
// Get coords as a slice of strings, FIXME? this looks very fragile
coordSlice := func(coords string) []string {
tmp := strings.Replace(coords, "[", "", -1)
@@ -286,6 +623,7 @@ func (a *admin) getResponse_dot() []byte {
}
newInfo.name = "?"
newInfo.key = key
newInfo.options = "fontname=\"sans serif\" style=dashed color=\"#999999\" fontcolor=\"#999999\""
infos[key] = newInfo
}
}
@@ -303,7 +641,7 @@ func (a *admin) getResponse_dot() []byte {
for _, info := range infos {
keys = append(keys, info.key)
}
// TODO sort
// sort
less := func(i, j int) bool {
return keys[i] < keys[j]
}
@@ -317,11 +655,7 @@ func (a *admin) getResponse_dot() []byte {
// First set the labels
for _, key := range keys {
info := infos[key]
if info.name == myAddr {
put(fmt.Sprintf("\"%v\" [ style = \"filled\", label = \"%v\" ];\n", info.key, info.name))
} else {
put(fmt.Sprintf("\"%v\" [ label = \"%v\" ];\n", info.key, info.name))
}
put(fmt.Sprintf("\"%v\" [ label = \"%v\" %v ];\n", info.key, info.name, info.options))
}
// Then print the tree structure
for _, key := range keys {
@@ -334,7 +668,11 @@ func (a *admin) getResponse_dot() []byte {
continue
}
port := coordsSplit[len(coordsSplit)-1]
put(fmt.Sprintf(" \"%+v\" -> \"%+v\" [ label = \"%v\" ];\n", info.parent, info.key, port))
style := "fontname=\"sans serif\""
if infos[info.parent].name == "?" || infos[info.key].name == "?" {
style = "fontname=\"sans serif\" style=dashed color=\"#999999\" fontcolor=\"#999999\""
}
put(fmt.Sprintf(" \"%+v\" -> \"%+v\" [ label = \"%v\" %s ];\n", info.parent, info.key, port, style))
}
put("}\n")
return out

View File

@@ -0,0 +1,24 @@
package config
// NodeConfig defines all configuration values needed to run a single yggdrasil node
type NodeConfig struct {
Listen string `comment:"Listen address for peer connections. Default is to listen for all\nTCP connections over IPv4 and IPv6 with a random port."`
AdminListen string `comment:"Listen address for admin connections. Default is to listen for local\nconnections only on TCP port 9001."`
Peers []string `comment:"List of connection strings for static peers in URI format, i.e.\ntcp://a.b.c.d:e or socks://a.b.c.d:e/f.g.h.i:j"`
AllowedEncryptionPublicKeys []string `comment:"List of peer encryption public keys to allow for incoming TCP\nconnections. If left empty/undefined then all connections\nwill be allowed by default."`
EncryptionPublicKey string `comment:"Your public encryption key. Your peers may ask you for this to put\ninto their AllowedEncryptionPublicKeys configuration."`
EncryptionPrivateKey string `comment:"Your private encryption key. DO NOT share this with anyone!"`
SigningPublicKey string `comment:"Your public signing key. You should not ordinarily need to share\nthis with anyone."`
SigningPrivateKey string `comment:"Your private signing key. DO NOT share this with anyone!"`
MulticastInterfaces []string `comment:"Regular expressions for which interfaces multicast peer discovery\nshould be enabled on. If none specified, multicast peer discovery is\ndisabled. The default value is .* which uses all interfaces."`
IfName string `comment:"Local network interface name for TUN/TAP adapter, or \"auto\" to select\nan interface automatically, or \"none\" to run without TUN/TAP."`
IfTAPMode bool `comment:"Set local network interface to TAP mode rather than TUN mode if\nsupported by your platform - option will be ignored if not."`
IfMTU int `comment:"Maximum Transmission Unit (MTU) size for your local TUN/TAP interface.\nDefault is the largest supported size for your platform. The lowest\npossible value is 1280."`
//Net NetConfig `comment:"Extended options for connecting to peers over other networks."`
}
// NetConfig defines network/proxy related configuration values
type NetConfig struct {
Tor TorConfig `comment:"Experimental options for configuring peerings over Tor."`
I2P I2PConfig `comment:"Experimental options for configuring peerings over I2P."`
}

View File

@@ -0,0 +1,8 @@
package config
// I2PConfig is the configuration structure for I2P-related values
type I2PConfig struct {
Keyfile string // private key file or empty string for ephemeral keys
Addr string // address of i2p api connector
Enabled bool
}

View File

@@ -0,0 +1,8 @@
package config
// TorConfig is the configuration structure for Tor proxy-related values
type TorConfig struct {
OnionKeyfile string // hidden service private key for ADD_ONION (currently unimplemented)
ControlAddr string // tor control port address
Enabled bool
}

View File

@@ -1,9 +1,18 @@
package yggdrasil
import "io/ioutil"
import "log"
import "regexp"
import (
"encoding/hex"
"fmt"
"io/ioutil"
"log"
"net"
"regexp"
"yggdrasil/config"
)
// The Core object represents the Yggdrasil node. You should create a Core
// object for each Yggdrasil node you plan to run.
type Core struct {
// This is the main data structure that holds everything else for a node
boxPub boxPubKey
@@ -19,17 +28,10 @@ type Core struct {
tun tunDevice
admin admin
searches searches
tcp *tcpInterface
udp *udpInterface
multicast multicast
tcp tcpInterface
log *log.Logger
ifceExpr *regexp.Regexp // the zone of link-local IPv6 peers must match this
}
func (c *Core) Init() {
// Only called by the simulator, to set up nodes with random keys
bpub, bpriv := newBoxKeys()
spub, spriv := newSigKeys()
c.init(bpub, bpriv, spub, spriv)
ifceExpr []*regexp.Regexp // the zone of link-local IPv6 peers must match this
}
func (c *Core) init(bpub *boxPubKey,
@@ -41,23 +43,183 @@ func (c *Core) init(bpub *boxPubKey,
// Start launches goroutines that depend on structs being set up
// This is pretty much required to completely avoid race conditions
util_initByteStore()
c.log = log.New(ioutil.Discard, "", 0)
if c.log == nil {
c.log = log.New(ioutil.Discard, "", 0)
}
c.boxPub, c.boxPriv = *bpub, *bpriv
c.sigPub, c.sigPriv = *spub, *spriv
c.admin.core = c
c.sigs.init()
c.searches.init(c)
c.dht.init(c)
c.sessions.init(c)
c.multicast.init(c)
c.peers.init(c)
c.router.init(c)
c.switchTable.init(c, c.sigPub) // TODO move before peers? before router?
c.tun.init(c)
}
// Starts up Yggdrasil using the provided NodeConfig, and outputs debug logging
// through the provided log.Logger. The started stack will include a TCP
// socket, a multicast discovery socket, an admin socket, router, switch and
// DHT node.
func (c *Core) Start(nc *config.NodeConfig, log *log.Logger) error {
c.log = log
c.log.Println("Starting up...")
var boxPub boxPubKey
var boxPriv boxPrivKey
var sigPub sigPubKey
var sigPriv sigPrivKey
boxPubHex, err := hex.DecodeString(nc.EncryptionPublicKey)
if err != nil {
return err
}
boxPrivHex, err := hex.DecodeString(nc.EncryptionPrivateKey)
if err != nil {
return err
}
sigPubHex, err := hex.DecodeString(nc.SigningPublicKey)
if err != nil {
return err
}
sigPrivHex, err := hex.DecodeString(nc.SigningPrivateKey)
if err != nil {
return err
}
copy(boxPub[:], boxPubHex)
copy(boxPriv[:], boxPrivHex)
copy(sigPub[:], sigPubHex)
copy(sigPriv[:], sigPrivHex)
c.init(&boxPub, &boxPriv, &sigPub, &sigPriv)
c.admin.init(c, nc.AdminListen)
if err := c.tcp.init(c, nc.Listen); err != nil {
c.log.Println("Failed to start TCP interface")
return err
}
if err := c.router.start(); err != nil {
c.log.Println("Failed to start router")
return err
}
if err := c.admin.start(); err != nil {
c.log.Println("Failed to start admin socket")
return err
}
if err := c.multicast.start(); err != nil {
c.log.Println("Failed to start multicast interface")
return err
}
ip := net.IP(c.router.addr[:]).String()
if err := c.tun.start(nc.IfName, nc.IfTAPMode, fmt.Sprintf("%s/8", ip), nc.IfMTU); err != nil {
c.log.Println("Failed to start TUN/TAP")
return err
}
c.log.Println("Startup complete")
return nil
}
// Stops the Yggdrasil node.
func (c *Core) Stop() {
c.log.Println("Stopping...")
c.tun.close()
}
// Generates a new encryption keypair. The encryption keys are used to
// encrypt traffic and to derive the IPv6 address/subnet of the node.
func (c *Core) NewEncryptionKeys() (*boxPubKey, *boxPrivKey) {
return newBoxKeys()
}
// Generates a new signing keypair. The signing keys are used to derive the
// structure of the spanning tree.
func (c *Core) NewSigningKeys() (*sigPubKey, *sigPrivKey) {
return newSigKeys()
}
// Gets the node ID.
func (c *Core) GetNodeID() *NodeID {
return getNodeID(&c.boxPub)
}
// Gets the tree ID.
func (c *Core) GetTreeID() *TreeID {
return getTreeID(&c.sigPub)
}
// Gets the IPv6 address of the Yggdrasil node. This is always a /128.
func (c *Core) GetAddress() *net.IP {
address := net.IP(address_addrForNodeID(c.GetNodeID())[:])
return &address
}
// Gets the routed IPv6 subnet of the Yggdrasil node. This is always a /64.
func (c *Core) GetSubnet() *net.IPNet {
subnet := address_subnetForNodeID(c.GetNodeID())[:]
subnet = append(subnet, 0, 0, 0, 0, 0, 0, 0, 0)
return &net.IPNet{IP: subnet, Mask: net.CIDRMask(64, 128)}
}
// Sets the output logger of the Yggdrasil node after startup. This may be
// useful if you want to redirect the output later.
func (c *Core) SetLogger(log *log.Logger) {
c.log = log
}
// Adds a peer. This should be specified in the peer URI format, i.e.
// tcp://a.b.c.d:e or socks://a.b.c.d:e/f.g.h.i:j
func (c *Core) AddPeer(addr string) error {
return c.admin.addPeer(addr)
}
// Adds an expression to select multicast interfaces for peer discovery. This
// should be done before calling Start. This function can be called multiple
// times to add multiple search expressions.
func (c *Core) AddMulticastInterfaceExpr(expr *regexp.Regexp) {
c.ifceExpr = append(c.ifceExpr, expr)
}
// Adds an allowed public key. This allow peerings to be restricted only to
// keys that you have selected.
func (c *Core) AddAllowedEncryptionPublicKey(boxStr string) error {
return c.admin.addAllowedEncryptionPublicKey(boxStr)
}
// Gets the default TUN/TAP interface name for your platform.
func (c *Core) GetTUNDefaultIfName() string {
return getDefaults().defaultIfName
}
// Gets the default TUN/TAP interface MTU for your platform. This can be as high
// as 65535, depending on platform, but is never lower than 1280.
func (c *Core) GetTUNDefaultIfMTU() int {
return getDefaults().defaultIfMTU
}
// Gets the maximum supported TUN/TAP interface MTU for your platform. This
// can be as high as 65535, depending on platform, but is never lower than 1280.
func (c *Core) GetTUNMaximumIfMTU() int {
return getDefaults().maximumIfMTU
}
// Gets the default TUN/TAP interface mode for your platform.
func (c *Core) GetTUNDefaultIfTAPMode() bool {
return getDefaults().defaultIfTAPMode
}
// Gets the current TUN/TAP interface name.
func (c *Core) GetTUNIfName() string {
return c.tun.iface.Name()
}
// Gets the current TUN/TAP interface MTU.
func (c *Core) GetTUNIfMTU() int {
return c.tun.mtu
}
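Taken together, the exported API above is enough to embed a node; the sketch below shows the intended call order (configure, add any multicast expressions before Start, then Start, and eventually Stop). The key fields are placeholders here: Start expects hex-encoded key strings, which this sketch does not generate.

package main

import (
	"log"
	"os"
	"regexp"

	"yggdrasil"
	"yggdrasil/config"
)

func main() {
	logger := log.New(os.Stdout, "", log.LstdFlags)
	nc := config.NodeConfig{
		Listen:      "[::]:0",
		AdminListen: "localhost:9001",
		IfName:      "auto",
		IfMTU:       1280,
		// EncryptionPublicKey, EncryptionPrivateKey, SigningPublicKey and
		// SigningPrivateKey must be set to hex strings (placeholders omitted)
	}
	var core yggdrasil.Core
	// Multicast expressions must be added before calling Start
	core.AddMulticastInterfaceExpr(regexp.MustCompile(".*"))
	if err := core.Start(&nc, logger); err != nil {
		logger.Fatalln("Failed to start:", err)
	}
	logger.Println("Address:", core.GetAddress(), "Subnet:", core.GetSubnet())
	// ... run until shutdown, then:
	core.Stop()
}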

View File

@@ -10,10 +10,13 @@ It also defines NodeID and TreeID as hashes of keys, and wraps hash functions
*/
import "crypto/rand"
import "crypto/sha512"
import "golang.org/x/crypto/ed25519"
import "golang.org/x/crypto/nacl/box"
import (
"crypto/rand"
"crypto/sha512"
"golang.org/x/crypto/ed25519"
"golang.org/x/crypto/nacl/box"
)
////////////////////////////////////////////////////////////////////////////////
@@ -91,6 +94,7 @@ const boxPubKeyLen = 32
const boxPrivKeyLen = 32
const boxSharedKeyLen = 32
const boxNonceLen = 24
const boxOverhead = box.Overhead
type boxPubKey [boxPubKeyLen]byte
type boxPrivKey [boxPrivKeyLen]byte
@@ -120,7 +124,6 @@ func boxOpen(shared *boxSharedKey,
boxed []byte,
nonce *boxNonce) ([]byte, bool) {
out := util_getBytes()
//return append(out, boxed...), true // XXX HACK to test without encryption
s := (*[boxSharedKeyLen]byte)(shared)
n := (*[boxNonceLen]byte)(nonce)
unboxed, success := box.OpenAfterPrecomputation(out, boxed, n, s)
@@ -133,7 +136,6 @@ func boxSeal(shared *boxSharedKey, unboxed []byte, nonce *boxNonce) ([]byte, *bo
}
nonce.update()
out := util_getBytes()
//return append(out, unboxed...), nonce // XXX HACK to test without encryption
s := (*[boxSharedKeyLen]byte)(shared)
n := (*[boxNonceLen]byte)(nonce)
boxed := box.SealAfterPrecomputation(out, unboxed, n, s)
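For context, the wrappers above lean directly on golang.org/x/crypto/nacl/box; a self-contained round trip through that upstream API looks like the following (key and variable names are illustrative).

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/box"
)

func main() {
	ourPub, ourPriv, _ := box.GenerateKey(rand.Reader)
	theirPub, theirPriv, _ := box.GenerateKey(rand.Reader)
	// Precompute the shared key once per pair, as the wrappers above do; both
	// sides derive the same secret from their own private key and the other
	// side's public key.
	var ourShared, theirShared [32]byte
	box.Precompute(&ourShared, theirPub, ourPriv)
	box.Precompute(&theirShared, ourPub, theirPriv)
	var nonce [24]byte // must never repeat for the same shared key
	sealed := box.SealAfterPrecomputation(nil, []byte("hello"), &nonce, &ourShared)
	opened, ok := box.OpenAfterPrecomputation(nil, sealed, &nonce, &theirShared)
	fmt.Println(ok, string(opened)) // true hello
}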

View File

@@ -1,3 +1,5 @@
// +build debug
package yggdrasil
// These are functions that should not exist
@@ -8,18 +10,43 @@ package yggdrasil
import _ "golang.org/x/net/ipv6" // TODO put this somewhere better
import "golang.org/x/net/proxy"
import "fmt"
import "net"
import "log"
import "regexp"
import _ "net/http/pprof"
import "net/http"
import "runtime"
// Starts the function profiler. This is only supported when built with
// '-tags debug'.
func StartProfiler(log *log.Logger) error {
runtime.SetBlockProfileRate(1)
go func() { log.Println(http.ListenAndServe("localhost:6060", nil)) }()
return nil
}
// This function is only called by the simulator to set up a node with random
// keys. It should not be used and may be removed in the future.
func (c *Core) Init() {
bpub, bpriv := newBoxKeys()
spub, spriv := newSigKeys()
c.init(bpub, bpriv, spub, spriv)
c.router.start()
}
////////////////////////////////////////////////////////////////////////////////
// Core
func (c *Core) DEBUG_getSigPub() sigPubKey {
func (c *Core) DEBUG_getSigningPublicKey() sigPubKey {
return (sigPubKey)(c.sigPub)
}
func (c *Core) DEBUG_getBoxPub() boxPubKey {
func (c *Core) DEBUG_getEncryptionPublicKey() boxPubKey {
return (boxPubKey)(c.boxPub)
}
@@ -37,11 +64,10 @@ func (c *Core) DEBUG_getPeers() *peers {
return &c.peers
}
func (ps *peers) DEBUG_newPeer(box boxPubKey,
sig sigPubKey) *peer {
func (ps *peers) DEBUG_newPeer(box boxPubKey, sig sigPubKey, link boxSharedKey) *peer {
//in <-chan []byte,
//out chan<- []byte) *peer {
return ps.newPeer(&box, &sig) //, in, out)
return ps.newPeer(&box, &sig, &link) //, in, out)
}
/*
@@ -99,8 +125,8 @@ func (l *switchLocator) DEBUG_getCoords() []byte {
return l.getCoords()
}
func (c *Core) DEBUG_switchLookup(dest []byte, ttl uint64) (switchPort, uint64) {
return c.switchTable.lookup(dest, ttl)
func (c *Core) DEBUG_switchLookup(dest []byte) switchPort {
return c.switchTable.lookup(dest)
}
/*
@@ -154,11 +180,26 @@ func (c *Core) DEBUG_getDHTSize() int {
total := 0
for bidx := 0; bidx < c.dht.nBuckets(); bidx++ {
b := c.dht.getBucket(bidx)
total += len(b.infos)
total += len(b.peers)
total += len(b.other)
}
return total
}
// TUN defaults
func (c *Core) DEBUG_GetTUNDefaultIfName() string {
return getDefaults().defaultIfName
}
func (c *Core) DEBUG_GetTUNDefaultIfMTU() int {
return getDefaults().defaultIfMTU
}
func (c *Core) DEBUG_GetTUNDefaultIfTAPMode() bool {
return getDefaults().defaultIfTAPMode
}
// udpInterface
// FIXME udpInterface isn't exported
// So debug functions need to work differently...
@@ -217,9 +258,10 @@ func (c *Core) DEBUG_startTunWithMTU(ifname string, iftapmode bool, mtu int) {
if err != nil {
panic(err)
}
go c.tun.read()
c.log.Println("Setup TUN/TAP:", c.tun.iface.Name(), straddr)
go func() { panic(c.tun.read()) }()
}
go c.tun.write()
go func() { panic(c.tun.write()) }()
}
func (c *Core) DEBUG_stopTun() {
@@ -232,6 +274,10 @@ func (c *Core) DEBUG_newBoxKeys() (*boxPubKey, *boxPrivKey) {
return newBoxKeys()
}
func (c *Core) DEBUG_getSharedKey(myPrivKey *boxPrivKey, othersPubKey *boxPubKey) *boxSharedKey {
return getSharedKey(myPrivKey, othersPubKey)
}
func (c *Core) DEBUG_newSigKeys() (*sigPubKey, *sigPrivKey) {
return newSigKeys()
}
@@ -261,14 +307,21 @@ func (c *Core) DEBUG_init(bpub []byte,
copy(sigPub[:], spub)
copy(sigPriv[:], spriv)
c.init(&boxPub, &boxPriv, &sigPub, &sigPriv)
if err := c.router.start(); err != nil {
panic(err)
}
}
////////////////////////////////////////////////////////////////////////////////
/*
func (c *Core) DEBUG_setupAndStartGlobalUDPInterface(addrport string) {
iface := udpInterface{}
iface.init(c, addrport)
c.udp = &iface
if err := c.udp.init(c, addrport); err != nil {
c.log.Println("Failed to start UDP interface:", err)
panic(err)
}
}
func (c *Core) DEBUG_getGlobalUDPAddr() *net.UDPAddr {
@@ -289,14 +342,41 @@ func (c *Core) DEBUG_maybeSendUDPKeys(saddr string) {
c.udp.sendKeys(addr)
}
}
*/
////////////////////////////////////////////////////////////////////////////////
func (c *Core) DEBUG_addPeer(addr string) {
err := c.admin.addPeer(addr)
if err != nil {
panic(err)
}
}
func (c *Core) DEBUG_addSOCKSConn(socksaddr, peeraddr string) {
go func() {
dialer, err := proxy.SOCKS5("tcp", socksaddr, nil, proxy.Direct)
if err == nil {
conn, err := dialer.Dial("tcp", peeraddr)
if err == nil {
c.tcp.callWithConn(&wrappedConn{
c: conn,
raddr: &wrappedAddr{
network: "tcp",
addr: peeraddr,
},
})
}
}
}()
}
//*
func (c *Core) DEBUG_setupAndStartGlobalTCPInterface(addrport string) {
iface := tcpInterface{}
iface.init(c, addrport)
c.tcp = &iface
if err := c.tcp.init(c, addrport); err != nil {
c.log.Println("Failed to start TCP interface:", err)
panic(err)
}
}
func (c *Core) DEBUG_getGlobalTCPAddr() *net.TCPAddr {
@@ -344,6 +424,13 @@ func (c *Core) DEBUG_setupAndStartAdminInterface(addrport string) {
c.admin = a
}
func (c *Core) DEBUG_setupAndStartMulticastInterface() {
m := multicast{}
m.init(c)
c.multicast = m
m.start()
}
////////////////////////////////////////////////////////////////////////////////
func (c *Core) DEBUG_setLogger(log *log.Logger) {
@@ -351,25 +438,66 @@ func (c *Core) DEBUG_setLogger(log *log.Logger) {
}
func (c *Core) DEBUG_setIfceExpr(expr *regexp.Regexp) {
c.ifceExpr = expr
c.ifceExpr = append(c.ifceExpr, expr)
}
func (c *Core) DEBUG_addAllowedEncryptionPublicKey(boxStr string) {
err := c.admin.addAllowedEncryptionPublicKey(boxStr)
if err != nil {
panic(err)
}
}
////////////////////////////////////////////////////////////////////////////////
func DEBUG_simLinkPeers(p, q *peer) {
// Sets q.out() to point to p and starts p.linkLoop()
plinkIn := make(chan []byte, 1)
qlinkIn := make(chan []byte, 1)
p.linkOut, q.linkOut = make(chan []byte, 1), make(chan []byte, 1)
go func() {
for bs := range p.linkOut {
q.handlePacket(bs)
}
}()
go func() {
for bs := range q.linkOut {
p.handlePacket(bs)
}
}()
p.out = func(bs []byte) {
go q.handlePacket(bs, qlinkIn)
go q.handlePacket(bs)
}
q.out = func(bs []byte) {
go p.handlePacket(bs, plinkIn)
go p.handlePacket(bs)
}
go p.linkLoop(plinkIn)
go q.linkLoop(qlinkIn)
go p.linkLoop()
go q.linkLoop()
}
func (c *Core) DEBUG_simFixMTU() {
c.tun.mtu = 65535
}
////////////////////////////////////////////////////////////////////////////////
func Util_testAddrIDMask() {
for idx := 0; idx < 16; idx++ {
var orig NodeID
orig[8] = 42
for bidx := 0; bidx < idx; bidx++ {
orig[bidx/8] |= (0x80 >> uint8(bidx%8))
}
addr := address_addrForNodeID(&orig)
nid, mask := addr.getNodeIDandMask()
for b := 0; b < len(mask); b++ {
nid[b] &= mask[b]
orig[b] &= mask[b]
}
if *nid != orig {
fmt.Println(orig)
fmt.Println(*addr)
fmt.Println(*nid)
fmt.Println(*mask)
panic(idx)
}
}
}

View File

@@ -18,26 +18,37 @@ Slight changes *do* make it blackhole hard, bootstrapping isn't an easy problem
*/
import "sort"
import "time"
import (
"sort"
"time"
)
//import "fmt"
// Number of DHT buckets, equal to the number of bits in a NodeID.
// Note that, in practice, nearly all of these will be empty.
const dht_bucket_number = 8 * NodeIDLen
// Maximum size for buckets and lookups
// Exception for buckets if the next one is non-full
const dht_bucket_size = 2 // This should be at least 2
const dht_lookup_size = 2 // This should be at least 1, below 2 is impractical
const dht_bucket_number = 8 * NodeIDLen // This shouldn't be changed
// Number of nodes to keep in each DHT bucket.
// Additional entries may be kept for peers, for bootstrapping reasons, if they don't already have an entry in the bucket.
const dht_bucket_size = 2
// Number of responses to include in a lookup.
// If extras are given, they will be truncated from the response handler to prevent abuse.
const dht_lookup_size = 16
// dhtInfo represents everything we know about a node in the DHT.
// This includes its key, a cache of its NodeID, coords, and timing/ping related info for deciding who/when to ping nodes for maintenance.
type dhtInfo struct {
nodeID_hidden *NodeID
key boxPubKey
coords []byte
send time.Time // When we last sent a message
recv time.Time // When we last received a message
pings int // Decide when to drop
send time.Time // When we last sent a message
recv time.Time // When we last received a message
pings int // Decide when to drop
throttle time.Duration // Time to wait before pinging a node to bootstrap buckets, increases exponentially from 1 second to 1 minute
bootstrapSend time.Time // The time checked/updated as part of throttle checks
}
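A hypothetical sketch of the throttle growth described in the comments above, doubling after each bootstrap ping and clamped to the stated 1 second to 1 minute range (the helper name is invented for illustration):

// Sketch only: next backoff value for dhtInfo.throttle
func dht_nextThrottle(t time.Duration) time.Duration {
	t *= 2
	if t < time.Second {
		t = time.Second
	}
	if t > time.Minute {
		t = time.Minute
	}
	return t
}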
// Returns the *NodeID associated with dhtInfo.key, calculating it on the fly the first time or from a cache all subsequent times.
func (info *dhtInfo) getNodeID() *NodeID {
if info.nodeID_hidden == nil {
info.nodeID_hidden = getNodeID(&info.key)
@@ -45,28 +56,40 @@ func (info *dhtInfo) getNodeID() *NodeID {
return info.nodeID_hidden
}
// The nodes we know in a bucket (a region of keyspace with a matching prefix of some length).
type bucket struct {
infos []*dhtInfo
peers []*dhtInfo
other []*dhtInfo
}
// Request for a node to do a lookup.
// Includes our key and coords so they can send a response back, and the destination NodeID we want to ask about.
type dhtReq struct {
key boxPubKey // Key of whoever asked
coords []byte // Coords of whoever asked
dest NodeID // NodeID they're asking about
Key boxPubKey // Key of whoever asked
Coords []byte // Coords of whoever asked
Dest NodeID // NodeID they're asking about
}
// Response to a DHT lookup.
// Includes the key and coords of the node that's responding, and the destination they were asked about.
// The main part is Infos []*dhtInfo, the lookup response.
type dhtRes struct {
key boxPubKey // key to respond to
coords []byte // coords to respond to
dest NodeID
infos []*dhtInfo // response
Key boxPubKey // key to respond to
Coords []byte // coords to respond to
Dest NodeID
Infos []*dhtInfo // response
}
// Information about a node, either taken from our table or from a lookup response.
// Used to schedule pings at a later time (they're throttled to 1/second for background maintenance traffic).
type dht_rumor struct {
info *dhtInfo
target *NodeID
}
// The main DHT struct.
// Includes a slice of buckets, to organize known nodes based on their region of keyspace.
// Also includes information about outstanding DHT requests and the rumor mill of nodes to ping at some point.
type dht struct {
core *Core
nodeID NodeID
@@ -77,47 +100,56 @@ type dht struct {
rumorMill []dht_rumor
}
// Initializes the DHT.
func (t *dht) init(c *Core) {
t.core = c
t.nodeID = *t.core.GetNodeID()
t.peers = make(chan *dhtInfo, 1)
t.peers = make(chan *dhtInfo, 1024)
t.reqs = make(map[boxPubKey]map[NodeID]time.Time)
}
// Reads a request, performs a lookup, and responds.
// If the node that sent the request isn't in our DHT, but should be, then we add them.
func (t *dht) handleReq(req *dhtReq) {
// Send them what they asked for
loc := t.core.switchTable.getLocator()
coords := loc.getCoords()
res := dhtRes{
key: t.core.boxPub,
coords: coords,
dest: req.dest,
infos: t.lookup(&req.dest),
Key: t.core.boxPub,
Coords: coords,
Dest: req.Dest,
Infos: t.lookup(&req.Dest, false),
}
t.sendRes(&res, req)
// Also (possibly) add them to our DHT
info := dhtInfo{
key: req.key,
coords: req.coords,
key: req.Key,
coords: req.Coords,
}
t.insertIfNew(&info) // This seems DoSable (we just trust their coords...)
t.insertIfNew(&info, false) // This seems DoSable (we just trust their coords...)
//if req.dest != t.nodeID { t.ping(&info, info.getNodeID()) } // Or spam...
}
// Reads a lookup response, checks that we had sent a matching request, and processes the response info.
// This mainly consists of updating the node we asked in our DHT (they responded, so we know they're still alive), and adding the response info to the rumor mill.
func (t *dht) handleRes(res *dhtRes) {
reqs, isIn := t.reqs[res.key]
t.core.searches.handleDHTRes(res)
reqs, isIn := t.reqs[res.Key]
if !isIn {
return
}
_, isIn = reqs[res.dest]
_, isIn = reqs[res.Dest]
if !isIn {
return
}
now := time.Now()
rinfo := dhtInfo{
key: res.key,
coords: res.coords,
send: time.Now(), // Technically wrong but should be OK...
recv: time.Now(),
key: res.Key,
coords: res.Coords,
send: now, // Technically wrong but should be OK...
recv: now,
throttle: time.Second,
bootstrapSend: now,
}
// If they're already in the table, then keep the correct send time
bidx, isOK := t.getBucketIndex(rinfo.getNodeID())
@@ -125,35 +157,38 @@ func (t *dht) handleRes(res *dhtRes) {
return
}
b := t.getBucket(bidx)
for _, oldinfo := range b.infos {
for _, oldinfo := range b.peers {
if oldinfo.key == rinfo.key {
rinfo.send = oldinfo.send
rinfo.throttle = oldinfo.throttle
rinfo.bootstrapSend = oldinfo.bootstrapSend
}
}
for _, oldinfo := range b.other {
if oldinfo.key == rinfo.key {
rinfo.send = oldinfo.send
rinfo.throttle = oldinfo.throttle
rinfo.bootstrapSend = oldinfo.bootstrapSend
}
}
// Insert into table
t.insert(&rinfo)
if res.dest == *rinfo.getNodeID() {
t.insert(&rinfo, false)
if res.Dest == *rinfo.getNodeID() {
return
} // No infinite recursions
// ping the nodes we were told about
if len(res.infos) > dht_lookup_size {
if len(res.Infos) > dht_lookup_size {
// Ignore any "extra" lookup results
res.infos = res.infos[:dht_lookup_size]
res.Infos = res.Infos[:dht_lookup_size]
}
for _, info := range res.infos {
bidx, isOK := t.getBucketIndex(info.getNodeID())
if !isOK {
continue
for _, info := range res.Infos {
if dht_firstCloserThanThird(info.getNodeID(), &res.Dest, rinfo.getNodeID()) {
t.addToMill(info, info.getNodeID())
}
b := t.getBucket(bidx)
if b.contains(info) {
continue
} // wait for maintenance cycle to get them
t.addToMill(info, info.getNodeID())
}
}
func (t *dht) lookup(nodeID *NodeID) []*dhtInfo {
// Does a DHT lookup and returns the results, sorted in ascending order of distance from the destination.
func (t *dht) lookup(nodeID *NodeID, allowCloser bool) []*dhtInfo {
// FIXME this allocates a bunch, sorts, and keeps the part it likes
// It would be better to only track the part it likes to begin with
addInfos := func(res []*dhtInfo, infos []*dhtInfo) []*dhtInfo {
@@ -161,7 +196,7 @@ func (t *dht) lookup(nodeID *NodeID) []*dhtInfo {
if info == nil {
panic("Should never happen!")
}
if true || dht_firstCloserThanThird(info.getNodeID(), nodeID, &t.nodeID) {
if allowCloser || dht_firstCloserThanThird(info.getNodeID(), nodeID, &t.nodeID) {
res = append(res, info)
}
}
@@ -170,7 +205,8 @@ func (t *dht) lookup(nodeID *NodeID) []*dhtInfo {
var res []*dhtInfo
for bidx := 0; bidx < t.nBuckets(); bidx++ {
b := t.getBucket(bidx)
res = addInfos(res, b.infos)
res = addInfos(res, b.peers)
res = addInfos(res, b.other)
}
doSort := func(infos []*dhtInfo) {
less := func(i, j int) bool {
@@ -187,35 +223,38 @@ func (t *dht) lookup(nodeID *NodeID) []*dhtInfo {
return res
}
// Gets the bucket for a specified matching prefix length.
func (t *dht) getBucket(bidx int) *bucket {
return &t.buckets_hidden[bidx]
}
// Lists the number of buckets.
func (t *dht) nBuckets() int {
return len(t.buckets_hidden)
}
func (t *dht) insertIfNew(info *dhtInfo) {
//fmt.Println("DEBUG: dht insertIfNew:", info.getNodeID(), info.coords)
// Insert a peer if and only if the bucket doesn't already contain it
// Inserts a node into the DHT if they meet certain requirements.
// In particular, they must either be a peer that's not already in the DHT, or else be someone we should insert into the DHT (see: shouldInsert).
func (t *dht) insertIfNew(info *dhtInfo, isPeer bool) {
// Insert if no "other" entry already exists
nodeID := info.getNodeID()
bidx, isOK := t.getBucketIndex(nodeID)
if !isOK {
return
}
b := t.getBucket(bidx)
if !b.contains(info) {
if (isPeer && !b.containsOther(info)) || t.shouldInsert(info) {
// We've never heard this node before
// TODO is there a better time than "now" to set send/recv to?
// (Is there another "natural" choice that bootstraps faster?)
info.send = time.Now()
info.recv = info.send
t.insert(info)
t.insert(info, isPeer)
}
}
func (t *dht) insert(info *dhtInfo) {
//fmt.Println("DEBUG: dht insert:", info.getNodeID(), info.coords)
// Adds a node to the DHT, possibly removing another node in the process.
func (t *dht) insert(info *dhtInfo, isPeer bool) {
// First update the time on this info
info.recv = time.Now()
// Get the bucket for this node
@@ -225,24 +264,30 @@ func (t *dht) insert(info *dhtInfo) {
return
}
b := t.getBucket(bidx)
if !isPeer && !b.containsOther(info) {
// This is a new entry, give it an old age so it's pinged sooner
// This speeds up bootstrapping
info.recv = info.recv.Add(-time.Hour)
}
if isPeer || info.throttle > time.Minute {
info.throttle = time.Minute
}
// First drop any existing entry from the bucket
b.drop(&info.key)
// Now add to the *end* of the bucket
b.infos = append(b.infos, info)
// Check if the next bucket is non-full and return early if it is
if bidx+1 == t.nBuckets() {
return
}
bnext := t.getBucket(bidx + 1)
if len(bnext.infos) < dht_bucket_size {
if isPeer {
// TODO make sure we don't duplicate peers in b.other too
b.peers = append(b.peers, info)
return
}
b.other = append(b.other, info)
// Shrink from the *front* to required size
for len(b.infos) > dht_bucket_size {
b.infos = b.infos[1:]
for len(b.other) > dht_bucket_size {
b.other = b.other[1:]
}
}
// Gets the bucket index for the bucket where we would put the given NodeID.
func (t *dht) getBucketIndex(nodeID *NodeID) (int, bool) {
for bidx := 0; bidx < t.nBuckets(); bidx++ {
them := nodeID[bidx/8] & (0x80 >> byte(bidx%8))
@@ -254,27 +299,53 @@ func (t *dht) getBucketIndex(nodeID *NodeID) (int, bool) {
return t.nBuckets(), false
}
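The hunk marker above elides the body of the bit comparison; a minimal reconstruction consistent with the them line already shown (an assumption, not necessarily the verbatim source) is:
me := t.nodeID[bidx/8] & (0x80 >> byte(bidx%8))
if them != me {
	// The first bit position where the IDs differ selects the bucket
	return bidx, true
}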
func (b *bucket) contains(ninfo *dhtInfo) bool {
// Helper called by containsPeer, containsOther, and contains.
// Returns true if a node with the same ID *and coords* is already in the given part of the bucket.
func dht_bucket_check(newInfo *dhtInfo, infos []*dhtInfo) bool {
// Compares if key and coords match
for _, info := range b.infos {
if newInfo == nil {
panic("Should never happen")
}
for _, info := range infos {
if info == nil {
panic("Should never happen")
}
if info.key == ninfo.key {
if len(info.coords) != len(ninfo.coords) {
return false
}
for idx := 0; idx < len(info.coords); idx++ {
if info.coords[idx] != ninfo.coords[idx] {
return false
}
if info.key != newInfo.key {
continue
}
if len(info.coords) != len(newInfo.coords) {
continue
}
match := true
for idx := 0; idx < len(info.coords); idx++ {
if info.coords[idx] != newInfo.coords[idx] {
match = false
break
}
}
if match {
return true
}
}
return false
}
// Calls dht_bucket_check over the bucket's peer infos.
func (b *bucket) containsPeer(info *dhtInfo) bool {
return dht_bucket_check(info, b.peers)
}
// Calls dht_bucket_check over the bucket's other infos.
func (b *bucket) containsOther(info *dhtInfo) bool {
return dht_bucket_check(info, b.other)
}
// returns containsPeer || containsOther
func (b *bucket) contains(info *dhtInfo) bool {
return b.containsPeer(info) || b.containsOther(info)
}
// Removes a node with the corresponding key, if any, from a bucket.
func (b *bucket) drop(key *boxPubKey) {
clean := func(infos []*dhtInfo) []*dhtInfo {
cleaned := infos[:0]
@@ -286,21 +357,22 @@ func (b *bucket) drop(key *boxPubKey) {
}
return cleaned
}
b.infos = clean(b.infos)
b.peers = clean(b.peers)
b.other = clean(b.other)
}
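The middle of clean is elided by the hunk marker above; a hedged sketch of the filter loop, consistent with the cleaned := infos[:0] reuse idiom, would be:
for _, info := range infos {
	if info.key != *key {
		// Keep everything except entries matching the dropped key,
		// reusing the original backing array to avoid an allocation
		cleaned = append(cleaned, info)
	}
}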
// Sends a lookup request to the specified node.
func (t *dht) sendReq(req *dhtReq, dest *dhtInfo) {
// Send a dhtReq to the node in dhtInfo
bs := req.encode()
shared := t.core.sessions.getSharedKey(&t.core.boxPriv, &dest.key)
payload, nonce := boxSeal(shared, bs, nil)
p := wire_protoTrafficPacket{
ttl: ^uint64(0),
coords: dest.coords,
toKey: dest.key,
fromKey: t.core.boxPub,
nonce: *nonce,
payload: payload,
Coords: dest.coords,
ToKey: dest.key,
FromKey: t.core.boxPub,
Nonce: *nonce,
Payload: payload,
}
packet := p.encode()
t.core.router.out(packet)
@@ -312,30 +384,34 @@ func (t *dht) sendReq(req *dhtReq, dest *dhtInfo) {
panic("This should never happen")
}
}
reqsToDest[req.dest] = time.Now()
reqsToDest[req.Dest] = time.Now()
}
// Sends a lookup response to the specified node.
func (t *dht) sendRes(res *dhtRes, req *dhtReq) {
// Send a reply for a dhtReq
bs := res.encode()
shared := t.core.sessions.getSharedKey(&t.core.boxPriv, &req.key)
shared := t.core.sessions.getSharedKey(&t.core.boxPriv, &req.Key)
payload, nonce := boxSeal(shared, bs, nil)
p := wire_protoTrafficPacket{
ttl: ^uint64(0),
coords: req.coords,
toKey: req.key,
fromKey: t.core.boxPub,
nonce: *nonce,
payload: payload,
Coords: req.Coords,
ToKey: req.Key,
FromKey: t.core.boxPub,
Nonce: *nonce,
Payload: payload,
}
packet := p.encode()
t.core.router.out(packet)
}
// Returns true if a bucket contains no peers and no other nodes.
func (b *bucket) isEmpty() bool {
return len(b.infos) == 0
return len(b.peers)+len(b.other) == 0
}
// Gets the next node that should be pinged from the bucket.
// There's a cooldown of 6 seconds between ping attempts for each node, to give them time to respond.
// It returns the node that was heard from least recently, subject to that send cooldown.
func (b *bucket) nextToPing() *dhtInfo {
// Check the nodes in the bucket
// Return whichever one responded least recently
@@ -343,23 +419,31 @@ func (b *bucket) nextToPing() *dhtInfo {
// Gives them time to respond
// And time between traffic loss from short term congestion in the network
var toPing *dhtInfo
for _, next := range b.infos {
if time.Since(next.send) < 6*time.Second {
continue
}
if toPing == nil || next.recv.Before(toPing.recv) {
toPing = next
update := func(infos []*dhtInfo) {
for _, next := range infos {
if time.Since(next.send) < 6*time.Second {
continue
}
if toPing == nil || next.recv.Before(toPing.recv) {
toPing = next
}
}
}
update(b.peers)
update(b.other)
return toPing
}
// Returns a useful target address to ask about for pings.
// Equal to our node's ID, except for exactly 1 bit at the bucket index.
func (t *dht) getTarget(bidx int) *NodeID {
targetID := t.nodeID
targetID[bidx/8] ^= 0x80 >> byte(bidx%8)
return &targetID
}
// Sends a ping to a node, or removes the node if it has failed to respond to too many pings.
// If target is nil, we will ask the node about our own NodeID.
func (t *dht) ping(info *dhtInfo, target *NodeID) {
if info.pings > 2 {
bidx, isOK := t.getBucketIndex(info.getNodeID())
@@ -376,15 +460,17 @@ func (t *dht) ping(info *dhtInfo, target *NodeID) {
loc := t.core.switchTable.getLocator()
coords := loc.getCoords()
req := dhtReq{
key: t.core.boxPub,
coords: coords,
dest: *target,
Key: t.core.boxPub,
Coords: coords,
Dest: *target,
}
info.pings++
info.send = time.Now()
t.sendReq(&req, info)
}
// Adds a node info and target to the rumor mill.
// The node will be asked about the target at a later point, if doing so would still be useful at the time.
func (t *dht) addToMill(info *dhtInfo, target *NodeID) {
rumor := dht_rumor{
info: info,
@@ -393,6 +479,11 @@ func (t *dht) addToMill(info *dhtInfo, target *NodeID) {
t.rumorMill = append(t.rumorMill, rumor)
}
// Regular periodic maintenance.
// If the mill is empty, it adds two pings to the rumor mill.
// The first is to the node that responded least recently, provided that it's been at least 1 minute, to make sure we eventually detect and remove unresponsive nodes.
// The second is used for bootstrapping, and attempts to fill some bucket, iterating over buckets and resetting after it hits the last non-empty one.
// If the mill is not empty, it pops nodes from the mill until it finds one that would be useful to ping (see: shouldInsert), and then pings it.
func (t *dht) doMaintenance() {
// First clean up reqs
for key, reqs := range t.reqs {
@@ -423,27 +514,81 @@ func (t *dht) doMaintenance() {
}
}
}
if oldest != nil {
if oldest != nil && time.Since(oldest.recv) > time.Minute {
// Ping the oldest node in the DHT, but don't ping nodes that have been checked within the last minute
t.addToMill(oldest, nil)
} // if the DHT isn't empty
}
// Refresh buckets
if t.offset > last {
t.offset = 0
}
target := t.getTarget(t.offset)
for _, info := range t.lookup(target) {
t.addToMill(info, target)
break
}
t.offset++
func() {
closer := t.lookup(target, false)
for _, info := range closer {
// Throttled ping of a node that's closer to the destination
if time.Since(info.recv) > info.throttle {
t.addToMill(info, target)
t.offset++
info.bootstrapSend = time.Now()
info.throttle *= 2
if info.throttle > time.Minute {
info.throttle = time.Minute
}
return
}
}
if len(closer) == 0 {
// If we don't know of anyone closer at all, then there's a hole in our dht
// Ping the closest node we know and ignore the throttle, to try to fill it
for _, info := range t.lookup(target, true) {
t.addToMill(info, target)
t.offset++
return
}
}
}()
//t.offset++
}
if len(t.rumorMill) > 0 {
for len(t.rumorMill) > 0 {
var rumor dht_rumor
rumor, t.rumorMill = t.rumorMill[0], t.rumorMill[1:]
if rumor.target == rumor.info.getNodeID() {
// Note that the above is a pointer comparison, and target can be nil
// This is only for adding new nodes (learned from other lookups)
// It only makes sense to ping if the node isn't already in the table
if !t.shouldInsert(rumor.info) {
continue
}
}
t.ping(rumor.info, rumor.target)
break
}
}
// Returns true if it would be worth pinging the specified node.
// This requires that the bucket doesn't already contain the node, and that either the bucket isn't full yet or the node is closer to us in keyspace than some other node in that bucket.
func (t *dht) shouldInsert(info *dhtInfo) bool {
bidx, isOK := t.getBucketIndex(info.getNodeID())
if !isOK {
return false
}
b := t.getBucket(bidx)
if b.containsOther(info) {
return false
}
if len(b.other) < dht_bucket_size {
return true
}
for _, other := range b.other {
if dht_firstCloserThanThird(info.getNodeID(), &t.nodeID, other.getNodeID()) {
return true
}
}
return false
}
// Returns true if the keyspace distance between the first and second node is smaller than the keyspace distance between the second and third node.
func dht_firstCloserThanThird(first *NodeID,
second *NodeID,
third *NodeID) bool {
@@ -457,3 +602,22 @@ func dht_firstCloserThanThird(first *NodeID,
}
return false
}
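The comparison itself is elided above; a minimal sketch of a big-endian XOR-metric comparison matching the doc comment (an assumption about the implementation, not the verbatim source) is:
for idx := 0; idx < len(first); idx++ {
	f := first[idx] ^ second[idx] // distance from first to second
	t := third[idx] ^ second[idx] // distance from second to third
	if f == t {
		continue
	}
	return f < t // first differing byte decides which distance is smaller
}
return false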
// Resets the DHT in response to coord changes.
// This empties all buckets, resets the bootstrapping cycle to 0, and empties the rumor mill.
// It adds all old "other" node info to the rumor mill, so they'll be pinged quickly.
// If those nodes haven't also changed coords, then this is a relatively quick way to notify those nodes of our new coords and re-add them to our own DHT if they respond.
func (t *dht) reset() {
// This is mostly so bootstrapping will reset to resend coords into the network
t.offset = 0
t.rumorMill = nil // reset mill
for _, b := range t.buckets_hidden {
b.peers = b.peers[:0]
for _, info := range b.other {
// Add other nodes to the rumor mill so they'll be pinged soon
// This will hopefully tell them our coords and re-learn theirs quickly if they haven't changed
t.addToMill(info, info.getNodeID())
}
b.other = b.other[:0]
}
}

src/yggdrasil/dial.go Normal file

@@ -0,0 +1,58 @@
package yggdrasil
import (
"net"
"time"
)
// wrappedConn implements net.Conn
type wrappedConn struct {
c net.Conn
raddr net.Addr
}
// wrappedAddr implements net.Addr
type wrappedAddr struct {
network string
addr string
}
func (a *wrappedAddr) Network() string {
return a.network
}
func (a *wrappedAddr) String() string {
return a.addr
}
func (c *wrappedConn) Write(data []byte) (int, error) {
return c.c.Write(data)
}
func (c *wrappedConn) Read(data []byte) (int, error) {
return c.c.Read(data)
}
func (c *wrappedConn) SetDeadline(t time.Time) error {
return c.c.SetDeadline(t)
}
func (c *wrappedConn) SetReadDeadline(t time.Time) error {
return c.c.SetReadDeadline(t)
}
func (c *wrappedConn) SetWriteDeadline(t time.Time) error {
return c.c.SetWriteDeadline(t)
}
func (c *wrappedConn) Close() error {
return c.c.Close()
}
func (c *wrappedConn) LocalAddr() net.Addr {
return c.c.LocalAddr()
}
func (c *wrappedConn) RemoteAddr() net.Addr {
return c.raddr
}
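A hypothetical usage sketch (the connection and address values below are illustrative only): wrapping an underlying transport connection so that RemoteAddr reports an overlay address rather than the transport's own:
underlying, _ := net.Dial("tcp", "192.0.2.1:9001") // hypothetical transport peer
conn := &wrappedConn{
	c: underlying,
	raddr: &wrappedAddr{
		network: "yggdrasil",   // hypothetical network name
		addr:    "200:1234::1", // hypothetical overlay address
	},
}
// Reads and writes pass through to the underlying connection,
// but conn.RemoteAddr().String() now reports the overlay address.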

src/yggdrasil/icmpv6.go

@@ -1,18 +1,26 @@
package yggdrasil
// The NDP functions are needed when you are running with a
// TAP adapter - as the operating system expects neighbor solicitations
// for on-link traffic, this goroutine provides them
// The ICMPv6 module implements functions to easily create ICMPv6
// packets. These functions, when mixed with the built-in Go IPv6
// and ICMP libraries, can be used to send control messages back
// to the host. Examples include:
// - NDP messages, when running in TAP mode
// - Packet Too Big messages, when packets exceed the session MTU
// - Destination Unreachable messages, when a session prohibits
// incoming traffic
import "net"
import "golang.org/x/net/ipv6"
import "golang.org/x/net/icmp"
import "encoding/binary"
import "errors"
import (
"encoding/binary"
"errors"
"net"
"golang.org/x/net/icmp"
"golang.org/x/net/ipv6"
)
type macAddress [6]byte
const ETHER = 14
const len_ETHER = 14
type icmpv6 struct {
tun *tunDevice
@@ -39,6 +47,9 @@ func ipv6Header_Marshal(h *ipv6.Header) ([]byte, error) {
return b, nil
}
// Initialises the ICMPv6 module by assigning our link-local IPv6 address and
// our MAC address. ICMPv6 messages will always appear to originate from these
// addresses.
func (i *icmpv6) init(t *tunDevice) {
i.tun = t
@@ -50,6 +61,10 @@ func (i *icmpv6) init(t *tunDevice) {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xFE}
}
// Parses an incoming ICMPv6 packet. The packet provided may be either an
// ethernet frame containing an IP packet, or the IP packet alone. This is
// determined by whether the TUN/TAP adapter is running in TUN (layer 3) or
// TAP (layer 2) mode.
func (i *icmpv6) parse_packet(datain []byte) {
var response []byte
var err error
@@ -69,6 +84,10 @@ func (i *icmpv6) parse_packet(datain []byte) {
i.tun.iface.Write(response)
}
// Unwraps the ethernet headers of an incoming ICMPv6 packet and hands off
// the IP packet to the parse_packet_tun function for further processing.
// A response buffer is also created for the response message, also complete
// with ethernet headers.
func (i *icmpv6) parse_packet_tap(datain []byte) ([]byte, error) {
// Store the peer MAC address
copy(i.peermac[:6], datain[6:12])
@@ -79,13 +98,13 @@ func (i *icmpv6) parse_packet_tap(datain []byte) ([]byte, error) {
}
// Hand over to parse_packet_tun to interpret the IPv6 packet
ipv6packet, err := i.parse_packet_tun(datain[ETHER:])
ipv6packet, err := i.parse_packet_tun(datain[len_ETHER:])
if err != nil {
return nil, err
}
// Create the response buffer
dataout := make([]byte, ETHER+ipv6.HeaderLen+32)
dataout := make([]byte, len_ETHER+ipv6.HeaderLen+32)
// Populate the response ethernet headers
copy(dataout[:6], datain[6:12])
@@ -93,10 +112,14 @@ func (i *icmpv6) parse_packet_tap(datain []byte) ([]byte, error) {
binary.BigEndian.PutUint16(dataout[12:14], uint16(0x86DD))
// Copy the returned packet to our response ethernet frame
copy(dataout[ETHER:], ipv6packet)
copy(dataout[len_ETHER:], ipv6packet)
return dataout, nil
}
// Unwraps the IP headers of an incoming IPv6 packet and performs various
// sanity checks on the packet - i.e. is the packet an ICMPv6 packet, does the
// ICMPv6 message match a known expected type. The relevant handler function
// is then called and a response packet may be returned.
func (i *icmpv6) parse_packet_tun(datain []byte) ([]byte, error) {
// Parse the IPv6 packet headers
ipv6Header, err := ipv6.ParseHeader(datain[:ipv6.HeaderLen])
@@ -130,7 +153,9 @@ func (i *icmpv6) parse_packet_tun(datain []byte) ([]byte, error) {
response, err := i.handle_ndp(datain[ipv6.HeaderLen:])
if err == nil {
// Create our ICMPv6 response
responsePacket, err := i.create_icmpv6_tun(ipv6Header.Src, ipv6.ICMPTypeNeighborAdvertisement, 0,
responsePacket, err := i.create_icmpv6_tun(
ipv6Header.Src, i.mylladdr,
ipv6.ICMPTypeNeighborAdvertisement, 0,
&icmp.DefaultMessageBody{Data: response})
if err != nil {
return nil, err
@@ -147,15 +172,18 @@ func (i *icmpv6) parse_packet_tun(datain []byte) ([]byte, error) {
return nil, errors.New("ICMPv6 type not matched")
}
func (i *icmpv6) create_icmpv6_tap(dstmac macAddress, dst net.IP, mtype ipv6.ICMPType, mcode int, mbody icmp.MessageBody) ([]byte, error) {
// Creates an ICMPv6 packet based on the given icmp.MessageBody and other
// parameters, complete with ethernet and IP headers, which can be written
// directly to a TAP adapter.
func (i *icmpv6) create_icmpv6_tap(dstmac macAddress, dst net.IP, src net.IP, mtype ipv6.ICMPType, mcode int, mbody icmp.MessageBody) ([]byte, error) {
// Pass through to create_icmpv6_tun
ipv6packet, err := i.create_icmpv6_tun(dst, mtype, mcode, mbody)
ipv6packet, err := i.create_icmpv6_tun(dst, src, mtype, mcode, mbody)
if err != nil {
return nil, err
}
// Create the response buffer
dataout := make([]byte, ETHER+len(ipv6packet))
dataout := make([]byte, len_ETHER+len(ipv6packet))
// Populate the response ethernet headers
copy(dataout[:6], dstmac[:6])
@@ -163,11 +191,15 @@ func (i *icmpv6) create_icmpv6_tap(dstmac macAddress, dst net.IP, mtype ipv6.ICM
binary.BigEndian.PutUint16(dataout[12:14], uint16(0x86DD))
// Copy the returned packet to our response ethernet frame
copy(dataout[ETHER:], ipv6packet)
copy(dataout[len_ETHER:], ipv6packet)
return dataout, nil
}
func (i *icmpv6) create_icmpv6_tun(dst net.IP, mtype ipv6.ICMPType, mcode int, mbody icmp.MessageBody) ([]byte, error) {
// Creates an ICMPv6 packet based on the given icmp.MessageBody and other
// parameters, complete with IP headers only, which can be written directly to
// a TUN adapter, or called directly by the create_icmpv6_tap function when
// generating a message for TAP adapters.
func (i *icmpv6) create_icmpv6_tun(dst net.IP, src net.IP, mtype ipv6.ICMPType, mcode int, mbody icmp.MessageBody) ([]byte, error) {
// Create the ICMPv6 message
icmpMessage := icmp.Message{
Type: mtype,
@@ -176,7 +208,7 @@ func (i *icmpv6) create_icmpv6_tun(dst net.IP, mtype ipv6.ICMPType, mcode int, m
}
// Convert the ICMPv6 message into []byte
icmpMessageBuf, err := icmpMessage.Marshal(icmp.IPv6PseudoHeader(i.mylladdr, dst))
icmpMessageBuf, err := icmpMessage.Marshal(icmp.IPv6PseudoHeader(src, dst))
if err != nil {
return nil, err
}
@@ -187,7 +219,7 @@ func (i *icmpv6) create_icmpv6_tun(dst net.IP, mtype ipv6.ICMPType, mcode int, m
NextHeader: 58,
PayloadLen: len(icmpMessageBuf),
HopLimit: 255,
Src: i.mylladdr,
Src: src,
Dst: dst,
}
@@ -206,9 +238,20 @@ func (i *icmpv6) create_icmpv6_tun(dst net.IP, mtype ipv6.ICMPType, mcode int, m
return responsePacket, nil
}
// Generates a response to an NDP discovery packet. This is effectively called
// when the host operating system generates an NDP request for any address in
// the fd00::/8 range, so that the operating system knows to route that traffic
// to the Yggdrasil TAP adapter.
func (i *icmpv6) handle_ndp(in []byte) ([]byte, error) {
// Ignore NDP requests for anything outside of fd00::/8
if in[8] != 0xFD {
var source address
copy(source[:], in[8:])
var snet subnet
copy(snet[:], in[8:])
switch {
case source.isValid():
case snet.isValid():
default:
return nil, errors.New("Not an NDP for fd00::/8")
}

src/yggdrasil/multicast.go Normal file

@@ -0,0 +1,158 @@
package yggdrasil
import (
"fmt"
"net"
"time"
"golang.org/x/net/ipv6"
)
type multicast struct {
core *Core
sock *ipv6.PacketConn
groupAddr string
}
func (m *multicast) init(core *Core) {
m.core = core
m.groupAddr = "[ff02::114]:9001"
// Check if we've been given any expressions
if len(m.core.ifceExpr) == 0 {
return
}
// Ask the system for network interfaces
m.core.log.Println("Found", len(m.interfaces()), "multicast interface(s)")
}
func (m *multicast) start() error {
if len(m.core.ifceExpr) == 0 {
m.core.log.Println("Multicast discovery is disabled")
} else {
m.core.log.Println("Multicast discovery is enabled")
addr, err := net.ResolveUDPAddr("udp", m.groupAddr)
if err != nil {
return err
}
listenString := fmt.Sprintf("[::]:%v", addr.Port)
conn, err := net.ListenPacket("udp6", listenString)
if err != nil {
return err
}
m.sock = ipv6.NewPacketConn(conn)
if err = m.sock.SetControlMessage(ipv6.FlagDst, true); err != nil {
// Windows can't set this flag, so we need to handle it in other ways
}
go m.listen()
go m.announce()
}
return nil
}
func (m *multicast) interfaces() []net.Interface {
// Ask the system for network interfaces
var interfaces []net.Interface
allifaces, err := net.Interfaces()
if err != nil {
panic(err)
}
// Work out which interfaces to announce on
for _, iface := range allifaces {
if iface.Flags&net.FlagUp == 0 {
// Ignore interfaces that are down
continue
}
if iface.Flags&net.FlagMulticast == 0 {
// Ignore non-multicast interfaces
continue
}
if iface.Flags&net.FlagPointToPoint != 0 {
// Ignore point-to-point interfaces
continue
}
for _, expr := range m.core.ifceExpr {
if expr.MatchString(iface.Name) {
interfaces = append(interfaces, iface)
}
}
}
return interfaces
}
func (m *multicast) announce() {
groupAddr, err := net.ResolveUDPAddr("udp6", m.groupAddr)
if err != nil {
panic(err)
}
var anAddr net.TCPAddr
myAddr := m.core.tcp.getAddr()
anAddr.Port = myAddr.Port
destAddr, err := net.ResolveUDPAddr("udp6", m.groupAddr)
if err != nil {
panic(err)
}
for {
for _, iface := range m.interfaces() {
m.sock.JoinGroup(&iface, groupAddr)
addrs, err := iface.Addrs()
if err != nil {
panic(err)
}
for _, addr := range addrs {
addrIP, _, _ := net.ParseCIDR(addr.String())
if addrIP.To4() != nil {
continue
} // IPv6 only
if !addrIP.IsLinkLocalUnicast() {
continue
}
anAddr.IP = addrIP
anAddr.Zone = iface.Name
destAddr.Zone = iface.Name
msg := []byte(anAddr.String())
m.sock.WriteTo(msg, nil, destAddr)
break
}
time.Sleep(time.Second)
}
time.Sleep(time.Second)
}
}
func (m *multicast) listen() {
groupAddr, err := net.ResolveUDPAddr("udp6", m.groupAddr)
if err != nil {
panic(err)
}
bs := make([]byte, 2048)
for {
nBytes, rcm, fromAddr, err := m.sock.ReadFrom(bs)
if err != nil {
panic(err)
}
if rcm != nil {
// Windows can't set the flag needed to return a non-nil value here
// So only make these checks if we get something useful back
// TODO? Skip them always, I'm not sure if they're really needed...
if !rcm.Dst.IsLinkLocalMulticast() {
continue
}
if !rcm.Dst.Equal(groupAddr.IP) {
continue
}
}
anAddr := string(bs[:nBytes])
addr, err := net.ResolveTCPAddr("tcp6", anAddr)
if err != nil {
continue
}
from := fromAddr.(*net.UDPAddr)
if addr.IP.String() != from.IP.String() {
continue
}
addr.Zone = from.Zone
saddr := addr.String()
m.core.tcp.connect(saddr)
}
}

src/yggdrasil/peer.go

@@ -4,108 +4,116 @@ package yggdrasil
// Commented code should be removed
// Live code should be better commented
// FIXME (!) this part may be at least slightly vulnerable to replay attacks
// The switch message part should catch / drop old tstamps
// So the damage is limited
// But you could still mess up msgAnc / msgHops and break some things there
// It needs to ignore messages with a lower seq
// Probably best to start setting seq to a timestamp in that case...
// FIXME (!?) if it takes too long to communicate all the msgHops, then things hit a horizon
// That could happen with a peer over a high-latency link, with many msgHops
// Possible workarounds:
// 1. Pre-emptively send all hops when one is requested, or after any change
// Maybe requires changing how the throttle works and msgHops are saved
// In case some arrive out of order or are dropped
// This is relatively easy to implement, but could be wasteful
// 2. Save your old locator, sigs, etc, so you can respond to older ancs
// And finish requesting an old anc before updating to a new one
// But that may lead to other issues if not done carefully...
import "time"
import "sync"
import "sync/atomic"
import "math"
//import "fmt"
import (
"sync"
"sync/atomic"
"time"
)
// The peers struct represents peers with an active connection.
// Incoming packets are passed to the corresponding peer, which handles them somehow.
// In most cases, this involves passing the packet to the handler for outgoing traffic to another peer.
// In other cases, it's link protocol traffic used to build the spanning tree, in which case this checks signatures and passes the message along to the switch.
type peers struct {
core *Core
mutex sync.Mutex // Synchronize writes to atomic
ports atomic.Value //map[Port]*peer, use CoW semantics
//ports map[Port]*peer
core *Core
mutex sync.Mutex // Synchronize writes to atomic
ports atomic.Value //map[Port]*peer, use CoW semantics
authMutex sync.RWMutex
allowedEncryptionPublicKeys map[boxPubKey]struct{}
}
// Initializes the peers struct.
func (ps *peers) init(c *Core) {
ps.mutex.Lock()
defer ps.mutex.Unlock()
ps.putPorts(make(map[switchPort]*peer))
ps.core = c
ps.allowedEncryptionPublicKeys = make(map[boxPubKey]struct{})
}
// Returns true if an incoming peer connection to a key is allowed, either because the key is in the whitelist or because the whitelist is empty.
func (ps *peers) isAllowedEncryptionPublicKey(box *boxPubKey) bool {
ps.authMutex.RLock()
defer ps.authMutex.RUnlock()
_, isIn := ps.allowedEncryptionPublicKeys[*box]
return isIn || len(ps.allowedEncryptionPublicKeys) == 0
}
// Adds a key to the whitelist.
func (ps *peers) addAllowedEncryptionPublicKey(box *boxPubKey) {
ps.authMutex.Lock()
defer ps.authMutex.Unlock()
ps.allowedEncryptionPublicKeys[*box] = struct{}{}
}
// Removes a key from the whitelist.
func (ps *peers) removeAllowedEncryptionPublicKey(box *boxPubKey) {
ps.authMutex.Lock()
defer ps.authMutex.Unlock()
delete(ps.allowedEncryptionPublicKeys, *box)
}
// Gets the whitelist of allowed keys for incoming connections.
func (ps *peers) getAllowedEncryptionPublicKeys() []boxPubKey {
ps.authMutex.RLock()
defer ps.authMutex.RUnlock()
keys := make([]boxPubKey, 0, len(ps.allowedEncryptionPublicKeys))
for key := range ps.allowedEncryptionPublicKeys {
keys = append(keys, key)
}
return keys
}
// Atomically gets a map[switchPort]*peer of known peers.
func (ps *peers) getPorts() map[switchPort]*peer {
return ps.ports.Load().(map[switchPort]*peer)
}
// Stores a map[switchPort]*peer (note that you should take a mutex before store operations to avoid conflicts with other nodes attempting to read/change/store at the same time).
func (ps *peers) putPorts(ports map[switchPort]*peer) {
ps.ports.Store(ports)
}
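The comment above implies a read-copy-update pattern; newPeer and removePeer below both follow the same shape. A hedged sketch of an insertion (newPort and newPeer are hypothetical names):
ps.mutex.Lock()
defer ps.mutex.Unlock()
oldPorts := ps.getPorts()
newPorts := make(map[switchPort]*peer, len(oldPorts)+1)
for k, v := range oldPorts {
	newPorts[k] = v // copy the old map; concurrent readers keep using it untouched
}
newPorts[newPort] = newPeer // hypothetical new entry
ps.putPorts(newPorts) // atomically publish the new map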
// Information known about a peer, including their box/sig keys, precomputed shared keys (static and ephemeral), a handler for their outgoing traffic, and queue sizes for local backpressure.
type peer struct {
// Rolling approximation of bandwidth, in bps, used by switch, updated by packet sends
// use get/update methods only! (atomic accessors as float64)
bandwidth uint64
queueSize int64 // used to track local backpressure
bytesSent uint64 // To track bandwidth usage for getPeers
bytesRecvd uint64 // To track bandwidth usage for getPeers
// BUG: sync/atomic, 32 bit platforms need the above to be the first element
box boxPubKey
sig sigPubKey
shared boxSharedKey
//in <-chan []byte
//out chan<- []byte
//in func([]byte)
out func([]byte)
core *Core
port switchPort
msgAnc *msgAnnounce
msgHops []*msgHop
myMsg *switchMessage
mySigs []sigInfo
// This is used to limit how often we perform expensive operations
// Specifically, processing switch messages, signing, and verifying sigs
// Resets at the start of each tick
throttle uint8
lastSend time.Time // To throttle sends, use only from linkLoop goroutine
core *Core
port switchPort
box boxPubKey
sig sigPubKey
shared boxSharedKey
linkShared boxSharedKey
firstSeen time.Time // To track uptime for getPeers
linkOut (chan []byte) // used for protocol traffic (to bypass queues)
doSend (chan struct{}) // tell the linkLoop to send a switchMsg
dinfo *dhtInfo // used to keep the DHT working
out func([]byte) // Set up by whatever created the peers struct, used to send packets to other nodes
close func() // Called when a peer is removed, to close the underlying connection, or via admin api
}
const peer_Throttle = 1
func (p *peer) getBandwidth() float64 {
bits := atomic.LoadUint64(&p.bandwidth)
return math.Float64frombits(bits)
// Returns the size of the queue of packets to be sent to the node.
func (p *peer) getQueueSize() int64 {
return atomic.LoadInt64(&p.queueSize)
}
func (p *peer) updateBandwidth(bytes int, duration time.Duration) {
if p == nil {
return
}
for ok := false; !ok; {
oldBits := atomic.LoadUint64(&p.bandwidth)
oldBandwidth := math.Float64frombits(oldBits)
bandwidth := oldBandwidth*7/8 + float64(bytes)/duration.Seconds()
bits := math.Float64bits(bandwidth)
ok = atomic.CompareAndSwapUint64(&p.bandwidth, oldBits, bits)
}
// Used to increment or decrement the queue.
func (p *peer) updateQueueSize(delta int64) {
atomic.AddInt64(&p.queueSize, delta)
}
func (ps *peers) newPeer(box *boxPubKey,
sig *sigPubKey) *peer {
//in <-chan []byte,
//out chan<- []byte) *peer {
// Creates a new peer with the specified box, sig, and linkShared keys, using the lowest unoccupied port number.
func (ps *peers) newPeer(box *boxPubKey, sig *sigPubKey, linkShared *boxSharedKey) *peer {
now := time.Now()
p := peer{box: *box,
sig: *sig,
shared: *getSharedKey(&ps.core.boxPriv, box),
//in: in,
//out: out,
core: ps.core}
sig: *sig,
shared: *getSharedKey(&ps.core.boxPriv, box),
linkShared: *linkShared,
firstSeen: now,
doSend: make(chan struct{}, 1),
core: ps.core}
ps.mutex.Lock()
defer ps.mutex.Unlock()
oldPorts := ps.getPorts()
@@ -124,49 +132,80 @@ func (ps *peers) newPeer(box *boxPubKey,
return &p
}
func (p *peer) linkLoop(in <-chan []byte) {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
p.lastSend = time.Now()
var lastRSeq uint64
// Removes a peer for a given port, if one exists.
func (ps *peers) removePeer(port switchPort) {
if port == 0 {
return
} // Can't remove self peer
ps.core.router.doAdmin(func() {
ps.core.switchTable.unlockedRemovePeer(port)
})
ps.mutex.Lock()
oldPorts := ps.getPorts()
p, isIn := oldPorts[port]
newPorts := make(map[switchPort]*peer)
for k, v := range oldPorts {
newPorts[k] = v
}
delete(newPorts, port)
ps.putPorts(newPorts)
ps.mutex.Unlock()
if isIn {
if p.close != nil {
p.close()
}
close(p.doSend)
}
}
// Sends a notification to each peer that they should send a new switch message.
// Mainly called by the switch after an update.
func (ps *peers) sendSwitchMsgs() {
ports := ps.getPorts()
for _, p := range ports {
if p.port == 0 {
continue
}
p.doSendSwitchMsgs()
}
}
// Sends a notification to the peer's linkLoop to trigger a switchMsg send.
// Mainly called by sendSwitchMsgs or during linkLoop startup.
func (p *peer) doSendSwitchMsgs() {
defer func() { recover() }() // In case there's a race with close(p.doSend)
select {
case p.doSend <- struct{}{}:
default:
}
}
// This must be launched in a separate goroutine by whatever sets up the peer struct.
// It handles link protocol traffic.
func (p *peer) linkLoop() {
go p.doSendSwitchMsgs()
tick := time.NewTicker(time.Second)
defer tick.Stop()
for {
select {
case packet, ok := <-in:
case _, ok := <-p.doSend:
if !ok {
return
}
p.handleLinkTraffic(packet)
case <-ticker.C:
{
p.throttle = 0
if p.port == 0 {
continue
} // Don't send announces on selfInterface
p.myMsg, p.mySigs = p.core.switchTable.createMessage(p.port)
var update bool
switch {
case p.msgAnc == nil:
update = true
case lastRSeq != p.msgAnc.seq:
update = true
case p.msgAnc.rseq != p.myMsg.seq:
update = true
case time.Since(p.lastSend) > 3*time.Second:
update = true
}
if update {
if p.msgAnc != nil {
lastRSeq = p.msgAnc.seq
}
p.lastSend = time.Now()
p.sendSwitchAnnounce()
}
p.sendSwitchMsg()
case <-tick.C:
if p.dinfo != nil {
p.core.dht.peers <- p.dinfo
}
}
}
}
func (p *peer) handlePacket(packet []byte, linkIn chan<- []byte) {
// Called to handle incoming packets.
// Passes the packet to a handler for that packet type.
func (p *peer) handlePacket(packet []byte) {
// FIXME this is off by stream padding and msg length overhead, should be done in tcp.go
atomic.AddUint64(&p.bytesRecvd, uint64(len(packet)))
pType, pTypeLen := wire_decode_uint64(packet)
if pTypeLen == 0 {
return
@@ -177,27 +216,24 @@ func (p *peer) handlePacket(packet []byte, linkIn chan<- []byte) {
case wire_ProtocolTraffic:
p.handleTraffic(packet, pTypeLen)
case wire_LinkProtocolTraffic:
{
select {
case linkIn <- packet:
default:
}
}
default: /*panic(pType) ;*/
return
p.handleLinkTraffic(packet)
default:
util_putBytes(packet)
}
}
// Called to handle traffic or protocolTraffic packets.
// In either case, this reads from the coords of the packet header, does a switch lookup, and forwards to the next node.
func (p *peer) handleTraffic(packet []byte, pTypeLen int) {
ttl, ttlLen := wire_decode_uint64(packet[pTypeLen:])
ttlBegin := pTypeLen
ttlEnd := pTypeLen + ttlLen
coords, coordLen := wire_decode_coords(packet[ttlEnd:])
coordEnd := ttlEnd + coordLen
if coordEnd == len(packet) {
if p.port != 0 && p.dinfo == nil {
// Drop traffic until the peer manages to send us at least one good switchMsg
return
}
coords, coordLen := wire_decode_coords(packet[pTypeLen:])
if coordLen >= len(packet) {
return
} // No payload
toPort, newTTL := p.core.switchTable.lookup(coords, ttl)
toPort := p.core.switchTable.lookup(coords)
if toPort == p.port {
return
}
@@ -205,46 +241,50 @@ func (p *peer) handleTraffic(packet []byte, pTypeLen int) {
if to == nil {
return
}
// This mutates the packet in-place if the length of the TTL changes!
ttlSlice := wire_encode_uint64(newTTL)
newTTLLen := len(ttlSlice)
shift := ttlLen - newTTLLen
copy(packet[shift:], packet[:pTypeLen])
copy(packet[ttlBegin+shift:], ttlSlice)
packet = packet[shift:]
to.sendPacket(packet)
}
// This just calls p.out(packet) for now.
func (p *peer) sendPacket(packet []byte) {
// Is there ever a case where something more complicated is needed?
// What if p.out blocks?
p.out(packet)
}
// This wraps the packet in the inner (ephemeral) and outer (permanent) crypto layers.
// It sends it to p.linkOut, which bypasses the usual packet queues.
func (p *peer) sendLinkPacket(packet []byte) {
bs, nonce := boxSeal(&p.shared, packet, nil)
innerPayload, innerNonce := boxSeal(&p.linkShared, packet, nil)
innerLinkPacket := wire_linkProtoTrafficPacket{
Nonce: *innerNonce,
Payload: innerPayload,
}
outerPayload := innerLinkPacket.encode()
bs, nonce := boxSeal(&p.shared, outerPayload, nil)
linkPacket := wire_linkProtoTrafficPacket{
//toKey: p.box,
//fromKey: p.core.boxPub,
nonce: *nonce,
payload: bs,
Nonce: *nonce,
Payload: bs,
}
packet = linkPacket.encode()
p.sendPacket(packet)
p.linkOut <- packet
}
// Decrypts the outer (permanent) and inner (ephemeral) crypto layers on link traffic.
// Identifies the link traffic type and calls the appropriate handler.
func (p *peer) handleLinkTraffic(bs []byte) {
packet := wire_linkProtoTrafficPacket{}
if !packet.decode(bs) {
return
}
//if packet.toKey != p.core.boxPub {
// return
//}
//if packet.fromKey != p.box {
// return
//}
payload, isOK := boxOpen(&p.shared, packet.payload, &packet.nonce)
outerPayload, isOK := boxOpen(&p.shared, packet.Payload, &packet.Nonce)
if !isOK {
return
}
innerPacket := wire_linkProtoTrafficPacket{}
if !innerPacket.decode(outerPayload) {
return
}
payload, isOK := boxOpen(&p.linkShared, innerPacket.Payload, &innerPacket.Nonce)
if !isOK {
return
}
@@ -253,218 +293,80 @@ func (p *peer) handleLinkTraffic(bs []byte) {
return
}
switch pType {
case wire_SwitchAnnounce:
p.handleSwitchAnnounce(payload)
case wire_SwitchHopRequest:
p.handleSwitchHopRequest(payload)
case wire_SwitchHop:
p.handleSwitchHop(payload)
case wire_SwitchMsg:
p.handleSwitchMsg(payload)
default:
util_putBytes(bs)
}
}
func (p *peer) handleSwitchAnnounce(packet []byte) {
//p.core.log.Println("DEBUG: handleSwitchAnnounce")
anc := msgAnnounce{}
//err := wire_decode_struct(packet, &anc)
//if err != nil { return }
if !anc.decode(packet) {
// Gets a switchMsg from the switch, adds signed next-hop info for this peer, and sends it to them.
func (p *peer) sendSwitchMsg() {
msg := p.core.switchTable.getMsg()
if msg == nil {
return
}
//if p.msgAnc != nil && anc.Seq != p.msgAnc.Seq { p.msgHops = nil }
if p.msgAnc == nil ||
anc.root != p.msgAnc.root ||
anc.tstamp != p.msgAnc.tstamp ||
anc.seq != p.msgAnc.seq {
p.msgHops = nil
}
p.msgAnc = &anc
p.processSwitchMessage()
}
func (p *peer) requestHop(hop uint64) {
//p.core.log.Println("DEBUG requestHop")
req := msgHopReq{}
req.root = p.msgAnc.root
req.tstamp = p.msgAnc.tstamp
req.seq = p.msgAnc.seq
req.hop = hop
packet := req.encode()
bs := getBytesForSig(&p.sig, msg)
msg.Hops = append(msg.Hops, switchMsgHop{
Port: p.port,
Next: p.sig,
Sig: *sign(&p.core.sigPriv, bs),
})
packet := msg.encode()
p.sendLinkPacket(packet)
}
func (p *peer) handleSwitchHopRequest(packet []byte) {
//p.core.log.Println("DEBUG: handleSwitchHopRequest")
if p.throttle > peer_Throttle {
// Handles a switchMsg from the peer, checking signatures and passing good messages to the switch.
// Also creates a dhtInfo struct and arranges for it to be added to the dht (this is how dht bootstrapping begins).
func (p *peer) handleSwitchMsg(packet []byte) {
var msg switchMsg
if !msg.decode(packet) {
return
}
if p.myMsg == nil {
return
if len(msg.Hops) < 1 {
p.core.peers.removePeer(p.port)
}
req := msgHopReq{}
if !req.decode(packet) {
return
}
if req.root != p.myMsg.locator.root {
return
}
if req.tstamp != p.myMsg.locator.tstamp {
return
}
if req.seq != p.myMsg.seq {
return
}
if uint64(len(p.myMsg.locator.coords)) <= req.hop {
return
}
res := msgHop{}
res.root = p.myMsg.locator.root
res.tstamp = p.myMsg.locator.tstamp
res.seq = p.myMsg.seq
res.hop = req.hop
res.port = p.myMsg.locator.coords[res.hop]
sinfo := p.getSig(res.hop)
//p.core.log.Println("DEBUG sig:", sinfo)
res.next = sinfo.next
res.sig = sinfo.sig
packet = res.encode()
p.sendLinkPacket(packet)
}
func (p *peer) handleSwitchHop(packet []byte) {
//p.core.log.Println("DEBUG: handleSwitchHop")
if p.throttle > peer_Throttle {
return
}
if p.msgAnc == nil {
return
}
res := msgHop{}
if !res.decode(packet) {
return
}
if res.root != p.msgAnc.root {
return
}
if res.tstamp != p.msgAnc.tstamp {
return
}
if res.seq != p.msgAnc.seq {
return
}
if res.hop != uint64(len(p.msgHops)) {
return
} // always process in order
loc := switchLocator{coords: make([]switchPort, 0, len(p.msgHops)+1)}
loc.root = res.root
loc.tstamp = res.tstamp
for _, hop := range p.msgHops {
loc.coords = append(loc.coords, hop.port)
}
loc.coords = append(loc.coords, res.port)
thisHopKey := &res.root
if res.hop != 0 {
thisHopKey = &p.msgHops[res.hop-1].next
}
bs := getBytesForSig(&res.next, &loc)
if p.core.sigs.check(thisHopKey, &res.sig, bs) {
p.msgHops = append(p.msgHops, &res)
p.processSwitchMessage()
} else {
p.throttle++
}
}
func (p *peer) processSwitchMessage() {
//p.core.log.Println("DEBUG: processSwitchMessage")
if p.throttle > peer_Throttle {
return
}
if p.msgAnc == nil {
return
}
if uint64(len(p.msgHops)) < p.msgAnc.len {
p.requestHop(uint64(len(p.msgHops)))
return
}
p.throttle++
if p.msgAnc.len != uint64(len(p.msgHops)) {
return
}
msg := switchMessage{}
coords := make([]switchPort, 0, len(p.msgHops))
sigs := make([]sigInfo, 0, len(p.msgHops))
for idx, hop := range p.msgHops {
// Consistency checks, should be redundant (already checked these...)
if hop.root != p.msgAnc.root {
return
var loc switchLocator
prevKey := msg.Root
for idx, hop := range msg.Hops {
// Check signatures and collect coords for dht
sigMsg := msg
sigMsg.Hops = msg.Hops[:idx]
loc.coords = append(loc.coords, hop.Port)
bs := getBytesForSig(&hop.Next, &sigMsg)
if !p.core.sigs.check(&prevKey, &hop.Sig, bs) {
p.core.peers.removePeer(p.port)
}
if hop.tstamp != p.msgAnc.tstamp {
return
}
if hop.seq != p.msgAnc.seq {
return
}
if hop.hop != uint64(idx) {
return
}
coords = append(coords, hop.port)
sigs = append(sigs, sigInfo{next: hop.next, sig: hop.sig})
prevKey = hop.Next
}
msg.from = p.sig
msg.locator.root = p.msgAnc.root
msg.locator.tstamp = p.msgAnc.tstamp
msg.locator.coords = coords
msg.seq = p.msgAnc.seq
//msg.RSeq = p.msgAnc.RSeq
//msg.Degree = p.msgAnc.Deg
p.core.switchTable.handleMessage(&msg, p.port, sigs)
if len(coords) == 0 {
p.core.switchTable.handleMsg(&msg, p.port)
if !p.core.switchTable.checkRoot(&msg) {
// Bad switch message
// Stop forwarding traffic from it
// Stop refreshing it in the DHT
p.dinfo = nil
return
}
// Reuse locator, set the coords to the peer's coords, to use in dht
msg.locator.coords = coords[:len(coords)-1]
// Pass a message to the dht informing it that this peer (still) exists
loc.coords = loc.coords[:len(loc.coords)-1]
dinfo := dhtInfo{
key: p.box,
coords: msg.locator.getCoords(),
coords: loc.getCoords(),
}
p.core.dht.peers <- &dinfo
p.dinfo = &dinfo
}
func (p *peer) sendSwitchAnnounce() {
anc := msgAnnounce{}
anc.root = p.myMsg.locator.root
anc.tstamp = p.myMsg.locator.tstamp
anc.seq = p.myMsg.seq
anc.len = uint64(len(p.myMsg.locator.coords))
//anc.Deg = p.myMsg.Degree
if p.msgAnc != nil {
anc.rseq = p.msgAnc.seq
// This generates the bytes that we sign or check the signature of for a switchMsg.
// It begins with the next node's key, followed by the root and the timestamp, followed by the coords being advertised to the next node.
func getBytesForSig(next *sigPubKey, msg *switchMsg) []byte {
var loc switchLocator
for _, hop := range msg.Hops {
loc.coords = append(loc.coords, hop.Port)
}
packet := anc.encode()
p.sendLinkPacket(packet)
}
func (p *peer) getSig(hop uint64) sigInfo {
//p.core.log.Println("DEBUG getSig:", len(p.mySigs), hop)
if hop < uint64(len(p.mySigs)) {
return p.mySigs[hop]
}
bs := getBytesForSig(&p.sig, &p.myMsg.locator)
sig := sigInfo{}
sig.next = p.sig
sig.sig = *sign(&p.core.sigPriv, bs)
p.mySigs = append(p.mySigs, sig)
//p.core.log.Println("DEBUG sig bs:", bs)
return sig
}
func getBytesForSig(next *sigPubKey, loc *switchLocator) []byte {
//bs, err := wire_encode_locator(loc)
//if err != nil { panic(err) }
bs := append([]byte(nil), next[:]...)
bs = append(bs, wire_encode_locator(loc)...)
//bs := wire_encode_locator(loc)
//bs = append(next[:], bs...)
bs = append(bs, msg.Root[:]...)
bs = append(bs, wire_encode_uint64(wire_intToUint(msg.TStamp))...)
bs = append(bs, wire_encode_coords(loc.getCoords())...)
return bs
}

src/yggdrasil/release.go Normal file

@@ -0,0 +1,14 @@
// +build !debug
package yggdrasil
import (
"errors"
"log"
)
// Starts the function profiler. This is only supported when built with
// '-tags debug'.
func StartProfiler(_ *log.Logger) error {
return errors.New("Release builds do not support -pprof, build using '-tags debug'")
}

src/yggdrasil/router.go

@@ -22,13 +22,15 @@ package yggdrasil
// The packet is passed to the session, which decrypts it, router.recvPacket
// The router then runs some sanity checks before passing it to the tun
import "time"
import "golang.org/x/net/icmp"
import "golang.org/x/net/ipv6"
import (
"time"
//import "fmt"
//import "net"
"golang.org/x/net/icmp"
"golang.org/x/net/ipv6"
)
// The router struct has channels to/from the tun/tap device and a self peer (0), which is how messages are passed between this node and the peers/switch layer.
// The router's mainLoop goroutine is responsible for managing all information related to the dht, searches, and crypto sessions.
type router struct {
core *Core
addr address
@@ -40,11 +42,12 @@ type router struct {
admin chan func() // pass a lambda for the admin socket to query stuff
}
// Initializes the router struct, which includes setting up channels to/from the tun/tap.
func (r *router) init(core *Core) {
r.core = core
r.addr = *address_addrForNodeID(&r.core.dht.nodeID)
in := make(chan []byte, 32) // TODO something better than this...
p := r.core.peers.newPeer(&r.core.boxPub, &r.core.sigPub) //, out, in)
in := make(chan []byte, 32) // TODO something better than this...
p := r.core.peers.newPeer(&r.core.boxPub, &r.core.sigPub, &boxSharedKey{})
p.out = func(packet []byte) {
// This is to make very sure it never blocks
select {
@@ -55,7 +58,7 @@ func (r *router) init(core *Core) {
}
}
r.in = in
r.out = func(packet []byte) { p.handlePacket(packet, nil) } // The caller is responsible for go-ing if it needs to not block
r.out = func(packet []byte) { p.handlePacket(packet) } // The caller is responsible for go-ing if it needs to not block
recv := make(chan []byte, 32)
send := make(chan []byte, 32)
r.recv = recv
@@ -64,9 +67,20 @@ func (r *router) init(core *Core) {
r.core.tun.send = send
r.reset = make(chan struct{}, 1)
r.admin = make(chan func())
go r.mainLoop()
// go r.mainLoop()
}
// Starts the mainLoop goroutine.
func (r *router) start() error {
r.core.log.Println("Starting router")
go r.mainLoop()
return nil
}
// Takes traffic from the tun/tap and passes it to router.send, or from r.in and handles incoming traffic.
// Also adds new peer info to the DHT.
// Also resets the DHT and sessions in the event of a coord change.
// Also does periodic maintenance stuff.
func (r *router) mainLoop() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
@@ -77,12 +91,15 @@ func (r *router) mainLoop() {
case p := <-r.send:
r.sendPacket(p)
case info := <-r.core.dht.peers:
r.core.dht.insert(info) //r.core.dht.insertIfNew(info)
r.core.dht.insertIfNew(info, false) // Insert as a normal node
r.core.dht.insertIfNew(info, true) // Insert as a peer
case <-r.reset:
r.core.sessions.resetInits()
r.core.dht.reset()
case <-ticker.C:
{
// Any periodic maintenance stuff goes here
r.core.switchTable.doMaintenance()
r.core.dht.doMaintenance()
util_getBytes() // To slowly drain things
}
@@ -92,6 +109,11 @@ func (r *router) mainLoop() {
}
}
// Checks a packet's to/from address to make sure it's in the allowed range.
// If a session to the destination exists, gets the session and passes the packet to it.
// If no session exists, it triggers (or continues) a search.
// If the session hasn't responded recently, it triggers a ping or search to keep things alive or deal with broken coords *relatively* quickly.
// It also deals with oversized packets if there are MTU issues by calling into icmpv6.go to spoof PacketTooBig traffic, or DestinationUnreachable if the other side has their tun/tap disabled.
func (r *router) sendPacket(bs []byte) {
if len(bs) < 40 {
panic("Tried to send a packet shorter than a header...")
@@ -120,12 +142,12 @@ func (r *router) sendPacket(bs []byte) {
}
sinfo, isIn := r.core.searches.searches[*nodeID]
if !isIn {
sinfo = r.core.searches.createSearch(nodeID, mask)
sinfo = r.core.searches.newIterSearch(nodeID, mask)
}
if packet != nil {
sinfo.packet = packet
}
r.core.searches.sendSearch(sinfo)
r.core.searches.continueSearch(sinfo)
}
var sinfo *sessionInfo
var isIn bool
@@ -140,13 +162,53 @@ func (r *router) sendPacket(bs []byte) {
// No or uninitialized session, so we need to search first
doSearch(bs)
case time.Since(sinfo.time) > 6*time.Second:
// We haven't heard from the dest in a while; they may have changed coords
// Maybe the connection is idle, or maybe one of us changed coords
// Try searching to either ping them (a little overhead) or fix the coords
doSearch(nil)
fallthrough
//default: go func() { sinfo.send<-bs }()
if sinfo.time.Before(sinfo.pingTime) && time.Since(sinfo.pingTime) > 6*time.Second {
// We haven't heard from the dest in a while
// We tried pinging but didn't get a response
// They may have changed coords
// Try searching to discover new coords
// Note that search spam is throttled internally
doSearch(nil)
} else {
// We haven't heard about the dest in a while
now := time.Now()
if !sinfo.time.Before(sinfo.pingTime) {
// Update pingTime to start the clock for searches (above)
sinfo.pingTime = now
}
if time.Since(sinfo.pingSend) > time.Second {
// Send at most 1 ping per second
sinfo.pingSend = now
r.core.sessions.sendPingPong(sinfo, false)
}
}
fallthrough // Also send the packet
default:
// Drop packets if the session MTU is 0 - this means that one or other
// side probably has their TUN adapter disabled
if sinfo.getMTU() == 0 {
// Get the size of the oversized payload, up to a max of 900 bytes
window := 900
if len(bs) < window {
window = len(bs)
}
// Create the Destination Unreachable response
ptb := &icmp.DstUnreach{
Data: bs[:window],
}
// Create the ICMPv6 response from it
icmpv6Buf, err := r.core.tun.icmpv6.create_icmpv6_tun(
bs[8:24], bs[24:40],
ipv6.ICMPTypeDestinationUnreachable, 1, ptb)
if err == nil {
r.recv <- icmpv6Buf
}
// Don't continue - drop the packet
return
}
// Generate an ICMPv6 Packet Too Big for packets larger than session MTU
if len(bs) > int(sinfo.getMTU()) {
// Get the size of the oversized payload, up to a max of 900 bytes
@@ -162,7 +224,9 @@ func (r *router) sendPacket(bs []byte) {
}
// Create the ICMPv6 response from it
icmpv6Buf, err := r.core.tun.icmpv6.create_icmpv6_tun(bs[8:24], ipv6.ICMPTypePacketTooBig, 0, ptb)
icmpv6Buf, err := r.core.tun.icmpv6.create_icmpv6_tun(
bs[8:24], bs[24:40],
ipv6.ICMPTypePacketTooBig, 0, ptb)
if err == nil {
r.recv <- icmpv6Buf
}
@@ -170,17 +234,14 @@ func (r *router) sendPacket(bs []byte) {
// Don't continue - drop the packet
return
}
select {
case sinfo.send <- bs:
default:
util_putBytes(bs)
}
sinfo.send <- bs
}
}
// Called for incoming traffic by the session worker for that connection.
// Checks that the IP address is correct (matches the session) and passes the packet to the tun/tap.
func (r *router) recvPacket(bs []byte, theirAddr *address, theirSubnet *subnet) {
// Note: called directly by the session worker, not the router goroutine
//fmt.Println("Recv packet")
if len(bs) < 24 {
util_putBytes(bs)
return
@@ -200,6 +261,7 @@ func (r *router) recvPacket(bs []byte, theirAddr *address, theirSubnet *subnet)
r.recv <- bs
}
// Checks incoming traffic type and passes it to the appropriate handler.
func (r *router) handleIn(packet []byte) {
pType, pTypeLen := wire_decode_uint64(packet)
if pTypeLen == 0 {
@@ -210,28 +272,26 @@ func (r *router) handleIn(packet []byte) {
r.handleTraffic(packet)
case wire_ProtocolTraffic:
r.handleProto(packet)
default: /*panic("Should not happen in testing") ;*/
default:
}
}
// Handles incoming traffic, i.e. encapsulated ordinary IPv6 packets.
// Passes them to the crypto session worker to be decrypted and sent to the tun/tap.
func (r *router) handleTraffic(packet []byte) {
defer util_putBytes(packet)
p := wire_trafficPacket{}
if !p.decode(packet) {
return
}
sinfo, isIn := r.core.sessions.getSessionForHandle(&p.handle)
sinfo, isIn := r.core.sessions.getSessionForHandle(&p.Handle)
if !isIn {
return
}
//go func () { sinfo.recv<-&p }()
select {
case sinfo.recv <- &p:
default:
util_putBytes(p.payload)
}
sinfo.recv <- &p
}
// Handles protocol traffic by decrypting it, checking its type, and passing it to the appropriate handler for that traffic type.
func (r *router) handleProto(packet []byte) {
// First parse the packet
p := wire_protoTrafficPacket{}
@@ -240,14 +300,13 @@ func (r *router) handleProto(packet []byte) {
}
// Now try to open the payload
var sharedKey *boxSharedKey
//var theirPermPub *boxPubKey
if p.toKey == r.core.boxPub {
if p.ToKey == r.core.boxPub {
// Try to open using our permanent key
sharedKey = r.core.sessions.getSharedKey(&r.core.boxPriv, &p.fromKey)
sharedKey = r.core.sessions.getSharedKey(&r.core.boxPriv, &p.FromKey)
} else {
return
}
bs, isOK := boxOpen(sharedKey, p.payload, &p.nonce)
bs, isOK := boxOpen(sharedKey, p.Payload, &p.Nonce)
if !isOK {
return
}
@@ -258,78 +317,58 @@ func (r *router) handleProto(packet []byte) {
if bsTypeLen == 0 {
return
}
//fmt.Println("RECV bytes:", bs)
switch bsType {
case wire_SessionPing:
r.handlePing(bs, &p.fromKey)
r.handlePing(bs, &p.FromKey)
case wire_SessionPong:
r.handlePong(bs, &p.fromKey)
r.handlePong(bs, &p.FromKey)
case wire_DHTLookupRequest:
r.handleDHTReq(bs, &p.fromKey)
r.handleDHTReq(bs, &p.FromKey)
case wire_DHTLookupResponse:
r.handleDHTRes(bs, &p.fromKey)
case wire_SearchRequest:
r.handleSearchReq(bs)
case wire_SearchResponse:
r.handleSearchRes(bs)
default: /*panic("Should not happen in testing") ;*/
return
r.handleDHTRes(bs, &p.FromKey)
default:
util_putBytes(packet)
}
}
// Decodes session pings from wire format and passes them to sessions.handlePing where they either create or update a session.
func (r *router) handlePing(bs []byte, fromKey *boxPubKey) {
ping := sessionPing{}
if !ping.decode(bs) {
return
}
ping.sendPermPub = *fromKey
ping.SendPermPub = *fromKey
r.core.sessions.handlePing(&ping)
}
// Handles session pongs (which are really pings with an extra flag to prevent acknowledgement).
func (r *router) handlePong(bs []byte, fromKey *boxPubKey) {
r.handlePing(bs, fromKey)
}
// Decodes dht requests and passes them to dht.handleReq to trigger a lookup/response.
func (r *router) handleDHTReq(bs []byte, fromKey *boxPubKey) {
req := dhtReq{}
if !req.decode(bs) {
return
}
//if req.key != *fromKey {
// return
//}
req.key = *fromKey
req.Key = *fromKey
r.core.dht.handleReq(&req)
}
// Decodes dht responses and passes them to dht.handleRes to update the DHT table and further pass them to the search code (if applicable).
func (r *router) handleDHTRes(bs []byte, fromKey *boxPubKey) {
res := dhtRes{}
if !res.decode(bs) {
return
}
//if res.key != *fromKey {
// return
//}
res.key = *fromKey
res.Key = *fromKey
r.core.dht.handleRes(&res)
}
func (r *router) handleSearchReq(bs []byte) {
req := searchReq{}
if !req.decode(bs) {
return
}
r.core.searches.handleSearchReq(&req)
}
func (r *router) handleSearchRes(bs []byte) {
res := searchRes{}
if !res.decode(bs) {
return
}
r.core.searches.handleSearchRes(&res)
}
// Passed a function to call.
// This will send the function to r.admin and block until it finishes.
// It's used by the admin socket to ask the router mainLoop goroutine about information in the session or dht structs, which cannot be read safely from outside that goroutine.
func (r *router) doAdmin(f func()) {
// Pass this a function that needs to be run by the router's main goroutine
// It will pass the function to the router and wait for the router to finish
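// The diff truncates the body here; a minimal sketch of the pattern the
// comments describe (assuming r.admin is a chan func(), as its other uses
// in this diff suggest) would be:
//   done := make(chan struct{})
//   r.admin <- func() { f(); close(done) }
//   <-done
// i.e. wrap the caller's function so it signals completion, hand it to the
// router goroutine, and block until that signal arrives.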


@@ -4,39 +4,50 @@ package yggdrasil
// The basic idea is as follows:
// We may know a NodeID (with a mask) and want to connect
// We forward a searchReq packet through the dht
// The last person in the dht will respond with a searchRes
// If the responder's NodeID is close enough to the requested key, it matches
// The "close enough" is handled by a bitmask, set when the request is sent
// For testing in the sim, it must match exactly
// For the real world, the mask would need to map it to the desired IPv6
// This is also where we store the temporary keys used to send a request
// Would go in sessions, but can't open one without knowing perm key
// This is largely to avoid using an iterative DHT lookup approach
// The iterative parallel lookups from kad can skip over some DHT blackholes
// This hides bugs, which I don't want to do right now
// We begin a search by initializing a list of all nodes in our DHT, sorted by closest to the destination
// We then iteratively ping nodes from the search, marking each pinged node as visited
// We add any unvisited nodes from ping responses to the search, truncating to some maximum search size
// This stops when we either run out of nodes to ping (we hit a dead end where we can't make progress without going back), or we reach the destination
// A new search packet is sent immediately after receiving a response
// A new search packet is sent periodically, once per second, in case a packet was dropped (this slowly causes the search to become parallel if the search doesn't timeout but also doesn't finish within 1 second for whatever reason)
import "time"
import (
"sort"
"time"
)
//import "fmt"
// This defines the maximum number of dhtInfo that we keep track of for nodes to query in an ongoing search.
const search_MAX_SEARCH_SIZE = 16
// This defines the time after which we send a new search packet.
// Search packets are sent automatically immediately after a response is received.
// So this allows for timeouts and for long searches to become increasingly parallel.
const search_RETRY_TIME = time.Second
// Information about an ongoing search.
// Includes the target NodeID, the bitmask to match it to an IP, and the list of nodes to visit / already visited.
type searchInfo struct {
dest *NodeID
mask *NodeID
time time.Time
packet []byte
dest NodeID
mask NodeID
time time.Time
packet []byte
toVisit []*dhtInfo
visited map[NodeID]bool
}
// This stores a map of active searches.
type searches struct {
core *Core
searches map[NodeID]*searchInfo
}
// Initializes the searches struct.
func (s *searches) init(core *Core) {
s.core = core
s.searches = make(map[NodeID]*searchInfo)
}
// Creates a new search info, adds it to the searches struct, and returns a pointer to the info.
func (s *searches) createSearch(dest *NodeID, mask *NodeID) *searchInfo {
now := time.Now()
for dest, sinfo := range s.searches {
@@ -45,8 +56,8 @@ func (s *searches) createSearch(dest *NodeID, mask *NodeID) *searchInfo {
}
}
info := searchInfo{
dest: dest,
mask: mask,
dest: *dest,
mask: *mask,
time: now.Add(-time.Second),
}
s.searches[*dest] = &info
@@ -55,124 +66,134 @@ func (s *searches) createSearch(dest *NodeID, mask *NodeID) *searchInfo {
////////////////////////////////////////////////////////////////////////////////
type searchReq struct {
key boxPubKey // Who I am
coords []byte // Where I am
dest NodeID // Who I'm trying to connect to
}
type searchRes struct {
key boxPubKey // Who I am
coords []byte // Where I am
dest NodeID // Who I was asked about
}
func (s *searches) sendSearch(info *searchInfo) {
now := time.Now()
if now.Sub(info.time) < time.Second {
// Checks if there's an ongoing search related to a dhtRes.
// If there is, it adds the response info to the search and triggers a new search step.
// If there's no ongoing search, or if the dhtRes finished the search (it was from the target node), then don't do anything more.
func (s *searches) handleDHTRes(res *dhtRes) {
sinfo, isIn := s.searches[res.Dest]
if !isIn || s.checkDHTRes(sinfo, res) {
// Either we don't recognize this search, or we just finished it
return
} else {
// Add to the search and continue
s.addToSearch(sinfo, res)
s.doSearchStep(sinfo)
}
loc := s.core.switchTable.getLocator()
coords := loc.getCoords()
req := searchReq{
key: s.core.boxPub,
coords: coords,
dest: *info.dest,
}
info.time = time.Now()
s.handleSearchReq(&req)
}
func (s *searches) handleSearchReq(req *searchReq) {
lookup := s.core.dht.lookup(&req.dest)
sent := false
//fmt.Println("DEBUG len:", len(lookup))
for _, info := range lookup {
//fmt.Println("DEBUG lup:", info.getNodeID())
if dht_firstCloserThanThird(info.getNodeID(),
&req.dest,
&s.core.dht.nodeID) {
s.forwardSearch(req, info)
sent = true
break
// Adds the information from a dhtRes to an ongoing search.
// Info about a node that has already been visited is not re-added to the search.
// Duplicate information about nodes in toVisit is deduplicated (the newest information is kept).
// The toVisit list is sorted in ascending order of keyspace distance from the destination.
func (s *searches) addToSearch(sinfo *searchInfo, res *dhtRes) {
// Add responses to toVisit if closer to dest than the res node
from := dhtInfo{key: res.Key, coords: res.Coords}
for _, info := range res.Infos {
if sinfo.visited[*info.getNodeID()] {
continue
}
if dht_firstCloserThanThird(info.getNodeID(), &res.Dest, from.getNodeID()) {
sinfo.toVisit = append(sinfo.toVisit, info)
}
}
if !sent {
s.sendSearchRes(req)
// Deduplicate
vMap := make(map[NodeID]*dhtInfo)
for _, info := range sinfo.toVisit {
vMap[*info.getNodeID()] = info
}
sinfo.toVisit = sinfo.toVisit[:0]
for _, info := range vMap {
sinfo.toVisit = append(sinfo.toVisit, info)
}
// Sort
sort.SliceStable(sinfo.toVisit, func(i, j int) bool {
return dht_firstCloserThanThird(sinfo.toVisit[i].getNodeID(), &res.Dest, sinfo.toVisit[j].getNodeID())
})
// Truncate to some maximum size
if len(sinfo.toVisit) > search_MAX_SEARCH_SIZE {
sinfo.toVisit = sinfo.toVisit[:search_MAX_SEARCH_SIZE]
}
}
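// Editor's sketch of the keyspace comparison used by the sort above (an
// assumption here; see dht.go for the authoritative definition): it reports
// whether `first` is closer to `second` than `third` is, using a big-endian
// XOR metric over the NodeID bytes.
func dht_firstCloserThanThird_sketch(first, second, third *NodeID) bool {
for idx := 0; idx < NodeIDLen; idx++ {
f := first[idx] ^ second[idx] // first's distance contribution at this byte
t := third[idx] ^ second[idx] // third's distance contribution at this byte
if f != t {
return f < t // the most significant differing byte decides
}
}
return false // equal distances, so not strictly closer
}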
func (s *searches) forwardSearch(req *searchReq, next *dhtInfo) {
//fmt.Println("DEBUG fwd:", req.dest, next.getNodeID())
bs := req.encode()
shared := s.core.sessions.getSharedKey(&s.core.boxPriv, &next.key)
payload, nonce := boxSeal(shared, bs, nil)
p := wire_protoTrafficPacket{
ttl: ^uint64(0),
coords: next.coords,
toKey: next.key,
fromKey: s.core.boxPub,
nonce: *nonce,
payload: payload,
// If there are no nodes left toVisit, then this cleans up the search.
// Otherwise, it pops the closest node to the destination (in keyspace) off of the toVisit list and sends a dht ping.
func (s *searches) doSearchStep(sinfo *searchInfo) {
if len(sinfo.toVisit) == 0 {
// Dead end, do cleanup
delete(s.searches, sinfo.dest)
return
} else {
// Send to the next search target
var next *dhtInfo
next, sinfo.toVisit = sinfo.toVisit[0], sinfo.toVisit[1:]
var oldPings int
oldPings, next.pings = next.pings, 0
s.core.dht.ping(next, &sinfo.dest)
next.pings = oldPings // Don't evict a node for searching with it too much
sinfo.visited[*next.getNodeID()] = true
}
packet := p.encode()
s.core.router.out(packet)
}
func (s *searches) sendSearchRes(req *searchReq) {
//fmt.Println("DEBUG res:", req.dest, s.core.dht.nodeID)
loc := s.core.switchTable.getLocator()
coords := loc.getCoords()
res := searchRes{
key: s.core.boxPub,
coords: coords,
dest: req.dest,
}
bs := res.encode()
shared := s.core.sessions.getSharedKey(&s.core.boxPriv, &req.key)
payload, nonce := boxSeal(shared, bs, nil)
p := wire_protoTrafficPacket{
ttl: ^uint64(0),
coords: req.coords,
toKey: req.key,
fromKey: s.core.boxPub,
nonce: *nonce,
payload: payload,
}
packet := p.encode()
s.core.router.out(packet)
}
func (s *searches) handleSearchRes(res *searchRes) {
info, isIn := s.searches[res.dest]
if !isIn {
// If we've recently sent a ping for this search, do nothing.
// Otherwise, doSearchStep and schedule another continueSearch to happen after search_RETRY_TIME.
func (s *searches) continueSearch(sinfo *searchInfo) {
if time.Since(sinfo.time) < search_RETRY_TIME {
return
}
them := getNodeID(&res.key)
sinfo.time = time.Now()
s.doSearchStep(sinfo)
// In case the search dies, try to spawn another thread later
// Note that this will spawn multiple parallel searches as time passes
// Any that die aren't restarted, but a new one will start later
retryLater := func() {
newSearchInfo := s.searches[sinfo.dest]
if newSearchInfo != sinfo {
return
}
s.continueSearch(sinfo)
}
go func() {
time.Sleep(search_RETRY_TIME)
s.core.router.admin <- retryLater
}()
}
// Calls createSearch, and initializes the iterative search parts of the struct before returning it.
func (s *searches) newIterSearch(dest *NodeID, mask *NodeID) *searchInfo {
sinfo := s.createSearch(dest, mask)
sinfo.toVisit = s.core.dht.lookup(dest, true)
sinfo.visited = make(map[NodeID]bool)
return sinfo
}
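// Editor's sketch of how a caller (hypothetical helper, not in this diff)
// would drive a search: create the iterative state, buffer the packet that
// triggered it, then let continueSearch send the first ping and schedule
// retries until handleDHTRes either finishes or abandons the search.
func (s *searches) startSearch_sketch(dest *NodeID, mask *NodeID, packet []byte) {
sinfo := s.newIterSearch(dest, mask)
sinfo.packet = packet // sent automatically once the session comes up
s.continueSearch(sinfo)
}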
// Checks if a dhtRes is good (called by handleDHTRes).
// If the response is from the target, get/create a session, trigger a session ping, and return true.
// Otherwise return false.
func (s *searches) checkDHTRes(info *searchInfo, res *dhtRes) bool {
them := getNodeID(&res.Key)
var destMasked NodeID
var themMasked NodeID
for idx := 0; idx < NodeIDLen; idx++ {
destMasked[idx] = info.dest[idx] & info.mask[idx]
themMasked[idx] = them[idx] & info.mask[idx]
}
//fmt.Println("DEBUG search res1:", themMasked, destMasked)
//fmt.Println("DEBUG search res2:", *them, *info.dest, *info.mask)
if themMasked != destMasked {
return
return false
}
// They match, so create a session and send a sessionRequest
sinfo, isIn := s.core.sessions.getByTheirPerm(&res.key)
sinfo, isIn := s.core.sessions.getByTheirPerm(&res.Key)
if !isIn {
sinfo = s.core.sessions.createSession(&res.key)
_, isIn := s.core.sessions.getByTheirPerm(&res.key)
sinfo = s.core.sessions.createSession(&res.Key)
_, isIn := s.core.sessions.getByTheirPerm(&res.Key)
if !isIn {
panic("This should never happen")
}
}
// FIXME (!) replay attacks could mess with coords? Give it a handle (tstamp)?
sinfo.coords = res.coords
sinfo.coords = res.Coords
sinfo.packet = info.packet
s.core.sessions.ping(sinfo)
// Cleanup
delete(s.searches, res.dest)
delete(s.searches, res.Dest)
return true
}


@@ -6,6 +6,8 @@ package yggdrasil
import "time"
// All the information we know about an active session.
// This includes coords, permanent and ephemeral keys, handles and nonces, various sorts of timing information for timeout and maintenance, and some metadata for the admin API.
type sessionInfo struct {
core *Core
theirAddr address
@@ -21,6 +23,7 @@ type sessionInfo struct {
myNonce boxNonce
theirMTU uint16
myMTU uint16
wasMTUFixed bool // Was the MTU fixed by a receive error?
time time.Time // Time we last received a packet
coords []byte // coords of destination
packet []byte // a buffered packet, sent immediately on ping/pong
@@ -28,51 +31,63 @@ type sessionInfo struct {
send chan []byte
recv chan *wire_trafficPacket
nonceMask uint64
tstamp int64 // tstamp from their last session ping, replay attack mitigation
tstamp int64 // tstamp from their last session ping, replay attack mitigation
mtuTime time.Time // time myMTU was last changed
pingTime time.Time // time the first ping was sent since the last received packet
pingSend time.Time // time the last ping was sent
bytesSent uint64 // Bytes of real traffic sent in this session
bytesRecvd uint64 // Bytes of real traffic received in this session
}
// Represents a session ping/pong packet, and includes information like public keys, a session handle, coords, a timestamp to prevent replays, and the tun/tap MTU.
type sessionPing struct {
sendPermPub boxPubKey // Sender's permanent key
handle handle // Random number to ID session
sendSesPub boxPubKey // Session key to use
coords []byte
tstamp int64 // unix time, but the only real requirement is that it increases
isPong bool
mtu uint16
SendPermPub boxPubKey // Sender's permanent key
Handle handle // Random number to ID session
SendSesPub boxPubKey // Session key to use
Coords []byte
Tstamp int64 // unix time, but the only real requirement is that it increases
IsPong bool
MTU uint16
}
// Returns true if the session was updated, false otherwise
// Updates session info in response to a ping, after checking that the ping is OK.
// Returns true if the session was updated, or false otherwise.
func (s *sessionInfo) update(p *sessionPing) bool {
if !(p.tstamp > s.tstamp) {
if !(p.Tstamp > s.tstamp) {
// To protect against replay attacks
return false
}
if p.sendPermPub != s.theirPermPub {
if p.SendPermPub != s.theirPermPub {
// Should only happen if two sessions got the same handle
// That shouldn't be allowed anyway, but if it happens then let one time out
return false
}
if p.sendSesPub != s.theirSesPub {
s.theirSesPub = p.sendSesPub
s.theirHandle = p.handle
if p.SendSesPub != s.theirSesPub {
s.theirSesPub = p.SendSesPub
s.theirHandle = p.Handle
s.sharedSesKey = *getSharedKey(&s.mySesPriv, &s.theirSesPub)
s.theirNonce = boxNonce{}
s.nonceMask = 0
}
if p.mtu >= 1280 {
s.theirMTU = p.mtu
if p.MTU >= 1280 || p.MTU == 0 {
s.theirMTU = p.MTU
}
s.coords = append([]byte{}, p.coords...)
s.time = time.Now()
s.tstamp = p.tstamp
s.coords = append([]byte{}, p.Coords...)
now := time.Now()
s.time = now
s.tstamp = p.Tstamp
s.init = true
return true
}
// Returns true if the session has been idle for longer than the allowed timeout.
func (s *sessionInfo) timedout() bool {
return time.Since(s.time) > time.Minute
}
// Struct of all active sessions.
// Sessions are indexed by handle.
// Additionally, stores maps of address/subnet onto keys, and keys onto handles.
type sessions struct {
core *Core
// Maps known permanent keys to their shared key, used by DHT a lot
@@ -87,6 +102,7 @@ type sessions struct {
subnetToPerm map[subnet]*boxPubKey
}
// Initializes the session struct.
func (ss *sessions) init(core *Core) {
ss.core = core
ss.permShared = make(map[boxPubKey]*boxSharedKey)
@@ -97,6 +113,7 @@ func (ss *sessions) init(core *Core) {
ss.subnetToPerm = make(map[subnet]*boxPubKey)
}
// Gets the session corresponding to a given handle.
func (ss *sessions) getSessionForHandle(handle *handle) (*sessionInfo, bool) {
sinfo, isIn := ss.sinfos[*handle]
if isIn && sinfo.timedout() {
@@ -106,6 +123,7 @@ func (ss *sessions) getSessionForHandle(handle *handle) (*sessionInfo, bool) {
return sinfo, isIn
}
// Gets a session corresponding to an ephemeral session key used by this node.
func (ss *sessions) getByMySes(key *boxPubKey) (*sessionInfo, bool) {
h, isIn := ss.byMySes[*key]
if !isIn {
@@ -115,6 +133,7 @@ func (ss *sessions) getByMySes(key *boxPubKey) (*sessionInfo, bool) {
return sinfo, isIn
}
// Gets a session corresponding to a permanent key used by the remote node.
func (ss *sessions) getByTheirPerm(key *boxPubKey) (*sessionInfo, bool) {
h, isIn := ss.byTheirPerm[*key]
if !isIn {
@@ -124,6 +143,7 @@ func (ss *sessions) getByTheirPerm(key *boxPubKey) (*sessionInfo, bool) {
return sinfo, isIn
}
// Gets a session corresponding to an IPv6 address used by the remote node.
func (ss *sessions) getByTheirAddr(addr *address) (*sessionInfo, bool) {
p, isIn := ss.addrToPerm[*addr]
if !isIn {
@@ -133,6 +153,7 @@ func (ss *sessions) getByTheirAddr(addr *address) (*sessionInfo, bool) {
return sinfo, isIn
}
// Gets a session corresponding to an IPv6 /64 subnet used by the remote node/network.
func (ss *sessions) getByTheirSubnet(snet *subnet) (*sessionInfo, bool) {
p, isIn := ss.subnetToPerm[*snet]
if !isIn {
@@ -142,6 +163,8 @@ func (ss *sessions) getByTheirSubnet(snet *subnet) (*sessionInfo, bool) {
return sinfo, isIn
}
// Creates a new session and lazily cleans up old/timedout existing sessions.
// This includes initializing session info to sane defaults (e.g. lowest supported MTU).
func (ss *sessions) createSession(theirPermKey *boxPubKey) *sessionInfo {
sinfo := sessionInfo{}
sinfo.core = ss.core
@@ -152,6 +175,11 @@ func (ss *sessions) createSession(theirPermKey *boxPubKey) *sessionInfo {
sinfo.myNonce = *newBoxNonce()
sinfo.theirMTU = 1280
sinfo.myMTU = uint16(ss.core.tun.mtu)
now := time.Now()
sinfo.time = now
sinfo.mtuTime = now
sinfo.pingTime = now
sinfo.pingSend = now
higher := false
for idx := range ss.core.boxPub {
if ss.core.boxPub[idx] > sinfo.theirPermPub[idx] {
@@ -174,7 +202,6 @@ func (ss *sessions) createSession(theirPermKey *boxPubKey) *sessionInfo {
sinfo.send = make(chan []byte, 32)
sinfo.recv = make(chan *wire_trafficPacket, 32)
go sinfo.doWorker()
sinfo.time = time.Now()
// Do some cleanup
// Time thresholds almost certainly could use some adjusting
for _, s := range ss.sinfos {
@@ -190,6 +217,7 @@ func (ss *sessions) createSession(theirPermKey *boxPubKey) *sessionInfo {
return &sinfo
}
// Closes a session, removing it from sessions maps and killing the worker goroutine.
func (sinfo *sessionInfo) close() {
delete(sinfo.core.sessions.sinfos, sinfo.myHandle)
delete(sinfo.core.sessions.byMySes, sinfo.mySesPub)
@@ -200,21 +228,25 @@ func (sinfo *sessionInfo) close() {
close(sinfo.recv)
}
// Returns a session ping appropriate for the given session info.
func (ss *sessions) getPing(sinfo *sessionInfo) sessionPing {
loc := ss.core.switchTable.getLocator()
coords := loc.getCoords()
ref := sessionPing{
sendPermPub: ss.core.boxPub,
handle: sinfo.myHandle,
sendSesPub: sinfo.mySesPub,
tstamp: time.Now().Unix(),
coords: coords,
mtu: sinfo.myMTU,
SendPermPub: ss.core.boxPub,
Handle: sinfo.myHandle,
SendSesPub: sinfo.mySesPub,
Tstamp: time.Now().Unix(),
Coords: coords,
MTU: sinfo.myMTU,
}
sinfo.myNonce.update()
return ref
}
// Gets the shared key for a pair of box keys.
// Used to cache recently used shared keys for protocol traffic.
// This comes up with dht req/res and session ping/pong traffic.
func (ss *sessions) getSharedKey(myPriv *boxPrivKey,
theirPub *boxPubKey) *boxSharedKey {
if skey, isIn := ss.permShared[*theirPub]; isIn {
@@ -233,37 +265,44 @@ func (ss *sessions) getSharedKey(myPriv *boxPrivKey,
return ss.permShared[*theirPub]
}
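// Editor's sketch of the cache-or-compute pattern behind getSharedKey (the
// body is truncated by the diff above; the real version may also evict stale
// entries): repeated protocol traffic with the same peer reuses the cached
// result instead of redoing the expensive key derivation.
func (ss *sessions) getSharedKey_sketch(myPriv *boxPrivKey, theirPub *boxPubKey) *boxSharedKey {
if skey, isIn := ss.permShared[*theirPub]; isIn {
return skey // cache hit, no crypto needed
}
skey := getSharedKey(myPriv, theirPub) // expensive derivation, done once
ss.permShared[*theirPub] = skey
return skey
}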
// Sends a session ping by calling sendPingPong in ping mode.
func (ss *sessions) ping(sinfo *sessionInfo) {
ss.sendPingPong(sinfo, false)
}
// Calls getPing, sets the appropriate ping/pong flag, encodes it to wire format, and sends it.
// Updates the time the last ping was sent in the session info.
func (ss *sessions) sendPingPong(sinfo *sessionInfo, isPong bool) {
ping := ss.getPing(sinfo)
ping.isPong = isPong
ping.IsPong = isPong
bs := ping.encode()
shared := ss.getSharedKey(&ss.core.boxPriv, &sinfo.theirPermPub)
payload, nonce := boxSeal(shared, bs, nil)
p := wire_protoTrafficPacket{
ttl: ^uint64(0),
coords: sinfo.coords,
toKey: sinfo.theirPermPub,
fromKey: ss.core.boxPub,
nonce: *nonce,
payload: payload,
Coords: sinfo.coords,
ToKey: sinfo.theirPermPub,
FromKey: ss.core.boxPub,
Nonce: *nonce,
Payload: payload,
}
packet := p.encode()
ss.core.router.out(packet)
if !isPong {
sinfo.pingSend = time.Now()
}
}
// Handles a session ping, creating a session if needed and calling update, then possibly responding with a pong if the ping was in ping mode and the update was successful.
// If the session has a packet cached (common when first setting up a session), it will be sent.
func (ss *sessions) handlePing(ping *sessionPing) {
// Get the corresponding session (or create a new session)
sinfo, isIn := ss.getByTheirPerm(&ping.sendPermPub)
sinfo, isIn := ss.getByTheirPerm(&ping.SendPermPub)
if !isIn || sinfo.timedout() {
if isIn {
sinfo.close()
}
ss.createSession(&ping.sendPermPub)
sinfo, isIn = ss.getByTheirPerm(&ping.sendPermPub)
ss.createSession(&ping.SendPermPub)
sinfo, isIn = ss.getByTheirPerm(&ping.SendPermPub)
if !isIn {
panic("This should not happen")
}
@@ -272,7 +311,7 @@ func (ss *sessions) handlePing(ping *sessionPing) {
if !sinfo.update(ping) { /*panic("Should not happen in testing")*/
return
}
if !ping.isPong {
if !ping.IsPong {
ss.sendPingPong(sinfo, true)
}
if sinfo.packet != nil {
@@ -283,6 +322,9 @@ func (ss *sessions) handlePing(ping *sessionPing) {
}
}
// Used to subtract one nonce from another, staying in the range +- 64.
// This is used by the nonce progression machinery to advance the bitmask of recently received packets (indexed by nonce), or to check the appropriate bit of the bitmask.
// It's basically part of the machinery that prevents replays and duplicate packets.
func (n *boxNonce) minus(m *boxNonce) int64 {
diff := int64(0)
for idx := range n {
@@ -298,13 +340,20 @@ func (n *boxNonce) minus(m *boxNonce) int64 {
return diff
}
// Get the MTU of the session.
// Will be equal to the smaller of this node's MTU or the remote node's MTU.
// If sending over links with a maximum message size (this was a thing with the old UDP code), it could be further lowered, to a minimum of 1280.
func (sinfo *sessionInfo) getMTU() uint16 {
if sinfo.theirMTU == 0 || sinfo.myMTU == 0 {
return 0
}
if sinfo.theirMTU < sinfo.myMTU {
return sinfo.theirMTU
}
return sinfo.myMTU
}
// Checks if a packet's nonce is recent enough to fall within the window of allowed packets, and not already received.
func (sinfo *sessionInfo) nonceIsOK(theirNonce *boxNonce) bool {
// The bitmask is to allow for some non-duplicate out-of-order packets
diff := theirNonce.minus(&sinfo.theirNonce)
@@ -314,19 +363,24 @@ func (sinfo *sessionInfo) nonceIsOK(theirNonce *boxNonce) bool {
return ^sinfo.nonceMask&(0x01<<uint64(-diff)) != 0
}
// Updates the nonce mask by (possibly) shifting the bitmask and setting the bit corresponding to this nonce to 1, and then updating the most recent nonce
func (sinfo *sessionInfo) updateNonce(theirNonce *boxNonce) {
// Shift nonce mask if needed
// Set bit
diff := theirNonce.minus(&sinfo.theirNonce)
if diff > 0 {
// This nonce is newer, so shift the window before setting the bit, and update theirNonce in the session info.
sinfo.nonceMask <<= uint64(diff)
sinfo.nonceMask |= 0x01
sinfo.theirNonce = *theirNonce
} else {
// This nonce is older, so set the bit but do not shift the window.
sinfo.nonceMask |= 0x01 << uint64(-diff)
}
}
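// Editor's worked example (hypothetical nonces) of the sliding window above.
// Suppose the highest nonce seen so far is N, with the mask tracking the
// previous 64 nonces:
// - a packet with nonce N+3 arrives: diff = +3, so the mask shifts left by 3
//   and bit 0 is set; N+3 becomes the new high-water mark;
// - relative to that new mark, a packet with nonce N-2 arrives: diff = -5, so
//   nonceIsOK checks bit 5 (unset means not yet seen) and updateNonce sets it;
// - a second copy of that packet arrives: bit 5 is already set, so nonceIsOK
//   returns false and the duplicate is dropped;
// - anything more than 64 nonces behind the mark falls outside the window and
//   is rejected outright.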
// Resets all sessions to an uninitialized state.
// Called after coord changes, so attempts to use a session will trigger a new ping and notify the remote end of the coord change.
func (ss *sessions) resetInits() {
for _, sinfo := range ss.sinfos {
sinfo.init = false
@@ -335,10 +389,9 @@ func (ss *sessions) resetInits() {
////////////////////////////////////////////////////////////////////////////////
// This is for a per-session worker
// It handles calling the relatively expensive crypto operations
// It's also responsible for keeping nonces consistent
// This is for a per-session worker.
// It handles calling the relatively expensive crypto operations.
// It's also responsible for checking nonces and dropping out-of-date/duplicate packets, or else calling the function to update nonces if the packet is OK.
func (sinfo *sessionInfo) doWorker() {
for {
select {
@@ -358,6 +411,7 @@ func (sinfo *sessionInfo) doWorker() {
}
}
// This encrypts a packet, creates a trafficPacket struct, encodes it, and sends it to router.out to pass it to the switch layer.
func (sinfo *sessionInfo) doSend(bs []byte) {
defer util_putBytes(bs)
if !sinfo.init {
@@ -366,27 +420,60 @@ func (sinfo *sessionInfo) doSend(bs []byte) {
payload, nonce := boxSeal(&sinfo.sharedSesKey, bs, &sinfo.myNonce)
defer util_putBytes(payload)
p := wire_trafficPacket{
ttl: ^uint64(0),
coords: sinfo.coords,
handle: sinfo.theirHandle,
nonce: *nonce,
payload: payload,
Coords: sinfo.coords,
Handle: sinfo.theirHandle,
Nonce: *nonce,
Payload: payload,
}
packet := p.encode()
sinfo.bytesSent += uint64(len(bs))
sinfo.core.router.out(packet)
}
// This takes a trafficPacket and checks the nonce.
// If the nonce is OK, it decrypts the packet.
// If the decrypted packet is OK, it calls router.recvPacket to pass the packet to the tun/tap.
// If a packet does not decrypt successfully, it assumes the packet was truncated, and updates the MTU accordingly.
// TODO? remove the MTU updating part? That should never happen with TCP peers, and the old UDP code that caused it was removed (and if replaced, should be replaced with something that can reliably send messages with an arbitrary size).
func (sinfo *sessionInfo) doRecv(p *wire_trafficPacket) {
defer util_putBytes(p.payload)
if !sinfo.nonceIsOK(&p.nonce) {
defer util_putBytes(p.Payload)
payloadSize := uint16(len(p.Payload))
if !sinfo.nonceIsOK(&p.Nonce) {
return
}
bs, isOK := boxOpen(&sinfo.sharedSesKey, p.payload, &p.nonce)
bs, isOK := boxOpen(&sinfo.sharedSesKey, p.Payload, &p.Nonce)
if !isOK {
// We're going to guess that the session MTU is too large
// Set myMTU to the largest value we think we can receive
fixSessionMTU := func() {
// This clamps down to 1280 almost immediately over ipv4
// Over link-local ipv6, it seems to approach link MTU
// So maybe it's doing the right thing?...
//sinfo.core.log.Println("DEBUG got bad packet:", payloadSize)
newMTU := payloadSize - boxOverhead
if newMTU < 1280 {
newMTU = 1280
}
if newMTU < sinfo.myMTU {
sinfo.myMTU = newMTU
sinfo.core.sessions.sendPingPong(sinfo, false)
sinfo.mtuTime = time.Now()
sinfo.wasMTUFixed = true
}
}
go func() { sinfo.core.router.admin <- fixSessionMTU }()
util_putBytes(bs)
return
}
sinfo.updateNonce(&p.nonce)
fixSessionMTU := func() {
if time.Since(sinfo.mtuTime) > time.Minute {
sinfo.myMTU = uint16(sinfo.core.tun.mtu)
sinfo.mtuTime = time.Now()
}
}
go func() { sinfo.core.router.admin <- fixSessionMTU }()
sinfo.updateNonce(&p.Nonce)
sinfo.time = time.Now()
sinfo.bytesRecvd += uint64(len(bs))
sinfo.core.router.recvPacket(bs, &sinfo.theirAddr, &sinfo.theirSubnet)
}
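// Editor's worked example (hypothetical sizes) of the MTU fixup above: if a
// session advertises a 1500-byte MTU but some link truncates the payload to
// 1412 bytes, decryption fails, and myMTU is lowered to 1412 minus the crypto
// overhead (clamped to at least 1280, the IPv6 minimum) before a ping
// advertises the new value. Once a minute passes with packets decrypting
// normally, the second fixSessionMTU above resets myMTU to the tun/tap MTU to
// probe whether the path has recovered.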


@@ -3,43 +3,57 @@ package yggdrasil
// This is where we record which signatures we've previously checked
// It's so we can avoid needlessly checking them again
import "sync"
import "time"
import (
"sync"
"time"
)
// This keeps track of what signatures have already been checked.
// It's used to skip expensive crypto operations, given that many signatures are likely to be the same for the average node's peers.
type sigManager struct {
mutex sync.RWMutex
checked map[sigBytes]knownSig
lastCleaned time.Time
}
// Represents a known signature.
// Includes the key, the signature bytes, the bytes that were signed, and the time it was last used.
type knownSig struct {
key sigPubKey
sig sigBytes
bs []byte
time time.Time
}
// Initializes the signature manager.
func (m *sigManager) init() {
m.checked = make(map[sigBytes]knownSig)
}
// Checks if a key and signature match the supplied bytes.
// If the same key/sig/bytes have been checked before, it returns true from the cached results.
// If not, it checks the key, updates it in the cache if successful, and returns the checked results.
func (m *sigManager) check(key *sigPubKey, sig *sigBytes, bs []byte) bool {
if m.isChecked(sig, bs) {
if m.isChecked(key, sig, bs) {
return true
}
verified := verify(key, bs, sig)
if verified {
m.putChecked(sig, bs)
m.putChecked(key, sig, bs)
}
return verified
}
func (m *sigManager) isChecked(sig *sigBytes, bs []byte) bool {
// Checks the cache to see if this key/sig/bytes combination has already been verified.
// Returns true if it finds a match.
func (m *sigManager) isChecked(key *sigPubKey, sig *sigBytes, bs []byte) bool {
m.mutex.RLock()
defer m.mutex.RUnlock()
k, isIn := m.checked[*sig]
if !isIn {
return false
}
if len(bs) != len(k.bs) {
if k.key != *key || k.sig != *sig || len(bs) != len(k.bs) {
return false
}
for idx := 0; idx < len(bs); idx++ {
@@ -51,7 +65,10 @@ func (m *sigManager) isChecked(sig *sigBytes, bs []byte) bool {
return true
}
func (m *sigManager) putChecked(newsig *sigBytes, bs []byte) {
// Puts a new result into the cache.
// This result is then used by isChecked to skip the expensive crypto verification if it's needed again.
// This is useful because, for nodes with multiple peers, there is often a lot of overlap between the signatures provided by each peer.
func (m *sigManager) putChecked(key *sigPubKey, newsig *sigBytes, bs []byte) {
m.mutex.Lock()
defer m.mutex.Unlock()
now := time.Now()
@@ -64,6 +81,6 @@ func (m *sigManager) putChecked(newsig *sigBytes, bs []byte) {
}
m.lastCleaned = now
}
k := knownSig{bs: bs, time: now}
k := knownSig{key: *key, sig: *newsig, bs: bs, time: now}
m.checked[*newsig] = k
}
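// Editor's illustration (hypothetical call site) of why the cache pays off:
// the first check of a given key/sig/bytes triple runs the expensive verify()
// and caches the result, while a later check of the same triple, e.g. the
// same root update relayed by a different peer, is answered from the map.
func sigCacheDemo_sketch(m *sigManager, key *sigPubKey, sig *sigBytes, bs []byte) {
_ = m.check(key, sig, bs) // cache miss: verifies the signature, then caches it
_ = m.check(key, sig, bs) // cache hit: no crypto, just a map lookup and compare
}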


@@ -9,30 +9,30 @@ package yggdrasil
// TODO document/comment everything in a lot more detail
// TODO? use a pre-computed lookup table (python version had this)
// A little annoying to do with constant changes from bandwidth estimates
// A little annoying to do with constant changes from backpressure
// FIXME (!) throttle how often root updates are accepted
// If the root starts spamming with new timestamps, it should only affect their neighbors
// The rest of the network should see announcements at a somewhat reasonable rate
// Maybe no faster than 2x the usual update interval
import "time"
import "sync"
import "sync/atomic"
//import "fmt"
import (
"sort"
"sync"
"sync/atomic"
"time"
)
const switch_timeout = time.Minute
const switch_updateInterval = switch_timeout / 2
const switch_throttle = switch_updateInterval / 2
// You should be able to provide crypto signatures for this
// 1 signature per coord, from the *sender* to that coord
// E.g. A->B->C has sigA(A->B) and sigB(A->B->C)
// The switch locator represents the topology and network state dependent info about a node, minus the signatures that go with it.
// Nodes will pick the best root they see, provided that the root continues to push out updates with new timestamps.
// The coords represent a path from the root to a node.
// This path is generally part of a spanning tree, except possibly the last hop (it can loop when sending coords to your parent, but they see this and know not to use a looping path).
type switchLocator struct {
root sigPubKey
tstamp int64
coords []switchPort
}
// Returns true if the first sigPubKey has a higher TreeID.
func firstIsBetter(first, second *sigPubKey) bool {
// Higher TreeID is better
ftid := getTreeID(first)
@@ -47,6 +47,7 @@ func firstIsBetter(first, second *sigPubKey) bool {
return false
}
// Returns a copy of the locator which can safely be mutated.
func (l *switchLocator) clone() switchLocator {
// Used to create a deep copy for use in messages
// Copy required because we need to mutate coords before sending
@@ -57,6 +58,7 @@ func (l *switchLocator) clone() switchLocator {
return loc
}
// Gets the distance a locator is from the provided destination coords, with the coords provided in []byte format (used to compress integers sent over the wire).
func (l *switchLocator) dist(dest []byte) int {
// Returns distance (on the tree) from these coords
offset := 0
@@ -87,6 +89,7 @@ func (l *switchLocator) dist(dest []byte) int {
return dist
}
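// Editor's note on the tree metric (a sketch of the idea, not the exact
// implementation truncated above): with coords as root-to-node paths, the
// distance between a node at coords A and a destination at coords B is
// (len(A) - p) + (len(B) - p), where p is the length of their longest common
// prefix, i.e. the hops up from A to the common ancestor plus the hops down
// to B.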
// Gets coords in wire encoded format, with *no* length prefix.
func (l *switchLocator) getCoords() []byte {
bs := make([]byte, 0, len(l.coords))
for _, coord := range l.coords {
@@ -96,6 +99,8 @@ func (l *switchLocator) getCoords() []byte {
return bs
}
// Returns true if this locator represents an ancestor of the locator given as an argument.
// Ancestor means that it's the parent node, or the parent of parent, and so on...
func (x *switchLocator) isAncestorOf(y *switchLocator) bool {
if x.root != y.root {
return false
@@ -111,44 +116,44 @@ func (x *switchLocator) isAncestorOf(y *switchLocator) bool {
return true
}
// Information about a peer, used by the switch to build the tree and eventually make routing decisions.
type peerInfo struct {
key sigPubKey // ID of this peer
locator switchLocator // Should be able to respond with signatures upon request
degree uint64 // Self-reported degree
coords []switchPort // Coords of this peer (taken from coords of the sent locator)
time time.Time // Time this node was last seen
firstSeen time.Time
port switchPort // Interface number of this peer
seq uint64 // Seq number we last saw this peer advertise
}
type switchMessage struct {
from sigPubKey // key of the sender
locator switchLocator // Locator advertised for the receiver, not the sender's loc!
seq uint64
msg switchMsg // The wire switchMsg used
}
// This is just a uint64 with a named type for clarity reasons.
type switchPort uint64
// This is the subset of the information about a peer needed to make routing decisions, and it is stored separately in an atomically accessed table, which gets hammered in the "hot loop" of the routing logic (see: peer.handleTraffic in peers.go).
type tableElem struct {
port switchPort
firstSeen time.Time
locator switchLocator
port switchPort
locator switchLocator
}
// This is the subset of the information about all peers needed to make routing decisions, and it is stored separately in an atomically accessed table, which gets hammered in the "hot loop" of the routing logic (see: peer.handleTraffic in peers.go).
type lookupTable struct {
self switchLocator
elems []tableElem
}
// This is switch information which is mutable and needs to be modified by other goroutines, but is not accessed atomically.
// Use the switchTable functions to access it safely using the RWMutex for synchronization.
type switchData struct {
// All data that's mutable and used by exported Table methods
// To be read/written with atomic.Value Store/Load calls
locator switchLocator
seq uint64 // Sequence number, reported to peers, so they know about changes
peers map[switchPort]peerInfo
sigs []sigInfo
msg *switchMsg
}
// All the information stored by the switch.
type switchTable struct {
core *Core
key sigPubKey // Our own key
@@ -161,6 +166,7 @@ type switchTable struct {
table atomic.Value //lookupTable
}
// Initializes the switchTable struct.
func (t *switchTable) init(core *Core, key sigPubKey) {
now := time.Now()
t.core = core
@@ -171,56 +177,43 @@ func (t *switchTable) init(core *Core, key sigPubKey) {
t.updater.Store(&sync.Once{})
t.table.Store(lookupTable{})
t.drop = make(map[sigPubKey]int64)
doTicker := func() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for {
<-ticker.C
t.Tick()
}
}
go doTicker()
}
// Safely gets a copy of this node's locator.
func (t *switchTable) getLocator() switchLocator {
t.mutex.RLock()
defer t.mutex.RUnlock()
return t.data.locator.clone()
}
func (t *switchTable) Tick() {
// Regular maintenance to possibly timeout/reset the root and similar.
func (t *switchTable) doMaintenance() {
// Periodic maintenance work to keep things internally consistent
t.mutex.Lock() // Write lock
defer t.mutex.Unlock() // Release lock when we're done
t.cleanRoot()
t.cleanPeers()
t.cleanDropped()
}
// Updates the root periodically if it is ourself, or promotes ourself to root if we're better than the current root or if the current root has timed out.
func (t *switchTable) cleanRoot() {
// TODO rethink how this is done?...
// Get rid of the root if it looks like it's timed out
now := time.Now()
doUpdate := false
//fmt.Println("DEBUG clean root:", now.Sub(t.time))
if now.Sub(t.time) > switch_timeout {
//fmt.Println("root timed out", t.data.locator)
dropped := t.data.peers[t.parent]
dropped.time = t.time
t.drop[t.data.locator.root] = t.data.locator.tstamp
doUpdate = true
//t.core.log.Println("DEBUG: switch root timeout", len(t.drop))
}
// Or, if we're better than our root, root ourself
if firstIsBetter(&t.key, &t.data.locator.root) {
//fmt.Println("root is worse than us", t.data.locator.Root)
doUpdate = true
//t.core.log.Println("DEBUG: switch root replace with self", t.data.locator.Root)
}
// Or, if we are the root, possibly update our timestamp
if t.data.locator.root == t.key &&
now.Sub(t.time) > switch_timeout/2 {
//fmt.Println("root is self and old, updating", t.data.locator.Root)
now.Sub(t.time) > switch_updateInterval {
doUpdate = true
}
if doUpdate {
@@ -235,25 +228,27 @@ func (t *switchTable) cleanRoot() {
}
}
t.data.locator = switchLocator{root: t.key, tstamp: now.Unix()}
t.data.sigs = nil
t.core.peers.sendSwitchMsgs()
}
}
func (t *switchTable) cleanPeers() {
now := time.Now()
changed := false
for idx, info := range t.data.peers {
if info.port != switchPort(0) && now.Sub(info.time) > 6*time.Second /*switch_timeout*/ {
//fmt.Println("peer timed out", t.key, info.locator)
delete(t.data.peers, idx)
changed = true
}
// Removes a peer.
// Must be called by the router mainLoop goroutine, e.g. call router.doAdmin with a lambda that calls this.
// If the removed peer was this node's parent, it immediately tries to find a new parent.
func (t *switchTable) unlockedRemovePeer(port switchPort) {
delete(t.data.peers, port)
t.updater.Store(&sync.Once{})
if port != t.parent {
return
}
if changed {
t.updater.Store(&sync.Once{})
for _, info := range t.data.peers {
t.unlockedHandleMsg(&info.msg, info.port)
}
}
// Dropped is a list of roots that are better than the current root, but stopped sending new timestamps.
// If we switch to a new root, and that root is better than an old root that previously timed out, then we can clean up the old dropped root infos.
// This function is called periodically to do that cleanup.
func (t *switchTable) cleanDropped() {
// TODO? only call this after root changes, not periodically
for root := range t.drop {
@@ -263,33 +258,95 @@ func (t *switchTable) cleanDropped() {
}
}
func (t *switchTable) createMessage(port switchPort) (*switchMessage, []sigInfo) {
t.mutex.RLock()
defer t.mutex.RUnlock()
msg := switchMessage{from: t.key, locator: t.data.locator.clone()}
msg.locator.coords = append(msg.locator.coords, port)
msg.seq = t.data.seq
return &msg, t.data.sigs
// A switchMsg contains the root node's sig key, timestamp, and signed per-hop information about a path from the root node to some other node in the network.
// This is exchanged with peers to construct the spanning tree.
// A subset of this information, excluding the signatures, is used to construct locators that are used elsewhere in the code.
type switchMsg struct {
Root sigPubKey
TStamp int64
Hops []switchMsgHop
}
func (t *switchTable) handleMessage(msg *switchMessage, fromPort switchPort, sigs []sigInfo) {
// This represents the signed information about the path leading from the root to the Next node, via the Port specified here.
type switchMsgHop struct {
Port switchPort
Next sigPubKey
Sig sigBytes
}
// This returns a *switchMsg that points to a copy of this node's current switchMsg, which can safely have additional information appended to Hops and sent to a peer.
func (t *switchTable) getMsg() *switchMsg {
t.mutex.RLock()
defer t.mutex.RUnlock()
if t.parent == 0 {
return &switchMsg{Root: t.key, TStamp: t.data.locator.tstamp}
} else if parent, isIn := t.data.peers[t.parent]; isIn {
msg := parent.msg
msg.Hops = append([]switchMsgHop(nil), msg.Hops...)
return &msg
} else {
return nil
}
}
// This function checks that the root information in a switchMsg is OK.
// In particular, that the root is better, or else the same as the current root but with a good timestamp, and that this root+timestamp haven't been dropped due to timeout.
func (t *switchTable) checkRoot(msg *switchMsg) bool {
// returns false if it's a dropped root, not a better root, or has an older timestamp
// returns true otherwise
// used elsewhere to keep inserting peers into the dht only if root info is OK
t.mutex.RLock()
defer t.mutex.RUnlock()
dropTstamp, isIn := t.drop[msg.Root]
switch {
case isIn && dropTstamp >= msg.TStamp:
return false
case firstIsBetter(&msg.Root, &t.data.locator.root):
return true
case t.data.locator.root != msg.Root:
return false
case t.data.locator.tstamp > msg.TStamp:
return false
default:
return true
}
}
// This is a mutexed wrapper to unlockedHandleMsg, and is called by the peer structs in peers.go to pass a switchMsg for that peer into the switch.
func (t *switchTable) handleMsg(msg *switchMsg, fromPort switchPort) {
t.mutex.Lock()
defer t.mutex.Unlock()
t.unlockedHandleMsg(msg, fromPort)
}
// This updates the switch with information about a peer.
// Then comes the tricky part: deciding if we should update our own locator as a result.
// That happens if this node is already our parent, or is advertising a better root, or is advertising a better path to the same root, etc...
// There are a lot of very delicate order-sensitive checks here, so it's best to just read the code if you need to understand what it's doing.
// It's very important not to change the order of the cases in the switch statement unless you're absolutely sure that it's safe, including safe if used alongside nodes that used the previous order.
func (t *switchTable) unlockedHandleMsg(msg *switchMsg, fromPort switchPort) {
// TODO directly use a switchMsg instead of switchMessage + sigs
now := time.Now()
if len(msg.locator.coords) == 0 {
return
} // Should always have >=1 links
// Set up the sender peerInfo
var sender peerInfo
sender.locator.root = msg.Root
sender.locator.tstamp = msg.TStamp
prevKey := msg.Root
for _, hop := range msg.Hops {
// Build locator
sender.locator.coords = append(sender.locator.coords, hop.Port)
sender.key = prevKey
prevKey = hop.Next
}
sender.msg = *msg
oldSender, isIn := t.data.peers[fromPort]
if !isIn {
oldSender.firstSeen = now
}
sender := peerInfo{key: msg.from,
locator: msg.locator,
coords: msg.locator.coords[:len(msg.locator.coords)-1],
time: now,
firstSeen: oldSender.firstSeen,
port: fromPort,
seq: msg.seq}
sender.firstSeen = oldSender.firstSeen
sender.port = fromPort
sender.time = now
// Decide what to do
equiv := func(x *switchLocator, y *switchLocator) bool {
if x.root != y.root {
return false
@@ -305,21 +362,21 @@ func (t *switchTable) handleMessage(msg *switchMessage, fromPort switchPort, sig
return true
}
doUpdate := false
if !equiv(&msg.locator, &oldSender.locator) {
if !equiv(&sender.locator, &oldSender.locator) {
doUpdate = true
sender.firstSeen = now
//sender.firstSeen = now // TODO? uncomment to prevent flapping?
}
t.data.peers[fromPort] = sender
updateRoot := false
oldParent, isIn := t.data.peers[t.parent]
noParent := !isIn
noLoop := func() bool {
for idx := 0; idx < len(sigs)-1; idx++ {
if sigs[idx].next == t.core.sigPub {
for idx := 0; idx < len(msg.Hops)-1; idx++ {
if msg.Hops[idx].Next == t.core.sigPub {
return false
}
}
if msg.locator.root == t.core.sigPub {
if sender.locator.root == t.core.sigPub {
return false
}
return true
@@ -328,44 +385,43 @@ func (t *switchTable) handleMessage(msg *switchMessage, fromPort switchPort, sig
pTime := oldParent.time.Sub(oldParent.firstSeen) + switch_timeout
// Really want to compare sLen/sTime and pLen/pTime
// Cross multiplied to avoid divide-by-zero
cost := len(msg.locator.coords) * int(pTime.Seconds())
cost := len(sender.locator.coords) * int(pTime.Seconds())
pCost := len(t.data.locator.coords) * int(sTime.Seconds())
dropTstamp, isIn := t.drop[msg.locator.root]
dropTstamp, isIn := t.drop[sender.locator.root]
// Here be dragons
switch {
case !noLoop: // do nothing
case isIn && dropTstamp >= msg.locator.tstamp: // do nothing
case firstIsBetter(&msg.locator.root, &t.data.locator.root):
case isIn && dropTstamp >= sender.locator.tstamp: // do nothing
case firstIsBetter(&sender.locator.root, &t.data.locator.root):
updateRoot = true
case t.data.locator.root != msg.locator.root: // do nothing
case t.data.locator.tstamp > msg.locator.tstamp: // do nothing
case t.data.locator.root != sender.locator.root: // do nothing
case t.data.locator.tstamp > sender.locator.tstamp: // do nothing
case noParent:
updateRoot = true
case cost < pCost:
updateRoot = true
case sender.port == t.parent &&
(msg.locator.tstamp > t.data.locator.tstamp ||
!equiv(&msg.locator, &t.data.locator)):
case sender.port != t.parent: // do nothing
case !equiv(&sender.locator, &t.data.locator):
updateRoot = true
case now.Sub(t.time) < switch_throttle: // do nothing
case sender.locator.tstamp > t.data.locator.tstamp:
updateRoot = true
}
if updateRoot {
if !equiv(&msg.locator, &t.data.locator) {
if !equiv(&sender.locator, &t.data.locator) {
doUpdate = true
t.data.seq++
select {
case t.core.router.reset <- struct{}{}:
default:
}
//t.core.log.Println("Switch update:", msg.Locator.Root, msg.Locator.Tstamp, msg.Locator.Coords)
//fmt.Println("Switch update:", msg.Locator.Root, msg.Locator.Tstamp, msg.Locator.Coords)
}
if t.data.locator.tstamp != msg.locator.tstamp {
if t.data.locator.tstamp != sender.locator.tstamp {
t.time = now
}
t.data.locator = msg.locator
t.data.locator = sender.locator
t.parent = sender.port
t.data.sigs = sigs
//t.core.log.Println("Switch update:", msg.Locator.Root, msg.Locator.Tstamp, msg.Locator.Coords)
t.core.peers.sendSwitchMsgs()
}
if doUpdate {
t.updater.Store(&sync.Once{})
@@ -373,6 +429,7 @@ func (t *switchTable) handleMessage(msg *switchMessage, fromPort switchPort, sig
return
}
// This is called via a sync.Once to update the atomically readable subset of switch information that gets used for routing decisions.
func (t *switchTable) updateTable() {
// WARNING this should only be called from within t.data.updater.Do()
// It relies on the sync.Once for synchronization with messages and lookups
@@ -390,63 +447,53 @@ func (t *switchTable) updateTable() {
}
for _, pinfo := range t.data.peers {
//if !pinfo.forward { continue }
if pinfo.locator.root != newTable.self.root {
continue
}
loc := pinfo.locator.clone()
loc.coords = loc.coords[:len(loc.coords)-1] // Remove the them->self link
newTable.elems = append(newTable.elems, tableElem{
locator: loc,
//degree: pinfo.degree,
firstSeen: pinfo.firstSeen,
//forward: pinfo.forward,
port: pinfo.port,
port: pinfo.port,
})
}
sort.SliceStable(newTable.elems, func(i, j int) bool {
return t.data.peers[newTable.elems[i].port].firstSeen.Before(t.data.peers[newTable.elems[j].port].firstSeen)
})
t.table.Store(newTable)
}
func (t *switchTable) lookup(dest []byte, ttl uint64) (switchPort, uint64) {
// This does the switch layer lookups that decide how to route traffic.
// Traffic uses greedy routing in a metric space, where the metric distance between nodes is equal to the distance between them on the tree.
// Traffic must be routed to a node that is closer to the destination via the metric space distance.
// In the event that two nodes are equally close, it gets routed to the one with the longest uptime (due to the order that things are iterated over).
// The size of the outgoing packet queue is added to a node's tree distance when computing the cost of forwarding to that node, subject to the constraint that the real tree distance puts them closer to the destination than ourself.
// Doing so adds a limited form of backpressure routing, based on local information, which allows us to forward traffic around *local* bottlenecks, provided that another greedy path exists.
func (t *switchTable) lookup(dest []byte) switchPort {
t.updater.Load().(*sync.Once).Do(t.updateTable)
table := t.table.Load().(lookupTable)
myDist := table.self.dist(dest)
if myDist == 0 {
return 0
}
// cost is in units of (expected distance) + (expected queue size), where expected distance is used as an approximation of the minimum backpressure gradient needed for packets to flow
ports := t.core.peers.getPorts()
getBandwidth := func(port switchPort) float64 {
var bandwidth float64
if p, isIn := ports[port]; isIn {
bandwidth = p.getBandwidth()
}
return bandwidth
}
var best switchPort
myDist := table.self.dist(dest) //getDist(table.self.coords)
if !(uint64(myDist) < ttl) {
return 0, 0
}
// score is in units of bandwidth / distance
bestScore := float64(-1)
bestCost := int64(^uint64(0) >> 1)
for _, info := range table.elems {
if info.locator.root != table.self.root {
continue
}
dist := info.locator.dist(dest) //getDist(info.locator.coords)
dist := info.locator.dist(dest)
if !(dist < myDist) {
continue
}
score := getBandwidth(info.port)
score /= float64(1 + dist)
if score > bestScore {
p, isIn := ports[info.port]
if !isIn {
continue
}
cost := int64(dist) + p.getQueueSize()
if cost < bestCost {
best = info.port
bestScore = score
bestCost = cost
}
}
//t.core.log.Println("DEBUG: sending to", best, "bandwidth", getBandwidth(best))
return best, uint64(myDist)
return best
}
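// Editor's illustration (hypothetical numbers) of the cost metric above:
// suppose our own tree distance to the destination is 5, peer A is at
// distance 2 with 40 packets queued, and peer B is at distance 4 with 3
// queued. A's cost is 2+40 = 42 and B's is 4+3 = 7, so traffic detours via B
// even though A is closer on the tree, while the dist < myDist check still
// guarantees that every forwarding hop makes real progress toward the
// destination.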
////////////////////////////////////////////////////////////////////////////////
//Signature stuff
type sigInfo struct {
next sigPubKey
sig sigBytes
}
////////////////////////////////////////////////////////////////////////////////


@@ -10,56 +10,134 @@ package yggdrasil
// Could be used to DoS (connect, give someone else's keys, spew garbage)
// I guess the "peer" part should watch for link packets, disconnect?
import "net"
import "time"
import "errors"
import "sync"
import "fmt"
// TCP connections start with a metadata exchange.
// It involves exchanging version numbers and crypto keys
// See version.go for version metadata format
import (
"errors"
"fmt"
"net"
"sync"
"sync/atomic"
"time"
"golang.org/x/net/proxy"
)
const tcp_msgSize = 2048 + 65535 // TODO figure out what makes sense
// Wrapper function for non-TCP/IP connections.
func setNoDelay(c net.Conn, delay bool) {
tcp, ok := c.(*net.TCPConn)
if ok {
tcp.SetNoDelay(delay)
}
}
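// Editor's sketch of the metadata exchange described above (an assumed shape
// mirroring the read/write pattern in handler below; see version.go for the
// real format): both sides write their metadata first, then read and validate
// the remote copy before any peer state is created.
func metadataHandshake_sketch(sock net.Conn, core *Core, linkPub *boxPubKey) (version_metadata, error) {
meta := version_getBaseMetadata()
meta.box = core.boxPub
meta.sig = core.sigPub
meta.link = *linkPub
metaBytes := meta.encode()
if _, err := sock.Write(metaBytes); err != nil {
return version_metadata{}, err
}
sock.SetReadDeadline(time.Now().Add(6 * time.Second))
buf := make([]byte, len(metaBytes)) // metadata is fixed-size in this sketch
if _, err := sock.Read(buf); err != nil {
return version_metadata{}, err
}
var remote version_metadata
if !remote.decode(buf) || !remote.check() {
return version_metadata{}, errors.New("version metadata mismatch")
}
return remote, nil
}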
// The TCP listener and information about active TCP connections, to avoid duplication.
type tcpInterface struct {
core *Core
serv *net.TCPListener
serv net.Listener
mutex sync.Mutex // Protecting the below
calls map[string]struct{}
conns map[tcpInfo](chan struct{})
}
// This is used as the key to a map that tracks existing connections, to prevent multiple connections to the same keys and local/remote address pair from occurring.
// Different address combinations are allowed, so multi-homing is still technically possible (but not necessarily advisable).
type tcpInfo struct {
box boxPubKey
sig sigPubKey
localAddr string // net.IPAddr.String(), not TCPAddr, don't care about port
localAddr string
remoteAddr string
}
func (iface *tcpInterface) init(core *Core, addr string) {
iface.core = core
tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
panic(err)
}
iface.serv, err = net.ListenTCP("tcp", tcpAddr)
if err != nil {
panic(err)
}
iface.calls = make(map[string]struct{})
iface.conns = make(map[tcpInfo](chan struct{}))
go iface.listener()
// Returns the address of the listener.
func (iface *tcpInterface) getAddr() *net.TCPAddr {
return iface.serv.Addr().(*net.TCPAddr)
}
// Attempts to initiate a connection to the provided address.
func (iface *tcpInterface) connect(addr string) {
iface.call(addr)
}
// Attempts to initiate a connection to the provided address, via the provided SOCKS proxy address.
func (iface *tcpInterface) connectSOCKS(socksaddr, peeraddr string) {
// TODO make sure this doesn't keep attempting/killing connections when one is already active.
// I think some of the interaction between this and callWithConn needs work, so the dial isn't even attempted if there's already an outgoing call to peeraddr.
// Or maybe only if there's already an outgoing call to peeraddr via this socksaddr?
go func() {
dialer, err := proxy.SOCKS5("tcp", socksaddr, nil, proxy.Direct)
if err == nil {
conn, err := dialer.Dial("tcp", peeraddr)
if err == nil {
iface.callWithConn(&wrappedConn{
c: conn,
raddr: &wrappedAddr{
network: "tcp",
addr: peeraddr,
},
})
}
}
}()
}
// Initializes the struct.
func (iface *tcpInterface) init(core *Core, addr string) (err error) {
iface.core = core
iface.serv, err = net.Listen("tcp", addr)
if err == nil {
iface.calls = make(map[string]struct{})
iface.conns = make(map[tcpInfo](chan struct{}))
go iface.listener()
}
return err
}
// Runs the listener, which spawns off goroutines for incoming connections.
func (iface *tcpInterface) listener() {
defer iface.serv.Close()
iface.core.log.Println("Listening on:", iface.serv.Addr().String())
iface.core.log.Println("Listening for TCP on:", iface.serv.Addr().String())
for {
sock, err := iface.serv.AcceptTCP()
sock, err := iface.serv.Accept()
if err != nil {
panic(err)
}
go iface.handler(sock)
go iface.handler(sock, true)
}
}
// Called by connectSOCKS, it's like call but with the connection already established.
func (iface *tcpInterface) callWithConn(conn net.Conn) {
go func() {
raddr := conn.RemoteAddr().String()
iface.mutex.Lock()
_, isIn := iface.calls[raddr]
iface.mutex.Unlock()
if !isIn {
iface.mutex.Lock()
iface.calls[raddr] = struct{}{}
iface.mutex.Unlock()
defer func() {
iface.mutex.Lock()
delete(iface.calls, raddr)
iface.mutex.Unlock()
}()
iface.handler(conn, false)
}
}()
}
// Checks if a connection already exists.
// If not, it adds it to the list of active outgoing calls (to block future attempts) and dials the address.
// If the dial is successful, it launches the handler.
// When finished, it removes the outgoing call, so reconnection attempts can be made later.
// This all happens in a separate goroutine that it spawns.
func (iface *tcpInterface) call(saddr string) {
go func() {
quit := false
@@ -76,39 +154,54 @@ func (iface *tcpInterface) call(saddr string) {
}
iface.mutex.Unlock()
if !quit {
conn, err := net.DialTimeout("tcp", saddr, 6*time.Second)
conn, err := net.Dial("tcp", saddr)
if err != nil {
return
}
sock := conn.(*net.TCPConn)
iface.handler(sock)
iface.handler(conn, false)
}
}()
}
func (iface *tcpInterface) handler(sock *net.TCPConn) {
// This exchanges/checks connection metadata, sets up the peer struct, sets up the writer goroutine, and then runs the reader within the current goroutine.
// It defers a bunch of cleanup stuff to tear down all of these things when the reader exits (e.g. due to a closed connection or a timeout).
func (iface *tcpInterface) handler(sock net.Conn, incoming bool) {
defer sock.Close()
// Get our keys
keys := []byte{}
keys = append(keys, tcp_key[:]...)
keys = append(keys, iface.core.boxPub[:]...)
keys = append(keys, iface.core.sigPub[:]...)
_, err := sock.Write(keys)
myLinkPub, myLinkPriv := newBoxKeys() // ephemeral link keys
meta := version_getBaseMetadata()
meta.box = iface.core.boxPub
meta.sig = iface.core.sigPub
meta.link = *myLinkPub
metaBytes := meta.encode()
_, err := sock.Write(metaBytes)
if err != nil {
return
}
timeout := time.Now().Add(6 * time.Second)
sock.SetReadDeadline(timeout)
n, err := sock.Read(keys)
_, err = sock.Read(metaBytes)
if err != nil {
return
}
if n < len(keys) { /*panic("Partial key packet?") ;*/
meta = version_metadata{} // Reset to zero value
if !meta.decode(metaBytes) || !meta.check() {
// Failed to decode and check the metadata
// If it's a version mismatch issue, then print an error message
base := version_getBaseMetadata()
if meta.meta == base.meta {
if meta.ver > base.ver {
iface.core.log.Println("Failed to connect to node:", sock.RemoteAddr().String(), "version:", meta.ver)
} else if meta.ver == base.ver && meta.minorVer > base.minorVer {
iface.core.log.Println("Failed to connect to node:", sock.RemoteAddr().String(), "version:", fmt.Sprintf("%d.%d", meta.ver, meta.minorVer))
}
}
// TODO? Block forever to prevent future connection attempts? suppress future messages about the same node?
return
}
info := tcpInfo{}
if !tcp_chop_keys(&info.box, &info.sig, &keys) { /*panic("Invalid key packet?") ;*/
return
info := tcpInfo{ // used as a map key, so don't include ephemeral link key
box: meta.box,
sig: meta.sig,
}
// Quit the parent call if this is a connection to ourself
equiv := func(k1, k2 []byte) bool {
@@ -121,23 +214,22 @@ func (iface *tcpInterface) handler(sock *net.TCPConn) {
}
if equiv(info.box[:], iface.core.boxPub[:]) {
return
} // testing
}
if equiv(info.sig[:], iface.core.sigPub[:]) {
return
}
// Check if we're authorized to connect to this key / IP
if incoming && !iface.core.peers.isAllowedEncryptionPublicKey(&info.box) {
// Allow unauthorized peers if they're link-local
raddrStr, _, _ := net.SplitHostPort(sock.RemoteAddr().String())
raddr := net.ParseIP(raddrStr)
if !raddr.IsLinkLocalUnicast() {
return
}
}
// Check if we already have a connection to this node, close and block if yes
local := sock.LocalAddr().(*net.TCPAddr)
laddr := net.IPAddr{
IP: local.IP,
Zone: local.Zone,
}
info.localAddr = laddr.String()
remote := sock.RemoteAddr().(*net.TCPAddr)
raddr := net.IPAddr{
IP: remote.IP,
Zone: remote.Zone,
}
info.remoteAddr = raddr.String()
info.localAddr, _, _ = net.SplitHostPort(sock.LocalAddr().String())
info.remoteAddr, _, _ = net.SplitHostPort(sock.RemoteAddr().String())
iface.mutex.Lock()
if blockChan, isIn := iface.conns[info]; isIn {
iface.mutex.Unlock()
@@ -148,56 +240,78 @@ func (iface *tcpInterface) handler(sock *net.TCPConn) {
blockChan := make(chan struct{})
iface.conns[info] = blockChan
iface.mutex.Unlock()
defer close(blockChan)
defer func() {
iface.mutex.Lock()
delete(iface.conns, info)
iface.mutex.Unlock()
close(blockChan)
}()
// Note that multiple connections to the same node are allowed
// E.g. over different interfaces
linkIn := make(chan []byte, 1)
p := iface.core.peers.newPeer(&info.box, &info.sig) //, in, out)
p := iface.core.peers.newPeer(&info.box, &info.sig, getSharedKey(myLinkPriv, &meta.link))
p.linkOut = make(chan []byte, 1)
in := func(bs []byte) {
p.handlePacket(bs, linkIn)
p.handlePacket(bs)
}
out := make(chan []byte, 32) // TODO? what size makes sense
defer close(out)
go func() {
var shadow int64
var stack [][]byte
put := func(msg []byte) {
stack = append(stack, msg)
for len(stack) > 32 {
util_putBytes(stack[0])
stack = stack[1:]
shadow++
}
}
send := func() {
msg := stack[len(stack)-1]
stack = stack[:len(stack)-1]
buf := net.Buffers{tcp_msg[:],
wire_encode_uint64(uint64(len(msg))),
msg}
size := 0
for _, bs := range buf {
size += len(bs)
}
start := time.Now()
send := func(msg []byte) {
msgLen := wire_encode_uint64(uint64(len(msg)))
buf := net.Buffers{tcp_msg[:], msgLen, msg}
buf.WriteTo(sock)
timed := time.Since(start)
pType, _ := wire_decode_uint64(msg)
if pType == wire_LinkProtocolTraffic {
p.updateBandwidth(size, timed)
}
atomic.AddUint64(&p.bytesSent, uint64(len(tcp_msg)+len(msgLen)+len(msg)))
util_putBytes(msg)
}
for msg := range out {
put(msg)
timerInterval := 4 * time.Second
timer := time.NewTimer(timerInterval)
defer timer.Stop()
for {
if shadow != 0 {
p.updateQueueSize(-shadow)
shadow = 0
}
timer.Stop()
select {
case <-timer.C:
default:
}
timer.Reset(timerInterval)
select {
case _ = <-timer.C:
send(nil) // TCP keep-alive traffic
case msg := <-p.linkOut:
send(msg)
case msg, ok := <-out:
if !ok {
return
}
put(msg)
}
for len(stack) > 0 {
// Keep trying to fill the stack (LIFO order) while sending
select {
case msg := <-p.linkOut:
send(msg)
case msg, ok := <-out:
if !ok {
return
}
put(msg)
default:
send()
msg := stack[len(stack)-1]
stack = stack[:len(stack)-1]
send(msg)
p.updateQueueSize(-1)
}
}
}
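The writer goroutine above implements a bounded LIFO send queue: put() pushes onto a stack and drops the oldest entries once there are more than 32, counting drops in shadow so the queue-size accounting stays correct, while send() always pops the newest message first. A minimal standalone sketch of that pattern (the names here are illustrative, not from the repo):

package main

import "fmt"

const stackLimit = 32

// lifoQueue mirrors the put/send logic above: newest-first delivery,
// oldest entries dropped once the limit is exceeded.
type lifoQueue struct {
	stack   [][]byte
	dropped int64 // analogous to the shadow counter
}

func (q *lifoQueue) put(msg []byte) {
	q.stack = append(q.stack, msg)
	for len(q.stack) > stackLimit {
		q.stack = q.stack[1:] // drop the oldest entry
		q.dropped++
	}
}

func (q *lifoQueue) pop() ([]byte, bool) {
	if len(q.stack) == 0 {
		return nil, false
	}
	msg := q.stack[len(q.stack)-1] // newest first
	q.stack = q.stack[:len(q.stack)-1]
	return msg, true
}

func main() {
	var q lifoQueue
	for i := 0; i < 40; i++ {
		q.put([]byte{byte(i)})
	}
	msg, _ := q.pop()
	fmt.Println(msg[0], q.dropped) // 39 8: newest delivered, 8 oldest dropped
}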
@@ -206,26 +320,19 @@ func (iface *tcpInterface) handler(sock *net.TCPConn) {
defer func() { recover() }()
select {
case out <- msg:
p.updateQueueSize(1)
default:
util_putBytes(msg)
}
}
sock.SetNoDelay(true)
go p.linkLoop(linkIn)
p.close = func() { sock.Close() }
setNoDelay(sock, true)
go p.linkLoop()
defer func() {
// Put all of our cleanup here...
p.core.peers.mutex.Lock()
oldPorts := p.core.peers.getPorts()
newPorts := make(map[switchPort]*peer)
for k, v := range oldPorts {
newPorts[k] = v
}
delete(newPorts, p.port)
p.core.peers.putPorts(newPorts)
p.core.peers.mutex.Unlock()
close(linkIn)
p.core.peers.removePeer(p.port)
}()
them := sock.RemoteAddr().(*net.TCPAddr)
them, _, _ := net.SplitHostPort(sock.RemoteAddr().String())
themNodeID := getNodeID(&info.box)
themAddr := address_addrForNodeID(themNodeID)
themAddrString := net.IP(themAddr[:]).String()
@@ -236,7 +343,10 @@ func (iface *tcpInterface) handler(sock *net.TCPConn) {
return
}
func (iface *tcpInterface) reader(sock *net.TCPConn, in func([]byte)) {
// This reads from the socket into a []byte buffer for incoming messages.
// It copies completed messages out of the buffer into a new slice, and passes them to the peer struct via the provided `in func([]byte)` argument.
// Then it shifts any incomplete fragment of data forward so future reads won't overwrite it.
func (iface *tcpInterface) reader(sock net.Conn, in func([]byte)) {
bs := make([]byte, 2*tcp_msgSize)
frag := bs[:0]
for {
@@ -265,29 +375,13 @@ func (iface *tcpInterface) reader(sock *net.TCPConn, in func([]byte)) {
////////////////////////////////////////////////////////////////////////////////
// Magic bytes to check
var tcp_key = [...]byte{'k', 'e', 'y', 's'}
// These are 4 bytes of padding used to catch if something went horribly wrong with the TCP connection.
var tcp_msg = [...]byte{0xde, 0xad, 0xb1, 0x75} // "dead bits"
func tcp_chop_keys(box *boxPubKey, sig *sigPubKey, bs *[]byte) bool {
// This one is pretty simple: we know how long the message should be
// So don't call this with a message that's too short
if len(*bs) < len(tcp_key)+len(*box)+len(*sig) {
return false
}
for idx := range tcp_key {
if (*bs)[idx] != tcp_key[idx] {
return false
}
}
(*bs) = (*bs)[len(tcp_key):]
copy(box[:], *bs)
(*bs) = (*bs)[len(box):]
copy(sig[:], *bs)
(*bs) = (*bs)[len(sig):]
return true
}
// This takes a pointer to a slice as an argument.
// It checks if there's a complete message and, if so, slices out those parts and returns the message, true, and nil.
// If there's no error, but also no complete message, it returns nil, false, and nil.
// If there's an error, it returns nil, false, and the error, which the reader then handles (currently, by returning from the reader, which causes the connection to close).
func tcp_chop_msg(bs *[]byte) ([]byte, bool, error) {
// Returns msg, ok, err
if len(*bs) < len(tcp_msg) {
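The framing that send() writes and tcp_chop_msg parses is simply the 4 tcp_msg magic bytes, a wire-encoded length, and the payload. A toy round trip, under the simplifying assumption that the payload is under 128 bytes so the varint length fits in a single byte:

package main

import (
	"bytes"
	"fmt"
)

var magic = []byte{0xde, 0xad, 0xb1, 0x75} // tcp_msg, the "dead bits"

// frame sketches what send() writes: magic, varint length, payload.
func frame(msg []byte) []byte {
	out := append([]byte{}, magic...)
	out = append(out, byte(len(msg))) // one-byte varint for small lengths
	return append(out, msg...)
}

// chop is a toy tcp_chop_msg: check the magic, read the length, and
// slice out a complete message, or report that more data is needed.
func chop(stream []byte) (msg []byte, ok bool) {
	if len(stream) < len(magic)+1 || !bytes.Equal(stream[:len(magic)], magic) {
		return nil, false
	}
	n := int(stream[len(magic)])
	if len(stream) < len(magic)+1+n {
		return nil, false // incomplete; wait for another read
	}
	return stream[len(magic)+1 : len(magic)+1+n], true
}

func main() {
	msg, ok := chop(frame([]byte("hello")))
	fmt.Println(string(msg), ok) // hello true
}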


@@ -2,34 +2,66 @@ package yggdrasil
// This manages the tun driver to send/recv packets to/from applications
import ethernet "github.com/songgao/packets/ethernet"
import (
"github.com/songgao/packets/ethernet"
"github.com/yggdrasil-network/water"
)
const IPv6_HEADER_LENGTH = 40
const ETHER_HEADER_LENGTH = 14
type tunInterface interface {
IsTUN() bool
IsTAP() bool
Name() string
Read(to []byte) (int, error)
Write(from []byte) (int, error)
Close() error
}
const tun_IPv6_HEADER_LENGTH = 40
const tun_ETHER_HEADER_LENGTH = 14
// Represents a running TUN/TAP interface.
type tunDevice struct {
core *Core
icmpv6 icmpv6
send chan<- []byte
recv <-chan []byte
mtu int
iface tunInterface
iface *water.Interface
}
// Defines which parameters are expected by default for a TUN/TAP adapter on a
// specific platform. These values are populated in the relevant tun_*.go for
// the platform being targeted. They must be set.
type tunDefaultParameters struct {
maximumIfMTU int
defaultIfMTU int
defaultIfName string
defaultIfTAPMode bool
}
// Gets the maximum supported MTU for the platform based on the defaults in
// getDefaults().
func getSupportedMTU(mtu int) int {
if mtu > getDefaults().maximumIfMTU {
return getDefaults().maximumIfMTU
}
return mtu
}
// Initialises the TUN/TAP adapter.
func (tun *tunDevice) init(core *Core) {
tun.core = core
tun.icmpv6.init(tun)
}
// Starts the setup process for the TUN/TAP adapter, and if successful, starts
// the read/write goroutines to handle packets on that interface.
func (tun *tunDevice) start(ifname string, iftapmode bool, addr string, mtu int) error {
if ifname == "none" {
return nil
}
if err := tun.setup(ifname, iftapmode, addr, mtu); err != nil {
return err
}
go func() { panic(tun.read()) }()
go func() { panic(tun.write()) }()
return nil
}
// Writes a packet to the TUN/TAP adapter. If the adapter is running in TAP
// mode then additional ethernet encapsulation is added for the benefit of the
// host operating system.
func (tun *tunDevice) write() error {
for {
data := <-tun.recv
@@ -44,7 +76,7 @@ func (tun *tunDevice) write() error {
ethernet.NotTagged, // VLAN tagging
ethernet.IPv6, // Ethertype
len(data)) // Payload length
copy(frame[ETHER_HEADER_LENGTH:], data[:])
copy(frame[tun_ETHER_HEADER_LENGTH:], data[:])
if _, err := tun.iface.Write(frame); err != nil {
panic(err)
}
@@ -57,23 +89,28 @@ func (tun *tunDevice) write() error {
}
}
// Reads any packets that are waiting on the TUN/TAP adapter. If the adapter
// is running in TAP mode then the ethernet headers will automatically be
// processed and stripped if necessary. If an ICMPv6 packet is found, then
// the relevant helper functions in icmpv6.go are called.
func (tun *tunDevice) read() error {
mtu := tun.mtu
if tun.iface.IsTAP() {
mtu += ETHER_HEADER_LENGTH
mtu += tun_ETHER_HEADER_LENGTH
}
buf := make([]byte, mtu)
for {
n, err := tun.iface.Read(buf)
if err != nil {
panic(err)
// panic(err)
return err
}
o := 0
if tun.iface.IsTAP() {
o = ETHER_HEADER_LENGTH
o = tun_ETHER_HEADER_LENGTH
}
if buf[o]&0xf0 != 0x60 ||
n != 256*int(buf[o+4])+int(buf[o+5])+IPv6_HEADER_LENGTH+o {
n != 256*int(buf[o+4])+int(buf[o+5])+tun_IPv6_HEADER_LENGTH+o {
// Either not an IPv6 packet or not the complete packet for some reason
//panic("Should not happen in testing")
continue
@@ -90,6 +127,9 @@ func (tun *tunDevice) read() error {
}
}
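The sanity check in read() leans on two facts about the fixed IPv6 header: the version lives in the top nibble of byte 0 (0x6), and bytes 4-5 hold the big-endian payload length, so a complete packet is exactly the payload length plus the 40-byte header (plus the ethernet offset in TAP mode). A sketch of the TUN case:

// isCompleteIPv6 mirrors the check in read() for offset 0 (TUN mode):
// version nibble 6, and total bytes read equal to the payload-length
// field plus the 40-byte fixed header.
func isCompleteIPv6(buf []byte, n int) bool {
	const headerLen = 40 // tun_IPv6_HEADER_LENGTH
	if n < headerLen || buf[0]&0xf0 != 0x60 {
		return false
	}
	payloadLen := 256*int(buf[4]) + int(buf[5]) // big-endian uint16
	return n == payloadLen+headerLen
}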
// Closes the TUN/TAP adapter. This is only usually called when the Yggdrasil
// process stops. Typically this operation will happen quickly, but on macOS
// it can block until a read operation is completed.
func (tun *tunDevice) close() error {
if tun.iface == nil {
return nil

src/yggdrasil/tun_bsd.go Normal file

@@ -0,0 +1,171 @@
// +build openbsd freebsd netbsd
package yggdrasil
import (
"encoding/binary"
"os/exec"
"strconv"
"strings"
"syscall"
"unsafe"
"golang.org/x/sys/unix"
"github.com/yggdrasil-network/water"
)
const SIOCSIFADDR_IN6 = (0x80000000) | ((288 & 0x1fff) << 16) | uint32(byte('i'))<<8 | 12
type in6_addrlifetime struct {
ia6t_expire float64
ia6t_preferred float64
ia6t_vltime uint32
ia6t_pltime uint32
}
type sockaddr_in6 struct {
sin6_len uint8
sin6_family uint8
sin6_port uint8
sin6_flowinfo uint32
sin6_addr [8]uint16
sin6_scope_id uint32
}
/*
from <netinet6/in6_var.h>
struct in6_ifreq {
277 char ifr_name[IFNAMSIZ];
278 union {
279 struct sockaddr_in6 ifru_addr;
280 struct sockaddr_in6 ifru_dstaddr;
281 int ifru_flags;
282 int ifru_flags6;
283 int ifru_metric;
284 caddr_t ifru_data;
285 struct in6_addrlifetime ifru_lifetime;
286 struct in6_ifstat ifru_stat;
287 struct icmp6_ifstat ifru_icmp6stat;
288 u_int32_t ifru_scope_id[16];
289 } ifr_ifru;
290 };
*/
type in6_ifreq_mtu struct {
ifr_name [syscall.IFNAMSIZ]byte
ifru_mtu int
}
type in6_ifreq_addr struct {
ifr_name [syscall.IFNAMSIZ]byte
ifru_addr sockaddr_in6
}
type in6_ifreq_flags struct {
ifr_name [syscall.IFNAMSIZ]byte
flags int
}
type in6_ifreq_lifetime struct {
ifr_name [syscall.IFNAMSIZ]byte
ifru_addrlifetime in6_addrlifetime
}
// Sets the IPv6 address of the utun adapter. On all BSD platforms (FreeBSD,
// OpenBSD, NetBSD) an attempt is made to set the adapter properties by using
// a system socket and making syscalls to the kernel. This is not yet refined
// and often doesn't work at all, so if a syscall fails, it falls back to
// calling "ifconfig" instead.
func (tun *tunDevice) setup(ifname string, iftapmode bool, addr string, mtu int) error {
var config water.Config
if ifname[:4] == "auto" {
ifname = "/dev/tap0"
}
if len(ifname) < 9 {
panic("TUN/TAP name must be in format /dev/tunX or /dev/tapX")
}
switch {
case iftapmode || ifname[:8] == "/dev/tap":
config = water.Config{DeviceType: water.TAP}
case !iftapmode || ifname[:8] == "/dev/tun":
panic("TUN mode is not currently supported on this platform, please use TAP instead")
default:
panic("TUN/TAP name must be in format /dev/tunX or /dev/tapX")
}
config.Name = ifname
iface, err := water.New(config)
if err != nil {
panic(err)
}
tun.iface = iface
tun.mtu = getSupportedMTU(mtu)
return tun.setupAddress(addr)
}
func (tun *tunDevice) setupAddress(addr string) error {
var sfd int
var err error
// Create system socket
if sfd, err = unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0); err != nil {
tun.core.log.Printf("Create AF_INET socket failed: %v.", err)
return err
}
// Friendly output
tun.core.log.Printf("Interface name: %s", tun.iface.Name())
tun.core.log.Printf("Interface IPv6: %s", addr)
tun.core.log.Printf("Interface MTU: %d", tun.mtu)
// Create the MTU request
var ir in6_ifreq_mtu
copy(ir.ifr_name[:], tun.iface.Name())
ir.ifru_mtu = int(tun.mtu)
// Set the MTU
if _, _, errno := unix.Syscall(unix.SYS_IOCTL, uintptr(sfd), uintptr(syscall.SIOCSIFMTU), uintptr(unsafe.Pointer(&ir))); errno != 0 {
err = errno
tun.core.log.Printf("Error in SIOCSIFMTU: %v", errno)
// Fall back to ifconfig to set the MTU
cmd := exec.Command("ifconfig", tun.iface.Name(), "mtu", string(tun.mtu))
tun.core.log.Printf("Using ifconfig as fallback: %v", strings.Join(cmd.Args, " "))
output, err := cmd.CombinedOutput()
if err != nil {
tun.core.log.Printf("SIOCSIFMTU fallback failed: %v.", err)
tun.core.log.Println(string(output))
}
}
// Create the address request
// FIXME: I don't work!
var ar in6_ifreq_addr
copy(ar.ifr_name[:], tun.iface.Name())
ar.ifru_addr.sin6_len = uint8(unsafe.Sizeof(ar.ifru_addr))
ar.ifru_addr.sin6_family = unix.AF_INET6
parts := strings.Split(strings.Split(addr, "/")[0], ":")
for i := 0; i < 8; i++ {
addr, _ := strconv.ParseUint(parts[i], 16, 16)
b := make([]byte, 16)
binary.LittleEndian.PutUint16(b, uint16(addr))
ar.ifru_addr.sin6_addr[i] = uint16(binary.BigEndian.Uint16(b))
}
// Set the interface address
if _, _, errno := unix.Syscall(unix.SYS_IOCTL, uintptr(sfd), uintptr(SIOCSIFADDR_IN6), uintptr(unsafe.Pointer(&ar))); errno != 0 {
err = errno
tun.core.log.Printf("Error in SIOCSIFADDR_IN6: %v", errno)
// Fall back to ifconfig to set the address
cmd := exec.Command("ifconfig", tun.iface.Name(), "inet6", addr)
tun.core.log.Printf("Using ifconfig as fallback: %v", strings.Join(cmd.Args, " "))
output, err := cmd.CombinedOutput()
if err != nil {
tun.core.log.Printf("SIOCSIFADDR_IN6 fallback failed: %v.", err)
tun.core.log.Println(string(output))
}
}
return nil
}
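The LittleEndian.PutUint16/BigEndian.Uint16 pair in the address loop above is just a 16-bit byte swap: it stores each colon-separated address group so that, on a little-endian host, the sin6_addr field holds network byte order in memory. An equivalent, more direct form (illustrative only):

// htons: what the PutUint16/Uint16 round trip above computes for each
// 16-bit address group before it's stored in sin6_addr.
func htons(v uint16) uint16 {
	return v<<8 | v>>8
}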


@@ -2,14 +2,29 @@ package yggdrasil
// The darwin platform specific tun parts
import "unsafe"
import "strings"
import "strconv"
import "encoding/binary"
import "golang.org/x/sys/unix"
import (
"encoding/binary"
"strconv"
"strings"
"unsafe"
import water "github.com/songgao/water"
"golang.org/x/sys/unix"
water "github.com/yggdrasil-network/water"
)
// Sane defaults for the Darwin/macOS platform. The "default" options may
// be replaced by the running configuration.
func getDefaults() tunDefaultParameters {
return tunDefaultParameters{
maximumIfMTU: 65535,
defaultIfMTU: 65535,
defaultIfName: "auto",
defaultIfTAPMode: false,
}
}
// Configures the "utun" adapter with the correct IPv6 address and MTU.
func (tun *tunDevice) setup(ifname string, iftapmode bool, addr string, mtu int) error {
if iftapmode {
tun.core.log.Printf("TAP mode is not supported on this platform, defaulting to TUN")
@@ -20,11 +35,11 @@ func (tun *tunDevice) setup(ifname string, iftapmode bool, addr string, mtu int)
panic(err)
}
tun.iface = iface
tun.mtu = mtu
tun.mtu = getSupportedMTU(mtu)
return tun.setupAddress(addr)
}
const SIOCAIFADDR_IN6 = 2155899162
const darwin_SIOCAIFADDR_IN6 = 2155899162
type in6_addrlifetime struct {
ia6t_expire float64
@@ -56,6 +71,8 @@ type ifreq struct {
ifru_mtu uint32
}
// Sets the IPv6 address of the utun adapter. On Darwin/macOS this is done using
// a system socket and making direct syscalls to the kernel.
func (tun *tunDevice) setupAddress(addr string) error {
var fd int
var err error
@@ -75,7 +92,7 @@ func (tun *tunDevice) setupAddress(addr string) error {
ar.ifra_addr.sin6_len = uint8(unsafe.Sizeof(ar.ifra_addr))
ar.ifra_addr.sin6_family = unix.AF_INET6
parts := strings.Split(strings.TrimRight(addr, "/8"), ":")
parts := strings.Split(strings.Split(addr, "/")[0], ":")
for i := 0; i < 8; i++ {
addr, _ := strconv.ParseUint(parts[i], 16, 16)
b := make([]byte, 16)
@@ -94,9 +111,9 @@ func (tun *tunDevice) setupAddress(addr string) error {
tun.core.log.Printf("Interface IPv6: %s", addr)
tun.core.log.Printf("Interface MTU: %d", ir.ifru_mtu)
if _, _, errno := unix.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(SIOCAIFADDR_IN6), uintptr(unsafe.Pointer(&ar))); errno != 0 {
if _, _, errno := unix.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(darwin_SIOCAIFADDR_IN6), uintptr(unsafe.Pointer(&ar))); errno != 0 {
err = errno
tun.core.log.Printf("Error in SIOCAIFADDR_IN6: %v", errno)
tun.core.log.Printf("Error in darwin_SIOCAIFADDR_IN6: %v", errno)
return err
}


@@ -0,0 +1,12 @@
package yggdrasil
// Sane defaults for the FreeBSD platform. The "default" options may
// be replaced by the running configuration.
func getDefaults() tunDefaultParameters {
return tunDefaultParameters{
maximumIfMTU: 32767,
defaultIfMTU: 32767,
defaultIfName: "/dev/tap0",
defaultIfTAPMode: true,
}
}


@@ -1,14 +1,29 @@
package yggdrasil
// The linux platform specific tun parts
// It depends on iproute2 being installed to set things on the tun device
import "fmt"
import "os/exec"
import "strings"
import (
"errors"
"fmt"
"net"
import water "github.com/songgao/water"
"github.com/docker/libcontainer/netlink"
water "github.com/yggdrasil-network/water"
)
// Sane defaults for the Linux platform. The "default" options may
// be replaced by the running configuration.
func getDefaults() tunDefaultParameters {
return tunDefaultParameters{
maximumIfMTU: 65535,
defaultIfMTU: 65535,
defaultIfName: "auto",
defaultIfTAPMode: false,
}
}
// Configures the TAP adapter with the correct IPv6 address and MTU.
func (tun *tunDevice) setup(ifname string, iftapmode bool, addr string, mtu int) error {
var config water.Config
if iftapmode {
@@ -24,32 +39,44 @@ func (tun *tunDevice) setup(ifname string, iftapmode bool, addr string, mtu int)
panic(err)
}
tun.iface = iface
tun.mtu = mtu //1280 // Lets default to the smallest thing allowed for now
tun.mtu = getSupportedMTU(mtu)
return tun.setupAddress(addr)
}
// Configures the TAP adapter with the correct IPv6 address and MTU. Netlink
// is used to do this, so there is not a hard requirement on "ip" or "ifconfig"
// to exist on the system, but this will fail if Netlink is not present in the
// kernel (it nearly always is).
func (tun *tunDevice) setupAddress(addr string) error {
// Set address
cmd := exec.Command("ip", "-f", "inet6",
"addr", "add", addr,
"dev", tun.iface.Name())
tun.core.log.Printf("ip command: %v", strings.Join(cmd.Args, " "))
output, err := cmd.CombinedOutput()
var netIF *net.Interface
ifces, err := net.Interfaces()
if err != nil {
tun.core.log.Printf("Linux ip failed: %v.", err)
tun.core.log.Println(string(output))
return err
}
// Set MTU and bring device up
cmd = exec.Command("ip", "link", "set",
"dev", tun.iface.Name(),
"mtu", fmt.Sprintf("%d", tun.mtu),
"up")
tun.core.log.Printf("ip command: %v", strings.Join(cmd.Args, " "))
output, err = cmd.CombinedOutput()
for _, ifce := range ifces {
if ifce.Name == tun.iface.Name() {
var newIF = ifce
netIF = &newIF // Don't point inside ifces, it's apparently unsafe?...
}
}
if netIF == nil {
return errors.New(fmt.Sprintf("Failed to find interface: %s", tun.iface.Name()))
}
ip, ipNet, err := net.ParseCIDR(addr)
if err != nil {
return err
}
err = netlink.NetworkLinkAddIp(netIF, ip, ipNet)
if err != nil {
return err
}
err = netlink.NetworkSetMTU(netIF, tun.mtu)
if err != nil {
return err
}
netlink.NetworkLinkUp(netIF)
if err != nil {
tun.core.log.Printf("Linux ip failed: %v.", err)
tun.core.log.Println(string(output))
return err
}
return nil

View File

@@ -0,0 +1,12 @@
package yggdrasil
// Sane defaults for the NetBSD platform. The "default" options may
// be replaced by the running configuration.
func getDefaults() tunDefaultParameters {
return tunDefaultParameters{
maximumIfMTU: 9000,
defaultIfMTU: 9000,
defaultIfName: "/dev/tap0",
defaultIfTAPMode: true,
}
}


@@ -0,0 +1,12 @@
package yggdrasil
// Sane defaults for the OpenBSD platform. The "default" options may
// be replaced by the running configuration.
func getDefaults() tunDefaultParameters {
return tunDefaultParameters{
maximumIfMTU: 16384,
defaultIfMTU: 16384,
defaultIfName: "/dev/tap0",
defaultIfTAPMode: true,
}
}


@@ -1,14 +1,25 @@
// +build !linux
// +build !darwin
// +build !windows
// +build !linux,!darwin,!windows,!openbsd,!freebsd,!netbsd
package yggdrasil
import water "github.com/songgao/water"
import water "github.com/yggdrasil-network/water"
// This is to catch unsupported platforms
// If your platform supports tun devices, you could try configuring it manually
// These are sane defaults for any platform that has not been matched by one of
// the other tun_*.go files.
func getDefaults() tunDefaultParameters {
return tunDefaultParameters{
maximumIfMTU: 65535,
defaultIfMTU: 65535,
defaultIfName: "none",
defaultIfTAPMode: false,
}
}
// Creates the TUN/TAP adapter, if supported by the Water library. Note that
// no guarantees are made at this point on an unsupported platform.
func (tun *tunDevice) setup(ifname string, iftapmode bool, addr string, mtu int) error {
var config water.Config
if iftapmode {
@@ -21,10 +32,12 @@ func (tun *tunDevice) setup(ifname string, iftapmode bool, addr string, mtu int)
panic(err)
}
tun.iface = iface
tun.mtu = mtu //1280 // Lets default to the smallest thing allowed for now
tun.mtu = getSupportedMTU(mtu)
return tun.setupAddress(addr)
}
// We don't know how to set the IPv6 address on an unknown platform, so
// log a message asking the user to set it manually, and don't try to do anything further.
func (tun *tunDevice) setupAddress(addr string) error {
tun.core.log.Println("Platform not supported, you must set the address of", tun.iface.Name(), "to", addr)
return nil


@@ -1,12 +1,29 @@
package yggdrasil
import water "github.com/songgao/water"
import "os/exec"
import "strings"
import "fmt"
import (
"fmt"
"os/exec"
"strings"
water "github.com/yggdrasil-network/water"
)
// This is to catch Windows platforms
// Sane defaults for the Windows platform. The "default" options may
// be replaced by the running configuration.
func getDefaults() tunDefaultParameters {
return tunDefaultParameters{
maximumIfMTU: 65535,
defaultIfMTU: 65535,
defaultIfName: "auto",
defaultIfTAPMode: true,
}
}
// Configures the TAP adapter with the correct IPv6 address and MTU. On Windows
// we don't make use of a direct operating system API to do this - we instead
// delegate the hard work to "netsh".
func (tun *tunDevice) setup(ifname string, iftapmode bool, addr string, mtu int) error {
if !iftapmode {
tun.core.log.Printf("TUN mode is not supported on this platform, defaulting to TAP")
@@ -14,6 +31,11 @@ func (tun *tunDevice) setup(ifname string, iftapmode bool, addr string, mtu int)
config := water.Config{DeviceType: water.TAP}
config.PlatformSpecificParams.ComponentID = "tap0901"
config.PlatformSpecificParams.Network = "169.254.0.1/32"
if ifname == "auto" {
config.PlatformSpecificParams.InterfaceName = ""
} else {
config.PlatformSpecificParams.InterfaceName = ifname
}
iface, err := water.New(config)
if err != nil {
panic(err)
@@ -41,7 +63,7 @@ func (tun *tunDevice) setup(ifname string, iftapmode bool, addr string, mtu int)
panic(err)
}
tun.iface = iface
tun.mtu = mtu
tun.mtu = getSupportedMTU(mtu)
err = tun.setupMTU(tun.mtu)
if err != nil {
panic(err)
@@ -49,6 +71,7 @@ func (tun *tunDevice) setup(ifname string, iftapmode bool, addr string, mtu int)
return tun.setupAddress(addr)
}
// Sets the MTU of the TAP adapter.
func (tun *tunDevice) setupMTU(mtu int) error {
// Set MTU
cmd := exec.Command("netsh", "interface", "ipv6", "set", "subinterface",
@@ -65,6 +88,7 @@ func (tun *tunDevice) setupMTU(mtu int) error {
return nil
}
// Sets the IPv6 address of the TAP adapter.
func (tun *tunDevice) setupAddress(addr string) error {
// Set address
cmd := exec.Command("netsh", "interface", "ipv6", "add", "address",


@@ -1,333 +0,0 @@
package yggdrasil
// This communicates with peers via UDP
// It's not as well tested or debugged as the TCP transport
// It's intended to use UDP, so debugging/optimizing this is a high priority
// TODO? use golang.org/x/net/ipv6.PacketConn's ReadBatch and WriteBatch?
// To send all chunks of a message / recv all available chunks in one syscall
// That might be faster on supported platforms, but it needs investigation
// Chunks are currently merged, but outgoing messages aren't chunked
// This is just to support chunking in the future, if it's needed and debugged
// Basically, right now we might send UDP packets that are too large
// TODO remove old/unused code and better document live code
import "net"
import "time"
import "sync"
import "fmt"
type udpInterface struct {
core *Core
sock *net.UDPConn // Or more general PacketConn?
mutex sync.RWMutex // each conn has an owner goroutine
conns map[connAddr]*connInfo
}
type connAddr struct {
ip [16]byte
port int
zone string
}
func (c *connAddr) fromUDPAddr(u *net.UDPAddr) {
copy(c.ip[:], u.IP.To16())
c.port = u.Port
c.zone = u.Zone
}
func (c *connAddr) toUDPAddr() *net.UDPAddr {
var u net.UDPAddr
u.IP = make([]byte, 16)
copy(u.IP, c.ip[:])
u.Port = c.port
u.Zone = c.zone
return &u
}
type connInfo struct {
name string
addr connAddr
peer *peer
linkIn chan []byte
keysIn chan *udpKeys
timeout int // count of how many heartbeats have been missed
in func([]byte)
out chan []byte
countIn uint8
countOut uint8
}
type udpKeys struct {
box boxPubKey
sig sigPubKey
}
func (iface *udpInterface) init(core *Core, addr string) {
iface.core = core
udpAddr, err := net.ResolveUDPAddr("udp", addr)
if err != nil {
panic(err)
}
iface.sock, err = net.ListenUDP("udp", udpAddr)
if err != nil {
panic(err)
}
iface.conns = make(map[connAddr]*connInfo)
go iface.reader()
}
func (iface *udpInterface) sendKeys(addr connAddr) {
udpAddr := addr.toUDPAddr()
msg := []byte{}
msg = udp_encode(msg, 0, 0, 0, nil)
msg = append(msg, iface.core.boxPub[:]...)
msg = append(msg, iface.core.sigPub[:]...)
iface.sock.WriteToUDP(msg, udpAddr)
}
func udp_isKeys(msg []byte) bool {
keyLen := 3 + boxPubKeyLen + sigPubKeyLen
return len(msg) == keyLen && msg[0] == 0x00
}
func (iface *udpInterface) startConn(info *connInfo) {
ticker := time.NewTicker(6 * time.Second)
defer ticker.Stop()
defer func() {
// Cleanup
// FIXME this still leaks a peer struct
iface.mutex.Lock()
delete(iface.conns, info.addr)
iface.mutex.Unlock()
iface.core.peers.mutex.Lock()
oldPorts := iface.core.peers.getPorts()
newPorts := make(map[switchPort]*peer)
for k, v := range oldPorts {
newPorts[k] = v
}
delete(newPorts, info.peer.port)
iface.core.peers.putPorts(newPorts)
iface.core.peers.mutex.Unlock()
close(info.linkIn)
close(info.keysIn)
close(info.out)
iface.core.log.Println("Removing peer:", info.name)
}()
for {
select {
case ks := <-info.keysIn:
{
// FIXME? need signatures/sequence-numbers or something
// Spoofers could lock out a peer with fake/bad keys
if ks.box == info.peer.box && ks.sig == info.peer.sig {
info.timeout = 0
}
}
case <-ticker.C:
{
if info.timeout > 10 {
return
}
info.timeout++
iface.sendKeys(info.addr)
}
}
}
}
func (iface *udpInterface) handleKeys(msg []byte, addr connAddr) {
//defer util_putBytes(msg)
var ks udpKeys
_, _, _, bs := udp_decode(msg)
switch {
case !wire_chop_slice(ks.box[:], &bs):
return
case !wire_chop_slice(ks.sig[:], &bs):
return
}
if ks.box == iface.core.boxPub {
return
}
if ks.sig == iface.core.sigPub {
return
}
iface.mutex.RLock()
conn, isIn := iface.conns[addr]
iface.mutex.RUnlock()
if !isIn {
udpAddr := addr.toUDPAddr()
themNodeID := getNodeID(&ks.box)
themAddr := address_addrForNodeID(themNodeID)
themAddrString := net.IP(themAddr[:]).String()
themString := fmt.Sprintf("%s@%s", themAddrString, udpAddr.String())
conn = &connInfo{
name: themString,
addr: connAddr(addr),
peer: iface.core.peers.newPeer(&ks.box, &ks.sig),
linkIn: make(chan []byte, 1),
keysIn: make(chan *udpKeys, 1),
out: make(chan []byte, 32),
}
/*
conn.in = func (msg []byte) { conn.peer.handlePacket(msg, conn.linkIn) }
conn.peer.out = func (msg []byte) {
start := time.Now()
iface.sock.WriteToUDP(msg, udpAddr)
timed := time.Since(start)
conn.peer.updateBandwidth(len(msg), timed)
util_putBytes(msg)
} // Old version, always one syscall per packet
//*/
/*
conn.peer.out = func (msg []byte) {
defer func() { recover() }()
select {
case conn.out<-msg:
default: util_putBytes(msg)
}
}
go func () {
for msg := range conn.out {
start := time.Now()
iface.sock.WriteToUDP(msg, udpAddr)
timed := time.Since(start)
conn.peer.updateBandwidth(len(msg), timed)
util_putBytes(msg)
}
}()
//*/
//*
var inChunks uint8
var inBuf []byte
conn.in = func(bs []byte) {
//defer util_putBytes(bs)
chunks, chunk, count, payload := udp_decode(bs)
//iface.core.log.Println("DEBUG:", addr, chunks, chunk, count, len(payload))
//iface.core.log.Println("DEBUG: payload:", payload)
if count != conn.countIn {
inChunks = 0
inBuf = inBuf[:0]
conn.countIn = count
}
if chunk <= chunks && chunk == inChunks+1 {
//iface.core.log.Println("GOING:", addr, chunks, chunk, count, len(payload))
inChunks += 1
inBuf = append(inBuf, payload...)
if chunks != chunk {
return
}
msg := append(util_getBytes(), inBuf...)
conn.peer.handlePacket(msg, conn.linkIn)
//iface.core.log.Println("DONE:", addr, chunks, chunk, count, len(payload))
}
}
conn.peer.out = func(msg []byte) {
defer func() { recover() }()
select {
case conn.out <- msg:
default:
util_putBytes(msg)
}
}
go func() {
var out []byte
var chunks [][]byte
for msg := range conn.out {
chunks = chunks[:0]
bs := msg
for len(bs) > udp_chunkSize {
chunks, bs = append(chunks, bs[:udp_chunkSize]), bs[udp_chunkSize:]
}
chunks = append(chunks, bs)
//iface.core.log.Println("DEBUG: out chunks:", len(chunks), len(msg))
if len(chunks) > 255 {
continue
}
start := time.Now()
for idx, bs := range chunks {
nChunks, nChunk, count := uint8(len(chunks)), uint8(idx)+1, conn.countOut
out = udp_encode(out[:0], nChunks, nChunk, count, bs)
//iface.core.log.Println("DEBUG out:", nChunks, nChunk, count, len(bs))
iface.sock.WriteToUDP(out, udpAddr)
}
timed := time.Since(start)
conn.countOut += 1
conn.peer.updateBandwidth(len(msg), timed)
util_putBytes(msg)
}
}()
//*/
iface.mutex.Lock()
iface.conns[addr] = conn
iface.mutex.Unlock()
iface.core.log.Println("Adding peer:", conn.name)
go iface.startConn(conn)
go conn.peer.linkLoop(conn.linkIn)
iface.sendKeys(conn.addr)
}
func() {
defer func() { recover() }()
select {
case conn.keysIn <- &ks:
default:
}
}()
}
func (iface *udpInterface) handlePacket(msg []byte, addr connAddr) {
iface.mutex.RLock()
if conn, isIn := iface.conns[addr]; isIn {
conn.in(msg)
}
iface.mutex.RUnlock()
}
func (iface *udpInterface) reader() {
bs := make([]byte, 2048) // This needs to be large enough for everything...
for {
//iface.core.log.Println("Starting read")
n, udpAddr, err := iface.sock.ReadFromUDP(bs)
//iface.core.log.Println("Read", n, udpAddr.String(), err)
if err != nil {
panic(err)
break
}
if n > 1500 {
panic(n)
}
//msg := append(util_getBytes(), bs[:n]...)
msg := bs[:n]
var addr connAddr
addr.fromUDPAddr(udpAddr)
if udp_isKeys(msg) {
var them address
copy(them[:], udpAddr.IP.To16())
if them.isValid() {
continue
}
if udpAddr.IP.IsLinkLocalUnicast() &&
!iface.core.ifceExpr.MatchString(udpAddr.Zone) {
continue
}
iface.handleKeys(msg, addr)
} else {
iface.handlePacket(msg, addr)
}
}
}
////////////////////////////////////////////////////////////////////////////////
const udp_chunkSize = 508 // Apparently the maximum guaranteed safe IPv4 size
func udp_decode(bs []byte) (chunks, chunk, count uint8, payload []byte) {
if len(bs) >= 3 {
chunks, chunk, count, payload = bs[0], bs[1], bs[2], bs[3:]
}
return
}
func udp_encode(out []byte, chunks, chunk, count uint8, payload []byte) []byte {
return append(append(out, chunks, chunk, count), payload...)
}
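So each UDP datagram carries a 3-byte header: chunks (the total chunk count), chunk (a 1-based index), and count (a per-message sequence number the receiver uses to detect a new message), followed by up to udp_chunkSize payload bytes. A sketch of the sender-side split, mirroring the loop in handleKeys above:

// splitIntoChunks sketches the sender side: cut the message into
// udp_chunkSize pieces and prepend the (chunks, chunk, count) header
// that udp_encode writes.
func splitIntoChunks(msg []byte, count uint8) [][]byte {
	const chunkSize = 508 // udp_chunkSize
	var parts [][]byte
	for len(msg) > chunkSize {
		parts, msg = append(parts, msg[:chunkSize]), msg[chunkSize:]
	}
	parts = append(parts, msg)
	out := make([][]byte, 0, len(parts))
	for idx, part := range parts {
		hdr := []byte{uint8(len(parts)), uint8(idx) + 1, count}
		out = append(out, append(hdr, part...))
	}
	return out
}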


@@ -2,68 +2,35 @@ package yggdrasil
// These are misc. utility functions that didn't really fit anywhere else
import "fmt"
import "runtime"
//import "sync"
func Util_testAddrIDMask() {
for idx := 0; idx < 16; idx++ {
var orig NodeID
orig[8] = 42
for bidx := 0; bidx < idx; bidx++ {
orig[bidx/8] |= (0x80 >> uint8(bidx%8))
}
addr := address_addrForNodeID(&orig)
nid, mask := addr.getNodeIDandMask()
for b := 0; b < len(mask); b++ {
nid[b] &= mask[b]
orig[b] &= mask[b]
}
if *nid != orig {
fmt.Println(orig)
fmt.Println(*addr)
fmt.Println(*nid)
fmt.Println(*mask)
panic(idx)
}
}
}
// A wrapper around runtime.Gosched() so it doesn't need to be imported elsewhere.
func util_yield() {
runtime.Gosched()
}
// A wrapper around runtime.LockOSThread() so it doesn't need to be imported elsewhere.
func util_lockthread() {
runtime.LockOSThread()
}
// A wrapper around runtime.UnlockOSThread() so it doesn't need to be imported elsewhere.
func util_unlockthread() {
runtime.UnlockOSThread()
}
/* Used previously, but removed because casting to an interface{} allocates...
var byteStore sync.Pool = sync.Pool{
New: func () interface{} { return []byte(nil) },
}
func util_getBytes() []byte {
return byteStore.Get().([]byte)[:0]
}
func util_putBytes(bs []byte) {
byteStore.Put(bs) // This is the part that allocates
}
*/
// This is used to buffer recently used slices of bytes, to prevent allocations in the hot loops.
// It's used like a sync.Pool, but with a fixed size and typechecked without type casts to/from interface{} (which were making the profiles look ugly).
var byteStore chan []byte
// Initializes the byteStore
func util_initByteStore() {
if byteStore == nil {
byteStore = make(chan []byte, 32)
}
}
// Gets an empty slice from the byte store, if one is available, or else returns a new nil slice.
func util_getBytes() []byte {
select {
case bs := <-byteStore:
@@ -73,6 +40,7 @@ func util_getBytes() []byte {
}
}
// Puts a slice in the store, if there's room, or else returns and lets the slice get collected.
func util_putBytes(bs []byte) {
select {
case byteStore <- bs:
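Typical use, sketched (illustrative only, inferred from the hot loops elsewhere in the package): take a pooled slice, build into it, and hand the backing array back once the data has been consumed.

// Illustrative use of the byte store from a hot path.
func buildAndRecycle(payload []byte, write func([]byte)) {
	bs := util_getBytes()       // empty slice, possibly with a recycled backing array
	bs = append(bs, payload...) // avoids allocating when the array is big enough
	write(bs)
	util_putBytes(bs) // return the backing array to the store, if there's room
}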

src/yggdrasil/version.go Normal file

@@ -0,0 +1,78 @@
package yggdrasil
// This file contains the version metadata struct
// Used in the initial connection setup and key exchange
// Some of this could arguably go in wire.go instead
// This is the version-specific metadata exchanged at the start of a connection.
// It must always begin with the 4 bytes "meta" and a wire formatted uint64 major version number.
// The current version also includes a minor version number, and the box/sig/link keys that need to be exchanged to open a connection.
type version_metadata struct {
meta [4]byte
ver uint64 // 1 byte in this version
// Everything after this point potentially depends on the version number, and is subject to change in future versions
minorVer uint64 // 1 byte in this version
box boxPubKey
sig sigPubKey
link boxPubKey
}
// Gets a base metadata with no keys set, but with the correct version numbers.
func version_getBaseMetadata() version_metadata {
return version_metadata{
meta: [4]byte{'m', 'e', 't', 'a'},
ver: 0,
minorVer: 2,
}
}
// Gets the length of the metadata for this version, used to know how many bytes to read from the start of a connection.
func version_getMetaLength() (mlen int) {
mlen += 4 // meta
mlen += 1 // ver, as long as it's < 128, which it is in this version
mlen += 1 // minorVer, as long as it's < 128, which it is in this version
mlen += boxPubKeyLen // box
mlen += sigPubKeyLen // sig
mlen += boxPubKeyLen // link
return
}
// Encodes version metadata into its wire format.
func (m *version_metadata) encode() []byte {
bs := make([]byte, 0, version_getMetaLength())
bs = append(bs, m.meta[:]...)
bs = append(bs, wire_encode_uint64(m.ver)...)
bs = append(bs, wire_encode_uint64(m.minorVer)...)
bs = append(bs, m.box[:]...)
bs = append(bs, m.sig[:]...)
bs = append(bs, m.link[:]...)
if len(bs) != version_getMetaLength() {
panic("Inconsistent metadata length")
}
return bs
}
// Decodes version metadata from its wire format into the struct.
func (m *version_metadata) decode(bs []byte) bool {
switch {
case !wire_chop_slice(m.meta[:], &bs):
return false
case !wire_chop_uint64(&m.ver, &bs):
return false
case !wire_chop_uint64(&m.minorVer, &bs):
return false
case !wire_chop_slice(m.box[:], &bs):
return false
case !wire_chop_slice(m.sig[:], &bs):
return false
case !wire_chop_slice(m.link[:], &bs):
return false
}
return true
}
// Checks that the "meta" bytes and the version numbers are the expected values.
func (m *version_metadata) check() bool {
base := version_getBaseMetadata()
return base.meta == m.meta && base.ver == m.ver && base.minorVer == m.minorVer
}
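Putting the pieces together, a sketch of how the TCP handler above uses this struct on both ends of a connection (package-internal types as shown; the helper names here are hypothetical):

// handshakeBytes sketches the sending side: base metadata plus our
// permanent box/sig keys and the ephemeral link key.
func handshakeBytes(box boxPubKey, sig sigPubKey, link boxPubKey) []byte {
	meta := version_getBaseMetadata()
	meta.box = box
	meta.sig = sig
	meta.link = link
	return meta.encode()
}

// acceptMetadata sketches the receiving side: decode and check before
// trusting any of the keys.
func acceptMetadata(bs []byte) (version_metadata, bool) {
	var meta version_metadata
	if !meta.decode(bs) || !meta.check() {
		return version_metadata{}, false
	}
	return meta, true
}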


@@ -5,32 +5,26 @@ package yggdrasil
// TODO clean up unused/commented code, and add better comments to whatever is left
// Packet types, as an Encode_uint64 at the start of each packet
// TODO? make things still work after reordering (after things stabilize more?)
// Type safety would also be nice, `type wire_type uint64`, rewrite as needed?
// Packet types, as wire_encode_uint64(type) at the start of each packet
const (
wire_Traffic = iota // data being routed somewhere, handle for crypto
wire_ProtocolTraffic // protocol traffic, pub keys for crypto
wire_LinkProtocolTraffic // link proto traffic, pub keys for crypto
wire_SwitchAnnounce // inside protocol traffic header
wire_SwitchHopRequest // inside protocol traffic header
wire_SwitchHop // inside protocol traffic header
wire_SwitchMsg // inside link protocol traffic header
wire_SessionPing // inside protocol traffic header
wire_SessionPong // inside protocol traffic header
wire_DHTLookupRequest // inside protocol traffic header
wire_DHTLookupResponse // inside protocol traffic header
wire_SearchRequest // inside protocol traffic header
wire_SearchResponse // inside protocol traffic header
//wire_Keys // udp key packet (boxPub, sigPub)
)
// Encode uint64 using a variable length scheme
// Similar to binary.Uvarint, but big-endian
// Calls wire_put_uint64 on a nil slice.
func wire_encode_uint64(elem uint64) []byte {
return wire_put_uint64(elem, nil)
}
// Occasionally useful for appending to an existing slice (if there's room)
// Encode uint64 using a variable length scheme.
// Similar to binary.Uvarint, but big-endian.
func wire_put_uint64(elem uint64, out []byte) []byte {
bs := make([]byte, 0, 10)
bs = append(bs, byte(elem&0x7f))
@@ -46,6 +40,7 @@ func wire_put_uint64(elem uint64, out []byte) []byte {
return append(out, bs...)
}
// Returns the length of a wire encoded uint64 of this value.
func wire_uint64_len(elem uint64) int {
l := 1
for e := elem >> 7; e > 0; e >>= 7 {
@@ -54,8 +49,8 @@ func wire_uint64_len(elem uint64) int {
return l
}
// Decode uint64 from a []byte slice
// Returns the decoded uint64 and the number of bytes used
// Decode uint64 from a []byte slice.
// Returns the decoded uint64 and the number of bytes used.
func wire_decode_uint64(bs []byte) (uint64, int) {
length := 0
elem := uint64(0)
@@ -70,29 +65,22 @@ func wire_decode_uint64(bs []byte) (uint64, int) {
return elem, length
}
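A few worked values of this big-endian base-128 scheme, plus a reference decoder matching the description above (7 payload bits per byte, high bit set on every byte except the last):

// 0   -> [0x00]
// 127 -> [0x7f]
// 128 -> [0x81, 0x00]  (continuation byte for 1, then the low 7 bits)
// 300 -> [0x82, 0x2c]  (300 = 2<<7 | 44)
func decodeUvarint(bs []byte) (uint64, int) {
	var elem uint64
	for i, b := range bs {
		elem = elem<<7 | uint64(b&0x7f)
		if b&0x80 == 0 {
			return elem, i + 1
		}
	}
	return 0, 0 // incomplete input
}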
// Converts an int64 into uint64 so it can be written to the wire.
// Non-negative integers are mapped to even integers: 0 -> 0, 1 -> 2, etc.
// Negative integers are mapped to odd integers: -1 -> 1, -2 -> 3, etc.
// This means the least significant bit is a sign bit.
func wire_intToUint(i int64) uint64 {
var u uint64
if i < 0 {
u = uint64(-i) << 1
u |= 0x01 // sign bit
} else {
u = uint64(i) << 1
}
return u
return ((uint64(-(i+1))<<1)|0x01)*(uint64(i)>>63) + (uint64(i)<<1)*(^uint64(i)>>63)
}
// Converts uint64 back to int64, generally when being read from the wire.
func wire_intFromUint(u uint64) int64 {
var i int64
i = int64(u >> 1)
if u&0x01 != 0 {
i *= -1
}
return i
return int64(u&0x01)*(-int64(u>>1)-1) + int64(^u&0x01)*int64(u>>1)
}
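The new branchless expressions implement a "zigzag" mapping with no redundant negative zero (under the old code, u=1 decoded to -0). Equivalent branching reference versions of the new mapping, for clarity:

// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...
func zigzag(i int64) uint64 {
	if i < 0 {
		return uint64(-(i+1))<<1 | 1
	}
	return uint64(i) << 1
}

func unzigzag(u uint64) int64 {
	if u&1 != 0 {
		return -int64(u>>1) - 1
	}
	return int64(u >> 1)
}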
////////////////////////////////////////////////////////////////////////////////
// Takes coords, returns coords prefixed with encoded coord length
// Takes coords, returns coords prefixed with encoded coord length.
func wire_encode_coords(coords []byte) []byte {
coordLen := wire_encode_uint64(uint64(len(coords)))
bs := make([]byte, 0, len(coordLen)+len(coords))
@@ -101,19 +89,20 @@ func wire_encode_coords(coords []byte) []byte {
return bs
}
// Puts a length prefix and the coords into bs, returns the wire formatted coords.
// Useful in hot loops where we don't want to allocate and we know the rest of the later parts of the slice are safe to overwrite.
func wire_put_coords(coords []byte, bs []byte) []byte {
bs = wire_put_uint64(uint64(len(coords)), bs)
bs = append(bs, coords...)
return bs
}
// Takes a packet that begins with coords (starting with coord length)
// Returns a slice of coords and the number of bytes read
// Takes a slice that begins with coords (starting with coord length).
// Returns a slice of coords and the number of bytes read.
// Used as part of various decode() functions for structs.
func wire_decode_coords(packet []byte) ([]byte, int) {
coordLen, coordBegin := wire_decode_uint64(packet)
coordEnd := coordBegin + int(coordLen)
//if coordBegin == 0 { panic("No coords found") } // Testing
//if coordEnd > len(packet) { panic("Packet too short") } // Testing
if coordBegin == 0 || coordEnd > len(packet) {
return nil, 0
}
@@ -122,147 +111,52 @@ func wire_decode_coords(packet []byte) ([]byte, int) {
////////////////////////////////////////////////////////////////////////////////
// Announces that we can send parts of a Message with a particular seq
type msgAnnounce struct {
root sigPubKey
tstamp int64
seq uint64
len uint64
//Deg uint64
rseq uint64
}
func (m *msgAnnounce) encode() []byte {
bs := wire_encode_uint64(wire_SwitchAnnounce)
bs = append(bs, m.root[:]...)
bs = append(bs, wire_encode_uint64(wire_intToUint(m.tstamp))...)
bs = append(bs, wire_encode_uint64(m.seq)...)
bs = append(bs, wire_encode_uint64(m.len)...)
//bs = append(bs, wire_encode_uint64(m.Deg)...)
bs = append(bs, wire_encode_uint64(m.rseq)...)
// Encodes a switchMsg into its wire format.
func (m *switchMsg) encode() []byte {
bs := wire_encode_uint64(wire_SwitchMsg)
bs = append(bs, m.Root[:]...)
bs = append(bs, wire_encode_uint64(wire_intToUint(m.TStamp))...)
for _, hop := range m.Hops {
bs = append(bs, wire_encode_uint64(uint64(hop.Port))...)
bs = append(bs, hop.Next[:]...)
bs = append(bs, hop.Sig[:]...)
}
return bs
}
func (m *msgAnnounce) decode(bs []byte) bool {
// Decodes a wire formatted switchMsg into the struct, returns true if successful.
func (m *switchMsg) decode(bs []byte) bool {
var pType uint64
var tstamp uint64
switch {
case !wire_chop_uint64(&pType, &bs):
return false
case pType != wire_SwitchAnnounce:
case pType != wire_SwitchMsg:
return false
case !wire_chop_slice(m.root[:], &bs):
case !wire_chop_slice(m.Root[:], &bs):
return false
case !wire_chop_uint64(&tstamp, &bs):
return false
case !wire_chop_uint64(&m.seq, &bs):
return false
case !wire_chop_uint64(&m.len, &bs):
return false
//case !wire_chop_uint64(&m.Deg, &bs): return false
case !wire_chop_uint64(&m.rseq, &bs):
return false
}
m.tstamp = wire_intFromUint(tstamp)
m.TStamp = wire_intFromUint(tstamp)
for len(bs) > 0 {
var hop switchMsgHop
switch {
case !wire_chop_uint64((*uint64)(&hop.Port), &bs):
return false
case !wire_chop_slice(hop.Next[:], &bs):
return false
case !wire_chop_slice(hop.Sig[:], &bs):
return false
}
m.Hops = append(m.Hops, hop)
}
return true
}
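Since decode() consumes hops until the buffer is exhausted, the wire layout implied by encode()/decode() above is:

// uint64    type (wire_SwitchMsg)
// sigPubKey root
// uint64    zigzag(tstamp)
// repeated until end of buffer:
//   uint64    port
//   sigPubKey next
//   sigBytes  sig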
type msgHopReq struct {
root sigPubKey
tstamp int64
seq uint64
hop uint64
}
func (m *msgHopReq) encode() []byte {
bs := wire_encode_uint64(wire_SwitchHopRequest)
bs = append(bs, m.root[:]...)
bs = append(bs, wire_encode_uint64(wire_intToUint(m.tstamp))...)
bs = append(bs, wire_encode_uint64(m.seq)...)
bs = append(bs, wire_encode_uint64(m.hop)...)
return bs
}
func (m *msgHopReq) decode(bs []byte) bool {
var pType uint64
var tstamp uint64
switch {
case !wire_chop_uint64(&pType, &bs):
return false
case pType != wire_SwitchHopRequest:
return false
case !wire_chop_slice(m.root[:], &bs):
return false
case !wire_chop_uint64(&tstamp, &bs):
return false
case !wire_chop_uint64(&m.seq, &bs):
return false
case !wire_chop_uint64(&m.hop, &bs):
return false
}
m.tstamp = wire_intFromUint(tstamp)
return true
}
type msgHop struct {
root sigPubKey
tstamp int64
seq uint64
hop uint64
port switchPort
next sigPubKey
sig sigBytes
}
func (m *msgHop) encode() []byte {
bs := wire_encode_uint64(wire_SwitchHop)
bs = append(bs, m.root[:]...)
bs = append(bs, wire_encode_uint64(wire_intToUint(m.tstamp))...)
bs = append(bs, wire_encode_uint64(m.seq)...)
bs = append(bs, wire_encode_uint64(m.hop)...)
bs = append(bs, wire_encode_uint64(uint64(m.port))...)
bs = append(bs, m.next[:]...)
bs = append(bs, m.sig[:]...)
return bs
}
func (m *msgHop) decode(bs []byte) bool {
var pType uint64
var tstamp uint64
switch {
case !wire_chop_uint64(&pType, &bs):
return false
case pType != wire_SwitchHop:
return false
case !wire_chop_slice(m.root[:], &bs):
return false
case !wire_chop_uint64(&tstamp, &bs):
return false
case !wire_chop_uint64(&m.seq, &bs):
return false
case !wire_chop_uint64(&m.hop, &bs):
return false
case !wire_chop_uint64((*uint64)(&m.port), &bs):
return false
case !wire_chop_slice(m.next[:], &bs):
return false
case !wire_chop_slice(m.sig[:], &bs):
return false
}
m.tstamp = wire_intFromUint(tstamp)
return true
}
// Format used to check signatures only, so no need to also support decoding
func wire_encode_locator(loc *switchLocator) []byte {
coords := wire_encode_coords(loc.getCoords())
var bs []byte
bs = append(bs, loc.root[:]...)
bs = append(bs, wire_encode_uint64(wire_intToUint(loc.tstamp))...)
bs = append(bs, coords...)
return bs
}
////////////////////////////////////////////////////////////////////////////////
// A utility function used to copy bytes into a slice and advance the beginning of the source slice, returns true if successful.
func wire_chop_slice(toSlice []byte, fromSlice *[]byte) bool {
if len(*fromSlice) < len(toSlice) {
return false
@@ -272,6 +166,7 @@ func wire_chop_slice(toSlice []byte, fromSlice *[]byte) bool {
return true
}
// A utility function to extract coords from a slice and advance the source slices, returning true if successful.
func wire_chop_coords(toCoords *[]byte, fromSlice *[]byte) bool {
coords, coordLen := wire_decode_coords(*fromSlice)
if coordLen == 0 {
@@ -282,6 +177,7 @@ func wire_chop_coords(toCoords *[]byte, fromSlice *[]byte) bool {
return true
}
// A utility function to extract a wire encoded uint64 into the provided pointer while advancing the start of the source slice, returning true if successful.
func wire_chop_uint64(toUInt64 *uint64, fromSlice *[]byte) bool {
dec, decLen := wire_decode_uint64(*fromSlice)
if decLen == 0 {
@@ -296,27 +192,26 @@ func wire_chop_uint64(toUInt64 *uint64, fromSlice *[]byte) bool {
// Wire traffic packets
// The wire format for ordinary IPv6 traffic encapsulated by the network.
type wire_trafficPacket struct {
ttl uint64
coords []byte
handle handle
nonce boxNonce
payload []byte
Coords []byte
Handle handle
Nonce boxNonce
Payload []byte
}
// This is basically MarshalBinary, but decode doesn't allow that...
// Encodes a wire_trafficPacket into its wire format.
func (p *wire_trafficPacket) encode() []byte {
bs := util_getBytes()
bs = wire_put_uint64(wire_Traffic, bs)
bs = wire_put_uint64(p.ttl, bs)
bs = wire_put_coords(p.coords, bs)
bs = append(bs, p.handle[:]...)
bs = append(bs, p.nonce[:]...)
bs = append(bs, p.payload...)
bs = wire_put_coords(p.Coords, bs)
bs = append(bs, p.Handle[:]...)
bs = append(bs, p.Nonce[:]...)
bs = append(bs, p.Payload...)
return bs
}
// Not just UnmarshalBinary because the original slice isn't always copied from
// Decodes an encoded wire_trafficPacket into the struct, returning true if successful.
func (p *wire_trafficPacket) decode(bs []byte) bool {
var pType uint64
switch {
@@ -324,40 +219,39 @@ func (p *wire_trafficPacket) decode(bs []byte) bool {
return false
case pType != wire_Traffic:
return false
case !wire_chop_uint64(&p.ttl, &bs):
case !wire_chop_coords(&p.Coords, &bs):
return false
case !wire_chop_coords(&p.coords, &bs):
case !wire_chop_slice(p.Handle[:], &bs):
return false
case !wire_chop_slice(p.handle[:], &bs):
return false
case !wire_chop_slice(p.nonce[:], &bs):
case !wire_chop_slice(p.Nonce[:], &bs):
return false
}
p.payload = append(util_getBytes(), bs...)
p.Payload = append(util_getBytes(), bs...)
return true
}
// The wire format for protocol traffic, such as dht req/res or session ping/pong packets.
type wire_protoTrafficPacket struct {
ttl uint64
coords []byte
toKey boxPubKey
fromKey boxPubKey
nonce boxNonce
payload []byte
Coords []byte
ToKey boxPubKey
FromKey boxPubKey
Nonce boxNonce
Payload []byte
}
// Encodes a wire_protoTrafficPacket into its wire format.
func (p *wire_protoTrafficPacket) encode() []byte {
coords := wire_encode_coords(p.coords)
coords := wire_encode_coords(p.Coords)
bs := wire_encode_uint64(wire_ProtocolTraffic)
bs = append(bs, wire_encode_uint64(p.ttl)...)
bs = append(bs, coords...)
bs = append(bs, p.toKey[:]...)
bs = append(bs, p.fromKey[:]...)
bs = append(bs, p.nonce[:]...)
bs = append(bs, p.payload...)
bs = append(bs, p.ToKey[:]...)
bs = append(bs, p.FromKey[:]...)
bs = append(bs, p.Nonce[:]...)
bs = append(bs, p.Payload...)
return bs
}
// Decodes an encoded wire_protoTrafficPacket into the struct, returning true if successful.
func (p *wire_protoTrafficPacket) decode(bs []byte) bool {
var pType uint64
switch {
@@ -365,37 +259,37 @@ func (p *wire_protoTrafficPacket) decode(bs []byte) bool {
return false
case pType != wire_ProtocolTraffic:
return false
case !wire_chop_uint64(&p.ttl, &bs):
case !wire_chop_coords(&p.Coords, &bs):
return false
case !wire_chop_coords(&p.coords, &bs):
case !wire_chop_slice(p.ToKey[:], &bs):
return false
case !wire_chop_slice(p.toKey[:], &bs):
case !wire_chop_slice(p.FromKey[:], &bs):
return false
case !wire_chop_slice(p.fromKey[:], &bs):
return false
case !wire_chop_slice(p.nonce[:], &bs):
case !wire_chop_slice(p.Nonce[:], &bs):
return false
}
p.payload = bs
p.Payload = bs
return true
}
// The wire format for link protocol traffic, namely switchMsg.
// There are really two layers of this, with the outer layer using permanent keys, and the inner layer using ephemeral keys.
// The keys themselves are exchanged as part of the connection setup, and then omitted from the packets.
// The two layer logic is handled in peers.go, but it's kind of ugly.
type wire_linkProtoTrafficPacket struct {
//toKey boxPubKey
//fromKey boxPubKey
nonce boxNonce
payload []byte
Nonce boxNonce
Payload []byte
}
// Encodes a wire_linkProtoTrafficPacket into its wire format.
func (p *wire_linkProtoTrafficPacket) encode() []byte {
bs := wire_encode_uint64(wire_LinkProtocolTraffic)
//bs = append(bs, p.toKey[:]...)
//bs = append(bs, p.fromKey[:]...)
bs = append(bs, p.nonce[:]...)
bs = append(bs, p.payload...)
bs = append(bs, p.Nonce[:]...)
bs = append(bs, p.Payload...)
return bs
}
// Decodes an encoded wire_linkProtoTrafficPacket into the struct, returning true if successful.
func (p *wire_linkProtoTrafficPacket) decode(bs []byte) bool {
var pType uint64
switch {
@@ -403,37 +297,35 @@ func (p *wire_linkProtoTrafficPacket) decode(bs []byte) bool {
return false
case pType != wire_LinkProtocolTraffic:
return false
//case !wire_chop_slice(p.toKey[:], &bs):
// return false
//case !wire_chop_slice(p.fromKey[:], &bs):
// return false
case !wire_chop_slice(p.nonce[:], &bs):
case !wire_chop_slice(p.Nonce[:], &bs):
return false
}
p.payload = bs
p.Payload = bs
return true
}
////////////////////////////////////////////////////////////////////////////////
// Encodes a sessionPing into its wire format.
func (p *sessionPing) encode() []byte {
var pTypeVal uint64
if p.isPong {
if p.IsPong {
pTypeVal = wire_SessionPong
} else {
pTypeVal = wire_SessionPing
}
bs := wire_encode_uint64(pTypeVal)
//p.sendPermPub used in top level (crypto), so skipped here
bs = append(bs, p.handle[:]...)
bs = append(bs, p.sendSesPub[:]...)
bs = append(bs, wire_encode_uint64(wire_intToUint(p.tstamp))...)
coords := wire_encode_coords(p.coords)
bs = append(bs, p.Handle[:]...)
bs = append(bs, p.SendSesPub[:]...)
bs = append(bs, wire_encode_uint64(wire_intToUint(p.Tstamp))...)
coords := wire_encode_coords(p.Coords)
bs = append(bs, coords...)
bs = append(bs, wire_encode_uint64(uint64(p.mtu))...)
bs = append(bs, wire_encode_uint64(uint64(p.MTU))...)
return bs
}
// Decodes an encoded sessionPing into the struct, returning true if successful.
func (p *sessionPing) decode(bs []byte) bool {
var pType uint64
var tstamp uint64
@@ -443,37 +335,38 @@ func (p *sessionPing) decode(bs []byte) bool {
return false
case pType != wire_SessionPing && pType != wire_SessionPong:
return false
//p.sendPermPub used in top level (crypto), so skipped here
case !wire_chop_slice(p.handle[:], &bs):
//p.sendPermPub used in top level (crypto), so skipped here
case !wire_chop_slice(p.Handle[:], &bs):
return false
case !wire_chop_slice(p.sendSesPub[:], &bs):
case !wire_chop_slice(p.SendSesPub[:], &bs):
return false
case !wire_chop_uint64(&tstamp, &bs):
return false
case !wire_chop_coords(&p.coords, &bs):
case !wire_chop_coords(&p.Coords, &bs):
return false
case !wire_chop_uint64(&mtu, &bs):
mtu = 1280
}
p.tstamp = wire_intFromUint(tstamp)
p.Tstamp = wire_intFromUint(tstamp)
if pType == wire_SessionPong {
p.isPong = true
p.IsPong = true
}
p.mtu = uint16(mtu)
p.MTU = uint16(mtu)
return true
}
////////////////////////////////////////////////////////////////////////////////
// Encodes a dhtReq into its wire format.
func (r *dhtReq) encode() []byte {
coords := wire_encode_coords(r.coords)
coords := wire_encode_coords(r.Coords)
bs := wire_encode_uint64(wire_DHTLookupRequest)
//bs = append(bs, r.key[:]...)
bs = append(bs, coords...)
bs = append(bs, r.dest[:]...)
bs = append(bs, r.Dest[:]...)
return bs
}
// Decodes an encoded dhtReq into the struct, returning true if successful.
func (r *dhtReq) decode(bs []byte) bool {
var pType uint64
switch {
@@ -481,24 +374,22 @@ func (r *dhtReq) decode(bs []byte) bool {
return false
case pType != wire_DHTLookupRequest:
return false
//case !wire_chop_slice(r.key[:], &bs):
// return false
case !wire_chop_coords(&r.coords, &bs):
case !wire_chop_coords(&r.Coords, &bs):
return false
case !wire_chop_slice(r.dest[:], &bs):
case !wire_chop_slice(r.Dest[:], &bs):
return false
default:
return true
}
}
// Encodes a dhtRes into its wire format.
func (r *dhtRes) encode() []byte {
coords := wire_encode_coords(r.coords)
coords := wire_encode_coords(r.Coords)
bs := wire_encode_uint64(wire_DHTLookupResponse)
//bs = append(bs, r.key[:]...)
bs = append(bs, coords...)
bs = append(bs, r.dest[:]...)
for _, info := range r.infos {
bs = append(bs, r.Dest[:]...)
for _, info := range r.Infos {
coords = wire_encode_coords(info.coords)
bs = append(bs, info.key[:]...)
bs = append(bs, coords...)
@@ -506,6 +397,7 @@ func (r *dhtRes) encode() []byte {
return bs
}
// Decodes an encoded dhtRes into the struct, returning true if successful.
func (r *dhtRes) decode(bs []byte) bool {
var pType uint64
switch {
@@ -513,11 +405,9 @@ func (r *dhtRes) decode(bs []byte) bool {
return false
case pType != wire_DHTLookupResponse:
return false
//case !wire_chop_slice(r.key[:], &bs):
// return false
case !wire_chop_coords(&r.coords, &bs):
case !wire_chop_coords(&r.Coords, &bs):
return false
case !wire_chop_slice(r.dest[:], &bs):
case !wire_chop_slice(r.Dest[:], &bs):
return false
}
for len(bs) > 0 {
@@ -528,63 +418,7 @@ func (r *dhtRes) decode(bs []byte) bool {
case !wire_chop_coords(&info.coords, &bs):
return false
}
r.infos = append(r.infos, &info)
r.Infos = append(r.Infos, &info)
}
return true
}
////////////////////////////////////////////////////////////////////////////////
func (r *searchReq) encode() []byte {
coords := wire_encode_coords(r.coords)
bs := wire_encode_uint64(wire_SearchRequest)
bs = append(bs, r.key[:]...)
bs = append(bs, coords...)
bs = append(bs, r.dest[:]...)
return bs
}
func (r *searchReq) decode(bs []byte) bool {
var pType uint64
switch {
case !wire_chop_uint64(&pType, &bs):
return false
case pType != wire_SearchRequest:
return false
case !wire_chop_slice(r.key[:], &bs):
return false
case !wire_chop_coords(&r.coords, &bs):
return false
case !wire_chop_slice(r.dest[:], &bs):
return false
default:
return true
}
}
func (r *searchRes) encode() []byte {
coords := wire_encode_coords(r.coords)
bs := wire_encode_uint64(wire_SearchResponse)
bs = append(bs, r.key[:]...)
bs = append(bs, coords...)
bs = append(bs, r.dest[:]...)
return bs
}
func (r *searchRes) decode(bs []byte) bool {
var pType uint64
switch {
case !wire_chop_uint64(&pType, &bs):
return false
case pType != wire_SearchResponse:
return false
case !wire_chop_slice(r.key[:], &bs):
return false
case !wire_chop_coords(&r.coords, &bs):
return false
case !wire_chop_slice(r.dest[:], &bs):
return false
default:
return true
}
}


@@ -1,312 +1,255 @@
package main
import "bytes"
import "encoding/hex"
import "encoding/json"
import "encoding/hex"
import "flag"
import "fmt"
import "io/ioutil"
import "net"
import "os"
import "os/signal"
import "syscall"
import "time"
import "regexp"
import _ "net/http/pprof"
import "net/http"
import "math/rand"
import "log"
import "runtime"
import "golang.org/x/net/ipv6"
import "yggdrasil"
import "yggdrasil/config"
import . "yggdrasil"
import "github.com/kardianos/minwinsvc"
import "github.com/neilalexander/hjson-go"
import "github.com/mitchellh/mapstructure"
import _ "github.com/kardianos/minwinsvc"
/**
* This is a very crude wrapper around src/yggdrasil
* It can generate a new config (--genconf)
* It can read a config from stdin (--useconf)
* It can run with an automatic config (--autoconf)
*/
type nodeConfig = config.NodeConfig
type Core = yggdrasil.Core
type node struct {
core Core
sock *ipv6.PacketConn
}
func (n *node) init(cfg *nodeConfig, logger *log.Logger) {
boxPub, err := hex.DecodeString(cfg.BoxPub)
if err != nil {
panic(err)
}
boxPriv, err := hex.DecodeString(cfg.BoxPriv)
if err != nil {
panic(err)
}
sigPub, err := hex.DecodeString(cfg.SigPub)
if err != nil {
panic(err)
}
sigPriv, err := hex.DecodeString(cfg.SigPriv)
if err != nil {
panic(err)
}
n.core.DEBUG_init(boxPub, boxPriv, sigPub, sigPriv)
n.core.DEBUG_setLogger(logger)
ifceExpr, err := regexp.Compile(cfg.LinkLocal)
if err != nil {
panic(err)
}
n.core.DEBUG_setIfceExpr(ifceExpr)
logger.Println("Starting interface...")
n.core.DEBUG_setupAndStartGlobalTCPInterface(cfg.Listen) // Listen for peers on TCP
n.core.DEBUG_setupAndStartGlobalUDPInterface(cfg.Listen) // Also listen on UDP, TODO allow separate configuration for ip/port to listen on each of these
logger.Println("Started interface")
logger.Println("Starting admin socket...")
n.core.DEBUG_setupAndStartAdminInterface(cfg.AdminListen)
logger.Println("Started admin socket")
go func() {
if len(cfg.Peers) == 0 {
return
}
for {
for _, p := range cfg.Peers {
switch {
case len(p) >= 4 && p[:4] == "udp:":
n.core.DEBUG_maybeSendUDPKeys(p[4:])
case len(p) >= 4 && p[:4] == "tcp:":
n.core.DEBUG_addTCPConn(p[4:])
default:
n.core.DEBUG_addTCPConn(p)
}
time.Sleep(time.Second)
}
time.Sleep(time.Minute)
}
}()
}
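
The peer-connection loop above dispatches on an optional transport prefix in each peer string. The same dispatch in isolation, using a hypothetical dialPeer helper purely for illustration:

package main

import (
	"fmt"
	"strings"
)

// dialPeer mirrors the prefix dispatch in the loop above: a "udp:" or "tcp:"
// prefix selects the transport, and bare addresses default to TCP.
func dialPeer(p string) string {
	switch {
	case strings.HasPrefix(p, "udp:"):
		return "udp -> " + p[4:]
	case strings.HasPrefix(p, "tcp:"):
		return "tcp -> " + p[4:]
	default:
		return "tcp -> " + p
	}
}

func main() {
	peers := []string{"udp:192.0.2.1:9001", "tcp:192.0.2.2:9001", "192.0.2.3:9001"}
	for _, p := range peers {
		fmt.Println(dialPeer(p))
	}
}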
// Generates default configuration. This is used when outputting the -genconf
// parameter and also when using -autoconf. The isAutoconf flag is used to
// determine whether the operating system should select a free port by itself
// (which guarantees that there will not be a conflict with any other services)
// or whether to generate a random port number. The only side effect of setting
// isAutoconf is that the TCP and UDP ports will likely end up with different
// port numbers.
func generateConfig(isAutoconf bool) *nodeConfig {
// Create a new core.
core := Core{}
// Generate encryption keys.
bpub, bpriv := core.NewEncryptionKeys()
spub, spriv := core.NewSigningKeys()
// Create a node configuration and populate it.
cfg := nodeConfig{}
cfg.Listen = "[::]:0"
cfg.AdminListen = "[::1]:9001"
cfg.BoxPub = hex.EncodeToString(bpub[:])
cfg.BoxPriv = hex.EncodeToString(bpriv[:])
cfg.SigPub = hex.EncodeToString(spub[:])
cfg.SigPriv = hex.EncodeToString(spriv[:])
cfg.Peers = []string{}
cfg.Multicast = true
cfg.LinkLocal = ""
cfg.IfName = "auto"
cfg.IfMTU = 65535
if runtime.GOOS == "windows" {
cfg.IfTAPMode = true
if isAutoconf {
cfg.Listen = "[::]:0"
} else {
cfg.IfTAPMode = false
r1 := rand.New(rand.NewSource(time.Now().UnixNano()))
cfg.Listen = fmt.Sprintf("[::]:%d", r1.Intn(65534-32768)+32768)
}
cfg.AdminListen = "localhost:9001"
cfg.EncryptionPublicKey = hex.EncodeToString(bpub[:])
cfg.EncryptionPrivateKey = hex.EncodeToString(bpriv[:])
cfg.SigningPublicKey = hex.EncodeToString(spub[:])
cfg.SigningPrivateKey = hex.EncodeToString(spriv[:])
cfg.Peers = []string{}
cfg.AllowedEncryptionPublicKeys = []string{}
cfg.MulticastInterfaces = []string{".*"}
cfg.IfName = core.GetTUNDefaultIfName()
cfg.IfMTU = core.GetTUNDefaultIfMTU()
cfg.IfTAPMode = core.GetTUNDefaultIfTAPMode()
return &cfg
}
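
For reference, the non-autoconf branch above picks its listen port with Intn(65534-32768)+32768: Intn returns 0..32765, so the port always lands in the range 32768..65533. A standalone sketch of the same arithmetic:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// Same arithmetic as generateConfig above.
	r1 := rand.New(rand.NewSource(time.Now().UnixNano()))
	port := r1.Intn(65534-32768) + 32768
	fmt.Printf("[::]:%d\n", port) // always in 32768..65533
}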
// Generates a new configuration and returns it in HJSON format. This is used
// with -genconf.
func doGenconf() string {
cfg := generateConfig(false)
bs, err := hjson.Marshal(cfg)
if err != nil {
panic(err)
}
return string(bs)
}
var multicastAddr = "[ff02::114]:9001"
func (n *node) listen() {
groupAddr, err := net.ResolveUDPAddr("udp6", multicastAddr)
if err != nil {
panic(err)
}
bs := make([]byte, 2048)
for {
nBytes, rcm, fromAddr, err := n.sock.ReadFrom(bs)
if err != nil {
panic(err)
}
//if rcm == nil { continue } // wat
//fmt.Println("DEBUG:", "packet from:", fromAddr.String())
if rcm != nil {
// Windows can't set the flag needed to return a non-nil value here
// So only make these checks if we get something useful back
// TODO? Skip them always, I'm not sure if they're really needed...
if !rcm.Dst.IsLinkLocalMulticast() {
continue
}
if !rcm.Dst.Equal(groupAddr.IP) {
continue
}
}
anAddr := string(bs[:nBytes])
addr, err := net.ResolveTCPAddr("tcp6", anAddr)
if err != nil {
panic(err)
continue
} // Panic for testing, remove later
from := fromAddr.(*net.UDPAddr)
//fmt.Println("DEBUG:", "heard:", addr.IP.String(), "from:", from.IP.String())
if addr.IP.String() != from.IP.String() {
continue
}
addr.Zone = from.Zone
saddr := addr.String()
//if _, isIn := n.peers[saddr]; isIn { continue }
//n.peers[saddr] = struct{}{}
n.core.DEBUG_addTCPConn(saddr) // FIXME? can result in 2 connections per peer
//fmt.Println("DEBUG:", "added multicast peer:", saddr)
}
}
func (n *node) announce() {
groupAddr, err := net.ResolveUDPAddr("udp6", multicastAddr)
if err != nil {
panic(err)
}
var anAddr net.TCPAddr
tcpAddr := n.core.DEBUG_getGlobalTCPAddr()
anAddr.Port = tcpAddr.Port
destAddr, err := net.ResolveUDPAddr("udp6", multicastAddr)
if err != nil {
panic(err)
}
for {
ifaces, err := net.Interfaces()
if err != nil {
panic(err)
}
for _, iface := range ifaces {
n.sock.JoinGroup(&iface, groupAddr)
//err := n.sock.JoinGroup(&iface, groupAddr)
//if err != nil { panic(err) }
addrs, err := iface.Addrs()
if err != nil {
panic(err)
}
for _, addr := range addrs {
addrIP, _, _ := net.ParseCIDR(addr.String())
if addrIP.To4() != nil {
continue
} // IPv6 only
if !addrIP.IsLinkLocalUnicast() {
continue
}
anAddr.IP = addrIP
anAddr.Zone = iface.Name
destAddr.Zone = iface.Name
msg := []byte(anAddr.String())
n.sock.WriteTo(msg, nil, destAddr)
break
}
time.Sleep(time.Second)
}
time.Sleep(time.Second)
}
}
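
Note that announce() above advertises only IPv6 link-local unicast addresses, skipping IPv4 and global-scope addresses entirely. The same filter applied in isolation to some sample CIDRs:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Keep IPv6 only, and of those only link-local unicast addresses,
	// exactly as the inner loop of announce() does.
	for _, a := range []string{"192.0.2.1/24", "2001:db8::1/64", "fe80::1/64"} {
		addrIP, _, err := net.ParseCIDR(a)
		if err != nil {
			panic(err)
		}
		if addrIP.To4() != nil {
			continue // skip IPv4
		}
		if !addrIP.IsLinkLocalUnicast() {
			continue // skip global and other IPv6 scopes
		}
		fmt.Println("would announce from:", addrIP)
	}
}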
// The main function is responsible for configuring and starting Yggdrasil.
func main() {
// Configure the command line parameters.
pprof := flag.Bool("pprof", false, "Run pprof, see http://localhost:6060/debug/pprof/")
genconf := flag.Bool("genconf", false, "print a new config to stdout")
useconf := flag.Bool("useconf", false, "read config from stdin")
useconffile := flag.String("useconffile", "", "read config from specified file path")
normaliseconf := flag.Bool("normaliseconf", false, "use in combination with either -useconf or -useconffile, outputs your configuration normalised")
autoconf := flag.Bool("autoconf", false, "automatic mode (dynamic IP, peer with IPv6 neighbors)")
flag.Parse()
var cfg *nodeConfig
switch {
case *autoconf:
// Use an autoconf-generated config, this will give us random keys and
// port numbers, and will use an automatically selected TUN/TAP interface.
cfg = generateConfig(true)
case *useconffile != "" || *useconf:
// Use a configuration file. If -useconf, the configuration will be read
// from stdin. If -useconffile, the configuration will be read from the
// filesystem.
var config []byte
var err error
if *useconffile != "" {
// Read the file from the filesystem
config, err = ioutil.ReadFile(*useconffile)
} else {
// Read the file from stdin.
config, err = ioutil.ReadAll(os.Stdin)
}
if err != nil {
panic(err)
}
// Generate a new configuration - this gives us a set of sane defaults -
// then parse the configuration we loaded above on top of it. The effect
// of this is that any configuration item that is missing from the provided
// configuration will use a sane default.
cfg = generateConfig(false)
var dat map[string]interface{}
if err := hjson.Unmarshal(config, &dat); err != nil {
panic(err)
}
confJson, err := json.Marshal(dat)
if err != nil {
panic(err)
}
json.Unmarshal(confJson, &cfg)
// For now we will do a little bit to help the user adjust their
// configuration to match the new configuration format, as some of the key
// names have changed recently.
changes := map[string]string{
"Multicast": "",
"LinkLocal": "MulticastInterfaces",
"BoxPub": "EncryptionPublicKey",
"BoxPriv": "EncryptionPrivateKey",
"SigPub": "SigningPublicKey",
"SigPriv": "SigningPrivateKey",
"AllowedBoxPubs": "AllowedEncryptionPublicKeys",
}
// Loop over the mappings above and see if we have anything to fix.
for from, to := range changes {
if _, ok := dat[from]; ok {
if to == "" {
if !*normaliseconf {
log.Println("Warning: Deprecated config option", from, "- please remove")
}
} else {
if !*normaliseconf {
log.Println("Warning: Deprecated config option", from, "- please rename to", to)
}
// If the configuration file doesn't already contain a line with the
// new name then set it to the old value. This makes sure that we
// don't overwrite something that was put there intentionally.
if _, ok := dat[to]; !ok {
dat[to] = dat[from]
}
}
}
}
// Overlay our newly mapped configuration onto the autoconf node config that
// we generated above.
if err = mapstructure.Decode(dat, &cfg); err != nil {
panic(err)
}
// If the -normaliseconf option was specified then remarshal the above
// configuration and print it back to stdout. This lets the user update
// their configuration file with newly mapped names (like above) or to
// convert from plain JSON to commented HJSON.
if *normaliseconf {
bs, err := hjson.Marshal(cfg)
if err != nil {
panic(err)
}
fmt.Println(string(bs))
return
}
case *genconf:
// Generate a new configuration and print it to stdout.
fmt.Println(doGenconf())
default:
// No flags were provided, therefore print the list of flags to stdout.
flag.PrintDefaults()
}
// Have we got a working configuration? If we don't, it probably means that
// none of -autoconf, -useconf or -useconffile was set above. Stop if we
// don't.
if cfg == nil {
return
}
// Create a new logger that logs output to stdout.
logger := log.New(os.Stdout, "", log.Flags())
// If the -pprof flag was provided then start the pprof service on port 6060.
if *pprof {
if err := yggdrasil.StartProfiler(logger); err != nil {
logger.Println(err)
}
}
logger.Println("Initializing...")
// Setup the Yggdrasil node itself. The node{} type includes a Core, so we
// don't need to create this manually.
n := node{}
// Check to see if any multicast interface expressions were provided in the
// config. If they were then set them now.
for _, ll := range cfg.MulticastInterfaces {
ifceExpr, err := regexp.Compile(ll)
if err != nil {
panic(err)
}
n.core.AddMulticastInterfaceExpr(ifceExpr)
}
// Now that we have a working configuration, we can now actually start
// Yggdrasil. This will start the router, switch, DHT node, TCP and UDP
// sockets, TUN/TAP adapter and multicast discovery port.
if err := n.core.Start(cfg, logger); err != nil {
logger.Println("An error occurred during startup")
panic(err)
}
// Check to see if any allowed encryption keys were provided in the config.
// If they were then set them now.
for _, pBoxStr := range cfg.AllowedEncryptionPublicKeys {
n.core.AddAllowedEncryptionPublicKey(pBoxStr)
}
// If any static peers were provided in the configuration above then we should
// configure them. The loop ensures that disconnected peers will eventually
// be reconnected with.
go func() {
if len(cfg.Peers) == 0 {
return
}
for {
for _, p := range cfg.Peers {
n.core.AddPeer(p)
time.Sleep(time.Second)
}
time.Sleep(time.Minute)
}
}()
logger.Println("Started...")
if cfg.Multicast {
addr, err := net.ResolveUDPAddr("udp", multicastAddr)
if err != nil {
panic(err)
}
listenString := fmt.Sprintf("[::]:%v", addr.Port)
conn, err := net.ListenPacket("udp6", listenString)
if err != nil {
panic(err)
}
//defer conn.Close() // Let it close on its own when the application exits
n.sock = ipv6.NewPacketConn(conn)
if err = n.sock.SetControlMessage(ipv6.FlagDst, true); err != nil {
// Windows can't set this flag, so we need to handle it in other ways
//panic(err)
}
go n.listen()
go n.announce()
}
// The Stop function ensures that the TUN/TAP adapter is correctly shut down
// before the program exits.
defer func() {
n.core.Stop()
}()
// Make some nice output that tells us what our IPv6 address and subnet are.
// This is just logged to stdout for the user.
address := n.core.GetAddress()
subnet := n.core.GetSubnet()
logger.Printf("Your IPv6 address is %s", address.String())
logger.Printf("Your IPv6 subnet is %s", subnet.String())
// Catch interrupts from the operating system to exit gracefully.
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
// Create a function to capture the service being stopped on Windows.
winTerminate := func() {
c <- os.Interrupt
}
minwinsvc.SetOnExit(winTerminate)
// Wait for the terminate/interrupt signal. Once a signal is received, the
// deferred Stop function above will run which will shut down TUN/TAP.
<-c
logger.Println("Stopping...")
}
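
The config-loading path in main() above has three stages: decode HJSON into a generic map, copy deprecated keys to their new names, then overlay the map onto a typed struct so anything missing keeps its default. A self-contained sketch of that path, using the same hjson-go and mapstructure libraries; toyConfig is a hypothetical stand-in for config.NodeConfig:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
	"github.com/neilalexander/hjson-go"
)

// toyConfig stands in for config.NodeConfig; only the load path is the point.
type toyConfig struct {
	Listen              string
	EncryptionPublicKey string
}

func main() {
	// Start from defaults, as generateConfig(false) does above.
	cfg := toyConfig{Listen: "[::]:12345"}
	// Decode HJSON into a generic map first, so deprecated keys can be seen.
	src := []byte("{\n  # old key name, still accepted\n  BoxPub: abc123\n}")
	var dat map[string]interface{}
	if err := hjson.Unmarshal(src, &dat); err != nil {
		panic(err)
	}
	// Rename pass: copy old keys to their new names unless already set.
	changes := map[string]string{"BoxPub": "EncryptionPublicKey"}
	for from, to := range changes {
		if v, ok := dat[from]; ok {
			if _, ok := dat[to]; !ok {
				dat[to] = v
			}
		}
	}
	// Overlay the map onto the typed struct; missing fields keep defaults.
	if err := mapstructure.Decode(dat, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // Listen keeps its default value
}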

yggdrasilctl.go (new file, 222 lines)

@@ -0,0 +1,222 @@
package main
import "flag"
import "fmt"
import "strings"
import "net"
import "sort"
import "encoding/json"
import "strconv"
import "os"
type admin_info map[string]interface{}
func main() {
server := flag.String("endpoint", "localhost:9001", "Admin socket endpoint")
injson := flag.Bool("json", false, "Output in JSON format")
flag.Parse()
args := flag.Args()
if len(args) == 0 {
fmt.Println("usage:", os.Args[0], "[-endpoint=localhost:9001] [-json] command [key=value] [...]")
fmt.Println("example:", os.Args[0], "getPeers")
fmt.Println("example:", os.Args[0], "setTunTap name=auto mtu=1500 tap_mode=false")
fmt.Println("example:", os.Args[0], "-endpoint=localhost:9001 getDHT")
return
}
conn, err := net.Dial("tcp", *server)
if err != nil {
panic(err)
}
defer conn.Close()
decoder := json.NewDecoder(conn)
encoder := json.NewEncoder(conn)
send := make(admin_info)
recv := make(admin_info)
for c, a := range args {
if c == 0 {
send["request"] = a
continue
}
tokens := strings.Split(a, "=")
if i, err := strconv.Atoi(tokens[1]); err == nil {
send[tokens[0]] = i
} else {
switch tokens[1] {
case "true":
send[tokens[0]] = true
case "false":
send[tokens[0]] = false
default:
send[tokens[0]] = tokens[1]
}
}
}
if err := encoder.Encode(&send); err != nil {
panic(err)
}
if err := decoder.Decode(&recv); err == nil {
if recv["status"] == "error" {
if err, ok := recv["error"]; ok {
fmt.Println("Error:", err)
} else {
fmt.Println("Unspecified error occured")
}
os.Exit(1)
}
if _, ok := recv["request"]; !ok {
fmt.Println("Missing request in response (malformed response?)")
return
}
if _, ok := recv["response"]; !ok {
fmt.Println("Missing response body (malformed response?)")
return
}
req := recv["request"].(map[string]interface{})
res := recv["response"].(map[string]interface{})
if *injson {
if json, err := json.MarshalIndent(res, "", " "); err == nil {
fmt.Println(string(json))
}
os.Exit(0)
}
switch req["request"] {
case "dot":
fmt.Println(res["dot"])
case "help", "getPeers", "getSwitchPeers", "getDHT", "getSessions":
maxWidths := make(map[string]int)
var keyOrder []string
keysOrdered := false
for _, tlv := range res {
for slk, slv := range tlv.(map[string]interface{}) {
if !keysOrdered {
for k := range slv.(map[string]interface{}) {
keyOrder = append(keyOrder, fmt.Sprint(k))
}
sort.Strings(keyOrder)
keysOrdered = true
}
for k, v := range slv.(map[string]interface{}) {
if len(fmt.Sprint(slk)) > maxWidths["key"] {
maxWidths["key"] = len(fmt.Sprint(slk))
}
if len(fmt.Sprint(v)) > maxWidths[k] {
maxWidths[k] = len(fmt.Sprint(v))
if maxWidths[k] < len(k) {
maxWidths[k] = len(k)
}
}
}
}
if len(keyOrder) > 0 {
fmt.Printf("%-"+fmt.Sprint(maxWidths["key"])+"s ", "")
for _, v := range keyOrder {
fmt.Printf("%-"+fmt.Sprint(maxWidths[v])+"s ", v)
}
fmt.Println()
}
for slk, slv := range tlv.(map[string]interface{}) {
fmt.Printf("%-"+fmt.Sprint(maxWidths["key"])+"s ", slk)
for _, k := range keyOrder {
preformatted := slv.(map[string]interface{})[k]
var formatted string
switch k {
case "bytes_sent", "bytes_recvd":
formatted = fmt.Sprintf("%d", uint(preformatted.(float64)))
case "uptime", "last_seen":
seconds := uint(preformatted.(float64)) % 60
minutes := uint(preformatted.(float64)/60) % 60
hours := uint(preformatted.(float64) / 60 / 60)
formatted = fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
default:
formatted = fmt.Sprint(preformatted)
}
fmt.Printf("%-"+fmt.Sprint(maxWidths[k])+"s ", formatted)
}
fmt.Println()
}
}
case "getTunTap", "setTunTap":
for k, v := range res {
fmt.Println("Interface name:", k)
if mtu, ok := v.(map[string]interface{})["mtu"].(float64); ok {
fmt.Println("Interface MTU:", mtu)
}
if tap_mode, ok := v.(map[string]interface{})["tap_mode"].(bool); ok {
fmt.Println("TAP mode:", tap_mode)
}
}
case "getSelf":
for k, v := range res["self"].(map[string]interface{}) {
fmt.Println("IPv6 address:", k)
if subnet, ok := v.(map[string]interface{})["subnet"].(string); ok {
fmt.Println("IPv6 subnet:", subnet)
}
if coords, ok := v.(map[string]interface{})["coords"].(string); ok {
fmt.Println("Coords:", coords)
}
}
case "addPeer", "removePeer", "addAllowedEncryptionPublicKey", "removeAllowedEncryptionPublicKey":
if _, ok := res["added"]; ok {
for _, v := range res["added"].([]interface{}) {
fmt.Println("Added:", fmt.Sprint(v))
}
}
if _, ok := res["not_added"]; ok {
for _, v := range res["not_added"].([]interface{}) {
fmt.Println("Not added:", fmt.Sprint(v))
}
}
if _, ok := res["removed"]; ok {
for _, v := range res["removed"].([]interface{}) {
fmt.Println("Removed:", fmt.Sprint(v))
}
}
if _, ok := res["not_removed"]; ok {
for _, v := range res["not_removed"].([]interface{}) {
fmt.Println("Not removed:", fmt.Sprint(v))
}
}
case "getAllowedEncryptionPublicKeys":
if _, ok := res["allowed_box_pubs"]; !ok {
fmt.Println("All connections are allowed")
} else if res["allowed_box_pubs"] == nil {
fmt.Println("All connections are allowed")
} else {
fmt.Println("Connections are allowed only from the following public box keys:")
for _, v := range res["allowed_box_pubs"].([]interface{}) {
fmt.Println("-", v)
}
}
case "getMulticastInterfaces":
if _, ok := res["multicast_interfaces"]; !ok {
fmt.Println("No multicast interfaces found")
} else if res["multicast_interfaces"] == nil {
fmt.Println("No multicast interfaces found")
} else {
fmt.Println("Multicast peer discovery is active on:")
for _, v := range res["multicast_interfaces"].([]interface{}) {
fmt.Println("-", v)
}
}
default:
if json, err := json.MarshalIndent(recv["response"], "", " "); err == nil {
fmt.Println(string(json))
}
}
}
if v, ok := recv["status"]; ok && v == "error" {
os.Exit(1)
}
os.Exit(0)
}
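
The entire admin protocol spoken above is one JSON object per request and one JSON object per response over a plain TCP connection. A minimal client sketch of that exchange, assuming a node is running with the default admin socket on localhost:9001:

package main

import (
	"encoding/json"
	"fmt"
	"net"
)

func main() {
	// Connect to the admin socket (default AdminListen of localhost:9001).
	conn, err := net.Dial("tcp", "localhost:9001")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// One JSON object out...
	if err := json.NewEncoder(conn).Encode(map[string]interface{}{
		"request": "getSelf",
	}); err != nil {
		panic(err)
	}
	// ...and one JSON object back.
	var recv map[string]interface{}
	if err := json.NewDecoder(conn).Decode(&recv); err != nil {
		panic(err)
	}
	fmt.Println("status:", recv["status"])
	fmt.Println("response:", recv["response"])
}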