mirror of https://github.com/yggdrasil-network/yggdrasil-go.git
synced 2024-12-21 23:47:31 +00:00

first code/readme/license commit

parent 35852be36d
commit d7e6d814a0

LICENSE (new file, 185 lines)
@@ -0,0 +1,185 @@
This software is licensed under the LGPLv3, included below.

As a special exception to the GNU Lesser General Public License version 3
("LGPL3"), the copyright holders of this Library give you permission to
convey to a third party a Combined Work that links statically or dynamically
to this Library without providing any Minimal Corresponding Source or
Minimal Application Code as set out in 4d or providing the installation
information set out in section 4e, provided that you comply with the other
provisions of LGPL3 and provided that you meet, for the Application the
terms and conditions of the license(s) which apply to the Application.

Except as stated in this special exception, the provisions of LGPL3 will
continue to comply in full to this Library. If you modify this Library, you
may apply this exception to your version of this Library, but you are not
obliged to do so. If you do not wish to do so, delete this exception
statement from your version. This exception does not (and cannot) modify any
license terms which apply to the Application, with which you must still
comply.


                   GNU LESSER GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.


  This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.

  0. Additional Definitions.

  As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.

  "The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.

  An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.

  A "Combined Work" is a work produced by combining or linking an
Application with the Library.  The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".

  The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.

  The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.

  1. Exception to Section 3 of the GNU GPL.

  You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.

  2. Conveying Modified Versions.

  If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:

   a) under this License, provided that you make a good faith effort to
   ensure that, in the event an Application does not supply the
   function or data, the facility still operates, and performs
   whatever part of its purpose remains meaningful, or

   b) under the GNU GPL, with none of the additional permissions of
   this License applicable to that copy.

  3. Object Code Incorporating Material from Library Header Files.

  The object code form of an Application may incorporate material from
a header file that is part of the Library.  You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:

   a) Give prominent notice with each copy of the object code that the
   Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the object code with a copy of the GNU GPL and this license
   document.

  4. Combined Works.

  You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:

   a) Give prominent notice with each copy of the Combined Work that
   the Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the Combined Work with a copy of the GNU GPL and this license
   document.

   c) For a Combined Work that displays copyright notices during
   execution, include the copyright notice for the Library among
   these notices, as well as a reference directing the user to the
   copies of the GNU GPL and this license document.

   d) Do one of the following:

       0) Convey the Minimal Corresponding Source under the terms of this
       License, and the Corresponding Application Code in a form
       suitable for, and under terms that permit, the user to
       recombine or relink the Application with a modified version of
       the Linked Version to produce a modified Combined Work, in the
       manner specified by section 6 of the GNU GPL for conveying
       Corresponding Source.

       1) Use a suitable shared library mechanism for linking with the
       Library.  A suitable mechanism is one that (a) uses at run time
       a copy of the Library already present on the user's computer
       system, and (b) will operate properly with a modified version
       of the Library that is interface-compatible with the Linked
       Version.

   e) Provide Installation Information, but only if you would otherwise
   be required to provide such information under section 6 of the
   GNU GPL, and only to the extent that such information is
   necessary to install and execute a modified version of the
   Combined Work produced by recombining or relinking the
   Application with a modified version of the Linked Version. (If
   you use option 4d0, the Installation Information must accompany
   the Minimal Corresponding Source and Corresponding Application
   Code. If you use option 4d1, you must provide the Installation
   Information in the manner specified by section 6 of the GNU GPL
   for conveying Corresponding Source.)

  5. Combined Libraries.

  You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:

   a) Accompany the combined library with a copy of the same work based
   on the Library, uncombined with any other library facilities,
   conveyed under the terms of this License.

   b) Give prominent notice with the combined library that part of it
   is a work based on the Library, and explaining where to find the
   accompanying uncombined form of the same work.

  6. Revised Versions of the GNU Lesser General Public License.

  The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.

  Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.

  If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
README.md (92 lines changed)
@@ -1,2 +1,90 @@
# yggdrasil-go
An experiment in scalable routing as an encrypted IPv6 overlay network
# Yggdrasil

## What is it?

This is a toy implementation of an encrypted IPv6 network, with many good ideas stolen from [cjdns](https://github.com/cjdelisle/cjdns), which was written to test a particular routing scheme that I cobbled together one random Wednesday afternoon.
It is notably not a shortest-path routing scheme; the goal is scalable name-independent routing on dynamic networks with an internet-like topology.
It's named Yggdrasil after the world tree from Norse mythology, because that seemed like the obvious name given how it works.
For a longer, rambling version of this readme with more information, see: [doc](doc/README.md).

This is a toy / proof-of-principle, so it's not even alpha quality software--any nontrivial update is likely to break backwards compatibility with no possibility of a clean upgrade path.
You're encouraged to play with it, but I strongly advise against using it for anything mission critical.

## Obligatory performance propaganda

A [simplified model](misc/sim/treesim-forward.py) of this routing scheme has been tested in simulation on the 9204-node [skitter](https://www.caida.org/tools/measurement/skitter/) network topology dataset from [CAIDA](https://www.caida.org/), and compared with the results in [arxiv:0708.2309](https://arxiv.org/abs/0708.2309).
Using the routing scheme as implemented in this code, I observe an average multiplicative stretch of 1.08 (i.e. paths are, on average, 8% longer than the shortest possible path), with an average routing table size of 6 for the name-dependent scheme, and approximately 30 additional (but smaller) entries needed for the name-independent routing table.
The number of name-dependent routing table entries needed is proportional to node degree, so that 6 is the mean of a distribution with a long tail, but I believe this is an acceptable tradeoff.
The size of name-dependent routing table entries is relatively large, due to cryptographic signatures associated with routing table updates, but in the absence of cryptographic overhead I believe each entry is otherwise comparable to the BC routing scheme described in the above paper.
A modified version of this scheme, with the same resource requirements, achieves a multiplicative stretch of 1.02, which drops to 1.01 if source routing is used.
Neither of these optimizations is present in the current implementation: the former depends on network state information that I haven't found a way to cryptographically secure, and the latter is both tedious to implement and would make debugging other aspects of the implementation more difficult.
## Building

1. Install Go (tested on 1.9; I use [godeb](https://github.com/niemeyer/godeb)).
2. Clone this repository.
3. `./build`

The build script sets its own `$GOPATH`, so the build environment is self-contained.
This code only works on Linux, due to a few OS-specific parts that I haven't had an interest in rewriting, but see the optional example below for a way to share connectivity with the rest of a network.
## Running

To run the program, you'll need permission to create a `tun` device and configure it using `ip`.
If you don't want to mess with capabilities for the `tun` device, then using `sudo` should work, with the usual security caveats about running a program as root.

To run with default settings:

1. `./yggdrasil --autoconf`

That will generate a new set of keys (and an IP address) each time the program is run.
The program will bind to all addresses on a random port and listen for incoming connections.
It will send announcements over IPv6 link-local multicast, and it will attempt to start a connection if it hears an announcement from another device.
In practice, you probably want to run this instead:

1. `./yggdrasil --genconf > conf.json`
2. `./yggdrasil --useconf < conf.json`

This keeps a persistent set of keys (and, by extension, IP address) and gives you the option of editing the configuration file.
If you want to use it as an overlay network on top of e.g. the internet, then you can do so by adding the remote device's domain/address and port (as a string, e.g. `"1.2.3.4:5678"`) to the list of `Peers` in the configuration file.
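For illustration, the relevant part of the generated `conf.json` might then look something like the following (only the `Peers` field is described here; the other fields of a generated file are omitted, and the exact format may change between versions):

```
{
  "Peers": ["1.2.3.4:5678"]
}
```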
## Optional: advertise a prefix locally

Suppose a node has generated the address: `fd00:1111:2222:3333:4444:5555:6666:7777`

Then the node may also use addresses from the prefix: `fd80:1111:2222:3333::/64` (note that `fd00` changed to `fd80`: a separate `/9` is used for prefixes, but the rest of the first 64 bits are the same).
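To make the address-to-prefix relationship concrete, here's a small sketch in Go (illustrative only, with a hypothetical helper name; not code from this repository): keep the first 64 bits of the node's address and set the `/9` bit, moving from `fd00::/9` to `fd80::/9`.

```
package main

import (
	"fmt"
	"net"
)

// prefixForAddr maps a node's address to its advertisable /64 prefix, as
// described above: same first 64 bits, with the /9 bit flipped on.
func prefixForAddr(addr net.IP) *net.IPNet {
	prefix := make(net.IP, 16)
	copy(prefix[:8], addr[:8]) // keep the first 64 bits
	prefix[1] |= 0x80          // fd00::/9 -> fd80::/9
	return &net.IPNet{IP: prefix, Mask: net.CIDRMask(64, 128)}
}

func main() {
	addr := net.ParseIP("fd00:1111:2222:3333:4444:5555:6666:7777")
	fmt.Println(prefixForAddr(addr)) // fd80:1111:2222:3333::/64
}
```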
To advertise this prefix and a route to `fd00::/8`, the following seems to work for me:

1. Enable IPv6 forwarding (e.g. `sysctl -w net.ipv6.conf.all.forwarding=1` or add it to sysctl.conf).

2. `ip addr add fd80:1111:2222:3333::1/64 dev eth0` or similar, to assign an address for the router to use in that prefix, where the LAN is reachable through `eth0`.

3. Install/run `radvd` with something like the following in `/etc/radvd.conf`:
```
interface eth0
{
        AdvSendAdvert on;
        prefix fd80:1111:2222:3333::/64 {
                AdvOnLink on;
                AdvAutonomous on;
        };
        route fd00::/8 {};
};
```
This is enough to give unsupported devices on my LAN access to the network, with a few security and performance cautions outlined in the [doc](doc/README.md) file.

## How does it work?

I'd rather not try to explain in the readme, but I describe it further in the [doc](doc/README.md) file, so you can check there if you're interested.
Be warned that it's still not a very good explanation, but it at least gives a high-level overview and links to some relevant work by other people.
I may try to write another document at some point, to thoroughly explain how everything works, if the need arises.

## License

This code is released under the terms of the LGPLv3, but with an added exception that was shamelessly taken from [godeb](https://github.com/niemeyer/godeb).
Under certain circumstances, this exception permits distribution of binaries that are (statically or dynamically) linked with this code, without requiring the distribution of Minimal Corresponding Source or Minimal Application Code.
For more details, see: [LICENSE](LICENSE).
build (new executable file, 10 lines)
@@ -0,0 +1,10 @@
#!/bin/bash
# Fetch dependencies into a repo-local GOPATH, then build each top-level .go file.
export GOPATH=$PWD
echo "Downloading..."
go get -d -v yggdrasil
for file in *.go ; do
  echo "Building: $file"
  go build -v "$file"
  #go build -ldflags="-s -w" -v "$file"
  #upx --brute ${file/.go/}
done
doc/README.md (new file, 188 lines)
@@ -0,0 +1,188 @@
# Yggdrasil-go

## What is it?

This is a toy implementation of an encrypted IPv6 network.
A number of years ago, I started to spend some of my free time studying routing schemes, and eventually decided that it made sense to come up with my own.
After much time spent reflecting on the problem, and a few failed starts, I eventually cobbled together one that seemed to have, more or less, the performance characteristics I was looking for.
I resolved to eventually write a proof-of-principle / test implementation, and I thought it would make sense to include many of the nice bells and whistles that I've grown accustomed to from using [cjdns](https://github.com/cjdelisle/cjdns), plus a few additional features that I wanted to test.
Fast forward through a couple years of procrastination, and I've finally started working on it in my limited spare time.
I've found that it's now marginally more interesting than embarrassing, so here it is.

The routing scheme was designed for scalable name-independent routing on graphs with an internet-like topology.
By internet-like, I mean that the network has a densely connected core with many triangles, a diameter that increases slowly with network size, and where any sparse edges tend to be relatively tree-like, all of which appear to be common features of large graphs describing "organically" grown relationships.
By scalable name-independent routing, I mean:

1. Scalable: resource consumption should grow slowly with the size of the network.
In particular, for internet-like networks, the goal is to use only a (poly)logarithmic amount of memory, use a logarithmic amount of bandwidth per one-hop neighbor for control traffic, and to maintain low average multiplicative path stretch (introducing overhead of perhaps a few percent) that does not become worse as the network grows.

2. Name-independent: a node's identifier should be independent of network topology and state, such that a node may freely change its identifier in a static network, or keep it static under state changes in a dynamic network.
In particular, addresses are self-assigned and derived from a public key, which circumvents the need for a centralized addressing authority or public key infrastructure (see the sketch after this list).
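To illustrate the idea behind key-derived addressing (a simplified sketch with hypothetical helper names; the real derivation in this code differs in detail), an address can be built by hashing a public key and placing the leading hash bytes under the self-assigned `fd00::/8` prefix:

```
package main

import (
	"crypto/ed25519"
	"crypto/sha512"
	"fmt"
	"net"
)

// addrForKey sketches a self-certifying address: hash the public key and use
// the leading hash bytes as the rest of the address under fd00::/8. Anyone
// can verify the key-to-address binding by re-hashing; no registry is needed.
func addrForKey(pub ed25519.PublicKey) net.IP {
	sum := sha512.Sum512(pub)
	addr := make(net.IP, 16)
	addr[0] = 0xfd // self-assigned ULA prefix
	copy(addr[1:], sum[:15])
	return addr
}

func main() {
	pub, _, err := ed25519.GenerateKey(nil) // nil reader uses crypto/rand
	if err != nil {
		panic(err)
	}
	fmt.Println("address:", addrForKey(pub))
}
```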
Running this code will:

1. Set up a `tun` device and assign it a Unique Local Address (ULA) in `fd00::/8`.
2. Connect to other nodes running the software.
3. Route traffic for and through other nodes.

A device's ULA is actually from `fd00::/9`, and a matching `/64` prefix is available under `fd80::/9`. This allows the node to advertise a route on its LAN, as a workaround for unsupported devices.
## Building

1. Install Go (tested on 1.9; I use [godeb](https://github.com/niemeyer/godeb)).
2. Clone this repository.
3. `./build`

It's written in Go because I felt like learning a new language, and Go seemed like an easy language to learn while still being a reasonable choice of language for prototyping network code.
Note that the build script defines its own `$GOPATH`, so the build and its dependencies should be self-contained.
It only works on Linux at this time, because a little code (related to the `tun` device) is platform dependent, and changing that hasn't been a high priority.
## Running

To run the program, you'll need permission to create a `tun` device and configure it using `ip`.
If you don't want to mess with capabilities for the `tun` device, then using `sudo` should work, with the usual security caveats about running a program as root.

To run with default settings:

1. `./yggdrasil --autoconf`

That will generate a new set of keys (and an IP address) each time the program is run.
The program will bind to all addresses on a random port and listen for incoming connections.
It will send announcements over IPv6 link-local multicast, and attempt to start a connection if it hears an announcement from another device.

In practice, you probably want to run this instead:

1. `./yggdrasil --genconf > conf.json`
2. `./yggdrasil --useconf < conf.json`

The first step generates a configuration file with a set of cryptographic keys and default settings.
The second step runs the program using the configuration provided in that file.
Because ULAs are derived from keys, using a fixed set of keys causes a node to keep the same address each time the program is run.
If you want to use it as an overlay network on top of e.g. the internet, then you can do so by adding the address and port of the device you want to connect to (as a string, e.g. `"1.2.3.4:5678"`) to the list of `Peers` in the configuration file.
This should accept IPv4 and IPv6 addresses, and I think it should resolve host/domain names, but I haven't really tested that, so your mileage may vary.
You can also configure which address and/or port to listen on by editing the configuration file, in case you want to bind to a specific address or listen for incoming connections on a fixed port.

Also note that the node is connected to the network through a `tun` device, so it follows point-to-point semantics.
This means it's limited to routing traffic with source and destination addresses in `fd00::/8`--you can't add a prefix to your routing table "via" an address in that range, as the router has no idea who you meant to send it to.
In particular, this means you can't set a working default route that *directly* uses the overlay network, but I've had success *indirectly* using it to connect to an off-the-shelf VPN that I can use as a default route for internet access.
## Optional: advertise a prefix locally

Suppose a node has been given the address: `fd00:1111:2222:3333:4444:5555:6666:7777`

Then the node may also use addresses from the prefix: `fd80:1111:2222:3333::/64` (note the `fd00` -> `fd80`: a separate `/9` is used for prefixes).

To advertise this prefix and a route to `fd00::/8`, the following seems to work for me:

1. Enable IPv6 forwarding (e.g. `sysctl -w net.ipv6.conf.all.forwarding=1` or add it to sysctl.conf).

2. `ip addr add fd80:1111:2222:3333::1/64 dev eth0` or similar, to assign an address for the router to use in that prefix, where the LAN is reachable through `eth0`.

3. Install/run `radvd` with something like the following in `/etc/radvd.conf`:
```
interface eth0
{
        AdvSendAdvert on;
        prefix fd80:1111:2222:3333::/64 {
                AdvOnLink on;
                AdvAutonomous on;
        };
        route fd00::/8 {};
};
```
Now any IPv6-enabled device in the LAN can use stateless address auto-configuration to assign itself a working `fd00::/8` address from the `/64` prefix, and communicate with the wider network through the router, without requiring any special configuration for each device.
I've used this to e.g. get my phone on the network.
Note that there are some differences when accessing the network this way:

1. There are 64 fewer bits of address space available for self-certifying addresses.
This means that it is 64 bits easier to brute force a prefix collision than a collision for a full node's IP address. As such, you may want to change addresses frequently, or else brute force an address with more security bits (see: `misc/genkeys.go`, and the sketch after this list).

2. The LAN depends on the router for cryptography.
So while traffic going through the WAN is encrypted, the LAN is still just a LAN. You may want to secure your network.

3. Related to the above, the cryptography and I/O through the `tun` device both place additional load on the router, above what is normally present from forwarding packets between full nodes in the network, so the router may need more computing power to reach line rate.
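To clarify what "more security bits" means in point 1 (a minimal sketch of the same idea `misc/genkeys.go` exploits, not code from this repository): the address format can compress a run of leading one bits in the ID hash, so a key whose ID starts with more ones leaves more meaningful hash bits in the fixed-size address.

```
package main

import (
	"crypto/sha512"
	"fmt"
)

// leadingOnes counts the leading one bits of an ID (e.g. a hash of a public
// key). More leading ones means more of the remaining hash bits fit into the
// fixed-size address, i.e. a "stronger" self-certifying address.
func leadingOnes(id []byte) int {
	n := 0
	for _, b := range id {
		for bit := 7; bit >= 0; bit-- {
			if b&(1<<uint(bit)) == 0 {
				return n
			}
			n++
		}
	}
	return n
}

func main() {
	// Stand-in for a NodeID: a hash of some hypothetical public key.
	id := sha512.Sum512([]byte("example public key"))
	fmt.Println("leading one bits:", leadingOnes(id[:]))
}
```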
## How does it work?

Consider the internet, which uses a network-of-networks model with address aggregation.
Addresses are allocated by a central authority, as blocks of contiguous addresses with a matching prefix.
Within a network, each node may represent one or more prefixes, with each prefix representing a network of one or more nodes.
On the largest scale, BGP is used to route traffic between networks (autonomous systems), and other protocols can be used to route within a network.
The effectiveness of such hierarchical addressing and routing strategies depends on network topology, with the internet's observed topology being the worst case of all known topologies from a scalability standpoint (see [arxiv:0708.2309](https://arxiv.org/abs/0708.2309) for a better explanation of the issue, but the problem is essentially that address aggregation is ineffective in a network with a large number of nodes and a small diameter).
The routing scheme implemented by this code tries a different approach.
Instead of using assigned addresses and a routing table based on prefixes and address aggregation, routing and addressing are handled through a combination of:

1. Self-assigned cryptographically generated addresses, to handle address allocation without a central authority.
2. A kademlia-like distributed hash table, to look up a node's (name-dependent) routing information from its (name-independent) IP address.
3. A name-dependent routing scheme based on greedy routing in a metric space, constructed from an arbitrarily rooted spanning tree, which gives a reasonable approximation of the true distance between nodes for certain network topologies (namely the scale-free topology that seems to emerge in many large graphs, including the internet). The spanning tree embedding takes stability into account when selecting which one-hop neighbor to use as a parent, and path selection uses (poorly) estimated available bandwidth as a criterion, subject to the constraint that metric space distances must decrease with each hop (a toy sketch of the tree metric follows this list). Incidentally, the name `yggdrasil` was selected for this test code because that's obviously what you call an immense tree that connects worlds.
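As a rough illustration of that metric (a simplified toy, not this code's actual data structures), take a node's coordinates to be the list of ports on the path from the tree's root down to it; the distance between two nodes is then the hop count up to their deepest common ancestor and back down:

```
package main

import "fmt"

// treeDist returns the distance between two nodes on the spanning tree,
// given their coordinates (paths from the root): hops up from a to the
// deepest common ancestor, plus hops down to b.
func treeDist(a, b []int) int {
	shared := 0
	for shared < len(a) && shared < len(b) && a[shared] == b[shared] {
		shared++
	}
	return len(a) + len(b) - 2*shared
}

func main() {
	// Hypothetical coordinates for two nodes under the same root.
	x := []int{1, 4, 2}
	y := []int{1, 3}
	fmt.Println("tree distance:", treeDist(x, y)) // prints 3
}
```

Greedy routing then forwards a packet to whichever one-hop neighbor minimizes this distance to the destination, under the constraint that the distance must strictly decrease at each hop.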
The network then presents itself as having a single "flat" address space with no aggregation.
Under the hood, it runs as an overlay on top of existing IP networks.
Link-local IPv6 multicast traffic is used to advertise on the underlying networks, which can just as easily be a wired or wireless LAN, a direct (e.g. ethernet) connection between two devices, a wireless ad-hoc network, etc.
Additional connections can be added manually to peer over networks where link-local multicast is insufficient, which allows you to e.g. use the internet to bridge local networks.
The name-dependent routing layer uses cryptographically signed (`Ed25519`) path-vector-like routing messages, similar to S-BGP, which should prevent route poisoning and related attacks.
For encryption, it uses the Go implementation of the `nacl/box` scheme, which is built from a Curve25519 key exchange with XSalsa20 as a stream cipher and Poly1305 for integrity and authentication.
Permanent keys are used for protocol traffic, including the ephemeral key exchange, and a hash of a node's permanent public key is used to construct a node's address.
Ephemeral keys are used for encapsulated IP(v6) traffic, which provides forward secrecy.
Go's `crypto/rand` library is used for nonce generation.
In short, I've tried to not make this a complete security disaster, but the code hasn't been independently audited and I'm nothing close to a security expert, so it should be considered a proof-of-principle rather than a safe implementation.
At a minimum, I know of no way to prevent gray hole attacks.
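For reference, a minimal sketch of the `nacl/box` primitive referred to above (this is just the library's usage pattern, not this project's wire protocol or key handling):

```
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/box"
)

func main() {
	// Each side generates a Curve25519 key pair.
	alicePub, alicePriv, _ := box.GenerateKey(rand.Reader)
	bobPub, bobPriv, _ := box.GenerateKey(rand.Reader)

	// Nonces must never repeat for a given key pair; here one is drawn
	// from crypto/rand, as described above.
	var nonce [24]byte
	if _, err := rand.Read(nonce[:]); err != nil {
		panic(err)
	}

	// Seal encrypts and authenticates (XSalsa20 + Poly1305) with a shared
	// key derived from the peer's public key and the sender's private key.
	sealed := box.Seal(nonce[:], []byte("hello"), &nonce, bobPub, alicePriv)

	// Open verifies and decrypts on the receiving side.
	msg, ok := box.Open(nil, sealed[24:], &nonce, alicePub, bobPriv)
	fmt.Println(ok, string(msg))
}
```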
I realize that this is a terribly short description of how it works, so I may elaborate further in another document if the need arises.
Otherwise, I guess you could try to read my terrible and poorly documented code if you want to know more.
## Related work

A lot of inspiration comes from [cjdns](https://github.com/cjdelisle/cjdns).
I'm a contributor to that project, and I wanted to test out some ideas that weren't convenient to prototype in the existing code base, which is why I wrote this toy.

On the routing side, a lot of influence came from compact routing.
A number of compact routing schemes are evaluated in [arxiv:0708.2309](https://arxiv.org/abs/0708.2309) and may be used as a basis for comparison.
When tested in a simplified simulation environment on CAIDA's 9204-node "skitter" network graph used in that paper, I observed an average multiplicative stretch of about 1.08 with my routing scheme, as implemented here.
This can be lowered to less than 1.02 using a source-routed version of the algorithm and including node degree as an additional parameter of the embedding, which is of academic interest, but degree's unverifiability makes it impractical for this implementation.
In either case, this only requires 1 routing table entry per one-hop neighbor (this averages ~6 in the skitter network graph), plus a logarithmic number of DHT entries (expected to be ~26, based on extrapolations from networks with a few hundred nodes--running the full implementation on the skitter graph is impractical on my machine).
I don't think stretch is really an appropriate metric, as it doesn't consider the difference in total network cost between a high-stretch short path and a high-stretch long path.
In this scheme, and I believe in most compact routing schemes, longer paths tend to have lower multiplicative stretch, and shorter paths are more likely to have higher stretch.
I would argue that this is preferable to the alternative.

While I use a slightly different approach, the idea to try a greedy routing scheme was inspired by the use of greedy routing on networks embedded in the hyperbolic plane (such as [Kleinberg's work](https://doi.org/10.1109%2FINFCOM.2007.221) and [Greedy Forwarding on the NDN Testbed](https://www.caida.org/research/routing/greedy_forwarding_ndn/)).
I use distance on a spanning tree as the metric, which seems to work well on the types of networks I'm concerned with, and it simplifies other aspects of the implementation.
The hyperbolic embedding algorithms I'm aware of, or specifically the distributed ones, operate by constructing a spanning tree of the network and then embedding the tree.
So I don't see much harm, at present, in skipping the hyperbolic plane and directly using the tree for the metric space.
## Misc. notes

This is a toy experiment / proof-of-concept.
It's only meant to test if / how well some ideas work.
I have no idea what I'm doing, so for all I know it's entirely possible that it could crash your computer, eat your homework, or set fire to your house.
Some parts are also written to be as bad as I could make them while still being technically correct, in an effort to make bugs obvious if they occur, which means that even when it does work it may be fragile and error prone.

In particular, you should expect it to perform poorly under mobility events, and to converge slowly in dynamic networks. All else being equal, this implementation should tend to prefer long-lived links over short-lived ones when embedding, and (poorly estimated) high bandwidth links over low bandwidth ones when forwarding traffic. As such, in multi-homed or mobile scenarios, there may be some tendency for it to make decisions you disagree with.

While stretch is low on internet-like graphs, the best upper bound I've established on the *additive* stretch of this scheme, after convergence, is the same as for tree routing: proportional to network diameter. For sparse graphs with a large diameter, the scheme may not find particularly efficient paths, even under ideal circumstances. I would argue that such networks tend not to grow large enough for scalability to be an issue, so another routing scheme is better suited to those networks.

Regarding the announce-able prefix thing, what I wanted to do was use `fc00::/7`, where `fc00::/8` is for nodes and `fd00::/8` is for prefixes.
I would also possibly widen the prefixes to `/48`, to match [rfc4193](https://tools.ietf.org/html/rfc4193), and possibly provide an option to keep using a `/64` by splitting it into two `/9` blocks (where `/64` prefixes would continue to live in `fd80::/9`), or else convince myself that the security implications of another 16 bits don't matter (to avoid the complexity of splitting it into two `/9` ranges for prefixes).
Using `fc00::/8` this way would cause issues if trying to also run cjdns.
Since I like cjdns, and want the option of running it on the same nodes, I've decided not to do that.
If I ever give up on avoiding cjdns conflicts, then I may change the addressing scheme to match the above.

Despite the tree being constructed from path-vector-like routing messages, there's no support for routing policy right now.
As a result, peer relationships are bimodal: either you're not connected to someone, or you're connected and you'll route traffic *to* and *through* them.
Nodes also accept all incoming connections, so if you want to limit who can connect then you'll need to provide some other kind of access controls.

The current implementation does all of its setup when the program starts, and then nothing can be reconfigured without restarting the program.
At some point I may add a remote API, so a running node can be reconfigured (to e.g. add/remove peers) without restarting, or so the internal state of the router can be probed for useful debugging info.
So far, things seem to work the way I want/expect without much trouble, so I haven't felt the need to do this yet.

Some parts of the implementation can take advantage of multiple cores, but other parts that could do so simply do not.
Some parts are fast, but other parts are slower than they have any right to be, e.g. I can't figure out why some syscalls are as expensive as they are, so the `tun` in particular tends to be a CPU bottleneck (multi-queue could help in some cases, but that just spreads the cost around, and it doesn't help with single streams of traffic).
The Go runtime's GC tends to have short pauses, but it does have pauses.
So even if the ideas that went into this routing scheme turn out to be useful, this implementation is likely to remain mediocre at best for the foreseeable future.
If this thing works well and the protocol stabilizes, then it's worth considering a re-implementation and/or a formal spec and RFC.
In such a case, it's entirely reasonable to change parts of the spec purely to make an efficient implementation easier (e.g. it makes sense to want zero-copy networking, but a couple parts of the current protocol might make that impractical).
misc/genkeys.go (new file, 77 lines)
@@ -0,0 +1,77 @@
/*

This file generates crypto keys.
It prints out a new set of keys each time it finds a "better" one.
By default, "better" means a higher NodeID (-> higher IP address).
This is because the IP address format can compress leading 1s in the address, to increase the number of ID bits in the address.

If run with the "-sig" flag, it generates signing keys instead.
A "better" signing key means one with a higher TreeID.
This only matters if it's high enough to make you the root of the tree.

*/
package main

import (
	"encoding/hex"
	"flag"
	"fmt"

	// Dot import keeps the DEBUG_* helper calls short.
	. "yggdrasil"
)

var doSig = flag.Bool("sig", false, "generate new signing keys instead")

func main() {
	flag.Parse()
	switch {
	case *doSig:
		doSigKeys()
	default:
		doBoxKeys()
	}
}

// isBetter compares two IDs as big-endian byte strings.
func isBetter(oldID, newID []byte) bool {
	for idx := range oldID {
		if newID[idx] > oldID[idx] {
			return true
		}
		if newID[idx] < oldID[idx] {
			return false
		}
	}
	return false
}

func doBoxKeys() {
	c := Core{}
	pub, _ := c.DEBUG_newBoxKeys()
	bestID := c.DEBUG_getNodeID(pub)
	for idx := range bestID {
		bestID[idx] = 0
	}
	for {
		pub, priv := c.DEBUG_newBoxKeys()
		id := c.DEBUG_getNodeID(pub)
		if !isBetter(bestID[:], id[:]) {
			continue
		}
		bestID = id
		ip := c.DEBUG_addrForNodeID(id)
		fmt.Println("--------------------------------------------------------------------------------")
		fmt.Println("boxPriv:", hex.EncodeToString(priv[:]))
		fmt.Println("boxPub:", hex.EncodeToString(pub[:]))
		fmt.Println("NodeID:", hex.EncodeToString(id[:]))
		fmt.Println("IP:", ip)
	}
}

func doSigKeys() {
	c := Core{}
	pub, _ := c.DEBUG_newSigKeys()
	bestID := c.DEBUG_getTreeID(pub)
	for idx := range bestID {
		bestID[idx] = 0
	}
	for {
		pub, priv := c.DEBUG_newSigKeys()
		id := c.DEBUG_getTreeID(pub)
		if !isBetter(bestID[:], id[:]) {
			continue
		}
		bestID = id
		fmt.Println("--------------------------------------------------------------------------------")
		fmt.Println("sigPriv:", hex.EncodeToString(priv[:]))
		fmt.Println("sigPub:", hex.EncodeToString(pub[:]))
		fmt.Println("TreeID:", hex.EncodeToString(id[:]))
	}
}
misc/run-conf2-netns (new executable file, 23 lines)
@@ -0,0 +1,23 @@
#!/bin/sh
# Set up a network namespace (peerns) connected to the host by a veth pair,
# then run a node inside it.

ip netns add peerns
ip link add veth0 type veth peer name veth1
ifconfig veth0 192.168.2.1/24 up
echo "1"
#tc qdisc add dev veth0 root tbf rate 8mbit burst 8192 latency 1ms
#tc qdisc add dev veth0 root netem delay 50ms 5ms distribution normal
echo "2"
ip link set veth1 netns peerns
ip netns exec peerns ifconfig veth1 192.168.2.2/24 up
echo "3"
#ip netns exec peerns tc qdisc add dev veth1 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns tc qdisc add dev veth1 root netem delay 50ms 5ms distribution normal
echo "4"
ip netns exec peerns ip addr list
#ip netns exec peerns ./run -useconf=conf2.json
ip netns exec peerns ip link set dev lo up
ip netns exec peerns ./run -autoconf -pprof
#GODEBUG=gctrace=1 ip netns exec peerns ./run -autoconf
#ip netns exec peerns ./run -useconf=conf2.json -cpuprofile=cpu2.prof -memprofile=mem2.prof
#ip netns delete peerns
misc/run-conf3-netns (new executable file, 28 lines)
@@ -0,0 +1,28 @@
#!/bin/sh
# Add a third namespace (peerns3) chained off peerns (from run-conf2-netns),
# then run a node inside it.

ip netns add peerns3
ip link add veth23 type veth peer name veth32
ip link set veth23 netns peerns
ip netns exec peerns ifconfig veth23 192.168.3.1/24 up
#ip netns exec peerns tc qdisc add dev veth23 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns tc qdisc add dev veth23 root netem delay 50ms 5ms distribution normal
ip link set veth32 netns peerns3
ip netns exec peerns3 ifconfig veth32 192.168.3.2/24 up
#ip netns exec peerns3 tc qdisc add dev veth32 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns3 tc qdisc add dev veth32 root netem delay 50ms 5ms distribution normal
ip netns exec peerns3 ip route add 192.168.2.0/24 via 192.168.3.1

#ip link add veth13 type veth peer name veth31
#ifconfig veth13 192.168.4.1/24 up
#ip netns exec peerns tc qdisc add dev veth23 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns tc qdisc add dev veth23 root netem delay 50ms 5ms distribution normal
#ip link set veth31 netns peerns3
#ip netns exec peerns3 ifconfig veth32 192.168.4.3/24 up
#ip netns exec peerns3 tc qdisc add dev veth32 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns3 tc qdisc add dev veth32 root netem delay 50ms 5ms distribution normal
#ip netns exec peerns3 ip route add 192.168.2.0/24 via 192.168.3.1

ip netns exec peerns3 ip addr list
#ip netns exec peerns3 ./run -useconf=conf3.json
ip netns exec peerns3 ./run -autoconf
#ip netns delete peerns3
misc/run-conf4-netns (new executable file, 28 lines)
@@ -0,0 +1,28 @@
#!/bin/sh
# Add a fourth namespace (peerns4) chained off peerns3 (from run-conf3-netns),
# then run a node inside it.

ip netns add peerns4
ip link add veth34 type veth peer name veth43
ip link set veth34 netns peerns3
ip netns exec peerns3 ifconfig veth34 192.168.4.3/24 up
#ip netns exec peerns tc qdisc add dev veth23 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns tc qdisc add dev veth23 root netem delay 50ms 5ms distribution normal
ip link set veth43 netns peerns4
ip netns exec peerns4 ifconfig veth43 192.168.4.4/24 up
#ip netns exec peerns3 tc qdisc add dev veth32 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns3 tc qdisc add dev veth32 root netem delay 50ms 5ms distribution normal
#ip netns exec peerns4 ip route add 192.168.3.0/24 via 192.168.4.3

#ip link add veth13 type veth peer name veth31
#ifconfig veth13 192.168.4.1/24 up
#ip netns exec peerns tc qdisc add dev veth23 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns tc qdisc add dev veth23 root netem delay 50ms 5ms distribution normal
#ip link set veth31 netns peerns3
#ip netns exec peerns3 ifconfig veth32 192.168.4.3/24 up
#ip netns exec peerns3 tc qdisc add dev veth32 root tbf rate 8mbit burst 8192 latency 1ms
#ip netns exec peerns3 tc qdisc add dev veth32 root netem delay 50ms 5ms distribution normal
#ip netns exec peerns3 ip route add 192.168.2.0/24 via 192.168.3.1

ip netns exec peerns4 ip addr list
#ip netns exec peerns3 ./run -useconf=conf3.json
ip netns exec peerns4 ./run -autoconf
#ip netns delete peerns3
misc/run-schannel-netns (new executable file, 69 lines)
@@ -0,0 +1,69 @@
#!/bin/bash

# Connects nodes in a network resembling an s-channel Feynman diagram.

# 1   5
#  \ /
#   3--4
#  / \
# 2   6

# Bandwidth constraints are applied to 4<->5 and 4<->6.
# The idea is to make sure that bottlenecks on one link don't affect the other.

ip netns add node1
ip netns add node2
ip netns add node3
ip netns add node4
ip netns add node5
ip netns add node6

ip link add veth13 type veth peer name veth31
ip link set veth13 netns node1 up
ip link set veth31 netns node3 up

ip link add veth23 type veth peer name veth32
ip link set veth23 netns node2 up
ip link set veth32 netns node3 up

ip link add veth34 type veth peer name veth43
ip link set veth34 netns node3 up
ip link set veth43 netns node4 up

ip link add veth45 type veth peer name veth54
ip link set veth45 netns node4 up
ip link set veth54 netns node5 up

ip link add veth46 type veth peer name veth64
ip link set veth46 netns node4 up
ip link set veth64 netns node6 up

ip netns exec node4 tc qdisc add dev veth45 root tbf rate 100mbit burst 8192 latency 1ms
ip netns exec node5 tc qdisc add dev veth54 root tbf rate 100mbit burst 8192 latency 1ms

ip netns exec node4 tc qdisc add dev veth46 root tbf rate 10mbit burst 8192 latency 1ms
ip netns exec node6 tc qdisc add dev veth64 root tbf rate 10mbit burst 8192 latency 1ms

ip netns exec node1 ./run --autoconf --pprof &> /dev/null &
ip netns exec node2 ./run --autoconf --pprof &> /dev/null &
ip netns exec node3 ./run --autoconf --pprof &> /dev/null &
ip netns exec node4 ./run --autoconf --pprof &> /dev/null &
ip netns exec node5 ./run --autoconf --pprof &> /dev/null &
ip netns exec node6 ./run --autoconf --pprof &> /dev/null &

echo "Started, to continue you should (possibly w/ sudo):"
echo "kill" $(jobs -p)
wait

ip netns delete node1
ip netns delete node2
ip netns delete node3
ip netns delete node4
ip netns delete node5
ip netns delete node6

ip link delete veth13
ip link delete veth23
ip link delete veth34
ip link delete veth45
ip link delete veth46
misc/sim/fc00-2017-08-12.txt (new file, 1593 lines)
File diff suppressed because it is too large
misc/sim/merge-skitter.py (new file, 60 lines)
@@ -0,0 +1,60 @@
# Merge per-run simulation outputs and compute aggregate stretch statistics.
import glob

inputDirPath = "out-skitter"

inputFilePaths = glob.glob(inputDirPath+"/*")
inputFilePaths.sort()

merged = dict()

stretches = []

total = 0
for inputFilePath in inputFilePaths:
    print("Processing file {}".format(inputFilePath))
    with open(inputFilePath, 'r') as f:
        inData = f.readlines()
    pathsChecked = 0.
    avgStretch = 0.
    for line in inData:
        # Each line is "<expected hops> <needed hops> <count>"
        dat = line.rstrip('\n').split(' ')
        eHops = int(dat[0])
        nHops = int(dat[1])
        count = int(dat[2])
        if eHops not in merged: merged[eHops] = dict()
        if nHops not in merged[eHops]: merged[eHops][nHops] = 0
        merged[eHops][nHops] += count
        total += count
        pathsChecked += count
        stretch = float(nHops)/eHops
        avgStretch += stretch*count
    finStretch = avgStretch / max(1, pathsChecked)
    stretches.append(str(finStretch))

hopsUsed = 0.
hopsNeeded = 0.
avgStretch = 0.
results = []
for eHops in sorted(merged.keys()):
    for nHops in sorted(merged[eHops].keys()):
        count = merged[eHops][nHops]
        result = "{} {} {}".format(eHops, nHops, count)
        results.append(result)
        hopsUsed += nHops*count
        hopsNeeded += eHops*count
        stretch = float(nHops)/eHops
        avgStretch += stretch*count
        print(result)
bandwidthUsage = hopsUsed/max(1, hopsNeeded)
avgStretch /= max(1, total)

with open("results.txt", "w") as f:
    f.write('\n'.join(results))

with open("stretches.txt", "w") as f:
    f.write('\n'.join(stretches))

print("Total files processed: {}".format(len(inputFilePaths)))
print("Total paths found: {}".format(total))
print("Bandwidth usage: {}".format(bandwidthUsage))
print("Average stretch: {}".format(avgStretch))
misc/sim/merge.py (new file, 60 lines)
@@ -0,0 +1,60 @@
# Same merge/statistics pass as merge-skitter.py, over the "fc00" outputs.
import glob

inputDirPath = "fc00"

inputFilePaths = glob.glob(inputDirPath+"/*")
inputFilePaths.sort()

merged = dict()

stretches = []

total = 0
for inputFilePath in inputFilePaths:
    print("Processing file {}".format(inputFilePath))
    with open(inputFilePath, 'r') as f:
        inData = f.readlines()
    pathsChecked = 0.
    avgStretch = 0.
    for line in inData:
        dat = line.rstrip('\n').split(' ')
        eHops = int(dat[0])
        nHops = int(dat[1])
        count = int(dat[2])
        if eHops not in merged: merged[eHops] = dict()
        if nHops not in merged[eHops]: merged[eHops][nHops] = 0
        merged[eHops][nHops] += count
        total += count
        pathsChecked += count
        stretch = float(nHops)/eHops
        avgStretch += stretch*count
    finStretch = avgStretch / max(1, pathsChecked)
    stretches.append(str(finStretch))

hopsUsed = 0.
hopsNeeded = 0.
avgStretch = 0.
results = []
for eHops in sorted(merged.keys()):
    for nHops in sorted(merged[eHops].keys()):
        count = merged[eHops][nHops]
        result = "{} {} {}".format(eHops, nHops, count)
        results.append(result)
        hopsUsed += nHops*count
        hopsNeeded += eHops*count
        stretch = float(nHops)/eHops
        avgStretch += stretch*count
        print(result)
bandwidthUsage = hopsUsed/max(1, hopsNeeded)
avgStretch /= max(1, total)

with open("results.txt", "w") as f:
    f.write('\n'.join(results))

with open("stretches.txt", "w") as f:
    f.write('\n'.join(stretches))

print("Total files processed: {}".format(len(inputFilePaths)))
print("Total paths found: {}".format(total))
print("Bandwidth usage: {}".format(bandwidthUsage))
print("Average stretch: {}".format(avgStretch))
misc/sim/test (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash
export GOPATH=$PWD
go get -d yggdrasil
go run misc/sim/treesim.go
misc/sim/treesim-basic.go (new file, 193 lines)
@@ -0,0 +1,193 @@
package main

import (
	"bufio"
	"flag"
	"fmt"
	"os"
	"runtime/pprof"
	"strconv"
	"strings"
	"time"

	"router"
)

////////////////////////////////////////////////////////////////////////////////

type Node struct {
	nodeID router.NodeID
	table  router.Table
	links  []*Node
}

func (n *Node) init(nodeID router.NodeID) {
	n.nodeID = nodeID
	n.table.Init(nodeID)
	// links[0] is the node itself, so real peers start at index 1
	n.links = append(n.links, n)
}

func linkNodes(m, n *Node) {
	for _, o := range m.links {
		if o.nodeID == n.nodeID {
			// Don't allow duplicates
			return
		}
	}
	m.links = append(m.links, n)
	n.links = append(n.links, m)
}

func makeStoreSquareGrid(sideLength int) map[router.NodeID]*Node {
	store := make(map[router.NodeID]*Node)
	nNodes := sideLength * sideLength
	nodeIDs := make([]router.NodeID, 0, nNodes)
	// TODO shuffle nodeIDs
	for nodeID := 1; nodeID <= nNodes; nodeID++ {
		nodeIDs = append(nodeIDs, router.NodeID(nodeID))
	}
	for _, nodeID := range nodeIDs {
		node := &Node{}
		node.init(nodeID)
		store[nodeID] = node
	}
	for idx := 0; idx < nNodes; idx++ {
		if (idx % sideLength) != 0 {
			linkNodes(store[nodeIDs[idx]], store[nodeIDs[idx-1]])
		}
		if idx >= sideLength {
			linkNodes(store[nodeIDs[idx]], store[nodeIDs[idx-sideLength]])
		}
	}
	return store
}

func loadGraph(path string) map[router.NodeID]*Node {
	f, err := os.Open(path)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	store := make(map[router.NodeID]*Node)
	s := bufio.NewScanner(f)
	for s.Scan() {
		// Each line is an edge: "<nodeID> <nodeID>"
		line := s.Text()
		nodeIDstrs := strings.Split(line, " ")
		nodeIDi0, _ := strconv.Atoi(nodeIDstrs[0])
		nodeIDi1, _ := strconv.Atoi(nodeIDstrs[1])
		nodeID0 := router.NodeID(nodeIDi0)
		nodeID1 := router.NodeID(nodeIDi1)
		if store[nodeID0] == nil {
			node := &Node{}
			node.init(nodeID0)
			store[nodeID0] = node
		}
		if store[nodeID1] == nil {
			node := &Node{}
			node.init(nodeID1)
			store[nodeID1] = node
		}
		linkNodes(store[nodeID0], store[nodeID1])
	}
	return store
}

////////////////////////////////////////////////////////////////////////////////

func idleUntilConverged(store map[router.NodeID]*Node) {
	timeOfLastChange := 0
	step := 0
	// Idle until the network has converged
	for step-timeOfLastChange < 4*router.TIMEOUT {
		step++
		fmt.Println("Step:", step, "--", "last change:", timeOfLastChange)
		for _, node := range store {
			node.table.Tick()
			for idx, link := range node.links[1:] {
				msg := node.table.CreateMessage(router.Iface(idx))
				for idx, fromNode := range link.links {
					if fromNode == node {
						//fmt.Println("Sending from node", node.nodeID, "to", link.nodeID)
						link.table.HandleMessage(msg, router.Iface(idx))
						break
					}
				}
			}
		}
		//for _, node := range store {
		//	if node.table.DEBUG_isDirty() { timeOfLastChange = step }
		//}
		//time.Sleep(10*time.Millisecond)
	}
}

func testPaths(store map[router.NodeID]*Node) {
	nNodes := len(store)
	nodeIDs := make([]router.NodeID, 0, nNodes)
	for nodeID := range store {
		nodeIDs = append(nodeIDs, nodeID)
	}
	lookups := 0
	count := 0
	start := time.Now()
	for _, source := range store {
		count++
		fmt.Printf("Testing paths from node %d / %d (%d)\n", count, nNodes, source.nodeID)
		for _, dest := range store {
			//if source == dest { continue }
			destLoc := dest.table.GetLocator()
			temp := 0
			for here := source; here != dest; {
				temp++
				if temp > 16 {
					panic("Loop?")
				}
				next := here.links[here.table.Lookup(destLoc)]
				if next == here {
					//for idx, link := range here.links {
					//	fmt.Println("DUMP:", idx, link.nodeID)
					//}
					panic(fmt.Sprintln("Routing Loop:",
						source.nodeID,
						here.nodeID,
						dest.nodeID))
				}
				//fmt.Println("DEBUG:", source.nodeID, here.nodeID, dest.nodeID)
				here = next
				lookups++
			}
		}
	}
	timed := time.Since(start)
	fmt.Printf("%f lookups per second\n", float64(lookups)/timed.Seconds())
}

func dumpStore(store map[router.NodeID]*Node) {
	for _, node := range store {
		fmt.Println("DUMPSTORE:", node.nodeID, node.table.GetLocator())
		node.table.DEBUG_dumpTable()
	}
}

////////////////////////////////////////////////////////////////////////////////

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintf("could not create CPU profile: %v", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintf("could not start CPU profile: %v", err))
		}
		defer pprof.StopCPUProfile()
	}
	fmt.Println("Test")
	store := makeStoreSquareGrid(4)
	idleUntilConverged(store)
	dumpStore(store)
	testPaths(store)
	//panic("DYING")
	store = loadGraph("hype-2016-09-19.list")
	idleUntilConverged(store)
	dumpStore(store)
	testPaths(store)
}
misc/sim/treesim-forward.py (new file, 902 lines; truncated below)
@@ -0,0 +1,902 @@
|
||||
# Tree routing scheme (named Yggdrasil, after the world tree from Norse mythology)
# Steps:
#   1: Pick any node, here I'm using highest nodeID
#   2: Build spanning tree, each node stores path back to root
#      Optionally with weights for each hop
#      Ties broken by preferring a parent with higher degree
#   3: Distance metric: self->peer + (via tree) peer->dest
#   4: Perform (modified) greedy lookup via this metric for each direction (A->B and B->A)
#   5: Source-route traffic using the better of those two paths

# Note: This makes no attempt to simulate a dynamic network
#   E.g. A node's peers cannot be disconnected

# TODO:
#   Make better use of drop?
#   In particular, we should be ignoring *all* recently dropped *paths* to the root
#     To minimize route flapping
#     Not really an issue in the sim, but probably needed for a real network

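# Illustrative example of the step-3 metric (hypothetical values, not from a run):
# if peer P is 1 hop away (len(P.path)-1 == 1), P's coords are [r, a, b] and the
# dest's coords are [r, a, c], then the tree part of the distance is
# treeDist([r,a,b], [r,a,c]) == 2, so the metric for routing via P is 1 + 2 == 3
# (lookup_old below performs exactly this calculation)
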
import array
import gc
import glob
import gzip
import heapq
import os
import random
import time

#############
# Constants #
#############

# Reminder of where link cost comes in
LINK_COST = 1

# Timeout before dropping something, in simulated seconds
TIMEOUT = 60

###########
# Classes #
###########

class PathInfo:
    def __init__(self, nodeID):
        self.nodeID = nodeID   # e.g. IP
        self.coords = []       # Position in tree
        self.tstamp = 0        # Timestamp from sender, to keep track of old vs new info
        self.degree = 0        # Number of peers the sender has, used to break ties
        # The above should be signed
        self.path   = [nodeID] # Path to node (in path-vector route)
        self.time   = 0        # Time info was updated, to keep track of e.g. timeouts
        self.treeID = nodeID   # Hack, let tree use different ID than IP, used so we can dijkstra once and test many roots
    def clone(self):
        # Return a deep-enough copy of the path
        clone = PathInfo(None)
        clone.nodeID = self.nodeID
        clone.coords = self.coords[:]
        clone.tstamp = self.tstamp
        clone.degree = self.degree
        clone.path = self.path[:]
        clone.time = self.time
        clone.treeID = self.treeID
        return clone
# End class PathInfo

class Node:
    def __init__(self, nodeID):
        self.info  = PathInfo(nodeID) # Self NodeInfo
        self.root  = None   # PathInfo to node at root of tree
        self.drop  = dict() # PathInfo to nodes from the cluster that have timed out
        self.peers = dict() # PathInfo to peers
        self.links = dict() # Links to peers (to pass messages)
        self.msgs  = []     # Said messages
        self.table = dict() # Pre-computed lookup table of peer info

    def tick(self):
        # Do periodic maintenance stuff, including push updates
        self.info.time += 1
        if self.info.time > self.info.tstamp + TIMEOUT/4:
            # Update timestamp at least once every 1/4 timeout period
            # This should probably be randomized in a real implementation
            self.info.tstamp = self.info.time
            self.info.degree = 0 # TODO decide if degree should be used, len(self.peers)
        changed = False # Used to track when the network has converged
        changed |= self.cleanRoot()
        self.cleanDropped()
        # Should probably send messages infrequently if there's nothing new to report
        if self.info.tstamp == self.info.time:
            msg = self.createMessage()
            self.sendMessage(msg)
        return changed

    def cleanRoot(self):
        changed = False
        if self.root and self.info.time - self.root.time > TIMEOUT:
            print "DEBUG: clean root,", self.root.path
            self.drop[self.root.treeID] = self.root
            self.root = None
            changed = True
        if not self.root or self.root.treeID < self.info.treeID:
            # No need to drop someone who's worse than us
            self.info.coords = [self.info.nodeID]
            self.root = self.info.clone()
            changed = True
        elif self.root.treeID == self.info.treeID:
            self.root = self.info.clone()
        return changed

    def cleanDropped(self):
        # May actually be a treeID... better to iterate over keys explicitly
        nodeIDs = sorted(self.drop.keys())
        for nodeID in nodeIDs:
            node = self.drop[nodeID]
            if self.info.time - node.time > 4*TIMEOUT:
                del self.drop[nodeID]
        return None

    def createMessage(self):
        # Message is just a tuple
        # First element is the sender
        # Second element is the root
        # We will .clone() everything during the send operation
        msg = (self.info, self.root)
        return msg

    def sendMessage(self, msg):
        for link in self.links.values():
            newMsg = (msg[0].clone(), msg[1].clone())
            link.msgs.append(newMsg)
        return None

    def handleMessages(self):
        changed = False
        while self.msgs:
            changed |= self.handleMessage(self.msgs.pop())
        return changed

    def handleMessage(self, msg):
        changed = False
        for node in msg:
            # Update the path and timestamp for the sender and root info
            node.path.append(self.info.nodeID)
            node.time = self.info.time
        # Update the sender's info in our list of peers
        sender = msg[0]
        self.peers[sender.nodeID] = sender
        # Decide if we want to update the root
        root = msg[1]
        updateRoot = False
        isSameParent = False
        isBetterParent = False
        if len(self.root.path) > 1 and len(root.path) > 1:
            parent = self.peers[self.root.path[-2]]
            if parent.nodeID == sender.nodeID: isSameParent = True
            if sender.degree > parent.degree:
                # This would also be where you check path uptime/reliability/whatever
                # All else being equal, we prefer parents with high degree
                # We are trusting peers to report degree correctly in this case
                # So expect some performance reduction if your peers aren't trustworthy
                # (Lies can increase average stretch by a few %)
                isBetterParent = True
        if self.info.nodeID in root.path[:-1]: pass # No loopy routes allowed
        elif root.treeID in self.drop and self.drop[root.treeID].tstamp >= root.tstamp: pass
        elif not self.root: updateRoot = True
        elif self.root.treeID < root.treeID: updateRoot = True
        elif self.root.treeID != root.treeID: pass
        elif self.root.tstamp > root.tstamp: pass
        elif len(root.path) < len(self.root.path): updateRoot = True
        elif isBetterParent and len(root.path) == len(self.root.path): updateRoot = True
        elif isSameParent and self.root.tstamp < root.tstamp: updateRoot = True
        if updateRoot:
            if not self.root or self.root.path != root.path: changed = True
            self.root = root
            self.info.coords = self.root.path
        return changed
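    # Summary of the root-update precedence encoded in the elif chain above:
    # reject routes containing ourselves (loops) or roots we recently dropped,
    # then prefer a higher treeID, a newer timestamp, a shorter path to the root,
    # a higher-degree parent at equal path length, and refreshes from the same parent
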
    def lookup_old(self, dest):
        # Note: Can loop in an unconverged network
        # The person looking up the route is responsible for checking for loops
        best = None
        bestDist = 0
        for node in self.peers.itervalues():
            # dist = distance to node + dist (on tree) from node to dest
            dist = len(node.path)-1 + treeDist(node.coords, dest.coords)
            if not best or dist < bestDist:
                best = node
                bestDist = dist
        if best:
            next = best.path[-2]
            assert next in self.peers
            return next
        else:
            # We failed to look something up
            # TODO some way to signal this which doesn't crash
            assert False

    def initTable(self):
        # Pre-computes a lookup table for destination coords
        # Insert parent first so you prefer them as a next-hop
        self.table.clear()
        parent = self.info.nodeID
        if len(self.info.coords) >= 2: parent = self.info.coords[-2]
        for peer in self.peers.itervalues():
            current = self.table
            for coord in peer.coords:
                if coord not in current: current[coord] = (peer.nodeID, dict())
                old = current[coord]
                next = old[1]
                oldPeer = self.peers[old[0]]
                oldDist = len(oldPeer.coords)
                oldDeg = oldPeer.degree
                newDist = len(peer.coords)
                newDeg = peer.degree
                # Prefer parent
                # Else prefer short distance from root
                # If equal distance, prefer high degree
                if peer.nodeID == parent: current[coord] = (peer.nodeID, next)
                elif newDist < oldDist: current[coord] = (peer.nodeID, next)
                elif newDist == oldDist and newDeg > oldDeg: current[coord] = (peer.nodeID, next)
                current = next
        return None
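    # Shape of the resulting table (illustrative): a trie keyed by successive
    # coords, where each entry is (next-hop peer, children), e.g.
    #   {rootID: (peerA, {coordX: (peerA, {}), coordY: (peerB, {})})}
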
    def lookup(self, dest):
        # Use pre-computed lookup table to look up next hop for dest coords
        assert self.table
        if len(self.info.coords) >= 2: parent = self.info.coords[-2]
        else: parent = None
        current = (parent, self.table)
        c = None
        for coord in dest.coords:
            c = coord
            if coord not in current[1]: break
            current = current[1][coord]
        next = current[0]
        if c in self.peers: next = c
        if next not in self.peers:
            assert next is None
            # You're the root of a different connected component
            # You'd drop the packet in this case
            # To make the path cache not die, need to return a valid next hop...
            # Returning self for that reason
            next = self.info.nodeID
        return next
# End class Node

####################
# Helper Functions #
####################

def getIndexOfLCA(source, dest):
    # Return index of last common ancestor in source/dest coords
    # -1 if no common ancestor (e.g. different roots)
    lcaIdx = -1
    minLen = min(len(source), len(dest))
    for idx in xrange(minLen):
        if source[idx] == dest[idx]: lcaIdx = idx
        else: break
    return lcaIdx

def treePath(source, dest):
    # Return path with source at head and dest at tail
    lastMatch = getIndexOfLCA(source, dest)
    path = dest[-1:lastMatch:-1] + source[lastMatch:]
    assert path[0] == dest[-1]
    assert path[-1] == source[-1]
    return path

def treeDist(source, dest):
    dist = len(source) + len(dest)
    lcaIdx = getIndexOfLCA(source, dest)
    dist -= 2*(lcaIdx+1)
    return dist
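# Sanity check (illustrative): for coords [1,2,3] and [1,2,4], the last common
# ancestor index is 1, so treeDist == 3 + 3 - 2*(1+1) == 2 (one hop up, one down)
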
def dijkstra(nodestore, startingNodeID):
    # Idea to use heapq and basic implementation taken from stackexchange post
    # http://codereview.stackexchange.com/questions/79025/dijkstras-algorithm-in-python
    nodeIDs = sorted(nodestore.keys())
    nNodes = len(nodeIDs)
    idxs = dict()
    for nodeIdx in xrange(nNodes):
        nodeID = nodeIDs[nodeIdx]
        idxs[nodeID] = nodeIdx
    dists = array.array("H", [0]*nNodes)
    queue = [(0, startingNodeID)]
    while queue:
        dist, nodeID = heapq.heappop(queue)
        idx = idxs[nodeID]
        if not dists[idx]: # Unvisited, otherwise we skip it
            dists[idx] = dist
            for peer in nodestore[nodeID].links:
                if not dists[idxs[peer]]:
                    # Peer is also unvisited, so add to queue
                    heapq.heappush(queue, (dist+LINK_COST, peer))
    return dists

def dijkstrall(nodestore):
    # Idea to use heapq and basic implementation taken from stackexchange post
    # http://codereview.stackexchange.com/questions/79025/dijkstras-algorithm-in-python
    nodeIDs = sorted(nodestore.keys())
    nNodes = len(nodeIDs)
    idxs = dict()
    for nodeIdx in xrange(nNodes):
        nodeID = nodeIDs[nodeIdx]
        idxs[nodeID] = nodeIdx
    dists = array.array("H", [0]*nNodes*nNodes) # use getCacheIndex(nNodes, start, end)
    for sourceIdx in xrange(nNodes):
        print "Finding shortest paths for node {} / {} ({})".format(sourceIdx+1, nNodes, nodeIDs[sourceIdx])
        queue = [(0, sourceIdx)]
        while queue:
            dist, nodeIdx = heapq.heappop(queue)
            distIdx = getCacheIndex(nNodes, sourceIdx, nodeIdx)
            if not dists[distIdx]: # Unvisited, otherwise we skip it
                dists[distIdx] = dist
                for peer in nodestore[nodeIDs[nodeIdx]].links:
                    pIdx = idxs[peer]
                    pdIdx = getCacheIndex(nNodes, sourceIdx, pIdx)
                    if not dists[pdIdx]:
                        # Peer is also unvisited, so add to queue
                        heapq.heappush(queue, (dist+LINK_COST, pIdx))
    return dists

def linkNodes(node1, node2):
    node1.links[node2.info.nodeID] = node2
    node2.links[node1.info.nodeID] = node1

############################
# Store topology functions #
############################

def makeStoreSquareGrid(sideLength, randomize=True):
    # Simple grid in a sideLength*sideLength square
    # Just used to validate that the code runs
    store = dict()
    nodeIDs = list(range(sideLength*sideLength))
    if randomize: random.shuffle(nodeIDs)
    for nodeID in nodeIDs:
        store[nodeID] = Node(nodeID)
    for index in xrange(len(nodeIDs)):
        if (index % sideLength != 0): linkNodes(store[nodeIDs[index]], store[nodeIDs[index-1]])
        if (index >= sideLength): linkNodes(store[nodeIDs[index]], store[nodeIDs[index-sideLength]])
    print "Grid store created, size {}".format(len(store))
    return store

def makeStoreASRelGraph(pathToGraph):
    # Existing network graphs, in caida.org's asrel format (ASx|ASy|z per line, z denotes relationship type)
    with open(pathToGraph, "r") as f:
        inData = f.readlines()
    store = dict()
    for line in inData:
        if line.strip()[0] == "#": continue # Skip comment lines
        line = line.replace('|'," ")
        nodes = map(int, line.split()[0:2])
        if nodes[0] not in store: store[nodes[0]] = Node(nodes[0])
        if nodes[1] not in store: store[nodes[1]] = Node(nodes[1])
        linkNodes(store[nodes[0]], store[nodes[1]])
    print "CAIDA AS-relation graph successfully imported, size {}".format(len(store))
    return store

def makeStoreASRelGraphMaxDeg(pathToGraph, degIdx=0):
    with open(pathToGraph, "r") as f:
        inData = f.readlines()
    store = dict()
    nodeDeg = dict()
    for line in inData:
        if line.strip()[0] == "#": continue # Skip comment lines
        line = line.replace('|'," ")
        nodes = map(int, line.split()[0:2])
        if nodes[0] not in nodeDeg: nodeDeg[nodes[0]] = 0
        if nodes[1] not in nodeDeg: nodeDeg[nodes[1]] = 0
        nodeDeg[nodes[0]] += 1
        nodeDeg[nodes[1]] += 1
    sortedNodes = sorted(nodeDeg.keys(),
                         key=lambda x: (nodeDeg[x], x),
                         reverse=True)
    maxDegNodeID = sortedNodes[degIdx]
    return makeStoreASRelGraphFixedRoot(pathToGraph, maxDegNodeID)

def makeStoreASRelGraphFixedRoot(pathToGraph, rootNodeID):
    with open(pathToGraph, "r") as f:
        inData = f.readlines()
    store = dict()
    for line in inData:
        if line.strip()[0] == "#": continue # Skip comment lines
        line = line.replace('|'," ")
        nodes = map(int, line.split()[0:2])
        if nodes[0] not in store:
            store[nodes[0]] = Node(nodes[0])
            if nodes[0] == rootNodeID: store[nodes[0]].info.treeID += 1000000000
        if nodes[1] not in store:
            store[nodes[1]] = Node(nodes[1])
            if nodes[1] == rootNodeID: store[nodes[1]].info.treeID += 1000000000
        linkNodes(store[nodes[0]], store[nodes[1]])
    print "CAIDA AS-relation graph successfully imported, size {}".format(len(store))
    return store

def makeStoreDimesEdges(pathToGraph, rootNodeID=None):
    # Read from a DIMES csv-formatted graph from a gzip file
    store = dict()
    with gzip.open(pathToGraph, "r") as f:
        inData = f.readlines()
    size = len(inData)
    index = 0
    for edge in inData:
        if not index % 1000:
            pct = 100.0*index/size
            print "Processing edge {}, {:.2f}%".format(index, pct)
        index += 1
        dat = edge.rstrip().split(',')
        node1 = "N" + str(dat[0].strip())
        node2 = "N" + str(dat[1].strip())
        if '?' in node1 or '?' in node2: continue # Unknown node
        if node1 == rootNodeID: node1 = "R" + str(dat[0].strip())
        if node2 == rootNodeID: node2 = "R" + str(dat[1].strip())
        if node1 not in store: store[node1] = Node(node1)
        if node2 not in store: store[node2] = Node(node2)
        if node1 != node2: linkNodes(store[node1], store[node2])
    print "DIMES graph successfully imported, size {}".format(len(store))
    return store

def makeStoreGeneratedGraph(pathToGraph, root=None):
    with open(pathToGraph, "r") as f:
        inData = f.readlines()
    store = dict()
    for line in inData:
        if line.strip()[0] == "#": continue # Skip comment lines
        nodes = map(int, line.strip().split(' ')[0:2])
        node1 = nodes[0]
        node2 = nodes[1]
        if node1 == root: node1 += 1000000
        if node2 == root: node2 += 1000000
        if node1 not in store: store[node1] = Node(node1)
        if node2 not in store: store[node2] = Node(node2)
        linkNodes(store[node1], store[node2])
    print "Generated graph successfully imported, size {}".format(len(store))
    return store


############################################
# Functions used as parts of network tests #
############################################

def idleUntilConverged(store):
    nodeIDs = sorted(store.keys())
    timeOfLastChange = 0
    step = 0
    # Idle until the network has converged
    while step - timeOfLastChange < 4*TIMEOUT:
        step += 1
        print "Step: {}, last change: {}".format(step, timeOfLastChange)
        changed = False
        for nodeID in nodeIDs:
            # Update node status, send messages
            changed |= store[nodeID].tick()
        for nodeID in nodeIDs:
            # Process messages
            changed |= store[nodeID].handleMessages()
        if changed: timeOfLastChange = step
    initTables(store)
    return store

def getCacheIndex(nodes, sourceIndex, destIndex):
    return sourceIndex*nodes + destIndex
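# e.g. with nodes == 4, (sourceIndex=2, destIndex=3) maps to 2*4+3 == 11
# (row-major indexing into the flat nNodes*nNodes arrays used above and below)
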
def initTables(store):
    nodeIDs = sorted(store.keys())
    nNodes = len(nodeIDs)
    print "Initializing routing tables for {} nodes".format(nNodes)
    for idx in xrange(nNodes):
        nodeID = nodeIDs[idx]
        store[nodeID].initTable()
    print "Routing tables initialized"
    return None

def getCache(store):
    nodeIDs = sorted(store.keys())
    nNodes = len(nodeIDs)
    nodeIdxs = dict()
    for nodeIdx in xrange(nNodes):
        nodeIdxs[nodeIDs[nodeIdx]] = nodeIdx
    cache = array.array("H", [0]*nNodes*nNodes)
    for sourceIdx in xrange(nNodes):
        sourceID = nodeIDs[sourceIdx]
        print "Building fast lookup table for node {} / {} ({})".format(sourceIdx+1, nNodes, sourceID)
        for destIdx in xrange(nNodes):
            destID = nodeIDs[destIdx]
            if sourceID == destID: nextHop = destID # lookup would fail
            else: nextHop = store[sourceID].lookup(store[destID].info)
            nextHopIdx = nodeIdxs[nextHop]
            cache[getCacheIndex(nNodes, sourceIdx, destIdx)] = nextHopIdx
    return cache

def testPaths(store, dists):
    cache = getCache(store)
    nodeIDs = sorted(store.keys())
    nNodes = len(nodeIDs)
    idxs = dict()
    for nodeIdx in xrange(nNodes):
        nodeID = nodeIDs[nodeIdx]
        idxs[nodeID] = nodeIdx
    results = dict()
    for sourceIdx in xrange(nNodes):
        sourceID = nodeIDs[sourceIdx]
        print "Testing paths from node {} / {} ({})".format(sourceIdx+1, len(nodeIDs), sourceID)
        #dists = dijkstra(store, sourceID)
        for destIdx in xrange(nNodes):
            destID = nodeIDs[destIdx]
            if destID == sourceID: continue # Skip self
            distIdx = getCacheIndex(nNodes, sourceIdx, destIdx)
            eHops = dists[distIdx]
            if not eHops: continue # The network is split, no path exists
            hops = 0
            for pair in ((sourceIdx, destIdx),):
                nHops = 0
                locIdx = pair[0]
                dIdx = pair[1]
                while locIdx != dIdx:
                    locIdx = cache[getCacheIndex(nNodes, locIdx, dIdx)]
                    nHops += 1
                if not hops or nHops < hops: hops = nHops
            if eHops not in results: results[eHops] = dict()
            if hops not in results[eHops]: results[eHops][hops] = 0
            results[eHops][hops] += 1
    return results

def getAvgStretch(pathMatrix):
    avgStretch = 0.
    checked = 0.
    for eHops in sorted(pathMatrix.keys()):
        for nHops in sorted(pathMatrix[eHops].keys()):
            count = pathMatrix[eHops][nHops]
            stretch = float(nHops)/float(max(1, eHops))
            avgStretch += stretch*count
            checked += count
    avgStretch /= max(1, checked)
    return avgStretch

def getMaxStretch(pathMatrix):
    maxStretch = 0.
    for eHops in sorted(pathMatrix.keys()):
        for nHops in sorted(pathMatrix[eHops].keys()):
            stretch = float(nHops)/float(max(1, eHops))
            maxStretch = max(maxStretch, stretch)
    return maxStretch

def getCertSizes(store):
    # Returns nCerts frequency distribution
    # De-duplicates common certs (for shared prefixes in the path)
    sizes = dict()
    for node in store.values():
        certs = set()
        for peer in node.peers.values():
            assert len(peer.path) == 2
            assert peer.coords[-1] == peer.path[0]
            hops = peer.coords + peer.path[1:]
            for hopIdx in xrange(len(hops)-1):
                send = hops[hopIdx]
                if send == node.info.nodeID: continue # We created it, already have it
                path = hops[0:hopIdx+2]
                # Each cert is signed by the sender
                # Includes information about the path from the sender to the next hop
                # Next hop is at hopIdx+1, so the path to next hop is hops[0:hopIdx+2]
                cert = "{}:{}".format(send, path)
                certs.add(cert)
        size = len(certs)
        if size not in sizes: sizes[size] = 0
        sizes[size] += 1
    return sizes

def getMinLinkCertSizes(store):
    # Returns nCerts frequency distribution
    # De-duplicates common certs (for shared prefixes in the path)
    # Based on the minimum number of certs that must be traded through a particular link
    # Handled per link
    sizes = dict()
    for node in store.values():
        peerCerts = dict()
        for peer in node.peers.values():
            pCerts = set()
            assert len(peer.path) == 2
            assert peer.coords[-1] == peer.path[0]
            hops = peer.coords + peer.path[1:]
            for hopIdx in xrange(len(hops)-1):
                send = hops[hopIdx]
                if send == node.info.nodeID: continue # We created it, already have it
                path = hops[0:hopIdx+2]
                # Each cert is signed by the sender
                # Includes information about the path from the sender to the next hop
                # Next hop is at hopIdx+1, so the path to next hop is hops[0:hopIdx+2]
                cert = "{}:{}".format(send, path)
                pCerts.add(cert)
            peerCerts[peer.nodeID] = pCerts
        for peer in peerCerts:
            size = 0
            pCerts = peerCerts[peer]
            for cert in pCerts:
                required = True
                for p2 in peerCerts:
                    if p2 == peer: continue
                    p2Certs = peerCerts[p2]
                    if cert in p2Certs: required = False
                if required: size += 1
            if size not in sizes: sizes[size] = 0
            sizes[size] += 1
    return sizes

def getPathSizes(store):
    # Returns frequency distribution of the total number of hops in the routing table
    # I.e. a node with 3 peers, each with 5 hop coord+path, would count as 3x5=15
    sizes = dict()
    for node in store.values():
        size = 0
        for peer in node.peers.values():
            assert len(peer.path) == 2
            assert peer.coords[-1] == peer.path[0]
            peerSize = len(peer.coords) + len(peer.path) - 1 # double-counts peer, -1
            size += peerSize
        if size not in sizes: sizes[size] = 0
        sizes[size] += 1
    return sizes

def getPeerSizes(store):
    # Returns frequency distribution of the number of peers each node has
    sizes = dict()
    for node in store.values():
        nPeers = len(node.peers)
        if nPeers not in sizes: sizes[nPeers] = 0
        sizes[nPeers] += 1
    return sizes

def getAvgSize(sizes):
    sumSizes = 0
    nNodes = 0
    for size in sizes:
        count = sizes[size]
        sumSizes += size*count
        nNodes += count
    avgSize = float(sumSizes)/max(1, nNodes)
    return avgSize

def getMaxSize(sizes):
    return max(sizes.keys())

def getMinSize(sizes):
    return min(sizes.keys())

def getResults(pathMatrix):
    results = []
    for eHops in sorted(pathMatrix.keys()):
        for nHops in sorted(pathMatrix[eHops].keys()):
            count = pathMatrix[eHops][nHops]
            results.append("{} {} {}".format(eHops, nHops, count))
    return '\n'.join(results)


####################################
# Functions to run different tests #
####################################

def runTest(store):
    # Runs the usual set of tests on the store
    # Does not save results, so only meant for quick tests
    # To e.g. check the code works, maybe warm up the pypy jit
    for node in store.values():
        node.info.time = random.randint(0, TIMEOUT)
        node.info.tstamp = TIMEOUT
    print "Begin testing network"
    dists = None
    if not dists: dists = dijkstrall(store)
    idleUntilConverged(store)
    pathMatrix = testPaths(store, dists)
    avgStretch = getAvgStretch(pathMatrix)
    maxStretch = getMaxStretch(pathMatrix)
    peers = getPeerSizes(store)
    certs = getCertSizes(store)
    paths = getPathSizes(store)
    linkCerts = getMinLinkCertSizes(store)
    avgPeerSize = getAvgSize(peers)
    maxPeerSize = getMaxSize(peers)
    avgCertSize = getAvgSize(certs)
    maxCertSize = getMaxSize(certs)
    avgPathSize = getAvgSize(paths)
    maxPathSize = getMaxSize(paths)
    avgLinkCert = getAvgSize(linkCerts)
    maxLinkCert = getMaxSize(linkCerts)
    totalCerts = sum(map(lambda x: x*certs[x], certs.keys()))
    totalLinks = sum(map(lambda x: x*peers[x], peers.keys())) # one-way links
    avgCertsPerLink = float(totalCerts)/max(1, totalLinks)
    print "Finished testing network"
    print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
    print "Avg / Max nPeers size: {} / {}".format(avgPeerSize, maxPeerSize)
    print "Avg / Max nCerts size: {} / {}".format(avgCertSize, maxCertSize)
    print "Avg / Max total hops in any node's routing table: {} / {}".format(avgPathSize, maxPathSize)
    print "Avg / Max lower bound cert requests per link (one-way): {} / {}".format(avgLinkCert, maxLinkCert)
    print "Avg certs per link (one-way): {}".format(avgCertsPerLink)
    return # End of function

def rootNodeASTest(path, outDir="output-treesim-AS", dists=None, proc=1):
    # Checks performance for every possible choice of root node
    # Saves output for each root node to a separate file on disk
    # path = input path to some caida.org formatted AS-relationship graph
    if not os.path.exists(outDir): os.makedirs(outDir)
    assert os.path.exists(outDir)
    store = makeStoreASRelGraph(path)
    nodes = sorted(store.keys())
    for nodeIdx in xrange(len(nodes)):
        if nodeIdx % proc != 0: continue # Work belongs to someone else
        rootNodeID = nodes[nodeIdx]
        outpath = outDir+"/{}".format(rootNodeID)
        if os.path.exists(outpath):
            print "Skipping {}, already processed".format(rootNodeID)
            continue
        store = makeStoreASRelGraphFixedRoot(path, rootNodeID)
        for node in store.values():
            node.info.time = random.randint(0, TIMEOUT)
            node.info.tstamp = TIMEOUT
        print "Beginning {}, size {}".format(nodeIdx, len(store))
        if not dists: dists = dijkstrall(store)
        idleUntilConverged(store)
        pathMatrix = testPaths(store, dists)
        avgStretch = getAvgStretch(pathMatrix)
        maxStretch = getMaxStretch(pathMatrix)
        results = getResults(pathMatrix)
        with open(outpath, "w") as f:
            f.write(results)
        print "Finished test for root AS {} ({} / {})".format(rootNodeID, nodeIdx+1, len(store))
        print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
        #break # Stop after 1, because they can take forever
    return # End of function

def timelineASTest():
    # Meant to study the performance of the network as a function of network size
    # Loops over a set of AS-relationship graphs
    # Runs a test on each graph, selecting highest-degree node as the root
    # Saves results for each graph to a separate file on disk
    outDir = "output-treesim-timeline-AS"
    if not os.path.exists(outDir): os.makedirs(outDir)
    assert os.path.exists(outDir)
    paths = sorted(glob.glob("asrel/datasets/*"))
    for path in paths:
        date = os.path.basename(path).split(".")[0]
        outpath = outDir+"/{}".format(date)
        if os.path.exists(outpath):
            print "Skipping {}, already processed".format(date)
            continue
        store = makeStoreASRelGraphMaxDeg(path)
        dists = None
        for node in store.values():
            node.info.time = random.randint(0, TIMEOUT)
            node.info.tstamp = TIMEOUT
        print "Beginning {}, size {}".format(date, len(store))
        if not dists: dists = dijkstrall(store)
        idleUntilConverged(store)
        pathMatrix = testPaths(store, dists)
        avgStretch = getAvgStretch(pathMatrix)
        maxStretch = getMaxStretch(pathMatrix)
        results = getResults(pathMatrix)
        with open(outpath, "w") as f:
            f.write(results)
        print "Finished {} with {} nodes".format(date, len(store))
        print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
        #break # Stop after 1, because they can take forever
    return # End of function

def timelineDimesTest():
    # Meant to study the performance of the network as a function of network size
    # Loops over a set of AS-relationship graphs
    # Runs a test on each graph, selecting highest-degree node as the root
    # Saves results for each graph to a separate file on disk
    outDir = "output-treesim-timeline-dimes"
    if not os.path.exists(outDir): os.makedirs(outDir)
    assert os.path.exists(outDir)
    # Input files are named ASEdgesX_Y where X = month (no leading 0), Y = year
    paths = sorted(glob.glob("DIMES/ASEdges/*.gz"))
    exists = set(glob.glob(outDir+"/*"))
    for path in paths:
        date = os.path.basename(path).split(".")[0]
        outpath = outDir+"/{}".format(date)
        if outpath in exists:
            print "Skipping {}, already processed".format(date)
            continue
        store = makeStoreDimesEdges(path)
        # Get the highest degree node and make it root
        # Sorted by nodeID just to make it stable in the event of a tie
        nodeIDs = sorted(store.keys())
        bestRoot = ""
        bestDeg = 0
        for nodeID in nodeIDs:
            node = store[nodeID]
            if len(node.links) > bestDeg:
                bestRoot = nodeID
                bestDeg = len(node.links)
        assert bestRoot
        store = makeStoreDimesEdges(path, bestRoot)
        rootID = "R" + bestRoot[1:]
        assert rootID in store
        dists = None # Shortest-path cache, filled in below
        # Don't forget to set random seed before setting times
        # To make results reproducible
        nodeIDs = sorted(store.keys())
        random.seed(12345)
        for nodeID in nodeIDs:
            node = store[nodeID]
            node.info.time = random.randint(0, TIMEOUT)
            node.info.tstamp = TIMEOUT
        print "Beginning {}, size {}".format(date, len(store))
        if not dists: dists = dijkstrall(store)
        idleUntilConverged(store)
        pathMatrix = testPaths(store, dists)
        avgStretch = getAvgStretch(pathMatrix)
        maxStretch = getMaxStretch(pathMatrix)
        results = getResults(pathMatrix)
        with open(outpath, "w") as f:
            f.write(results)
        print "Finished {} with {} nodes".format(date, len(store))
        print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
        break # Stop after 1, because they can take forever
    return # End of function

def scalingTest(maxTests=None, inputDir="graphs"):
    # Meant to study the performance of the network as a function of network size
    # Loops over a set of nodes in a previously generated graph
    # Runs a test on each graph, testing each node as the root
    # if maxTests is set, tests only that number of roots (highest degree first)
    # Saves results for each graph to a separate file on disk
    outDir = "output-treesim-{}".format(inputDir)
    if not os.path.exists(outDir): os.makedirs(outDir)
    assert os.path.exists(outDir)
    paths = sorted(glob.glob("{}/*".format(inputDir)))
    exists = set(glob.glob(outDir+"/*"))
    for path in paths:
        gc.collect() # pypy waits for gc to close files
        graph = os.path.basename(path).split(".")[0]
        store = makeStoreGeneratedGraph(path)
        # Get the highest degree node and make it root
        # Sorted by nodeID just to make it stable in the event of a tie
        nodeIDs = sorted(store.keys(), key=lambda x: len(store[x].links), reverse=True)
        dists = None
        if maxTests: nodeIDs = nodeIDs[:maxTests]
        for nodeID in nodeIDs:
            nodeIDStr = str(nodeID).zfill(len(str(len(store)-1)))
            outpath = outDir+"/{}-{}".format(graph, nodeIDStr)
            if outpath in exists:
                print "Skipping {}-{}, already processed".format(graph, nodeIDStr)
                continue
            store = makeStoreGeneratedGraph(path, nodeID)
            # Don't forget to set random seed before setting times
            random.seed(12345) # To make results reproducible
            nIDs = sorted(store.keys())
            for nID in nIDs:
                node = store[nID]
                node.info.time = random.randint(0, TIMEOUT)
                node.info.tstamp = TIMEOUT
            print "Beginning {}, size {}".format(graph, len(store))
            if not dists: dists = dijkstrall(store)
            idleUntilConverged(store)
            pathMatrix = testPaths(store, dists)
            avgStretch = getAvgStretch(pathMatrix)
            maxStretch = getMaxStretch(pathMatrix)
            results = getResults(pathMatrix)
            with open(outpath, "w") as f:
                f.write(results)
            print "Finished {} with {} nodes for root {}".format(graph, len(store), nodeID)
            print "Avg / Max stretch: {} / {}".format(avgStretch, maxStretch)
    return # End of function


##################
# Main Execution #
##################

if __name__ == "__main__":
    if True: # Run a quick test
        random.seed(12345) # DEBUG
        store = makeStoreSquareGrid(4)
        runTest(store) # Quick test
    store = None
    # Do some real work
    #runTest(makeStoreDimesEdges("DIMES/ASEdges/ASEdges1_2007.csv.gz"))
    #timelineDimesTest()
    #rootNodeASTest("asrel/datasets/19980101.as-rel.txt")
    #timelineASTest()
    #rootNodeASTest("hype-2016-09-19.list", "output-treesim-hype")
    #scalingTest(None, "graphs-20") # First argument 1 to only test 1 root per graph
    #store = makeStoreGeneratedGraph("bgp_tables")
    #store = makeStoreGeneratedGraph("skitter")
    #store = makeStoreASRelGraphMaxDeg("hype-2016-09-19.list") #http://hia.cjdns.ca/watchlist/c/walk.peers.20160919
    #store = makeStoreGeneratedGraph("fc00-2017-08-12.txt")
    if store: runTest(store)
    #rootNodeASTest("skitter", "output-treesim-skitter", None, 0, 1)
    #scalingTest(1, "graphs-20") # First argument 1 to only test 1 root per graph
    #scalingTest(1, "graphs-21") # First argument 1 to only test 1 root per graph
    #scalingTest(1, "graphs-22") # First argument 1 to only test 1 root per graph
    #scalingTest(1, "graphs-23") # First argument 1 to only test 1 root per graph
    if not store:
        import sys
        args = sys.argv
        if len(args) == 2:
            job_number = int(sys.argv[1])
            #rootNodeASTest("fc00-2017-08-12.txt", "fc00", None, job_number)
            rootNodeASTest("skitter", "out-skitter", None, job_number)
        else:
            print "Usage: {} job_number".format(args[0])
            print "job_number = which job set to run on this node (1-indexed)"
410
misc/sim/treesim.go
Normal file
@ -0,0 +1,410 @@
package main

import "fmt"
import "bufio"
import "os"
import "strings"
import "strconv"
import "time"

import "runtime/pprof"
import "flag"

import . "yggdrasil"

////////////////////////////////////////////////////////////////////////////////

type Node struct {
	index int
	core  Core
	send  chan<- []byte
	recv  <-chan []byte
}

func (n *Node) init(index int) {
	n.index = index
	n.core.Init()
	n.send = n.core.DEBUG_getSend()
	n.recv = n.core.DEBUG_getRecv()
}

func (n *Node) printTraffic() {
	for {
		packet := <-n.recv
		fmt.Println(n.index, packet)
		//panic("Got a packet")
	}
}

func (n *Node) startPeers() {
	//for _, p := range n.core.Peers.Ports {
	//	go p.MainLoop()
	//}
	//go n.printTraffic()
	//n.core.Peers.DEBUG_startPeers()
}

func linkNodes(m, n *Node) {
	// Don't allow duplicates
	if m.core.DEBUG_getPeers().DEBUG_hasPeer(n.core.DEBUG_getSigPub()) { return }
	// Create peers
	// Buffering reduces packet loss in the sim
	// This slightly speeds up testing (fewer delays before retrying a ping)
	p := m.core.DEBUG_getPeers().DEBUG_newPeer(n.core.DEBUG_getBoxPub(),
		n.core.DEBUG_getSigPub())
	q := n.core.DEBUG_getPeers().DEBUG_newPeer(m.core.DEBUG_getBoxPub(),
		m.core.DEBUG_getSigPub())
	DEBUG_simLinkPeers(p, q)
	return
}

func makeStoreSquareGrid(sideLength int) map[int]*Node {
	store := make(map[int]*Node)
	nNodes := sideLength*sideLength
	idxs := make([]int, 0, nNodes)
	// TODO shuffle nodeIDs
	for idx := 1 ; idx <= nNodes ; idx++ {
		idxs = append(idxs, idx)
	}
	for _, idx := range idxs {
		node := &Node{}
		node.init(idx)
		store[idx] = node
	}
	for idx := 0 ; idx < nNodes ; idx++ {
		if (idx % sideLength) != 0 {
			linkNodes(store[idxs[idx]], store[idxs[idx-1]])
		}
		if idx >= sideLength {
			linkNodes(store[idxs[idx]], store[idxs[idx-sideLength]])
		}
	}
	//for _, node := range store { node.initPorts() }
	return store
}

func makeStoreStar(nNodes int) map[int]*Node {
	store := make(map[int]*Node)
	center := &Node{}
	center.init(0)
	store[0] = center
	for idx := 1 ; idx < nNodes ; idx++ {
		node := &Node{}
		node.init(idx)
		store[idx] = node
		linkNodes(center, node)
	}
	return store
}

func loadGraph(path string) map[int]*Node {
	f, err := os.Open(path)
	if err != nil { panic(err) }
	defer f.Close()
	store := make(map[int]*Node)
	s := bufio.NewScanner(f)
	for s.Scan() {
		line := s.Text()
		nodeIdxstrs := strings.Split(line, " ")
		nodeIdx0, _ := strconv.Atoi(nodeIdxstrs[0])
		nodeIdx1, _ := strconv.Atoi(nodeIdxstrs[1])
		if store[nodeIdx0] == nil {
			node := &Node{}
			node.init(nodeIdx0)
			store[nodeIdx0] = node
		}
		if store[nodeIdx1] == nil {
			node := &Node{}
			node.init(nodeIdx1)
			store[nodeIdx1] = node
		}
		linkNodes(store[nodeIdx0], store[nodeIdx1])
	}
	//for _, node := range store { node.initPorts() }
	return store
}

////////////////////////////////////////////////////////////////////////////////

func startNetwork(store map[[32]byte]*Node) {
	for _, node := range store {
		node.startPeers()
	}
}

func getKeyedStore(store map[int]*Node) map[[32]byte]*Node {
	newStore := make(map[[32]byte]*Node)
	for _, node := range store {
		newStore[node.core.DEBUG_getSigPub()] = node
	}
	return newStore
}

func testPaths(store map[[32]byte]*Node) bool {
	nNodes := len(store)
	count := 0
	for _, source := range store {
		count++
		fmt.Printf("Testing paths from node %d / %d (%d)\n", count, nNodes, source.index)
		for _, dest := range store {
			//if source == dest { continue }
			destLoc := dest.core.DEBUG_getLocator()
			coords := destLoc.DEBUG_getCoords()
			temp := 0
			ttl := ^uint64(0)
			oldTTL := ttl
			for here := source ; here != dest ; {
				if ttl == 0 {
					fmt.Println("Drop:", source.index, here.index, dest.index, oldTTL)
					return false
				}
				temp++
				if temp > 4096 { panic("Loop?") }
				oldTTL = ttl
				nextPort, newTTL := here.core.DEBUG_switchLookup(coords, ttl)
				ttl = newTTL
				// First check if "here" is accepting packets from the previous node
				// TODO explain how this works
				ports := here.core.DEBUG_getPeers().DEBUG_getPorts()
				nextPeer := ports[nextPort]
				if nextPeer == nil {
					fmt.Println("Peer associated with next port is nil")
					return false
				}
				next := store[nextPeer.DEBUG_getSigKey()]
				/*
				if next == here {
					//for idx, link := range here.links {
					//	fmt.Println("DUMP:", idx, link.nodeID)
					//}
					if nextPort != 0 { panic("This should not be") }
					fmt.Println("Failed to route:", source.index, here.index, dest.index, oldTTL, ttl)
					//here.table.DEBUG_dumpTable()
					//fmt.Println("Ports:", here.nodeID, here.ports)
					return false
					panic(fmt.Sprintln("Routing Loop:",
						source.index,
						here.index,
						dest.index))
				}
				*/
				if temp > 4090 {
					fmt.Println("DEBUG:",
						source.index, source.core.DEBUG_getLocator(),
						here.index, here.core.DEBUG_getLocator(),
						dest.index, dest.core.DEBUG_getLocator())
					here.core.DEBUG_getSwitchTable().DEBUG_dumpTable()
				}
				if here != source {
					// This is sufficient to check for routing loops or blackholes
					//break
				}
				here = next
			}
		}
	}
	return true
}

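// Note on the checks above: DEBUG_switchLookup is expected to decrease the TTL
// at each hop, so a zero TTL is treated as a drop and testPaths reports failure
// (the caller retries), while a genuine loop eventually trips the temp > 4096 panic.
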
func stressTest(store map[[32]byte]*Node) {
	fmt.Println("Stress testing network...")
	nNodes := len(store)
	dests := make([][]byte, 0, nNodes)
	for _, dest := range store {
		loc := dest.core.DEBUG_getLocator()
		coords := loc.DEBUG_getCoords()
		dests = append(dests, coords)
	}
	lookups := 0
	start := time.Now()
	for _, source := range store {
		for _, coords := range dests {
			source.core.DEBUG_switchLookup(coords, ^uint64(0))
			lookups++
		}
	}
	timed := time.Since(start)
	fmt.Printf("%d lookups in %s (%f lookups per second)\n",
		lookups,
		timed,
		float64(lookups)/timed.Seconds())
}

func pingNodes(store map[[32]byte]*Node) {
	fmt.Println("Sending pings...")
	nNodes := len(store)
	count := 0
	equiv := func (a []byte, b []byte) bool {
		if len(a) != len(b) { return false }
		for idx := 0 ; idx < len(a) ; idx++ {
			if a[idx] != b[idx] { return false }
		}
		return true
	}
	for _, source := range store {
		count++
		//if count > 16 { break }
		fmt.Printf("Sending packets from node %d/%d (%d)\n", count, nNodes, source.index)
		sourceKey := source.core.DEBUG_getBoxPub()
		payload := sourceKey[:]
		sourceAddr := source.core.DEBUG_getAddr()[:]
		sendTo := func (bs []byte, destAddr []byte) {
			packet := make([]byte, 40+len(bs))
			copy(packet[8:24], sourceAddr)
			copy(packet[24:40], destAddr)
			copy(packet[40:], bs)
			source.send<-packet
		}
		destCount := 0
		for _, dest := range store {
			destCount += 1
			fmt.Printf("%d Nodes, %d Send, %d Recv\n", nNodes, count, destCount)
			if dest == source {
				fmt.Println("Skipping self")
				continue
			}
			destAddr := dest.core.DEBUG_getAddr()[:]
			ticker := time.NewTicker(150*time.Millisecond)
			ch := make(chan bool, 1)
			ch<-true
			doTicker := func () {
				for range ticker.C {
					select {
					case ch<-true:
					default:
					}
				}
			}
			go doTicker()
			for loop := true ; loop ; {
				select {
				case packet := <-dest.recv: {
					if equiv(payload, packet[len(packet)-len(payload):]) {
						loop = false
					}
				}
				case <-ch: sendTo(payload, destAddr)
				}
			}
			ticker.Stop()
		}
		//break // Only try sending pings from 1 node
		// This is because, for some reason, stopTun() doesn't always close it
		// And if two tuns are up, bad things happen (sends via wrong interface)
	}
	fmt.Println("Finished pinging nodes")
}

func pingBench(store map[[32]byte]*Node) {
	fmt.Println("Benchmarking pings...")
	nPings := 0
	payload := make([]byte, 1280+40) // MTU + ipv6 header
	var timed time.Duration
	//nNodes := len(store)
	count := 0
	for _, source := range store {
		count++
		//fmt.Printf("Sending packets from node %d/%d (%d)\n", count, nNodes, source.index)
		getPing := func (key [32]byte, decodedCoords []byte) []byte {
			// TODO write some function to do this the right way, put... somewhere...
			coords := DEBUG_wire_encode_coords(decodedCoords)
			packet := make([]byte, 0, len(key)+len(coords)+len(payload))
			packet = append(packet, key[:]...)
			packet = append(packet, coords...)
			packet = append(packet, payload[:]...)
			return packet
		}
		for _, dest := range store {
			key := dest.core.DEBUG_getBoxPub()
			loc := dest.core.DEBUG_getLocator()
			coords := loc.DEBUG_getCoords()
			ping := getPing(key, coords)
			// TODO make sure the session is open first
			start := time.Now()
			for i := 0 ; i < 1000000 ; i++ { source.send<-ping ; nPings++ }
			timed += time.Since(start)
			break
		}
		break
	}
	fmt.Printf("Sent %d pings in %s (%f per second)\n",
		nPings,
		timed,
		float64(nPings)/timed.Seconds())
}

func dumpStore(store map[NodeID]*Node) {
	for _, node := range store {
		fmt.Println("DUMPSTORE:", node.index, node.core.DEBUG_getLocator())
		node.core.DEBUG_getSwitchTable().DEBUG_dumpTable()
	}
}

func dumpDHTSize(store map[[32]byte]*Node) {
	var min, max, sum int
	for _, node := range store {
		num := node.core.DEBUG_getDHTSize()
		min = num
		max = num
		break
	}
	for _, node := range store {
		num := node.core.DEBUG_getDHTSize()
		if num < min { min = num }
		if num > max { max = num }
		sum += num
	}
	avg := float64(sum)/float64(len(store))
	fmt.Printf("DHT min %d / avg %f / max %d\n", min, avg, max)
}

////////////////////////////////////////////////////////////////////////////////

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			panic(fmt.Sprintln("could not create CPU profile:", err))
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			panic(fmt.Sprintln("could not start CPU profile:", err))
		}
		defer pprof.StopCPUProfile()
	}
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			panic(fmt.Sprintln("could not create memory profile:", err))
		}
		defer func () { pprof.WriteHeapProfile(f) ; f.Close() }()
	}
	fmt.Println("Test")
	Util_testAddrIDMask()
	idxstore := makeStoreSquareGrid(4)
	//idxstore := makeStoreStar(256)
	//idxstore := loadGraph("misc/sim/hype-2016-09-19.list")
	//idxstore := loadGraph("misc/sim/fc00-2017-08-12.txt")
	//idxstore := loadGraph("skitter")
	kstore := getKeyedStore(idxstore)
	/*
	for _, n := range kstore {
		log := n.core.DEBUG_getLogger()
		log.SetOutput(os.Stderr)
	}
	*/
	startNetwork(kstore)
	//time.Sleep(10*time.Second)
	// Note that testPaths only works if pressure is turned off
	// Otherwise congestion can lead to routing loops?
	for finished := false; !finished ; { finished = testPaths(kstore) }
	pingNodes(kstore)
	//pingBench(kstore) // Only after disabling debug output
	//stressTest(kstore)
	//time.Sleep(120*time.Second)
	dumpDHTSize(kstore) // note that this uses racy functions to read things...
}

22
misc/tests/atomic-toy.go
Normal file
@ -0,0 +1,22 @@
package main

import "fmt"
import "time"
import "sync/atomic"
import "runtime"

func main() {
	var ops uint64 = 0
	for i := 0 ; i < 4 ; i++ {
		go func () {
			for {
				atomic.AddUint64(&ops, 1)
				runtime.Gosched()
			}
		}()
	}
	time.Sleep(1*time.Second)
	opsFinal := atomic.LoadUint64(&ops)
	fmt.Println("ops:", opsFinal)
}
42
misc/tests/bandwidth.go
Normal file
@ -0,0 +1,42 @@
package main

import "fmt"
import "net"
import "time"

func main() {
	addr, err := net.ResolveTCPAddr("tcp", "[::1]:9001")
	if err != nil { panic(err) }
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil { panic(err) }
	defer listener.Close()

	packetSize := 65535
	numPackets := 65535

	go func () {
		send, err := net.DialTCP("tcp", nil, addr)
		if err != nil { panic(err) }
		defer send.Close()
		msg := make([]byte, packetSize)
		for idx := 0 ; idx < numPackets ; idx++ { send.Write(msg) }
	}()

	start := time.Now()
	//msg := make([]byte, 1280)
	sock, err := listener.AcceptTCP()
	if err != nil { panic(err) }
	defer sock.Close()
	read := 0
	buf := make([]byte, packetSize)
	for {
		n, err := sock.Read(buf)
		read += n
		if err != nil { break }
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
	fmt.Printf("%f bits/sec\n", 8*float64(read)/timed.Seconds())
}
36
misc/tests/channelbenchmark.go
Normal file
@ -0,0 +1,36 @@
package main

import "time"
import "fmt"
import "sync"

func main() {
	fmt.Println("Testing speed of recv+send loop")
	const count = 10000000
	c := make(chan []byte, 1)
	c<-[]byte{}
	var wg sync.WaitGroup
	worker := func () {
		for idx := 0 ; idx < count ; idx++ {
			p := <-c
			select {
			case c<-p:
			default:
			}
		}
		wg.Done()
	}
	nIter := 0
	start := time.Now()
	for idx := 0 ; idx < 1 ; idx++ {
		wg.Add(1) // Add before launching the worker, so Done can't race ahead of Add
		go worker()
		nIter += count
	}
	wg.Wait()
	stop := time.Now()
	timed := stop.Sub(start)
	fmt.Printf("%d iterations in %s\n", nIter, timed)
	fmt.Printf("%f iterations per second\n", float64(nIter)/timed.Seconds())
	fmt.Printf("%s per iteration\n", timed/time.Duration(nIter))
}
52
misc/tests/gob-test.go
Normal file
@ -0,0 +1,52 @@
package main

import "bytes"
import "encoding/gob"
import "time"
import "fmt"

type testStruct struct {
	First  uint64
	Second float64
	Third  []byte
}

func testFunc(tickerDuration time.Duration) {
	chn := make(chan []byte)
	ticker := time.NewTicker(tickerDuration)
	defer ticker.Stop()
	send := testStruct{First: 1, Second: 2, Third: []byte{3, 4, 5}}
	buf := bytes.NewBuffer(nil)
	enc := gob.NewEncoder(buf)
	dec := gob.NewDecoder(buf)
	sendCall := func () {
		err := enc.EncodeValue(&send)
		if err != nil { panic(err) }
		bs := make([]byte, buf.Len())
		buf.Read(bs)
		fmt.Println("send:", bs)
		go func() { chn<-bs }()
	}
	recvCall := func (bs []byte) {
		buf.Write(bs)
		recv := testStruct{}
		err := dec.DecodeValue(&recv)
		fmt.Println("recv:", bs)
		if err != nil { panic(err) }
	}
	for {
		select {
		case bs := <-chn : recvCall(bs)
		case <-ticker.C : sendCall()
		}
	}
}

func main() {
	go testFunc(100*time.Millisecond) // Does not crash
	time.Sleep(time.Second)
	go testFunc(time.Nanosecond) // Does crash
	time.Sleep(time.Second)
}
22
misc/tests/goroutine-test.go
Normal file
@ -0,0 +1,22 @@
package main

import "sync"
import "time"
import "fmt"

func main() {
	const reqs = 1000000
	var wg sync.WaitGroup
	start := time.Now()
	for idx := 0 ; idx < reqs ; idx++ {
		wg.Add(1)
		go func () { wg.Done() } ()
	}
	wg.Wait()
	stop := time.Now()
	timed := stop.Sub(start)
	fmt.Printf("%d goroutines in %s (%f per second)\n",
		reqs,
		timed,
		reqs/timed.Seconds())
}
49
misc/tests/multicast.go
Normal file
@ -0,0 +1,49 @@
package main

import "fmt"
import "net"
import "time"

// TODO look into netmap + libpcap to bypass the kernel as much as possible

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!
	//addr, err := net.ResolveUDPAddr("udp", "[ff02::1%veth0]:9001")
	addr, err := net.ResolveUDPAddr("udp", "[ff02::1]:9001")
	if err != nil { panic(err) }
	sock, err := net.ListenMulticastUDP("udp", nil, addr)
	if err != nil { panic(err) }
	defer sock.Close()

	go func () {
		saddr, err := net.ResolveUDPAddr("udp", "[::]:0")
		if err != nil { panic(err) }
		send, err := net.ListenUDP("udp", saddr)
		if err != nil { panic(err) }
		defer send.Close()
		msg := make([]byte, 1280)
		for {
			//fmt.Println("Sending...")
			send.WriteTo(msg, addr)
		}
	}()

	numPackets := 1000
	start := time.Now()
	msg := make([]byte, 2000)
	for i := 0 ; i < numPackets ; i++ {
		//fmt.Println("Reading:", i)
		sock.ReadFromUDP(msg)
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
}

func main() {
	basic_test()
}
78
misc/tests/packetbenchmark.go
Normal file
@ -0,0 +1,78 @@
package main

import "fmt"
import "net"
import "time"

// TODO look into netmap + libpcap to bypass the kernel as much as possible

func basic_test() {

	// TODO need a way to look up who our link-local neighbors are for each iface!

	var ip *net.IP
	ifaces, err := net.Interfaces()
	if err != nil { panic(err) }
	var zone string
	for _, iface := range ifaces {
		addrs, err := iface.Addrs()
		if err != nil { panic(err) }
		for _, addr := range addrs {
			addrIP, _, _ := net.ParseCIDR(addr.String())
			if addrIP.To4() != nil { continue } // IPv6 only
			if !addrIP.IsLinkLocalUnicast() { continue }
			zone = iface.Name
			ip = &addrIP
		}
		addrs, err = iface.MulticastAddrs()
		if err != nil { panic(err) }
		for _, addr := range addrs {
			fmt.Println(addr.String())
		}
	}
	if ip == nil { panic("No link-local IPv6 found") }
	fmt.Println("Using address:", *ip)

	addr := net.UDPAddr{IP: *ip, Port: 9001, Zone: zone}

	saddr := net.UDPAddr{IP: *ip, Port: 9002, Zone: zone}
	send, err := net.ListenUDP("udp", &saddr)
	if err != nil { panic(err) }
	defer send.Close()
	sock, err := net.ListenUDP("udp", &addr)
	if err != nil { panic(err) }
	defer sock.Close()

	const buffSize = 1048576*100

	send.SetWriteBuffer(buffSize)
	sock.SetReadBuffer(buffSize)
	sock.SetWriteBuffer(buffSize)

	go func () {
		msg := make([]byte, 1280)
		for {
			send.WriteTo(msg, &addr)
		}
	}()

	numPackets := 100000
	start := time.Now()
	msg := make([]byte, 2000)
	for i := 0 ; i < numPackets ; i++ {
		_, addr, _ := sock.ReadFrom(msg)
		sock.WriteTo(msg, addr)
	}
	timed := time.Since(start)

	fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
}

func main() {
	basic_test()
}
83
misc/tests/pool.go
Normal file
@@ -0,0 +1,83 @@
package main

import "fmt"
//import "net"
import "time"
import "runtime"
import "sync/atomic"

func poolbench() {
    nWorkers := runtime.GOMAXPROCS(0)
    work := make(chan func(), 1)
    workers := make(chan chan<- func(), nWorkers)
    makeWorker := func() chan<- func() {
        ch := make(chan func())
        go func() {
            for {
                f := <-ch
                f()
                select {
                case workers <- ch:
                default: return
                }
            }
        }()
        return ch
    }
    getWorker := func() chan<- func() {
        select {
        case ch := <-workers: return ch
        default: return makeWorker()
        }
    }
    dispatcher := func() {
        for {
            w := <-work
            ch := getWorker()
            ch <- w
        }
    }
    go dispatcher()
    var count uint64
    const nCounts = 1000000
    for idx := 0; idx < nCounts; idx++ {
        f := func() { atomic.AddUint64(&count, 1) }
        work <- f
    }
    for atomic.LoadUint64(&count) < nCounts {}
}

func normalbench() {
    var count uint64
    const nCounts = 1000000
    ch := make(chan struct{}, 1)
    ch <- struct{}{}
    for idx := 0; idx < nCounts; idx++ {
        f := func() { atomic.AddUint64(&count, 1) }
        f()
        <-ch
        ch <- struct{}{}
    }
}

func gobench() {
    var count uint64
    const nCounts = 1000000
    for idx := 0; idx < nCounts; idx++ {
        f := func() { atomic.AddUint64(&count, 1) }
        go f()
    }
    for atomic.LoadUint64(&count) < nCounts {}
}

func main() {
    start := time.Now()
    poolbench()
    fmt.Println(time.Since(start))
    start = time.Now()
    normalbench()
    fmt.Println(time.Since(start))
    start = time.Now()
    gobench()
    fmt.Println(time.Since(start))
}

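For comparison with poolbench's grow-on-demand worker reuse, a fixed-size worker pool fed from one channel is the simpler baseline for the same workload. A self-contained sketch (this variant is not in the commit):

    package main

    import (
        "fmt"
        "runtime"
        "sync"
        "sync/atomic"
    )

    // Fixed pool: one goroutine per CPU, all draining a shared work channel.
    func main() {
        var count uint64
        const nCounts = 1000000
        work := make(chan func())
        var wg sync.WaitGroup
        for w := 0; w < runtime.GOMAXPROCS(0); w++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for f := range work {
                    f()
                }
            }()
        }
        for idx := 0; idx < nCounts; idx++ {
            work <- func() { atomic.AddUint64(&count, 1) }
        }
        close(work)
        wg.Wait()
        fmt.Println("count:", count)
    }
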
84
misc/tests/quic.go
Normal file
@@ -0,0 +1,84 @@
package main

import (
    "bytes"
    "crypto/rand"
    "crypto/rsa"
    "crypto/tls"
    "crypto/x509"
    "encoding/pem"
    "fmt"
    "math/big"
    "sync"
    "time"

    quic "github.com/lucas-clemente/quic-go"
)

const addr = "[::1]:9001"

func main() {
    go run_server()
    run_client()
}

func run_server() {
    listener, err := quic.ListenAddr(addr, generateTLSConfig(), nil)
    if err != nil { panic(err) }
    ses, err := listener.Accept()
    if err != nil { panic(err) }
    for {
        stream, err := ses.AcceptStream()
        if err != nil { panic(err) }
        go func() {
            defer stream.Close()
            bs := bytes.Buffer{}
            _, err := bs.ReadFrom(stream)
            if err != nil { panic(err) } //<-- TooManyOpenStreams
        }()
    }
}

func run_client() {
    msgSize := 1048576
    msgCount := 128
    ses, err := quic.DialAddr(addr, &tls.Config{InsecureSkipVerify: true}, nil)
    if err != nil { panic(err) }
    bs := make([]byte, msgSize)
    wg := sync.WaitGroup{}
    start := time.Now()
    for idx := 0; idx < msgCount; idx++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            stream, err := ses.OpenStreamSync()
            if err != nil { panic(err) }
            defer stream.Close()
            stream.Write(bs)
        }() // "go" this later
    }
    wg.Wait()
    timed := time.Since(start)
    fmt.Println("Client finished", timed, fmt.Sprintf("%f Bits/sec", 8*float64(msgSize*msgCount)/timed.Seconds()))
}

// Setup a bare-bones TLS config for the server
func generateTLSConfig() *tls.Config {
    key, err := rsa.GenerateKey(rand.Reader, 1024)
    if err != nil {
        panic(err)
    }
    template := x509.Certificate{SerialNumber: big.NewInt(1)}
    certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key)
    if err != nil {
        panic(err)
    }
    keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
    certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})

    tlsCert, err := tls.X509KeyPair(certPEM, keyPEM)
    if err != nil {
        panic(err)
    }
    return &tls.Config{Certificates: []tls.Certificate{tlsCert}}
}

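The `//<-- TooManyOpenStreams` comment above is consistent with the client opening up to 128 streams at once, which can exceed the peer's concurrent-stream limit. A common mitigation is to cap fan-out with a semaphore channel; a self-contained sketch of just that pattern (the limit of 16 is an assumption, and the stream work is elided):

    package main

    import "sync"

    // Semaphore-limited fan-out: the pattern that would cap concurrent
    // OpenStreamSync calls in run_client above.
    func main() {
        var wg sync.WaitGroup
        sem := make(chan struct{}, 16)
        for idx := 0; idx < 128; idx++ {
            wg.Add(1)
            sem <- struct{}{} // acquire a slot
            go func() {
                defer wg.Done()
                defer func() { <-sem }() // release the slot
                // ... open a stream and write here ...
            }()
        }
        wg.Wait()
    }
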
69
misc/tests/socktest.go
Normal file
@@ -0,0 +1,69 @@
package main

import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"

// TODO look into netmap + libpcap to bypass the kernel as much as possible

func basic_test() {
    // TODO need a way to look up who our link-local neighbors are for each iface!

    addr, err := net.ResolveUDPAddr("udp", "[::1]:9001")
    if err != nil { panic(err) }
    sock, err := net.ListenUDP("udp", addr)
    if err != nil { panic(err) }
    defer sock.Close()

    go func() {
        send, err := net.DialUDP("udp", nil, addr)
        if err != nil { panic(err) }
        defer send.Close()
        msg := make([]byte, 1280)
        for {
            send.Write(msg)
        }
    }()

    numPackets := 1000000
    start := time.Now()
    msg := make([]byte, 2000)
    for i := 0; i < numPackets; i++ {
        sock.ReadFrom(msg)
    }
    timed := time.Since(start)

    fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
    flag.Parse()
    if *cpuprofile != "" {
        f, err := os.Create(*cpuprofile)
        if err != nil {
            panic(fmt.Sprintf("could not create CPU profile: %v", err))
        }
        if err := pprof.StartCPUProfile(f); err != nil {
            panic(fmt.Sprintf("could not start CPU profile: %v", err))
        }
        defer pprof.StopCPUProfile()
    }
    if *memprofile != "" {
        f, err := os.Create(*memprofile)
        if err != nil {
            panic(fmt.Sprintf("could not create memory profile: %v", err))
        }
        defer func() { pprof.WriteHeapProfile(f); f.Close() }()
    }
    basic_test()
}

77
misc/tests/socktest2.go
Normal file
@@ -0,0 +1,77 @@
package main

import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"

// TODO look into netmap + libpcap to bypass the kernel as much as possible

func basic_test() {
    // TODO need a way to look up who our link-local neighbors are for each iface!

    addr, err := net.ResolveUDPAddr("udp", "[::1]:9001")
    if err != nil { panic(err) }
    sock, err := net.ListenUDP("udp", addr)
    if err != nil { panic(err) }
    defer sock.Close()

    go func() {
        send, err := net.DialUDP("udp", nil, addr)
        if err != nil { panic(err) }
        defer send.Close()
        msg := make([]byte, 1280)
        bss := make(net.Buffers, 0, 1024)
        for {
            // WriteTo drains bss, so the refill loop tops it back up to 1024.
            for len(bss) < 1024 {
                bss = append(bss, msg)
            }
            bss.WriteTo(send)
            //bss = bss[:0]
            //send.Write(msg)
        }
    }()

    numPackets := 1000
    start := time.Now()
    msg := make([]byte, 2000)
    for i := 0; i < numPackets; i++ {
        n, err := sock.Read(msg)
        if err != nil { panic(err) }
        fmt.Println(n)
    }
    timed := time.Since(start)

    fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
    flag.Parse()
    if *cpuprofile != "" {
        f, err := os.Create(*cpuprofile)
        if err != nil {
            panic(fmt.Sprintf("could not create CPU profile: %v", err))
        }
        if err := pprof.StartCPUProfile(f); err != nil {
            panic(fmt.Sprintf("could not start CPU profile: %v", err))
        }
        defer pprof.StopCPUProfile()
    }
    if *memprofile != "" {
        f, err := os.Create(*memprofile)
        if err != nil {
            panic(fmt.Sprintf("could not create memory profile: %v", err))
        }
        defer func() { pprof.WriteHeapProfile(f); f.Close() }()
    }
    basic_test()
}

99
misc/tests/socktest_linklocal.go
Normal file
@@ -0,0 +1,99 @@
package main

import "flag"
import "fmt"
import "net"
import "os"
import "runtime/pprof"
import "time"

// TODO look into netmap + libpcap to bypass the kernel as much as possible

func basic_test() {
    // TODO need a way to look up who our link-local neighbors are for each iface!

    var ip *net.IP
    ifaces, err := net.Interfaces()
    if err != nil { panic(err) }
    var zone string
    for _, iface := range ifaces {
        addrs, err := iface.Addrs()
        if err != nil { panic(err) }
        for _, addr := range addrs {
            addrIP, _, _ := net.ParseCIDR(addr.String())
            if addrIP.To4() != nil { continue } // IPv6 only
            if !addrIP.IsLinkLocalUnicast() { continue }
            fmt.Println(iface.Name, addrIP)
            zone = iface.Name
            ip = &addrIP
        }
        if ip != nil { break }
        /*
            addrs, err = iface.MulticastAddrs()
            if err != nil { panic(err) }
            for _, addr := range addrs {
                fmt.Println(addr.String())
            }
        */
    }
    if ip == nil { panic("No link-local IPv6 found") }
    fmt.Println("Using address:", *ip)
    addr := net.UDPAddr{IP: *ip, Port: 9001, Zone: zone}

    laddr, err := net.ResolveUDPAddr("udp", "[::]:9001")
    if err != nil { panic(err) }
    sock, err := net.ListenUDP("udp", laddr)
    if err != nil { panic(err) }
    defer sock.Close()

    go func() {
        send, err := net.DialUDP("udp", nil, &addr)
        //send, err := net.ListenUDP("udp", nil)
        if err != nil { panic(err) }
        defer send.Close()
        msg := make([]byte, 1280)
        for {
            send.Write(msg)
            //send.WriteToUDP(msg, &addr)
        }
    }()

    numPackets := 1000000
    start := time.Now()
    msg := make([]byte, 2000)
    for i := 0; i < numPackets; i++ {
        sock.ReadFromUDP(msg)
    }
    timed := time.Since(start)

    fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
    flag.Parse()
    if *cpuprofile != "" {
        f, err := os.Create(*cpuprofile)
        if err != nil {
            panic(fmt.Sprintf("could not create CPU profile: %v", err))
        }
        if err := pprof.StartCPUProfile(f); err != nil {
            panic(fmt.Sprintf("could not start CPU profile: %v", err))
        }
        defer pprof.StopCPUProfile()
    }
    if *memprofile != "" {
        f, err := os.Create(*memprofile)
        if err != nil {
            panic(fmt.Sprintf("could not create memory profile: %v", err))
        }
        defer func() { pprof.WriteHeapProfile(f); f.Close() }()
    }
    basic_test()
}

94
misc/tests/socktest_tcp.go
Normal file
@@ -0,0 +1,94 @@
package main

import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"

// TODO look into netmap + libpcap to bypass the kernel as much as possible?

const buffSize = 32

func basic_test() {
    // TODO need a way to look up who our link-local neighbors are for each iface!

    addr, err := net.ResolveTCPAddr("tcp", "[::1]:9001")
    if err != nil { panic(err) }
    listener, err := net.ListenTCP("tcp", addr)
    if err != nil { panic(err) }
    defer listener.Close()

    go func() {
        send, err := net.DialTCP("tcp", nil, addr)
        if err != nil { panic(err) }
        defer send.Close()
        msg := make([]byte, 1280)
        bss := make(net.Buffers, 0, 1024)
        for {
            for len(bss) < 1 { //buffSize {
                bss = append(bss, msg)
            }
            // Each write is a 4-byte chunk, a 2-byte chunk, and the 1280-byte
            // payload: 4+2+1280 = 1286 bytes, matching the framing below.
            // Note this inner bss shadows the outer one declared above.
            bss := net.Buffers{[]byte{0, 1, 2, 3}, []byte{0, 1}, msg}
            bss.WriteTo(send)
            //send.Write(msg)
        }
    }()

    numPackets := 1000000
    start := time.Now()
    //msg := make([]byte, 1280)
    sock, err := listener.AcceptTCP()
    if err != nil { panic(err) }
    defer sock.Close()
    for i := 0; i < numPackets; i++ {
        msg := make([]byte, 1280*buffSize)
        n, err := sock.Read(msg)
        if err != nil { panic(err) }
        msg = msg[:n]
        for len(msg) > 1286 {
            // handle message
            i++
            msg = msg[1286:]
        }
        // handle remaining fragment of message
        //fmt.Println(n)
    }
    timed := time.Since(start)

    fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())

    _ = func(in chan<- int) {
        close(in)
    }
}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
    flag.Parse()
    if *cpuprofile != "" {
        f, err := os.Create(*cpuprofile)
        if err != nil {
            panic(fmt.Sprintf("could not create CPU profile: %v", err))
        }
        if err := pprof.StartCPUProfile(f); err != nil {
            panic(fmt.Sprintf("could not start CPU profile: %v", err))
        }
        defer pprof.StopCPUProfile()
    }
    if *memprofile != "" {
        f, err := os.Create(*memprofile)
        if err != nil {
            panic(fmt.Sprintf("could not create memory profile: %v", err))
        }
        defer func() { pprof.WriteHeapProfile(f); f.Close() }()
    }
    basic_test()
}

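A note on the framing in socktest_tcp.go above: every sender iteration writes exactly 1286 bytes (4 + 2 + 1280), so the reader can treat the TCP byte stream as fixed 1286-byte records, counting each complete record via `i++` and carrying any trailing fragment into the next Read. The outer `bss` refill loop is effectively vestigial once the shadowed inner `bss` takes over the actual writes.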
72
misc/tests/socktest_udp.go
Normal file
@@ -0,0 +1,72 @@
package main

import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"

// TODO look into netmap + libpcap to bypass the kernel as much as possible

func basic_test() {
    // TODO need a way to look up who our link-local neighbors are for each iface!

    addr, err := net.ResolveUDPAddr("udp", "[::1]:0")
    if err != nil { panic(err) }
    sock, err := net.ListenUDP("udp", addr)
    if err != nil { panic(err) }
    defer sock.Close()

    go func() {
        raddr := sock.LocalAddr().(*net.UDPAddr)
        send, err := net.DialUDP("udp", nil, raddr)
        //send, err := net.ListenUDP("udp", addr)
        if err != nil { panic(err) }
        defer send.Close()
        msg := make([]byte, 1280)
        for {
            send.Write(msg)
            //send.WriteToUDP(msg, raddr)
        }
    }()

    numPackets := 1000000
    start := time.Now()
    msg := make([]byte, 2000)
    for i := 0; i < numPackets; i++ {
        sock.ReadFromUDP(msg)
    }
    timed := time.Since(start)

    fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
    flag.Parse()
    if *cpuprofile != "" {
        f, err := os.Create(*cpuprofile)
        if err != nil {
            panic(fmt.Sprintf("could not create CPU profile: %v", err))
        }
        if err := pprof.StartCPUProfile(f); err != nil {
            panic(fmt.Sprintf("could not start CPU profile: %v", err))
        }
        defer pprof.StopCPUProfile()
    }
    if *memprofile != "" {
        f, err := os.Create(*memprofile)
        if err != nil {
            panic(fmt.Sprintf("could not create memory profile: %v", err))
        }
        defer func() { pprof.WriteHeapProfile(f); f.Close() }()
    }
    basic_test()
}

72
misc/tests/socktest_udp2.go
Normal file
@@ -0,0 +1,72 @@
package main

import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"

// TODO look into netmap + libpcap to bypass the kernel as much as possible

func basic_test() {
    // TODO need a way to look up who our link-local neighbors are for each iface!

    saddr, err := net.ResolveUDPAddr("udp", "[::1]:9001")
    if err != nil { panic(err) }
    raddr, err := net.ResolveUDPAddr("udp", "[::1]:9002")
    if err != nil { panic(err) }

    send, err := net.DialUDP("udp", saddr, raddr)
    if err != nil { panic(err) }
    defer send.Close()

    recv, err := net.DialUDP("udp", raddr, saddr)
    if err != nil { panic(err) }
    defer recv.Close()

    go func() {
        msg := make([]byte, 1280)
        for {
            send.Write(msg)
        }
    }()

    numPackets := 1000000
    start := time.Now()
    msg := make([]byte, 2000)
    for i := 0; i < numPackets; i++ {
        recv.Read(msg)
    }
    timed := time.Since(start)

    fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
    flag.Parse()
    if *cpuprofile != "" {
        f, err := os.Create(*cpuprofile)
        if err != nil {
            panic(fmt.Sprintf("could not create CPU profile: %v", err))
        }
        if err := pprof.StartCPUProfile(f); err != nil {
            panic(fmt.Sprintf("could not start CPU profile: %v", err))
        }
        defer pprof.StopCPUProfile()
    }
    if *memprofile != "" {
        f, err := os.Create(*memprofile)
        if err != nil {
            panic(fmt.Sprintf("could not create memory profile: %v", err))
        }
        defer func() { pprof.WriteHeapProfile(f); f.Close() }()
    }
    basic_test()
}

88
misc/tests/socktest_udp_nodial.go
Normal file
@@ -0,0 +1,88 @@
package main

import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"

// TODO look into netmap + libpcap to bypass the kernel as much as possible

func basic_test() {
    // TODO need a way to look up who our link-local neighbors are for each iface!

    sock, err := net.ListenUDP("udp", nil)
    if err != nil { panic(err) }
    defer sock.Close()

    ch := make(chan []byte, 1)

    writer := func() {
        raddr := sock.LocalAddr().(*net.UDPAddr)
        //send, err := net.ListenUDP("udp", nil)
        //if err != nil { panic(err) }
        //defer send.Close()
        for {
            select {
            case <-ch:
            default:
            }
            msg := make([]byte, 1280)
            sock.WriteToUDP(msg, raddr)
            //send.WriteToUDP(msg, raddr)
        }
    }
    go writer()
    //go writer()
    //go writer()
    //go writer()

    numPackets := 65536
    size := 0
    start := time.Now()
    success := 0
    for i := 0; i < numPackets; i++ {
        msg := make([]byte, 2048)
        n, _, err := sock.ReadFromUDP(msg)
        if err != nil { panic(err) }
        size += n
        select {
        case ch <- msg: success += 1
        default:
        }
    }
    timed := time.Since(start)

    fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())
    fmt.Printf("%f bits per second\n", 8*float64(size)/timed.Seconds())
    fmt.Println("Success:", success, "/", numPackets)
}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
    flag.Parse()
    if *cpuprofile != "" {
        f, err := os.Create(*cpuprofile)
        if err != nil {
            panic(fmt.Sprintf("could not create CPU profile: %v", err))
        }
        if err := pprof.StartCPUProfile(f); err != nil {
            panic(fmt.Sprintf("could not start CPU profile: %v", err))
        }
        defer pprof.StopCPUProfile()
    }
    if *memprofile != "" {
        f, err := os.Create(*memprofile)
        if err != nil {
            panic(fmt.Sprintf("could not create memory profile: %v", err))
        }
        defer func() { pprof.WriteHeapProfile(f); f.Close() }()
    }
    basic_test()
}

117
misc/tests/socktest_udp_sendmmsg.go
Normal file
@@ -0,0 +1,117 @@
package main

import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"

import "golang.org/x/net/ipv6"

// TODO look into netmap + libpcap to bypass the kernel as much as possible

func basic_test() {
    // TODO need a way to look up who our link-local neighbors are for each iface!

    udpAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
    if err != nil { panic(err) }
    sock, err := net.ListenUDP("udp", udpAddr)
    if err != nil { panic(err) }
    defer sock.Close()

    writer := func() {
        raddr := sock.LocalAddr().(*net.UDPAddr)
        send, err := net.ListenUDP("udp", nil)
        if err != nil { panic(err) }
        defer send.Close()
        conn := ipv6.NewPacketConn(send)
        defer conn.Close()
        var msgs []ipv6.Message
        for idx := 0; idx < 1024; idx++ {
            msg := ipv6.Message{Addr: raddr, Buffers: [][]byte{make([]byte, 1280)}}
            msgs = append(msgs, msg)
        }
        for {
            /*
                var msgs []ipv6.Message
                for idx := 0; idx < 1024; idx++ {
                    msg := ipv6.Message{Addr: raddr, Buffers: [][]byte{make([]byte, 1280)}}
                    msgs = append(msgs, msg)
                }
            */
            conn.WriteBatch(msgs, 0)
        }
    }
    go writer()
    //go writer()
    //go writer()
    //go writer()

    numPackets := 65536
    size := 0
    count := 0
    start := time.Now()
    /*
        conn := ipv6.NewPacketConn(sock)
        defer conn.Close()
        for ; count < numPackets; count++ {
            msgs := make([]ipv6.Message, 1024)
            for _, msg := range msgs {
                msg.Buffers = append(msg.Buffers, make([]byte, 2048))
            }
            n, err := conn.ReadBatch(msgs, 0)
            if err != nil { panic(err) }
            fmt.Println("DEBUG: n", n)
            for _, msg := range msgs[:n] {
                fmt.Println("DEBUG: msg", msg)
                size += msg.N
                //for _, bs := range msg.Buffers {
                //  size += len(bs)
                //}
                count++
            }
        }
    //*/
    //*
    for ; count < numPackets; count++ {
        msg := make([]byte, 2048)
        n, _, err := sock.ReadFromUDP(msg)
        if err != nil { panic(err) }
        size += n
    }
    //*/
    timed := time.Since(start)

    fmt.Printf("%f packets per second\n", float64(count)/timed.Seconds())
    fmt.Printf("%f bits/second\n", float64(8*size)/timed.Seconds())
}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
    flag.Parse()
    if *cpuprofile != "" {
        f, err := os.Create(*cpuprofile)
        if err != nil {
            panic(fmt.Sprintf("could not create CPU profile: %v", err))
        }
        if err := pprof.StartCPUProfile(f); err != nil {
            panic(fmt.Sprintf("could not start CPU profile: %v", err))
        }
        defer pprof.StopCPUProfile()
    }
    if *memprofile != "" {
        f, err := os.Create(*memprofile)
        if err != nil {
            panic(fmt.Sprintf("could not create memory profile: %v", err))
        }
        defer func() { pprof.WriteHeapProfile(f); f.Close() }()
    }
    basic_test()
}

96
misc/tests/tcptest.go
Normal file
@@ -0,0 +1,96 @@
package main

import "fmt"
import "net"
import "time"
import "flag"
import "os"
import "runtime/pprof"

// TODO look into netmap + libpcap to bypass the kernel as much as possible?

const buffSize = 32

func basic_test() {
    // TODO need a way to look up who our link-local neighbors are for each iface!

    addr, err := net.ResolveTCPAddr("tcp", "[::1]:9001")
    if err != nil { panic(err) }
    listener, err := net.ListenTCP("tcp", addr)
    if err != nil { panic(err) }
    defer listener.Close()

    go func() {
        send, err := net.DialTCP("tcp", nil, addr)
        if err != nil { panic(err) }
        defer send.Close()
        msg := make([]byte, 1280)
        bss := make(net.Buffers, 0, 1024)
        count := 0
        for {
            time.Sleep(100 * time.Millisecond)
            for len(bss) < count {
                bss = append(bss, msg)
            }
            bss.WriteTo(send)
            count++
            //send.Write(msg)
        }
    }()

    numPackets := 1000000
    start := time.Now()
    //msg := make([]byte, 1280)
    sock, err := listener.AcceptTCP()
    if err != nil { panic(err) }
    defer sock.Close()
    for {
        msg := make([]byte, 1280*buffSize)
        n, err := sock.Read(msg)
        if err != nil { panic(err) }
        msg = msg[:n]
        fmt.Println("Read:", n)
        for len(msg) > 1280 {
            // handle message
            msg = msg[1280:]
        }
        // handle remaining fragment of message
        //fmt.Println(n)
    }
    // NOTE: the read loop above never exits, so everything below is unreachable.
    timed := time.Since(start)

    fmt.Printf("%f packets per second\n", float64(numPackets)/timed.Seconds())

    _ = func(in chan<- int) {
        close(in)
    }
}

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to this file")

func main() {
    flag.Parse()
    if *cpuprofile != "" {
        f, err := os.Create(*cpuprofile)
        if err != nil {
            panic(fmt.Sprintf("could not create CPU profile: %v", err))
        }
        if err := pprof.StartCPUProfile(f); err != nil {
            panic(fmt.Sprintf("could not start CPU profile: %v", err))
        }
        defer pprof.StopCPUProfile()
    }
    if *memprofile != "" {
        f, err := os.Create(*memprofile)
        if err != nil {
            panic(fmt.Sprintf("could not create memory profile: %v", err))
        }
        defer func() { pprof.WriteHeapProfile(f); f.Close() }()
    }
    basic_test()
}

82
misc/tests/tunbench-client.go
Normal file
@@ -0,0 +1,82 @@
package main

import (
    "fmt"
    "log"
    "net"
    "os/exec"
    "time"

    "github.com/songgao/water"
)

const mtu = 65535

func setup_dev() *water.Interface {
    ifce, err := water.New(water.Config{
        DeviceType: water.TUN,
    })
    if err != nil {
        panic(err)
    }
    return ifce
}

func setup_dev1() *water.Interface {
    ifce := setup_dev()
    cmd := exec.Command("ip", "-f", "inet6",
        "addr", "add", "fc00::2/8",
        "dev", ifce.Name())
    out, err := cmd.CombinedOutput()
    if err != nil {
        fmt.Println(string(out))
        panic("Failed to assign address")
    }
    cmd = exec.Command("ip", "link", "set",
        "dev", ifce.Name(),
        "mtu", fmt.Sprintf("%d", mtu),
        "up")
    out, err = cmd.CombinedOutput()
    if err != nil {
        fmt.Println(string(out))
        panic("Failed to bring up interface")
    }
    return ifce
}

func connect(ifce *water.Interface) {
    conn, err := net.DialTimeout("tcp", "192.168.2.2:9001", time.Second)
    if err != nil { panic(err) }
    sock := conn.(*net.TCPConn)
    _ = sock // TODO: start a worker to move packets to/from the tun
}

func bench() {
}

func main() {
    ifce := setup_dev1()
    connect(ifce)
    bench()
    fmt.Println("Done?")
    return
    // Unreachable leftover example code below.
    ifce, err := water.New(water.Config{
        DeviceType: water.TUN,
    })
    if err != nil {
        panic(err)
    }

    log.Printf("Interface Name: %s\n", ifce.Name())

    packet := make([]byte, 2000)
    for {
        n, err := ifce.Read(packet)
        if err != nil {
            panic(err)
        }
        log.Printf("Packet Received: % x\n", packet[:n])
    }
}

127
misc/tests/tunbench-server.go
Normal file
@@ -0,0 +1,127 @@
package main

import (
    "fmt"
    "log"
    "os/exec"

    "github.com/songgao/water"
)

const mtu = 65535
const netnsName = "tunbenchns"

func setup_dev() *water.Interface {
    ifce, err := water.New(water.Config{
        DeviceType: water.TUN,
    })
    if err != nil {
        panic(err)
    }
    return ifce
}

func setup_dev1() *water.Interface {
    ifce := setup_dev()
    cmd := exec.Command("ip", "-f", "inet6",
        "addr", "add", "fc00::1/8",
        "dev", ifce.Name())
    out, err := cmd.CombinedOutput()
    if err != nil {
        fmt.Println(string(out))
        fmt.Println(err)
        panic("Failed to assign address")
    }
    cmd = exec.Command("ip", "link", "set",
        "dev", ifce.Name(),
        "mtu", fmt.Sprintf("%d", mtu),
        "up")
    out, err = cmd.CombinedOutput()
    if err != nil {
        fmt.Println(string(out))
        panic("Failed to bring up interface")
    }
    return ifce
}

func addNS(name string) {
    cmd := exec.Command("ip", "netns", "add", name)
    out, err := cmd.CombinedOutput()
    if err != nil {
        fmt.Println(string(out))
        panic("Failed to setup netns")
    }
}

func delNS(name string) {
    cmd := exec.Command("ip", "netns", "delete", name)
    out, err := cmd.CombinedOutput()
    if err != nil {
        fmt.Println(string(out))
        panic("Failed to setup netns")
    }
}

func doInNetNS(comm ...string) *exec.Cmd {
    // exec.Command can't mix fixed arguments with a slice expansion, so
    // build the full argument list first.
    args := append([]string{"netns", "exec", netnsName}, comm...)
    return exec.Command("ip", args...)
}

func setup_dev2() *water.Interface {
    ifce := setup_dev()
    addNS(netnsName)
    cmd := exec.Command("ip", "link", "set", ifce.Name(), "netns", netnsName)
    out, err := cmd.CombinedOutput()
    if err != nil {
        fmt.Println(string(out))
        panic("Failed to move tun to netns")
    }
    cmd = doInNetNS("ip", "-f", "inet6",
        "addr", "add", "fc00::2/8",
        "dev", ifce.Name())
    out, err = cmd.CombinedOutput()
    if err != nil {
        fmt.Println(string(out))
        panic("Failed to assign address")
    }
    cmd = doInNetNS("ip", "link", "set",
        "dev", ifce.Name(),
        "mtu", fmt.Sprintf("%d", mtu),
        "up")
    out, err = cmd.CombinedOutput()
    if err != nil {
        fmt.Println(string(out))
        fmt.Println(err)
        panic("Failed to bring up interface")
    }
    return ifce
}

func connect() {

}

func bench() {
}

func main() {
    ifce, err := water.New(water.Config{
        DeviceType: water.TUN,
    })
    if err != nil {
        panic(err)
    }

    log.Printf("Interface Name: %s\n", ifce.Name())

    packet := make([]byte, 2000)
    for {
        n, err := ifce.Read(packet)
        if err != nil {
            panic(err)
        }
        log.Printf("Packet Received: % x\n", packet[:n])
    }
}

130
misc/tests/tunbench.go
Normal file
@@ -0,0 +1,130 @@
package main

import (
    "fmt"
    "log"
    "os/exec"

    "github.com/songgao/water"
)

const mtu = 65535
const netnsName = "tunbenchns"

func setup_dev() *water.Interface {
    ifce, err := water.New(water.Config{
        DeviceType: water.TUN,
    })
    if err != nil {
        panic(err)
    }
    return ifce
}

func setup_dev1() *water.Interface {
    ifce := setup_dev()
    cmd := exec.Command("ip", "-f", "inet6",
        "addr", "add", "fc00::1/8",
        "dev", ifce.Name())
    out, err := cmd.CombinedOutput()
    if err != nil {
        fmt.Println(string(out))
        fmt.Println(err)
        panic("Failed to assign address")
    }
    cmd = exec.Command("ip", "link", "set",
        "dev", ifce.Name(),
        "mtu", fmt.Sprintf("%d", mtu),
        "up")
    out, err = cmd.CombinedOutput()
    if err != nil {
        fmt.Println(string(out))
        panic("Failed to bring up interface")
    }
    return ifce
}

func addNS(name string) {
    cmd := exec.Command("ip", "netns", "add", name)
    out, err := cmd.CombinedOutput()
    if err != nil {
        fmt.Println(string(out))
        panic("Failed to setup netns")
    }
}

func delNS(name string) {
    cmd := exec.Command("ip", "netns", "delete", name)
    out, err := cmd.CombinedOutput()
    if err != nil {
        fmt.Println(string(out))
        panic("Failed to setup netns")
    }
}

func doInNetNS(comm ...string) *exec.Cmd {
    args := append([]string{"netns", "exec", netnsName}, comm...)
    return exec.Command("ip", args...)
}

func setup_dev2() *water.Interface {
    ifce := setup_dev()
    addNS(netnsName)
    cmd := exec.Command("ip", "link", "set", ifce.Name(), "netns", netnsName)
    out, err := cmd.CombinedOutput()
    if err != nil {
        fmt.Println(string(out))
        panic("Failed to move tun to netns")
    }
    cmd = exec.Command(
        "ip", "-f", "inet6",
        "addr", "add", "fc00::2/8",
        "dev", ifce.Name())
    out, err = cmd.CombinedOutput()
    if err != nil {
        fmt.Println(string(out))
        panic("Failed to assign address")
    }
    cmd = exec.Command(
        "ip", "link", "set",
        "dev", ifce.Name(),
        "mtu", fmt.Sprintf("%d", mtu),
        "up")
    out, err = cmd.CombinedOutput()
    if err != nil {
        fmt.Println(string(out))
        fmt.Println(err)
        panic("Failed to bring up interface")
    }
    return ifce
}

func connect() {

}

func bench() {
}

func main() {
    ifce, err := water.New(water.Config{
        DeviceType: water.TUN,
    })
    if err != nil {
        panic(err)
    }

    log.Printf("Interface Name: %s\n", ifce.Name())

    packet := make([]byte, 2000)
    for {
        n, err := ifce.Read(packet)
        if err != nil {
            panic(err)
        }
        log.Printf("Packet Received: % x\n", packet[:n])
    }
}

41
misc/tests/tuntest.go
Normal file
@@ -0,0 +1,41 @@
package main

import (
    "log"
    "net"
    "sync"

    "github.com/FlexibleBroadband/tun-go"
)

// First, start the tun server.
func main() {
    wg := sync.WaitGroup{}
    // Local tun interface read and write channel.
    rCh := make(chan []byte, 1024)
    // Read from the local tun interface channel, and write into the remote udp channel.
    wg.Add(1)
    go func() {
        wg.Done()
        for {
            data := <-rCh
            // if data[0]&0xf0 == 0x40
            // write into udp conn.
            log.Println("tun->conn:", len(data))
            log.Println("read!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            log.Println("src:", net.IP(data[8:24]), "dst:", net.IP(data[24:40]))
        }
    }()

    address := net.ParseIP("fc00::1")
    tuntap, err := tun.OpenTun(address)
    if err != nil { panic(err) }
    defer tuntap.Close()
    // Read data from the tun device into the rCh channel.
    wg.Add(1)
    go func() {
        if err := tuntap.Read(rCh); err != nil { panic(err) }
        wg.Done()
    }()
    wg.Wait()
}

39
misc/tests/wire-test.go
Normal file
@@ -0,0 +1,39 @@
package main

import "wire"
import "fmt"
import "time"

func main() {
    for idx := 0; idx < 64; idx++ {
        num := uint64(1) << uint(idx)
        encoded := make([]byte, 10)
        length := wire.Encode_uint64(num, encoded)
        decoded, _ := wire.Decode_uint64(encoded[:length])
        if decoded != num { panic(fmt.Sprintf("%d != %d", decoded, num)) }
    }
    const count = 1000000
    start := time.Now()
    encoded := make([]byte, 10)
    //num := ^uint64(0) // Longest possible value for full uint64 range
    num := ^uint64(0) >> 1 // Largest positive int64 (real use case)
    //num := uint64(0) // Shortest possible value, most will be of this length
    length := wire.Encode_uint64(num, encoded)
    for idx := 0; idx < count; idx++ {
        wire.Encode_uint64(num, encoded)
    }
    timed := time.Since(start)
    fmt.Println("Ops:", count/timed.Seconds())
    fmt.Println("Time:", timed.Nanoseconds()/count)

    encoded = encoded[:length]
    start = time.Now()
    for idx := 0; idx < count; idx++ {
        wire.Decode_uint64(encoded)
    }
    timed = time.Since(start)
    fmt.Println("Ops:", count/timed.Seconds())
    fmt.Println("Time:", timed.Nanoseconds()/count)
}

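The `wire.Encode_uint64`/`wire.Decode_uint64` pair benchmarked above is the project's variable-length integer codec (its implementation is not shown in this section). Assuming it behaves like a uvarint-style encoding, the standard library offers a directly comparable pair, shown here purely for reference:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        buf := make([]byte, binary.MaxVarintLen64)
        n := binary.PutUvarint(buf, ^uint64(0)>>1) // encode: largest positive int64
        decoded, _ := binary.Uvarint(buf[:n])      // decode it back
        fmt.Println(n, decoded)
    }
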
209
misc/yggdrasil.go.tcp
Normal file
@@ -0,0 +1,209 @@
package main

import "bytes"
import "encoding/hex"
import "encoding/json"
import "flag"
import "fmt"
import "io/ioutil"
import "net"
import "os"
import "os/signal"
import "time"

import _ "net/http/pprof"
import "net/http"
import "log"
import "runtime"

import "golang.org/x/net/ipv6"

import . "yggdrasil"

/**
 * This is a very crude wrapper around src/yggdrasil
 * It can generate a new config (--genconf)
 * It can read a config from stdin (--useconf)
 * It can run with an automatic config (--autoconf)
 */

type nodeConfig struct {
    Listen    string
    Peers     []string
    BoxPub    string
    BoxPriv   string
    SigPub    string
    SigPriv   string
    Multicast bool
}

type node struct {
    core Core
    sock *ipv6.PacketConn
}

func (n *node) init(cfg *nodeConfig, logger *log.Logger) {
    boxPub, err := hex.DecodeString(cfg.BoxPub)
    if err != nil { panic(err) }
    boxPriv, err := hex.DecodeString(cfg.BoxPriv)
    if err != nil { panic(err) }
    sigPub, err := hex.DecodeString(cfg.SigPub)
    if err != nil { panic(err) }
    sigPriv, err := hex.DecodeString(cfg.SigPriv)
    if err != nil { panic(err) }
    n.core.DEBUG_init(boxPub, boxPriv, sigPub, sigPriv)
    n.core.DEBUG_setLogger(logger)
    logger.Println("Starting interface...")
    n.core.DEBUG_setupAndStartGlobalTCPInterface(cfg.Listen)
    logger.Println("Started interface")
    go func() {
        if len(cfg.Peers) == 0 { return }
        for {
            for _, p := range cfg.Peers {
                n.core.DEBUG_addTCPConn(p)
                time.Sleep(time.Second)
            }
        }
    }()
}

func generateConfig() *nodeConfig {
    core := Core{}
    bpub, bpriv := core.DEBUG_newBoxKeys()
    spub, spriv := core.DEBUG_newSigKeys()
    cfg := nodeConfig{}
    cfg.Listen = "[::]:0"
    cfg.BoxPub = hex.EncodeToString(bpub[:])
    cfg.BoxPriv = hex.EncodeToString(bpriv[:])
    cfg.SigPub = hex.EncodeToString(spub[:])
    cfg.SigPriv = hex.EncodeToString(spriv[:])
    cfg.Peers = []string{}
    cfg.Multicast = true
    return &cfg
}

func doGenconf() string {
    cfg := generateConfig()
    bs, err := json.MarshalIndent(cfg, "", " ")
    if err != nil { panic(err) }
    return string(bs)
}

var multicastAddr = "[ff02::114]:9001"

func (n *node) listen() {
    groupAddr, err := net.ResolveUDPAddr("udp", multicastAddr)
    if err != nil { panic(err) }
    bs := make([]byte, 2048)
    for {
        nBytes, rcm, fromAddr, err := n.sock.ReadFrom(bs)
        if err != nil { panic(err) }
        //if rcm == nil { continue } // wat
        //fmt.Println("DEBUG:", "packet from:", fromAddr.String())
        if !rcm.Dst.IsLinkLocalMulticast() { continue }
        if !rcm.Dst.Equal(groupAddr.IP) { continue }
        anAddr := string(bs[:nBytes])
        addr, err := net.ResolveTCPAddr("tcp", anAddr)
        if err != nil { panic(err); continue } // Panic for testing, remove later
        from := fromAddr.(*net.UDPAddr)
        //fmt.Println("DEBUG:", "heard:", addr.IP.String(), "from:", from.IP.String())
        if addr.IP.String() != from.IP.String() { continue }
        addr.Zone = from.Zone
        saddr := addr.String()
        //if _, isIn := n.peers[saddr]; isIn { continue }
        //n.peers[saddr] = struct{}{}
        n.core.DEBUG_addTCPConn(saddr)
        //fmt.Println("DEBUG:", "added multicast peer:", saddr)
    }
}

func (n *node) announce() {
    groupAddr, err := net.ResolveUDPAddr("udp", multicastAddr)
    if err != nil { panic(err) }
    tcpaddr := n.core.DEBUG_getGlobalTCPAddr()
    anAddr, err := net.ResolveTCPAddr("tcp", tcpaddr.String())
    if err != nil { panic(err) }
    destAddr, err := net.ResolveUDPAddr("udp6", multicastAddr)
    if err != nil { panic(err) }
    for {
        ifaces, err := net.Interfaces()
        if err != nil { panic(err) }
        for _, iface := range ifaces {
            n.sock.JoinGroup(&iface, groupAddr)
            //err := n.sock.JoinGroup(&iface, groupAddr)
            //if err != nil { panic(err) }
            addrs, err := iface.Addrs()
            if err != nil { panic(err) }
            for _, addr := range addrs {
                addrIP, _, _ := net.ParseCIDR(addr.String())
                if addrIP.To4() != nil { continue } // IPv6 only
                if !addrIP.IsLinkLocalUnicast() { continue }
                anAddr.IP = addrIP
                anAddr.Zone = iface.Name
                destAddr.Zone = iface.Name
                msg := []byte(anAddr.String())
                n.sock.WriteTo(msg, nil, destAddr)
                break
            }
            time.Sleep(time.Second)
        }
        time.Sleep(time.Second)
    }
}

var pprof = flag.Bool("pprof", false, "Run pprof, see http://localhost:6060/debug/pprof/")
var genconf = flag.Bool("genconf", false, "print a new config to stdout")
var useconf = flag.Bool("useconf", false, "read config from stdin")
var autoconf = flag.Bool("autoconf", false, "automatic mode (dynamic IP, peer with IPv6 neighbors)")

func main() {
    flag.Parse()
    var cfg *nodeConfig
    switch {
    case *autoconf: cfg = generateConfig()
    case *useconf:
        config, err := ioutil.ReadAll(os.Stdin)
        if err != nil { panic(err) }
        decoder := json.NewDecoder(bytes.NewReader(config))
        err = decoder.Decode(&cfg)
        if err != nil { panic(err) }
    case *genconf: fmt.Println(doGenconf())
    default: flag.PrintDefaults()
    }
    if cfg == nil { return }
    logger := log.New(os.Stdout, "", log.Flags())
    if *pprof {
        runtime.SetBlockProfileRate(1)
        go func() { log.Println(http.ListenAndServe("localhost:6060", nil)) }()
    }
    // Setup
    logger.Println("Initializing...")
    n := node{}
    n.init(cfg, logger)
    logger.Println("Starting tun...")
    //n.core.DEBUG_startTun() // 1280, the smallest supported MTU
    n.core.DEBUG_startTunWithMTU(65535) // Largest supported MTU
    defer func() {
        logger.Println("Closing...")
        n.core.DEBUG_stopTun()
    }()
    logger.Println("Started...")
    if cfg.Multicast {
        addr, err := net.ResolveUDPAddr("udp", multicastAddr)
        if err != nil { panic(err) }
        listenString := fmt.Sprintf("[::]:%v", addr.Port)
        conn, err := net.ListenPacket("udp6", listenString)
        if err != nil { panic(err) }
        //defer conn.Close() // Let it close on its own when the application exits
        n.sock = ipv6.NewPacketConn(conn)
        if err = n.sock.SetControlMessage(ipv6.FlagDst, true); err != nil { panic(err) }
        go n.listen()
        go n.announce()
    }
    // Catch interrupt to exit gracefully
    c := make(chan os.Signal, 1)
    signal.Notify(c, os.Interrupt)
    <-c
    logger.Println("Stopping...")
}

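Given the flags declared above, the intended usage of this wrapper is roughly the following (the binary name is illustrative; only the flags themselves are defined by the file):

    ./yggdrasil --genconf > node.conf    # print a fresh config
    ./yggdrasil --useconf < node.conf    # run a node using that config
    ./yggdrasil --autoconf               # ephemeral keys, multicast peering

Adding --pprof to any of these additionally serves profiling data at localhost:6060.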
108
src/yggdrasil/address.go
Normal file
@@ -0,0 +1,108 @@
package yggdrasil

type address [16]byte // IPv6 address within the network
type subnet [8]byte   // It's a /64

var address_prefix = [...]byte{0xfd} // For node addresses + local subnets

func (a *address) isValid() bool {
    for idx := range address_prefix {
        if (*a)[idx] != address_prefix[idx] { return false }
    }
    return (*a)[len(address_prefix)]&0x80 == 0
}

func (s *subnet) isValid() bool {
    for idx := range address_prefix {
        if (*s)[idx] != address_prefix[idx] { return false }
    }
    return (*s)[len(address_prefix)]&0x80 != 0
}

func address_addrForNodeID(nid *NodeID) *address {
    // 128 bit address
    // Begins with prefix
    // Next bit is a 0
    // Next 7 bits, interpreted as a uint, are # of leading 1s in the NodeID
    // Leading 1s and first leading 0 of the NodeID are truncated off
    // The rest is appended to the IPv6 address (truncated to 128 bits total)
    var addr address
    var temp []byte
    done := false
    ones := byte(0)
    bits := byte(0)
    nBits := 0
    for idx := 0; idx < 8*len(nid); idx++ {
        bit := (nid[idx/8] & (0x80 >> byte(idx%8))) >> byte(7-(idx%8))
        if !done && bit != 0 {
            ones++
            continue
        }
        if !done && bit == 0 {
            done = true
            continue // FIXME this assumes that ones <= 127
        }
        bits = (bits << 1) | bit
        nBits++
        if nBits == 8 {
            nBits = 0
            temp = append(temp, bits)
        }
    }
    copy(addr[:], address_prefix[:])
    addr[len(address_prefix)] = ones & 0x7f
    copy(addr[len(address_prefix)+1:], temp)
    return &addr
}

func address_subnetForNodeID(nid *NodeID) *subnet {
    // Exactly as the address version, with two exceptions:
    //  1) The first bit after the fixed prefix is a 1 instead of a 0
    //  2) It's truncated to a subnet prefix length instead of 128 bits
    addr := *address_addrForNodeID(nid)
    var snet subnet
    copy(snet[:], addr[:])
    snet[len(address_prefix)] |= 0x80
    return &snet
}

func (a *address) getNodeIDandMask() (*NodeID, *NodeID) {
    // Mask is a bitmask to mark the bits visible from the address
    // This means truncated leading 1s, first leading 0, and visible part of addr
    var nid NodeID
    var mask NodeID
    ones := int(a[len(address_prefix)] & 0x7f)
    for idx := 0; idx < ones; idx++ { nid[idx/8] |= 0x80 >> byte(idx%8) }
    nidOffset := ones + 1
    addrOffset := 8*len(address_prefix) + 8
    for idx := addrOffset; idx < 8*len(a); idx++ {
        bits := a[idx/8] & (0x80 >> byte(idx%8))
        bits <<= byte(idx % 8)
        nidIdx := nidOffset + (idx - addrOffset)
        bits >>= byte(nidIdx % 8)
        nid[nidIdx/8] |= bits
    }
    maxMask := 8*(len(a)-len(address_prefix)-1) + ones + 1
    for idx := 0; idx < maxMask; idx++ { mask[idx/8] |= 0x80 >> byte(idx%8) }
    return &nid, &mask
}

func (s *subnet) getNodeIDandMask() (*NodeID, *NodeID) {
    // As with the address version, but visible parts of the subnet prefix instead
    var nid NodeID
    var mask NodeID
    ones := int(s[len(address_prefix)] & 0x7f)
    for idx := 0; idx < ones; idx++ { nid[idx/8] |= 0x80 >> byte(idx%8) }
    nidOffset := ones + 1
    addrOffset := 8*len(address_prefix) + 8
    for idx := addrOffset; idx < 8*len(s); idx++ {
        bits := s[idx/8] & (0x80 >> byte(idx%8))
        bits <<= byte(idx % 8)
        nidIdx := nidOffset + (idx - addrOffset)
        bits >>= byte(nidIdx % 8)
        nid[nidIdx/8] |= bits
    }
    maxMask := 8*(len(s)-len(address_prefix)-1) + ones + 1
    for idx := 0; idx < maxMask; idx++ { mask[idx/8] |= 0x80 >> byte(idx%8) }
    return &nid, &mask
}

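To make the encoding in address_addrForNodeID concrete: a NodeID whose first byte is 0xe0 (bits 1110 0000) has three leading ones, so the byte after the 0xfd prefix becomes 0x03, and the remaining address bits are the NodeID bits after the first 0. A small standalone sketch of just the leading-ones count, mirroring the loop above (not part of the commit):

    package main

    import "fmt"

    // countLeadingOnes mirrors address_addrForNodeID: count 1 bits until the
    // first 0, which is itself skipped from the encoded remainder.
    func countLeadingOnes(nid []byte) int {
        ones := 0
        for idx := 0; idx < 8*len(nid); idx++ {
            bit := (nid[idx/8] >> (7 - uint(idx%8))) & 1
            if bit == 0 {
                break
            }
            ones++
        }
        return ones
    }

    func main() {
        nid := []byte{0xe0} // bits 1110 0000
        fmt.Println(countLeadingOnes(nid)) // prints 3
    }
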
64
src/yggdrasil/core.go
Normal file
@@ -0,0 +1,64 @@
package yggdrasil

import "io/ioutil"
import "log"

type Core struct {
    // This is the main data structure that holds everything else for a node
    // TODO? move keys out of core and into something more appropriate
    //  e.g. box keys live in sessions
    //  sig keys live in peers or sigs (or wherever signing/validating logic is)
    boxPub      boxPubKey
    boxPriv     boxPrivKey
    sigPub      sigPubKey
    sigPriv     sigPrivKey
    switchTable switchTable
    peers       peers
    sigs        sigManager
    sessions    sessions
    router      router
    dht         dht
    tun         tunDevice
    searches    searches
    tcp         *tcpInterface
    udp         *udpInterface
    log         *log.Logger
}

func (c *Core) Init() {
    // Only called by the simulator, to set up nodes with random keys
    bpub, bpriv := newBoxKeys()
    spub, spriv := newSigKeys()
    c.init(bpub, bpriv, spub, spriv)
}

func (c *Core) init(bpub *boxPubKey,
    bpriv *boxPrivKey,
    spub *sigPubKey,
    spriv *sigPrivKey) {
    // TODO separate init and start functions
    //  Init sets up structs
    //  Start launches goroutines that depend on structs being set up
    //  This is pretty much required to avoid race conditions
    util_initByteStore()
    c.log = log.New(ioutil.Discard, "", 0)
    c.boxPub, c.boxPriv = *bpub, *bpriv
    c.sigPub, c.sigPriv = *spub, *spriv
    c.sigs.init()
    c.searches.init(c)
    c.dht.init(c)
    c.sessions.init(c)
    c.peers.init(c)
    c.router.init(c)
    c.switchTable.init(c, c.sigPub) // TODO move before peers? before router?
    c.tun.init(c)
}

func (c *Core) GetNodeID() *NodeID {
    return getNodeID(&c.boxPub)
}

func (c *Core) GetTreeID() *TreeID {
    return getTreeID(&c.sigPub)
}

154
src/yggdrasil/crypto.go
Normal file
154
src/yggdrasil/crypto.go
Normal file
@ -0,0 +1,154 @@
|
||||
package yggdrasil
|
||||
|
||||
/*
|
||||
|
||||
This part of the package wraps crypto operations needed elsewhere
|
||||
|
||||
In particular, it exposes key generation for ed25519 and nacl box
|
||||
|
||||
It also defines NodeID and TreeID as hashes of keys, and wraps hash functions
|
||||
|
||||
*/
|
||||
|
||||
import "crypto/rand"
|
||||
import "crypto/sha512"
|
||||
import "golang.org/x/crypto/ed25519"
|
||||
import "golang.org/x/crypto/nacl/box"
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// NodeID and TreeID
|
||||
|
||||
const NodeIDLen = sha512.Size
|
||||
const TreeIDLen = sha512.Size
|
||||
const handleLen = 8
|
||||
|
||||
type NodeID [NodeIDLen]byte
|
||||
type TreeID [TreeIDLen]byte
|
||||
type handle [handleLen]byte
|
||||
|
||||
func getNodeID(pub *boxPubKey) *NodeID {
|
||||
h := sha512.Sum512(pub[:])
|
||||
return (*NodeID)(&h)
|
||||
}
|
||||
|
||||
func getTreeID(pub *sigPubKey) *TreeID {
|
||||
h := sha512.Sum512(pub[:])
|
||||
return (*TreeID)(&h)
|
||||
}
|
||||
|
||||
func newHandle() *handle {
|
||||
var h handle
|
||||
_, err := rand.Read(h[:])
|
||||
if err != nil { panic(err) }
|
||||
return &h
|
||||
}
|
||||
////////////////////////////////////////////////////////////////////////////////

// Signatures

const sigPubKeyLen = ed25519.PublicKeySize
const sigPrivKeyLen = ed25519.PrivateKeySize
const sigLen = ed25519.SignatureSize

type sigPubKey [sigPubKeyLen]byte
type sigPrivKey [sigPrivKeyLen]byte
type sigBytes [sigLen]byte

func newSigKeys() (*sigPubKey, *sigPrivKey) {
	var pub sigPubKey
	var priv sigPrivKey
	pubSlice, privSlice, err := ed25519.GenerateKey(rand.Reader)
	if err != nil { panic(err) }
	copy(pub[:], pubSlice)
	copy(priv[:], privSlice)
	return &pub, &priv
}

func sign(priv *sigPrivKey, msg []byte) *sigBytes {
	var sig sigBytes
	sigSlice := ed25519.Sign(priv[:], msg)
	copy(sig[:], sigSlice)
	return &sig
}

func verify(pub *sigPubKey, msg []byte, sig *sigBytes) bool {
	// Should sig be an array instead of a slice?...
	// It's fixed size, but
	return ed25519.Verify(pub[:], msg, sig[:])
}
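// Illustrative sketch (added, not part of the original commit): a sign/verify
// round trip with a fresh keypair.
//
//	pub, priv := newSigKeys()
//	msg := []byte("example")
//	sig := sign(priv, msg)
//	ok := verify(pub, msg, sig) // true unless msg or sig is altered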
////////////////////////////////////////////////////////////////////////////////

// NaCl-like crypto "box" (curve25519+xsalsa20+poly1305)

const boxPubKeyLen = 32
const boxPrivKeyLen = 32
const boxSharedKeyLen = 32
const boxNonceLen = 24

type boxPubKey [boxPubKeyLen]byte
type boxPrivKey [boxPrivKeyLen]byte
type boxSharedKey [boxSharedKeyLen]byte
type boxNonce [boxNonceLen]byte

func newBoxKeys() (*boxPubKey, *boxPrivKey) {
	pubBytes, privBytes, err := box.GenerateKey(rand.Reader)
	if err != nil { panic(err) }
	pub := (*boxPubKey)(pubBytes)
	priv := (*boxPrivKey)(privBytes)
	return pub, priv
}

func getSharedKey(myPrivKey *boxPrivKey,
	othersPubKey *boxPubKey) *boxSharedKey {
	var shared [boxSharedKeyLen]byte
	priv := (*[boxPrivKeyLen]byte)(myPrivKey)
	pub := (*[boxPubKeyLen]byte)(othersPubKey)
	box.Precompute(&shared, pub, priv)
	return (*boxSharedKey)(&shared)
}

func boxOpen(shared *boxSharedKey,
	boxed []byte,
	nonce *boxNonce) ([]byte, bool) {
	out := util_getBytes()
	//return append(out, boxed...), true // XXX HACK to test without encryption
	s := (*[boxSharedKeyLen]byte)(shared)
	n := (*[boxNonceLen]byte)(nonce)
	unboxed, success := box.OpenAfterPrecomputation(out, boxed, n, s)
	return unboxed, success
}

func boxSeal(shared *boxSharedKey, unboxed []byte, nonce *boxNonce) ([]byte, *boxNonce) {
	if nonce == nil { nonce = newBoxNonce() }
	nonce.update()
	out := util_getBytes()
	//return append(out, unboxed...), nonce // XXX HACK to test without encryption
	s := (*[boxSharedKeyLen]byte)(shared)
	n := (*[boxNonceLen]byte)(nonce)
	boxed := box.SealAfterPrecomputation(out, unboxed, n, s)
	return boxed, nonce
}

func newBoxNonce() *boxNonce {
	var nonce boxNonce
	_, err := rand.Read(nonce[:])
	for ; err == nil && nonce[0] == 0xff; _, err = rand.Read(nonce[:]) {
		// Make sure the nonce isn't too high
		// This is just to make rollover unlikely to happen
		// Rollover is fine, but it may kill the session and force it to reopen
	}
	if err != nil { panic(err) }
	return &nonce
}

func (n *boxNonce) update() {
	oldNonce := *n
	n[len(n)-1] += 2
	for i := len(n) - 2; i >= 0; i-- {
		if n[i+1] < oldNonce[i+1] { n[i] += 1 }
	}
}
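// Note (added): update advances the nonce by 2, so its low bit (parity) never
// changes, and the loop above carries into higher bytes whenever a byte wraps.
// Sessions later assign odd nonces to one side and even nonces to the other,
// so this parity-preserving step keeps the two directions from colliding.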
336
src/yggdrasil/debug.go
Normal file
@@ -0,0 +1,336 @@
package yggdrasil

// These are functions that should not exist
// They are (or were) used during development, to work around missing features
// They're also used to configure things from the outside
// It would be better to define and export a few config functions elsewhere
// Or define some remote API and call it to send/request configuration info

import _ "golang.org/x/net/ipv6" // TODO put this somewhere better

import "fmt"
import "net"
import "log"

// Core

func (c *Core) DEBUG_getSigPub() sigPubKey {
	return (sigPubKey)(c.sigPub)
}

func (c *Core) DEBUG_getBoxPub() boxPubKey {
	return (boxPubKey)(c.boxPub)
}

func (c *Core) DEBUG_getSend() chan<- []byte {
	return c.tun.send
}

func (c *Core) DEBUG_getRecv() <-chan []byte {
	return c.tun.recv
}

// Peer

func (c *Core) DEBUG_getPeers() *peers {
	return &c.peers
}

func (ps *peers) DEBUG_newPeer(box boxPubKey,
	sig sigPubKey) *peer {
	//in <-chan []byte,
	//out chan<- []byte) *peer {
	return ps.newPeer(&box, &sig) //, in, out)
}

/*
func (ps *peers) DEBUG_startPeers() {
	ps.mutex.RLock()
	defer ps.mutex.RUnlock()
	for _, p := range ps.ports {
		if p == nil { continue }
		go p.MainLoop()
	}
}
*/

func (ps *peers) DEBUG_hasPeer(key sigPubKey) bool {
	ports := ps.ports.Load().(map[switchPort]*peer)
	for _, p := range ports {
		if p == nil { continue }
		if p.sig == key { return true }
	}
	return false
}

func (ps *peers) DEBUG_getPorts() map[switchPort]*peer {
	ports := ps.ports.Load().(map[switchPort]*peer)
	newPeers := make(map[switchPort]*peer)
	for port, p := range ports {
		newPeers[port] = p
	}
	return newPeers
}

func (p *peer) DEBUG_getSigKey() sigPubKey {
	return p.sig
}

func (p *peer) DEBUG_getPort() switchPort {
	return p.port
}

// Router

func (c *Core) DEBUG_getSwitchTable() *switchTable {
	return &c.switchTable
}

func (c *Core) DEBUG_getLocator() switchLocator {
	return c.switchTable.getLocator()
}

func (l *switchLocator) DEBUG_getCoords() []byte {
	return l.getCoords()
}

func (c *Core) DEBUG_switchLookup(dest []byte, ttl uint64) (switchPort, uint64) {
	return c.switchTable.lookup(dest, ttl)
}

/*
func (t *switchTable) DEBUG_isDirty() bool {
	//data := t.data.Load().(*tabledata)
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	data := t.data
	return data.dirty
}
*/

func (t *switchTable) DEBUG_dumpTable() {
	//data := t.data.Load().(*tabledata)
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	data := t.data
	for _, peer := range data.peers {
		//fmt.Println("DUMPTABLE:", t.treeID, peer.treeID, peer.port,
		//	peer.locator.Root, peer.coords,
		//	peer.reverse.Root, peer.reverse.Coords, peer.forward)
		fmt.Println("DUMPTABLE:", t.key, peer.key, peer.locator.coords, peer.port /*, peer.forward*/)
	}
}

func (t *switchTable) DEBUG_getReversePort(port switchPort) switchPort {
	// Returns Port(0) if it cannot get the reverse peer for any reason
	//data := t.data.Load().(*tabledata)
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	data := t.data
	if port >= switchPort(len(data.peers)) { return switchPort(0) }
	pinfo := data.peers[port]
	if len(pinfo.locator.coords) < 1 { return switchPort(0) }
	return pinfo.locator.coords[len(pinfo.locator.coords)-1]
}

// Wire

func DEBUG_wire_encode_coords(coords []byte) []byte {
	return wire_encode_coords(coords)
}

// DHT, via core

func (c *Core) DEBUG_getDHTSize() int {
	total := 0
	for bidx := 0; bidx < c.dht.nBuckets(); bidx++ {
		b := c.dht.getBucket(bidx)
		total += len(b.infos)
	}
	return total
}

// udpInterface
// FIXME udpInterface isn't exported
// So debug functions need to work differently...

/*
func (c *Core) DEBUG_setupLoopbackUDPInterface() {
	iface := udpInterface{}
	iface.init(c, "[::1]:0")
	c.ifaces = append(c.ifaces[:0], &iface)
}
*/

/*
func (c *Core) DEBUG_getLoopbackAddr() net.Addr {
	iface := c.ifaces[0]
	return iface.sock.LocalAddr()
}
*/

/*
func (c *Core) DEBUG_addLoopbackPeer(addr *net.UDPAddr,
	in (chan<- []byte),
	out (<-chan []byte)) {
	iface := c.ifaces[0]
	iface.addPeer(addr, in, out)
}
*/

/*
func (c *Core) DEBUG_startLoopbackUDPInterface() {
	iface := c.ifaces[0]
	go iface.reader()
	for addr, chs := range iface.peers {
		udpAddr, err := net.ResolveUDPAddr("udp6", addr)
		if err != nil { panic(err) }
		go iface.writer(udpAddr, chs.out)
	}
}
*/

////////////////////////////////////////////////////////////////////////////////

func (c *Core) DEBUG_getAddr() *address {
	return address_addrForNodeID(&c.dht.nodeID)
}

func (c *Core) DEBUG_startTun() {
	c.DEBUG_startTunWithMTU(1280)
}

func (c *Core) DEBUG_startTunWithMTU(mtu int) {
	addr := c.DEBUG_getAddr()
	straddr := fmt.Sprintf("%s/%v", net.IP(addr[:]).String(), 8*len(address_prefix))
	err := c.tun.setup(straddr, mtu)
	if err != nil { panic(err) }
	go c.tun.read()
	go c.tun.write()
}

func (c *Core) DEBUG_stopTun() {
	c.tun.close()
}

////////////////////////////////////////////////////////////////////////////////

func (c *Core) DEBUG_newBoxKeys() (*boxPubKey, *boxPrivKey) {
	return newBoxKeys()
}

func (c *Core) DEBUG_newSigKeys() (*sigPubKey, *sigPrivKey) {
	return newSigKeys()
}

func (c *Core) DEBUG_getNodeID(pub *boxPubKey) *NodeID {
	return getNodeID(pub)
}

func (c *Core) DEBUG_getTreeID(pub *sigPubKey) *TreeID {
	return getTreeID(pub)
}

func (c *Core) DEBUG_addrForNodeID(nodeID *NodeID) string {
	return net.IP(address_addrForNodeID(nodeID)[:]).String()
}

func (c *Core) DEBUG_init(bpub []byte,
	bpriv []byte,
	spub []byte,
	spriv []byte) {
	var boxPub boxPubKey
	var boxPriv boxPrivKey
	var sigPub sigPubKey
	var sigPriv sigPrivKey
	copy(boxPub[:], bpub)
	copy(boxPriv[:], bpriv)
	copy(sigPub[:], spub)
	copy(sigPriv[:], spriv)
	c.init(&boxPub, &boxPriv, &sigPub, &sigPriv)
}

////////////////////////////////////////////////////////////////////////////////

func (c *Core) DEBUG_setupAndStartGlobalUDPInterface(addrport string) {
	iface := udpInterface{}
	iface.init(c, addrport)
	c.udp = &iface
}

func (c *Core) DEBUG_getGlobalUDPAddr() net.Addr {
	return c.udp.sock.LocalAddr()
}

func (c *Core) DEBUG_sendUDPKeys(saddr string) {
	addr := connAddr(saddr)
	c.udp.sendKeys(addr)
}

////////////////////////////////////////////////////////////////////////////////

//*
func (c *Core) DEBUG_setupAndStartGlobalTCPInterface(addrport string) {
	iface := tcpInterface{}
	iface.init(c, addrport)
	c.tcp = &iface
}

func (c *Core) DEBUG_getGlobalTCPAddr() *net.TCPAddr {
	return c.tcp.serv.Addr().(*net.TCPAddr)
}

func (c *Core) DEBUG_addTCPConn(saddr string) {
	c.tcp.call(saddr)
}
//*/

/*
func (c *Core) DEBUG_startSelfPeer() {
	c.Peers.mutex.RLock()
	defer c.Peers.mutex.RUnlock()
	p := c.Peers.ports[0]
	go p.MainLoop()
}
*/

////////////////////////////////////////////////////////////////////////////////

/*
func (c *Core) DEBUG_setupAndStartGlobalKCPInterface(addrport string) {
	iface := kcpInterface{}
	iface.init(c, addrport)
	c.kcp = &iface
}

func (c *Core) DEBUG_getGlobalKCPAddr() net.Addr {
	return c.kcp.serv.Addr()
}

func (c *Core) DEBUG_addKCPConn(saddr string) {
	c.kcp.call(saddr)
}
*/

////////////////////////////////////////////////////////////////////////////////

func (c *Core) DEBUG_setLogger(log *log.Logger) {
	c.log = log
}

////////////////////////////////////////////////////////////////////////////////

func DEBUG_simLinkPeers(p, q *peer) {
	// Sets q.out() to point to p and starts p.linkLoop()
	plinkIn := make(chan []byte, 1)
	qlinkIn := make(chan []byte, 1)
	p.out = func(bs []byte) {
		go q.handlePacket(bs, qlinkIn)
	}
	q.out = func(bs []byte) {
		go p.handlePacket(bs, plinkIn)
	}
	go p.linkLoop(plinkIn)
	go q.linkLoop(qlinkIn)
}
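// Illustrative sketch (added, not part of the original commit): wiring two
// simulator nodes together with the helpers above. c1 and c2 are
// hypothetical, already-initialized Cores.
//
//	p1 := c1.DEBUG_getPeers().DEBUG_newPeer(c2.DEBUG_getBoxPub(), c2.DEBUG_getSigPub())
//	p2 := c2.DEBUG_getPeers().DEBUG_newPeer(c1.DEBUG_getBoxPub(), c1.DEBUG_getSigPub())
//	DEBUG_simLinkPeers(p1, p2)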
383
src/yggdrasil/dht.go
Normal file
@@ -0,0 +1,383 @@
package yggdrasil

/*

This part has the (kademlia-like) distributed hash table

It's used to look up coords for a NodeID

Every node participates in the DHT, and the DHT stores no real keys/values
(Only the peer relationships / lookups are needed)

This version is intentionally fragile, by being recursive instead of iterative
(it's also not parallel, as a result)
This is to make sure that DHT black holes are visible if they exist
(the iterative parallel approach tends to get around them sometimes)
I haven't seen this get stuck on black holes, but I also haven't proven it can't
Slight changes *do* make it black hole hard, bootstrapping isn't an easy problem

*/

// TODO handle the case where we try to look ourself up
// Ends up at bucket index NodeIDLen
// That's 1 too many

import "sort"
import "time"

//import "fmt"

// Maximum size for buckets and lookups
// Exception for buckets if the next one is non-full
const dht_bucket_size = 2               // This should be at least 2
const dht_lookup_size = 2               // This should be at least 1, below 2 is impractical
const dht_bucket_number = 8 * NodeIDLen // This shouldn't be changed

type dhtInfo struct {
	// TODO save their nodeID so we don't need to rehash if we need it again
	nodeID_hidden *NodeID
	key           boxPubKey
	coords        []byte
	send          time.Time // When we last sent a message
	recv          time.Time // When we last received a message
	pings         int       // Decide when to drop
}

func (info *dhtInfo) getNodeID() *NodeID {
	if info.nodeID_hidden == nil {
		info.nodeID_hidden = getNodeID(&info.key)
	}
	return info.nodeID_hidden
}

type bucket struct {
	infos []*dhtInfo
}

type dhtReq struct {
	key    boxPubKey // Key of whoever asked
	coords []byte    // Coords of whoever asked
	dest   NodeID    // NodeID they're asking about
}

type dhtRes struct {
	key    boxPubKey // key to respond to
	coords []byte    // coords to respond to
	dest   NodeID
	infos  []*dhtInfo // response
}

type dht struct {
	core           *Core
	nodeID         NodeID
	buckets_hidden [dht_bucket_number]bucket // Extra is for the self-bucket
	peers          chan *dhtInfo             // other goroutines put incoming dht updates here
	reqs           map[boxPubKey]map[NodeID]time.Time
	offset         int
}

func (t *dht) init(c *Core) {
	t.core = c
	t.nodeID = *t.core.GetNodeID()
	t.peers = make(chan *dhtInfo, 1)
	t.reqs = make(map[boxPubKey]map[NodeID]time.Time)
}

func (t *dht) handleReq(req *dhtReq) {
	// Send them what they asked for
	loc := t.core.switchTable.getLocator()
	coords := loc.getCoords()
	res := dhtRes{
		key:    t.core.boxPub,
		coords: coords,
		dest:   req.dest,
		infos:  t.lookup(&req.dest),
	}
	t.sendRes(&res, req)
	// Also (possibly) add them to our DHT
	info := dhtInfo{
		key:    req.key,
		coords: req.coords,
	}
	t.insertIfNew(&info) // This seems DoSable (we just trust their coords...)
	//if req.dest != t.nodeID { t.ping(&info, info.getNodeID()) } // Or spam...
}

func (t *dht) handleRes(res *dhtRes) {
	reqs, isIn := t.reqs[res.key]
	if !isIn { return }
	_, isIn = reqs[res.dest]
	if !isIn { return }
	rinfo := dhtInfo{
		key:    res.key,
		coords: res.coords,
		send:   time.Now(), // Technically wrong but should be OK... FIXME or not
		recv:   time.Now(),
	}
	// If they're already in the table, then keep the correct send time
	bidx, isOK := t.getBucketIndex(rinfo.getNodeID())
	if !isOK { return }
	b := t.getBucket(bidx)
	for _, oldinfo := range b.infos {
		if oldinfo.key == rinfo.key { rinfo.send = oldinfo.send }
	}
	// Insert into table
	t.insert(&rinfo)
	if res.dest == *rinfo.getNodeID() { return } // No infinite recursions
	// Ping the nodes we were told about
	if len(res.infos) > dht_lookup_size {
		// Ignore any "extra" lookup results
		res.infos = res.infos[:dht_lookup_size]
	}
	for _, info := range res.infos {
		bidx, isOK := t.getBucketIndex(info.getNodeID())
		if !isOK { continue }
		b := t.getBucket(bidx)
		if b.contains(info) { continue } // wait for maintenance cycle to get them
		t.ping(info, info.getNodeID())
	}
}

func (t *dht) lookup(nodeID *NodeID) []*dhtInfo {
	// FIXME this allocates a bunch, sorts, and keeps the part it likes
	// It would be better to only track the part it likes to begin with
	addInfos := func(res []*dhtInfo, infos []*dhtInfo) []*dhtInfo {
		for _, info := range infos {
			if info == nil { panic("Should never happen!") }
			if true || dht_firstCloserThanThird(info.getNodeID(), nodeID, &t.nodeID) {
				res = append(res, info)
			}
		}
		return res
	}
	var res []*dhtInfo
	for bidx := 0; bidx < t.nBuckets(); bidx++ {
		b := t.getBucket(bidx)
		res = addInfos(res, b.infos)
	}
	doSort := func(infos []*dhtInfo) {
		less := func(i, j int) bool {
			return dht_firstCloserThanThird(infos[i].getNodeID(),
				nodeID,
				infos[j].getNodeID())
		}
		sort.SliceStable(infos, less)
	}
	doSort(res)
	if len(res) > dht_lookup_size { res = res[:dht_lookup_size] }
	return res
}

func (t *dht) getBucket(bidx int) *bucket {
	return &t.buckets_hidden[bidx]
}

func (t *dht) nBuckets() int {
	return len(t.buckets_hidden)
}

func (t *dht) insertIfNew(info *dhtInfo) {
	//fmt.Println("DEBUG: dht insertIfNew:", info.getNodeID(), info.coords)
	// Insert a peer if and only if the bucket doesn't already contain it
	nodeID := info.getNodeID()
	bidx, isOK := t.getBucketIndex(nodeID)
	if !isOK { return }
	b := t.getBucket(bidx)
	if !b.contains(info) {
		// We've never heard from this node before
		// TODO is there a better time than "now" to set send/recv to?
		// (Is there another "natural" choice that bootstraps faster?)
		info.send = time.Now()
		info.recv = info.send
		t.insert(info)
	}
}

func (t *dht) insert(info *dhtInfo) {
	//fmt.Println("DEBUG: dht insert:", info.getNodeID(), info.coords)
	// First update the time on this info
	info.recv = time.Now()
	// Get the bucket for this node
	nodeID := info.getNodeID()
	bidx, isOK := t.getBucketIndex(nodeID)
	if !isOK { return }
	b := t.getBucket(bidx)
	// First drop any existing entry from the bucket
	b.drop(&info.key)
	// Now add to the *end* of the bucket
	b.infos = append(b.infos, info)
	// Check if the next bucket is non-full and return early if it is
	if bidx+1 == t.nBuckets() { return }
	bnext := t.getBucket(bidx + 1)
	if len(bnext.infos) < dht_bucket_size { return }
	// Shrink from the *front* to the required size
	for len(b.infos) > dht_bucket_size { b.infos = b.infos[1:] }
}

func (t *dht) getBucketIndex(nodeID *NodeID) (int, bool) {
	for bidx := 0; bidx < t.nBuckets(); bidx++ {
		them := nodeID[bidx/8] & (0x80 >> byte(bidx%8))
		me := t.nodeID[bidx/8] & (0x80 >> byte(bidx%8))
		if them != me { return bidx, true }
	}
	return t.nBuckets(), false
}
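// Note (added): the bucket index is the position of the first bit (MSB first)
// where a NodeID differs from our own, i.e. the length of the shared prefix.
// A longer shared prefix means a closer node in the XOR metric, and such
// nodes land in later buckets; IDs differing in the very first bit go in
// bucket 0.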
func (b *bucket) contains(ninfo *dhtInfo) bool {
	// Compares if key and coords match
	for _, info := range b.infos {
		if info == nil { panic("Should never happen") }
		if info.key == ninfo.key {
			if len(info.coords) != len(ninfo.coords) { return false }
			for idx := 0; idx < len(info.coords); idx++ {
				if info.coords[idx] != ninfo.coords[idx] { return false }
			}
			return true
		}
	}
	return false
}

func (b *bucket) drop(key *boxPubKey) {
	clean := func(infos []*dhtInfo) []*dhtInfo {
		cleaned := infos[:0]
		for _, info := range infos {
			if info.key == *key { continue }
			cleaned = append(cleaned, info)
		}
		return cleaned
	}
	b.infos = clean(b.infos)
}

func (t *dht) sendReq(req *dhtReq, dest *dhtInfo) {
	// Send a dhtReq to the node in dhtInfo
	bs := req.encode()
	shared := t.core.sessions.getSharedKey(&t.core.boxPriv, &dest.key)
	payload, nonce := boxSeal(shared, bs, nil)
	p := wire_protoTrafficPacket{
		ttl:     ^uint64(0),
		coords:  dest.coords,
		toKey:   dest.key,
		fromKey: t.core.boxPub,
		nonce:   *nonce,
		payload: payload,
	}
	packet := p.encode()
	t.core.router.out(packet)
	reqsToDest, isIn := t.reqs[dest.key]
	if !isIn {
		t.reqs[dest.key] = make(map[NodeID]time.Time)
		reqsToDest, isIn = t.reqs[dest.key]
		if !isIn { panic("This should never happen") }
	}
	reqsToDest[req.dest] = time.Now()
}

func (t *dht) sendRes(res *dhtRes, req *dhtReq) {
	// Send a reply for a dhtReq
	bs := res.encode()
	shared := t.core.sessions.getSharedKey(&t.core.boxPriv, &req.key)
	payload, nonce := boxSeal(shared, bs, nil)
	p := wire_protoTrafficPacket{
		ttl:     ^uint64(0),
		coords:  req.coords,
		toKey:   req.key,
		fromKey: t.core.boxPub,
		nonce:   *nonce,
		payload: payload,
	}
	packet := p.encode()
	t.core.router.out(packet)
}

func (b *bucket) isEmpty() bool {
	return len(b.infos) == 0
}

func (b *bucket) nextToPing() *dhtInfo {
	// Check the nodes in the bucket
	// Return whichever one responded least recently
	// Delay of 6 seconds between pinging the same node
	// Gives them time to respond
	// And time to recover from traffic loss due to short-term congestion in the network
	var toPing *dhtInfo
	for _, next := range b.infos {
		if time.Since(next.send) < 6*time.Second { continue }
		if toPing == nil || next.recv.Before(toPing.recv) { toPing = next }
	}
	return toPing
}

func (t *dht) getTarget(bidx int) *NodeID {
	targetID := t.nodeID
	targetID[bidx/8] ^= 0x80 >> byte(bidx%8)
	return &targetID
}
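// Note (added): getTarget flips exactly the bidx'th bit of our own NodeID, so
// by construction the result falls into bucket bidx. doMaintenance below
// walks these targets one bucket per tick to keep every bucket refreshed.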
func (t *dht) ping(info *dhtInfo, target *NodeID) {
	if info.pings > 2 {
		bidx, isOK := t.getBucketIndex(info.getNodeID())
		if !isOK { panic("This should never happen") }
		b := t.getBucket(bidx)
		b.drop(&info.key)
		return
	}
	if target == nil { target = &t.nodeID }
	loc := t.core.switchTable.getLocator()
	coords := loc.getCoords()
	req := dhtReq{
		key:    t.core.boxPub,
		coords: coords,
		dest:   *target,
	}
	info.pings++
	info.send = time.Now()
	t.sendReq(&req, info)
}

func (t *dht) doMaintenance() {
	// First clean up reqs
	for key, reqs := range t.reqs {
		for target, timeout := range reqs {
			if time.Since(timeout) > time.Minute { delete(reqs, target) }
		}
		if len(reqs) == 0 { delete(t.reqs, key) }
	}
	// Ping the least recently contacted node
	// This is to make sure we eventually notice when someone times out
	var oldest *dhtInfo
	last := 0
	for bidx := 0; bidx < t.nBuckets(); bidx++ {
		b := t.getBucket(bidx)
		if !b.isEmpty() {
			last = bidx
			toPing := b.nextToPing()
			if toPing == nil { continue } // We've recently pinged everyone in b
			if oldest == nil || toPing.recv.Before(oldest.recv) {
				oldest = toPing
			}
		}
	}
	if oldest != nil { t.ping(oldest, nil) } // if the DHT isn't empty
	// Refresh buckets
	if t.offset > last { t.offset = 0 }
	target := t.getTarget(t.offset)
	for _, info := range t.lookup(target) {
		t.ping(info, target)
		break
	}
	t.offset++
}

func dht_firstCloserThanThird(first *NodeID,
	second *NodeID,
	third *NodeID) bool {
	for idx := 0; idx < NodeIDLen; idx++ {
		f := first[idx] ^ second[idx]
		t := third[idx] ^ second[idx]
		if f == t { continue }
		return f < t
	}
	return false
}
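// Note (added): dht_firstCloserThanThird compares XOR distances byte by byte;
// XORing big-endian byte strings and comparing lexicographically gives the
// usual kademlia distance order. For example, with one-byte IDs, first=2,
// second=0, third=4 gives distances 2 and 4, so first is closer to second
// and the function returns true.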
345
src/yggdrasil/peer.go
Normal file
@@ -0,0 +1,345 @@
package yggdrasil

// TODO cleanup, this file is kind of a mess

// FIXME? this part may be at least slightly vulnerable to replay attacks
// The switch message part should catch / drop old tstamps
// So the damage is limited
// But you could still mess up msgAnc / msgHops and break some things there

import "time"
import "sync"
import "sync/atomic"
import "math"

//import "fmt"

type peers struct {
	core  *Core
	mutex sync.Mutex   // Synchronize writes to atomic
	ports atomic.Value //map[Port]*peer, use CoW semantics
	//ports map[Port]*peer
}

func (ps *peers) init(c *Core) {
	ps.mutex.Lock()
	defer ps.mutex.Unlock()
	ps.putPorts(make(map[switchPort]*peer))
	ps.core = c
}

func (ps *peers) getPorts() map[switchPort]*peer {
	return ps.ports.Load().(map[switchPort]*peer)
}

func (ps *peers) putPorts(ports map[switchPort]*peer) {
	ps.ports.Store(ports)
}

type peer struct {
	// Rolling approximation of bandwidth, in bps, used by switch, updated by tcp
	// use get/update methods only! (atomic accessors as float64)
	bandwidth uint64
	// BUG: sync/atomic, 32 bit platforms need the above to be the first element
	box    boxPubKey
	sig    sigPubKey
	shared boxSharedKey
	//in <-chan []byte
	//out chan<- []byte
	//in func([]byte)
	out     func([]byte)
	core    *Core
	port    switchPort
	msgAnc  *msgAnnounce
	msgHops []*msgHop
	myMsg   *switchMessage
	mySigs  []sigInfo
	// This is used to limit how often we perform expensive operations
	// Specifically, processing switch messages, signing, and verifying sigs
	// Resets at the start of each tick
	throttle uint8
}

const peer_Throttle = 1

func (p *peer) getBandwidth() float64 {
	bits := atomic.LoadUint64(&p.bandwidth)
	return math.Float64frombits(bits)
}

func (p *peer) updateBandwidth(bytes int, duration time.Duration) {
	if p == nil { return }
	for ok := false; !ok; {
		oldBits := atomic.LoadUint64(&p.bandwidth)
		oldBandwidth := math.Float64frombits(oldBits)
		bandwidth := oldBandwidth*7/8 + float64(bytes)/duration.Seconds()
		bits := math.Float64bits(bandwidth)
		ok = atomic.CompareAndSwapUint64(&p.bandwidth, oldBits, bits)
	}
}
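// Note (added): updateBandwidth keeps an exponentially decaying average: each
// update shrinks the old estimate by a factor of 7/8 and adds the new
// instantaneous rate bytes/duration. Storing the float64 bit pattern in a
// uint64 lets the compare-and-swap loop make the read-modify-write atomic
// without taking a lock on the hot path.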
func (ps *peers) newPeer(box *boxPubKey,
	sig *sigPubKey) *peer {
	//in <-chan []byte,
	//out chan<- []byte) *peer {
	p := peer{box: *box,
		sig:    *sig,
		shared: *getSharedKey(&ps.core.boxPriv, box),
		//in: in,
		//out: out,
		core: ps.core}
	ps.mutex.Lock()
	defer ps.mutex.Unlock()
	oldPorts := ps.getPorts()
	newPorts := make(map[switchPort]*peer)
	for k, v := range oldPorts { newPorts[k] = v }
	for idx := switchPort(0); true; idx++ {
		if _, isIn := newPorts[idx]; !isIn {
			p.port = switchPort(idx)
			newPorts[p.port] = &p
			break
		}
	}
	ps.putPorts(newPorts)
	return &p
}

func (p *peer) linkLoop(in <-chan []byte) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case packet, ok := <-in:
			if !ok { return }
			p.handleLinkTraffic(packet)
		case <-ticker.C:
			p.throttle = 0
			if p.port == 0 { continue } // Don't send announces on selfInterface
			// Maybe we shouldn't time out, and instead wait for a kill signal?
			p.myMsg, p.mySigs = p.core.switchTable.createMessage(p.port)
			p.sendSwitchAnnounce()
		}
	}
}

func (p *peer) handlePacket(packet []byte, linkIn chan<- []byte) {
	pType, pTypeLen := wire_decode_uint64(packet)
	if pTypeLen == 0 { return }
	switch pType {
	case wire_Traffic: p.handleTraffic(packet, pTypeLen)
	case wire_ProtocolTraffic: p.handleTraffic(packet, pTypeLen)
	case wire_LinkProtocolTraffic:
		select {
		case linkIn <- packet:
		default:
		}
	default: /*panic(pType) ;*/ return
	}
}

func (p *peer) handleTraffic(packet []byte, pTypeLen int) {
	ttl, ttlLen := wire_decode_uint64(packet[pTypeLen:])
	ttlBegin := pTypeLen
	ttlEnd := pTypeLen + ttlLen
	coords, coordLen := wire_decode_coords(packet[ttlEnd:])
	coordEnd := ttlEnd + coordLen
	if coordEnd == len(packet) { return } // No payload
	toPort, newTTL := p.core.switchTable.lookup(coords, ttl)
	if toPort == p.port { return } // FIXME? shouldn't happen, does it? would loop
	to := p.core.peers.getPorts()[toPort]
	if to == nil { return }
	newTTLSlice := wire_encode_uint64(newTTL)
	// This mutates the packet in-place if the length of the TTL changes!
	shift := ttlLen - len(newTTLSlice)
	copy(packet[ttlBegin+shift:], newTTLSlice)
	copy(packet[shift:], packet[:pTypeLen])
	packet = packet[shift:]
	to.sendPacket(packet)
}
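// Note (added): the TTL is a variable-length integer, so decrementing it can
// shorten its encoding by `shift` bytes. Rather than allocating a new buffer,
// handleTraffic writes the new TTL in place, slides the packet-type bytes
// right by `shift`, and then re-slices the packet to drop the slack at the
// front.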
func (p *peer) sendPacket(packet []byte) {
	// Is there ever a case where something more complicated is needed?
	// What if p.out blocks?
	p.out(packet)
}

func (p *peer) sendLinkPacket(packet []byte) {
	bs, nonce := boxSeal(&p.shared, packet, nil)
	linkPacket := wire_linkProtoTrafficPacket{
		toKey:   p.box,
		fromKey: p.core.boxPub,
		nonce:   *nonce,
		payload: bs,
	}
	packet = linkPacket.encode()
	p.sendPacket(packet)
}

func (p *peer) handleLinkTraffic(bs []byte) {
	packet := wire_linkProtoTrafficPacket{}
	// TODO throttle on returns?
	if !packet.decode(bs) { return }
	if packet.toKey != p.core.boxPub { return }
	if packet.fromKey != p.box { return }
	payload, isOK := boxOpen(&p.shared, packet.payload, &packet.nonce)
	if !isOK { return }
	pType, pTypeLen := wire_decode_uint64(payload)
	if pTypeLen == 0 { return }
	switch pType {
	case wire_SwitchAnnounce: p.handleSwitchAnnounce(payload)
	case wire_SwitchHopRequest: p.handleSwitchHopRequest(payload)
	case wire_SwitchHop: p.handleSwitchHop(payload)
	}
}

func (p *peer) handleSwitchAnnounce(packet []byte) {
	//p.core.log.Println("DEBUG: handleSwitchAnnounce")
	anc := msgAnnounce{}
	//err := wire_decode_struct(packet, &anc)
	//if err != nil { return }
	if !anc.decode(packet) { return }
	//if p.msgAnc != nil && anc.Seq != p.msgAnc.Seq { p.msgHops = nil }
	if p.msgAnc == nil ||
		anc.root != p.msgAnc.root ||
		anc.tstamp != p.msgAnc.tstamp ||
		anc.seq != p.msgAnc.seq { p.msgHops = nil }
	p.msgAnc = &anc
	p.processSwitchMessage()
}

func (p *peer) requestHop(hop uint64) {
	//p.core.log.Println("DEBUG requestHop")
	req := msgHopReq{}
	req.root = p.msgAnc.root
	req.tstamp = p.msgAnc.tstamp
	req.seq = p.msgAnc.seq
	req.hop = hop
	packet := req.encode()
	p.sendLinkPacket(packet)
}

func (p *peer) handleSwitchHopRequest(packet []byte) {
	//p.core.log.Println("DEBUG: handleSwitchHopRequest")
	if p.throttle > peer_Throttle { return }
	if p.myMsg == nil { return }
	req := msgHopReq{}
	if !req.decode(packet) { return }
	if req.root != p.myMsg.locator.root { return }
	if req.tstamp != p.myMsg.locator.tstamp { return }
	if req.seq != p.myMsg.seq { return }
	if uint64(len(p.myMsg.locator.coords)) <= req.hop { return }
	res := msgHop{}
	res.root = p.myMsg.locator.root
	res.tstamp = p.myMsg.locator.tstamp
	res.seq = p.myMsg.seq
	res.hop = req.hop
	res.port = p.myMsg.locator.coords[res.hop]
	sinfo := p.getSig(res.hop)
	//p.core.log.Println("DEBUG sig:", sinfo)
	res.next = sinfo.next
	res.sig = sinfo.sig
	packet = res.encode()
	p.sendLinkPacket(packet)
}

func (p *peer) handleSwitchHop(packet []byte) {
	//p.core.log.Println("DEBUG: handleSwitchHop")
	if p.throttle > peer_Throttle { return }
	if p.msgAnc == nil { return }
	res := msgHop{}
	if !res.decode(packet) { return }
	if res.root != p.msgAnc.root { return }
	if res.tstamp != p.msgAnc.tstamp { return }
	if res.seq != p.msgAnc.seq { return }
	if res.hop != uint64(len(p.msgHops)) { return } // always process in order
	loc := switchLocator{coords: make([]switchPort, 0, len(p.msgHops)+1)}
	loc.root = res.root
	loc.tstamp = res.tstamp
	for _, hop := range p.msgHops { loc.coords = append(loc.coords, hop.port) }
	loc.coords = append(loc.coords, res.port)
	thisHopKey := &res.root
	if res.hop != 0 { thisHopKey = &p.msgHops[res.hop-1].next }
	bs := getBytesForSig(&res.next, &loc)
	if p.core.sigs.check(thisHopKey, &res.sig, bs) {
		p.msgHops = append(p.msgHops, &res)
		p.processSwitchMessage()
	} else {
		p.throttle++
	}
}

func (p *peer) processSwitchMessage() {
	//p.core.log.Println("DEBUG: processSwitchMessage")
	if p.throttle > peer_Throttle { return }
	if p.msgAnc == nil { return }
	if uint64(len(p.msgHops)) < p.msgAnc.len {
		p.requestHop(uint64(len(p.msgHops)))
		return
	}
	p.throttle++
	if p.msgAnc.len != uint64(len(p.msgHops)) { return }
	msg := switchMessage{}
	coords := make([]switchPort, 0, len(p.msgHops))
	sigs := make([]sigInfo, 0, len(p.msgHops))
	for idx, hop := range p.msgHops {
		// Consistency checks, should be redundant (already checked these...)
		if hop.root != p.msgAnc.root { return }
		if hop.tstamp != p.msgAnc.tstamp { return }
		if hop.seq != p.msgAnc.seq { return }
		if hop.hop != uint64(idx) { return }
		coords = append(coords, hop.port)
		sigs = append(sigs, sigInfo{next: hop.next, sig: hop.sig})
	}
	msg.from = p.sig
	msg.locator.root = p.msgAnc.root
	msg.locator.tstamp = p.msgAnc.tstamp
	msg.locator.coords = coords
	msg.seq = p.msgAnc.seq
	//msg.RSeq = p.msgAnc.RSeq
	//msg.Degree = p.msgAnc.Deg
	p.core.switchTable.handleMessage(&msg, p.port, sigs)
	if len(coords) == 0 { return }
	// Reuse the locator, set the coords to the peer's coords, to use in the dht
	msg.locator.coords = coords[:len(coords)-1]
	// Pass a message to the dht informing it that this peer (still) exists
	dinfo := dhtInfo{
		key:    p.box,
		coords: msg.locator.getCoords(),
	}
	p.core.dht.peers <- &dinfo
}

func (p *peer) sendSwitchAnnounce() {
	anc := msgAnnounce{}
	anc.root = p.myMsg.locator.root
	anc.tstamp = p.myMsg.locator.tstamp
	anc.seq = p.myMsg.seq
	anc.len = uint64(len(p.myMsg.locator.coords))
	//anc.Deg = p.myMsg.Degree
	//anc.RSeq = p.myMsg.RSeq
	packet := anc.encode()
	p.sendLinkPacket(packet)
}

func (p *peer) getSig(hop uint64) sigInfo {
	//p.core.log.Println("DEBUG getSig:", len(p.mySigs), hop)
	if hop < uint64(len(p.mySigs)) { return p.mySigs[hop] }
	bs := getBytesForSig(&p.sig, &p.myMsg.locator)
	sig := sigInfo{}
	sig.next = p.sig
	sig.sig = *sign(&p.core.sigPriv, bs)
	p.mySigs = append(p.mySigs, sig)
	//p.core.log.Println("DEBUG sig bs:", bs)
	return sig
}

func getBytesForSig(next *sigPubKey, loc *switchLocator) []byte {
	//bs, err := wire_encode_locator(loc)
	//if err != nil { panic(err) }
	bs := append([]byte(nil), next[:]...)
	bs = append(bs, wire_encode_locator(loc)...)
	//bs := wire_encode_locator(loc)
	//bs = append(next[:], bs...)
	return bs
}
220
src/yggdrasil/router.go
Normal file
@@ -0,0 +1,220 @@
package yggdrasil

// This part does most of the work to handle packets to/from yourself
// It also manages crypto and dht info
// TODO? move dht stuff into another goroutine?

// Send:
//  Receive a packet from the tun
//  Look up the session (if none exists, trigger a search)
//  Hand it off to the session (which encrypts, etc)
//  The session passes it back to router.out, which hands it off to the self peer
//  The self peer triggers a lookup to find which peer to send it to next
//  And then passes it to that peer's peer.out function
//  The peer.out function sends it over the wire to the matching peer

// Recv:
//  A packet comes in off the wire, and goes to a peer.handlePacket
//  The peer does a lookup, sees no better peer than the self
//  Hands it to the self peer.out, which passes it to router.in
//  If it's dht/search/etc. traffic, the router passes it to that part
//  If it's an encapsulated IPv6 packet, the router looks up the session for it
//  The packet is passed to the session, which decrypts it, router.recvPacket
//  The router then runs some sanity checks before passing it to the tun

import "time"

//import "fmt"
//import "net"

type router struct {
	core  *Core
	addr  address
	in    <-chan []byte // packets we received from the network, link to peer's "out"
	out   func([]byte)  // packets we're sending to the network, link to peer's "in"
	recv  chan<- []byte // place where the tun pulls received packets from
	send  <-chan []byte // place where the tun puts outgoing packets
	reset chan struct{} // signal that coords changed (re-init sessions/dht)
}

func (r *router) init(core *Core) {
	r.core = core
	r.addr = *address_addrForNodeID(&r.core.dht.nodeID)
	in := make(chan []byte, 1) // TODO something better than this...
	p := r.core.peers.newPeer(&r.core.boxPub, &r.core.sigPub) //, out, in)
	// TODO set in/out functions on the new peer...
	p.out = func(packet []byte) { in <- packet } // FIXME in theory it blocks...
	r.in = in
	// TODO? make caller responsible for go-ing if it needs to not block
	r.out = func(packet []byte) { p.handlePacket(packet, nil) }
	// TODO attach these to the tun
	// Maybe that's the core's job...
	// It creates the tun, creates the router, creates channels, sets them?
	recv := make(chan []byte, 1)
	send := make(chan []byte, 1)
	r.recv = recv
	r.send = send
	r.core.tun.recv = recv
	r.core.tun.send = send
	r.reset = make(chan struct{}, 1)
	go r.mainLoop()
}

func (r *router) mainLoop() {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case p := <-r.in: r.handleIn(p)
		case p := <-r.send: r.sendPacket(p)
		case info := <-r.core.dht.peers: r.core.dht.insert(info) //r.core.dht.insertIfNew(info)
		case <-r.reset: r.core.sessions.resetInits()
		case <-ticker.C:
			// Any periodic maintenance stuff goes here
			r.core.dht.doMaintenance()
			util_getBytes() // To slowly drain things
		}
	}
}

func (r *router) sendPacket(bs []byte) {
	if len(bs) < 40 { panic("Tried to send a packet shorter than a header...") }
	var sourceAddr address
	var sourceSubnet subnet
	copy(sourceAddr[:], bs[8:])
	copy(sourceSubnet[:], bs[8:])
	if !sourceAddr.isValid() && !sourceSubnet.isValid() { return }
	var dest address
	copy(dest[:], bs[24:])
	var snet subnet
	copy(snet[:], bs[24:])
	if !dest.isValid() && !snet.isValid() { return }
	doSearch := func(packet []byte) {
		var nodeID, mask *NodeID
		if dest.isValid() { nodeID, mask = dest.getNodeIDandMask() }
		if snet.isValid() { nodeID, mask = snet.getNodeIDandMask() }
		sinfo, isIn := r.core.searches.searches[*nodeID]
		if !isIn { sinfo = r.core.searches.createSearch(nodeID, mask) }
		if packet != nil { sinfo.packet = packet }
		r.core.searches.sendSearch(sinfo)
	}
	var sinfo *sessionInfo
	var isIn bool
	if dest.isValid() { sinfo, isIn = r.core.sessions.getByTheirAddr(&dest) }
	if snet.isValid() { sinfo, isIn = r.core.sessions.getByTheirSubnet(&snet) }
	switch {
	case !isIn || !sinfo.init:
		// No session, or an uninitialized one, so we need to search first
		doSearch(bs)
	case time.Since(sinfo.time) > 6*time.Second:
		// We haven't heard from the dest in a while
		// Maybe the connection is idle, or maybe one of us changed coords
		// Try searching to either ping them (a little overhead) or fix the coords
		doSearch(nil)
		fallthrough
	//default: go func() { sinfo.send<-bs }()
	default: sinfo.send <- bs
	}
}
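// Note (added): the offsets in sendPacket follow the fixed IPv6 header, where
// bytes 8:24 hold the source address and bytes 24:40 the destination address;
// the len(bs) < 40 panic above rejects anything shorter than that header.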
func (r *router) recvPacket(bs []byte, theirAddr *address) {
	// TODO pass their NodeID, check *that* instead
	// Or store their address in the session?...
	//fmt.Println("Recv packet")
	if theirAddr == nil { panic("Should not happen ever") }
	if len(bs) < 24 { return }
	var source address
	copy(source[:], bs[8:])
	var snet subnet
	copy(snet[:], bs[8:])
	if !source.isValid() && !snet.isValid() { return }
	//go func() { r.recv<-bs }()
	r.recv <- bs
}

func (r *router) handleIn(packet []byte) {
	pType, pTypeLen := wire_decode_uint64(packet)
	if pTypeLen == 0 { return }
	switch pType {
	case wire_Traffic: r.handleTraffic(packet)
	case wire_ProtocolTraffic: r.handleProto(packet)
	default: /*panic("Should not happen in testing") ;*/ return
	}
}

func (r *router) handleTraffic(packet []byte) {
	defer util_putBytes(packet)
	p := wire_trafficPacket{}
	if !p.decode(packet) { return }
	sinfo, isIn := r.core.sessions.getSessionForHandle(&p.handle)
	if !isIn { return }
	//go func () { sinfo.recv<-&p }()
	sinfo.recv <- &p
}

func (r *router) handleProto(packet []byte) {
	// First parse the packet
	p := wire_protoTrafficPacket{}
	if !p.decode(packet) { return }
	// Now try to open the payload
	var sharedKey *boxSharedKey
	//var theirPermPub *boxPubKey
	if p.toKey == r.core.boxPub {
		// Try to open using our permanent key
		sharedKey = r.core.sessions.getSharedKey(&r.core.boxPriv, &p.fromKey)
	} else { return }
	bs, isOK := boxOpen(sharedKey, p.payload, &p.nonce)
	if !isOK { return }
	// Now do something with the bytes in bs...
	// send dht messages to dht, sessionRefresh to sessions, data to tun...
	// For data, should check that key and IP match...
	bsType, bsTypeLen := wire_decode_uint64(bs)
	if bsTypeLen == 0 { return }
	//fmt.Println("RECV bytes:", bs)
	switch bsType {
	case wire_SessionPing: r.handlePing(bs, &p.fromKey)
	case wire_SessionPong: r.handlePong(bs, &p.fromKey)
	case wire_DHTLookupRequest: r.handleDHTReq(bs, &p.fromKey)
	case wire_DHTLookupResponse: r.handleDHTRes(bs, &p.fromKey)
	case wire_SearchRequest: r.handleSearchReq(bs)
	case wire_SearchResponse: r.handleSearchRes(bs)
	default: /*panic("Should not happen in testing") ;*/ return
	}
}

func (r *router) handlePing(bs []byte, fromKey *boxPubKey) {
	ping := sessionPing{}
	if !ping.decode(bs) { return }
	ping.sendPermPub = *fromKey
	r.core.sessions.handlePing(&ping)
}

func (r *router) handlePong(bs []byte, fromKey *boxPubKey) {
	r.handlePing(bs, fromKey)
}

func (r *router) handleDHTReq(bs []byte, fromKey *boxPubKey) {
	req := dhtReq{}
	if !req.decode(bs) { return }
	if req.key != *fromKey { return }
	r.core.dht.handleReq(&req)
}

func (r *router) handleDHTRes(bs []byte, fromKey *boxPubKey) {
	res := dhtRes{}
	if !res.decode(bs) { return }
	if res.key != *fromKey { return }
	r.core.dht.handleRes(&res)
}

func (r *router) handleSearchReq(bs []byte) {
	req := searchReq{}
	if !req.decode(bs) { return }
	r.core.searches.handleSearchReq(&req)
}

func (r *router) handleSearchRes(bs []byte) {
	res := searchRes{}
	if !res.decode(bs) { return }
	r.core.searches.handleSearchRes(&res)
}
168
src/yggdrasil/search.go
Normal file
@@ -0,0 +1,168 @@
package yggdrasil

// This thing manages search packets

// The basic idea is as follows:
//  We may know a NodeID (with a mask) and want to connect
//  We forward a searchReq packet through the dht
//  The last person in the dht will respond with a searchRes
//  If the responder's nodeID is close enough to the requested key, it matches
//  The "close enough" is handled by a bitmask, set when the request is sent
//  For testing in the sim, it must match exactly
//  For the real world, the mask would need to map it to the desired IPv6
// This is also where we store the temporary keys used to send a request
//  Would go in sessions, but can't open one without knowing the perm key
// This is largely to avoid using an iterative DHT lookup approach
//  The iterative parallel lookups from kad can skip over some DHT blackholes
//  This hides bugs, which I don't want to do right now

import "time"

//import "fmt"

type searchInfo struct {
	dest   *NodeID
	mask   *NodeID
	time   time.Time
	packet []byte
}

type searches struct {
	core     *Core
	searches map[NodeID]*searchInfo
}

func (s *searches) init(core *Core) {
	s.core = core
	s.searches = make(map[NodeID]*searchInfo)
}

func (s *searches) createSearch(dest *NodeID, mask *NodeID) *searchInfo {
	now := time.Now()
	for dest, sinfo := range s.searches {
		if now.Sub(sinfo.time) > time.Minute {
			delete(s.searches, dest)
		}
	}
	info := searchInfo{
		dest: dest,
		mask: mask,
		time: now.Add(-time.Second),
	}
	s.searches[*dest] = &info
	return &info
}

////////////////////////////////////////////////////////////////////////////////

type searchReq struct {
	key    boxPubKey // Who I am
	coords []byte    // Where I am
	dest   NodeID    // Who I'm trying to connect to
}

type searchRes struct {
	key    boxPubKey // Who I am
	coords []byte    // Where I am
	dest   NodeID    // Who I was asked about
}

func (s *searches) sendSearch(info *searchInfo) {
	now := time.Now()
	if now.Sub(info.time) < time.Second { return }
	loc := s.core.switchTable.getLocator()
	coords := loc.getCoords()
	req := searchReq{
		key:    s.core.boxPub,
		coords: coords,
		dest:   *info.dest,
	}
	info.time = time.Now()
	s.handleSearchReq(&req)
}

func (s *searches) handleSearchReq(req *searchReq) {
	lookup := s.core.dht.lookup(&req.dest)
	sent := false
	//fmt.Println("DEBUG len:", len(lookup))
	for _, info := range lookup {
		//fmt.Println("DEBUG lup:", info.getNodeID())
		if dht_firstCloserThanThird(info.getNodeID(),
			&req.dest,
			&s.core.dht.nodeID) {
			s.forwardSearch(req, info)
			sent = true
			break
		}
	}
	if !sent { s.sendSearchRes(req) }
}

func (s *searches) forwardSearch(req *searchReq, next *dhtInfo) {
	//fmt.Println("DEBUG fwd:", req.dest, next.getNodeID())
	bs := req.encode()
	shared := s.core.sessions.getSharedKey(&s.core.boxPriv, &next.key)
	payload, nonce := boxSeal(shared, bs, nil)
	p := wire_protoTrafficPacket{
		ttl:     ^uint64(0),
		coords:  next.coords,
		toKey:   next.key,
		fromKey: s.core.boxPub,
		nonce:   *nonce,
		payload: payload,
	}
	packet := p.encode()
	s.core.router.out(packet)
}

func (s *searches) sendSearchRes(req *searchReq) {
	//fmt.Println("DEBUG res:", req.dest, s.core.dht.nodeID)
	loc := s.core.switchTable.getLocator()
	coords := loc.getCoords()
	res := searchRes{
		key:    s.core.boxPub,
		coords: coords,
		dest:   req.dest,
	}
	bs := res.encode()
	shared := s.core.sessions.getSharedKey(&s.core.boxPriv, &req.key)
	payload, nonce := boxSeal(shared, bs, nil)
	p := wire_protoTrafficPacket{
		ttl:     ^uint64(0),
		coords:  req.coords,
		toKey:   req.key,
		fromKey: s.core.boxPub,
		nonce:   *nonce,
		payload: payload,
	}
	packet := p.encode()
	s.core.router.out(packet)
}

func (s *searches) handleSearchRes(res *searchRes) {
	info, isIn := s.searches[res.dest]
	if !isIn { return }
	them := getNodeID(&res.key)
	var destMasked NodeID
	var themMasked NodeID
	for idx := 0; idx < NodeIDLen; idx++ {
		destMasked[idx] = info.dest[idx] & info.mask[idx]
		themMasked[idx] = them[idx] & info.mask[idx]
	}
	//fmt.Println("DEBUG search res1:", themMasked, destMasked)
	//fmt.Println("DEBUG search res2:", *them, *info.dest, *info.mask)
	if themMasked != destMasked { return }
	// They match, so create a session and send a sessionRequest
	sinfo, isIn := s.core.sessions.getByTheirPerm(&res.key)
	if !isIn {
		sinfo = s.core.sessions.createSession(&res.key)
		_, isIn := s.core.sessions.getByTheirPerm(&res.key)
		if !isIn { panic("This should never happen") }
	}
	// FIXME replay attacks could mess with coords?
	sinfo.coords = res.coords
	sinfo.packet = info.packet
	s.core.sessions.ping(sinfo)
	// Cleanup
	delete(s.searches, res.dest)
}
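// Illustrative sketch (added, not part of the original commit): the masked
// comparison in handleSearchRes, written out as a standalone predicate. A
// response matches when the responder's NodeID agrees with the requested
// NodeID on every bit where the mask is set.
//
//	func maskedMatch(dest, them, mask *NodeID) bool {
//		for idx := 0; idx < NodeIDLen; idx++ {
//			if dest[idx]&mask[idx] != them[idx]&mask[idx] { return false }
//		}
//		return true
//	}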
327
src/yggdrasil/session.go
Normal file
@@ -0,0 +1,327 @@
|
||||
package yggdrasil
|
||||
|
||||
// This is the session manager
|
||||
// It's responsible for keeping track of open sessions to other nodes
|
||||
// The session information consists of crypto keys and coords
|
||||
|
||||
import "time"
|
||||
|
||||
type sessionInfo struct {
|
||||
core *Core
|
||||
theirAddr address
|
||||
theirSubnet subnet
|
||||
theirPermPub boxPubKey
|
||||
theirSesPub boxPubKey
|
||||
mySesPub boxPubKey
|
||||
mySesPriv boxPrivKey
|
||||
sharedSesKey boxSharedKey // derived from session keys
|
||||
theirHandle handle
|
||||
myHandle handle
|
||||
    theirNonce boxNonce
    myNonce boxNonce
    time time.Time // Time we last received a packet
    coords []byte // coords of destination
    packet []byte // a buffered packet, sent immediately on ping/pong
    init bool // Reset if coords change
    send chan []byte
    recv chan *wire_trafficPacket
    nonceMask uint64
    tstamp int64 // tstamp from their last session ping, replay attack mitigation
}

// FIXME replay attacks (include nonce or some sequence number)
type sessionPing struct {
    sendPermPub boxPubKey // Sender's permanent key
    handle handle // Random number to ID session
    sendSesPub boxPubKey // Session key to use
    coords []byte
    tstamp int64 // unix time, but the only real requirement is that it increases
    isPong bool
}

// Returns true if the session was updated, false otherwise
func (s *sessionInfo) update(p *sessionPing) bool {
    if !(p.tstamp > s.tstamp) { return false }
    if p.sendPermPub != s.theirPermPub { return false } // Shouldn't happen
    if p.sendSesPub != s.theirSesPub {
        // FIXME need to protect against replay attacks
        // Put a sequence number or a timestamp or something in the pings?
        // Or just return false, make the session time out?
        s.theirSesPub = p.sendSesPub
        s.theirHandle = p.handle
        s.sharedSesKey = *getSharedKey(&s.mySesPriv, &s.theirSesPub)
        s.theirNonce = boxNonce{}
        s.nonceMask = 0
    }
    s.coords = append([]byte{}, p.coords...)
    s.time = time.Now()
    s.tstamp = p.tstamp
    s.init = true
    return true
}

func (s *sessionInfo) timedout() bool {
    return time.Since(s.time) > time.Minute
}

type sessions struct {
    core *Core
    // Maps known permanent keys to their shared key, used by DHT a lot
    permShared map[boxPubKey]*boxSharedKey
    // Maps (secret) handle onto session info
    sinfos map[handle]*sessionInfo
    // Maps mySesPub onto handle
    byMySes map[boxPubKey]*handle
    // Maps theirPermPub onto handle
    byTheirPerm map[boxPubKey]*handle
    addrToPerm map[address]*boxPubKey
    subnetToPerm map[subnet]*boxPubKey
}

func (ss *sessions) init(core *Core) {
    ss.core = core
    ss.permShared = make(map[boxPubKey]*boxSharedKey)
    ss.sinfos = make(map[handle]*sessionInfo)
    ss.byMySes = make(map[boxPubKey]*handle)
    ss.byTheirPerm = make(map[boxPubKey]*handle)
    ss.addrToPerm = make(map[address]*boxPubKey)
    ss.subnetToPerm = make(map[subnet]*boxPubKey)
}

func (ss *sessions) getSessionForHandle(handle *handle) (*sessionInfo, bool) {
    sinfo, isIn := ss.sinfos[*handle]
    if isIn && sinfo.timedout() {
        // We have a session, but it has timed out
        return nil, false
    }
    return sinfo, isIn
}

func (ss *sessions) getByMySes(key *boxPubKey) (*sessionInfo, bool) {
    h, isIn := ss.byMySes[*key]
    if !isIn { return nil, false }
    sinfo, isIn := ss.getSessionForHandle(h)
    return sinfo, isIn
}

func (ss *sessions) getByTheirPerm(key *boxPubKey) (*sessionInfo, bool) {
    h, isIn := ss.byTheirPerm[*key]
    if !isIn { return nil, false }
    sinfo, isIn := ss.getSessionForHandle(h)
    return sinfo, isIn
}

func (ss *sessions) getByTheirAddr(addr *address) (*sessionInfo, bool) {
    p, isIn := ss.addrToPerm[*addr]
    if !isIn { return nil, false }
    sinfo, isIn := ss.getByTheirPerm(p)
    return sinfo, isIn
}

func (ss *sessions) getByTheirSubnet(snet *subnet) (*sessionInfo, bool) {
    p, isIn := ss.subnetToPerm[*snet]
    if !isIn { return nil, false }
    sinfo, isIn := ss.getByTheirPerm(p)
    return sinfo, isIn
}

func (ss *sessions) createSession(theirPermKey *boxPubKey) *sessionInfo {
    sinfo := sessionInfo{}
    sinfo.core = ss.core
    sinfo.theirPermPub = *theirPermKey
    pub, priv := newBoxKeys()
    sinfo.mySesPub = *pub
    sinfo.mySesPriv = *priv
    sinfo.myNonce = *newBoxNonce() // TODO make sure nonceIsOK tolerates this
    higher := false
    for idx := range ss.core.boxPub {
        if ss.core.boxPub[idx] > sinfo.theirPermPub[idx] {
            higher = true
            break
        } else if ss.core.boxPub[idx] < sinfo.theirPermPub[idx] {
            break
        }
    }
    if higher {
        // higher => odd nonce
        sinfo.myNonce[len(sinfo.myNonce)-1] |= 0x01
    } else {
        // lower => even nonce
        sinfo.myNonce[len(sinfo.myNonce)-1] &= 0xfe
    }
    sinfo.myHandle = *newHandle()
    sinfo.theirAddr = *address_addrForNodeID(getNodeID(&sinfo.theirPermPub))
    sinfo.theirSubnet = *address_subnetForNodeID(getNodeID(&sinfo.theirPermPub))
    sinfo.send = make(chan []byte, 1)
    sinfo.recv = make(chan *wire_trafficPacket, 1)
    go sinfo.doWorker()
    sinfo.time = time.Now()
    // Do some cleanup
    // Time thresholds almost certainly could use some adjusting
    for _, s := range ss.sinfos {
        if s.timedout() { s.close() }
    }
    ss.sinfos[sinfo.myHandle] = &sinfo
    ss.byMySes[sinfo.mySesPub] = &sinfo.myHandle
    ss.byTheirPerm[sinfo.theirPermPub] = &sinfo.myHandle
    ss.addrToPerm[sinfo.theirAddr] = &sinfo.theirPermPub
    ss.subnetToPerm[sinfo.theirSubnet] = &sinfo.theirPermPub
    return &sinfo
}
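The odd/even split in createSession keeps the two ends of a session from ever using the same nonce with the shared key: the peer with the lexicographically higher permanent key takes odd nonces, the other takes even ones. A minimal standalone sketch of the same idea, using hypothetical fixed-size key/nonce arrays rather than the real boxPubKey/boxNonce types:

package main

import (
    "bytes"
    "fmt"
)

// Hypothetical stand-ins for boxPubKey/boxNonce, just to show the parity rule.
type pubKey [32]byte
type nonce [24]byte

// assignParity forces the last nonce byte odd for the higher key, even for the lower.
func assignParity(n *nonce, mine, theirs pubKey) {
    if bytes.Compare(mine[:], theirs[:]) > 0 {
        n[len(n)-1] |= 0x01 // higher => odd
    } else {
        n[len(n)-1] &= 0xfe // lower => even
    }
}

func main() {
    var a, b pubKey
    a[0], b[0] = 2, 1 // a is "higher"
    var na, nb nonce
    assignParity(&na, a, b)
    assignParity(&nb, b, a)
    fmt.Println(na[23]&1, nb[23]&1) // 1 0: the two sides never collide
}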
func (sinfo *sessionInfo) close() {
    delete(sinfo.core.sessions.sinfos, sinfo.myHandle)
    delete(sinfo.core.sessions.byMySes, sinfo.mySesPub)
    delete(sinfo.core.sessions.byTheirPerm, sinfo.theirPermPub)
    delete(sinfo.core.sessions.addrToPerm, sinfo.theirAddr)
    delete(sinfo.core.sessions.subnetToPerm, sinfo.theirSubnet)
    close(sinfo.send)
    close(sinfo.recv)
}

func (ss *sessions) getPing(sinfo *sessionInfo) sessionPing {
    loc := ss.core.switchTable.getLocator()
    coords := loc.getCoords()
    ref := sessionPing{
        sendPermPub: ss.core.boxPub,
        handle: sinfo.myHandle,
        sendSesPub: sinfo.mySesPub,
        tstamp: time.Now().Unix(),
        coords: coords,
    }
    sinfo.myNonce.update()
    return ref
}

func (ss *sessions) getSharedKey(myPriv *boxPrivKey,
        theirPub *boxPubKey) *boxSharedKey {
    if skey, isIn := ss.permShared[*theirPub]; isIn { return skey }
    // First do some cleanup
    const maxKeys = dht_bucket_number*dht_bucket_size
    for key := range ss.permShared {
        // Remove a random key until the store is small enough
        if len(ss.permShared) < maxKeys { break }
        delete(ss.permShared, key)
    }
    ss.permShared[*theirPub] = getSharedKey(myPriv, theirPub)
    return ss.permShared[*theirPub]
}

func (ss *sessions) ping(sinfo *sessionInfo) {
    ss.sendPingPong(sinfo, false)
}

func (ss *sessions) sendPingPong(sinfo *sessionInfo, isPong bool) {
    ping := ss.getPing(sinfo)
    ping.isPong = isPong
    bs := ping.encode()
    shared := ss.getSharedKey(&ss.core.boxPriv, &sinfo.theirPermPub)
    payload, nonce := boxSeal(shared, bs, nil)
    p := wire_protoTrafficPacket{
        ttl: ^uint64(0),
        coords: sinfo.coords,
        toKey: sinfo.theirPermPub,
        fromKey: ss.core.boxPub,
        nonce: *nonce,
        payload: payload,
    }
    packet := p.encode()
    ss.core.router.out(packet)
}

func (ss *sessions) handlePing(ping *sessionPing) {
    // Get the corresponding session (or create a new session)
    sinfo, isIn := ss.getByTheirPerm(&ping.sendPermPub)
    if !isIn || sinfo.timedout() {
        if isIn { sinfo.close() }
        ss.createSession(&ping.sendPermPub)
        sinfo, isIn = ss.getByTheirPerm(&ping.sendPermPub)
        if !isIn { panic("This should not happen") }
    }
    // Update the session
    if !sinfo.update(ping) { /*panic("Should not happen in testing")*/ return }
    if !ping.isPong { ss.sendPingPong(sinfo, true) }
    if sinfo.packet != nil {
        // send
        var bs []byte
        bs, sinfo.packet = sinfo.packet, nil
        go func() { sinfo.send<-bs }()
    }
}

// Returns the difference between nonces, as a signed value saturated to +-64
func (n *boxNonce) minus(m *boxNonce) int64 {
    diff := int64(0)
    for idx := range n {
        diff *= 256
        diff += int64(n[idx]) - int64(m[idx])
        if diff > 64 { diff = 64 }
        if diff < -64 { diff = -64 }
    }
    return diff
}
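minus treats both nonces as big-endian integers and returns their difference, saturated to ±64 so that a single uint64 bitmask can cover the useful range. A self-contained sketch of the same arithmetic (the 24-byte size matches a NaCl box nonce, but any fixed-size array works):

package main

import "fmt"

type nonce [24]byte

// minus returns (n - m) as a signed value, saturated to the range [-64, 64].
func minus(n, m *nonce) int64 {
    diff := int64(0)
    for idx := range n {
        diff *= 256
        diff += int64(n[idx]) - int64(m[idx])
        if diff > 64 {
            diff = 64
        }
        if diff < -64 {
            diff = -64
        }
    }
    return diff
}

func main() {
    var a, b nonce
    a[23] = 5
    b[23] = 2
    fmt.Println(minus(&a, &b)) // 3
    b[0] = 1                   // b is now astronomically larger...
    fmt.Println(minus(&a, &b)) // ...but the result saturates at -64
}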
func (sinfo *sessionInfo) nonceIsOK(theirNonce *boxNonce) bool {
    // The bitmask is to allow for some non-duplicate out-of-order packets
    diff := theirNonce.minus(&sinfo.theirNonce)
    if diff > 0 { return true } // Newer than anything seen so far
    return ^sinfo.nonceMask & (0x01 << uint64(-diff)) != 0 // OK if the bit is not yet set
}

func (sinfo *sessionInfo) updateNonce(theirNonce *boxNonce) {
    // Shift the nonce mask if needed, then set the bit for this nonce
    diff := theirNonce.minus(&sinfo.theirNonce)
    if diff > 0 {
        // This nonce is the newest seen, so shift the window and update the reference
        sinfo.nonceMask <<= uint64(diff)
        sinfo.nonceMask |= 0x01
        sinfo.theirNonce = *theirNonce
    } else {
        // An older (out-of-order) nonce, just mark it as seen
        sinfo.nonceMask |= 0x01 << uint64(-diff)
    }
}
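Together, nonceIsOK and updateNonce implement a standard sliding-window replay filter: the newest nonce seen anchors the window, and a 64-bit mask tracks which of the previous 64 nonces have already arrived. A self-contained sketch over plain uint64 sequence numbers (rather than boxNonce values) shows the accept/reject behaviour:

package main

import "fmt"

// window is a minimal replay filter: the newest value anchors bit 0 of mask.
type window struct {
    newest uint64
    mask   uint64
}

func (w *window) check(seq uint64) bool {
    if seq > w.newest {
        return true // ahead of the window, always fresh
    }
    diff := w.newest - seq
    if diff >= 64 {
        return false // too old to track, reject
    }
    return w.mask&(1<<diff) == 0 // fresh only if its bit is unset
}

func (w *window) update(seq uint64) {
    if seq > w.newest {
        w.mask <<= (seq - w.newest) // slide the window forward
        w.mask |= 1
        w.newest = seq
    } else {
        w.mask |= 1 << (w.newest - seq)
    }
}

func main() {
    w := window{}
    for _, seq := range []uint64{1, 3, 2, 3, 1} {
        ok := w.check(seq)
        if ok {
            w.update(seq)
        }
        fmt.Println(seq, ok) // 1 true, 3 true, 2 true, 3 false, 1 false
    }
}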
func (ss *sessions) resetInits() {
    for _, sinfo := range ss.sinfos { sinfo.init = false }
}

////////////////////////////////////////////////////////////////////////////////

// This is for a per-session worker
// It handles calling the relatively expensive crypto operations
// It's also responsible for keeping nonces consistent

func (sinfo *sessionInfo) doWorker() {
    for {
        select {
        case p, ok := <-sinfo.recv: if ok { sinfo.doRecv(p) } else { return }
        case bs, ok := <-sinfo.send: if ok { sinfo.doSend(bs) } else { return }
        }
    }
}

func (sinfo *sessionInfo) doSend(bs []byte) {
    defer util_putBytes(bs)
    if !sinfo.init { return } // To prevent using empty session keys
    payload, nonce := boxSeal(&sinfo.sharedSesKey, bs, &sinfo.myNonce)
    defer util_putBytes(payload)
    p := wire_trafficPacket{
        ttl: ^uint64(0),
        coords: sinfo.coords,
        handle: sinfo.theirHandle,
        nonce: *nonce,
        payload: payload,
    }
    packet := p.encode()
    sinfo.core.router.out(packet)
}

func (sinfo *sessionInfo) doRecv(p *wire_trafficPacket) {
    defer util_putBytes(p.payload)
    if !sinfo.nonceIsOK(&p.nonce) { return }
    bs, isOK := boxOpen(&sinfo.sharedSesKey, p.payload, &p.nonce)
    if !isOK { util_putBytes(bs); return }
    sinfo.updateNonce(&p.nonce)
    sinfo.time = time.Now()
    sinfo.core.router.recvPacket(bs, &sinfo.theirAddr)
}
58 src/yggdrasil/signature.go Normal file
@@ -0,0 +1,58 @@
package yggdrasil

// This is where we record which signatures we've previously checked
// It's so we can avoid needlessly checking them again

import "sync"
import "time"

type sigManager struct {
    mutex sync.RWMutex
    checked map[sigBytes]knownSig
    lastCleaned time.Time
}

type knownSig struct {
    bs []byte
    time time.Time
}

func (m *sigManager) init() {
    m.checked = make(map[sigBytes]knownSig)
}

func (m *sigManager) check(key *sigPubKey, sig *sigBytes, bs []byte) bool {
    if m.isChecked(sig, bs) { return true }
    verified := verify(key, bs, sig)
    if verified { m.putChecked(sig, bs) }
    return verified
}

func (m *sigManager) isChecked(sig *sigBytes, bs []byte) bool {
    m.mutex.RLock()
    defer m.mutex.RUnlock()
    k, isIn := m.checked[*sig]
    if !isIn { return false }
    if len(bs) != len(k.bs) { return false }
    for idx := 0; idx < len(bs); idx++ {
        if bs[idx] != k.bs[idx] { return false }
    }
    k.time = time.Now() // NOTE: k is a copy, so this does not refresh the stored entry
    return true
}

func (m *sigManager) putChecked(newsig *sigBytes, bs []byte) {
    m.mutex.Lock()
    defer m.mutex.Unlock()
    now := time.Now()
    if time.Since(m.lastCleaned) > 60*time.Second {
        // Since we have the write lock anyway, do some cleanup
        for s, k := range m.checked {
            if time.Since(k.time) > 60*time.Second { delete(m.checked, s) }
        }
        m.lastCleaned = now
    }
    k := knownSig{bs: bs, time: now}
    m.checked[*newsig] = k
}
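The pattern here is a read-mostly memoization cache guarded by an RWMutex: reads take the shared lock, and the occasional writer piggybacks expiry cleanup on the write lock it already holds. A minimal self-contained sketch of the same pattern using the standard library's ed25519 (the real sigPubKey/sigBytes types wrap ed25519 keys and signatures as well, but this cache keying is simplified for illustration):

package main

import (
    "crypto/ed25519"
    "fmt"
    "sync"
)

// sigCache memoizes successful signature checks, keyed by signature+message.
type sigCache struct {
    mutex   sync.RWMutex
    checked map[string]bool
}

func (c *sigCache) check(pub ed25519.PublicKey, msg, sig []byte) bool {
    c.mutex.RLock()
    done := c.checked[string(sig)+string(msg)]
    c.mutex.RUnlock()
    if done {
        return true // seen before, skip the expensive verify
    }
    if !ed25519.Verify(pub, msg, sig) {
        return false // never cache failures
    }
    c.mutex.Lock()
    c.checked[string(sig)+string(msg)] = true
    c.mutex.Unlock()
    return true
}

func main() {
    pub, priv, _ := ed25519.GenerateKey(nil) // nil reader defaults to crypto/rand
    msg := []byte("hello")
    sig := ed25519.Sign(priv, msg)
    c := sigCache{checked: make(map[string]bool)}
    fmt.Println(c.check(pub, msg, sig)) // true (verified)
    fmt.Println(c.check(pub, msg, sig)) // true (cache hit)
}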
398 src/yggdrasil/switch.go Normal file
@@ -0,0 +1,398 @@
package yggdrasil

// This part constructs a spanning tree of the network
// It routes packets based on distance on the spanning tree
// In general, this is *not* equivalent to routing on the tree
// It falls back to the tree in the worst case, but it can take shortcuts too
// This is the part that makes routing reasonably efficient on scale-free graphs

// TODO document/comment everything in a lot more detail

// TODO? use a pre-computed lookup table (python version had this)
// A little annoying to do with constant changes from bandwidth estimates

import "time"
import "sync"
import "sync/atomic"

//import "fmt"

const switch_timeout = time.Minute

// You should be able to provide crypto signatures for this
// 1 signature per coord, from the *sender* to that coord
// E.g. A->B->C has sigA(A->B) and sigB(A->B->C)
type switchLocator struct {
    root sigPubKey
    tstamp int64
    coords []switchPort
}

func firstIsBetter(first, second *sigPubKey) bool {
    // Higher TreeID is better
    ftid := getTreeID(first)
    stid := getTreeID(second)
    for idx := 0; idx < len(ftid); idx++ {
        if ftid[idx] == stid[idx] { continue }
        return ftid[idx] > stid[idx]
    }
    // Edge case, when comparing identical IDs
    return false
}

func (l *switchLocator) clone() switchLocator {
    // Used to create a deep copy for use in messages
    // Copy required because we need to mutate coords before sending
    // (By appending the port from us to the destination)
    loc := *l
    loc.coords = make([]switchPort, len(l.coords), len(l.coords)+1)
    copy(loc.coords, l.coords)
    return loc
}

func (l *switchLocator) dist(dest []byte) int {
    // Returns distance (on the tree) from these coords
    offset := 0
    fdc := 0
    for {
        if fdc >= len(l.coords) { break }
        coord, length := wire_decode_uint64(dest[offset:])
        if length == 0 { break }
        if l.coords[fdc] != switchPort(coord) { break }
        fdc++
        offset += length
    }
    dist := len(l.coords[fdc:])
    for {
        _, length := wire_decode_uint64(dest[offset:])
        if length == 0 { break }
        dist++
        offset += length
    }
    return dist
}
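dist computes hop distance through the tree: strip the longest common prefix of the two coordinate lists, then count what remains on both sides. A self-contained sketch over plain uint64 coordinate slices (skipping the wire encoding the real function has to parse):

package main

import "fmt"

// treeDist returns the tree distance between two coordinate paths:
// hops up from 'from' to the common ancestor, plus hops down to 'to'.
func treeDist(from, to []uint64) int {
    common := 0
    for common < len(from) && common < len(to) && from[common] == to[common] {
        common++
    }
    return (len(from) - common) + (len(to) - common)
}

func main() {
    a := []uint64{1, 2, 3}      // root -> port 1 -> port 2 -> port 3
    b := []uint64{1, 2, 5, 7}   // shares the prefix [1 2]
    fmt.Println(treeDist(a, b)) // 3: one hop up from a, two hops down to b
    fmt.Println(treeDist(a, a)) // 0
}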
func (l *switchLocator) getCoords() []byte {
    bs := make([]byte, 0, len(l.coords))
    for _, coord := range l.coords {
        c := wire_encode_uint64(uint64(coord))
        bs = append(bs, c...)
    }
    return bs
}

func (x *switchLocator) isAncestorOf(y *switchLocator) bool {
    if x.root != y.root { return false }
    if len(x.coords) > len(y.coords) { return false }
    for idx := range x.coords {
        if x.coords[idx] != y.coords[idx] { return false }
    }
    return true
}

type peerInfo struct {
    key sigPubKey // ID of this peer
    locator switchLocator // Should be able to respond with signatures upon request
    degree uint64 // Self-reported degree
    coords []switchPort // Coords of this peer (taken from coords of the sent locator)
    time time.Time // Time this node was last seen
    firstSeen time.Time
    port switchPort // Interface number of this peer
    seq uint64 // Seq number we last saw this peer advertise
}

type switchMessage struct {
    from sigPubKey // key of the sender
    locator switchLocator // Locator advertised for the receiver, not the sender's loc!
    seq uint64
}

type switchPort uint64
type tableElem struct {
    locator switchLocator
    firstSeen time.Time
}

type lookupTable struct {
    self switchLocator
    elems map[switchPort]tableElem
}

type switchData struct {
    // All data that's mutable and used by exported Table methods
    // To be read/written with atomic.Value Store/Load calls
    locator switchLocator
    seq uint64 // Sequence number, reported to peers, so they know about changes
    peers map[switchPort]peerInfo
    sigs []sigInfo
}

type switchTable struct {
    core *Core
    key sigPubKey // Our own key
    time time.Time // Time when locator.tstamp was last updated
    parent switchPort // Port of whatever peer is our parent, or self if we're root
    drop map[sigPubKey]int64 // Tstamp associated with a dropped root
    mutex sync.RWMutex // Lock for reads/writes of switchData
    data switchData
    updater atomic.Value //*sync.Once
    table atomic.Value //lookupTable
}

func (t *switchTable) init(core *Core, key sigPubKey) {
    now := time.Now()
    t.core = core
    t.key = key
    locator := switchLocator{root: key, tstamp: now.Unix()}
    peers := make(map[switchPort]peerInfo)
    t.data = switchData{locator: locator, peers: peers}
    t.updater.Store(&sync.Once{})
    t.table.Store(lookupTable{elems: make(map[switchPort]tableElem)})
    t.drop = make(map[sigPubKey]int64)
    doTicker := func() {
        ticker := time.NewTicker(time.Second)
        defer ticker.Stop()
        for {
            <-ticker.C
            t.Tick()
        }
    }
    go doTicker()
}

func (t *switchTable) getLocator() switchLocator {
    t.mutex.RLock()
    defer t.mutex.RUnlock()
    return t.data.locator.clone()
}

func (t *switchTable) Tick() {
    // Periodic maintenance work to keep things internally consistent
    t.mutex.Lock() // Write lock
    defer t.mutex.Unlock() // Release lock when we're done
    t.cleanRoot()
    t.cleanPeers()
    t.cleanDropped()
}

func (t *switchTable) cleanRoot() {
    // TODO rethink how this is done?...
    // Get rid of the root if it looks like it's timed out
    now := time.Now()
    doUpdate := false
    //fmt.Println("DEBUG clean root:", now.Sub(t.time))
    if now.Sub(t.time) > switch_timeout {
        //fmt.Println("root timed out", t.data.locator)
        dropped := t.data.peers[t.parent]
        dropped.time = t.time // NOTE: dropped is a copy, this is not written back
        t.drop[t.data.locator.root] = t.data.locator.tstamp
        doUpdate = true
        //t.core.log.Println("DEBUG: switch root timeout", len(t.drop))
    }
    // Or, if we're better than our root, root ourself
    if firstIsBetter(&t.key, &t.data.locator.root) {
        //fmt.Println("root is worse than us", t.data.locator.Root)
        doUpdate = true
        //t.core.log.Println("DEBUG: switch root replace with self", t.data.locator.Root)
    }
    // Or, if we are the root, possibly update our timestamp
    if t.data.locator.root == t.key &&
            now.Sub(t.time) > switch_timeout/2 {
        //fmt.Println("root is self and old, updating", t.data.locator.Root)
        doUpdate = true
    }
    if doUpdate {
        t.parent = switchPort(0)
        t.time = now
        if t.data.locator.root != t.key {
            t.data.seq++
            t.updater.Store(&sync.Once{})
            select {
            case t.core.router.reset<-struct{}{}:
            default:
            }
        }
        t.data.locator = switchLocator{root: t.key, tstamp: now.Unix()}
        t.data.sigs = nil
    }
}

func (t *switchTable) cleanPeers() {
    now := time.Now()
    changed := false
    for idx, info := range t.data.peers {
        if info.port != switchPort(0) && now.Sub(info.time) > 6*time.Second /*switch_timeout*/ {
            //fmt.Println("peer timed out", t.key, info.locator)
            delete(t.data.peers, idx)
            changed = true
        }
    }
    if changed { t.updater.Store(&sync.Once{}) }
}

func (t *switchTable) cleanDropped() {
    // TODO only call this after root changes, not periodically
    for root := range t.drop {
        if !firstIsBetter(&root, &t.data.locator.root) { delete(t.drop, root) }
    }
}

func (t *switchTable) createMessage(port switchPort) (*switchMessage, []sigInfo) {
    t.mutex.RLock()
    defer t.mutex.RUnlock()
    msg := switchMessage{from: t.key, locator: t.data.locator.clone()}
    msg.locator.coords = append(msg.locator.coords, port)
    msg.seq = t.data.seq
    return &msg, t.data.sigs
}

func (t *switchTable) handleMessage(msg *switchMessage, fromPort switchPort, sigs []sigInfo) {
    t.mutex.Lock()
    defer t.mutex.Unlock()
    now := time.Now()
    if len(msg.locator.coords) == 0 { return } // Should always have >=1 links
    oldSender, isIn := t.data.peers[fromPort]
    if !isIn { oldSender.firstSeen = now }
    sender := peerInfo{key: msg.from,
        locator: msg.locator,
        coords: msg.locator.coords[:len(msg.locator.coords)-1],
        time: now,
        firstSeen: oldSender.firstSeen,
        port: fromPort,
        seq: msg.seq}
    equiv := func(x *switchLocator, y *switchLocator) bool {
        if x.root != y.root { return false }
        if len(x.coords) != len(y.coords) { return false }
        for idx := range x.coords {
            if x.coords[idx] != y.coords[idx] { return false }
        }
        return true
    }
    doUpdate := false
    if !equiv(&msg.locator, &oldSender.locator) {
        doUpdate = true
        sender.firstSeen = now
    }
    t.data.peers[fromPort] = sender
    updateRoot := false
    oldParent, isIn := t.data.peers[t.parent]
    noParent := !isIn
    noLoop := func() bool {
        for idx := 0; idx < len(sigs)-1; idx++ {
            if sigs[idx].next == t.core.sigPub { return false }
        }
        if msg.locator.root == t.core.sigPub { return false }
        return true
    }()
    sTime := now.Sub(sender.firstSeen)
    pTime := oldParent.time.Sub(oldParent.firstSeen) + switch_timeout
    // Really want to compare sLen/sTime and pLen/pTime
    // Cross multiplied to avoid divide-by-zero
    cost := len(msg.locator.coords)*int(pTime.Seconds())
    pCost := len(t.data.locator.coords)*int(sTime.Seconds())
    dropTstamp, isIn := t.drop[msg.locator.root]
    // Here be dragons
    switch {
    case !noLoop: // do nothing
    case isIn && dropTstamp >= msg.locator.tstamp: // do nothing
    case firstIsBetter(&msg.locator.root, &t.data.locator.root): updateRoot = true
    case t.data.locator.root != msg.locator.root: // do nothing
    case t.data.locator.tstamp > msg.locator.tstamp: // do nothing
    case noParent: updateRoot = true
    case cost < pCost: updateRoot = true
    case sender.port == t.parent &&
        (msg.locator.tstamp > t.data.locator.tstamp ||
            !equiv(&msg.locator, &t.data.locator)): updateRoot = true
    }
    if updateRoot {
        if !equiv(&msg.locator, &t.data.locator) {
            doUpdate = true
            t.data.seq++
            select {
            case t.core.router.reset<-struct{}{}:
            default:
            }
            //t.core.log.Println("Switch update:", msg.Locator.Root, msg.Locator.Tstamp, msg.Locator.Coords)
            //fmt.Println("Switch update:", msg.Locator.Root, msg.Locator.Tstamp, msg.Locator.Coords)
        }
        if t.data.locator.tstamp != msg.locator.tstamp { t.time = now }
        t.data.locator = msg.locator
        t.parent = sender.port
        t.data.sigs = sigs
        //t.core.log.Println("Switch update:", msg.Locator.Root, msg.Locator.Tstamp, msg.Locator.Coords)
    }
    if doUpdate { t.updater.Store(&sync.Once{}) }
    return
}

func (t *switchTable) updateTable() {
    // WARNING this should only be called from within t.data.updater.Do()
    // It relies on the sync.Once for synchronization with messages and lookups
    // TODO use a pre-computed faster lookup table
    // Instead of checking distance for every destination every time
    // Array of structs, indexed by first coord that differs from self
    // Each struct stores the best port to forward to, and a next coord map
    // Move to struct, then iterate over coord maps until you dead end
    // The last port before the dead end should be the closest
    t.mutex.RLock()
    defer t.mutex.RUnlock()
    newTable := lookupTable{
        self: t.data.locator.clone(),
        elems: make(map[switchPort]tableElem),
    }
    for _, pinfo := range t.data.peers {
        //if !pinfo.forward { continue }
        loc := pinfo.locator.clone()
        loc.coords = loc.coords[:len(loc.coords)-1] // Remove the them->self link
        newTable.elems[pinfo.port] = tableElem{
            locator: loc,
            //degree: pinfo.degree,
            firstSeen: pinfo.firstSeen,
            //forward: pinfo.forward,
        }
    }
    t.table.Store(newTable)
}

func (t *switchTable) lookup(dest []byte, ttl uint64) (switchPort, uint64) {
    t.updater.Load().(*sync.Once).Do(t.updateTable)
    table := t.table.Load().(lookupTable)
    ports := t.core.peers.getPorts()
    getBandwidth := func(port switchPort) float64 {
        var bandwidth float64
        if p, isIn := ports[port]; isIn {
            bandwidth = p.getBandwidth()
        }
        return bandwidth
    }
    var best switchPort
    myDist := table.self.dist(dest) //getDist(table.self.coords)
    if !(uint64(myDist) < ttl) { return 0, 0 }
    // score is in units of bandwidth / distance
    bestScore := float64(-1)
    for port, info := range table.elems {
        if info.locator.root != table.self.root { continue }
        dist := info.locator.dist(dest) //getDist(info.locator.coords)
        if !(dist < myDist) { continue }
        score := getBandwidth(port)
        score /= float64(1+dist)
        if score > bestScore {
            best = port
            bestScore = score
        }
    }
    //t.core.log.Println("DEBUG: sending to", best, "bandwidth", getBandwidth(best))
    return best, uint64(myDist)
}
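lookup greedily forwards toward whichever peer is strictly closer to the destination on the tree, breaking ties by bandwidth: score = bandwidth / (1 + dist). A self-contained sketch of just that selection step, with distances and bandwidths supplied directly instead of derived from locators:

package main

import "fmt"

type candidate struct {
    port      uint64
    dist      int     // tree distance from this peer to the destination
    bandwidth float64 // estimated link bandwidth to this peer
}

// pick returns the best next hop among peers strictly closer than we are.
func pick(myDist int, peers []candidate) (uint64, bool) {
    best, bestScore, found := uint64(0), float64(-1), false
    for _, c := range peers {
        if c.dist >= myDist {
            continue // never route away from (or parallel to) the destination
        }
        score := c.bandwidth / float64(1+c.dist)
        if score > bestScore {
            best, bestScore, found = c.port, score, true
        }
    }
    return best, found
}

func main() {
    peers := []candidate{
        {port: 1, dist: 2, bandwidth: 10}, // score 10/3
        {port: 2, dist: 1, bandwidth: 4},  // score 4/2 = 2
        {port: 3, dist: 4, bandwidth: 99}, // not closer, ignored
    }
    port, ok := pick(3, peers)
    fmt.Println(port, ok) // 1 true: higher bandwidth beats shorter distance here
}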
////////////////////////////////////////////////////////////////////////////////

// Signature stuff

type sigInfo struct {
    next sigPubKey
    sig sigBytes
}

////////////////////////////////////////////////////////////////////////////////
246 src/yggdrasil/tcp.go Normal file
@@ -0,0 +1,246 @@
package yggdrasil

// This sends packets to peers using TCP as a transport
// It's generally better tested than the UDP implementation
// Using it regularly is insane, but I find TCP easier to test/debug with
// Updating and optimizing the UDP version is a higher priority

// TODO:
// Something needs to make sure we're getting *valid* packets
// Could be used to DoS (connect, give someone else's keys, spew garbage)
// I guess the "peer" part should watch for link packets, disconnect?

import "net"
import "time"
import "errors"
import "sync"
import "fmt"

const tcp_msgSize = 2048+65535 // TODO figure out what makes sense

type tcpInterface struct {
    core *Core
    serv *net.TCPListener
    mutex sync.Mutex // Protecting the below
    calls map[string]struct{}
}

type tcpKeys struct {
    box boxPubKey
    sig sigPubKey
}

func (iface *tcpInterface) init(core *Core, addr string) {
    iface.core = core
    tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
    if err != nil { panic(err) }
    iface.serv, err = net.ListenTCP("tcp", tcpAddr)
    if err != nil { panic(err) }
    iface.calls = make(map[string]struct{})
    go iface.listener()
}

func (iface *tcpInterface) listener() {
    defer iface.serv.Close()
    iface.core.log.Println("Listening on:", iface.serv.Addr().String())
    for {
        sock, err := iface.serv.AcceptTCP()
        if err != nil { panic(err) }
        go iface.handler(sock)
    }
}

func (iface *tcpInterface) call(saddr string) {
    go func() {
        quit := false
        iface.mutex.Lock()
        if _, isIn := iface.calls[saddr]; isIn {
            quit = true
        } else {
            iface.calls[saddr] = struct{}{}
            defer func() {
                iface.mutex.Lock()
                delete(iface.calls, saddr)
                iface.mutex.Unlock()
            }()
        }
        iface.mutex.Unlock()
        if !quit {
            conn, err := net.DialTimeout("tcp", saddr, 6*time.Second)
            if err != nil { return }
            sock := conn.(*net.TCPConn)
            iface.handler(sock)
        }
    }()
}

func (iface *tcpInterface) handler(sock *net.TCPConn) {
    defer sock.Close()
    // Get our keys
    keys := []byte{}
    keys = append(keys, tcp_key[:]...)
    keys = append(keys, iface.core.boxPub[:]...)
    keys = append(keys, iface.core.sigPub[:]...)
    _, err := sock.Write(keys)
    if err != nil { return }
    timeout := time.Now().Add(6*time.Second)
    sock.SetReadDeadline(timeout)
    n, err := sock.Read(keys)
    if err != nil { return }
    if n < len(keys) { /*panic("Partial key packet?")*/ return }
    ks := tcpKeys{}
    if !tcp_chop_keys(&ks.box, &ks.sig, &keys) { /*panic("Invalid key packet?")*/ return }
    // Quit the parent call if this is a connection to ourself
    equiv := func(k1, k2 []byte) bool {
        for idx := range k1 {
            if k1[idx] != k2[idx] { return false }
        }
        return true
    }
    if equiv(ks.box[:], iface.core.boxPub[:]) { return } // testing
    if equiv(ks.sig[:], iface.core.sigPub[:]) { return }
    // Note that multiple connections to the same node are allowed
    // E.g. over different interfaces
    linkIn := make(chan []byte, 1)
    p := iface.core.peers.newPeer(&ks.box, &ks.sig) //, in, out)
    in := func(bs []byte) {
        p.handlePacket(bs, linkIn)
    }
    out := make(chan []byte, 1024) // TODO? what size makes sense
    defer close(out)
    go func() {
        var stack [][]byte
        put := func(msg []byte) {
            stack = append(stack, msg)
            for len(stack) > 1024 {
                util_putBytes(stack[0])
                stack = stack[1:]
            }
        }
        send := func() {
            msg := stack[len(stack)-1]
            stack = stack[:len(stack)-1]
            buf := net.Buffers{tcp_msg[:],
                wire_encode_uint64(uint64(len(msg))),
                msg}
            size := 0
            for _, bs := range buf { size += len(bs) }
            start := time.Now()
            buf.WriteTo(sock)
            timed := time.Since(start)
            pType, _ := wire_decode_uint64(msg)
            if pType == wire_LinkProtocolTraffic {
                p.updateBandwidth(size, timed)
            }
            util_putBytes(msg)
        }
        for msg := range out {
            put(msg)
            for len(stack) > 0 {
                // Keep trying to fill the stack (LIFO order) while sending
                select {
                case msg, ok := <-out:
                    if !ok { return }
                    put(msg)
                default: send()
                }
            }
        }
    }()
    p.out = func(msg []byte) {
        defer func() { recover() }()
        for {
            select {
            case out<-msg: return
            default: util_putBytes(<-out)
            }
        }
    }
    sock.SetNoDelay(true)
    go p.linkLoop(linkIn)
    defer func() {
        // Put all of our cleanup here...
        p.core.peers.mutex.Lock()
        oldPorts := p.core.peers.getPorts()
        newPorts := make(map[switchPort]*peer)
        for k, v := range oldPorts { newPorts[k] = v }
        delete(newPorts, p.port)
        p.core.peers.putPorts(newPorts)
        p.core.peers.mutex.Unlock()
        close(linkIn)
    }()
    them := sock.RemoteAddr()
    themNodeID := getNodeID(&ks.box)
    themAddr := address_addrForNodeID(themNodeID)
    themAddrString := net.IP(themAddr[:]).String()
    themString := fmt.Sprintf("%s@%s", themAddrString, them)
    iface.core.log.Println("Connected:", themString)
    iface.reader(sock, in) // In this goroutine, because of defers
    iface.core.log.Println("Disconnected:", themString)
    return
}

func (iface *tcpInterface) reader(sock *net.TCPConn, in func([]byte)) {
    bs := make([]byte, 2*tcp_msgSize)
    frag := bs[:0]
    for {
        timeout := time.Now().Add(6*time.Second)
        sock.SetReadDeadline(timeout)
        n, err := sock.Read(bs[len(frag):])
        if err != nil || n == 0 { break }
        frag = bs[:len(frag)+n]
        for {
            msg, ok, err := tcp_chop_msg(&frag)
            if err != nil { return }
            if !ok { break } // We didn't get the whole message yet
            newMsg := append(util_getBytes(), msg...)
            in(newMsg)
            util_yield()
        }
        frag = append(bs[:0], frag...)
    }
}

////////////////////////////////////////////////////////////////////////////////

// Magic bytes to check
var tcp_key = [...]byte{'k', 'e', 'y', 's'}
var tcp_msg = [...]byte{0xde, 0xad, 0xb1, 0x75} // "dead bits"

func tcp_chop_keys(box *boxPubKey, sig *sigPubKey, bs *[]byte) bool {
    // This one is pretty simple: we know how long the message should be
    // So don't call this with a message that's too short
    if len(*bs) < len(tcp_key)+len(*box)+len(*sig) { return false }
    for idx := range tcp_key {
        if (*bs)[idx] != tcp_key[idx] { return false }
    }
    (*bs) = (*bs)[len(tcp_key):]
    copy(box[:], *bs)
    (*bs) = (*bs)[len(box):]
    copy(sig[:], *bs)
    (*bs) = (*bs)[len(sig):]
    return true
}

func tcp_chop_msg(bs *[]byte) ([]byte, bool, error) {
    // Returns msg, ok, err
    if len(*bs) < len(tcp_msg) { return nil, false, nil }
    for idx := range tcp_msg {
        if (*bs)[idx] != tcp_msg[idx] {
            return nil, false, errors.New("Bad message!")
        }
    }
    msgLen, msgLenLen := wire_decode_uint64((*bs)[len(tcp_msg):])
    if msgLen > tcp_msgSize { return nil, false, errors.New("Oversized message!") }
    msgBegin := len(tcp_msg) + msgLenLen
    msgEnd := msgBegin + int(msgLen)
    if msgLenLen == 0 || len(*bs) < msgEnd {
        // We don't have the full message
        // Need to buffer this and wait for the rest to come in
        return nil, false, nil
    }
    msg := (*bs)[msgBegin:msgEnd]
    (*bs) = (*bs)[msgEnd:]
    return msg, true, nil
}
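Each TCP message on the wire is framed as magic bytes, a varint length, then the payload; tcp_chop_msg peels complete frames off a growing buffer and reports "not yet" for partial ones. A self-contained sketch of the same framing, using a one-byte length field for brevity (lengths under 128 encode identically in the real varint scheme):

package main

import (
    "bytes"
    "fmt"
)

var magic = []byte{0xde, 0xad, 0xb1, 0x75}

// frame prepends the magic and a one-byte length (payloads < 128 bytes only).
func frame(payload []byte) []byte {
    out := append([]byte{}, magic...)
    out = append(out, byte(len(payload)))
    return append(out, payload...)
}

// chop removes one complete frame from *buf, if present.
func chop(buf *[]byte) ([]byte, bool, error) {
    if len(*buf) < len(magic)+1 {
        return nil, false, nil // need more bytes
    }
    if !bytes.HasPrefix(*buf, magic) {
        return nil, false, fmt.Errorf("bad magic")
    }
    end := len(magic) + 1 + int((*buf)[len(magic)])
    if len(*buf) < end {
        return nil, false, nil // partial frame, wait for more
    }
    msg := (*buf)[len(magic)+1 : end]
    *buf = (*buf)[end:]
    return msg, true, nil
}

func main() {
    buf := append(frame([]byte("hello")), frame([]byte("world"))[:5]...) // second frame cut short
    for {
        msg, ok, err := chop(&buf)
        if err != nil || !ok {
            fmt.Println("stop:", err) // stop: <nil> (waiting on the partial frame)
            return
        }
        fmt.Printf("%s\n", msg) // hello
    }
}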
56 src/yggdrasil/tun.go Normal file
@@ -0,0 +1,56 @@
package yggdrasil

// This manages the tun driver to send/recv packets to/from applications

import water "github.com/songgao/water"

const IPv6_HEADER_LENGTH = 40

type tunDevice struct {
    core *Core
    send chan<- []byte
    recv <-chan []byte
    mtu int
    iface *water.Interface
}

func (tun *tunDevice) init(core *Core) {
    tun.core = core
}

func (tun *tunDevice) setup(addr string, mtu int) error {
    iface, err := water.New(water.Config{DeviceType: water.TUN})
    if err != nil { panic(err) }
    tun.iface = iface
    tun.mtu = mtu //1280 // Let's default to the smallest thing allowed for now
    return tun.setupAddress(addr)
}

func (tun *tunDevice) write() error {
    for {
        data := <-tun.recv
        if _, err := tun.iface.Write(data); err != nil { return err }
        util_putBytes(data)
    }
}

func (tun *tunDevice) read() error {
    buf := make([]byte, tun.mtu)
    for {
        n, err := tun.iface.Read(buf)
        if err != nil { return err }
        if buf[0]&0xf0 != 0x60 ||
                n != 256*int(buf[4])+int(buf[5])+IPv6_HEADER_LENGTH {
            // Either not an IPv6 packet or not the complete packet for some reason
            panic("Should not happen in testing")
            continue // Unreachable while the panic above is in place
        }
        packet := append(util_getBytes(), buf[:n]...)
        tun.send<-packet
    }
}

func (tun *tunDevice) close() error {
    return tun.iface.Close()
}
36 src/yggdrasil/tun_linux.go Normal file
@@ -0,0 +1,36 @@
package yggdrasil

// The Linux platform-specific tun parts
// It depends on iproute2 being installed to set things on the tun device

import "fmt"
import "os/exec"
import "strings"

func (tun *tunDevice) setupAddress(addr string) error {
    // Set address
    cmd := exec.Command("ip", "-f", "inet6",
        "addr", "add", addr,
        "dev", tun.iface.Name())
    tun.core.log.Printf("ip command: %v", strings.Join(cmd.Args, " "))
    output, err := cmd.CombinedOutput()
    if err != nil {
        tun.core.log.Printf("Linux ip failed: %v.", err)
        tun.core.log.Println(string(output))
        return err
    }
    // Set MTU and bring device up
    cmd = exec.Command("ip", "link", "set",
        "dev", tun.iface.Name(),
        "mtu", fmt.Sprintf("%d", tun.mtu),
        "up")
    tun.core.log.Printf("ip command: %v", strings.Join(cmd.Args, " "))
    output, err = cmd.CombinedOutput()
    if err != nil {
        tun.core.log.Printf("Linux ip failed: %v.", err)
        tun.core.log.Println(string(output))
        return err
    }
    return nil
}
12 src/yggdrasil/tun_other.go Normal file
@@ -0,0 +1,12 @@
// +build !linux

package yggdrasil

// This is to catch unsupported platforms
// If your platform supports tun devices, you could try configuring it manually

func (tun *tunDevice) setupAddress(addr string) error {
    tun.core.log.Println("Platform not supported, you must set the address of", tun.iface.Name(), "to", addr)
    return nil
}
275 src/yggdrasil/udp.go Normal file
@@ -0,0 +1,275 @@
package yggdrasil

// This communicates with peers via UDP
// It's not as well tested or debugged as the TCP transport
// It's intended to use UDP, so debugging/optimizing this is a high priority
// TODO? use golang.org/x/net/ipv6.PacketConn's ReadBatch and WriteBatch?
// To send all chunks of a message / recv all available chunks in one syscall
// Chunks are currently merged, but outgoing messages aren't chunked
// This is just to support chunking in the future, if it's needed and debugged
// Basically, right now we might send UDP packets that are too large

import "net"
import "time"
import "sync"
import "fmt"

type udpInterface struct {
    core *Core
    sock *net.UDPConn // Or more general PacketConn?
    mutex sync.RWMutex // each conn has an owner goroutine
    conns map[connAddr]*connInfo
}

type connAddr string // TODO something more efficient, but still a valid map key
type connInfo struct {
    addr connAddr
    peer *peer
    linkIn chan []byte
    keysIn chan *udpKeys
    timeout int // count of how many heartbeats have been missed
    in func([]byte)
    out chan []byte
    countIn uint8
    countOut uint8
}

type udpKeys struct {
    box boxPubKey
    sig sigPubKey
}

func (iface *udpInterface) init(core *Core, addr string) {
    iface.core = core
    udpAddr, err := net.ResolveUDPAddr("udp", addr)
    if err != nil { panic(err) }
    iface.sock, err = net.ListenUDP("udp", udpAddr)
    if err != nil { panic(err) }
    iface.conns = make(map[connAddr]*connInfo)
    go iface.reader()
}

func (iface *udpInterface) sendKeys(addr connAddr) {
    udpAddr, err := net.ResolveUDPAddr("udp", string(addr))
    if err != nil { panic(err) }
    msg := []byte{}
    msg = udp_encode(msg, 0, 0, 0, nil)
    msg = append(msg, iface.core.boxPub[:]...)
    msg = append(msg, iface.core.sigPub[:]...)
    iface.sock.WriteToUDP(msg, udpAddr)
}

func udp_isKeys(msg []byte) bool {
    keyLen := 3 + boxPubKeyLen + sigPubKeyLen
    return len(msg) == keyLen && msg[0] == 0x00
}

func (iface *udpInterface) startConn(info *connInfo) {
    ticker := time.NewTicker(6*time.Second)
    defer ticker.Stop()
    defer func() {
        // Cleanup
        // FIXME this still leaks a peer struct
        iface.mutex.Lock()
        delete(iface.conns, info.addr)
        iface.mutex.Unlock()
        iface.core.peers.mutex.Lock()
        oldPorts := iface.core.peers.getPorts()
        newPorts := make(map[switchPort]*peer)
        for k, v := range oldPorts { newPorts[k] = v }
        delete(newPorts, info.peer.port)
        iface.core.peers.putPorts(newPorts)
        iface.core.peers.mutex.Unlock()
        close(info.linkIn)
        close(info.keysIn)
        close(info.out)
        iface.core.log.Println("Removing peer:", info.addr)
    }()
    for {
        select {
        case ks := <-info.keysIn: {
            // FIXME? need signatures/sequence-numbers or something
            // Spoofers could lock out a peer with fake/bad keys
            if ks.box == info.peer.box && ks.sig == info.peer.sig {
                info.timeout = 0
            }
        }
        case <-ticker.C: {
            if info.timeout > 10 { return }
            info.timeout++
            iface.sendKeys(info.addr)
        }
        }
    }
}

func (iface *udpInterface) handleKeys(msg []byte, addr connAddr) {
    //defer util_putBytes(msg)
    var ks udpKeys
    _, _, _, bs := udp_decode(msg)
    switch {
    case !wire_chop_slice(ks.box[:], &bs): return
    case !wire_chop_slice(ks.sig[:], &bs): return
    }
    if ks.box == iface.core.boxPub { return }
    if ks.sig == iface.core.sigPub { return }
    iface.mutex.RLock()
    conn, isIn := iface.conns[addr]
    iface.mutex.RUnlock() // TODO? keep the lock longer?...
    if !isIn {
        udpAddr, err := net.ResolveUDPAddr("udp", string(addr))
        if err != nil { panic(err) }
        conn = &connInfo{
            addr: connAddr(addr),
            peer: iface.core.peers.newPeer(&ks.box, &ks.sig),
            linkIn: make(chan []byte, 1),
            keysIn: make(chan *udpKeys, 1),
            out: make(chan []byte, 1024),
        }
        /*
        conn.in = func(msg []byte) { conn.peer.handlePacket(msg, conn.linkIn) }
        conn.peer.out = func(msg []byte) {
            start := time.Now()
            iface.sock.WriteToUDP(msg, udpAddr)
            timed := time.Since(start)
            conn.peer.updateBandwidth(len(msg), timed)
            util_putBytes(msg)
        } // Old version, always one syscall per packet
        //*/
        /*
        conn.peer.out = func(msg []byte) {
            defer func() { recover() }()
            select {
            case conn.out<-msg:
            default: util_putBytes(msg)
            }
        }
        go func() {
            for msg := range conn.out {
                start := time.Now()
                iface.sock.WriteToUDP(msg, udpAddr)
                timed := time.Since(start)
                conn.peer.updateBandwidth(len(msg), timed)
                util_putBytes(msg)
            }
        }()
        //*/
        //*
        var inChunks uint8
        var inBuf []byte
        conn.in = func(bs []byte) {
            //defer util_putBytes(bs)
            chunks, chunk, count, payload := udp_decode(bs)
            //iface.core.log.Println("DEBUG:", addr, chunks, chunk, count, len(payload))
            //iface.core.log.Println("DEBUG: payload:", payload)
            if count != conn.countIn {
                inChunks = 0
                inBuf = inBuf[:0]
                conn.countIn = count
            }
            if chunk <= chunks && chunk == inChunks+1 {
                //iface.core.log.Println("GOING:", addr, chunks, chunk, count, len(payload))
                inChunks += 1
                inBuf = append(inBuf, payload...)
                if chunks != chunk { return }
                msg := append(util_getBytes(), inBuf...)
                conn.peer.handlePacket(msg, conn.linkIn)
                //iface.core.log.Println("DONE:", addr, chunks, chunk, count, len(payload))
            }
        }
        conn.peer.out = func(msg []byte) {
            defer func() { recover() }()
            select {
            case conn.out<-msg:
            default: util_putBytes(msg)
            }
        }
        go func() {
            //var chunks [][]byte
            var out []byte
            for msg := range conn.out {
                var chunks [][]byte
                bs := msg
                for len(bs) > udp_chunkSize {
                    chunks, bs = append(chunks, bs[:udp_chunkSize]), bs[udp_chunkSize:]
                }
                chunks = append(chunks, bs)
                //iface.core.log.Println("DEBUG: out chunks:", len(chunks), len(msg))
                if len(chunks) > 255 { continue }
                start := time.Now()
                for idx, bs := range chunks {
                    nChunks, nChunk, count := uint8(len(chunks)), uint8(idx)+1, conn.countOut
                    out = udp_encode(out[:0], nChunks, nChunk, count, bs)
                    //iface.core.log.Println("DEBUG out:", nChunks, nChunk, count, len(bs))
                    iface.sock.WriteToUDP(out, udpAddr)
                }
                timed := time.Since(start)
                conn.countOut += 1
                conn.peer.updateBandwidth(len(msg), timed)
                util_putBytes(msg)
            }
        }()
        //*/
        iface.mutex.Lock()
        iface.conns[addr] = conn
        iface.mutex.Unlock()
        themNodeID := getNodeID(&ks.box)
        themAddr := address_addrForNodeID(themNodeID)
        themAddrString := net.IP(themAddr[:]).String()
        themString := fmt.Sprintf("%s@%s", themAddrString, addr)
        iface.core.log.Println("Adding peer:", themString)
        go iface.startConn(conn)
        go conn.peer.linkLoop(conn.linkIn)
        iface.sendKeys(conn.addr)
    }
    func() {
        defer func() { recover() }()
        select {
        case conn.keysIn<-&ks:
        default:
        }
    }()
}

func (iface *udpInterface) handlePacket(msg []byte, addr connAddr) {
    iface.mutex.RLock()
    if conn, isIn := iface.conns[addr]; isIn {
        conn.in(msg)
    }
    iface.mutex.RUnlock()
}

func (iface *udpInterface) reader() {
    bs := make([]byte, 2048) // This needs to be large enough for everything...
    for {
        //iface.core.log.Println("Starting read")
        n, udpAddr, err := iface.sock.ReadFromUDP(bs)
        //iface.core.log.Println("Read", n, udpAddr.String(), err)
        if err != nil { panic(err) }
        if n > 1500 { panic(n) }
        //msg := append(util_getBytes(), bs[:n]...)
        msg := bs[:n]
        addr := connAddr(udpAddr.String())
        if udp_isKeys(msg) {
            iface.handleKeys(msg, addr)
        } else {
            iface.handlePacket(msg, addr)
        }
    }
}

////////////////////////////////////////////////////////////////////////////////

const udp_chunkSize = 65535

func udp_decode(bs []byte) (chunks, chunk, count uint8, payload []byte) {
    if len(bs) >= 3 {
        chunks, chunk, count, payload = bs[0], bs[1], bs[2], bs[3:]
    }
    return
}

func udp_encode(out []byte, chunks, chunk, count uint8, payload []byte) []byte {
    return append(append(out, chunks, chunk, count), payload...)
}
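The 3-byte UDP header is (total chunks, chunk index starting at 1, message counter); the receiver reassembles only while chunks arrive in order under the same counter. A self-contained round-trip of the header itself:

package main

import "fmt"

// encode prepends the 3-byte chunk header to the payload.
func encode(out []byte, chunks, chunk, count uint8, payload []byte) []byte {
    return append(append(out, chunks, chunk, count), payload...)
}

// decode splits the header back off; zero values mean "too short".
func decode(bs []byte) (chunks, chunk, count uint8, payload []byte) {
    if len(bs) >= 3 {
        chunks, chunk, count, payload = bs[0], bs[1], bs[2], bs[3:]
    }
    return
}

func main() {
    msg := []byte("0123456789")
    // Split into two chunks of 5 bytes, message counter 7
    var packets [][]byte
    for idx := 0; idx < 2; idx++ {
        packets = append(packets, encode(nil, 2, uint8(idx)+1, 7, msg[idx*5:(idx+1)*5]))
    }
    var buf []byte
    for _, p := range packets {
        chunks, chunk, count, payload := decode(p)
        fmt.Println(chunks, chunk, count) // 2 1 7, then 2 2 7
        buf = append(buf, payload...)
    }
    fmt.Printf("%s\n", buf) // 0123456789
}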
79 src/yggdrasil/util.go Normal file
@@ -0,0 +1,79 @@
package yggdrasil

// These are misc. utility functions that didn't really fit anywhere else

import "fmt"
import "runtime"
//import "sync"

func Util_testAddrIDMask() {
    for idx := 0; idx < 16; idx++ {
        var orig NodeID
        orig[8] = 42
        for bidx := 0; bidx < idx; bidx++ {
            orig[bidx/8] |= (0x80 >> uint8(bidx%8))
        }
        addr := address_addrForNodeID(&orig)
        nid, mask := addr.getNodeIDandMask()
        for b := 0; b < len(mask); b++ {
            nid[b] &= mask[b]
            orig[b] &= mask[b]
        }
        if *nid != orig {
            fmt.Println(orig)
            fmt.Println(*addr)
            fmt.Println(*nid)
            fmt.Println(*mask)
            panic(idx)
        }
    }
}

func util_yield() {
    runtime.Gosched()
}

func util_lockthread() {
    runtime.LockOSThread()
}

func util_unlockthread() {
    runtime.UnlockOSThread()
}

/*
var byteStore sync.Pool = sync.Pool{
    New: func() interface{} { return []byte(nil) },
}

func util_getBytes() []byte {
    return byteStore.Get().([]byte)[:0]
}

func util_putBytes(bs []byte) {
    byteStore.Put(bs) // FIXME? The cast to interface{} allocates...
}
*/

var byteStore chan []byte

func util_initByteStore() {
    if byteStore == nil {
        byteStore = make(chan []byte, 32)
    }
}

func util_getBytes() []byte {
    select {
    case bs := <-byteStore: return bs[:0]
    default: return nil
    }
}

func util_putBytes(bs []byte) {
    select {
    case byteStore<-bs:
    default:
    }
}
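The channel-based byteStore is a fixed-capacity, non-blocking free list: get returns a recycled slice (or nil, which append handles fine) and put silently drops buffers once the pool is full. A standalone sketch of the same pattern:

package main

import "fmt"

var pool = make(chan []byte, 32) // fixed-capacity free list

// getBytes returns a recycled buffer, or nil if the pool is empty.
// A nil slice is fine: append allocates on first use.
func getBytes() []byte {
    select {
    case bs := <-pool:
        return bs[:0] // keep capacity, discard contents
    default:
        return nil
    }
}

// putBytes returns a buffer to the pool, dropping it if the pool is full.
func putBytes(bs []byte) {
    select {
    case pool <- bs:
    default:
    }
}

func main() {
    buf := append(getBytes(), "some packet"...)
    fmt.Println(len(buf), cap(buf) > 0) // 11 true
    putBytes(buf)
    reused := getBytes()
    fmt.Println(len(reused), cap(reused) >= 11) // 0 true: same backing array
}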
492 src/yggdrasil/wire.go Normal file
@@ -0,0 +1,492 @@
package yggdrasil

// Wire formatting tools
// These are all ugly and probably not very secure

// Packet types, as an Encode_uint64 at the start of each packet
// TODO? make things still work after reordering (after things stabilize more?)
// Type safety would also be nice, `type wire_type uint64`, rewrite as needed?
const (
    wire_Traffic = iota // data being routed somewhere, handle for crypto
    wire_ProtocolTraffic // protocol traffic, pub keys for crypto
    wire_LinkProtocolTraffic // link proto traffic, pub keys for crypto
    wire_SwitchAnnounce // TODO put inside protocol traffic header
    wire_SwitchHopRequest // TODO put inside protocol traffic header
    wire_SwitchHop // TODO put inside protocol traffic header
    wire_SessionPing // inside protocol traffic header
    wire_SessionPong // inside protocol traffic header
    wire_DHTLookupRequest // inside protocol traffic header
    wire_DHTLookupResponse // inside protocol traffic header
    wire_SearchRequest // inside protocol traffic header
    wire_SearchResponse // inside protocol traffic header
    //wire_Keys // udp key packet (boxPub, sigPub)
)

// Encode uint64 using a variable length scheme
// Similar to binary.Uvarint, but big-endian
func wire_encode_uint64(elem uint64) []byte {
    return wire_put_uint64(elem, nil)
}

// Occasionally useful for appending to an existing slice (if there's room)
func wire_put_uint64(elem uint64, out []byte) []byte {
    bs := make([]byte, 0, 10)
    bs = append(bs, byte(elem & 0x7f))
    for e := elem >> 7; e > 0; e >>= 7 {
        bs = append(bs, byte(e | 0x80))
    }
    // Now reverse bytes, because we set them in the wrong order
    // TODO just put them in the right place the first time...
    last := len(bs)-1
    for idx := 0; idx < len(bs)/2; idx++ {
        bs[idx], bs[last-idx] = bs[last-idx], bs[idx]
    }
    return append(out, bs...)
}

// Decode uint64 from a []byte slice
// Returns the decoded uint64 and the number of bytes used
func wire_decode_uint64(bs []byte) (uint64, int) {
    length := 0
    elem := uint64(0)
    for _, b := range bs {
        elem <<= 7
        elem |= uint64(b & 0x7f)
        length++
        if b & 0x80 == 0 { break }
    }
    return elem, length
}

func wire_intToUint(i int64) uint64 {
    var u uint64
    if i < 0 {
        u = uint64(-i) << 1
        u |= 0x01 // sign bit
    } else {
        u = uint64(i) << 1
    }
    return u
}

func wire_intFromUint(u uint64) int64 {
    var i int64
    i = int64(u >> 1)
    if u & 0x01 != 0 { i *= -1 }
    return i
}
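The varint here is big-endian with a continuation bit on every byte except the last (the reverse of binary.Uvarint's little-endian layout), and signed values ride on top of it by stealing the low bit as a sign flag. A self-contained round-trip of both layers:

package main

import "fmt"

// putUvarint encodes big-endian base-128 with the high bit as "more follows".
func putUvarint(elem uint64, out []byte) []byte {
    bs := []byte{byte(elem & 0x7f)}
    for e := elem >> 7; e > 0; e >>= 7 {
        bs = append([]byte{byte(e | 0x80)}, bs...) // prepend, most significant first
    }
    return append(out, bs...)
}

// uvarint decodes the value and reports how many bytes it consumed.
func uvarint(bs []byte) (uint64, int) {
    elem, length := uint64(0), 0
    for _, b := range bs {
        elem = elem<<7 | uint64(b&0x7f)
        length++
        if b&0x80 == 0 {
            break
        }
    }
    return elem, length
}

// Signed values: low bit is the sign, remaining bits the magnitude.
func intToUint(i int64) uint64 {
    if i < 0 {
        return uint64(-i)<<1 | 1
    }
    return uint64(i) << 1
}

func intFromUint(u uint64) int64 {
    i := int64(u >> 1)
    if u&1 != 0 {
        i = -i
    }
    return i
}

func main() {
    bs := putUvarint(intToUint(-300), nil)
    u, n := uvarint(bs)
    fmt.Println(bs, n, intFromUint(u)) // [132 89] 2 -300
}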
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Takes coords, returns coords prefixed with encoded coord length
|
||||
func wire_encode_coords(coords []byte) ([]byte) {
|
||||
coordLen := wire_encode_uint64(uint64(len(coords)))
|
||||
bs := make([]byte, 0, len(coordLen)+len(coords))
|
||||
bs = append(bs, coordLen...)
|
||||
bs = append(bs, coords...)
|
||||
return bs
|
||||
}
|
||||
|
||||
func wire_put_coords(coords []byte, bs []byte) ([]byte) {
|
||||
bs = wire_put_uint64(uint64(len(coords)), bs)
|
||||
bs = append(bs, coords...)
|
||||
return bs
|
||||
}
|
||||
|
||||
// Takes a packet that begins with coords (starting with coord length)
|
||||
// Returns a slice of coords and the number of bytes read
|
||||
func wire_decode_coords(packet []byte) ([]byte, int) {
|
||||
coordLen, coordBegin := wire_decode_uint64(packet)
|
||||
coordEnd := coordBegin+int(coordLen)
|
||||
//if coordBegin == 0 { panic("No coords found") } // Testing
|
||||
//if coordEnd > len(packet) { panic("Packet too short") } // Testing
|
||||
if coordBegin == 0 || coordEnd > len(packet) { return nil, 0 }
|
||||
return packet[coordBegin:coordEnd], coordEnd
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// TODO move this msg stuff somewhere else, use encode() and decode() methods
|
||||
|
||||
// Announces that we can send parts of a Message with a particular seq
|
||||
type msgAnnounce struct {
|
||||
root sigPubKey
|
||||
tstamp int64
|
||||
seq uint64
|
||||
len uint64
|
||||
//Deg uint64
|
||||
//RSeq uint64
|
||||
}
|
||||
|
||||
func (m *msgAnnounce) encode() []byte {
|
||||
bs := wire_encode_uint64(wire_SwitchAnnounce)
|
||||
bs = append(bs, m.root[:]...)
|
||||
bs = append(bs, wire_encode_uint64(wire_intToUint(m.tstamp))...)
|
||||
bs = append(bs, wire_encode_uint64(m.seq)...)
|
||||
bs = append(bs, wire_encode_uint64(m.len)...)
|
||||
//bs = append(bs, wire_encode_uint64(m.Deg)...)
|
||||
//bs = append(bs, wire_encode_uint64(m.RSeq)...)
|
||||
return bs
|
||||
}
|
||||
|
||||
func (m *msgAnnounce) decode(bs []byte) bool {
|
||||
var pType uint64
|
||||
var tstamp uint64
|
||||
switch {
|
||||
case !wire_chop_uint64(&pType, &bs): return false
|
||||
case pType != wire_SwitchAnnounce: return false
|
||||
case !wire_chop_slice(m.root[:], &bs): return false
|
||||
case !wire_chop_uint64(&tstamp, &bs): return false
|
||||
case !wire_chop_uint64(&m.seq, &bs): return false
|
||||
case !wire_chop_uint64(&m.len, &bs): return false
|
||||
//case !wire_chop_uint64(&m.Deg, &bs): return false
|
||||
//case !wire_chop_uint64(&m.RSeq, &bs): return false
|
||||
}
|
||||
m.tstamp = wire_intFromUint(tstamp)
|
||||
return true
|
||||
}
|
||||
|
||||
type msgHopReq struct {
|
||||
root sigPubKey
|
||||
tstamp int64
|
||||
seq uint64
|
||||
hop uint64
|
||||
}
|
||||
|
||||
func (m *msgHopReq) encode() []byte {
|
||||
bs := wire_encode_uint64(wire_SwitchHopRequest)
|
||||
bs = append(bs, m.root[:]...)
|
||||
bs = append(bs, wire_encode_uint64(wire_intToUint(m.tstamp))...)
|
||||
bs = append(bs, wire_encode_uint64(m.seq)...)
|
||||
bs = append(bs, wire_encode_uint64(m.hop)...)
|
||||
return bs
|
||||
}
|
||||
|
||||
func (m *msgHopReq) decode(bs []byte) bool {
|
||||
var pType uint64
|
||||
var tstamp uint64
|
||||
switch {
|
||||
case !wire_chop_uint64(&pType, &bs): return false
|
||||
case pType != wire_SwitchHopRequest: return false
|
||||
case !wire_chop_slice(m.root[:], &bs): return false
|
||||
case !wire_chop_uint64(&tstamp, &bs): return false
|
||||
case !wire_chop_uint64(&m.seq, &bs): return false
|
||||
case !wire_chop_uint64(&m.hop, &bs): return false
|
||||
}
|
||||
m.tstamp = wire_intFromUint(tstamp)
|
||||
return true
|
||||
}
|
||||
|
||||
type msgHop struct {
|
||||
root sigPubKey
|
||||
tstamp int64
|
||||
seq uint64
|
||||
hop uint64
|
||||
port switchPort
|
||||
next sigPubKey
|
||||
sig sigBytes
|
||||
}
|
||||
|
||||
func (m *msgHop) encode() []byte {
|
||||
bs := wire_encode_uint64(wire_SwitchHop)
|
||||
bs = append(bs, m.root[:]...)
|
||||
bs = append(bs, wire_encode_uint64(wire_intToUint(m.tstamp))...)
|
||||
bs = append(bs, wire_encode_uint64(m.seq)...)
|
||||
bs = append(bs, wire_encode_uint64(m.hop)...)
|
||||
bs = append(bs, wire_encode_uint64(uint64(m.port))...)
|
||||
bs = append(bs, m.next[:]...)
|
||||
bs = append(bs, m.sig[:]...)
|
||||
return bs
|
||||
}
|
||||
|
||||
func (m *msgHop) decode(bs []byte) bool {
|
||||
var pType uint64
|
||||
var tstamp uint64
|
||||
switch {
|
||||
case !wire_chop_uint64(&pType, &bs): return false
|
||||
case pType != wire_SwitchHop: return false
|
||||
case !wire_chop_slice(m.root[:], &bs): return false
|
||||
case !wire_chop_uint64(&tstamp, &bs): return false
|
||||
case !wire_chop_uint64(&m.seq, &bs): return false
|
||||
case !wire_chop_uint64(&m.hop, &bs): return false
|
||||
case !wire_chop_uint64((*uint64)(&m.port), &bs): return false
|
||||
case !wire_chop_slice(m.next[:], &bs): return false
|
||||
case !wire_chop_slice(m.sig[:], &bs): return false
|
||||
}
|
||||
m.tstamp = wire_intFromUint(tstamp)
|
||||
return true
|
||||
}
|
||||
|
||||
// Format used to check signatures only, so no need to also support decoding
|
||||
func wire_encode_locator(loc *switchLocator) []byte {
|
||||
coords := wire_encode_coords(loc.getCoords())
|
||||
var bs []byte
|
||||
bs = append(bs, loc.root[:]...)
|
||||
bs = append(bs, wire_encode_uint64(wire_intToUint(loc.tstamp))...)
|
||||
bs = append(bs, coords...)
|
||||
return bs
|
||||
}
|
||||
|
||||
func wire_chop_slice(toSlice []byte, fromSlice *[]byte) bool {
|
||||
if len(*fromSlice) < len(toSlice) { return false }
|
||||
copy(toSlice, *fromSlice)
|
||||
*fromSlice = (*fromSlice)[len(toSlice):]
|
||||
return true
|
||||
}
|
||||
|
||||
func wire_chop_coords(toCoords *[]byte, fromSlice *[]byte) bool {
|
||||
coords, coordLen := wire_decode_coords(*fromSlice)
|
||||
if coordLen == 0 { return false }
|
||||
*toCoords = append((*toCoords)[:0], coords...)
|
||||
*fromSlice = (*fromSlice)[coordLen:]
|
||||
return true
|
||||
}
|
||||
|
||||
func wire_chop_uint64(toUInt64 *uint64, fromSlice *[]byte) bool {
|
||||
dec, decLen := wire_decode_uint64(*fromSlice)
|
||||
if decLen == 0 { return false }
|
||||
*toUInt64 = dec
|
||||
*fromSlice = (*fromSlice)[decLen:]
|
||||
return true
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Wire traffic packets
|
||||
|
||||
type wire_trafficPacket struct {
|
||||
ttl uint64 // TODO? hide this as a wire format detail, not set by user
|
||||
coords []byte
|
||||
handle handle
|
||||
nonce boxNonce
|
||||
payload []byte
|
||||
}

// This is basically MarshalBinary, but decode doesn't allow that...
func (p *wire_trafficPacket) encode() []byte {
	bs := util_getBytes()
	bs = wire_put_uint64(wire_Traffic, bs)
	bs = wire_put_uint64(p.ttl, bs)
	bs = wire_put_coords(p.coords, bs)
	bs = append(bs, p.handle[:]...)
	bs = append(bs, p.nonce[:]...)
	bs = append(bs, p.payload...)
	return bs
}

// Not just UnmarshalBinary because the original slice isn't always copied from
func (p *wire_trafficPacket) decode(bs []byte) bool {
	var pType uint64
	switch {
	case !wire_chop_uint64(&pType, &bs): return false
	case pType != wire_Traffic: return false
	case !wire_chop_uint64(&p.ttl, &bs): return false
	case !wire_chop_coords(&p.coords, &bs): return false
	case !wire_chop_slice(p.handle[:], &bs): return false
	case !wire_chop_slice(p.nonce[:], &bs): return false
	}
	p.payload = append(util_getBytes(), bs...)
	return true
}
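
// A minimal round-trip sketch (illustrative placeholder values; assumes these
// coords survive the coord helpers unchanged):
func wire_trafficPacket_example() bool {
	in := wire_trafficPacket{ttl: 64, coords: []byte{1, 2}, payload: []byte("hi")}
	var out wire_trafficPacket
	if !out.decode(in.encode()) { return false }
	return out.ttl == in.ttl && string(out.payload) == string(in.payload)
}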

type wire_protoTrafficPacket struct {
	ttl     uint64 // TODO? hide this as a wire format detail, not set by user
	coords  []byte
	toKey   boxPubKey
	fromKey boxPubKey
	nonce   boxNonce
	payload []byte
}

func (p *wire_protoTrafficPacket) encode() []byte {
	coords := wire_encode_coords(p.coords)
	bs := wire_encode_uint64(wire_ProtocolTraffic)
	bs = append(bs, wire_encode_uint64(p.ttl)...)
	bs = append(bs, coords...)
	bs = append(bs, p.toKey[:]...)
	bs = append(bs, p.fromKey[:]...)
	bs = append(bs, p.nonce[:]...)
	bs = append(bs, p.payload...)
	return bs
}

func (p *wire_protoTrafficPacket) decode(bs []byte) bool {
	var pType uint64
	switch {
	case !wire_chop_uint64(&pType, &bs): return false
	case pType != wire_ProtocolTraffic: return false
	case !wire_chop_uint64(&p.ttl, &bs): return false
	case !wire_chop_coords(&p.coords, &bs): return false
	case !wire_chop_slice(p.toKey[:], &bs): return false
	case !wire_chop_slice(p.fromKey[:], &bs): return false
	case !wire_chop_slice(p.nonce[:], &bs): return false
	}
	p.payload = bs
	return true
}
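
// Note: unlike wire_trafficPacket.decode, the payload here aliases the input
// slice instead of copying into a pooled buffer, so the caller must not reuse
// bs while this packet is still in use.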

type wire_linkProtoTrafficPacket struct {
	toKey   boxPubKey
	fromKey boxPubKey
	nonce   boxNonce
	payload []byte
}

func (p *wire_linkProtoTrafficPacket) encode() []byte {
	bs := wire_encode_uint64(wire_LinkProtocolTraffic)
	bs = append(bs, p.toKey[:]...)
	bs = append(bs, p.fromKey[:]...)
	bs = append(bs, p.nonce[:]...)
	bs = append(bs, p.payload...)
	return bs
}

func (p *wire_linkProtoTrafficPacket) decode(bs []byte) bool {
	var pType uint64
	switch {
	case !wire_chop_uint64(&pType, &bs): return false
	case pType != wire_LinkProtocolTraffic: return false
	case !wire_chop_slice(p.toKey[:], &bs): return false
	case !wire_chop_slice(p.fromKey[:], &bs): return false
	case !wire_chop_slice(p.nonce[:], &bs): return false
	}
	p.payload = bs
	return true
}

////////////////////////////////////////////////////////////////////////////////

func (p *sessionPing) encode() []byte {
	var pTypeVal uint64
	if p.isPong {
		pTypeVal = wire_SessionPong
	} else {
		pTypeVal = wire_SessionPing
	}
	bs := wire_encode_uint64(pTypeVal)
	//p.sendPermPub used in top level (crypto), so skipped here
	bs = append(bs, p.handle[:]...)
	bs = append(bs, p.sendSesPub[:]...)
	bs = append(bs, wire_encode_uint64(wire_intToUint(p.tstamp))...)
	coords := wire_encode_coords(p.coords)
	bs = append(bs, coords...)
	return bs
}

func (p *sessionPing) decode(bs []byte) bool {
	var pType uint64
	var tstamp uint64
	switch {
	case !wire_chop_uint64(&pType, &bs): return false
	case pType != wire_SessionPing && pType != wire_SessionPong: return false
	//p.sendPermPub used in top level (crypto), so skipped here
	case !wire_chop_slice(p.handle[:], &bs): return false
	case !wire_chop_slice(p.sendSesPub[:], &bs): return false
	case !wire_chop_uint64(&tstamp, &bs): return false
	case !wire_chop_coords(&p.coords, &bs): return false
	}
	p.tstamp = wire_intFromUint(tstamp)
	if pType == wire_SessionPong { p.isPong = true }
	return true
}
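
// Sketch of the type selection: the same struct encodes as a ping or a pong
// depending on isPong, and decode recovers the flag from the type value
// (illustrative; assumes zero-valued keys and empty coords round-trip through
// the coord helpers):
func sessionPing_example() bool {
	in := sessionPing{isPong: true, tstamp: 12345}
	var out sessionPing
	if !out.decode(in.encode()) { return false }
	return out.isPong && out.tstamp == in.tstamp
}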

////////////////////////////////////////////////////////////////////////////////

func (r *dhtReq) encode() []byte {
	coords := wire_encode_coords(r.coords)
	bs := wire_encode_uint64(wire_DHTLookupRequest)
	bs = append(bs, r.key[:]...)
	bs = append(bs, coords...)
	bs = append(bs, r.dest[:]...)
	return bs
}

func (r *dhtReq) decode(bs []byte) bool {
	var pType uint64
	switch {
	case !wire_chop_uint64(&pType, &bs): return false
	case pType != wire_DHTLookupRequest: return false
	case !wire_chop_slice(r.key[:], &bs): return false
	case !wire_chop_coords(&r.coords, &bs): return false
	case !wire_chop_slice(r.dest[:], &bs): return false
	default: return true
	}
}

func (r *dhtRes) encode() []byte {
	coords := wire_encode_coords(r.coords)
	bs := wire_encode_uint64(wire_DHTLookupResponse)
	bs = append(bs, r.key[:]...)
	bs = append(bs, coords...)
	bs = append(bs, r.dest[:]...)
	for _, info := range r.infos {
		coords = wire_encode_coords(info.coords)
		bs = append(bs, info.key[:]...)
		bs = append(bs, coords...)
	}
	return bs
}

func (r *dhtRes) decode(bs []byte) bool {
	var pType uint64
	switch {
	case !wire_chop_uint64(&pType, &bs): return false
	case pType != wire_DHTLookupResponse: return false
	case !wire_chop_slice(r.key[:], &bs): return false
	case !wire_chop_coords(&r.coords, &bs): return false
	case !wire_chop_slice(r.dest[:], &bs): return false
	}
	for len(bs) > 0 {
		info := dhtInfo{}
		switch {
		case !wire_chop_slice(info.key[:], &bs): return false
		case !wire_chop_coords(&info.coords, &bs): return false
		}
		r.infos = append(r.infos, &info)
	}
	return true
}
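
// Sketch: the trailing key/coords pairs carry a variable number of infos, so a
// response with two infos decodes back into two infos (illustrative; assumes
// these placeholder coords survive the coord helpers):
func dhtRes_example() bool {
	in := dhtRes{coords: []byte{1}}
	in.infos = append(in.infos, &dhtInfo{coords: []byte{2}}, &dhtInfo{coords: []byte{3}})
	var out dhtRes
	if !out.decode(in.encode()) { return false }
	return len(out.infos) == 2
}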

////////////////////////////////////////////////////////////////////////////////

func (r *searchReq) encode() []byte {
	coords := wire_encode_coords(r.coords)
	bs := wire_encode_uint64(wire_SearchRequest)
	bs = append(bs, r.key[:]...)
	bs = append(bs, coords...)
	bs = append(bs, r.dest[:]...)
	return bs
}

func (r *searchReq) decode(bs []byte) bool {
	var pType uint64
	switch {
	case !wire_chop_uint64(&pType, &bs): return false
	case pType != wire_SearchRequest: return false
	case !wire_chop_slice(r.key[:], &bs): return false
	case !wire_chop_coords(&r.coords, &bs): return false
	case !wire_chop_slice(r.dest[:], &bs): return false
	default: return true
	}
}

func (r *searchRes) encode() []byte {
	coords := wire_encode_coords(r.coords)
	bs := wire_encode_uint64(wire_SearchResponse)
	bs = append(bs, r.key[:]...)
	bs = append(bs, coords...)
	bs = append(bs, r.dest[:]...)
	return bs
}

func (r *searchRes) decode(bs []byte) bool {
	var pType uint64
	switch {
	case !wire_chop_uint64(&pType, &bs): return false
	case pType != wire_SearchResponse: return false
	case !wire_chop_slice(r.key[:], &bs): return false
	case !wire_chop_coords(&r.coords, &bs): return false
	case !wire_chop_slice(r.dest[:], &bs): return false
	default: return true
	}
}
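
// Note: searchReq and searchRes share an identical wire layout apart from the
// leading type value; only the pType check on decode distinguishes them.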

209
yggdrasil.go
Normal file
@ -0,0 +1,209 @@

package main

import "bytes"
import "encoding/hex"
import "encoding/json"
import "flag"
import "fmt"
import "io/ioutil"
import "net"
import "os"
import "os/signal"
import "time"

import _ "net/http/pprof"
import "net/http"
import "log"
import "runtime"

import "golang.org/x/net/ipv6"

import . "yggdrasil"

/**
 * This is a very crude wrapper around src/yggdrasil
 * It can generate a new config (--genconf)
 * It can read a config from stdin (--useconf)
 * It can run with an automatic config (--autoconf)
 */
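
// Illustrative invocations of the above (binary name and file paths are
// examples, not fixed by this program):
//
//	./yggdrasil --genconf > node.conf
//	./yggdrasil --useconf < node.conf
//	./yggdrasil --autoconf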

type nodeConfig struct {
	Listen    string
	Peers     []string
	BoxPub    string
	BoxPriv   string
	SigPub    string
	SigPriv   string
	Multicast bool
}

type node struct {
	core Core
	sock *ipv6.PacketConn
}

func (n *node) init(cfg *nodeConfig, logger *log.Logger) {
	boxPub, err := hex.DecodeString(cfg.BoxPub)
	if err != nil { panic(err) }
	boxPriv, err := hex.DecodeString(cfg.BoxPriv)
	if err != nil { panic(err) }
	sigPub, err := hex.DecodeString(cfg.SigPub)
	if err != nil { panic(err) }
	sigPriv, err := hex.DecodeString(cfg.SigPriv)
	if err != nil { panic(err) }
	n.core.DEBUG_init(boxPub, boxPriv, sigPub, sigPriv)
	n.core.DEBUG_setLogger(logger)
	logger.Println("Starting interface...")
	n.core.DEBUG_setupAndStartGlobalUDPInterface(cfg.Listen)
	logger.Println("Started interface")
	go func() {
		if len(cfg.Peers) == 0 { return }
		for {
			for _, p := range cfg.Peers {
				n.core.DEBUG_sendUDPKeys(p)
				time.Sleep(time.Second)
			}
		}
	}()
}

func generateConfig() *nodeConfig {
	core := Core{}
	bpub, bpriv := core.DEBUG_newBoxKeys()
	spub, spriv := core.DEBUG_newSigKeys()
	cfg := nodeConfig{}
	cfg.Listen = "[::]:0"
	cfg.BoxPub = hex.EncodeToString(bpub[:])
	cfg.BoxPriv = hex.EncodeToString(bpriv[:])
	cfg.SigPub = hex.EncodeToString(spub[:])
	cfg.SigPriv = hex.EncodeToString(spriv[:])
	cfg.Peers = []string{}
	cfg.Multicast = true
	return &cfg
}

func doGenconf() string {
	cfg := generateConfig()
	bs, err := json.MarshalIndent(cfg, "", " ")
	if err != nil { panic(err) }
	return string(bs)
}
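
// The generated config marshals to JSON of roughly this shape (hex key
// strings abridged; values illustrative):
//
//	{
//	 "Listen": "[::]:0",
//	 "Peers": [],
//	 "BoxPub": "…",
//	 "BoxPriv": "…",
//	 "SigPub": "…",
//	 "SigPriv": "…",
//	 "Multicast": true
//	}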

var multicastAddr = "[ff02::114]:9001"

func (n *node) listen() {
	groupAddr, err := net.ResolveUDPAddr("udp6", multicastAddr)
	if err != nil { panic(err) }
	bs := make([]byte, 2048)
	for {
		nBytes, rcm, fromAddr, err := n.sock.ReadFrom(bs)
		if err != nil { panic(err) }
		//if rcm == nil { continue } // wat
		//fmt.Println("DEBUG:", "packet from:", fromAddr.String())
		if !rcm.Dst.IsLinkLocalMulticast() { continue }
		if !rcm.Dst.Equal(groupAddr.IP) { continue }
		anAddr := string(bs[:nBytes])
		addr, err := net.ResolveUDPAddr("udp6", anAddr)
		if err != nil { panic(err) } // Panic for testing, replace with continue later
		from := fromAddr.(*net.UDPAddr)
		//fmt.Println("DEBUG:", "heard:", addr.IP.String(), "from:", from.IP.String())
		if addr.IP.String() != from.IP.String() { continue }
		addr.Zone = from.Zone
		saddr := addr.String()
		//if _, isIn := n.peers[saddr]; isIn { continue }
		//n.peers[saddr] = struct{}{}
		n.core.DEBUG_sendUDPKeys(saddr)
		//fmt.Println("DEBUG:", "added multicast peer:", saddr)
	}
}

func (n *node) announce() {
	groupAddr, err := net.ResolveUDPAddr("udp6", multicastAddr)
	if err != nil { panic(err) }
	udpaddr := n.core.DEBUG_getGlobalUDPAddr()
	anAddr, err := net.ResolveUDPAddr("udp6", udpaddr.String())
	if err != nil { panic(err) }
	destAddr, err := net.ResolveUDPAddr("udp6", multicastAddr)
	if err != nil { panic(err) }
	for {
		ifaces, err := net.Interfaces()
		if err != nil { panic(err) }
		for _, iface := range ifaces {
			n.sock.JoinGroup(&iface, groupAddr)
			//err := n.sock.JoinGroup(&iface, groupAddr)
			//if err != nil { panic(err) }
			addrs, err := iface.Addrs()
			if err != nil { panic(err) }
			for _, addr := range addrs {
				addrIP, _, _ := net.ParseCIDR(addr.String())
				if addrIP.To4() != nil { continue } // IPv6 only
				if !addrIP.IsLinkLocalUnicast() { continue }
				anAddr.IP = addrIP
				anAddr.Zone = iface.Name
				destAddr.Zone = iface.Name
				msg := []byte(anAddr.String())
				n.sock.WriteTo(msg, nil, destAddr)
				break
			}
			time.Sleep(time.Second)
		}
		time.Sleep(time.Second)
	}
}
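
// Taken together: announce() periodically multicasts this node's link-local
// UDP address to ff02::114 on each IPv6-capable interface, while listen()
// receives those announcements, checks that the sender is announcing its own
// address, and replies with keys, automatically peering nodes on the same link.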

var pprof = flag.Bool("pprof", false, "Run pprof, see http://localhost:6060/debug/pprof/")
var genconf = flag.Bool("genconf", false, "print a new config to stdout")
var useconf = flag.Bool("useconf", false, "read config from stdin")
var autoconf = flag.Bool("autoconf", false, "automatic mode (dynamic IP, peer with IPv6 neighbors)")

func main() {
	flag.Parse()
	var cfg *nodeConfig
	switch {
	case *autoconf: cfg = generateConfig()
	case *useconf:
		config, err := ioutil.ReadAll(os.Stdin)
		if err != nil { panic(err) }
		decoder := json.NewDecoder(bytes.NewReader(config))
		err = decoder.Decode(&cfg)
		if err != nil { panic(err) }
	case *genconf: fmt.Println(doGenconf())
	default: flag.PrintDefaults()
	}
	if cfg == nil { return }
	logger := log.New(os.Stdout, "", log.Flags())
	if *pprof {
		runtime.SetBlockProfileRate(1)
		go func() { log.Println(http.ListenAndServe("localhost:6060", nil)) }()
	}
	// Setup
	logger.Println("Initializing...")
	n := node{}
	n.init(cfg, logger)
	logger.Println("Starting tun...")
	n.core.DEBUG_startTun() // 1280, the smallest supported MTU
	//n.core.DEBUG_startTunWithMTU(65535) // Largest supported MTU
	defer func() {
		logger.Println("Closing...")
		n.core.DEBUG_stopTun()
	}()
	logger.Println("Started...")
	if cfg.Multicast {
		addr, err := net.ResolveUDPAddr("udp", multicastAddr)
		if err != nil { panic(err) }
		listenString := fmt.Sprintf("[::]:%v", addr.Port)
		conn, err := net.ListenPacket("udp6", listenString)
		if err != nil { panic(err) }
		//defer conn.Close() // Let it close on its own when the application exits
		n.sock = ipv6.NewPacketConn(conn)
		if err = n.sock.SetControlMessage(ipv6.FlagDst, true); err != nil { panic(err) }
		go n.listen()
		go n.announce()
	}
	// Catch interrupt to exit gracefully
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	<-c
	logger.Println("Stopping...")
}