Mirror of https://github.com/tailscale/tailscale.git (synced 2025-07-31 16:23:44 +00:00)
tsconsensus: add a tsconsensus package

tsconsensus enables tsnet.Server instances to form a consensus. tsconsensus wraps hashicorp/raft with:
* the ability to do discovery via tailscale tags
* inter-node communication over tailscale
* routing of commands to the leader

A short usage sketch follows the commit metadata below.

Updates #14667

Signed-off-by: Fran Bull <fran@tailscale.com>
This commit is contained in:
parent 074372d6c5
commit 6ebb0c749d
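For orientation, here is a minimal, hypothetical sketch of how a caller might wire this package up, based only on the APIs added below (Start, DefaultConfig, Command, CommandResult, ExecuteCommand, Stop). The counterFSM type, the runNode function, the "incr" command name, and the "tag:consensus" tag are illustrative assumptions, not part of this commit; error handling and raft snapshotting are reduced to stubs.

package example

import (
	"context"
	"io"

	"github.com/hashicorp/raft"
	"tailscale.com/tsconsensus"
	"tailscale.com/tsnet"
)

// counterFSM is a stand-in state machine; any type satisfying raft.FSM works.
type counterFSM struct{ n int }

func (f *counterFSM) Apply(l *raft.Log) any {
	f.n++
	// tsconsensus expects Apply to return a CommandResult.
	return tsconsensus.CommandResult{Result: []byte{byte(f.n)}}
}

func (f *counterFSM) Snapshot() (raft.FSMSnapshot, error) { return nil, nil } // snapshotting elided
func (f *counterFSM) Restore(rc io.ReadCloser) error      { return nil }

func runNode(ctx context.Context, ts *tsnet.Server) error {
	// All cluster members are assumed to share this tag so they can discover each other.
	cons, err := tsconsensus.Start(ctx, ts, &counterFSM{}, "tag:consensus", tsconsensus.DefaultConfig())
	if err != nil {
		return err
	}
	defer cons.Stop(ctx)

	// Any node may submit a command; it is routed to the leader and then
	// applied on every node via raft.
	res, err := cons.ExecuteCommand(tsconsensus.Command{Name: "incr"})
	if err != nil {
		return err
	}
	_ = res.Result // serialized result produced by the state machine
	return nil
}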
go.mod (11 changed lines)
@ -127,6 +127,11 @@ require (
|
||||
github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect
|
||||
github.com/alecthomas/go-check-sumtype v0.1.4 // indirect
|
||||
github.com/alexkohler/nakedret/v2 v2.0.4 // indirect
|
||||
<<<<<<< HEAD
|
||||
=======
|
||||
github.com/armon/go-metrics v0.4.1 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.13.0 // indirect
|
||||
>>>>>>> 348d01d82 (tsconsensus: add a tsconsensus package)
|
||||
github.com/bombsimon/wsl/v4 v4.2.1 // indirect
|
||||
github.com/butuzov/mirror v1.1.0 // indirect
|
||||
github.com/catenacyber/perfsprint v0.7.1 // indirect
|
||||
@ -145,6 +150,12 @@ require (
|
||||
github.com/golangci/plugin-module-register v0.1.1 // indirect
|
||||
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
|
||||
github.com/gorilla/securecookie v1.1.2 // indirect
|
||||
github.com/hashicorp/go-hclog v1.6.2 // indirect
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
|
||||
github.com/hashicorp/go-metrics v0.5.4 // indirect
|
||||
github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect
|
||||
github.com/hashicorp/golang-lru v0.6.0 // indirect
|
||||
github.com/hashicorp/raft v1.7.2 // indirect
|
||||
github.com/jjti/go-spancheck v0.5.3 // indirect
|
||||
github.com/karamaru-alpha/copyloopvar v1.0.8 // indirect
|
||||
github.com/macabu/inamedparam v0.1.3 // indirect
|
||||
|
go.sum (48 changed lines)
@ -61,6 +61,7 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
|
||||
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
|
||||
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
|
||||
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/Djarvur/go-err113 v0.1.0 h1:uCRZZOdMQ0TZPHYTdYpoC0bLYJKPEHPUJ8MeAa51lNU=
|
||||
@ -114,6 +115,8 @@ github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1
|
||||
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
|
||||
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
|
||||
github.com/armon/go-proxyproto v0.0.0-20210323213023-7e956b284f0a/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
@ -212,6 +215,8 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk=
|
||||
github.com/cilium/ebpf v0.15.0/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso=
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||
github.com/ckaznocha/intrange v0.1.0 h1:ZiGBhvrdsKpoEfzh9CjBfDSZof6QB0ORY5tXasUtiew=
|
||||
github.com/ckaznocha/intrange v0.1.0/go.mod h1:Vwa9Ekex2BrEQMg6zlrWwbs/FtYw7eS5838Q7UjK7TQ=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
@ -288,8 +293,14 @@ github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0
|
||||
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
|
||||
github.com/evanw/esbuild v0.19.11 h1:mbPO1VJ/df//jjUd+p/nRLYCpizXxXb2w/zZMShxa2k=
|
||||
github.com/evanw/esbuild v0.19.11/go.mod h1:D2vIQZqV/vIf/VRHtViaUtViZmG7o+kKmlBfVQuRi48=
|
||||
<<<<<<< HEAD
|
||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
||||
=======
|
||||
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
|
||||
github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
|
||||
github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
|
||||
>>>>>>> 348d01d82 (tsconsensus: add a tsconsensus package)
|
||||
github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
|
||||
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
@ -525,15 +536,36 @@ github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod
|
||||
github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY=
|
||||
github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||
<<<<<<< HEAD
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
|
||||
=======
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I=
|
||||
github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY=
|
||||
github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI=
|
||||
github.com/hashicorp/go-msgpack/v2 v2.1.2 h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0=
|
||||
github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
>>>>>>> 348d01d82 (tsconsensus: add a tsconsensus package)
|
||||
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
|
||||
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
|
||||
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/raft v1.7.2 h1:pyvxhfJ4R8VIAlHKvLoKQWElZspsCVT6YWuxVxsPAgc=
|
||||
github.com/hashicorp/raft v1.7.2/go.mod h1:DfvCGFxpAUPE0L4Uc8JLlTPtc3GzSbdH0MTJCLgnmJQ=
|
||||
github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU=
|
||||
github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo=
|
||||
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
|
||||
@ -578,6 +610,7 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX
|
||||
github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I=
|
||||
github.com/jsimonetti/rtnetlink v1.4.0/go.mod h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
@ -650,8 +683,12 @@ github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 h1:gWg6ZQ4JhDfJPqlo2
|
||||
github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
|
||||
github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE=
|
||||
github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
|
||||
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
@ -733,6 +770,7 @@ github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJ
|
||||
github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
|
||||
github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
|
||||
github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
|
||||
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pelletier/go-toml/v2 v2.2.0 h1:QLgLl2yMN7N+ruc31VynXs1vhMZa7CeHHejIeBAsoHo=
|
||||
github.com/pelletier/go-toml/v2 v2.2.0/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
||||
github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc=
|
||||
@ -761,8 +799,10 @@ github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyf
|
||||
github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
|
||||
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
|
||||
@ -773,6 +813,7 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
@ -780,6 +821,7 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G
|
||||
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
@ -880,6 +922,7 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
@ -946,6 +989,7 @@ github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+
|
||||
github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
|
||||
github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ=
|
||||
github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
github.com/u-root/gobusybox/src v0.0.0-20231228173702-b69f654846aa h1:unMPGGK/CRzfg923allsikmvk2l7beBeFPUNC4RVX/8=
|
||||
github.com/u-root/gobusybox/src v0.0.0-20231228173702-b69f654846aa/go.mod h1:Zj4Tt22fJVn/nz/y6Ergm1SahR9dio1Zm/D2/S0TmXM=
|
||||
github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs=
|
||||
@ -1174,6 +1218,7 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@ -1199,9 +1244,12 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
|
tsconsensus/http.go (new file, 130 lines)
@ -0,0 +1,130 @@
|
||||
package tsconsensus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
type joinRequest struct {
|
||||
RemoteHost string
|
||||
RemoteID string
|
||||
}
|
||||
|
||||
type commandClient struct {
|
||||
port uint16
|
||||
httpClient *http.Client
|
||||
}
|
||||
|
||||
func (rac *commandClient) Url(host string, path string) string {
|
||||
return fmt.Sprintf("http://%s:%d%s", host, rac.port, path)
|
||||
}
|
||||
|
||||
func (rac *commandClient) join(host string, jr joinRequest) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
rBs, err := json.Marshal(jr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
url := rac.Url(host, "/join")
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(rBs))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp, err := rac.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
respBs, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
return fmt.Errorf("remote responded %d: %s", resp.StatusCode, string(respBs))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rac *commandClient) executeCommand(host string, bs []byte) (CommandResult, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
url := rac.Url(host, "/executeCommand")
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(bs))
|
||||
if err != nil {
|
||||
return CommandResult{}, err
|
||||
}
|
||||
resp, err := rac.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return CommandResult{}, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
respBs, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return CommandResult{}, err
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
return CommandResult{}, fmt.Errorf("remote responded %d: %s", resp.StatusCode, string(respBs))
|
||||
}
|
||||
var cr CommandResult
|
||||
if err = json.Unmarshal(respBs, &cr); err != nil {
|
||||
return CommandResult{}, err
|
||||
}
|
||||
return cr, nil
|
||||
}
|
||||
|
||||
func (c *Consensus) makeCommandMux() *http.ServeMux {
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/join", func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPost {
|
||||
http.Error(w, "Bad Request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
defer r.Body.Close()
|
||||
decoder := json.NewDecoder(r.Body)
|
||||
var jr joinRequest
|
||||
err := decoder.Decode(&jr)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if jr.RemoteHost == "" {
|
||||
http.Error(w, "Required: remoteAddr", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if jr.RemoteID == "" {
|
||||
http.Error(w, "Required: remoteID", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
err = c.handleJoin(jr)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
})
|
||||
mux.HandleFunc("/executeCommand", func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPost {
|
||||
http.Error(w, "Bad Request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
defer r.Body.Close()
|
||||
decoder := json.NewDecoder(r.Body)
|
||||
var cmd Command
|
||||
err := decoder.Decode(&cmd)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
result, err := c.executeCommandLocally(cmd)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if err := json.NewEncoder(w).Encode(result); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
})
|
||||
return mux
|
||||
}
|
tsconsensus/monitor.go (new file, 138 lines)
@ -0,0 +1,138 @@
|
||||
package tsconsensus
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
"tailscale.com/tsnet"
|
||||
)
|
||||
|
||||
type status struct {
|
||||
Status *ipnstate.Status
|
||||
RaftState string
|
||||
}
|
||||
|
||||
type monitor struct {
|
||||
ts *tsnet.Server
|
||||
con *Consensus
|
||||
}
|
||||
|
||||
func (m *monitor) getStatus(ctx context.Context) (status, error) {
|
||||
lc, err := m.ts.LocalClient()
|
||||
if err != nil {
|
||||
return status{}, err
|
||||
}
|
||||
tStatus, err := lc.Status(ctx)
|
||||
if err != nil {
|
||||
return status{}, err
|
||||
}
|
||||
return status{Status: tStatus, RaftState: m.con.raft.State().String()}, nil
|
||||
}
|
||||
|
||||
func serveMonitor(c *Consensus, ts *tsnet.Server, listenAddr string) (*http.Server, error) {
|
||||
ln, err := ts.Listen("tcp", listenAddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := &monitor{con: c, ts: ts}
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/full", m.handleFullStatus)
|
||||
mux.HandleFunc("/", m.handleSummaryStatus)
|
||||
mux.HandleFunc("/netmap", m.handleNetmap)
|
||||
mux.HandleFunc("/dial", m.handleDial)
|
||||
srv := &http.Server{Handler: mux}
|
||||
go func() {
|
||||
defer ln.Close()
|
||||
err := srv.Serve(ln)
|
||||
log.Printf("MonitorHTTP stopped serving with error: %v", err)
|
||||
}()
|
||||
return srv, nil
|
||||
}
|
||||
|
||||
func (m *monitor) handleFullStatus(w http.ResponseWriter, r *http.Request) {
|
||||
s, err := m.getStatus(r.Context())
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
if err := json.NewEncoder(w).Encode(s); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (m *monitor) handleSummaryStatus(w http.ResponseWriter, r *http.Request) {
|
||||
s, err := m.getStatus(r.Context())
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
lines := []string{}
|
||||
for _, p := range s.Status.Peer {
|
||||
if p.Online {
|
||||
lines = append(lines, fmt.Sprintf("%s\t\t%d\t%d\t%t", strings.Split(p.DNSName, ".")[0], p.RxBytes, p.TxBytes, p.Active))
|
||||
}
|
||||
}
|
||||
slices.Sort(lines)
|
||||
lines = append([]string{fmt.Sprintf("RaftState: %s", s.RaftState)}, lines...)
|
||||
txt := strings.Join(lines, "\n") + "\n"
|
||||
w.Write([]byte(txt))
|
||||
}
|
||||
|
||||
func (m *monitor) handleNetmap(w http.ResponseWriter, r *http.Request) {
|
||||
var mask ipn.NotifyWatchOpt = ipn.NotifyInitialNetMap
|
||||
mask |= ipn.NotifyNoPrivateKeys
|
||||
lc, err := m.ts.LocalClient()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
watcher, err := lc.WatchIPNBus(r.Context(), mask)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
defer watcher.Close()
|
||||
|
||||
n, err := watcher.Next()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
j, _ := json.MarshalIndent(n.NetMap, "", "\t")
|
||||
w.Write([]byte(j))
|
||||
return
|
||||
}
|
||||
|
||||
func (m *monitor) handleDial(w http.ResponseWriter, r *http.Request) {
|
||||
var dialParams struct {
|
||||
Addr string
|
||||
}
|
||||
defer r.Body.Close()
|
||||
bs, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
err = json.Unmarshal(bs, &dialParams)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
c, err := m.ts.Dial(r.Context(), "tcp", dialParams.Addr)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
defer c.Close()
|
||||
w.Write([]byte("ok\n"))
|
||||
return
|
||||
}
|
tsconsensus/tsconsensus.go (new file, 364 lines)
@ -0,0 +1,364 @@
|
||||
package tsconsensus
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/raft"
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
"tailscale.com/tsnet"
|
||||
)
|
||||
|
||||
/*
Package tsconsensus implements a consensus algorithm for a group of tsnet.Servers.

The Raft consensus algorithm relies on you implementing a state machine that will give the same
result to a given command as long as the same logs have been applied in the same order.

tsconsensus uses the hashicorp/raft library to implement leader elections and log application.

tsconsensus provides:
* cluster peer discovery based on tailscale tags
* executing a command on the leader
* communication between cluster peers over tailscale using tsnet

Users implement a state machine that satisfies the raft.FSM interface, with the business logic they desire.
When changes to state are needed any node may:
* create a Command instance with serialized Args.
* call ExecuteCommand with the Command instance;
this will propagate the command to the leader,
and then from the leader to every node via raft.
* the state machine then can implement raft.Apply, and dispatch commands via the Command.Name,
returning a CommandResult with an Err or a serialized Result.
*/
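// An illustrative sketch (not part of this file) of one way a user state machine
// might dispatch on Command.Name as described above. The kv type, the "set" command
// and its JSON args are hypothetical; it assumes encoding/json, fmt, io, sync and
// github.com/hashicorp/raft imports. The raft log entry's Data is the JSON-encoded
// Command that this package applies on each node.
//
//	type kv struct {
//		mu   sync.Mutex
//		data map[string]string
//	}
//
//	func (s *kv) Apply(l *raft.Log) any {
//		var cmd Command
//		if err := json.Unmarshal(l.Data, &cmd); err != nil {
//			return CommandResult{Err: err}
//		}
//		switch cmd.Name {
//		case "set":
//			var args struct{ Key, Value string }
//			if err := json.Unmarshal(cmd.Args, &args); err != nil {
//				return CommandResult{Err: err}
//			}
//			s.mu.Lock()
//			if s.data == nil {
//				s.data = map[string]string{}
//			}
//			s.data[args.Key] = args.Value
//			s.mu.Unlock()
//			return CommandResult{}
//		default:
//			return CommandResult{Err: fmt.Errorf("unknown command %q", cmd.Name)}
//		}
//	}
//
//	func (s *kv) Snapshot() (raft.FSMSnapshot, error) { return nil, nil }
//	func (s *kv) Restore(rc io.ReadCloser) error      { return nil }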
|
||||
|
||||
func addr(host string, port uint16) string {
|
||||
return fmt.Sprintf("%s:%d", host, port)
|
||||
}
|
||||
|
||||
func raftAddr(host string, cfg Config) string {
|
||||
return addr(host, cfg.RaftPort)
|
||||
}
|
||||
|
||||
// A selfRaftNode is the info we need to talk to hashicorp/raft about our node.
|
||||
// We specify the ID and Addr on Consensus Start, and then use it later for raft
|
||||
// operations such as BootstrapCluster and AddVoter.
|
||||
type selfRaftNode struct {
|
||||
id string
|
||||
host string
|
||||
}
|
||||
|
||||
// A Config holds configurable values such as ports and timeouts.
|
||||
// Use DefaultConfig to get a useful Config.
|
||||
type Config struct {
|
||||
CommandPort uint16
|
||||
RaftPort uint16
|
||||
MonitorPort uint16
|
||||
Raft *raft.Config
|
||||
MaxConnPool int
|
||||
ConnTimeout time.Duration
|
||||
}
|
||||
|
||||
// DefaultConfig returns a Config populated with default values ready for use.
|
||||
func DefaultConfig() Config {
|
||||
return Config{
|
||||
CommandPort: 6271,
|
||||
RaftPort: 6270,
|
||||
MonitorPort: 8081,
|
||||
Raft: raft.DefaultConfig(),
|
||||
MaxConnPool: 5,
|
||||
ConnTimeout: 5 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
// StreamLayer implements the raft.StreamLayer interface required by raft.NetworkTransport.
// It does the raft inter-process communication via tailscale.
|
||||
type StreamLayer struct {
|
||||
net.Listener
|
||||
s *tsnet.Server
|
||||
}
|
||||
|
||||
// Dial implements the raft.StreamLayer interface with the tsnet.Server's Dial.
|
||||
func (sl StreamLayer) Dial(address raft.ServerAddress, timeout time.Duration) (net.Conn, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
return sl.s.Dial(ctx, "tcp", string(address))
|
||||
}
|
||||
|
||||
// Start returns a pointer to a running Consensus instance.
|
||||
func Start(ctx context.Context, ts *tsnet.Server, fsm raft.FSM, targetTag string, cfg Config) (*Consensus, error) {
|
||||
v4, _ := ts.TailscaleIPs()
|
||||
cc := commandClient{
|
||||
port: cfg.CommandPort,
|
||||
httpClient: ts.HTTPClient(),
|
||||
}
|
||||
self := selfRaftNode{
|
||||
id: v4.String(),
|
||||
host: v4.String(),
|
||||
}
|
||||
c := Consensus{
|
||||
commandClient: &cc,
|
||||
self: self,
|
||||
config: cfg,
|
||||
}
|
||||
|
||||
lc, err := ts.LocalClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tStatus, err := lc.Status(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var targets []*ipnstate.PeerStatus
|
||||
if targetTag != "" && tStatus.Self.Tags != nil && slices.Contains(tStatus.Self.Tags.AsSlice(), targetTag) {
|
||||
for _, v := range tStatus.Peer {
|
||||
if v.Tags != nil && slices.Contains(v.Tags.AsSlice(), targetTag) {
|
||||
targets = append(targets, v)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return nil, errors.New("targetTag empty, or this node is not tagged with it")
|
||||
}
|
||||
|
||||
r, err := startRaft(ts, &fsm, c.self, cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.raft = r
|
||||
srv, err := c.serveCmdHttp(ts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.cmdHttpServer = srv
|
||||
c.bootstrap(targets)
|
||||
srv, err = serveMonitor(&c, ts, addr(c.self.host, cfg.MonitorPort))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.monitorHttpServer = srv
|
||||
return &c, nil
|
||||
}
|
||||
|
||||
func startRaft(ts *tsnet.Server, fsm *raft.FSM, self selfRaftNode, cfg Config) (*raft.Raft, error) {
|
||||
config := cfg.Raft
|
||||
config.LocalID = raft.ServerID(self.id)
|
||||
|
||||
// no persistence (for now?)
|
||||
logStore := raft.NewInmemStore()
|
||||
stableStore := raft.NewInmemStore()
|
||||
snapshots := raft.NewInmemSnapshotStore()
|
||||
|
||||
// opens the listener on the raft port, raft will close it when it thinks it's appropriate
|
||||
ln, err := ts.Listen("tcp", raftAddr(self.host, cfg))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
transport := raft.NewNetworkTransport(StreamLayer{
|
||||
s: ts,
|
||||
Listener: ln,
|
||||
},
|
||||
cfg.MaxConnPool,
|
||||
cfg.ConnTimeout,
|
||||
nil) // TODO pass in proper logging
|
||||
|
||||
// after NewRaft it's possible some other raft node that has us in their configuration will get
|
||||
// in contact, so by the time we do anything else we may already be a functioning member
|
||||
// of a consensus
|
||||
return raft.NewRaft(config, *fsm, logStore, stableStore, snapshots, transport)
|
||||
}
|
||||
|
||||
// A Consensus is the consensus algorithm for a tsnet.Server.
// It wraps a raft.Raft instance and performs the peer discovery
// and command execution on the leader.
|
||||
type Consensus struct {
|
||||
raft *raft.Raft
|
||||
commandClient *commandClient
|
||||
self selfRaftNode
|
||||
config Config
|
||||
cmdHttpServer *http.Server
|
||||
monitorHttpServer *http.Server
|
||||
}
|
||||
|
||||
// bootstrap tries to join an existing raft cluster, or start one.
//
// We need to do the very first raft cluster configuration, but after that raft manages it.
// bootstrap is called at startup, when we are not yet aware of what the cluster config might be;
// our node may already be in it. Try to join the raft cluster of all the other nodes we know about, and
// if unsuccessful, assume we are the first and start our own.
//
// It's possible for bootstrap to return an error, or to start an errant breakaway cluster.
//
// We already have a list of expected cluster members from control (the members of the tailnet with the tag),
// so we could do the initial configuration with all servers specified.
// Choose to start with just this machine in the raft configuration instead, as:
// - We want to handle machines joining after start anyway.
// - Not all tagged nodes tailscale believes are active are necessarily actually responsive right now,
//   so let each node opt in when able.
|
||||
func (c *Consensus) bootstrap(targets []*ipnstate.PeerStatus) error {
|
||||
log.Printf("Trying to find cluster: num targets to try: %d", len(targets))
|
||||
for _, p := range targets {
|
||||
if !p.Online {
|
||||
log.Printf("Trying to find cluster: tailscale reports not online: %s", p.TailscaleIPs[0])
|
||||
} else {
|
||||
log.Printf("Trying to find cluster: trying %s", p.TailscaleIPs[0])
|
||||
err := c.commandClient.join(p.TailscaleIPs[0].String(), joinRequest{
|
||||
RemoteHost: c.self.host,
|
||||
RemoteID: c.self.id,
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("Trying to find cluster: could not join %s: %v", p.TailscaleIPs[0], err)
|
||||
} else {
|
||||
log.Printf("Trying to find cluster: joined %s", p.TailscaleIPs[0])
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("Trying to find cluster: unsuccessful, starting as leader: %s", c.self.host)
|
||||
f := c.raft.BootstrapCluster(
|
||||
raft.Configuration{
|
||||
Servers: []raft.Server{
|
||||
{
|
||||
ID: raft.ServerID(c.self.id),
|
||||
Address: raft.ServerAddress(c.raftAddr(c.self.host)),
|
||||
},
|
||||
},
|
||||
})
|
||||
return f.Error()
|
||||
}
|
||||
|
||||
// ExecuteCommand propagates a Command to be executed on the leader, which
// uses raft to Apply it to the followers.
|
||||
func (c *Consensus) ExecuteCommand(cmd Command) (CommandResult, error) {
|
||||
b, err := json.Marshal(cmd)
|
||||
if err != nil {
|
||||
return CommandResult{}, err
|
||||
}
|
||||
result, err := c.executeCommandLocally(cmd)
|
||||
var leErr lookElsewhereError
|
||||
for errors.As(err, &leErr) {
|
||||
result, err = c.commandClient.executeCommand(leErr.where, b)
|
||||
}
|
||||
return result, err
|
||||
}
|
||||
|
||||
// Stop attempts to gracefully shutdown various components.
|
||||
func (c *Consensus) Stop(ctx context.Context) error {
|
||||
fut := c.raft.Shutdown()
|
||||
err := fut.Error()
|
||||
if err != nil {
|
||||
log.Printf("Stop: Error in Raft Shutdown: %v", err)
|
||||
}
|
||||
err = c.cmdHttpServer.Shutdown(ctx)
|
||||
if err != nil {
|
||||
log.Printf("Stop: Error in command HTTP Shutdown: %v", err)
|
||||
}
|
||||
err = c.monitorHttpServer.Shutdown(ctx)
|
||||
if err != nil {
|
||||
log.Printf("Stop: Error in monitor HTTP Shutdown: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A Command is a representation of a state machine action.
|
||||
// The Name can be used to dispatch the command when received.
|
||||
// The Args are serialized for transport.
|
||||
type Command struct {
|
||||
Name string
|
||||
Args []byte
|
||||
}
|
||||
|
||||
// A CommandResult is a representation of the result of a state
|
||||
// machine action.
|
||||
// Err is any error that occurred on the node that tried to execute the command,
|
||||
// including any error from the underlying operation and deserialization problems etc.
|
||||
// Result is serialized for transport.
|
||||
type CommandResult struct {
|
||||
Err error
|
||||
Result []byte
|
||||
}
|
||||
|
||||
type lookElsewhereError struct {
|
||||
where string
|
||||
}
|
||||
|
||||
func (e lookElsewhereError) Error() string {
|
||||
return fmt.Sprintf("not the leader, try: %s", e.where)
|
||||
}
|
||||
|
||||
var errLeaderUnknown = errors.New("Leader Unknown")
|
||||
|
||||
func (c *Consensus) serveCmdHttp(ts *tsnet.Server) (*http.Server, error) {
|
||||
ln, err := ts.Listen("tcp", c.commandAddr(c.self.host))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mux := c.makeCommandMux()
|
||||
srv := &http.Server{Handler: mux}
|
||||
go func() {
|
||||
defer ln.Close()
|
||||
err := srv.Serve(ln)
|
||||
log.Printf("CmdHttp stopped serving with err: %v", err)
|
||||
}()
|
||||
return srv, nil
|
||||
}
|
||||
|
||||
func (c *Consensus) getLeader() (string, error) {
|
||||
raftLeaderAddr, _ := c.raft.LeaderWithID()
|
||||
leaderAddr := (string)(raftLeaderAddr)
|
||||
if leaderAddr == "" {
|
||||
// Raft doesn't know who the leader is.
|
||||
return "", errLeaderUnknown
|
||||
}
|
||||
// Raft gives us the address with the raft port, we don't always want that.
|
||||
host, _, err := net.SplitHostPort(leaderAddr)
|
||||
return host, err
|
||||
}
|
||||
|
||||
func (c *Consensus) executeCommandLocally(cmd Command) (CommandResult, error) {
|
||||
b, err := json.Marshal(cmd)
|
||||
if err != nil {
|
||||
return CommandResult{}, err
|
||||
}
|
||||
f := c.raft.Apply(b, 10*time.Second)
|
||||
err = f.Error()
|
||||
result := f.Response()
|
||||
if errors.Is(err, raft.ErrNotLeader) {
|
||||
leader, err := c.getLeader()
|
||||
if err != nil {
|
||||
// we know we're not leader but we were unable to give the address of the leader
|
||||
return CommandResult{}, err
|
||||
}
|
||||
return CommandResult{}, lookElsewhereError{where: leader}
|
||||
}
|
||||
if result == nil {
|
||||
result = CommandResult{}
|
||||
}
|
||||
return result.(CommandResult), err
|
||||
}
|
||||
|
||||
func (c *Consensus) handleJoin(jr joinRequest) error {
|
||||
remoteAddr := c.raftAddr(jr.RemoteHost)
|
||||
f := c.raft.AddVoter(raft.ServerID(jr.RemoteID), raft.ServerAddress(remoteAddr), 0, 0)
|
||||
if f.Error() != nil {
|
||||
return f.Error()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Consensus) raftAddr(host string) string {
|
||||
return raftAddr(host, c.config)
|
||||
}
|
||||
|
||||
func (c *Consensus) commandAddr(host string) string {
|
||||
return addr(host, c.config.CommandPort)
|
||||
}
|
tsconsensus/tsconsensus_test.go (new file, 453 lines)
@ -0,0 +1,453 @@
|
||||
package tsconsensus
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/netip"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/raft"
|
||||
"tailscale.com/client/tailscale"
|
||||
"tailscale.com/ipn/store/mem"
|
||||
"tailscale.com/net/netns"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tsnet"
|
||||
"tailscale.com/tstest/integration"
|
||||
"tailscale.com/tstest/integration/testcontrol"
|
||||
"tailscale.com/tstest/nettest"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/logger"
|
||||
)
|
||||
|
||||
type fsm struct {
|
||||
events []map[string]any
|
||||
count int
|
||||
}
|
||||
type fsmSnapshot struct{}
|
||||
|
||||
func (f *fsm) Apply(l *raft.Log) any {
|
||||
f.count++
|
||||
f.events = append(f.events, map[string]any{
|
||||
"type": "Apply",
|
||||
"l": l,
|
||||
})
|
||||
return CommandResult{
|
||||
Result: []byte{byte(f.count)},
|
||||
}
|
||||
}
|
||||
|
||||
func (f *fsm) Snapshot() (raft.FSMSnapshot, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (f *fsm) Restore(rc io.ReadCloser) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fsmSnapshot) Persist(sink raft.SnapshotSink) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fsmSnapshot) Release() {}
|
||||
|
||||
var verboseDERP = false
|
||||
var verboseNodes = false
|
||||
|
||||
func startControl(t *testing.T) (control *testcontrol.Server, controlURL string) {
|
||||
// Corp#4520: don't use netns for tests.
|
||||
netns.SetEnabled(false)
|
||||
t.Cleanup(func() {
|
||||
netns.SetEnabled(true)
|
||||
})
|
||||
|
||||
derpLogf := logger.Discard
|
||||
if verboseDERP {
|
||||
derpLogf = t.Logf
|
||||
}
|
||||
derpMap := integration.RunDERPAndSTUN(t, derpLogf, "127.0.0.1")
|
||||
control = &testcontrol.Server{
|
||||
DERPMap: derpMap,
|
||||
DNSConfig: &tailcfg.DNSConfig{
|
||||
Proxied: true,
|
||||
},
|
||||
MagicDNSDomain: "tail-scale.ts.net",
|
||||
}
|
||||
control.HTTPTestServer = httptest.NewUnstartedServer(control)
|
||||
control.HTTPTestServer.Start()
|
||||
t.Cleanup(control.HTTPTestServer.Close)
|
||||
controlURL = control.HTTPTestServer.URL
|
||||
t.Logf("testcontrol listening on %s", controlURL)
|
||||
return control, controlURL
|
||||
}
|
||||
|
||||
func startNode(t *testing.T, ctx context.Context, controlURL, hostname string) (*tsnet.Server, key.NodePublic, netip.Addr) {
|
||||
t.Helper()
|
||||
|
||||
tmp := filepath.Join(t.TempDir(), hostname)
|
||||
os.MkdirAll(tmp, 0755)
|
||||
s := &tsnet.Server{
|
||||
Dir: tmp,
|
||||
ControlURL: controlURL,
|
||||
Hostname: hostname,
|
||||
Store: new(mem.Store),
|
||||
Ephemeral: true,
|
||||
}
|
||||
if verboseNodes {
|
||||
s.Logf = log.Printf
|
||||
}
|
||||
t.Cleanup(func() { s.Close() })
|
||||
|
||||
status, err := s.Up(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return s, status.Self.PublicKey, status.TailscaleIPs[0]
|
||||
}
|
||||
|
||||
// pingNode sends a tailscale ping between two nodes. The ping itself is not what matters here:
// doing this has the side effect of causing the testcontrol.Server to recalculate and reissue
// netmaps.
|
||||
func pingNode(t *testing.T, control *testcontrol.Server, nodeKey key.NodePublic) {
|
||||
t.Helper()
|
||||
gotPing := make(chan bool, 1)
|
||||
waitPing := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
gotPing <- true
|
||||
}))
|
||||
defer waitPing.Close()
|
||||
|
||||
for try := 0; try < 5; try++ {
|
||||
pr := &tailcfg.PingRequest{URL: fmt.Sprintf("%s/ping-%d", waitPing.URL, try), Log: true}
|
||||
if !control.AddPingRequest(nodeKey, pr) {
|
||||
t.Fatalf("failed to AddPingRequest")
|
||||
}
|
||||
pingTimeout := time.NewTimer(2 * time.Second)
|
||||
defer pingTimeout.Stop()
|
||||
select {
|
||||
case <-gotPing:
|
||||
// ok! the machinery that refreshes the netmap has been nudged
|
||||
return
|
||||
case <-pingTimeout.C:
|
||||
t.Logf("waiting for ping timed out: %d", try)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func tagNodes(t *testing.T, control *testcontrol.Server, nodeKeys []key.NodePublic, tag string) {
|
||||
t.Helper()
|
||||
for _, key := range nodeKeys {
|
||||
n := control.Node(key)
|
||||
n.Tags = append(n.Tags, tag)
|
||||
b := true
|
||||
n.Online = &b
|
||||
control.UpdateNode(n)
|
||||
}
|
||||
|
||||
// Cause the netmap to be recalculated and reissued, so we don't have to wait for it.
|
||||
for _, key := range nodeKeys {
|
||||
pingNode(t, control, key)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStart(t *testing.T) {
|
||||
nettest.SkipIfNoNetwork(t)
|
||||
control, controlURL := startControl(t)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
one, k, _ := startNode(t, ctx, controlURL, "one")
|
||||
|
||||
clusterTag := "tag:whatever"
|
||||
// nodes must be tagged with the cluster tag, to find each other
|
||||
tagNodes(t, control, []key.NodePublic{k}, clusterTag)
|
||||
|
||||
sm := &fsm{}
|
||||
r, err := Start(ctx, one, (*fsm)(sm), clusterTag, DefaultConfig())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer r.Stop(ctx)
|
||||
}
|
||||
|
||||
func waitFor(t *testing.T, msg string, condition func() bool, nTries int, waitBetweenTries time.Duration) {
|
||||
for try := 0; try < nTries; try++ {
|
||||
done := condition()
|
||||
if done {
|
||||
t.Logf("waitFor success: %s: after %d tries", msg, try)
|
||||
return
|
||||
}
|
||||
time.Sleep(waitBetweenTries)
|
||||
}
|
||||
t.Fatalf("waitFor timed out: %s, after %d tries", msg, nTries)
|
||||
}
|
||||
|
||||
type participant struct {
|
||||
c *Consensus
|
||||
sm *fsm
|
||||
ts *tsnet.Server
|
||||
key key.NodePublic
|
||||
}
|
||||
|
||||
// startNodesAndWaitForPeerStatus starts and tags the *tsnet.Server nodes with the control, and waits
// for the nodes to make successful LocalClient Status calls that show the first node as Online.
|
||||
func startNodesAndWaitForPeerStatus(t *testing.T, ctx context.Context, clusterTag string, nNodes int) ([]*participant, *testcontrol.Server, string) {
|
||||
ps := make([]*participant, nNodes)
|
||||
keysToTag := make([]key.NodePublic, nNodes)
|
||||
localClients := make([]*tailscale.LocalClient, nNodes)
|
||||
control, controlURL := startControl(t)
|
||||
for i := 0; i < nNodes; i++ {
|
||||
ts, key, _ := startNode(t, ctx, controlURL, fmt.Sprintf("node: %d", i))
|
||||
ps[i] = &participant{ts: ts, key: key}
|
||||
keysToTag[i] = key
|
||||
lc, err := ts.LocalClient()
|
||||
if err != nil {
|
||||
t.Fatalf("%d: error getting local client: %v", i, err)
|
||||
}
|
||||
localClients[i] = lc
|
||||
}
|
||||
tagNodes(t, control, keysToTag, clusterTag)
|
||||
fxCameOnline := func() bool {
|
||||
// all the _other_ nodes see the first as online
|
||||
for i := 1; i < nNodes; i++ {
|
||||
status, err := localClients[i].Status(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("%d: error getting status: %v", i, err)
|
||||
}
|
||||
if !status.Peer[ps[0].key].Online {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
waitFor(t, "other nodes see node 1 online in ts status", fxCameOnline, 10, 2*time.Second)
|
||||
return ps, control, controlURL
|
||||
}
|
||||
|
||||
// createConsensusCluster populates participants with their consensus fields, and waits for all nodes
// to show all nodes as part of the same consensus cluster. It starts the first participant first and
// waits for it to become leader before adding the other nodes.
|
||||
func createConsensusCluster(t *testing.T, ctx context.Context, clusterTag string, participants []*participant, cfg Config) {
|
||||
participants[0].sm = &fsm{}
|
||||
first, err := Start(ctx, participants[0].ts, (*fsm)(participants[0].sm), clusterTag, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fxFirstIsLeader := func() bool {
|
||||
return first.raft.State() == raft.Leader
|
||||
}
|
||||
waitFor(t, "node 0 is leader", fxFirstIsLeader, 10, 2*time.Second)
|
||||
participants[0].c = first
|
||||
|
||||
for i := 1; i < len(participants); i++ {
|
||||
participants[i].sm = &fsm{}
|
||||
c, err := Start(ctx, participants[i].ts, (*fsm)(participants[i].sm), clusterTag, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
participants[i].c = c
|
||||
}
|
||||
|
||||
fxRaftConfigContainsAll := func() bool {
|
||||
for i := 0; i < len(participants); i++ {
|
||||
fut := participants[i].c.raft.GetConfiguration()
|
||||
err = fut.Error()
|
||||
if err != nil {
|
||||
t.Fatalf("%d: Getting Configuration errored: %v", i, err)
|
||||
}
|
||||
if len(fut.Configuration().Servers) != len(participants) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
waitFor(t, "all raft machines have all servers in their config", fxRaftConfigContainsAll, 10, time.Second*2)
|
||||
}
|
||||
|
||||
func TestApply(t *testing.T) {
|
||||
nettest.SkipIfNoNetwork(t)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
clusterTag := "tag:whatever"
|
||||
ps, _, _ := startNodesAndWaitForPeerStatus(t, ctx, clusterTag, 2)
|
||||
cfg := DefaultConfig()
|
||||
createConsensusCluster(t, ctx, clusterTag, ps, cfg)
|
||||
|
||||
fut := ps[0].c.raft.Apply([]byte("woo"), 2*time.Second)
|
||||
err := fut.Error()
|
||||
if err != nil {
|
||||
t.Fatalf("Raft Apply Error: %v", err)
|
||||
}
|
||||
|
||||
fxBothMachinesHaveTheApply := func() bool {
|
||||
return len(ps[0].sm.events) == 1 && len(ps[1].sm.events) == 1
|
||||
}
|
||||
waitFor(t, "the apply event made it into both state machines", fxBothMachinesHaveTheApply, 10, time.Second*1)
|
||||
}
|
||||
|
||||
// calls ExecuteCommand on each participant and checks that all participants get all commands
|
||||
func assertCommandsWorkOnAnyNode(t *testing.T, participants []*participant) {
|
||||
for i, p := range participants {
|
||||
res, err := p.c.ExecuteCommand(Command{Args: []byte{byte(i)}})
|
||||
if err != nil {
|
||||
t.Fatalf("%d: Error ExecuteCommand: %v", i, err)
|
||||
}
|
||||
if res.Err != nil {
|
||||
t.Fatalf("%d: Result Error ExecuteCommand: %v", i, res.Err)
|
||||
}
|
||||
retVal := int(res.Result[0])
|
||||
// the test implementation of the fsm returns the count of events that have been received
|
||||
if retVal != i+1 {
|
||||
t.Fatalf("Result, want %d, got %d", i+1, retVal)
|
||||
}
|
||||
|
||||
expectedEventsLength := i + 1
|
||||
fxEventsInAll := func() bool {
|
||||
for _, pOther := range participants {
|
||||
if len(pOther.sm.events) != expectedEventsLength {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
waitFor(t, "event makes it to all", fxEventsInAll, 10, time.Second*1)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfig(t *testing.T) {
|
||||
nettest.SkipIfNoNetwork(t)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
clusterTag := "tag:whatever"
|
||||
ps, _, _ := startNodesAndWaitForPeerStatus(t, ctx, clusterTag, 3)
|
||||
cfg := DefaultConfig()
|
||||
// test all is well with non default ports
|
||||
cfg.CommandPort = 12347
|
||||
cfg.RaftPort = 11882
|
||||
mp := uint16(8798)
|
||||
cfg.MonitorPort = mp
|
||||
createConsensusCluster(t, ctx, clusterTag, ps, cfg)
|
||||
assertCommandsWorkOnAnyNode(t, ps)
|
||||
|
||||
url := fmt.Sprintf("http://%s:%d/", ps[0].c.self.host, mp)
|
||||
httpClientOnTailnet := ps[1].ts.HTTPClient()
|
||||
rsp, err := httpClientOnTailnet.Get(url)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if rsp.StatusCode != 200 {
|
||||
t.Fatalf("monitor status want %d, got %d", 200, rsp.StatusCode)
|
||||
}
|
||||
defer rsp.Body.Close()
|
||||
body, err := io.ReadAll(rsp.Body)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Not a great assertion because it relies on the format of the response.
|
||||
line1 := strings.Split(string(body), "\n")[0]
|
||||
if line1[:10] != "RaftState:" {
|
||||
t.Fatalf("getting monitor status, first line, want something that starts with 'RaftState:', got '%s'", line1)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFollowerFailover(t *testing.T) {
|
||||
nettest.SkipIfNoNetwork(t)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
clusterTag := "tag:whatever"
|
||||
ps, _, _ := startNodesAndWaitForPeerStatus(t, ctx, clusterTag, 3)
|
||||
cfg := DefaultConfig()
|
||||
createConsensusCluster(t, ctx, clusterTag, ps, cfg)
|
||||
|
||||
smThree := ps[2].sm
|
||||
|
||||
fut := ps[0].c.raft.Apply([]byte("a"), 2*time.Second)
|
||||
futTwo := ps[0].c.raft.Apply([]byte("b"), 2*time.Second)
|
||||
err := fut.Error()
|
||||
if err != nil {
|
||||
t.Fatalf("Apply Raft error %v", err)
|
||||
}
|
||||
err = futTwo.Error()
|
||||
if err != nil {
|
||||
t.Fatalf("Apply Raft error %v", err)
|
||||
}
|
||||
|
||||
fxAllMachinesHaveTheApplies := func() bool {
|
||||
return len(ps[0].sm.events) == 2 && len(ps[1].sm.events) == 2 && len(smThree.events) == 2
|
||||
}
|
||||
waitFor(t, "the apply events made it into all state machines", fxAllMachinesHaveTheApplies, 10, time.Second*1)
|
||||
|
||||
// a follower loses contact with the cluster
|
||||
ps[2].c.Stop(ctx)
|
||||
|
||||
// applies still make it to one and two
|
||||
futThree := ps[0].c.raft.Apply([]byte("c"), 2*time.Second)
|
||||
futFour := ps[0].c.raft.Apply([]byte("d"), 2*time.Second)
|
||||
err = futThree.Error()
|
||||
if err != nil {
|
||||
t.Fatalf("Apply Raft error %v", err)
|
||||
}
|
||||
err = futFour.Error()
|
||||
if err != nil {
|
||||
t.Fatalf("Apply Raft error %v", err)
|
||||
}
|
||||
fxAliveMachinesHaveTheApplies := func() bool {
|
||||
return len(ps[0].sm.events) == 4 && len(ps[1].sm.events) == 4 && len(smThree.events) == 2
|
||||
}
|
||||
waitFor(t, "the apply events made it into eligible state machines", fxAliveMachinesHaveTheApplies, 10, time.Second*1)
|
||||
|
||||
// follower comes back
|
||||
smThreeAgain := &fsm{}
|
||||
rThreeAgain, err := Start(ctx, ps[2].ts, (*fsm)(smThreeAgain), clusterTag, DefaultConfig())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer rThreeAgain.Stop(ctx)
|
||||
fxThreeGetsCaughtUp := func() bool {
|
||||
return len(smThreeAgain.events) == 4
|
||||
}
|
||||
waitFor(t, "the apply events made it into the third node when it appeared with an empty state machine", fxThreeGetsCaughtUp, 20, time.Second*2)
|
||||
if len(smThree.events) != 2 {
|
||||
t.Fatalf("Expected smThree to remain on 2 events: got %d", len(smThree.events))
|
||||
}
|
||||
}
|
||||
|
||||
func TestRejoin(t *testing.T) {
|
||||
nettest.SkipIfNoNetwork(t)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
clusterTag := "tag:whatever"
|
||||
ps, control, controlURL := startNodesAndWaitForPeerStatus(t, ctx, clusterTag, 3)
|
||||
cfg := DefaultConfig()
|
||||
createConsensusCluster(t, ctx, clusterTag, ps, cfg)
|
||||
for _, p := range ps {
|
||||
defer p.c.Stop(ctx)
|
||||
}
|
||||
|
||||
// 1st node gets a redundant second join request from the second node
|
||||
ps[0].c.handleJoin(joinRequest{
|
||||
RemoteHost: ps[1].c.self.host,
|
||||
RemoteID: ps[1].c.self.id,
|
||||
})
|
||||
|
||||
tsJoiner, keyJoiner, _ := startNode(t, ctx, controlURL, "node: joiner")
|
||||
tagNodes(t, control, []key.NodePublic{keyJoiner}, clusterTag)
|
||||
smJoiner := &fsm{}
|
||||
cJoiner, err := Start(ctx, tsJoiner, (*fsm)(smJoiner), clusterTag, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ps = append(ps, &participant{
|
||||
sm: smJoiner,
|
||||
c: cJoiner,
|
||||
ts: tsJoiner,
|
||||
key: keyJoiner,
|
||||
})
|
||||
|
||||
assertCommandsWorkOnAnyNode(t, ps)
|
||||
}
|