Initial commit
commit 53c07e1dc3
69 changed files with 26572 additions and 0 deletions

.gitignore (vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
hubble/node_modules/
hubble/public/build/
*.db
.vim/
.DS_Store
CHANGELOG.adoc (new file, 130 lines)
@@ -0,0 +1,130 @@
= Changelog

The format is based on https://keepachangelog.com/en/1.0.0/[Keep a Changelog],
and this project adheres to https://semver.org/spec/v2.0.0.html[Semantic Versioning].

[discrete]
== 0.3.0 (2021-05-23)

[discrete]
=== Added

* Quasar signs config using XPASETO tokens (an additional security layer on top of HTTPS)
* Network certificate fingerprints are shown in the Hubble frontend
* Node public key fingerprints are shown in the Hubble frontend
* JWT middleware has been added to require username and password authentication for managing networks

[discrete]
== 0.2.0 (2021-05-22)

[discrete]
=== Added

* Added ability to modify firewall rules through Quasar
* Added firewall update forms to Hubble
* Time is saved when a node fetches the latest config
* Latest config fetch time is shown in Hubble

[discrete]
== 0.1.0 (2021-05-22)

[discrete]
=== 2021-05-22

==== Added

* Listen port can now be changed through the frontend

==== Fixed

* Formatting of the static host map in the nebula yaml config

[discrete]
=== 2021-05-21

==== Added

* Quasar config endpoint fully working
* Neutron fetching config and writing it to a yaml file working

[discrete]
=== 2021-05-20

==== Added

* Update node endpoint working in Quasar
* Update node functionality working in Hubble

[discrete]
=== 2021-05-19

==== Changed

* Using the negroni golang library for logging and future authentication middleware

==== Added

* Added cipher type to network information in the db and API responses
* Completed the update network API endpoint
* Added node info endpoints
* Added a groups array to networks in the db and made it updatable

[discrete]
=== 2021-05-18

==== Added

* Started working on the Hubble frontend using the Svelte compiler
* Created the frontend app structure and integrated it with Quasar
* Modified how CORS requests work with Quasar so they work with the client

[discrete]
=== 2021-05-17

==== Added

* Finished neutron join network capability
* Started adding neutron update capability

[discrete]
=== 2021-05-16

==== Added

* Node endpoints for Quasar
* Finished key network endpoints for Quasar
* Added some neutron endpoints

[discrete]
=== 2021-05-14

==== Changed

* Removed Python and switched back to golang due to crypto dependencies

[discrete]
=== 2021-05-10

==== Changed

* Using Python for Quasar instead of golang

[discrete]
=== Initial Stages - 2021-04-08

==== Added

* Project structure
** `cmd` directory for neutron and quasar main outputs
** `examples` for example config
** `nebutils` as a library for neutron and quasar to share code
** general repo files e.g. README, LICENSE, CHANGELOG
* Started work on `quasar`
** Added functionality to add a new network
** Added basic HTTP server functionality
** Added ability to interact with boltdb as a database interface
* Started work on `neutron`
** Added a main function to parse user flags
** Started on `init.go`, which gets the CA cert, generates keys and requests signing
LICENSE (new file, 21 lines)
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2021 Billy Bromell

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Makefile (new file, 14 lines)
@@ -0,0 +1,14 @@
SHELL := /bin/bash

default: release

neutron: cmd/neutron/*.go
	go build -o neutron cmd/neutron/*.go

quasar: cmd/quasar/*.go
	go build -o quasar cmd/quasar/*.go

release: neutron quasar
	cp neutron release/linux-x86_64
	cp quasar release/linux-x86_64
	tar -czvf release/linux-x86_64.tar.gz release/linux-x86_64
README.adoc (new file, 138 lines)
@@ -0,0 +1,138 @@
= Starship 🚀

== Overview

CAUTION: THIS PROJECT IS NOT PRODUCTION READY.

The goal of this project is to provide a config and certificate
management system for link:https://github.com/slackhq/nebula[nebula].
This project was built in a short amount of time and is my first
project using golang, so I would not recommend using it without auditing it first.

== Quasar Server

=== Overview

Quasar is a Central Management System (CMS) for managing Starship networks.
It provides APIs for two types of clients:

* Neutron nodes
** These authenticate by signing requests using their nebula private key
* Frontend clients / management tools
** These authenticate using JSON Web Tokens

Quasar can be configured using a yaml config file.
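
The repository references an example config at `examples/quasar.yml`.
As a rough sketch (the keys follow the `Config` struct in `cmd/quasar/config.go`
later in this commit; the values here are assumptions, not documented defaults),
such a file might look like:

[source,yaml]
----
db:
  type: boltdb            # assumed value; the commit's db layer is boltdb
  src: /var/lib/quasar/quasar.db
quasar:
  name: quasar
  listen:
    host: 0.0.0.0
    port: 6947
----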

The API for neutron nodes provides endpoints for:

* requesting to join a network (`POST /api/neutron/join`)
* fetching the CA certificate of a network (`GET /api/networks/{network}/cert`)
* fetching the latest signed node config (`GET /api/neutron/config`)

The API for management clients provides endpoints for:

* listing networks
* getting the CA cert for a network
* listing nodes in a network
* updating network settings
* updating node settings
* approving / enabling / disabling nodes

=== Installation Instructions

[source,shell]
----
make quasar
----

=== Operating Instructions

[source,shell]
----
# set JWT signing secret
export QUASAR_AUTHSECRET=$(uuid)

# set admin account password
export QUASAR_ADMINPASS="password"

# start server
./quasar serve -config examples/quasar.yml
----

== Neutron

=== Overview

Neutron is the client which Starship nodes use to request to join networks
and to update their configuration and certificates.

When joining a new network, Neutron creates a new Nebula keypair.
It then sends a request to Quasar to join a specific network.
This request includes the node name, the network it wants to join,
its hostname and its Nebula public key.
This information is sent as a JSON payload signed using the Nebula
private key, encoded similarly to a PASETO token.
PASETO tokens are similar to JSON Web Tokens (JWTs), but do not suffer
from the vulnerabilities that JWTs are prone to because of the vague JWT
protocol specification.
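
For reference, this is roughly how the join request is built in
`cmd/neutron/join.go` further down in this commit (a condensed sketch with
example values; error handling is omitted and `quasarAddr`, `privkey` and
`pubkey` are assumed to be in scope):

[source,go]
----
// build the join payload
t := wormhole.RequestJoinSchema{
	Netname:  "homelab", // example values
	Nodename: "node-1",
	Hostname: "host1",
	PubKey:   string(cert.MarshalX25519PublicKey(pubkey)),
}
// wrap it in a token and sign it with the node's Nebula private key
jsonToken, _ := wormhole.NewToken(t)
signer := xpaseto.NewSigner(privkey, pubkey)
token, _ := signer.SelfSignPaseto(jsonToken)
// POST the signed token to Quasar
http.Post(quasarAddr+"/api/neutron/join", "text/plain", bytes.NewBufferString(token))
----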

When updating, Neutron sends requests to Quasar to obtain
an updated certificate and configuration file.
For Quasar to send these, Neutron must include a signed token containing
its node name and the network name it is trying to update,
and the node must be approved and active on the Quasar server.
The signature on the token is verified against the public key stored
for the node on the Quasar server.

The update command can be run at frequent intervals to keep the node updated
with the most recent configuration changes (see the example below).
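
A minimal sketch of such a schedule (the interval and install path are
assumptions; the update needs access to `/etc/nebula`, so the entry would go
in root's crontab):

[source,shell]
----
# fetch the latest config every 15 minutes and tell nebula to reload it
*/15 * * * * /usr/local/bin/neutron update -network NETWORK && pgrep nebula | xargs kill -1
----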

=== Installation Instructions

[source,shell]
----
# build
cd starship

# equivalent of `go build -o neutron cmd/neutron/*.go`
make neutron
----

=== Operating Instructions

==== Manual install

[source,shell]
----
# request to join network
./neutron join -quasar http://127.0.0.1:6947 -network NETWORK -name NAME

# approve node from frontend then fetch latest config from Quasar
./neutron update -network NETWORK

# send SIGHUP to nebula to force config reload
pgrep nebula | xargs sudo kill -1
----

==== Using Install Script

[source,shell]
----
# quick install from release
wget https://github.com/b177y/starship/releases/download/v0.3.0/install-neutron.sh -O /tmp/install-neutron.sh

# check content
less /tmp/install-neutron.sh
bash /tmp/install-neutron.sh

# approve node from frontend then fetch latest config from Quasar
neutron update -network NETWORK

# start nebula with systemd
sudo systemctl start nebula@NETWORK

# send SIGHUP to nebula to force config reload
pgrep nebula | xargs sudo kill -1
----

== Hubble

Hubble is the frontend for managing Starship networks.
See link:hubble/README.adoc[] for setup instructions.
cmd/neutron/genconfig.go (new file, 57 lines)
@@ -0,0 +1,57 @@
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
type StaticHost struct {
|
||||
NebulaAddress string
|
||||
Endpoint []string
|
||||
}
|
||||
|
||||
type FirewallRule struct {
|
||||
Port string `json:"port"`
|
||||
Proto string `json:"proto"`
|
||||
Groups []string `json:"groups"`
|
||||
Any bool `json:"any"`
|
||||
}
|
||||
|
||||
type NodeConfigSchema struct {
|
||||
Address string `json:"address"`
|
||||
Lighthouses []string `json:"lighthouses"`
|
||||
AmLighthouse bool `json:"am_lighthouse"`
|
||||
StaticHosts []StaticHost `json:"static_hosts"`
|
||||
ListenPort int `json:"listen_port"`
|
||||
FirewallInbound []FirewallRule `json:"firewall_inbound"`
|
||||
FirewallOutbound []FirewallRule `json:"firewall_outbound"`
|
||||
Cipher string `json:"cipher"`
|
||||
Cert string `json:"cert"`
|
||||
Netname string
|
||||
}
|
||||
|
||||
func genConfig(config NodeConfigSchema) (err error) {
|
||||
templateData, err := Asset("cmd/neutron/template.yml")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
path := fmt.Sprintf("/etc/nebula/%s/nebula.yml", config.Netname)
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = f.Truncate(0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tmpl, err := template.New("NebulaConfig").Parse(string(templateData))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = tmpl.Execute(f, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return f.Close()
|
||||
}
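
Not part of the commit, but as a rough usage sketch: `genConfig` fills the
bundled `template.yml` with a `NodeConfigSchema` and writes it to
`/etc/nebula/<network>/nebula.yml`. The field values below are assumptions.

[source,go]
----
// illustrative fragment; assumes the network directory already exists
cfg := NodeConfigSchema{
	Address:      "10.10.0.2/24",
	Lighthouses:  []string{"10.10.0.1"},
	AmLighthouse: false,
	ListenPort:   4242,
	Cipher:       "aes",
	Netname:      "homelab",
}
if err := genConfig(cfg); err != nil {
	log.Fatal(err) // assumes the logrus logger used elsewhere in this package
}
----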
cmd/neutron/join.go (new file, 184 lines)
@@ -0,0 +1,184 @@
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/b177y/starship/nebutils"
|
||||
"github.com/b177y/starship/wormhole"
|
||||
"github.com/b177y/starship/xpaseto"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"github.com/teris-io/shortid"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// config to save the address of a quasar server and the node name
|
||||
type NeutronConfig struct {
|
||||
Quasar string `yaml:"quasar"`
|
||||
Nodename string `yaml:"nodename"`
|
||||
}
|
||||
|
||||
// save the neutron config to a yaml file
|
||||
func saveNeutronConfig(netname, quasar, nodename string) error {
|
||||
nconf := NeutronConfig{
|
||||
Quasar: quasar,
|
||||
Nodename: nodename,
|
||||
}
|
||||
path := fmt.Sprintf("/etc/nebula/%s/neutron.yml", netname)
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = f.Truncate(0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
e := yaml.NewEncoder(f)
if err := e.Encode(nconf); err != nil {
return err
}
return f.Close()
|
||||
}
|
||||
|
||||
// request to join a Starship network
|
||||
func signReq(netname, hostname, nodename, qAddr string,
|
||||
privkey, pubkey []byte) {
|
||||
// create request payload
|
||||
t := wormhole.RequestJoinSchema{
|
||||
Netname: netname,
|
||||
Nodename: nodename,
|
||||
Hostname: hostname,
|
||||
PubKey: string(cert.MarshalX25519PublicKey(pubkey)),
|
||||
}
|
||||
|
||||
// turn payload into token
|
||||
jsonToken, err := wormhole.NewToken(t)
|
||||
if err != nil {
|
||||
log.Fatal("Error creating paseto." + err.Error())
|
||||
}
|
||||
// sign token using nebula private key
|
||||
signer := xpaseto.NewSigner(privkey, pubkey)
|
||||
token, err := signer.SelfSignPaseto(jsonToken)
|
||||
if err != nil {
|
||||
log.Fatal("Error signing paseto." + err.Error())
|
||||
}
|
||||
|
||||
body := bytes.NewBuffer([]byte(token))
|
||||
resp, err := http.Post(qAddr+"/api/neutron/join", "text/plain", body)
|
||||
if err != nil {
|
||||
log.Fatal("Error contacting quasar." + err.Error())
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
respBody, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Fatal("Error reading response body from quasar." + err.Error())
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
log.Fatal("Error status from quasar: " + string(respBody))
|
||||
}
|
||||
pubFingerprint := base64.StdEncoding.EncodeToString(pubkey)
|
||||
log.Println("Node Fingerprint: ", pubFingerprint)
|
||||
|
||||
}
|
||||
|
||||
// get the certificate of the CA of a network
|
||||
func getCaCert(qAddr string, netName string) {
|
||||
path := "/etc/nebula/"
|
||||
os.MkdirAll(path, 0775)
|
||||
path = path + netName
|
||||
err := os.Mkdir(path, 0770)
|
||||
if err != nil {
|
||||
if os.IsExist(err) {
|
||||
log.Warning("Network already exists locally. Existing config may be overwritten!")
|
||||
} else if os.IsPermission(err) {
|
||||
log.Fatal("Permission denied. Are you running as root?")
|
||||
} else {
|
||||
log.Fatal("Error: " + err.Error())
|
||||
}
|
||||
}
|
||||
ca_url := qAddr + "/api/networks/" + netName + "/cert"
|
||||
resp, err := http.Get(ca_url)
|
||||
if err != nil {
|
||||
log.Fatal("Could not get ca from Quasar server. " + err.Error())
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
if resp.StatusCode == 404 {
|
||||
log.Fatalf("Network %s does not exist.\n", netName)
|
||||
}
|
||||
log.Fatalf("Bad status code from Quasar: %d", resp.StatusCode)
|
||||
}
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Fatal("Could not read HTTP body")
|
||||
}
|
||||
cacert, _, err := cert.UnmarshalNebulaCertificateFromPEM(body)
|
||||
if err != nil {
|
||||
log.Fatal("Cert is not a valid Nebula Certificate.")
|
||||
}
|
||||
certfp, err := cacert.Sha256Sum()
|
||||
if err != nil {
|
||||
log.Fatal("Cert is not a valid Nebula Certificate. Could not calculate fingerprint.")
|
||||
}
|
||||
fmt.Printf("Certificate Fingerprint: %s\n", certfp)
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
fmt.Print("Trust fingerprint? (Y/n) ")
|
||||
inp, _ := reader.ReadString('\n')
|
||||
if inp == "n\n" || inp == "N\n" {
|
||||
log.Fatal("Certificate is not trusted. Exiting.")
|
||||
}
|
||||
log.Println("Saving certificate")
|
||||
dst := path + "/ca.crt"
|
||||
f, err := os.Create(dst)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not create file %s", dst)
|
||||
}
|
||||
r := bytes.NewReader(body)
_, err = io.Copy(f, r)
if err != nil {
log.Fatal("Could not write certificate to " + dst)
}
f.Close()
}
|
||||
|
||||
func initialise(qAddr string,
|
||||
nodeName string,
|
||||
netName string,
|
||||
) {
|
||||
hostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// validate commandline arguments
|
||||
if qAddr == "" {
|
||||
log.Fatal("Quasar address must be given with -quasar")
|
||||
}
|
||||
if netName == "" {
|
||||
log.Fatal("Network name must be given with -network")
|
||||
}
|
||||
if nodeName == "" {
|
||||
// generate node name based on hostname if none provided
|
||||
hostid, err := shortid.Generate()
|
||||
if err != nil {
|
||||
log.Fatal("Could not generate id for nodename" + err.Error())
|
||||
}
|
||||
nodeName = fmt.Sprintf("%s-%s", hostname, hostid)
|
||||
}
|
||||
getCaCert(qAddr, netName)
|
||||
// create nebula keypair
|
||||
pubkey, privkey := nebutils.X25519KeyPair()
|
||||
err = nebutils.SaveKey("/etc/nebula/"+netName,
|
||||
"neutron.key",
|
||||
privkey,
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatal("Error saving key" + err.Error())
|
||||
}
|
||||
// request server to sign pubkey
|
||||
signReq(netName, hostname, nodeName, qAddr,
|
||||
privkey, pubkey)
|
||||
// save config
|
||||
saveNeutronConfig(netName, qAddr, nodeName)
|
||||
}
cmd/neutron/main.go (new file, 62 lines)
@@ -0,0 +1,62 @@
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Set log level
|
||||
log.SetLevel(log.DebugLevel)
|
||||
|
||||
// define -help command flag
|
||||
printUsage := flag.Bool("help", false, "Print command line usage")
|
||||
|
||||
// define subcommands
|
||||
joinCommand := flag.NewFlagSet("join", flag.ExitOnError)
|
||||
updateCommand := flag.NewFlagSet("update", flag.ExitOnError)
|
||||
|
||||
// define flags for join subcommand
|
||||
joinQaddrFlag := joinCommand.String("quasar", "", "Quasar address")
|
||||
joinNameFlag := joinCommand.String("name", "", "Node name")
|
||||
joinNetnameFlag := joinCommand.String("network", "", "Name of network to join")
|
||||
|
||||
// define flags for update subcommand
|
||||
updateNetnameFlag := updateCommand.String("network", "", "Name of network to update")
|
||||
|
||||
// parse basic flags (for help option)
|
||||
flag.Parse()
|
||||
if *printUsage {
|
||||
flag.Usage()
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
if len(os.Args) < 2 {
|
||||
fmt.Println("join or update subcommand is required.")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
switch os.Args[1] {
|
||||
case "join":
|
||||
joinCommand.Parse(os.Args[2:])
|
||||
case "update":
|
||||
updateCommand.Parse(os.Args[2:])
|
||||
default:
|
||||
flag.PrintDefaults()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if joinCommand.Parsed() {
|
||||
// run initialise (join) function
|
||||
initialise(*joinQaddrFlag,
|
||||
*joinNameFlag,
|
||||
*joinNetnameFlag,
|
||||
)
|
||||
} else if updateCommand.Parsed() {
|
||||
// run update function
|
||||
update(*updateNetnameFlag)
|
||||
}
|
||||
}
cmd/neutron/template.go (generated by go-bindata, new file, 248 lines)
@@ -0,0 +1,248 @@
// Code generated for package main by go-bindata DO NOT EDIT. (@generated)
|
||||
// sources:
|
||||
// cmd/neutron/template.yml
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func bindataRead(data []byte, name string) ([]byte, error) {
|
||||
gz, err := gzip.NewReader(bytes.NewBuffer(data))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Read %q: %v", name, err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
_, err = io.Copy(&buf, gz)
|
||||
clErr := gz.Close()
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Read %q: %v", name, err)
|
||||
}
|
||||
if clErr != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
type asset struct {
|
||||
bytes []byte
|
||||
info os.FileInfo
|
||||
}
|
||||
|
||||
type bindataFileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
// Name return file name
|
||||
func (fi bindataFileInfo) Name() string {
|
||||
return fi.name
|
||||
}
|
||||
|
||||
// Size return file size
|
||||
func (fi bindataFileInfo) Size() int64 {
|
||||
return fi.size
|
||||
}
|
||||
|
||||
// Mode return file mode
|
||||
func (fi bindataFileInfo) Mode() os.FileMode {
|
||||
return fi.mode
|
||||
}
|
||||
|
||||
// Mode return file modify time
|
||||
func (fi bindataFileInfo) ModTime() time.Time {
|
||||
return fi.modTime
|
||||
}
|
||||
|
||||
// IsDir return file whether a directory
|
||||
func (fi bindataFileInfo) IsDir() bool {
|
||||
return fi.mode&os.ModeDir != 0
|
||||
}
|
||||
|
||||
// Sys return file is sys mode
|
||||
func (fi bindataFileInfo) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
var _cmdNeutronTemplateYml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x52\xcb\x8e\xdb\x30\x0c\xbc\xeb\x2b\xf8\x03\x75\xbc\x5d\xb4\x07\xdd\x82\xa2\x2f\x60\xb1\x2d\xd0\x0f\x30\x14\x89\x76\x84\xc8\x92\x2b\x51\x69\x02\xc3\xff\x5e\x50\x76\x63\xa7\x87\x45\x81\x5e\x37\xb9\x88\x33\xa3\xe1\x98\xd4\x70\xb2\x52\x00\x68\x25\x61\x87\xa4\x77\x1e\x0f\xd9\xa9\xdd\x38\x56\xcf\x48\x5e\xf5\x38\x4d\x3b\xad\x2a\x1d\x49\x00\x9c\xf0\xfa\x82\xcc\x63\xa6\x18\x7c\x75\xc2\x2b\x3b\x62\xa4\x7f\x10\xb3\xb1\x48\xa4\xc8\xea\xe6\x18\x12\x35\xbd\x1a\x38\xd0\x38\x46\xe5\x3b\x84\xea\x47\xe1\xbe\x84\x44\x69\x9a\x0a\x01\xd5\x73\x31\xdc\x1b\x13\x31\x25\x98\x26\x09\xe3\x58\x7d\xf4\x66\x08\xd6\xd3\xa2\x42\x6f\xa6\x49\x08\x67\xbb\x23\x1d\x43\x4e\xc8\xae\xaa\x6f\x36\x00\xdf\xda\xf7\x4f\x37\xa0\xdc\xb4\x9e\x30\x9e\x95\x93\xf0\xbe\x16\x00\x9c\x29\xdd\x05\x5a\xf5\x73\x20\x80\x37\x25\x14\xdc\x37\xd6\x76\x38\x62\x2c\x3d\x3e\x94\x23\x83\x43\xf6\xfa\x78\x65\xbb\x72\x92\x40\x31\x23\x87\x4c\x84\x5e\x2e\xed\x24\xd4\x55\xf9\xb3\x2c\xf0\x14\xd9\xfe\xa9\x68\xbe\x87\x48\xdc\x48\x50\x2e\x7a\x63\x93\x3a\x38\x34\x12\x5a\xe5\x12\x32\x82\xe7\xd2\xf4\x36\x6a\xb6\x31\x31\x0c\x8d\x0b\x5a\xb9\xe6\x10\x83\x32\x5a\x71\x9b\xdb\x15\x66\xfb\xec\xc8\xde\xe3\x74\x69\x7e\x66\xcc\x28\xe1\x5d\xcd\x2e\x3d\x65\x09\x0f\x8f\x75\x2d\x84\x0b\x5d\x67\x7d\xc7\x11\x1c\x9e\xd1\x49\xb0\xbe\x0d\x02\xa0\x0d\xb1\x57\x24\x81\xf0\x42\x42\xb4\x36\xe2\x2f\xe5\x5c\x79\x62\xc1\x7b\x8a\x4a\x9f\x64\x19\x1a\xe9\xa1\x21\xdb\x63\xc8\x24\xe1\xe1\x6d\x5f\xc0\x6c\x36\xe0\xe3\x8c\x19\x6c\x55\x76\xb4\x11\xd7\x33\xd1\xab\x4b\xc3\xa6\xa8\xc9\x06\x9f\x98\xe0\x9f\x00\x08\x99\x0e\x21\x7b\x33\x77\x1a\x47\x58\x56\xf7\x69\x89\xf3\x6d\xe1\x61\x59\x60\xd1\xd8\x16\xaa\xbd\xbf\xc2\x6d\xa9\xeb\xec\xff\x4c\x7d\xd6\x0e\x31\x50\x58\x08\x3e\xae\xcc\xbc\x3d\xe5\xaf\xab\x2b\xba\x84\xff\x63\xd9\xc5\x90\x87\x24\x97\x6a\xfb\x31\x9f\x0b\xb3\x2a\xef\xdf\xe1\x1a\xe0\xaf\xcf\xdc\xd4\xdb\xca\xfa\x17\x27\xf6\xd5\xbf\x0e\x6c\x5b\xfd\x0e\x00\x00\xff\xff\x25\x0b\x88\xc0\x37\x05\x00\x00")
|
||||
|
||||
func cmdNeutronTemplateYmlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
_cmdNeutronTemplateYml,
|
||||
"cmd/neutron/template.yml",
|
||||
)
|
||||
}
|
||||
|
||||
func cmdNeutronTemplateYml() (*asset, error) {
|
||||
bytes, err := cmdNeutronTemplateYmlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "cmd/neutron/template.yml", size: 1335, mode: os.FileMode(420), modTime: time.Unix(1621717310, 0)}
|
||||
a := &asset{bytes: bytes, info: info}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Asset loads and returns the asset for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func Asset(name string) ([]byte, error) {
|
||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[cannonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.bytes, nil
|
||||
}
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
|
||||
// MustAsset is like Asset but panics when Asset would return an error.
|
||||
// It simplifies safe initialization of global variables.
|
||||
func MustAsset(name string) []byte {
|
||||
a, err := Asset(name)
|
||||
if err != nil {
|
||||
panic("asset: Asset(" + name + "): " + err.Error())
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
// AssetInfo loads and returns the asset info for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func AssetInfo(name string) (os.FileInfo, error) {
|
||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[cannonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.info, nil
|
||||
}
|
||||
return nil, fmt.Errorf("AssetInfo %s not found", name)
|
||||
}
|
||||
|
||||
// AssetNames returns the names of the assets.
|
||||
func AssetNames() []string {
|
||||
names := make([]string, 0, len(_bindata))
|
||||
for name := range _bindata {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// _bindata is a table, holding each asset generator, mapped to its name.
|
||||
var _bindata = map[string]func() (*asset, error){
|
||||
"cmd/neutron/template.yml": cmdNeutronTemplateYml,
|
||||
}
|
||||
|
||||
// AssetDir returns the file names below a certain
|
||||
// directory embedded in the file by go-bindata.
|
||||
// For example if you run go-bindata on data/... and data contains the
|
||||
// following hierarchy:
|
||||
// data/
|
||||
// foo.txt
|
||||
// img/
|
||||
// a.png
|
||||
// b.png
|
||||
// then AssetDir("data") would return []string{"foo.txt", "img"}
|
||||
// AssetDir("data/img") would return []string{"a.png", "b.png"}
|
||||
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
|
||||
// AssetDir("") will return []string{"data"}.
|
||||
func AssetDir(name string) ([]string, error) {
|
||||
node := _bintree
|
||||
if len(name) != 0 {
|
||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
pathList := strings.Split(cannonicalName, "/")
|
||||
for _, p := range pathList {
|
||||
node = node.Children[p]
|
||||
if node == nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if node.Func != nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
rv := make([]string, 0, len(node.Children))
|
||||
for childName := range node.Children {
|
||||
rv = append(rv, childName)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
type bintree struct {
|
||||
Func func() (*asset, error)
|
||||
Children map[string]*bintree
|
||||
}
|
||||
|
||||
var _bintree = &bintree{nil, map[string]*bintree{
|
||||
"cmd": &bintree{nil, map[string]*bintree{
|
||||
"neutron": &bintree{nil, map[string]*bintree{
|
||||
"template.yml": &bintree{cmdNeutronTemplateYml, map[string]*bintree{}},
|
||||
}},
|
||||
}},
|
||||
}}
|
||||
|
||||
// RestoreAsset restores an asset under the given directory
|
||||
func RestoreAsset(dir, name string) error {
|
||||
data, err := Asset(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info, err := AssetInfo(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RestoreAssets restores an asset under the given directory recursively
|
||||
func RestoreAssets(dir, name string) error {
|
||||
children, err := AssetDir(name)
|
||||
// File
|
||||
if err != nil {
|
||||
return RestoreAsset(dir, name)
|
||||
}
|
||||
// Dir
|
||||
for _, child := range children {
|
||||
err = RestoreAssets(dir, filepath.Join(name, child))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _filePath(dir, name string) string {
|
||||
cannonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
|
||||
}
cmd/neutron/template.yml (new file, 75 lines)
@@ -0,0 +1,75 @@
pki:
|
||||
ca: /etc/nebula/{{.Netname}}/ca.crt
|
||||
key: /etc/nebula/{{.Netname}}/neutron.key
|
||||
cert: /etc/nebula/{{.Netname}}/neutron.crt
|
||||
|
||||
static_host_map:
|
||||
{{range .StaticHosts}}
|
||||
{{ .NebulaAddress }}: {{.Endpoint}}
|
||||
{{end}}
|
||||
|
||||
lighthouse:
|
||||
am_lighthouse: {{.AmLighthouse}}
|
||||
interval: 60
|
||||
hosts:
|
||||
{{range .Lighthouses}}
|
||||
- {{ . }}
|
||||
{{end}}
|
||||
|
||||
cipher: {{.Cipher}}
|
||||
|
||||
punchy:
|
||||
punch: true
|
||||
|
||||
listen:
|
||||
host: 0.0.0.0
|
||||
port: {{ .ListenPort }}
|
||||
|
||||
tun:
|
||||
disabled: false
|
||||
dev: {{.Netname}}0
|
||||
drop_local_broadcast: false
|
||||
drop_multicast: false
|
||||
tx_queue: 500
|
||||
mtu: 1300
|
||||
|
||||
logging:
|
||||
level: info
|
||||
format: text
|
||||
|
||||
firewall:
|
||||
conntrack:
|
||||
tcp_timeout: 12m
|
||||
udp_timeout: 3m
|
||||
default_timeout: 10m
|
||||
max_connections: 100000
|
||||
outbound:
|
||||
{{ range .FirewallOutbound }}
|
||||
{{ if .Any }}
|
||||
- port: {{ .Port }}
|
||||
proto: {{ .Proto }}
|
||||
host: any
|
||||
{{ else }}
|
||||
- port: {{ .Port }}
|
||||
proto: {{ .Proto }}
|
||||
groups:
|
||||
{{ range .Groups }}
|
||||
- {{ . }}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
inbound:
|
||||
{{ range .FirewallInbound }}
|
||||
{{ if .Any }}
|
||||
- port: {{ .Port }}
|
||||
proto: {{ .Proto }}
|
||||
host: any
|
||||
{{ else }}
|
||||
- port: {{ .Port }}
|
||||
proto: {{ .Proto }}
|
||||
groups:
|
||||
{{ range .Groups }}
|
||||
- {{ . }}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
{{ end }}
cmd/neutron/update.go (new file, 163 lines)
@@ -0,0 +1,163 @@
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/b177y/starship/nebutils"
|
||||
"github.com/b177y/starship/wormhole"
|
||||
"github.com/b177y/starship/xpaseto"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// create signed XPASETO token to authenticate to Quasar server
|
||||
func getIdentityToken(netname, nodename string,
|
||||
privkey []byte) (token string,
|
||||
err error) {
|
||||
t := wormhole.NodeIdentitySchema{
|
||||
Netname: netname,
|
||||
Nodename: nodename,
|
||||
}
|
||||
jsonToken, err := wormhole.NewToken(t)
|
||||
if err != nil {
|
||||
log.Fatal("Error creating paseto." + err.Error())
|
||||
}
|
||||
signer := xpaseto.NewSigner(privkey, []byte{})
|
||||
token, err = signer.SelfSignPaseto(jsonToken)
|
||||
if err != nil {
|
||||
log.Fatal("Error signing paseto." + err.Error())
|
||||
}
|
||||
return token, nil
|
||||
}
|
||||
|
||||
// open private key to use for signing requests
|
||||
func getKey(netname string) (key []byte, err error) {
|
||||
privpem, err := ioutil.ReadFile(fmt.Sprintf("/etc/nebula/%s/neutron.key",
|
||||
netname))
|
||||
if err != nil {
|
||||
log.Error("Error reading private key from /etc/nebula")
|
||||
return nil, err
|
||||
}
|
||||
key, _, err = cert.UnmarshalX25519PrivateKey(privpem)
|
||||
if err != nil {
|
||||
log.Error("Error decoding key from /etc/nebula")
|
||||
return nil, err
|
||||
}
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// Save certificate from fetched config to neutron network directory
|
||||
func saveCert(netname, cert string) error {
|
||||
path := fmt.Sprintf("/etc/nebula/%s/neutron.crt", netname)
|
||||
err := ioutil.WriteFile(path, []byte(cert), 0660)
|
||||
return err
|
||||
}
|
||||
|
||||
// get public key from CA certificate and validate cert
|
||||
func getPubkey(netname string) (pubkey []byte, err error) {
|
||||
cacert, err := ioutil.ReadFile(fmt.Sprintf("/etc/nebula/%s/ca.crt",
|
||||
netname))
|
||||
if err != nil {
|
||||
return pubkey, err
|
||||
}
|
||||
nc, _, err := cert.UnmarshalNebulaCertificateFromPEM(cacert)
if err != nil {
return pubkey, err
}
caPool := cert.NewCAPool()
|
||||
// validate certificate
|
||||
_, err = caPool.AddCACertificate(cacert)
|
||||
if err != nil {
|
||||
log.Fatalf("Cert for network %s is not valid: %s.\n", netname, err.Error())
|
||||
}
|
||||
// get public key and convert from edwards format to curve25519
|
||||
edpub := nc.Details.PublicKey
|
||||
pubkey = nebutils.Ed25519PublicKeyToCurve25519(edpub)
|
||||
return pubkey, nil
|
||||
|
||||
}
|
||||
|
||||
func getConfig(netname, nodename, qAddr string) {
|
||||
endPoint := fmt.Sprintf("%s/api/neutron/config?net=%s&node=%s",
|
||||
qAddr, netname, nodename)
|
||||
privkey, err := getKey(netname)
if err != nil {
log.Fatal("Could not read private key: " + err.Error())
}
token, err := getIdentityToken(netname, nodename, privkey)
|
||||
body := bytes.NewBuffer([]byte(token))
|
||||
req, err := http.NewRequest("GET", endPoint, body)
|
||||
if err != nil {
|
||||
log.Fatal("Error creating request." + err.Error())
|
||||
}
|
||||
client := &http.Client{}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
log.Fatal("Error contacting Quasar." + err.Error())
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
b, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
if resp.StatusCode == 425 {
|
||||
log.Error("Node is not enabled - please enable it from the frontend!")
|
||||
return
|
||||
}
|
||||
log.Error("Error from Quasar: " + string(b))
|
||||
return
|
||||
}
|
||||
pubkey, err := getPubkey(netname)
|
||||
if err != nil {
|
||||
log.Fatal("Could not get CA pubkey: " + err.Error())
|
||||
}
|
||||
signer := xpaseto.NewSigner([]byte{}, pubkey)
|
||||
jsonToken, err := signer.ParsePaseto(string(b))
|
||||
if err != nil {
|
||||
log.Fatal("Could not decode response token: " + err.Error())
|
||||
}
|
||||
config := *new(NodeConfigSchema)
|
||||
err = wormhole.SchemaFromJSONToken(jsonToken, &config)
|
||||
if err != nil {
|
||||
log.Error("Can't decode node config: ", err)
|
||||
return
|
||||
}
|
||||
config.Netname = netname
|
||||
err = saveCert(netname, config.Cert)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
err = genConfig(config)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
log.Println("Successfully updated config.")
|
||||
}
|
||||
|
||||
func loadNeutronConfig(netname string) (config NeutronConfig, err error) {
|
||||
path := fmt.Sprintf("/etc/nebula/%s/neutron.yml", netname)
|
||||
config = NeutronConfig{}
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return config, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
d := yaml.NewDecoder(file)
|
||||
|
||||
if err := d.Decode(&config); err != nil {
|
||||
return config, err
|
||||
}
|
||||
|
||||
return config, err
|
||||
}
|
||||
func update(netname string) {
|
||||
config, err := loadNeutronConfig(netname)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
getConfig(netname, config.Nodename, config.Quasar)
|
||||
}
cmd/quasar/address.go (new file, 67 lines)
@@ -0,0 +1,67 @@
package main
|
||||
|
||||
import (
"errors"
"net"
)
|
||||
|
||||
// https://gist.github.com/kotakanbe/d3059af990252ba89a82
|
||||
func possibleAddresses(cidr string) ([]string, error) {
|
||||
ip, ipnet, err := net.ParseCIDR(cidr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var ips []string
|
||||
for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) {
|
||||
ips = append(ips, ip.String())
|
||||
}
|
||||
// remove network address and broadcast address
|
||||
return ips[1 : len(ips)-1], nil
|
||||
}
|
||||
|
||||
// http://play.golang.org/p/m8TNTtygK0
|
||||
func inc(ip net.IP) {
|
||||
for j := len(ip) - 1; j >= 0; j-- {
|
||||
ip[j]++
|
||||
if ip[j] > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// based off https://www.codegrepper.com/code-examples/go/golang+diff+of+string+arrays
|
||||
func difference(slice1 []string, slice2 []string) []string {
|
||||
var diff []string
|
||||
|
||||
// Loop two times, first to find slice1 strings not in slice2,
|
||||
// second loop to find slice2 strings not in slice1
|
||||
for i := 0; i < 2; i++ {
|
||||
for _, s1 := range slice1 {
|
||||
found := false
|
||||
for _, s2 := range slice2 {
|
||||
if s1 == s2 {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
// String not found. We add it to return slice
|
||||
if !found {
|
||||
diff = append(diff, s1)
|
||||
}
|
||||
}
|
||||
// Swap the slices, only if it was the first loop
|
||||
if i == 0 {
|
||||
slice1, slice2 = slice2, slice1
|
||||
}
|
||||
}
|
||||
|
||||
return diff
|
||||
}
|
||||
|
||||
func newAddress(cidr string, usedAddresses []string) (address string,
|
||||
err error) {
|
||||
possible, err := possibleAddresses(cidr)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
unused := difference(possible, usedAddresses)
if len(unused) == 0 {
return "", errors.New("no unused addresses left in network CIDR")
}
return unused[0], nil
}
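
A small usage sketch (assumed values, not part of the commit; `log` and `fmt`
are assumed to be imported): allocate the next free address in a network's
CIDR, skipping addresses already handed out.

[source,go]
----
// with 10.10.0.1 and 10.10.0.2 already taken, addr is "10.10.0.3";
// the network and broadcast addresses are excluded by possibleAddresses
addr, err := newAddress("10.10.0.0/24", []string{"10.10.0.1", "10.10.0.2"})
if err != nil {
	log.Fatal(err)
}
fmt.Println(addr)
----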
cmd/quasar/config.go (new file, 40 lines)
@@ -0,0 +1,40 @@
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
Database struct {
|
||||
Type string `yaml:"type"`
|
||||
Source string `yaml:"src"`
|
||||
} `yaml:"db"`
|
||||
Quasar struct {
|
||||
Name string `yaml:"name"`
|
||||
Listen struct {
|
||||
Host string `yaml:"host"`
|
||||
Port int `yaml:"port"`
|
||||
} `yaml:"listen"`
|
||||
} `yaml:"quasar"`
|
||||
Authsecret string
|
||||
}
|
||||
|
||||
// https://dev.to/koddr/let-s-write-config-for-your-golang-web-app-on-right-way-yaml-5ggp
|
||||
func NewConfig(configPath string) (*Config, error) {
|
||||
config := &Config{}
|
||||
file, err := os.Open(configPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
d := yaml.NewDecoder(file)
|
||||
|
||||
if err := d.Decode(&config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return config, nil
|
||||
}
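
A short usage fragment (not part of the commit; `log`, `os` and `fmt` are
assumed to be imported): load the yaml config and the `QUASAR_AUTHSECRET`
environment variable that the README sets before starting the server.

[source,go]
----
cfg, err := NewConfig("examples/quasar.yml")
if err != nil {
	log.Fatal("could not load config: " + err.Error())
}
// Authsecret has no yaml tag, so it is taken from the environment instead
cfg.Authsecret = os.Getenv("QUASAR_AUTHSECRET")
fmt.Printf("listening on %s:%d\n", cfg.Quasar.Listen.Host, cfg.Quasar.Listen.Port)
----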
cmd/quasar/db.go (new file, 712 lines)
@@ -0,0 +1,712 @@
package main
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type database interface {
|
||||
connect(filepath string) error
|
||||
addNetwork(
|
||||
cacert []byte,
|
||||
capriv []byte,
|
||||
name string,
|
||||
cidr string,
|
||||
cipher string,
|
||||
) error
|
||||
getCert(network, host string) ([]byte, error)
|
||||
updateLatestFetch(netname, nodename, timestamp string) error
|
||||
addJoinRequest(netname string,
|
||||
nodename string,
|
||||
hostname string,
|
||||
address string,
|
||||
pubkey []byte,
|
||||
) error
|
||||
allNetworks() (
|
||||
networks []NetOverviewSchema,
|
||||
err error,
|
||||
)
|
||||
deleteNetwork(netname string) error
|
||||
networkInfo(netname string) (
|
||||
network NetSchema,
|
||||
err error,
|
||||
)
|
||||
updateNetwork(netname, cidr, cipher string,
|
||||
groups []string) error
|
||||
allNodes(netname string) (
|
||||
nodes []NodeOverviewSchema,
|
||||
err error,
|
||||
)
|
||||
updateNodeStatus(netname string,
|
||||
nodename string,
|
||||
status string) error
|
||||
updateNodeInfo(netname string,
|
||||
nodename string,
|
||||
node NodeSchema) error
|
||||
getNodeStatus(netname string,
|
||||
nodename string) (
|
||||
status string,
|
||||
err error,
|
||||
)
|
||||
getNodeInfo(netname string, nodename string) (nodeinfo NodeSchema,
|
||||
err error)
|
||||
getNodeConfig(netname, nodename string) (config NodeConfigSchema,
|
||||
err error)
|
||||
getNodePubkey(netname string,
|
||||
nodename string) (pubkey []byte,
|
||||
err error)
|
||||
getNetworkCA(netname string) (privkey []byte,
|
||||
cert []byte,
|
||||
err error)
|
||||
saveNodeCert(netname string,
|
||||
nodename string,
|
||||
cert []byte) error
|
||||
newAddress(netname string) (address string,
|
||||
err error)
|
||||
}
|
||||
|
||||
// boltdb interface
|
||||
type boltdbi struct {
|
||||
db *bolt.DB
|
||||
}
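
Not in the commit, but a common Go idiom that documents the relationship
between the two types above: a compile-time assertion that `*boltdbi`
satisfies the `database` interface.

[source,go]
----
// fails to compile if *boltdbi stops implementing database
var _ database = (*boltdbi)(nil)
----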
|
||||
|
||||
func (b *boltdbi) addNetwork(cacert []byte,
|
||||
capriv []byte,
|
||||
name string,
|
||||
cidr string,
|
||||
cipher string,
|
||||
) (err error) {
|
||||
log.Info("Bolt adding network: ", name)
|
||||
err = b.db.Update(func(tx *bolt.Tx) error {
|
||||
nb, err := tx.CreateBucket([]byte(name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = nb.Put([]byte("NET_NAME"), []byte(name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = nb.Put([]byte("CA_PRIV_KEY"), capriv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = nb.Put([]byte("CA_CERT"), cacert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = nb.Put([]byte("CIDR"), []byte(cidr))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
groups, err := json.Marshal([]string{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = nb.Put([]byte("GROUPS"), groups)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = nb.Put([]byte("CIPHER"), []byte(cipher))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *boltdbi) connect(filepath string) (err error) {
|
||||
log.Info("Bolt connecting ", filepath)
|
||||
b.db, err = bolt.Open(filepath,
|
||||
0600,
|
||||
&bolt.Options{Timeout: 3 * time.Second},
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *boltdbi) getCert(network, host string) (cert []byte,
|
||||
err error) {
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket([]byte(network))
|
||||
if bkt == nil {
|
||||
return fmt.Errorf("NONETWORK")
|
||||
}
|
||||
if host != "" {
|
||||
nodeBkt := bkt.Bucket([]byte(host))
|
||||
if nodeBkt == nil {
|
||||
return fmt.Errorf("NOHOST")
|
||||
}
|
||||
cert = nodeBkt.Get([]byte("cert"))
|
||||
return nil
|
||||
}
|
||||
cert = bkt.Get([]byte("CA_CERT"))
|
||||
return nil
|
||||
})
|
||||
return cert, err
|
||||
}
|
||||
|
||||
func (b *boltdbi) addJoinRequest(netname string,
|
||||
nodename string,
|
||||
hostname string,
|
||||
address string,
|
||||
pubkey []byte,
|
||||
) (err error) {
|
||||
err = b.db.Update(func(tx *bolt.Tx) error {
|
||||
netBkt := tx.Bucket([]byte(netname))
|
||||
if netBkt == nil {
|
||||
return errors.Errorf("Network does not exist.")
|
||||
}
|
||||
nodeBkt, err := netBkt.CreateBucket([]byte(nodename))
|
||||
if err != nil {
|
||||
return errors.Errorf("Node exists in network.")
|
||||
}
|
||||
err = nodeBkt.Put([]byte("hostname"), []byte(hostname))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = nodeBkt.Put([]byte("address"), []byte(address))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = nodeBkt.Put([]byte("latest_fetch"), []byte("NEVER"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = nodeBkt.Put([]byte("pubkey"), pubkey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = nodeBkt.Put([]byte("status"), []byte("pending"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = nodeBkt.Put([]byte("listen_port"), []byte("0"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
groups, err := json.Marshal([]string{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = nodeBkt.Put([]byte("groups"), groups)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = nodeBkt.Put([]byte("is_lighthouse"), []byte("false"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = nodeBkt.Put([]byte("static_address"), []byte(""))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
inbound, outbound := defaultRules()
|
||||
log.Println("Converting to bytes", inbound, outbound)
|
||||
inboundBytes, err := json.Marshal(inbound)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
outboundBytes, err := json.Marshal(outbound)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = json.Unmarshal(outboundBytes, &outbound)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = nodeBkt.Put([]byte("firewall_outbound"), outboundBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = nodeBkt.Put([]byte("firewall_inbound"), inboundBytes)
|
||||
return err
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *boltdbi) allNetworks() (networks []NetOverviewSchema, err error) {
|
||||
networks = make([]NetOverviewSchema, 0)
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
tx.ForEach(func(name []byte, b *bolt.Bucket) error {
|
||||
netcidr := b.Get([]byte("CIDR"))
|
||||
n := NetOverviewSchema{
|
||||
Name: string(name),
|
||||
Cidr: string(netcidr),
|
||||
}
|
||||
networks = append(networks, n)
|
||||
return nil
|
||||
})
|
||||
return nil
|
||||
})
|
||||
return networks, err
|
||||
}
|
||||
|
||||
func (b *boltdbi) deleteNetwork(netname string) (err error) {
|
||||
err = b.db.Update(func(tx *bolt.Tx) error {
|
||||
return tx.DeleteBucket([]byte(netname))
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *boltdbi) networkInfo(netname string) (network NetSchema,
|
||||
err error) {
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket([]byte(netname))
|
||||
if bkt == nil {
|
||||
return fmt.Errorf("NONETWORK")
|
||||
}
|
||||
netcidr := bkt.Get([]byte("CIDR"))
|
||||
cipher := bkt.Get([]byte("CIPHER"))
|
||||
groupsBytes := bkt.Get([]byte("GROUPS"))
|
||||
var groups []string
|
||||
err := json.Unmarshal(groupsBytes, &groups)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
network = NetSchema{
|
||||
Name: netname,
|
||||
Cidr: string(netcidr),
|
||||
Cipher: string(cipher),
|
||||
Groups: groups,
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return network, err
|
||||
}
|
||||
|
||||
func (b *boltdbi) allNodes(netname string) (nodes []NodeOverviewSchema,
|
||||
err error) {
|
||||
nodes = make([]NodeOverviewSchema, 0)
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket([]byte(netname))
|
||||
if bkt == nil {
|
||||
return fmt.Errorf("NONETWORK")
|
||||
}
|
||||
err := bkt.ForEach(func(key, val []byte) error {
|
||||
if val == nil {
|
||||
// keyval is bucket so is node
|
||||
nb := bkt.Bucket(key)
|
||||
hostName := nb.Get([]byte("hostname"))
|
||||
status := nb.Get([]byte("status"))
|
||||
address := nb.Get([]byte("address"))
|
||||
pubkeyBytes := nb.Get([]byte("pubkey"))
|
||||
pubkey := base64.StdEncoding.EncodeToString([]byte(pubkeyBytes))
|
||||
latest_fetch := nb.Get([]byte("latest_fetch"))
|
||||
node := NodeOverviewSchema{
|
||||
Nodename: string(key),
|
||||
Hostname: string(hostName),
|
||||
Status: string(status),
|
||||
Address: string(address),
|
||||
LatestFetch: string(latest_fetch),
|
||||
PubKey: pubkey,
|
||||
}
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
return err
|
||||
})
|
||||
return err
|
||||
})
|
||||
return nodes, err
|
||||
|
||||
}
|
||||
|
||||
func (b *boltdbi) updateNodeStatus(netname string,
|
||||
nodename string,
|
||||
status string,
|
||||
) (err error) {
|
||||
err = b.db.Update(func(tx *bolt.Tx) error {
|
||||
netBkt := tx.Bucket([]byte(netname))
|
||||
if netBkt == nil {
|
||||
return errors.Errorf("Network does not exist.")
|
||||
}
|
||||
nodeBkt := netBkt.Bucket([]byte(nodename))
|
||||
if nodeBkt == nil {
|
||||
return errors.Errorf("Node does not exist in network.")
|
||||
}
|
||||
err = nodeBkt.Put([]byte("status"), []byte(status))
|
||||
return err
|
||||
})
|
||||
return err
|
||||
|
||||
}
|
||||
|
||||
func (b *boltdbi) updateNodeInfo(netname string,
|
||||
nodename string,
|
||||
node NodeSchema,
|
||||
) (err error) {
|
||||
err = b.db.Update(func(tx *bolt.Tx) error {
|
||||
netBkt := tx.Bucket([]byte(netname))
|
||||
if netBkt == nil {
|
||||
return errors.Errorf("Network does not exist.")
|
||||
}
|
||||
nodeBkt := netBkt.Bucket([]byte(nodename))
|
||||
if nodeBkt == nil {
|
||||
return errors.Errorf("Node does not exist in network.")
|
||||
}
|
||||
inboundBytes, err := json.Marshal(node.FirewallInbound)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
outboundBytes, err := json.Marshal(node.FirewallOutbound)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = nodeBkt.Put([]byte("firewall_outbound"), outboundBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = nodeBkt.Put([]byte("firewall_inbound"), inboundBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if node.StaticAddress != "" {
|
||||
err = nodeBkt.Put([]byte("static_address"), []byte(node.StaticAddress))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err = nodeBkt.Put([]byte("is_lighthouse"), []byte(strconv.FormatBool(node.Lighthouse)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = nodeBkt.Put([]byte("listen_port"), []byte(fmt.Sprint(node.ListenPort)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Println("Adding groups to db:", node.Groups)
|
||||
groupsBytes, err := json.Marshal(node.Groups)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = nodeBkt.Put([]byte("groups"), groupsBytes)
|
||||
return err
|
||||
})
|
||||
return err
|
||||
|
||||
}
|
||||
|
||||
func (b *boltdbi) getNodeStatus(netname string,
|
||||
nodename string) (status string,
|
||||
err error) {
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
netBkt := tx.Bucket([]byte(netname))
|
||||
if netBkt == nil {
|
||||
return errors.Errorf("Network does not exist.")
|
||||
}
|
||||
nodeBkt := netBkt.Bucket([]byte(nodename))
|
||||
if nodeBkt == nil {
|
||||
return errors.Errorf("Node does not exist in network.")
|
||||
}
|
||||
statusBytes := nodeBkt.Get([]byte("status"))
|
||||
status = string(statusBytes)
|
||||
return err
|
||||
})
|
||||
return status, err
|
||||
}
|
||||
|
||||
func (b *boltdbi) getNodeInfo(netname string,
|
||||
nodename string) (node NodeSchema,
|
||||
err error) {
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
netBkt := tx.Bucket([]byte(netname))
|
||||
if netBkt == nil {
|
||||
return errors.Errorf("Network does not exist.")
|
||||
}
|
||||
nodeBkt := netBkt.Bucket([]byte(nodename))
|
||||
if nodeBkt == nil {
|
||||
return errors.Errorf("Node does not exist in network.")
|
||||
}
|
||||
statusBytes := nodeBkt.Get([]byte("status"))
|
||||
hostnameBytes := nodeBkt.Get([]byte("hostname"))
|
||||
addressBytes := nodeBkt.Get([]byte("address"))
|
||||
listenPortBytes := nodeBkt.Get([]byte("listen_port"))
|
||||
listenPort, err := strconv.Atoi(string(listenPortBytes))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
staticAddressBytes := nodeBkt.Get([]byte("static_address"))
|
||||
groupsBytes := nodeBkt.Get([]byte("groups"))
|
||||
var groups []string
|
||||
err = json.Unmarshal(groupsBytes, &groups)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
inboundBytes := nodeBkt.Get([]byte("firewall_inbound"))
|
||||
outboundBytes := nodeBkt.Get([]byte("firewall_outbound"))
|
||||
var inbound []FirewallRule
|
||||
var outbound []FirewallRule
|
||||
err = json.Unmarshal(inboundBytes, &inbound)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = json.Unmarshal(outboundBytes, &outbound)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
lighthouseBytes := nodeBkt.Get([]byte("is_lighthouse"))
|
||||
is_lighthouse, err := strconv.ParseBool(string(lighthouseBytes))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
node = NodeSchema{
|
||||
Nodename: nodename,
|
||||
Hostname: string(hostnameBytes),
|
||||
Status: string(statusBytes),
|
||||
Address: string(addressBytes),
|
||||
StaticAddress: string(staticAddressBytes),
|
||||
ListenPort: listenPort,
|
||||
Lighthouse: is_lighthouse,
|
||||
Groups: groups,
|
||||
FirewallInbound: inbound,
|
||||
FirewallOutbound: outbound,
|
||||
}
|
||||
return err
|
||||
})
|
||||
return node, err
|
||||
}
|
||||
|
||||
func getLighthouses(bkt *bolt.Bucket) (lighthouses []string,
|
||||
err error) {
|
||||
lighthouses = []string{}
|
||||
err = bkt.ForEach(func(key, val []byte) error {
|
||||
if val == nil {
|
||||
nb := bkt.Bucket(key)
|
||||
log.Println("Checking if node is lighthouse", string(key))
|
||||
lhBytes := nb.Get([]byte("is_lighthouse"))
|
||||
isLighthouse, err := strconv.ParseBool(string(lhBytes))
|
||||
log.Println("lighthouse: ", isLighthouse, string(lhBytes))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if isLighthouse {
|
||||
addressBytes := nb.Get([]byte("address"))
|
||||
lighthouses = append(lighthouses, string(addressBytes))
|
||||
}
|
||||
}
|
||||
return err
|
||||
})
|
||||
return lighthouses, err
|
||||
}
|
||||
|
||||
func getStaticHosts(nodename string, bkt *bolt.Bucket) (hosts []StaticHost,
|
||||
err error) {
|
||||
hosts = []StaticHost{}
|
||||
err = bkt.ForEach(func(key, val []byte) error {
|
||||
if val == nil && string(key) != nodename {
|
||||
nb := bkt.Bucket(key)
|
||||
addressBytes := nb.Get([]byte("address"))
|
||||
staticAddressBytes := nb.Get([]byte("static_address"))
|
||||
staticAddress := string(staticAddressBytes)
|
||||
portBytes := nb.Get([]byte("listen_port"))
|
||||
port := string(portBytes)
|
||||
endpoint := fmt.Sprintf("%s:%s", staticAddress, port)
|
||||
if staticAddress != "" {
|
||||
host := StaticHost{
|
||||
NebulaAddress: string(addressBytes),
|
||||
Endpoint: []string{endpoint},
|
||||
}
|
||||
hosts = append(hosts, host)
|
||||
}
|
||||
}
|
||||
return err
|
||||
})
|
||||
return hosts, err
|
||||
}
|
||||
|
||||
func (b *boltdbi) getNodeConfig(netname string,
|
||||
nodename string) (config NodeConfigSchema,
|
||||
err error) {
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
netBkt := tx.Bucket([]byte(netname))
|
||||
if netBkt == nil {
|
||||
return errors.Errorf("Network does not exist.")
|
||||
}
|
||||
cipher := netBkt.Get([]byte("CIPHER"))
|
||||
nodeBkt := netBkt.Bucket([]byte(nodename))
|
||||
if nodeBkt == nil {
|
||||
return errors.Errorf("Node does not exist in network.")
|
||||
}
|
||||
addressBytes := nodeBkt.Get([]byte("address"))
|
||||
listenPortBytes := nodeBkt.Get([]byte("listen_port"))
|
||||
listenPort, err := strconv.Atoi(string(listenPortBytes))
|
||||
groupsBytes := nodeBkt.Get([]byte("groups"))
|
||||
var groups []string
|
||||
err = json.Unmarshal(groupsBytes, &groups)
|
||||
lighthouseBytes := nodeBkt.Get([]byte("is_lighthouse"))
|
||||
is_lighthouse, err := strconv.ParseBool(string(lighthouseBytes))
|
||||
var lighthouses []string
|
||||
if !is_lighthouse {
|
||||
lighthouses, err = getLighthouses(netBkt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
lighthouses = []string{}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
staticHosts, err := getStaticHosts(nodename, netBkt)
|
||||
inboundBytes := nodeBkt.Get([]byte("firewall_inbound"))
|
||||
outboundBytes := nodeBkt.Get([]byte("firewall_outbound"))
|
||||
var inbound []FirewallRule
|
||||
var outbound []FirewallRule
|
||||
err = json.Unmarshal(inboundBytes, &inbound)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = json.Unmarshal(outboundBytes, &outbound)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config = NodeConfigSchema{
|
||||
Address: string(addressBytes),
|
||||
AmLighthouse: is_lighthouse,
|
||||
Cipher: string(cipher),
|
||||
Lighthouses: lighthouses,
|
||||
StaticHosts: staticHosts,
|
||||
ListenPort: listenPort,
|
||||
FirewallInbound: inbound,
|
||||
FirewallOutbound: outbound,
|
||||
}
|
||||
return err
|
||||
})
|
||||
return config, err
|
||||
}
|
||||
|
||||
func (b *boltdbi) getNodePubkey(netname string,
|
||||
nodename string) (pubkey []byte,
|
||||
err error) {
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
netBkt := tx.Bucket([]byte(netname))
|
||||
if netBkt == nil {
|
||||
return errors.Errorf("Network does not exist.")
|
||||
}
|
||||
nodeBkt := netBkt.Bucket([]byte(nodename))
|
||||
if nodeBkt == nil {
|
||||
return errors.Errorf("Node does not exist in network.")
|
||||
}
|
||||
pubkey = nodeBkt.Get([]byte("pubkey"))
|
||||
return err
|
||||
})
|
||||
return pubkey, err
|
||||
}
|
||||
|
||||
func (b *boltdbi) getNetworkCA(netname string) (privkey []byte,
|
||||
cert []byte,
|
||||
err error) {
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket([]byte(netname))
|
||||
if bkt == nil {
|
||||
return errors.Errorf("Network does not exist.")
|
||||
}
|
||||
privkey = bkt.Get([]byte("CA_PRIV_KEY"))
|
||||
cert = bkt.Get([]byte("CA_CERT"))
|
||||
return err
|
||||
})
|
||||
return privkey, cert, err
|
||||
}
|
||||
|
||||
func (b *boltdbi) saveNodeCert(netname string,
|
||||
nodename string,
|
||||
cert []byte,
|
||||
) (err error) {
|
||||
err = b.db.Update(func(tx *bolt.Tx) error {
|
||||
netBkt := tx.Bucket([]byte(netname))
|
||||
if netBkt == nil {
|
||||
return errors.Errorf("Network does not exist.")
|
||||
}
|
||||
nodeBkt := netBkt.Bucket([]byte(nodename))
|
||||
if nodeBkt == nil {
|
||||
return errors.Errorf("Node does not exist in network.")
|
||||
}
|
||||
err = nodeBkt.Put([]byte("cert"), cert)
|
||||
return err
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *boltdbi) updateLatestFetch(netname string,
|
||||
nodename string,
|
||||
timestamp string,
|
||||
) (err error) {
|
||||
err = b.db.Update(func(tx *bolt.Tx) error {
|
||||
netBkt := tx.Bucket([]byte(netname))
|
||||
if netBkt == nil {
|
||||
return errors.Errorf("Network does not exist.")
|
||||
}
|
||||
nodeBkt := netBkt.Bucket([]byte(nodename))
|
||||
if nodeBkt == nil {
|
||||
return errors.Errorf("Node does not exist in network.")
|
||||
}
|
||||
err = nodeBkt.Put([]byte("latest_fetch"), []byte(timestamp))
|
||||
return err
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *boltdbi) updateNetwork(netname, cidr,
|
||||
cipher string, groups []string) (err error) {
|
||||
err = b.db.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket([]byte(netname))
|
||||
if bkt == nil {
|
||||
return errors.Errorf("Network does not exist.")
|
||||
}
|
||||
groupsBytes, err := json.Marshal(groups)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = bkt.Put([]byte("GROUPS"), groupsBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if cidr != "" {
|
||||
err = bkt.Put([]byte("CIDR"), []byte(cidr))
|
||||
}
|
||||
if cipher != "" {
|
||||
err = bkt.Put([]byte("CIPHER"), []byte(cipher))
|
||||
}
|
||||
return err
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
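// newAddress picks an unused nebula address for a new node by collecting the
// addresses already assigned in the network bucket and delegating to the
// address allocation helper with the network CIDR.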
func (b *boltdbi) newAddress(netname string) (address string,
|
||||
err error) {
|
||||
var used []string
|
||||
var cidr string
|
||||
err = b.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket([]byte(netname))
|
||||
if bkt == nil {
|
||||
return errors.Errorf("Network does not exist.")
|
||||
}
|
||||
cidr = string(bkt.Get([]byte("CIDR")))
|
||||
err := bkt.ForEach(func(key, val []byte) error {
|
||||
if val == nil {
|
||||
// keyval is bucket so is node
|
||||
nb := bkt.Bucket(key)
|
||||
addr := nb.Get([]byte("address"))
|
||||
used = append(used, string(addr))
|
||||
}
|
||||
return err
|
||||
})
|
||||
return err
|
||||
})
|
||||
return newAddress(cidr, used)
|
||||
}
|
24
cmd/quasar/firewall.go
Normal file
|
@ -0,0 +1,24 @@
|
|||
package main
|
||||
|
||||
type FirewallRule struct {
|
||||
Port string `json:"port"`
|
||||
Proto string `json:"proto"`
|
||||
Groups []string `json:"groups"`
|
||||
Any bool `json:"any"`
|
||||
}
|
||||
|
||||
func defaultRules() (inbound, outbound []FirewallRule) {
|
||||
inbound = append(inbound, FirewallRule{
|
||||
Port: "any",
|
||||
Proto: "icmp",
|
||||
Any: true,
|
||||
Groups: []string{},
|
||||
})
|
||||
outbound = append(outbound, FirewallRule{
|
||||
Port: "any",
|
||||
Proto: "any",
|
||||
Any: true,
|
||||
Groups: []string{},
|
||||
})
|
||||
return inbound, outbound
|
||||
}
|
40
cmd/quasar/main.go
Normal file
|
@ -0,0 +1,40 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"os"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Set log level
|
||||
log.SetLevel(log.DebugLevel)
|
||||
|
||||
// define -help command flag
|
||||
printUsage := flag.Bool("help", false, "Print command line usage")
|
||||
|
||||
serveConfig := flag.String("config",
|
||||
"/etc/quasar/config.yml",
|
||||
"Quasar config file path",
|
||||
)
|
||||
serveListenAddress := flag.String("host",
|
||||
"",
|
||||
"Quasar server listen address",
|
||||
)
|
||||
serveListenPort := flag.Int("port",
|
||||
0,
|
||||
"Quasar server listen port",
|
||||
)
|
||||
flag.Parse()
|
||||
|
||||
if *printUsage {
|
||||
flag.Usage()
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
runServe(*serveConfig,
|
||||
*serveListenAddress,
|
||||
*serveListenPort,
|
||||
)
|
||||
}
|
235
cmd/quasar/networks.go
Normal file
|
@ -0,0 +1,235 @@
|
|||
// HTTP Endpoints Relating to Networks
|
||||
// These should be used by a client using standard auth
|
||||
// rather than XPASETO auth with a nebula key
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/cert"
|
||||
)
|
||||
|
||||
type NetSchema struct {
|
||||
Name string `json:"name"`
|
||||
Cidr string `json:"cidr"`
|
||||
Cipher string `json:"cipher"`
|
||||
Groups []string `json:"groups"`
|
||||
CaFingerprint string `json:"ca_fingerprint"`
|
||||
}
|
||||
|
||||
type NetOverviewSchema struct {
|
||||
Name string `json:"name"`
|
||||
Cidr string `json:"cidr"`
|
||||
}
|
||||
|
||||
// /api/networks/all [GET]
|
||||
func (s *server) handleGetAllNetworks() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
log.Println("Getting all networks")
|
||||
networks, err := s.db.allNetworks()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
log.Println("Got all networks", networks)
|
||||
if err := json.NewEncoder(w).Encode(networks); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type NewNetSchema struct {
|
||||
Name string `json:"name"`
|
||||
Cidr string `json:"cidr"`
|
||||
}
|
||||
|
||||
// /api/networks/new [POST]
|
||||
func (s *server) handleNewNetwork() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
// get name and cidr from request body
|
||||
dec := json.NewDecoder(r.Body)
|
||||
dec.DisallowUnknownFields()
|
||||
var newnet NewNetSchema
|
||||
err := dec.Decode(&newnet)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
log.Println("Creating new network: ", newnet)
|
||||
|
||||
// generate keys
|
||||
pubkey, privkey, err := ed25519.GenerateKey(rand.Reader)
|
||||
if err != nil {
|
||||
log.Error("Could not generate keys: " + err.Error())
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// generate and self-sign cert for ca key
|
||||
ip, cidr, err := net.ParseCIDR(newnet.Cidr)
|
||||
if err != nil {
|
||||
log.Error("Invalid cidr definition: " + newnet.Cidr)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
cidr.IP = ip
|
||||
subnet := cidr
|
||||
nc := cert.NebulaCertificate{
|
||||
Details: cert.NebulaCertificateDetails{
|
||||
Name: "quasar" + newnet.Name,
|
||||
Ips: []*net.IPNet{cidr},
|
||||
Groups: []string{},
|
||||
Subnets: []*net.IPNet{subnet},
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().Add(time.Duration(time.Hour * 2190)),
|
||||
PublicKey: pubkey,
|
||||
IsCA: true,
|
||||
},
|
||||
}
|
||||
|
||||
err = nc.Sign(privkey)
|
||||
if err != nil {
|
||||
log.Error("Error while signing ca key: %s", err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
certbytes, err := nc.MarshalToPEM()
if err != nil {
log.Error("Could not marshal CA cert to PEM: " + err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
|
||||
|
||||
// write new network to database
|
||||
log.Println("ADDING NETWORK TO DB, priv: ", privkey)
|
||||
err = s.db.addNetwork(certbytes,
|
||||
privkey,
|
||||
newnet.Name,
|
||||
newnet.Cidr,
|
||||
"chachapoly",
|
||||
)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
log.Println("Added network to db: ", newnet.Name, newnet.Cidr)
|
||||
fmt.Fprintf(w, "SUCCESS")
|
||||
}
|
||||
}
|
||||
|
||||
// /api/networks/{NETWORK}/update [POST]
|
||||
func (s *server) handleUpdateNetwork() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
netname := vars["NETWORK"]
|
||||
dec := json.NewDecoder(r.Body)
|
||||
dec.DisallowUnknownFields()
|
||||
var network NetSchema
|
||||
err := dec.Decode(&network)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
cidr := network.Cidr
|
||||
if cidr != "" {
|
||||
_, ncidr, err := net.ParseCIDR(cidr)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
cidr = ncidr.String()
|
||||
}
|
||||
err = s.db.updateNetwork(netname, cidr, network.Cipher, network.Groups)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(w, "SUCCESS")
|
||||
}
|
||||
}
|
||||
|
||||
// /api/networks/{NETWORK}/delete [DELETE]
|
||||
func (s *server) handleDeleteNetwork() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
net := vars["NETWORK"]
|
||||
log.Println("Deleting network", net)
|
||||
err := s.db.deleteNetwork(net)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(w, "SUCCESS")
|
||||
}
|
||||
}
|
||||
|
||||
// /api/networks/{NETWORK}/info [GET]
|
||||
func (s *server) handleNetworkInfo() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
net := vars["NETWORK"]
|
||||
log.Println("Getting network info for", net)
|
||||
network, err := s.db.networkInfo(net)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
_, certBytes, err := s.db.getNetworkCA(net)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
cert, _, err := cert.UnmarshalNebulaCertificateFromPEM(certBytes)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
fingerprint, err := cert.Sha256Sum()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
network.CaFingerprint = fingerprint
|
||||
if err := json.NewEncoder(w).Encode(network); err != nil {
|
||||
log.Error(err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// /api/networks/{NETWORK}/cert [GET]
|
||||
func (s *server) handleGetNetworkCert() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
// unprotected route - anyone with network name can get ca cert
|
||||
// returns 404 if no network or ca cert file
|
||||
vars := mux.Vars(r)
|
||||
net := vars["NETWORK"]
|
||||
log.Printf("/api/networks/%s/cert requested.\n", net)
|
||||
cacert, err := s.db.getCert(net, "")
|
||||
log.Println("GOT CERT")
|
||||
if err != nil || string(cacert) == "" {
|
||||
switch errmsg := err.Error(); errmsg {
|
||||
case "NONETWORK":
|
||||
http.Error(w, "Network does not exist.", 404)
|
||||
default:
|
||||
http.Error(w, "Internal Server Error.", 500)
|
||||
}
|
||||
return
|
||||
}
|
||||
nc, _, err := cert.UnmarshalNebulaCertificateFromPEM(cacert)
|
||||
if err != nil {
|
||||
http.Error(w, "Could not decode CA Certificate", 500)
|
||||
}
|
||||
pemcert, err := nc.MarshalToPEM()
|
||||
if err != nil {
|
||||
http.Error(w, "Could not marshal CA certificate to PEM", 500)
|
||||
}
|
||||
fmt.Fprintf(w, string(pemcert))
|
||||
}
|
||||
}
|
201
cmd/quasar/neutron.go
Normal file
|
@ -0,0 +1,201 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/b177y/starship/nebutils"
|
||||
"github.com/b177y/starship/wormhole"
|
||||
"github.com/b177y/starship/xpaseto"
|
||||
"github.com/gorilla/mux"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/cert"
|
||||
)
|
||||
|
||||
type StaticHost struct {
|
||||
NebulaAddress string
|
||||
Endpoint []string
|
||||
}
|
||||
|
||||
type NodeConfigSchema struct {
|
||||
Address string `json:"address"` // do i need this?
|
||||
Lighthouses []string `json:"lighthouses"`
|
||||
AmLighthouse bool `json:"am_lighthouse"`
|
||||
StaticHosts []StaticHost `json:"static_hosts"`
|
||||
ListenPort int `json:"listen_port"`
|
||||
|
||||
FirewallInbound []FirewallRule `json:"firewall_inbound"`
|
||||
FirewallOutbound []FirewallRule `json:"firewall_outbound"`
|
||||
Cipher string `json:"cipher"`
|
||||
Cert string `json:"cert"`
|
||||
}
|
||||
|
||||
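// CheckNodeIdentity verifies an XPASETO token against the public key stored
// for the node, and checks that the identity claims in the token match the
// network and node names given in the request.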
func (s *server) CheckNodeIdentity(netname string,
|
||||
nodename string, token string) (err error) {
|
||||
log.Printf("Checking identity for node %s (network %s).\n", nodename,
|
||||
netname)
|
||||
// get node pubkey
|
||||
pubkey, err := s.db.getNodePubkey(netname, nodename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Println("Got node pubkey: ", pubkey)
|
||||
signer := xpaseto.NewSigner([]byte{0}, pubkey)
|
||||
jsonToken, err := signer.ParsePaseto(token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nodeIdentity := *new(wormhole.NodeIdentitySchema)
|
||||
err = wormhole.SchemaFromJSONToken(jsonToken, &nodeIdentity)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if nodeIdentity.Netname != netname || nodeIdentity.Nodename != nodename {
|
||||
log.Error("Node identity does not match url params")
|
||||
return fmt.Errorf("Node Identity does not match url params.")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
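// SignPayload wraps a payload in a JSON token and signs it as an XPASETO
// token using the network CA private key converted to a Curve25519 key.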
func (s *server) SignPayload(netname string,
|
||||
payload interface{}) (signed string, err error) {
|
||||
jsonToken, err := wormhole.NewToken(payload)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
edprivkey, _, err := s.db.getNetworkCA(netname)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
privkey := nebutils.PrivateKeyToCurve25519(edprivkey[:32])
|
||||
signer := xpaseto.NewSigner(privkey, []byte{})
|
||||
token, err := signer.SignPaseto(jsonToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return token, nil
|
||||
}
|
||||
|
||||
// /api/neutron/config?net=NETWORK&node=NODE [GET]
|
||||
func (s *server) handleGetConfig() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
urlParams := r.URL.Query()
|
||||
netname := urlParams["net"][0]
|
||||
nodename := urlParams["node"][0]
|
||||
b, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
http.Error(w, "Bad Request", 400)
|
||||
return
|
||||
}
|
||||
token := string(b)
|
||||
err = s.CheckNodeIdentity(netname, nodename, token)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 401)
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
status, err := s.db.getNodeStatus(netname, nodename)
|
||||
if err != nil {
|
||||
http.Error(w, "Could not get node status: "+err.Error(), 500)
|
||||
log.Error("Could not get node status: ", err)
|
||||
return
|
||||
}
|
||||
if status != "active" {
|
||||
http.Error(w, "425 - Node is not active. You may need to have it approved.", 425)
|
||||
log.Error("Node is not active, not returning cert.")
|
||||
return
|
||||
}
|
||||
log.Printf("Getting cert for node %s in network %s.\n", nodename, netname)
|
||||
err = s.signNodeCert(netname, nodename)
|
||||
if err != nil {
|
||||
http.Error(w, "Could not sign certificate", 503)
|
||||
return
|
||||
}
|
||||
nodeCert, err := s.db.getCert(netname, nodename)
|
||||
if err != nil || nodeCert == nil {
|
||||
http.Error(w, "Could not get certificate", 503)
|
||||
return
|
||||
}
|
||||
nc, err := cert.UnmarshalNebulaCertificate(nodeCert)
|
||||
if err != nil {
|
||||
http.Error(w, "Could not decode CA Certificate", 500)
|
||||
return
|
||||
}
|
||||
pemcert, err := nc.MarshalToPEM()
|
||||
if err != nil {
|
||||
http.Error(w, "Could not marshal CA certificate to PEM", 500)
|
||||
return
|
||||
}
|
||||
node, err := s.db.getNodeConfig(netname, nodename)
if err != nil {
http.Error(w, "Could not get node config: "+err.Error(), 500)
log.Error(err)
return
}
|
||||
node.Cert = string(pemcert)
|
||||
err = s.db.updateLatestFetch(netname, nodename, time.Now().Format(time.RFC3339))
|
||||
signedResponse, err := s.SignPayload(netname, node)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(w, signedResponse)
|
||||
}
|
||||
}
|
||||
|
||||
// /api/neutron/join [POST]
|
||||
func (s *server) handleJoinNetwork() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
log.Println("Join network requested")
|
||||
b, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
http.Error(w, "Bad Request", 400)
|
||||
return
|
||||
}
|
||||
token := string(b)
|
||||
signer := xpaseto.NewSigner([]byte{0}, []byte{0})
|
||||
jsonToken, err := signer.ParseSelfSigned(token)
|
||||
if err != nil {
|
||||
http.Error(w, "Invalid Signature: "+err.Error(), 503)
|
||||
return
|
||||
}
|
||||
joinReq := *new(wormhole.RequestJoinSchema)
|
||||
err = wormhole.SchemaFromJSONToken(jsonToken, &joinReq)
|
||||
if err != nil {
|
||||
http.Error(w, "Internal Server Error", 500)
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
pubkey, _, err := cert.UnmarshalX25519PublicKey([]byte(joinReq.PubKey))
|
||||
if err != nil {
|
||||
http.Error(w, "Internal Server Error", 500)
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
address, err := s.db.newAddress(joinReq.Netname)
|
||||
if err != nil {
|
||||
http.Error(w, "Internal Server Error", 500)
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
err = s.db.addJoinRequest(joinReq.Netname,
|
||||
joinReq.Nodename,
|
||||
joinReq.Hostname,
|
||||
address,
|
||||
pubkey,
|
||||
)
|
||||
if err != nil {
|
||||
http.Error(w, "Internal Server Error: "+err.Error(), 500)
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(w, "SUCCESS")
|
||||
}
|
||||
}
|
||||
|
||||
// /api/neutron/leave?net=NETWORK&node=NODE [POST]
|
||||
func (s *server) handleLeaveNetwork() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
nodename := vars["NODENAME"]
|
||||
log.Printf("/api/neutron/%s/leave requested.\n", nodename)
|
||||
}
|
||||
}
|
203
cmd/quasar/nodes.go
Normal file
|
@ -0,0 +1,203 @@
|
|||
// This contains endpoints for managing nodes through the API
|
||||
// using standard auth (not XPASETO auth)
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/cert"
|
||||
)
|
||||
|
||||
type NodeOverviewSchema struct {
|
||||
Nodename string `json:"name"`
|
||||
Hostname string `json:"hostname"`
|
||||
LatestFetch string `json:"latest_fetch"`
|
||||
Status string `json:"status"`
|
||||
Address string `json:"address"`
|
||||
PubKey string `json:"pubkey"`
|
||||
}
|
||||
|
||||
type NodeSchema struct {
|
||||
Nodename string `json:"name"`
|
||||
Hostname string `json:"hostname"`
|
||||
Status string `json:"status"`
|
||||
Address string `json:"address"`
|
||||
StaticAddress string `json:"static_address"`
|
||||
ListenPort int `json:"listen_port"`
|
||||
Lighthouse bool `json:"is_lighthouse"`
|
||||
Groups []string `json:"groups"`
|
||||
FirewallOutbound []FirewallRule `json:"firewall_outbound"`
|
||||
FirewallInbound []FirewallRule `json:"firewall_inbound"`
|
||||
}
|
||||
|
||||
// /api/networks/{NETWORK}/nodes/all [GET]
|
||||
func (s *server) handleGetAllNodes() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
net := vars["NETWORK"]
|
||||
log.Println("Getting nodes in network", net)
|
||||
nodes, err := s.db.allNodes(net)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if err := json.NewEncoder(w).Encode(nodes); err != nil {
|
||||
log.Error(err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
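// signNodeCert creates a nebula certificate for the node's stored public key,
// using the node's address, groups and the network CIDR, signs it with the
// network CA and saves it, with an expiry just before the CA cert expires.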
func (s *server) signNodeCert(netname string, nodename string) error {
|
||||
log.Printf("Signing cert for node %s in network %s.", nodename, netname)
|
||||
pubkey, err := s.db.getNodePubkey(netname, nodename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
node, err := s.db.getNodeInfo(netname, nodename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
network, err := s.db.networkInfo(netname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
quasarPrivKey, certBytes, err := s.db.getNetworkCA(netname)
|
||||
log.Println("Trying to unmarshal cert")
|
||||
quasarCert, _, err := cert.UnmarshalNebulaCertificateFromPEM(certBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Println("Trying to get issuer")
|
||||
issuer, err := quasarCert.Sha256Sum()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Println("cidr stuffs.")
|
||||
ip, cidr, err := net.ParseCIDR(network.Cidr)
|
||||
ip = net.ParseIP(node.Address)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cidr.IP = ip
|
||||
subnet := cidr
|
||||
exp := time.Until(quasarCert.Details.NotAfter) - time.Second*1
|
||||
nc := cert.NebulaCertificate{
|
||||
Details: cert.NebulaCertificateDetails{
|
||||
Name: nodename,
|
||||
Ips: []*net.IPNet{cidr},
|
||||
Groups: node.Groups,
|
||||
Subnets: []*net.IPNet{subnet},
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().Add(exp),
|
||||
PublicKey: pubkey,
|
||||
IsCA: false,
|
||||
Issuer: issuer,
|
||||
},
|
||||
}
|
||||
err = nc.Sign(quasarPrivKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
signedCertBytes, err := nc.Marshal()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = s.db.saveNodeCert(netname, nodename, signedCertBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pemBytes, _ := nc.MarshalToPEM()
|
||||
log.Println("Saved cert", string(pemBytes))
|
||||
return nil
|
||||
}
|
||||
|
||||
// /api/networks/{NETWORK}/nodes/{NODENAME}/approve [POST]
|
||||
func (s *server) handleApproveNode() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
net := vars["NETWORK"]
|
||||
node := vars["NODENAME"]
|
||||
log.Printf("Approving node %s in network %s.\n", node, net)
|
||||
// sign pubkey and create cert
|
||||
err := s.signNodeCert(net, node)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
// update status to active
|
||||
err = s.db.updateNodeStatus(net, node, "active")
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(w, "SUCCESS")
|
||||
}
|
||||
}
|
||||
|
||||
// /api/networks/{NETWORK}/nodes/{NODENAME}/update [POST]
|
||||
func (s *server) handleUpdateNode() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
netname := vars["NETWORK"]
|
||||
nodename := vars["NODENAME"]
|
||||
log.Printf("Updating node %s in network %s.\n", nodename, netname)
|
||||
dec := json.NewDecoder(r.Body)
|
||||
dec.DisallowUnknownFields()
|
||||
var node NodeSchema
|
||||
err := dec.Decode(&node)
|
||||
if err != nil {
|
||||
log.Error("Could not decode json", err.Error())
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
log.Println("UPDATING NODE WITH", node)
|
||||
err = s.db.updateNodeInfo(netname, nodename, node)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(w, "SUCCESS")
|
||||
}
|
||||
}
|
||||
|
||||
// /api/networks/{NETWORK}/nodes/{NODENAME}/info [GET]
|
||||
func (s *server) handleNodeInfo() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
netname := vars["NETWORK"]
|
||||
nodename := vars["NODENAME"]
|
||||
node, err := s.db.getNodeInfo(netname, nodename)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if err := json.NewEncoder(w).Encode(node); err != nil {
|
||||
log.Error(err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// /api/networks/{NETWORK}/nodes/{NODENAME}/disable [POST]
|
||||
func (s *server) handleDisableNode() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
net := vars["NETWORK"]
|
||||
node := vars["NODENAME"]
|
||||
log.Printf("Disabling node %s in network %s.\n", node, net)
|
||||
err := s.db.updateNodeStatus(net, node, "disabled")
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(w, "SUCCESS")
|
||||
}
|
||||
}
|
208
cmd/quasar/serve.go
Normal file
|
@ -0,0 +1,208 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
jwtmiddleware "github.com/auth0/go-jwt-middleware"
|
||||
jwt "github.com/form3tech-oss/jwt-go"
|
||||
"github.com/gorilla/mux"
|
||||
negronilogrus "github.com/meatballhat/negroni-logrus"
|
||||
"github.com/rs/cors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/urfave/negroni"
|
||||
)
|
||||
|
||||
type server struct {
|
||||
db database
|
||||
router *mux.Router
|
||||
conf *Config
|
||||
n *negroni.Negroni
|
||||
jmw *jwtmiddleware.JWTMiddleware
|
||||
userpass map[string]string
|
||||
}
|
||||
|
||||
func (s *server) serve(endpoint string) {
|
||||
cor := cors.New(cors.Options{
|
||||
AllowedOrigins: []string{"*"},
|
||||
AllowedMethods: []string{"POST", "GET", "OPTIONS", "DELETE"},
|
||||
AllowedHeaders: []string{"Accept", "Accept-Language", "Content-Type", "Authorization"},
|
||||
AllowCredentials: true,
|
||||
Debug: false,
|
||||
})
|
||||
handler := cor.Handler(s.router)
|
||||
s.n.Use(negronilogrus.NewMiddleware())
|
||||
s.n.UseHandler(handler)
|
||||
log.Fatal(http.ListenAndServe(endpoint, s.n))
|
||||
}
|
||||
|
||||
func (s *server) handleHome() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
log.Info("Index page / requested")
|
||||
fmt.Fprintf(w, "Quasar is running!")
|
||||
}
|
||||
}
|
||||
|
||||
type User struct {
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
}
|
||||
|
||||
type JwtToken struct {
|
||||
Token string `json:"token"`
|
||||
}
|
||||
|
||||
func (s *server) handleLogin() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
var user User
|
||||
json.NewDecoder(r.Body).Decode(&user)
|
||||
if pass, ok := s.userpass[user.Username]; ok {
|
||||
if pass != user.Password {
|
||||
http.Error(w, "Password Incorrect", 403)
|
||||
log.Error(fmt.Sprintf("Incorrect password for %s", user.Username))
|
||||
return
|
||||
}
|
||||
} else {
|
||||
http.Error(w, fmt.Sprintf("No user %s exists.", user.Username),
|
||||
403)
|
||||
log.Error(fmt.Sprintf("No user %s exists.", user.Username))
|
||||
return
|
||||
}
|
||||
|
||||
token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
|
||||
"username": user.Username,
|
||||
})
|
||||
tokenString, err := token.SignedString([]byte(s.conf.Authsecret))
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
err = json.NewEncoder(w).Encode(JwtToken{Token: tokenString})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
log.Println("Returning token: ", tokenString)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *server) routes() {
|
||||
s.router.HandleFunc("/", s.handleHome()).Methods("GET")
|
||||
// networks endpoints
|
||||
s.router.Handle("/api/login", negroni.New(
|
||||
negroni.Wrap(http.HandlerFunc(s.handleLogin())),
|
||||
)).Methods("POST")
|
||||
s.router.Handle("/api/networks/all", negroni.New(
|
||||
negroni.HandlerFunc(s.jmw.HandlerWithNext),
|
||||
negroni.Wrap(http.HandlerFunc(s.handleGetAllNetworks())),
|
||||
)).Methods("GET")
|
||||
s.router.Handle("/api/networks/new", negroni.New(
|
||||
negroni.HandlerFunc(s.jmw.HandlerWithNext),
|
||||
negroni.Wrap(http.HandlerFunc(s.handleNewNetwork())),
|
||||
)).Methods("POST")
|
||||
s.router.Handle("/api/networks/{NETWORK}/cert", negroni.New(
|
||||
negroni.Wrap(http.HandlerFunc(s.handleGetNetworkCert())),
|
||||
)).Methods("GET")
|
||||
s.router.Handle("/api/networks/{NETWORK}/info", negroni.New(
|
||||
negroni.HandlerFunc(s.jmw.HandlerWithNext),
|
||||
negroni.Wrap(http.HandlerFunc(s.handleNetworkInfo())),
|
||||
)).Methods("GET")
|
||||
s.router.Handle("/api/networks/{NETWORK}/delete", negroni.New(
|
||||
negroni.HandlerFunc(s.jmw.HandlerWithNext),
|
||||
negroni.Wrap(http.HandlerFunc(s.handleDeleteNetwork())),
|
||||
)).Methods("DELETE")
|
||||
s.router.Handle("/api/networks/{NETWORK}/update", negroni.New(
|
||||
negroni.HandlerFunc(s.jmw.HandlerWithNext),
|
||||
negroni.Wrap(http.HandlerFunc(s.handleUpdateNetwork())),
|
||||
)).Methods("POST")
|
||||
// node management endpoints
|
||||
s.router.Handle("/api/networks/{NETWORK}/nodes/all", negroni.New(
|
||||
negroni.HandlerFunc(s.jmw.HandlerWithNext),
|
||||
negroni.Wrap(http.HandlerFunc(s.handleGetAllNodes())),
|
||||
)).Methods("GET")
|
||||
s.router.Handle("/api/networks/{NETWORK}/nodes/{NODENAME}/approve", negroni.New(
|
||||
negroni.HandlerFunc(s.jmw.HandlerWithNext),
|
||||
negroni.Wrap(http.HandlerFunc(s.handleApproveNode())),
|
||||
)).Methods("POST")
|
||||
s.router.Handle("/api/networks/{NETWORK}/nodes/{NODENAME}/info", negroni.New(
|
||||
negroni.HandlerFunc(s.jmw.HandlerWithNext),
|
||||
negroni.Wrap(http.HandlerFunc(s.handleNodeInfo())),
|
||||
)).Methods("GET")
|
||||
s.router.Handle("/api/networks/{NETWORK}/nodes/{NODENAME}/disable", negroni.New(
|
||||
negroni.HandlerFunc(s.jmw.HandlerWithNext),
|
||||
negroni.Wrap(http.HandlerFunc(s.handleDisableNode())),
|
||||
)).Methods("POST")
|
||||
s.router.Handle("/api/networks/{NETWORK}/nodes/{NODENAME}/update", negroni.New(
|
||||
negroni.HandlerFunc(s.jmw.HandlerWithNext),
|
||||
negroni.Wrap(http.HandlerFunc(s.handleUpdateNode())),
|
||||
)).Methods("POST")
|
||||
// neutron endpoints
|
||||
s.router.HandleFunc("/api/neutron/join", s.handleJoinNetwork()).Methods("POST")
|
||||
s.router.HandleFunc("/api/neutron/config", s.handleGetConfig()).Methods("GET")
|
||||
s.router.HandleFunc("/api/neutron/leave", s.handleLeaveNetwork()).Methods("POST")
|
||||
}
|
||||
|
||||
func runServe(configPath string,
|
||||
listenAddress string,
|
||||
listenPort int,
|
||||
) {
|
||||
s := new(server)
|
||||
var err error
|
||||
s.conf, err = NewConfig(configPath)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if listenAddress == "" {
|
||||
listenAddress = s.conf.Quasar.Listen.Host
|
||||
}
|
||||
if listenPort == 0 {
|
||||
listenPort = s.conf.Quasar.Listen.Port
|
||||
}
|
||||
|
||||
endpoint := listenAddress + ":" + fmt.Sprint(listenPort)
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"config": configPath,
|
||||
}).Info("Loaded config")
|
||||
|
||||
if s.conf.Database.Type == "bolt" {
|
||||
s.db = new(boltdbi)
|
||||
err = s.db.connect(s.conf.Database.Source)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
log.WithFields(log.Fields{
|
||||
"requested_db": s.conf.Database.Type,
|
||||
}).Fatal("Currently only bolt is supported as a database type.")
|
||||
}
|
||||
s.conf.Authsecret = os.Getenv("QUASAR_AUTHSECRET")
|
||||
if s.conf.Authsecret == "" {
|
||||
log.Fatal("Environment variable QUASAR_AUTHSECRET cannot be empty.")
|
||||
}
|
||||
adminpassword := os.Getenv("QUASAR_ADMINPASS")
|
||||
if adminpassword == "" {
|
||||
log.Fatal("Environment variable QUASAR_ADMINPASS cannot be empty.")
|
||||
}
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"endpoint": endpoint,
|
||||
}).Info("Starting Quasar server")
|
||||
s.router = mux.NewRouter().StrictSlash(true)
|
||||
s.n = negroni.New()
|
||||
s.userpass = make(map[string]string)
|
||||
s.userpass["admin"] = adminpassword
|
||||
s.jmw = jwtmiddleware.New(jwtmiddleware.Options{
|
||||
ValidationKeyGetter: func(token *jwt.Token) (interface{}, error) {
|
||||
return []byte(s.conf.Authsecret), nil
|
||||
},
|
||||
SigningMethod: jwt.SigningMethodHS256,
|
||||
})
|
||||
s.routes()
|
||||
s.serve(endpoint)
|
||||
}
|
131
docs/conclusion.adoc
Normal file
|
@ -0,0 +1,131 @@
|
|||
== Conclusions
|
||||
|
||||
At the time of submission the toolset is fully functional
and fulfils all requirements from the project plan.
|
||||
|
||||
The toolset can be used to create and manage Nebula overlay networks.
|
||||
A demo of this (see introduction) shows that the tools successfully work
|
||||
together.
|
||||
|
||||
== Future Work
|
||||
|
||||
=== Improved Authentication
|
||||
|
||||
// management
|
||||
Currently there is a single user called 'admin',
|
||||
and the password is defined by the environment variable `QUASAR_ADMINPASS`.
|
||||
This is good enough for basic demonstrations and usage for a simple network
|
||||
such as a homelab which is managed by one person.
|
||||
Others can still join nodes to the network, while one person can manage access.
|
||||
|
||||
However, for larger scale networks such as those of small corporations or academic groups,
|
||||
it would be useful for multiple people to be able to manage networks.
|
||||
This could involve role-based access, e.g. one user could manage all nodes in
a specific network while another user could only manage a specific node.
|
||||
|
||||
// basic auth on join request
|
||||
Additionally, there is no authentication on the Quasar endpoint that
|
||||
Neutron uses to request to join a network.
|
||||
This is not a direct security risk to the network as nodes
|
||||
must be approved by an authorised client before they can receive a certificate
|
||||
signed by the CA.
|
||||
However, if Quasar is running on the Internet,
|
||||
denial of service attacks could be possible as someone could repeatedly
|
||||
request to join a network.
|
||||
|
||||
A possible solution is to create a token for each network, which
would be required with every join request.
|
||||
These tokens could be rotated at intervals such as every 24 hours.
|
||||
The tokens would be less sensitive than credentials for the management endpoints
|
||||
as nodes would still require approval.
|
||||
This means you could share tokens with people
|
||||
who you partially trust so that they can join your network,
|
||||
and you wouldn't have to worry about them changing firewall rules in your network.
|
||||
You would then only be risking a denial of service from these people
|
||||
you partially trust, which is a significantly smaller attack surface
|
||||
compared to being open to the internet.
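
A minimal sketch of what such a check might look like is shown below.
The `X-Join-Token` header, the stored per-network token and the
`joinTokenValid` and `guardJoin` helpers are all hypothetical and are not
part of the current codebase (the comparison uses `crypto/subtle` and
`net/http`).

[source,go]
----
// joinTokenValid compares the token presented by a joining node with the
// current token stored for the network, in constant time so the comparison
// does not leak information through timing.
func joinTokenValid(presented, current string) bool {
    return subtle.ConstantTimeCompare([]byte(presented), []byte(current)) == 1
}

// guardJoin wraps the join handler so that requests without a valid token
// are rejected before any database work is done.
func guardJoin(currentToken string, next http.HandlerFunc) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        if !joinTokenValid(r.Header.Get("X-Join-Token"), currentToken) {
            http.Error(w, "Invalid join token", http.StatusForbidden)
            return
        }
        next(w, r)
    }
}
----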
|
||||
|
||||
=== HTTPS Support
|
||||
|
||||
Currently HTTPS can be set up using a reverse proxy such as traefik or nginx.
|
||||
Using tools such as docker, this can be set up quickly and easily with a replicable
|
||||
setup.
|
||||
However, one of the big advantages of using Golang is that it compiles code to
|
||||
a static binary.
|
||||
Golang's built in HTTP server (which Quasar uses) has support for running over HTTPS.
|
||||
This means that it would be very easy to add support.
|
||||
|
||||
To use HTTPS you would run the server with:
|
||||
|
||||
[source, go]
|
||||
----
|
||||
http.ListenAndServeTLS(addr, certFile, keyFile, handler)
|
||||
----
|
||||
|
||||
Instead of:
|
||||
|
||||
[source, go]
|
||||
----
|
||||
http.ListenAndServe(addr, handler)
|
||||
----
|
||||
|
||||
Adding built in support would involve adding an option to
|
||||
the yaml config to enable HTTPS, and to configure key and cert paths.
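
For example, the `serve` method could branch on such an option.
The `TLS`, `CertFile` and `KeyFile` config fields below are hypothetical
and would need to be added to the Quasar config struct.

[source,go]
----
// sketch: choose between HTTP and HTTPS based on a config option
if s.conf.Quasar.TLS.Enabled {
    log.Fatal(http.ListenAndServeTLS(endpoint,
        s.conf.Quasar.TLS.CertFile,
        s.conf.Quasar.TLS.KeyFile,
        s.n))
} else {
    log.Fatal(http.ListenAndServe(endpoint, s.n))
}
----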
|
||||
|
||||
=== Input Validation
|
||||
|
||||
When incoming JSON is decoded by the Quasar API,
|
||||
it isn't validated against any constraints.
|
||||
This means injection attacks could be attempted, for example
|
||||
it is possible to create a network called "<script>alert(0)</script>".
|
||||
|
||||
Although Svelte protects against this and renders the script tag as a string,
|
||||
it should be validated by Quasar to limit the possibilities of injection attacks.
|
||||
|
||||
Golang structs allow 'tags' on fields, which are used by the json package
to decode and encode JSON data.
Third-party libraries can read additional tags on these fields to attach
validators to them.
|
||||
|
||||
For example when creating a new network, the `NewNetSchema` struct
|
||||
is used.
|
||||
|
||||
[source,go]
|
||||
----
|
||||
type NewNetSchema struct {
|
||||
Name string `json:"name"`
|
||||
Cidr string `json:"cidr"`
|
||||
}
|
||||
----
|
||||
|
||||
The `json:"name"` tag tells the json decoder if there is a field with the key
|
||||
`name`, it should use the value as the value for `NewNetSchema.Name`.
|
||||
Using an external library such as `go-playground/validator` you could add validators
|
||||
as follows:
|
||||
|
||||
[source,go]
|
||||
----
|
||||
type NewNetSchema struct {
|
||||
Name string `json:"name" validate:"max=30,alphanum"`
|
||||
Cidr string `json:"cidr" validate:"cidrv4"`
|
||||
}
|
||||
----
|
||||
|
||||
You could then validate requests using:
|
||||
|
||||
[source,go]
|
||||
----
|
||||
// example test struct
|
||||
net := NewNetSchema{
|
||||
Name: "testnet",
|
||||
Cidr: "192.168.1.0/24",
|
||||
}
|
||||
err := validate.Struct(net)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
----
|
||||
|
||||
Overall, although the current project fulfils its requirements and works as
intended, there are many improvements that could be made to the security
and usability of the toolset.
|
99
docs/hubble.adoc
Normal file
|
@ -0,0 +1,99 @@
|
|||
== Hubble
|
||||
|
||||
=== Overview
|
||||
|
||||
Hubble is a frontend application which communicates with the Quasar API
|
||||
in order to manage Starship networks.
|
||||
It shows all available networks in a sidebar, in addition to a 'Create New' button.
|
||||
When you select a network it shows network settings, which you can modify.
|
||||
You also have the option to delete a network.
|
||||
|
||||
.Hubble Network Page
|
||||
image::hubblenetwork.png[]
|
||||
|
||||
The network settings page also shows all nodes in the network as collapsible
cards, which initially show basic information such as the nodename,
hostname and IP address,
but can be expanded to reveal node settings that can be updated,
such as firewall rules and the groups the node belongs to.
|
||||
|
||||
.Hubble Node Management
|
||||
image::hubblenodes.png[]
|
||||
|
||||
=== Language and Paradigm Chosen
|
||||
|
||||
// and justification why
|
||||
To make the application easily accessible for users,
|
||||
I have built it as a web application.
|
||||
This means it works cross platform and does not require any installation
|
||||
on a client device.
|
||||
Hubble is built with HTML, CSS and JavaScript as this is what is needed
|
||||
to make an interactive web application.
|
||||
|
||||
In order to develop more efficiently, Hubble is built using a tool called 'Svelte'.
|
||||
Although this can be considered an alternative to frameworks such as React,
|
||||
Angular and Vue, it is not a framework as such but more of a compiler.
|
||||
It allows you to program reactively, with code broken down into components.
|
||||
The coding process is similar to when using frameworks such as React,
|
||||
but code is compiled to static HTML, CSS and native JavaScript - whereas many
|
||||
other frameworks bundle a full library which the client uses to interpret the
|
||||
code at runtime.
|
||||
This allows Svelte to have a small footprint in terms of both resource usage
|
||||
and size of the compiled site.
|
||||
|
||||
Svelte uses a reactive programming paradigm, which is a subset of declarative
|
||||
paradigms.
|
||||
You can declare what should happen as a result of something else - to make
|
||||
the frontend *react* to changes as they happen.
|
||||
For example if a change is made to a network's settings,
|
||||
other parts of the interface can *react* and update to reflect the change.
|
||||
|
||||
=== Installation Instructions
|
||||
|
||||
[source,shell]
|
||||
----
|
||||
cd hubble
|
||||
npm install
|
||||
|
||||
# to build to static site (not needed for running dev server)
|
||||
npm run build
|
||||
----
|
||||
|
||||
=== Operating Instructions
|
||||
|
||||
The build creates a `public` directory containing HTML, CSS and JavaScript files
|
||||
which can be served using any HTTP server.
|
||||
|
||||
[source,shell]
|
||||
----
|
||||
cd hubble
|
||||
|
||||
# to run dev server
|
||||
npm run dev
|
||||
|
||||
# to run 'production' server
|
||||
npm run start
|
||||
----
|
||||
|
||||
=== Libraries and Tools Needed to Run
|
||||
|
||||
* nodejs
|
||||
* npm - node package manager
|
||||
** svelte - compiler for `.svelte` files to HTML/CSS/JS
|
||||
** svelte-notifications - for notifications
|
||||
** axios - for API requests to Quasar
|
||||
** svelte-routing - For managing pages and navigation
|
||||
** tailwindcss - simple class based css framework
|
||||
|
||||
=== Issues
|
||||
|
||||
// A section outlining any issues that needed to be overcome during development and what mitigations were put in place. This can include things you tried but that didn’t work, things you wanted to do but couldn’t complete and the reasons why
|
||||
|
||||
There were no major issues with the development of the Hubble frontend,
|
||||
but it involved the challenge of learning a new style of web app development
|
||||
as I had never used Svelte before.
|
||||
|
||||
Coming from a React background meant I had to learn new concepts
|
||||
of global stores, event management and overall project management.
|
||||
However, Svelte is simple and easy to learn so I was able to pick
|
||||
these up reasonably quickly.
|
BIN
docs/images/hubblenetwork.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 607 KiB |
BIN
docs/images/hubblenodes.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 518 KiB |
66
docs/introduction.adoc
Normal file
|
@ -0,0 +1,66 @@
|
|||
== Introduction
|
||||
|
||||
=== Overview
|
||||
|
||||
Nebula is "a scalable overlay networking tool" which allows you to
|
||||
"seamlessly connect computers anywhere in the world".
|
||||
footnote:[GitHub. 2021. slackhq/nebula. [online\] Available at: https://github.com/slackhq/nebula]
|
||||
It uses UDP hole punching to allow nodes to connect directly even if they
|
||||
are behind a firewall which only allows established traffic through.
|
||||
|
||||
To run Nebula on a node, you must have the `nebula` binary, along with
|
||||
a private key, certificate and yaml config file.
|
||||
Signing certificates and customising the config files can become
|
||||
tedious when the size of a Nebula network grows beyond a few nodes.
|
||||
Often users will create keypairs and certificates for all nodes from a single host,
|
||||
then transfer the private keys and certificates to the correct nodes.
|
||||
This goes against best security practices as it involves transferring a
|
||||
private key, often across a network, and it means that a host, other than
|
||||
the node which will use the key, has had access to this private key.
|
||||
|
||||
This toolkit aims to overcome some of these issues by making it easy to bring
|
||||
up a new node,
|
||||
provisioning a certificate and giving it configuration, without the private key
|
||||
leaving the node.
|
||||
|
||||
Although Nebula can scale to support thousands of nodes,
|
||||
this toolkit is currently focused on (but not limited to) managing smaller networks
|
||||
such as homelab networks - where some hosts are based on a home network,
|
||||
some may be running in 'the cloud' with VPS services, and some nodes such as laptops
|
||||
and mobiles may be constantly moving between different private networks.
|
||||
|
||||
The 'Starship' toolkit includes an API server (Quasar) with a database
|
||||
which acts as a central management system,
|
||||
a client tool (Neutron) used by nodes to request to join networks
|
||||
and update their certificates and configuration,
|
||||
and a web client (Hubble) which communicates with the API server in order
|
||||
to manage networks.
|
||||
The management system can support multiple networks,
|
||||
and the client tool will allow a node to join multiple networks.
|
||||
The management system will sign certificates for nodes when they
|
||||
have been approved using the API.
|
||||
|
||||
A demo of this project can be found here: https://youtu.be/glIgz1huZPI
|
||||
|
||||
=== Alternatives
|
||||
|
||||
One alternative to designing a new system for certificate signing would be
|
||||
to create an extension or a fork of the `step-ca` certificate management
|
||||
tool.
|
||||
This would be very powerful and useful for large datacentres
|
||||
as it would integrate with many different forms of authentication and identity
|
||||
services.
|
||||
It is likely that there would be large groups of nodes which could use the same
|
||||
configuration,
|
||||
meaning tools such as Ansible, Chef or Puppet would be able to set up
|
||||
and give the configuration files to the correct nodes.
|
||||
|
||||
However, this would lose the ability to have fine-grained control over configuration.
|
||||
For smaller networks, this control can be very useful where each node has
|
||||
a specific purpose and therefore needs different firewall rules.
|
||||
|
||||
Additionally, `step-ca` is now a large and mature project which would take time to
|
||||
understand well enough to successfully add the ability to sign Nebula
|
||||
certificates.
|
||||
Considering that I'm working with a language that I have never used before,
|
||||
it made more sense to write a new program from scratch, albeit less complex and powerful.
|
139
docs/neutron.adoc
Normal file
|
@ -0,0 +1,139 @@
|
|||
== Neutron
|
||||
|
||||
=== Overview
|
||||
|
||||
Neutron is a client which Starship nodes use to request to join networks,
|
||||
and update their configuration and certificates.
|
||||
|
||||
When joining a new network, Neutron will create a new Nebula keypair.
|
||||
It will then send a request to Quasar to join a specific network.
|
||||
This request includes the node name, the network it wants to join,
|
||||
its hostname and its Nebula public key.
|
||||
This information is sent as a JSON payload, signed using the Nebula
|
||||
private key.
|
||||
This is encoded similarly to a PASETO token.
|
||||
PASETO tokens are similar to JSON Web Tokens (JWTs),
but do not suffer from the vulnerabilities that JWTs are prone to
because of their vague protocol specification.
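
As a rough sketch (not a verbatim copy of the Neutron source), the join
payload could be built and signed with the toolkit's `wormhole` and `xpaseto`
packages roughly as follows.
The key variables and the Quasar URL are placeholders, and the exact
self-signing call used by Neutron may differ from the `SignPaseto` call
shown here.

[source,go]
----
// Claims for the join request; Quasar decodes these into
// wormhole.RequestJoinSchema on the other side.
joinReq := wormhole.RequestJoinSchema{
    Netname:  "homelab",
    Nodename: "node1",
    Hostname: "node1.example.com",
    PubKey:   string(nebulaPubPEM), // nebula X25519 public key (PEM)
}

jsonToken, err := wormhole.NewToken(joinReq)
if err != nil {
    log.Fatal(err)
}

// Sign the token with the node's nebula private key so Quasar can verify it
// against the public key carried in the claims.
signer := xpaseto.NewSigner(nebulaPrivKey, nebulaPubKey)
token, err := signer.SignPaseto(jsonToken)
if err != nil {
    log.Fatal(err)
}

// Send the signed token as the request body to Quasar's join endpoint.
resp, err := http.Post(quasarURL+"/api/neutron/join", "text/plain",
    strings.NewReader(token))
if err != nil {
    log.Fatal(err)
}
defer resp.Body.Close()
----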
|
||||
|
||||
When updating, Neutron will send requests to Quasar to obtain
|
||||
an updated certificate and configuration file.
|
||||
For Quasar to send these, Neutron must include a signed token
|
||||
which includes its nodename and the network name it is trying to
|
||||
update, and the node must be approved and active on the Quasar server.
|
||||
The signature on the token is verified against the public key stored
|
||||
for the node on the Quasar server.
|
||||
|
||||
The update script can be run at frequent intervals to keep the node updated
|
||||
with the most recent configuration changes.
|
||||
|
||||
=== Language and Paradigm Chosen
|
||||
|
||||
Neutron is written in Golang.
|
||||
There were many reasons for this, but the most significant is that Golang
|
||||
can statically compile binaries easily.
|
||||
This means that a small binary can be downloaded to a node with no extra dependencies
|
||||
required to use the tool.
|
||||
|
||||
Golang has many other advantages.
|
||||
For example, it is strongly typed, and there is little 'magic' as with
|
||||
languages such as Python.
|
||||
The Go compiler is also 'fussy'.
|
||||
For example, it will refuse to compile when you have an unused variable declared.
|
||||
Although this makes it harder to work with initially,
|
||||
it means it is easier to write good code.
|
||||
|
||||
Golang is an imperative language,
|
||||
but it supports programming in object oriented and functional paradigms.
|
||||
An imperative language is necessary due to the complexity and unique nature
|
||||
of the tools.
|
||||
Features of object oriented programming such as classes and inheritance are
|
||||
not available in golang,
|
||||
but other features including polymorphism (using interfaces) and methods
|
||||
are available and have been used in this tool.
|
||||
|
||||
// paradigm
|
||||
|
||||
=== Installation Instructions
|
||||
|
||||
[source,shell]
|
||||
----
|
||||
# build
|
||||
cd starship
|
||||
|
||||
# equivalent of `go build -o neutron cmd/neutron/*.go`
|
||||
make neutron
|
||||
----
|
||||
|
||||
=== Operating Instructions
|
||||
|
||||
==== Manual install
|
||||
|
||||
[source,shell]
|
||||
----
|
||||
# request to join network
|
||||
./neutron join -quasar http://127.0.0.1:6947 -network NETWORK -name NAME
|
||||
|
||||
# approve node from frontend then fetch latest config from Quasar
|
||||
./neutron update -network NETWORK
|
||||
# send SIGHUP to nebula to force config reload
|
||||
pgrep nebula | xargs sudo kill -1
|
||||
----
|
||||
|
||||
==== Using Install Script
|
||||
|
||||
[source, shell]
|
||||
----
|
||||
# quick install from release
|
||||
wget https://github.com/b177y/starship-public/releases/download/v0.3.0/install-neutron.sh -O /tmp/install-neutron.sh
|
||||
|
||||
# check content
|
||||
less /tmp/install-neutron.sh
|
||||
bash /tmp/install-neutron.sh
|
||||
|
||||
# approve node from frontend then fetch latest config from Quasar
|
||||
neutron update -network NETWORK
|
||||
|
||||
# start nebula with systemd
|
||||
sudo systemctl start nebula@NETWORK
|
||||
|
||||
# send SIGHUP to nebula to force config reload
|
||||
pgrep nebula | xargs sudo kill -1
|
||||
----
|
||||
|
||||
=== Libraries and Tools Needed to Run
|
||||
|
||||
* Golang
|
||||
** slackhq/nebula - nebula certificate tools
|
||||
** sirupsen/logrus - logging library
|
||||
** tetris-io/shortid - library for creating short uuids
|
||||
* systemd (not a hard requirement, but used for example setup)
|
||||
* Nebula - this is provided by the install script but otherwise must
|
||||
be downloaded from link:https://github.com/slackhq/nebula/releases[here]
|
||||
|
||||
=== Issues
|
||||
|
||||
// A section outlining any issues that needed to be overcome during development and what mitigations were put in place. This can include things you tried but that didn’t work, things you wanted to do but couldn’t complete and the reasons why
|
||||
|
||||
The keys used by nebula are saved in the Montgomery format as they are used
|
||||
for X25519 Diffie-Hellman key exchange.
|
||||
This means they cannot be used to sign standard PASETO tokens - which can only use
|
||||
ed25519 signatures for asymmetric key authentication.
|
||||
This requires Edwards formatted keys rather than Montgomery.
|
||||
The "twisted Edwards curve used by Ed25519 and the Montgomery
|
||||
curve used by X25519 are birationally equivalent"
|
||||
footnote:[Valsorda, F. 2019. Using Ed25519 signing keys for encryption [online\] Available at: https://blog.filippo.io/using-ed25519-keys-for-encryption/]
|
||||
which means you can convert between the two key formats.
|
||||
However you can only convert directly from Edwards to Montgomery,
|
||||
not the other way around.
|
||||
|
||||
To avoid having multiple private keys for each network a node is in
|
||||
(one for Nebula and one for communicating with Quasar),
|
||||
I created a library for signing and verifying 'XPASETO' tokens.
|
||||
These use Montgomery keys for XEdDSA signatures, outlined by Signal.
|
||||
footnote:[Perrin, T. 2016. The XEdDSA and VXEdDSA Signature Schemes [online\] Available at: https://signal.org/docs/specifications/xeddsa/]
|
||||
This package is based on an existing paseto library,
|
||||
footnote:[GitHub. 2021. o1egl/paseto. [online\] Available at: https://github.com/o1egl/paseto]
|
||||
from which functions are borrowed where it wasn't necessary to rewrite them.
|
||||
It should be noted that the XPASETO library does NOT conform with the PASETO
|
||||
standard (see https://paseto.io/rfc/ section 5.2).
|
129
docs/quasar.adoc
Normal file
|
@ -0,0 +1,129 @@
|
|||
== Quasar
|
||||
|
||||
=== Overview
|
||||
|
||||
Quasar is a Central Management System (CMS) for managing Starship networks.
|
||||
It provides APIs for two types of clients:
|
||||
|
||||
* Neutron Nodes
|
||||
** These authenticate by signing requests using their nebula private key
|
||||
* Frontend clients / management tools
|
||||
** These authenticate using JSON Web Tokens
|
||||
|
||||
Quasar can be configured using a yaml config file.
|
||||
By default the API listens on port `6947` as the Helix Nebula
|
||||
is 694.7 light years away from Earth.
|
||||
|
||||
The API for neutron nodes provides the following endpoints:
|
||||
|
||||
* /api/neutron/join - for a node to request to join a network.
|
||||
This request includes the Nebula public key for the node.
|
||||
The request is signed by the corresponding private key.
|
||||
This self-signed request is verified by Quasar.
|
||||
* /api/neutron/update - for a node to request configuration information
|
||||
and a certificate.
|
||||
Quasar will work out the configuration options based on the node's config
|
||||
in the database, and the config of other nodes.
|
||||
|
||||
The API for management clients provides endpoints for:
|
||||
|
||||
* listing networks
|
||||
* getting CA cert for a network
|
||||
* listing nodes in a network
|
||||
* updating network settings
|
||||
* updating node settings
|
||||
* approving / enabling / disabling nodes
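
As a rough illustration, a management client could authenticate and call one
of these endpoints as in the sketch below (placeholder credentials and host;
this assumes the JWT middleware reads the standard `Authorization: Bearer`
header, which is the go-jwt-middleware default).

[source,go]
----
// Log in: /api/login expects {"username","password"} JSON and returns {"token":"..."}.
body, _ := json.Marshal(map[string]string{
    "username": "admin",
    "password": os.Getenv("QUASAR_ADMINPASS"),
})
resp, err := http.Post("http://127.0.0.1:6947/api/login",
    "application/json", bytes.NewReader(body))
if err != nil {
    log.Fatal(err)
}
defer resp.Body.Close()

var tok struct {
    Token string `json:"token"`
}
if err := json.NewDecoder(resp.Body).Decode(&tok); err != nil {
    log.Fatal(err)
}

// Call a protected endpoint with the token.
req, _ := http.NewRequest("GET", "http://127.0.0.1:6947/api/networks/all", nil)
req.Header.Set("Authorization", "Bearer "+tok.Token)
netResp, err := http.DefaultClient.Do(req)
if err != nil {
    log.Fatal(err)
}
defer netResp.Body.Close()
io.Copy(os.Stdout, netResp.Body)
----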
|
||||
|
||||
=== Language and Paradigm Chosen
|
||||
|
||||
Quasar is written in Golang, for many of the same reasons as Neutron.
|
||||
In addition to these reasons,
|
||||
Nebula tools and libraries are written in Golang.
|
||||
Nebula has a custom certificate format (not x509 or SSH certs)
|
||||
and slack have made the library for interacting with these certificates
|
||||
open source so it is easy to include in a project.
|
||||
|
||||
Although it would be possible to use the `nebula-cert` commandline tool
|
||||
with other languages using subprocesses,
|
||||
this would be less clean and less efficient than importing and using the
|
||||
native functions needed.
|
||||
Using a language other than Go would also add this as an extra dependency for the tool.
|
||||
|
||||
Furthermore, I could not find a way to use Montgomery keys for XEdDSA
|
||||
signatures in Python (the most likely alternative to Golang for this tool),
|
||||
and writing the cryptography functions from scratch myself would
|
||||
be a security (and time management) risk as maths and cryptography are
|
||||
not my areas of expertise.
|
||||
Golang has well maintained cryptography libraries as part of the language's
|
||||
standard package.
|
||||
Using the built-in libraries, in addition to some code borrowed from third-party
|
||||
libraries,
|
||||
I was able to write a JSON token signing library which uses XEdDSA signatures.
|
||||
|
||||
Golang uses an imperative programming paradigm.
|
||||
See the Neutron section for more on this.
|
||||
|
||||
=== Installation Instructions
|
||||
|
||||
[source,shell]
|
||||
----
|
||||
make quasar
|
||||
----
|
||||
|
||||
=== Operating Instructions
|
||||
|
||||
[source,shell]
|
||||
----
|
||||
# set JWT signing secret
|
||||
export QUASAR_AUTHSECRET=$(uuid)
|
||||
|
||||
# set admin account password
|
||||
export QUASAR_ADMINPASS="password"
|
||||
|
||||
# start server
|
||||
./quasar serve -config examples/quasar.yml
|
||||
----
|
||||
|
||||
=== Libraries and Tools Needed to Run
|
||||
|
||||
* Golang
|
||||
** slackhq/nebula - nebula certificate tools
|
||||
** boltdb/bolt - embedded key/value database
|
||||
** gorilla/mux - http router
|
||||
** urfave/negroni - http middleware manager
|
||||
** meatballhat/negroni-logrus - logging middleware support
|
||||
** sirupsen/logrus - logging library
|
||||
** rs/cors - CORS middleware
|
||||
|
||||
=== Issues
|
||||
|
||||
// A section outlining any issues that needed to be overcome during development and what mitigations were put in place. This can include things you tried but that didn’t work, things you wanted to do but couldn’t complete and the reasons why
|
||||
|
||||
Part way through the project I decided to rewrite Quasar in Python,
|
||||
as I am more familiar with Python and I was running into time constraints.
|
||||
I had rewritten most of the API in Python when I tried to replicate
|
||||
the XPASETO library I had earlier written in Golang.
|
||||
I was unable to find the necessary libraries in Python to support this.
|
||||
Although Golang is a newer language than Python,
|
||||
it was created by Google and has always had a focus on security,
|
||||
meaning the built-in crypto libraries are more advanced.
|
||||
|
||||
Another problem I had with the Python rewrite was that I had to use
|
||||
the `nebula-cert` binary with subprocesses for creating and signing
|
||||
certificates.
|
||||
This adds an extra dependency to the project and is not a clean way
|
||||
of interacting with certificates.
|
||||
|
||||
I decided to switch back to Golang for these reasons,
|
||||
but fortunately switching back ended up being easier than I had expected.
|
||||
|
||||
Another problem I had was with the conversion of Edwards keys (used by the CA)
|
||||
to Montgomery Curve25519 keys (used by Nebula nodes).
|
||||
I used functions from a project by Filippo Valsorda (Go team security lead)
|
||||
to perform the key conversion.
|
||||
footnote:[GitHub. 2021. FiloSottile/age. [online\] Available at: https://github.com/FiloSottile/age/blob/bbab440e198a4d67ba78591176c7853e62d29e04/internal/age/ssh.go#L174]
|
||||
The function for converting public keys worked,
|
||||
but the private key function did not.
|
||||
After lots of research, I found that key clamping
|
||||
footnote:[Craige, J. 2021. An Explainer On Ed25519 Clamping [online\] Available at: https://www.jcraige.com/an-explainer-on-ed25519-clamping]
|
||||
was needed.
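
A minimal sketch of the private-key side of that conversion, with the clamping step that turned out to be missing,
is shown below (illustrative only, using the Go standard library; not necessarily identical to the code used in Quasar):

[source,go]
----
package main

import (
	"crypto/ed25519"
	"crypto/sha512"
	"fmt"
)

// ed25519PrivateKeyToCurve25519 derives an X25519 (Montgomery) scalar from an
// Ed25519 private key: hash the 32-byte seed with SHA-512, keep the first half,
// then apply the standard Curve25519 clamping.
func ed25519PrivateKeyToCurve25519(priv ed25519.PrivateKey) []byte {
	h := sha512.Sum512(priv.Seed())
	s := h[:32]
	s[0] &= 248  // clear the three lowest bits
	s[31] &= 127 // clear the highest bit
	s[31] |= 64  // set the second-highest bit
	return s
}

func main() {
	_, priv, _ := ed25519.GenerateKey(nil)
	fmt.Printf("curve25519 scalar: %x\n", ed25519PrivateKeyToCurve25519(priv))
}
----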
|
28
docs/report.adoc
Normal file
|
@ -0,0 +1,28 @@
|
|||
= PLCS Report - Starship Toolset
|
||||
:toc: left
|
||||
:toclevels: 3
|
||||
:icons: font
|
||||
:experimental:
|
||||
:source-highlighter: pygments
|
||||
:pygments-style: onedark
|
||||
:imagesdir: ./images
|
||||
:pdf-themesdir: /home/billy/repos/asciidoc-themes/pdf/themes
|
||||
:pdf-fontsdir: /home/billy/repos/asciidoc-themes/pdf/fonts
|
||||
:pdf-theme: b177y
|
||||
|
||||
include::introduction.adoc[]
|
||||
|
||||
include::hubble.adoc[]
|
||||
|
||||
include::neutron.adoc[]
|
||||
|
||||
include::quasar.adoc[]
|
||||
|
||||
include::conclusion.adoc[]
|
||||
|
||||
// include appendices here if necessary
|
||||
== Appendices
|
||||
|
||||
include::../CHANGELOG.adoc[leveloffset=+1]
|
||||
|
||||
== References
|
42
examples/install-neutron.sh
Executable file
|
@ -0,0 +1,42 @@
|
|||
#!/bin/bash
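
# Downloads the Starship release for the current platform, installs the
# nebula and neutron binaries plus the systemd unit, then joins a network.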
|
||||
|
||||
function setupNebulaDir(){
|
||||
sudo mkdir -p /etc/nebula # ensure the config directory exists before chown
sudo chown -R $(whoami) /etc/nebula
|
||||
}
|
||||
|
||||
function download(){
|
||||
# Could handle multiple platforms here
|
||||
cd /tmp
|
||||
wget https://github.com/b177y/starship/releases/download/v0.3.0/$(uname -s)-$(uname -m).tar.gz -O /tmp/starship.tar.gz
|
||||
rm -rf /tmp/release # -f so a missing previous release does not abort
|
||||
tar -xf /tmp/starship.tar.gz
|
||||
release=$(echo "/tmp/release/$(uname -s)-$(uname -m)" | tr '[:upper:]' '[:lower:]')
|
||||
sudo mv "${release}/nebula" /usr/local/bin/nebula
|
||||
sudo mv "${release}/neutron" /usr/local/bin/neutron
|
||||
sudo chown root:root /usr/local/bin/nebula
|
||||
sudo chown $(whoami):$(whoami) /usr/local/bin/neutron
|
||||
sudo mv "${release}/nebula@.service" /etc/systemd/system/nebula@.service
|
||||
sudo chown root:root /etc/systemd/system/nebula@.service
|
||||
sudo systemctl daemon-reload
|
||||
cd -
|
||||
}
|
||||
|
||||
function runNeutron(){
|
||||
echo -n "Quasar Endpoint: "
|
||||
read qaddr
|
||||
echo -n "Network to Join: "
|
||||
read netname
|
||||
echo -n "Node Name: "
|
||||
read nodename
|
||||
/usr/local/bin/neutron join -quasar "$qaddr" -network "$netname" -name "$nodename" || exit
|
||||
echo "Run \`neutron update -network $netname\` to get the node config once you have approved the node."
|
||||
echo "Once the config has been fetched you can start nebula with \`systemctl start nebula@$netname\`"
|
||||
}
|
||||
|
||||
function main(){
|
||||
download
|
||||
setupNebulaDir
|
||||
runNeutron
|
||||
}
|
||||
|
||||
main
|
16
examples/nebula@.service
Normal file
|
@ -0,0 +1,16 @@
|
|||
[Unit]
|
||||
Description=Nebula for %i
|
||||
Wants=basic.target
|
||||
After=basic.target network.target
|
||||
Before=sshd.service
|
||||
|
||||
[Service]
|
||||
SyslogIdentifier=nebula
|
||||
StandardOutput=syslog
|
||||
StandardError=syslog
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
ExecStart=/usr/local/bin/nebula -config /etc/nebula/%i/nebula.yml
|
||||
Restart=always
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
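
# One nebula instance per network: %i is the network name and the config is
# read from /etc/nebula/<network>/nebula.yml, e.g. `systemctl start nebula@mynet`.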
|
9
examples/quasar.yml
Normal file
|
@ -0,0 +1,9 @@
|
|||
db:
|
||||
type: "bolt" # bolt used by default, later add etcd
|
||||
src: "test.db"
|
||||
|
||||
quasar:
|
||||
name: "Test Server"
|
||||
listen:
|
||||
host: "127.0.0.1"
|
||||
port: 6947
|
27
go.mod
Normal file
|
@ -0,0 +1,27 @@
|
|||
module github.com/b177y/starship
|
||||
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
github.com/aead/chacha20poly1305 v0.0.0-20201124145622-1a5aba2a8b29 // indirect
|
||||
github.com/auth0/go-jwt-middleware v1.0.0
|
||||
github.com/boltdb/bolt v1.3.1
|
||||
github.com/form3tech-oss/jwt-go v3.2.2+incompatible
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/meatballhat/negroni-logrus v1.1.1
|
||||
github.com/mitchellh/mapstructure v1.4.1 // indirect
|
||||
github.com/o1egl/paseto v1.0.0
|
||||
github.com/o1egl/paseto/v2 v2.1.1
|
||||
github.com/pieterbork/ed25519 v0.0.0-20200301051623-f19b832d0d2e // indirect
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/rs/cors v1.7.0
|
||||
github.com/signal-golang/ed25519 v0.0.0-20200301051623-f19b832d0d2e
|
||||
github.com/sirupsen/logrus v1.8.1
|
||||
github.com/slackhq/nebula v1.4.0
|
||||
github.com/stretchr/testify v1.6.1
|
||||
github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125
|
||||
github.com/urfave/negroni v1.0.0
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
|
||||
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
)
|
218
go.sum
Normal file
|
@ -0,0 +1,218 @@
|
|||
github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da h1:KjTM2ks9d14ZYCvmHS9iAKVt9AyzRSqNU1qabPih5BY=
|
||||
github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da/go.mod h1:eHEWzANqSiWQsof+nXEI9bUVUyV6F53Fp89EuCh2EAA=
|
||||
github.com/aead/chacha20poly1305 v0.0.0-20170617001512-233f39982aeb/go.mod h1:UzH9IX1MMqOcwhoNOIjmTQeAxrFgzs50j4golQtXXxU=
|
||||
github.com/aead/chacha20poly1305 v0.0.0-20201124145622-1a5aba2a8b29 h1:1DcvRPZOdbQRg5nAHt2jrc5QbV0AGuhDdfQI6gXjiFE=
|
||||
github.com/aead/chacha20poly1305 v0.0.0-20201124145622-1a5aba2a8b29/go.mod h1:UzH9IX1MMqOcwhoNOIjmTQeAxrFgzs50j4golQtXXxU=
|
||||
github.com/aead/poly1305 v0.0.0-20180717145839-3fee0db0b635 h1:52m0LGchQBBVqJRyYYufQuIbVqRawmubW3OFGqK1ekw=
|
||||
github.com/aead/poly1305 v0.0.0-20180717145839-3fee0db0b635/go.mod h1:lmLxL+FV291OopO93Bwf9fQLQeLyt33VJRUg5VJ30us=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/auth0/go-jwt-middleware v1.0.0 h1:76t55qLQu3xjMFbkirbSCA8ZPcO1ny+20Uq1wkSTRDE=
|
||||
github.com/auth0/go-jwt-middleware v1.0.0/go.mod h1:nX2S0GmCyl087kdNSSItfOvMYokq5PSTG1yGIP5Le4U=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
|
||||
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
||||
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
|
||||
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.mod h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/flynn/noise v0.0.0-20210331153838-4bdb43be3117/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
||||
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
|
||||
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kardianos/service v1.1.0/go.mod h1:RrJI2xn5vve/r32U5suTbeaSGoMU6GbNPoj36CVYcHc=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/meatballhat/negroni-logrus v1.1.1 h1:eDgsDdJYy97gI9kr+YS/uDKCaqK4S6CUQLPG0vNDqZA=
|
||||
github.com/meatballhat/negroni-logrus v1.1.1/go.mod h1:FlwPdXB6PeT8EG/gCd/2766M2LNF7SwZiNGD6t2NRGU=
|
||||
github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nbrownus/go-metrics-prometheus v0.0.0-20180622211546-6e6d5173d99c/go.mod h1:1yMri853KAI2pPAUnESjaqZj9JeImOUM+6A4GuuPmTs=
|
||||
github.com/o1egl/paseto v1.0.0 h1:bwpvPu2au176w4IBlhbyUv/S5VPptERIA99Oap5qUd0=
|
||||
github.com/o1egl/paseto v1.0.0/go.mod h1:5HxsZPmw/3RI2pAwGo1HhOOwSdvBpcuVzO7uDkm+CLU=
|
||||
github.com/o1egl/paseto/v2 v2.1.1 h1:vWP5o9P/3UEXXQ+/BHQRrpdXpK+X9RMtD4IvB30FWF0=
|
||||
github.com/o1egl/paseto/v2 v2.1.1/go.mod h1:HQ4aS/uX2A/v1h/BIh5XTFStRm+eMdI7G/jBaQ0vaCA=
|
||||
github.com/pieterbork/ed25519 v0.0.0-20200301051623-f19b832d0d2e h1:iTTj9cGouNkvs1qyL/PexzFJ32MZu4aQsaAjS2uIwVw=
|
||||
github.com/pieterbork/ed25519 v0.0.0-20200301051623-f19b832d0d2e/go.mod h1:0s8sTU9YA2e8B5N+4O0BiuVuzcUbAEJGz3GyAtcqCFw=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20191202183732-d1d2010b5bee/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/signal-golang/ed25519 v0.0.0-20200301051623-f19b832d0d2e h1:XLsRkdVt2FOctJUbBbyBcOtmSgRv+jMQYgqESfOgTxo=
|
||||
github.com/signal-golang/ed25519 v0.0.0-20200301051623-f19b832d0d2e/go.mod h1:2Ad7iWk5/yN+AiUcyx6EteImlJBcxBM0Q2R/bmXoCA0=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
|
||||
github.com/slackhq/nebula v1.4.0 h1:EwjObdoI1a0V4hXGn8cc/5gbGvMKuKBp1H+bOCnyZU8=
|
||||
github.com/slackhq/nebula v1.4.0/go.mod h1:N4OtbI4997CFRdZZiJSOwuQdvslvef5CkWR6Nd+tUB4=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v1.1.0 h1:MkTeG1DMwsrdH7QtLXy5W+fUxWq+vmb6cLmyJ7aRtF0=
|
||||
github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
|
||||
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/songgao/water v0.0.0-20190725173103-fd331bda3f4b/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125 h1:3SNcvBmEPE1YlB1JpVZouslJpI3GBNoiqW7+wb0Rz7w=
|
||||
github.com/teris-io/shortid v0.0.0-20201117134242-e59966efd125/go.mod h1:M8agBzgqHIhgj7wEn9/0hJUZcrvt9VY+Ln+S1I5Mha0=
|
||||
github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc=
|
||||
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
|
||||
github.com/vishvananda/netlink v1.0.1-0.20190522153524-00009fb8606a/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 h1:F5Gozwx4I1xtr/sr/8CFbb57iKi3297KFs0QDbGN60A=
|
||||
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
50
hubble/README.adoc
Normal file
|
@ -0,0 +1,50 @@
|
|||
== Hubble
|
||||
:imagesdir: ../docs/images
|
||||
|
||||
=== Overview
|
||||
|
||||
Hubble is a frontend application which communicates with the Quasar API
|
||||
in order to manage Starship networks.
|
||||
It shows all available networks in a sidebar, in addition to a 'Create New' button.
|
||||
When you select a network it shows network settings, which you can modify.
|
||||
You also have the option to delete a network.
|
||||
|
||||
.Hubble Network Page
|
||||
image::hubblenetwork.png[]
|
||||
|
||||
The network settings page also shows all nodes in the network as collapsible
|
||||
cards, which initially show basic information such as the node name,
|
||||
hostname and IP address,
|
||||
but can be expanded to reveal editable settings for the node, such
|
||||
as firewall rules and the groups the node belongs to.
|
||||
|
||||
.Hubble Node Management
|
||||
image::hubblenodes.png[]
|
||||
|
||||
=== Installation
|
||||
|
||||
[source,shell]
|
||||
----
|
||||
cd hubble
|
||||
npm install
|
||||
|
||||
# to build to static site (not needed for running dev server)
|
||||
npm run build
|
||||
----
|
||||
|
||||
=== Operating Instructions
|
||||
|
||||
The build creates a `public` directory containing HTML, CSS and JavaScript files
|
||||
which can be served using any HTTP server.
|
||||
|
||||
[source,shell]
|
||||
----
|
||||
cd hubble
|
||||
|
||||
# to run dev server
|
||||
npm run dev
|
||||
|
||||
# to run 'production' server
|
||||
npm run start
|
||||
----
|
||||
|
3553
hubble/package-lock.json
generated
Normal file
File diff suppressed because it is too large
30
hubble/package.json
Normal file
|
@ -0,0 +1,30 @@
|
|||
{
|
||||
"name": "svelte-app",
|
||||
"version": "1.0.0",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"build": "rollup -c",
|
||||
"dev": "rollup -c -w",
|
||||
"start": "sirv public --no-clear -s"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@rollup/plugin-commonjs": "^17.0.0",
|
||||
"@rollup/plugin-node-resolve": "^11.0.0",
|
||||
"rollup": "^2.3.4",
|
||||
"rollup-plugin-css-only": "^3.1.0",
|
||||
"rollup-plugin-livereload": "^2.0.0",
|
||||
"rollup-plugin-svelte": "^7.0.0",
|
||||
"rollup-plugin-terser": "^7.0.0",
|
||||
"svelte": "^3.0.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"autoprefixer": "^9.8.6",
|
||||
"axios": "^0.21.1",
|
||||
"postcss": "^7.0.35",
|
||||
"sirv-cli": "^1.0.0",
|
||||
"svelte-notifications": "^0.9.9",
|
||||
"svelte-preprocess": "^4.7.3",
|
||||
"svelte-routing": "^1.6.0",
|
||||
"tailwindcss": "npm:@tailwindcss/postcss7-compat@^2.1.2"
|
||||
}
|
||||
}
|
BIN
hubble/public/favicon.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 30 KiB |
63
hubble/public/global.css
Normal file
|
@ -0,0 +1,63 @@
|
|||
html, body {
|
||||
position: relative;
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
}
|
||||
|
||||
body {
|
||||
color: #333;
|
||||
margin: 0;
|
||||
box-sizing: border-box;
|
||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen-Sans, Ubuntu, Cantarell, "Helvetica Neue", sans-serif;
|
||||
height: 100%;
|
||||
}
|
||||
|
||||
a {
|
||||
color: rgb(0,100,200);
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
a:hover {
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
||||
a:visited {
|
||||
color: rgb(0,80,160);
|
||||
}
|
||||
|
||||
label {
|
||||
display: block;
|
||||
}
|
||||
|
||||
input, button, select, textarea {
|
||||
font-family: inherit;
|
||||
font-size: inherit;
|
||||
-webkit-padding: 0.4em 0;
|
||||
padding: 0.4em;
|
||||
margin: 0 0 0.5em 0;
|
||||
box-sizing: border-box;
|
||||
border: 1px solid #ccc;
|
||||
border-radius: 2px;
|
||||
}
|
||||
|
||||
input:disabled {
|
||||
color: #ccc;
|
||||
}
|
||||
|
||||
button {
|
||||
color: #333;
|
||||
background-color: #f4f4f4;
|
||||
outline: none;
|
||||
}
|
||||
|
||||
button:disabled {
|
||||
color: #999;
|
||||
}
|
||||
|
||||
button:not(:disabled):active {
|
||||
background-color: #ddd;
|
||||
}
|
||||
|
||||
button:focus {
|
||||
border-color: #666;
|
||||
}
|
19
hubble/public/index.html
Normal file
|
@ -0,0 +1,19 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset='utf-8'>
|
||||
<meta name='viewport' content='width=device-width,initial-scale=1'>
|
||||
|
||||
<title>Hubble</title>
|
||||
|
||||
<link rel='icon' type='image/png' href='/favicon.png'>
|
||||
<!-- Icons made by <a href="https://www.flaticon.com/authors/icongeek26" title="Icongeek26">Icongeek26</a> from <a href="https://www.flaticon.com/" title="Flaticon">www.flaticon.com</a> -->
|
||||
<link rel='stylesheet' href='/global.css'>
|
||||
<link rel='stylesheet' href='/build/bundle.css'>
|
||||
|
||||
<script defer src='/build/bundle.js'></script>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
</body>
|
||||
</html>
|
87
hubble/rollup.config.js
Normal file
|
@ -0,0 +1,87 @@
|
|||
import svelte from 'rollup-plugin-svelte';
|
||||
import commonjs from '@rollup/plugin-commonjs';
|
||||
import resolve from '@rollup/plugin-node-resolve';
|
||||
import livereload from 'rollup-plugin-livereload';
|
||||
import sveltePreprocess from "svelte-preprocess";
|
||||
import { terser } from 'rollup-plugin-terser';
|
||||
import css from 'rollup-plugin-css-only';
|
||||
|
||||
const production = !process.env.ROLLUP_WATCH;
|
||||
|
||||
function serve() {
|
||||
let server;
|
||||
|
||||
function toExit() {
|
||||
if (server) server.kill(0);
|
||||
}
|
||||
|
||||
return {
|
||||
writeBundle() {
|
||||
if (server) return;
|
||||
server = require('child_process').spawn('npm', ['run', 'start', '--', '--dev'], {
|
||||
stdio: ['ignore', 'inherit', 'inherit'],
|
||||
shell: true
|
||||
});
|
||||
|
||||
process.on('SIGTERM', toExit);
|
||||
process.on('exit', toExit);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
export default {
|
||||
input: 'src/main.js',
|
||||
output: {
|
||||
sourcemap: true,
|
||||
format: 'iife',
|
||||
name: 'app',
|
||||
file: 'public/build/bundle.js'
|
||||
},
|
||||
plugins: [
|
||||
svelte({
|
||||
compilerOptions: {
|
||||
// enable run-time checks when not in production
|
||||
dev: !production,
|
||||
hydratable: true,
|
||||
},
|
||||
preprocess: sveltePreprocess({
|
||||
sourceMap: !production,
|
||||
postcss: {
|
||||
plugins: [
|
||||
require("tailwindcss"),
|
||||
require("autoprefixer"),
|
||||
],
|
||||
},
|
||||
}),
|
||||
}),
|
||||
// we'll extract any component CSS out into
|
||||
// a separate file - better for performance
|
||||
css({ output: 'bundle.css' }),
|
||||
|
||||
// If you have external dependencies installed from
|
||||
// npm, you'll most likely need these plugins. In
|
||||
// some cases you'll need additional configuration -
|
||||
// consult the documentation for details:
|
||||
// https://github.com/rollup/plugins/tree/master/packages/commonjs
|
||||
resolve({
|
||||
browser: true,
|
||||
dedupe: ['svelte']
|
||||
}),
|
||||
commonjs(),
|
||||
|
||||
// In dev mode, call `npm run start` once
|
||||
// the bundle has been generated
|
||||
!production && serve(),
|
||||
|
||||
// Watch the `public` directory and refresh the
|
||||
// browser on changes when not in production
|
||||
!production && livereload('public'),
|
||||
|
||||
// If we're building for production (npm run build
|
||||
// instead of npm run dev), minify
|
||||
production && terser(),
|
||||
],
|
||||
watch: {
|
||||
clearScreen: false
|
||||
}
|
||||
};
|
117
hubble/scripts/setupTypeScript.js
Normal file
|
@ -0,0 +1,117 @@
|
|||
// @ts-check
|
||||
|
||||
/** This script modifies the project to support TS code in .svelte files like:
|
||||
|
||||
<script lang="ts">
|
||||
export let name: string;
|
||||
</script>
|
||||
|
||||
As well as validating the code for CI.
|
||||
*/
|
||||
|
||||
/** To work on this script:
|
||||
rm -rf test-template template && git clone sveltejs/template test-template && node scripts/setupTypeScript.js test-template
|
||||
*/
|
||||
|
||||
const fs = require("fs")
|
||||
const path = require("path")
|
||||
const { argv } = require("process")
|
||||
|
||||
const projectRoot = argv[2] || path.join(__dirname, "..")
|
||||
|
||||
// Add deps to pkg.json
|
||||
const packageJSON = JSON.parse(fs.readFileSync(path.join(projectRoot, "package.json"), "utf8"))
|
||||
packageJSON.devDependencies = Object.assign(packageJSON.devDependencies, {
|
||||
"svelte-check": "^1.0.0",
|
||||
"svelte-preprocess": "^4.0.0",
|
||||
"@rollup/plugin-typescript": "^8.0.0",
|
||||
"typescript": "^4.0.0",
|
||||
"tslib": "^2.0.0",
|
||||
"@tsconfig/svelte": "^1.0.0"
|
||||
})
|
||||
|
||||
// Add script for checking
|
||||
packageJSON.scripts = Object.assign(packageJSON.scripts, {
|
||||
"validate": "svelte-check"
|
||||
})
|
||||
|
||||
// Write the package JSON
|
||||
fs.writeFileSync(path.join(projectRoot, "package.json"), JSON.stringify(packageJSON, null, " "))
|
||||
|
||||
// mv src/main.js to main.ts - note, we need to edit rollup.config.js for this too
|
||||
const beforeMainJSPath = path.join(projectRoot, "src", "main.js")
|
||||
const afterMainTSPath = path.join(projectRoot, "src", "main.ts")
|
||||
fs.renameSync(beforeMainJSPath, afterMainTSPath)
|
||||
|
||||
// Switch the app.svelte file to use TS
|
||||
const appSveltePath = path.join(projectRoot, "src", "App.svelte")
|
||||
let appFile = fs.readFileSync(appSveltePath, "utf8")
|
||||
appFile = appFile.replace("<script>", '<script lang="ts">')
|
||||
appFile = appFile.replace("export let name;", 'export let name: string;')
|
||||
fs.writeFileSync(appSveltePath, appFile)
|
||||
|
||||
// Edit rollup config
|
||||
const rollupConfigPath = path.join(projectRoot, "rollup.config.js")
|
||||
let rollupConfig = fs.readFileSync(rollupConfigPath, "utf8")
|
||||
|
||||
// Edit imports
|
||||
rollupConfig = rollupConfig.replace(`'rollup-plugin-terser';`, `'rollup-plugin-terser';
|
||||
import sveltePreprocess from 'svelte-preprocess';
|
||||
import typescript from '@rollup/plugin-typescript';`)
|
||||
|
||||
// Replace name of entry point
|
||||
rollupConfig = rollupConfig.replace(`'src/main.js'`, `'src/main.ts'`)
|
||||
|
||||
// Add preprocessor
|
||||
rollupConfig = rollupConfig.replace(
|
||||
'compilerOptions:',
|
||||
'preprocess: sveltePreprocess({ sourceMap: !production }),\n\t\t\tcompilerOptions:'
|
||||
);
|
||||
|
||||
// Add TypeScript
|
||||
rollupConfig = rollupConfig.replace(
|
||||
'commonjs(),',
|
||||
'commonjs(),\n\t\ttypescript({\n\t\t\tsourceMap: !production,\n\t\t\tinlineSources: !production\n\t\t}),'
|
||||
);
|
||||
fs.writeFileSync(rollupConfigPath, rollupConfig)
|
||||
|
||||
// Add TSConfig
|
||||
const tsconfig = `{
|
||||
"extends": "@tsconfig/svelte/tsconfig.json",
|
||||
|
||||
"include": ["src/**/*"],
|
||||
"exclude": ["node_modules/*", "__sapper__/*", "public/*"]
|
||||
}`
|
||||
const tsconfigPath = path.join(projectRoot, "tsconfig.json")
|
||||
fs.writeFileSync(tsconfigPath, tsconfig)
|
||||
|
||||
// Delete this script, but not during testing
|
||||
if (!argv[2]) {
|
||||
// Remove the script
|
||||
fs.unlinkSync(path.join(__filename))
|
||||
|
||||
// Check for Mac's DS_store file, and if it's the only one left remove it
|
||||
const remainingFiles = fs.readdirSync(path.join(__dirname))
|
||||
if (remainingFiles.length === 1 && remainingFiles[0] === '.DS_store') {
|
||||
fs.unlinkSync(path.join(__dirname, '.DS_store'))
|
||||
}
|
||||
|
||||
// Check if the scripts folder is empty
|
||||
if (fs.readdirSync(path.join(__dirname)).length === 0) {
|
||||
// Remove the scripts folder
|
||||
fs.rmdirSync(path.join(__dirname))
|
||||
}
|
||||
}
|
||||
|
||||
// Adds the extension recommendation
|
||||
fs.mkdirSync(path.join(projectRoot, ".vscode"), { recursive: true })
|
||||
fs.writeFileSync(path.join(projectRoot, ".vscode", "extensions.json"), `{
|
||||
"recommendations": ["svelte.svelte-vscode"]
|
||||
}
|
||||
`)
|
||||
|
||||
console.log("Converted to TypeScript.")
|
||||
|
||||
if (fs.existsSync(path.join(projectRoot, "node_modules"))) {
|
||||
console.log("\nYou will need to re-run your dependency manager to get started.")
|
||||
}
|
62
hubble/src/App.svelte
Normal file
|
@ -0,0 +1,62 @@
|
|||
<script>
|
||||
import { Router, Route } from "svelte-routing";
|
||||
import Notifications from 'svelte-notifications';
|
||||
import Home from "./pages/Home.svelte";
|
||||
import NetworkPage from "./pages/network.svelte";
|
||||
import NewNetworkPage from "./pages/newnet.svelte";
|
||||
import LoginPage from "./pages/login.svelte";
|
||||
import NetworkSidebar from "./sidebar.svelte";
|
||||
export let url = ""; // This property must be declared so the Router is not ignored
|
||||
let networks = [];
|
||||
import API from './api.js';
|
||||
export const getAllNetworks = async() => {
|
||||
try {
|
||||
const response = await API.get("/networks/all");
|
||||
console.log(response)
|
||||
return response;
|
||||
} catch (error) {
|
||||
console.error(error);
|
||||
}
|
||||
};
|
||||
export const updateNetworks = async() => {
|
||||
console.log("Updating network list")
|
||||
networks = await getAllNetworks();
|
||||
}
|
||||
import { onDestroy } from 'svelte';
|
||||
import { LoggedIn } from './store.js';
|
||||
|
||||
let showLogin;
|
||||
const unLoggedIn = LoggedIn.subscribe(v => showLogin = !v);
|
||||
console.log("showLogin", showLogin)
|
||||
onDestroy(unLoggedIn);
|
||||
|
||||
</script>
|
||||
|
||||
<main class="h-screen">
|
||||
<Notifications>
|
||||
{#if !showLogin}
|
||||
<div class="grid grid-cols-5 h-screen">
|
||||
<Router url="{url}">
|
||||
<NetworkSidebar networks={networks} on:updateNetworks={updateNetworks}/>
|
||||
<div class="col-span-4 bg-gray-100">
|
||||
<Route path="/"><Home /></Route>
|
||||
<Route path="networks/:netname" let:params>
|
||||
<NetworkPage netname="{params.netname}" on:updateNetworks={updateNetworks}/>
|
||||
</Route>
|
||||
<Route path="networks/new">
|
||||
<NewNetworkPage on:updateNetworks={updateNetworks}/>
|
||||
</Route>
|
||||
</div>
|
||||
</Router>
|
||||
</div>
|
||||
{:else}
|
||||
<LoginPage />
|
||||
{/if}
|
||||
</Notifications>
|
||||
</main>
|
||||
|
||||
<style global lang="postcss">
|
||||
@tailwind base;
|
||||
@tailwind components;
|
||||
@tailwind utilities;
|
||||
</style>
|
59
hubble/src/api.js
Normal file
|
@ -0,0 +1,59 @@
|
|||
// https://dev.to/lukocastillo/svelte-3-how-to-connect-your-app-with-a-rest-api-axios-2h4e
|
||||
|
||||
import axios from "axios";
|
||||
import { AuthToken } from './store';
|
||||
|
||||
// Create an instance of axios so every request uses the same base URL.
|
||||
const axiosAPI = axios.create({
|
||||
baseURL : "http://127.0.0.1:6947/api/"
|
||||
});
|
||||
|
||||
let token
|
||||
const unsubscribe = AuthToken.subscribe(value => {
|
||||
token = value;
|
||||
});
|
||||
|
||||
// Implement a single method that executes all requests from here.
|
||||
const apiRequest = (method, url, request) => {
|
||||
const headers = {
|
||||
authorization: "Bearer "+token,
|
||||
};
|
||||
// Use the axios instance to perform the request received from each HTTP method helper
|
||||
return axiosAPI({
|
||||
method,
|
||||
url,
|
||||
data: request,
|
||||
headers
|
||||
}).then(res => {
|
||||
return Promise.resolve(res.data);
|
||||
})
|
||||
.catch(err => {
|
||||
return Promise.reject(err);
|
||||
});
|
||||
};
|
||||
|
||||
// function to execute the http get request
|
||||
const get = (url, request) => apiRequest("get",url,request);
|
||||
|
||||
// function to execute the http delete request
|
||||
const deleteRequest = (url, request) => apiRequest("delete", url, request);
|
||||
|
||||
// function to execute the http post request
|
||||
const post = (url, request) => apiRequest("post", url, request);
|
||||
|
||||
// function to execute the http put request
|
||||
const put = (url, request) => apiRequest("put", url, request);
|
||||
|
||||
// function to execute the http patch request
|
||||
const patch = (url, request) => apiRequest("patch", url, request);
|
||||
|
||||
// expose your method to other services or actions
|
||||
const API ={
|
||||
get,
|
||||
delete: deleteRequest,
|
||||
post,
|
||||
put,
|
||||
patch
|
||||
};
|
||||
|
||||
export default API;
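
// Example usage from a component (see App.svelte):
//   import API from './api.js';
//   const networks = await API.get("/networks/all");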
|
68
hubble/src/components/firewall.svelte
Normal file
|
@ -0,0 +1,68 @@
|
|||
<script>
|
||||
export let rule
|
||||
export let index
|
||||
export let netgroups
|
||||
import { createEventDispatcher } from 'svelte';
|
||||
const dispatch = createEventDispatcher();
|
||||
function dispatchUpdate(){
|
||||
dispatch('updateRule', {
|
||||
index: index,
|
||||
rule: rule
|
||||
})
|
||||
}
|
||||
function groupChecked(e, group){
|
||||
console.log(group, e.target.checked)
|
||||
let groups = rule.groups
|
||||
if (e.target.checked){ // add group
|
||||
groups.push(group)
|
||||
} else {
|
||||
for( var i = 0; i < groups.length; i++){
|
||||
if ( groups[i] === group) {
|
||||
groups.splice(i, 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
rule.groups = groups;
|
||||
dispatchUpdate()
|
||||
}
|
||||
</script>
|
||||
|
||||
<main class="grid grid-cols-5 mt-2 flex items-start">
|
||||
<div class="flex flex-row">
|
||||
<label for="port" class="my-auto">Port</label>
|
||||
<input id="port" name="port" type="text" bind:value={rule.port}
|
||||
class="w-1/2 h-7 pl-3 pr-6 text-base placeholder-gray-600 border rounded-lg appearance-none focus:shadow-outline my-auto mx-4 bg-gray-100"/>
|
||||
</div>
|
||||
<div class="flex flex-row">
|
||||
<label for="protocol" class="my-auto">Protocol</label>
|
||||
<select bind:value={rule.proto} on:blur={dispatchUpdate}
|
||||
class="w-1/2 h-7 pl-3 pr-6 text-base placeholder-gray-600 border rounded-lg appearance-none focus:shadow-outline my-auto mx-4">
|
||||
<option value="any">Any</option>
|
||||
<option value="icmp">ICMP</option>
|
||||
<option value="udp">UDP</option>
|
||||
<option value="tcp">TCP</option>
|
||||
</select>
|
||||
</div>
|
||||
<div class="flex flex-row">
|
||||
<label for="any" class="my-auto pr-4">Any Host</label>
|
||||
<input type="checkbox" bind:checked={rule.any} class="my-auto" />
|
||||
</div>
|
||||
{#if !rule.any}
|
||||
<div id="groups">
|
||||
<h1><b>Groups</b></h1>
|
||||
{#each netgroups as group}
|
||||
<div class="flex flex-row">
|
||||
<input type="checkbox" id={group} class="my-auto"
|
||||
checked={rule.groups.includes(group)} on:change={(e) => groupChecked(e, group)}/>
|
||||
<label for="{group}" class="my-auto pl-2">{group}</label>
|
||||
</div>
|
||||
{/each}
|
||||
</div>
|
||||
{:else}
|
||||
<div class="w-1/5"></div>
|
||||
{/if}
|
||||
<div class="flex justify-end">
|
||||
<button on:click={() => dispatch('deleteRule')}
|
||||
class="bg-red-400 text-white rounded-xl p-1 h-7 w-1/2">Delete</button>
|
||||
</div>
|
||||
</main>
|
112
hubble/src/components/nodecard.svelte
Normal file
|
@ -0,0 +1,112 @@
|
|||
<script>
|
||||
import NodeSettings from './nodesettings.svelte';
|
||||
export let groups;
|
||||
export let netname;
|
||||
export let node = {};
|
||||
export let statusColour = "bg-gray-300"
|
||||
let collapsed = true;
|
||||
$: if (node.status === "pending"){
|
||||
statusColour = "bg-gray-400";
|
||||
} else if (node.status === "active"){
|
||||
statusColour = "bg-green-400";
|
||||
} else {
|
||||
statusColour = "bg-red-400"
|
||||
}
|
||||
import API from '../api.js';
|
||||
import { createEventDispatcher } from 'svelte';
|
||||
const dispatch = createEventDispatcher();
|
||||
import { getNotificationsContext } from 'svelte-notifications';
|
||||
const { addNotification } = getNotificationsContext();
|
||||
export const approveNode = async() => {
|
||||
try {
|
||||
const response = await API.post(`/networks/${netname}/nodes/${node.name}/approve`);
|
||||
console.log(response)
|
||||
dispatch('updateNodes');
|
||||
addNotification({
|
||||
text: `Enabled node ${node.name}`,
|
||||
position: 'top-right',
|
||||
type: 'success',
|
||||
removeAfter: 2000,
|
||||
})
|
||||
return response;
|
||||
} catch (error) {
|
||||
console.error(error);
|
||||
addNotification({
|
||||
text: `Could not enable node ${node.name}`,
|
||||
position: 'top-right',
|
||||
type: 'danger',
|
||||
removeAfter: 2000,
|
||||
})
|
||||
}
|
||||
};
|
||||
export const disableNode = async() => {
|
||||
try {
|
||||
const response = await API.post(`/networks/${netname}/nodes/${node.name}/disable`);
|
||||
console.log(response)
|
||||
dispatch('updateNodes');
|
||||
addNotification({
|
||||
text: `Disabled node ${node.name}`,
|
||||
position: 'top-right',
|
||||
type: 'success',
|
||||
removeAfter: 2000,
|
||||
})
|
||||
return response;
|
||||
} catch (error) {
|
||||
console.error(error);
|
||||
addNotification({
|
||||
text: `Could not disable node ${node.name}`,
|
||||
position: 'top-right',
|
||||
type: 'danger',
|
||||
removeAfter: 2000,
|
||||
})
|
||||
}
|
||||
};
|
||||
</script>
|
||||
<main>
|
||||
<div class="shadow mt-2 p-4 hover:shadow-lg">
|
||||
<div class="flex flex-row justify-between">
|
||||
<div>
|
||||
<h3 class="text-gray-700">
|
||||
<b class="text-black">{node.name}</b>
|
||||
<span class="dot {statusColour}"></span>
|
||||
{ node.latest_fetch == "NEVER" ? "Config not yet fetched by node." : `Latest fetch at ${node.latest_fetch}`}
|
||||
</h3>
|
||||
<h4 class="text-gray-400">{node.pubkey}</h4>
|
||||
<h4>Hostname: {node.hostname}</h4>
|
||||
<h4>Address: {node.address}</h4>
|
||||
</div>
|
||||
<div>
|
||||
{#if node.status === "pending"}
|
||||
<button on:click={async() => {await approveNode()}}
|
||||
class="bg-green-600 rounded-xl p-1 text-white">
|
||||
Approve
|
||||
</button>
|
||||
{:else if node.status === "active"}
|
||||
<button on:click={async() => {await disableNode()}}
|
||||
class="bg-red-500 rounded-xl p-1 text-white">Disable</button>
|
||||
{:else}
|
||||
<button on:click={async() => {await approveNode()}}
|
||||
class="bg-green-600 rounded-xl p-1 text-white">Enable</button>
|
||||
{/if}
|
||||
</div>
|
||||
</div>
|
||||
{#if !collapsed}
|
||||
<NodeSettings nodename={node.name} netname={netname} netgroups={groups}/>
|
||||
{/if}
|
||||
<div class="flex justify-end">
|
||||
<i class="fas fa-times"></i>
|
||||
<button on:click={()=>{collapsed = !collapsed}} class="p-1 border-1 rounded-xl">
|
||||
{ collapsed ? "EXPAND" : "COLLAPSE" }
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</main>
|
||||
|
||||
<style>
|
||||
.dot {
|
||||
height: 10px;
|
||||
width: 10px;
|
||||
border-radius: 50%;
|
||||
display: inline-block;
|
||||
}
|
||||
</style>
|
206
hubble/src/components/nodesettings.svelte
Normal file
|
@ -0,0 +1,206 @@
|
|||
<script>
|
||||
export let netname
|
||||
export let nodename
|
||||
export let netgroups
|
||||
export let netgroupOptions = netgroups
|
||||
let newgroup
|
||||
let node = {
|
||||
is_lighthouse: false,
|
||||
name: '',
|
||||
hostname: '',
|
||||
address: '',
|
||||
static_address: '',
|
||||
listen_port: 0,
|
||||
firewall_outbound: [],
|
||||
firewall_inbound: [],
|
||||
groups: [],
|
||||
}
|
||||
import { getNotificationsContext } from 'svelte-notifications';
|
||||
const { addNotification } = getNotificationsContext();
|
||||
import API from '../api.js';
|
||||
import FirewallRule from './firewall.svelte';
|
||||
export const updateNode = async() => {
|
||||
try {
|
||||
const response = await API.post(`/networks/${netname}/nodes/${nodename}/update`,
|
||||
{
|
||||
'is_lighthouse': node.is_lighthouse,
|
||||
'static_address': node.static_address,
|
||||
'listen_port': node.listen_port,
|
||||
'groups': node.groups,
|
||||
'firewall_inbound': node.firewall_inbound,
|
||||
'firewall_outbound': node.firewall_outbound,
|
||||
});
|
||||
console.log(response)
|
||||
addNotification({
|
||||
text: `Updated node ${node.name}`,
|
||||
position: 'top-right',
|
||||
type: 'success',
|
||||
removeAfter: 2000,
|
||||
})
|
||||
return response;
|
||||
} catch (error) {
|
||||
console.error(error);
|
||||
addNotification({
|
||||
text: `Could not update node ${node.name}`,
|
||||
position: 'top-right',
|
||||
type: 'danger',
|
||||
removeAfter: 2000,
|
||||
})
|
||||
}
|
||||
};
|
||||
async function handleClick(){
|
||||
console.log("UPDATING NETWORK", node.is_lighthouse, static_address, groups)
|
||||
await updateNode();
|
||||
console.log("SEND REQUEST TO UPDATE NODE")
|
||||
}
|
||||
export const getNodeInfo = async() => {
|
||||
try {
|
||||
const response = await API.get(`/networks/${netname}/nodes/${nodename}/info`);
|
||||
console.log("NODEINFO", response)
|
||||
return response;
|
||||
} catch (error) {
|
||||
console.error(error);
|
||||
addNotification({
|
||||
text: `Could not get info for node ${node.name}`,
|
||||
position: 'top-right',
|
||||
type: 'danger',
|
||||
removeAfter: 2000,
|
||||
})
|
||||
}
|
||||
};
|
||||
async function refreshInfo(){
|
||||
const res = await getNodeInfo();
|
||||
node = res;
|
||||
}
|
||||
import { onMount } from 'svelte';
|
||||
onMount(async() => {
|
||||
await refreshInfo()
|
||||
})
|
||||
function addGroup(){
|
||||
if (newgroup == ""){return}
|
||||
console.log("Adding", newgroup, "to", node.groups)
|
||||
let groups = node.groups
|
||||
groups.push(newgroup);
|
||||
node.groups = groups;
|
||||
newgroup = "";
|
||||
}
|
||||
function removeGroup(groupname){
|
||||
console.log("Removing", groupname, "from", node.groups);
|
||||
groups = node.groups;
|
||||
for( var i = 0; i < groups.length; i++){
|
||||
if ( groups[i] === groupname) {
|
||||
groups.splice(i, 1);
|
||||
}
|
||||
}
|
||||
node.groups = groups;
|
||||
}
|
||||
function addInboundRule(){
|
||||
var rules = node.firewall_inbound
|
||||
rules.push({
|
||||
port: 'any',
|
||||
proto: 'any',
|
||||
groups: [],
|
||||
any: true,
|
||||
})
|
||||
node.firewall_inbound = rules
|
||||
}
|
||||
function addOutboundRule(){
|
||||
var rules = node.firewall_outbound
|
||||
rules.push({
|
||||
port: 'any',
|
||||
proto: 'any',
|
||||
groups: [],
|
||||
any: true,
|
||||
})
|
||||
node.firewall_outbound = rules
|
||||
}
|
||||
function deleteInboundRule(index){
|
||||
console.log("Deleting rule at ", index)
|
||||
var rules = node.firewall_inbound
|
||||
rules.splice(index, 1)
|
||||
node.firewall_inbound = rules
|
||||
}
|
||||
function deleteOutboundRule(index){
|
||||
console.log("Deleting rule at ", index)
|
||||
var rules = node.firewall_outbound
|
||||
rules.splice(index, 1)
|
||||
node.firewall_outbound = rules
|
||||
}
|
||||
$: {
|
||||
netgroupOptions = netgroups.filter(n => !node.groups.includes(n))
|
||||
}
|
||||
</script>
|
||||
|
||||
<main>
|
||||
<div class="flex flex-row justify-between my-4">
|
||||
<div class="flex flex-row justify-center">
|
||||
<label for="lighthouse" class="my-auto">Is Lighthouse</label>
|
||||
<input type="checkbox" id="lighthouse" name="lighthouse"
|
||||
class="my-auto ml-4"
|
||||
bind:checked={node.is_lighthouse}/>
|
||||
</div>
|
||||
<div class="flex flex-row justify-center">
|
||||
<label for="static_address" class="my-auto">Static Address</label>
|
||||
<input type="text" id="static_address" name="static_address"
|
||||
class="w-1/2 h-6 pl-3 pr-6 text-base placeholder-gray-600 border rounded-lg appearance-none focus:shadow-outline my-auto mx-4 bg-gray-100"
|
||||
bind:value={node.static_address}/>
|
||||
</div>
|
||||
<div class="flex flex-row justify-center">
|
||||
<label for="listen_port" class="my-auto">Listen Port</label>
|
||||
<input type="number" id="listen_port" name="listen_port"
|
||||
class="w-1/4 h-6 pl-3 pr-6 text-base placeholder-gray-600 border rounded-lg appearance-none focus:shadow-outline my-auto mx-4 bg-gray-100"
|
||||
bind:value={node.listen_port} />
|
||||
</div>
|
||||
</div>
|
||||
<div id="groups" class="bg-gray-200 rounded p-4">
|
||||
<h1 class="text-xl">Groups</h1>
|
||||
<select name="addgroup"
|
||||
class="w-1/4 h-10 mt-2 pl-3 pr-6 text-base placeholder-gray-600 border rounded-lg appearance-none focus:shadow-outline my-auto"
|
||||
bind:value={newgroup}>
|
||||
<option value="">Select group</option>
|
||||
{#each netgroupOptions as group}
|
||||
<option value={group}>{group}</option>
|
||||
{/each}
|
||||
</select>
|
||||
<button on:click={() => addGroup(newgroup)}
|
||||
class="bg-indigo-500 text-white rounded-xl p-1 px-2 h-10"
|
||||
>Add Group</button>
|
||||
<div class="flex flex-row flex-wrap pt-4 ml-2">
|
||||
{#each node.groups as group}
|
||||
<div class="bg-purple-400 rounded-xl text-white m-2 p-1 my-auto">
|
||||
{group}
|
||||
<button on:click={() => removeGroup(group)} class="outline-none border-none text-gray-500 mx-2 my-auto">x</button>
|
||||
</div>
|
||||
{/each}
|
||||
</div>
|
||||
</div>
|
||||
<div id="firewall" class="mt-4">
|
||||
<h1 class="text-xl">Firewall Rules</h1>
|
||||
<div>
|
||||
<h2 class="text-lg">Outbound</h2>
|
||||
{#each node.firewall_outbound as fr, i}
|
||||
<FirewallRule rule={fr} netgroups={netgroups} index={i}
|
||||
on:deleteRule={() => deleteOutboundRule(i)}/>
|
||||
{/each}
|
||||
<div class="flex justify-end mt-3">
|
||||
<button class="bg-blue-400 rounded-xl text-white p-1"
|
||||
on:click={addOutboundRule}>New</button>
|
||||
</div>
|
||||
</div>
|
||||
<div>
|
||||
<h2 class="text-lg">Inbound</h2>
|
||||
{#each node.firewall_inbound as fr, i}
|
||||
<FirewallRule rule={fr} netgroups={netgroups} index={i}
|
||||
on:deleteRule={() => deleteInboundRule(i)}/>
|
||||
{/each}
|
||||
<div class="flex justify-end mt-3">
|
||||
<button class="bg-blue-400 rounded-xl text-white p-1"
|
||||
on:click={addInboundRule}>New</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div id="update">
|
||||
<button class="bg-green-400 rounded-xl text-white p-1"
|
||||
on:click={async() => await handleClick()}>Update</button>
|
||||
</div>
|
||||
</main>
|
130  hubble/src/components/settings.svelte  Normal file
@@ -0,0 +1,130 @@
<script>
export let netname
let newgroup;
let form = "readonly";
export let network;
import { navigate } from "svelte-routing";
import { createEventDispatcher } from 'svelte';
const dispatch = createEventDispatcher();
import API from '../api.js';
import { getNotificationsContext } from 'svelte-notifications';
const { addNotification } = getNotificationsContext();
export const updateNetwork = async() => {
  try {
    const response = await API.post(`/networks/${network.name}/update`,
      {
        'name': network.name,
        'cidr': network.cidr,
        'cipher': network.cipher,
        'groups': network.groups,
      });
    console.log(response)
    addNotification({
      text: `Updated network ${network.name}`,
      position: 'top-right',
      type: 'success',
      removeAfter: 2000,
    })
    return response;
  } catch (error) {
    console.error(error);
    addNotification({
      text: `Could not update network ${network.name}`,
      position: 'top-right',
      type: 'danger',
      removeAfter: 2000,
    })
  }
};
async function handleClick(){
  await updateNetwork();
  dispatch('updateNetworkInfo');
  console.log("FORM SUBMITTED", network.cipher, network.cidr, network.name);
}
export const deleteNetwork = async() => {
  try {
    const response = await API.delete(`/networks/${netname}/delete`);
    console.log(response)
    dispatch('updateNetworks');
    navigate('/');
    addNotification({
      text: `Deleted network ${network.name}`,
      position: 'top-right',
      type: 'success',
      removeAfter: 2000,
    })
    return response;
  } catch (error) {
    console.error(error);
    addNotification({
      text: `Could not delete network ${network.name}`,
      position: 'top-right',
      type: 'danger',
      removeAfter: 2000,
    })
  }
};
$: {
  if (network.name != "") {
    form = "" // enable form
  }
}
function addGroup(){
  console.log("Adding", newgroup, "to", network.groups)
  var groups = network.groups
  groups.push(newgroup);
  network.groups = groups;
  newgroup = "";
}
function removeGroup(groupname){
  console.log("Removing", groupname, "from", network.groups);
  var groups = network.groups;
  for( var i = 0; i < groups.length; i++){
    if ( groups[i] === groupname) {
      groups.splice(i, 1);
    }
  }
  network.groups = groups;
}
</script>

<main class="shadow p-4 mt-4">
  <h2 class="text-2xl">Network Settings</h2>
  <div id="cidr" class="flex flex-row my-4">
    <label for="netname" class="my-auto text-lg">Network CIDR: </label>
    <input type="text" id="name" name="name" bind:value={network.cidr} readonly={form}
      class="w-1/4 h-10 pl-3 pr-6 text-base border rounded-lg appearance-none focus:shadow-outline my-auto mx-4 bg-gray-100">
  </div>
  <div id="cipher" class="flex flex-row my-4">
    <label for="cipher" class="my-auto text-lg">Cipher Algorithm: </label>
    <select name="cipher" id="cipher" bind:value={network.cipher}
      class="w-1/4 h-10 pl-3 pr-6 text-base placeholder-gray-600 border rounded-lg appearance-none focus:shadow-outline my-auto mx-4">
      <option value="aes">AES</option>
      <option value="chachapoly">chacha</option>
    </select>
  </div>
  <div id="groups" class="bg-gray-200 rounded p-4">
    <h1 class="text-lg ml-4 mb-2">Groups</h1>
    <input type="text" id="newgroup" name="newgroup" bind:value={newgroup}
      class="w-1/4 h-10 pl-3 pr-6 text-base placeholder-gray-600 border rounded-lg appearance-none focus:shadow-outline my-auto ml-4 mr-2 bg-gray-100"
    />
    <button on:click={addGroup} class="bg-indigo-500 text-white rounded-xl p-1 px-2">+</button>
    <div class="flex flex-row flex-wrap pt-4 ml-2">
      {#each network.groups as group}
        <div class="bg-pink-400 rounded-xl text-white m-2 p-1 my-auto">
          {group}
          <button on:click={() => removeGroup(group)} class="outline-none border-none text-gray-500 mx-2 my-auto">x</button>
        </div>
      {/each}
    </div>
  </div>
  <div class="text-white my-4 flex flex-row-reverse">
    <button on:click|preventDefault={async() => {await deleteNetwork()}}
      class="bg-red-500 rounded-xl p-1 mx-4" {form}>
      Delete
    </button>
    <button on:click|preventDefault={async() => {await handleClick()}}
      class="bg-green-400 rounded-xl p-1 text-white" {form}>
      Update
    </button>
  </div>
</main>
11  hubble/src/main.js  Normal file
@@ -0,0 +1,11 @@
import App from './App.svelte';

const app = new App({
  target: document.body,
  props: {
    name: 'world'
  },
  hydrate: true
});

export default app;
4  hubble/src/pages/Home.svelte  Normal file
@@ -0,0 +1,4 @@
<main class="p-4 flex justify-items-center items-center content-center justify-center w-full h-full">
  <h1 class="text-5xl">Choose a Network</h1>
</main>
61  hubble/src/pages/login.svelte  Normal file
@@ -0,0 +1,61 @@
<script>
import { LoggedIn, AuthToken } from '../store.js';
import { getNotificationsContext } from 'svelte-notifications';
const { addNotification } = getNotificationsContext();
import { navigate } from "svelte-routing";
import API from '../api.js';
let username = "";
let password = "";
export const loginRequest = async() => {
  try {
    const response = await API.post("/login",
      {
        'username': username,
        'password': password,
      })
    console.log(response)
    addNotification({
      text: `Signed In`,
      position: 'top-right',
      type: 'success',
      removeAfter: 2000,
    })
    LoggedIn.update(_ => true)
    AuthToken.update(_ => response.token)
    navigate('/')
  } catch(error){
    console.log(error);
    addNotification({
      text: `Could not sign in`,
      position: 'top-right',
      type: 'danger',
      removeAfter: 2000,
    })
  }
}
</script>
<main class="p-4 flex justify-items-center w-full justify-center align-center">
  <div class="flex flex-col justify-center w-1/3 shadow p-8 mt-16">
    <h1 class="text-5xl">Login</h1>
    <div>
      <div class="flex flex-row mt-4">
        <label for="username" class="my-auto">Username</label>
        <input type="text" id="username" name="username"
          class="w-1/2 h-6 pl-3 pr-6 text-base placeholder-gray-600 border rounded-lg appearance-none focus:shadow-outline my-auto ml-4 mr-2 bg-gray-100"
          bind:value={username}
        >
      </div>
      <div class="flex flex-row mt-4 sm:bg-black md:bg-green-400 xl:bg-red-500">
        <label for="password" class="my-auto">Password</label>
        <input type="password" id="password" name="password"
          class="w-1/2 h-6 pl-3 pr-6 text-base placeholder-gray-600 border rounded-lg appearance-none focus:shadow-outline my-auto ml-4 mr-2 bg-gray-100"
          bind:value={password}
        >
      </div>
      <div class="flex justify-end">
        <button class="bg-green-500 text-white rounded-xl p-1 px-2"
          on:click|preventDefault={async() => await loginRequest()}>Login</button>
      </div>
    </div>
  </div>
</main>
83  hubble/src/pages/network.svelte  Normal file
@@ -0,0 +1,83 @@
<script>
let nodes = []
let network = {
  name: "",
  cidr: "",
  cipher: "",
  groups: [],
}
export let netname;
import API from '../api.js';
import { onMount } from 'svelte';
import NodeCard from '../components/nodecard.svelte';
import NetworkSettings from '../components/settings.svelte';
import { createEventDispatcher } from 'svelte';
const dispatch = createEventDispatcher();
export const getNetworkInfo = async() => {
  try {
    const response = await API.get(`/networks/${netname}/info`);
    console.log(response)
    return response;
  } catch (error) {
    console.error(error);
  }
};
export const getAllNodes = async() => {
  try {
    const response = await API.get(`/networks/${netname}/nodes/all`);
    console.log(response)
    return response;
  } catch (error) {
    console.error(error);
  }
};

async function updateNodes(){
  const res = await getAllNodes();
  nodes = res;
}
async function updateNetworkInfo(){
  const res = await getNetworkInfo();
  network = res;
}
async function refreshNet(){
  await updateNetworkInfo()
  await updateNodes()
}
onMount(async () => {
  console.log("Mounting component for network ", netname)
  const res = await getAllNodes();
  nodes = res;
  await updateNetworkInfo();
});

$: {
  console.log("Routing to ", netname);
  getAllNodes()
    .then(res => nodes = res)
  getNetworkInfo()
    .then(res => network = res)
}

</script>

<main class="p-4 flex justify-center w-full">
  <div class="flex flex-col justify-center w-8/12">
    <div class="flex flex-row justify-between">
      <div class="flex flex-row items-end">
        <h1 class="text-2xl">{netname}</h1>
        <h2 class="text-gray-400 text-lg ml-4">{network.ca_fingerprint}</h2>
      </div>
      <button
        on:click={async() => {await refreshNet()}}
        class="text-xl text-indigo-800 outline-none border-none">{'\u27F3'}</button>
    </div>
    <NetworkSettings netname={netname} network={network} on:updateNetworks={() => dispatch('updateNetworks')} on:updateNetworkInfo={async() => {await updateNetworkInfo()}}/>
    <h2 class="text-2xl mt-4">Nodes</h2>
    <div>
      {#each nodes as n}
        <NodeCard node={n} netname={netname} groups={network.groups} on:updateNodes={updateNodes}/>
      {/each}
    </div>
  </div>
</main>
68  hubble/src/pages/newnet.svelte  Normal file
@@ -0,0 +1,68 @@
<script>
let name = "";
let cidr = "";
import API from '../api.js';
import { navigate } from "svelte-routing";
import { getNotificationsContext } from 'svelte-notifications';
const { addNotification } = getNotificationsContext();
import { createEventDispatcher } from 'svelte';
const dispatch = createEventDispatcher();
export const newNetwork = async() => {
  console.log("starting req")
  try {
    const response = await API.post("/networks/new",
      {
        'name': name,
        'cidr': cidr,
      })
    console.log(response)
    dispatch('updateNetworks');
    navigate(`/networks/${name}`)
    console.log("ADDING NOTIFICATION")
    addNotification({
      text: `Created network ${name}`,
      position: 'top-right',
      type: 'success',
      removeAfter: 2000,
    })
  } catch(error){
    console.log(error);
    console.log("ADDING NOTIFICATION")
    addNotification({
      text: `Could not create network ${name}`,
      position: 'top-right',
      type: 'danger',
      removeAfter: 2000,
    })
  }
}
function handleClick(){
  console.log("starting handle click")
  newNetwork()
}
</script>

<main class="p-4 flex justify-items-center w-full justify-center align-center">
  <div class="flex flex-col justify-center w-8/12 shadow p-8 mt-8">
    <h1 class="text-5xl">New Network</h1>
    <div>
      <div class="flex flex-row mt-4">
        <label for="name" class="my-auto">Network Name</label>
        <input type="text" id="name" name="name"
          class="w-1/4 h-6 pl-3 pr-6 text-base placeholder-gray-600 border rounded-lg appearance-none focus:shadow-outline my-auto ml-4 mr-2 bg-gray-100"
          bind:value={name}>
      </div>
      <div class="flex flex-row mt-4">
        <label for="cidr" class="my-auto">CIDR</label>
        <input type="text" id="cidr" name="cidr"
          class="w-1/4 h-6 pl-3 pr-6 text-base placeholder-gray-600 border rounded-lg appearance-none focus:shadow-outline my-auto ml-4 mr-2 bg-gray-100"
          bind:value={cidr}>
      </div>
      <div class="flex justify-end">
        <button class="bg-green-500 text-white rounded-xl p-1 px-2"
          on:click|preventDefault={handleClick}>create</button>
      </div>
    </div>
  </div>
</main>
14  hubble/src/server.js  Normal file
@@ -0,0 +1,14 @@
const { createServer } = require("http");
const app = require("./dist/App.js");

createServer((req, res) => {
  const { html } = app.render({ url: req.url });

  res.write(`
    <!DOCTYPE html>
    <div id="app">${html}</div>
    <script src="/dist/bundle.js"></script>
  `);

  res.end();
}).listen(3000);
50  hubble/src/sidebar.svelte  Normal file
@@ -0,0 +1,50 @@
<script>
import { link } from "svelte-routing";
export let networks = [];
import { onMount } from "svelte";
import { createEventDispatcher } from 'svelte';
const dispatch = createEventDispatcher();
onMount(async () => {
  dispatch('updateNetworks');
});
import { navigate } from "svelte-routing";
import { LoggedIn, AuthToken } from './store.js';
function logout(){
  AuthToken.update(_ => "")
  LoggedIn.update(_ => false)
  navigate('/');
}
</script>

<main>
  <aside class="col-span-1 from-purple-600 bg-purple-800 h-screen text-white p-4 bg-gradient-to-tr top-0 sticky">
    <div class="flex flex-row justify-between align-end mb-4">
      <h1 class="text-xl">All Networks</h1>
      <button class="bg-purple-400 rounded-xl p-1"
        on:click={logout}
      >Logout</button>
    </div>
    {#each networks as net}
      <a href="/networks/{net.name}" use:link class="no-underline hover:no-underline text-white hover:text-white visited:text-white link">
        <div class="shadow-xl hover:shadow-2xl my-2 bg-purple-900 p-2 rounded">
          <h3 class="visited:text-white">Network: {net.name}</h3>
          <h5>CIDR: {net.cidr}</h5>
        </div>
      </a>
    {/each}
    <a href="/networks/new" use:link class="w-full flex justify-center">
      <div class="shadow-xl hover:shadow-2xl my-2 bg-purple-500 p-2 rounded-xl w-6/12 flex justify-center">
        <h3 class="visited:text-white">Create New</h3>
      </div>
    </a>
  </aside>
</main>

<style>
  a {
    text-decoration: none;
  }
  a:visited {
    color: white;
  }
</style>
23  hubble/src/store.js  Normal file
@@ -0,0 +1,23 @@
import { writable } from 'svelte/store';

const storedLoggedIn = localStorage.getItem("loggedIn");
const storedAuthToken = localStorage.getItem("authToken");
console.log("Getting localStorage loggedIn: ", storedLoggedIn)

var sl = (storedLoggedIn === null ? false : JSON.parse(storedLoggedIn))
var at = (storedAuthToken === null ? "" : storedAuthToken)

console.log("writable using", sl)

export const LoggedIn = writable(sl);
export const AuthToken = writable(at);

LoggedIn.subscribe(value => {
  console.log("Setting localStorage loggedIn to", value)
  localStorage.setItem("loggedIn", value)
})

AuthToken.subscribe(value => {
  localStorage.setItem("authToken", value)
})
21  hubble/tailwind.config.js  Normal file
@@ -0,0 +1,21 @@
// tailwind.config.js
module.exports = {
  purge: {
    enabled: !process.env.ROLLUP_WATCH,
    content: ['./public/index.html', './src/**/*.svelte'],
    options: {
      defaultExtractor: content => [
        ...(content.match(/[^<>"'`\s]*[^<>"'`\s:]/g) || []),
        ...(content.match(/(?<=class:)[^=>\/\s]*/g) || []),
      ],
    },
  },
  darkMode: false, // or 'media' or 'class'
  theme: {
    extend: {},
  },
  variants: {
    extend: {},
  },
  plugins: [],
}
11  nebutils/config.go  Normal file
@@ -0,0 +1,11 @@
package nebutils

type Config struct {
    Pki struct {
        Ca   string `yaml:"ca"`
        Cert string `yaml:"cert"`
        Key  string `yaml:"key"`
    } `yaml:"pki"`
    StaticHostmap struct {
    }
}
49  nebutils/keyconvert.go  Normal file
@@ -0,0 +1,49 @@
package nebutils

import (
    "crypto/ed25519"
    "crypto/sha512"
    "math/big"

    "golang.org/x/crypto/curve25519"
)

// Convert an Edwards private key to a curve25519 private key
func PrivateKeyToCurve25519(privateKey []byte) (curvePrivate []byte) {
    h := sha512.New()
    h.Write(privateKey)
    digest := h.Sum(nil)

    // key clamping
    digest[0] &= 248
    digest[31] &= 127
    digest[31] |= 64

    return digest[:32]
}

var curve25519P, _ = new(big.Int).SetString("57896044618658097711785492504343953926634992332820282019728792003956564819949", 10)

// from Filo Sottile's 'age' : https://github.com/FiloSottile/age/blob/bbab440e198a4d67ba78591176c7853e62d29e04/internal/age/ssh.go#L174
// See https://blog.filippo.io/using-ed25519-keys-for-encryption.
func Ed25519PublicKeyToCurve25519(pk ed25519.PublicKey) []byte {
    bigEndianY := make([]byte, ed25519.PublicKeySize)
    for i, b := range pk {
        bigEndianY[ed25519.PublicKeySize-i-1] = b
    }
    bigEndianY[0] &= 0b0111_1111

    y := new(big.Int).SetBytes(bigEndianY)
    denom := big.NewInt(1)
    denom.ModInverse(denom.Sub(denom, y), curve25519P)
    u := y.Mul(y.Add(y, big.NewInt(1)), denom)
    u.Mod(u, curve25519P)

    out := make([]byte, curve25519.PointSize)
    uBytes := u.Bytes()
    for i, b := range uBytes {
        out[len(uBytes)-i-1] = b
    }

    return out
}
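The two conversions above are meant to agree: hashing and clamping an ed25519 seed yields the same X25519 scalar that corresponds, via the birational map, to the converted public key. Below is a minimal consistency check, a sketch only, assuming the 32-byte ed25519 seed is what gets passed to PrivateKeyToCurve25519 (the `main` wrapper is purely illustrative):

```go
package main

import (
	"bytes"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"github.com/b177y/starship/nebutils"
	"golang.org/x/crypto/curve25519"
)

func main() {
	// Generate an ed25519 identity key.
	pub, priv, _ := ed25519.GenerateKey(rand.Reader)

	// Convert both halves to their curve25519 (Montgomery) form.
	curvePriv := nebutils.PrivateKeyToCurve25519(priv.Seed())
	curvePub := nebutils.Ed25519PublicKeyToCurve25519(pub)

	// The public key derived from the converted private key should match
	// the converted public key.
	derived, err := curve25519.X25519(curvePriv, curve25519.Basepoint)
	if err != nil {
		panic(err)
	}
	fmt.Println("conversion consistent:", bytes.Equal(derived, curvePub))
}
```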
41  nebutils/keygen.go  Normal file
@@ -0,0 +1,41 @@
package nebutils

import (
    "crypto/rand"
    "fmt"
    "io"
    "io/ioutil"
    "path/filepath"

    log "github.com/sirupsen/logrus"
    "github.com/slackhq/nebula/cert"
    "golang.org/x/crypto/curve25519"
)

// Create a curve25519 keypair
func X25519KeyPair() (kpub []byte, kpriv []byte) {
    var pubkey, privkey [32]byte
    if _, err := io.ReadFull(rand.Reader, privkey[:]); err != nil {
        log.Fatal(err)
    }
    privkey[0] &= 248
    privkey[31] &= 63
    privkey[31] |= 64
    curve25519.ScalarBaseMult(&pubkey, &privkey)
    return pubkey[:], privkey[:]
}

func SaveKey(directory, name string,
    keyBytes []byte) (err error) {
    key_fn := filepath.Join(directory, name)
    log.WithFields(log.Fields{
        "directory": directory,
        "name":      name,
        "priv_fn":   key_fn,
    }).Info("Saving Key")
    err = ioutil.WriteFile(key_fn, cert.MarshalX25519PrivateKey(keyBytes), 0660)
    if err != nil {
        return fmt.Errorf("error while writing out-key: %s", err)
    }
    return nil
}
17208  report.pdf  Normal file
File diff suppressed because it is too large
13  wormhole/schemas.go  Normal file
@@ -0,0 +1,13 @@
package wormhole

type RequestJoinSchema struct {
    Netname  string
    Nodename string
    Hostname string
    PubKey   string
}

type NodeIdentitySchema struct {
    Netname  string
    Nodename string
}
43  wormhole/tokens.go  Normal file
@@ -0,0 +1,43 @@
package wormhole

import (
    "encoding/json"
    "reflect"
    "time"

    paseto "github.com/o1egl/paseto/v2"
)

// NewToken returns a paseto.JSONToken, combined with the payload struct it receives
func NewToken(payload interface{}) (jsonToken paseto.JSONToken, err error) {
    jsonToken = paseto.JSONToken{
        IssuedAt:   time.Now(),
        NotBefore:  time.Now(),
        Expiration: time.Now().Add(5 * time.Second),
    }
    v := reflect.ValueOf(payload)
    for i := 0; i < v.NumField(); i++ {
        jsonToken.Set(v.Type().Field(i).Name, v.Field(i).Interface())
    }
    body, err := json.Marshal(payload)
    if err != nil {
        return jsonToken, err
    }
    jsonToken.Set("body", body)
    return jsonToken, nil
}

// SchemaFromJSONToken fills in a schema struct with values from
// additional claims in the PASETO token.
// An error is returned if the 'body' claim is missing or cannot be
// unmarshalled into the schema.
func SchemaFromJSONToken(jsonToken paseto.JSONToken,
    schema interface{}) error {
    var body []byte
    err := jsonToken.Get("body", &body)
    if err != nil {
        return err
    }
    err = json.Unmarshal(body, &schema)
    return err
}
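A sketch of the intended round trip, assuming an xpaseto.Signer carries the token between Quasar and Neutron; the network and node names below are made up:

```go
package main

import (
	"fmt"

	"github.com/b177y/starship/nebutils"
	"github.com/b177y/starship/wormhole"
	"github.com/b177y/starship/xpaseto"
)

func main() {
	// Sender: pack a join request into PASETO claims (plus a JSON "body" claim)
	// and sign it. The 5 second expiry set by NewToken means the token has to
	// be sent promptly.
	req := wormhole.RequestJoinSchema{
		Netname:  "examplenet",
		Nodename: "node1",
		Hostname: "node1.example.internal",
		PubKey:   "example-public-key-pem",
	}
	jsonToken, err := wormhole.NewToken(req)
	if err != nil {
		panic(err)
	}
	pub, priv := nebutils.X25519KeyPair()
	signer := xpaseto.NewSigner(priv, pub)
	token, err := signer.SignPaseto(jsonToken)
	if err != nil {
		panic(err)
	}

	// Receiver: verify the signature, then rebuild the schema from the claims.
	parsed, err := signer.ParsePaseto(token)
	if err != nil {
		panic(err)
	}
	var got wormhole.RequestJoinSchema
	if err := wormhole.SchemaFromJSONToken(parsed, &got); err != nil {
		panic(err)
	}
	fmt.Println(got.Netname, got.Nodename)
}
```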
109  xpaseto/curve25519.go  Normal file
@@ -0,0 +1,109 @@
// curve25519.go - Mostly copied from https://github.com/signal-golang/textsecure
// signal-golang/textsecure license: GPL-3.0 - https://github.com/signal-golang/textsecure/blob/master/COPYING
// This file contains the Sign and Verify functions for XEdDSA Signatures
// XEdDSA is for using Montgomery keys (traditionally used for X25519 Diffie-Hellman functions) for creating and verifying EdDSA compatible signatures.
// See more here: https://signal.org/docs/specifications/xeddsa/#xeddsa

package xpaseto

import (
    "crypto/sha512"

    "github.com/pkg/errors"
    "github.com/signal-golang/ed25519"
    "github.com/signal-golang/ed25519/edwards25519"
)

// Sign signs a message with an X25519 key and returns a signature.
//
// An error will be returned if an invalid private key is given.
func Sign(privateKey []byte, message []byte, random [64]byte) (signature []byte,
    err error) {
    sig := new([64]byte)
    var privkey [32]byte
    if len(privateKey) != 32 {
        return signature, errors.Errorf("Invalid Private Key. Cannot Sign Payload. ")
    }
    copy(privkey[:], privateKey)

    // Calculate Ed25519 public key from Curve25519 private key
    var A edwards25519.ExtendedGroupElement
    var publicKey [32]byte
    edwards25519.GeScalarMultBase(&A, &privkey)
    A.ToBytes(&publicKey)

    // Calculate r
    diversifier := [32]byte{
        0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}

    var r [64]byte
    h := sha512.New()
    h.Write(diversifier[:])
    h.Write(privkey[:])
    h.Write(message)
    h.Write(random[:])
    h.Sum(r[:0])

    // Calculate R
    var rReduced [32]byte
    edwards25519.ScReduce(&rReduced, &r)
    var R edwards25519.ExtendedGroupElement
    edwards25519.GeScalarMultBase(&R, &rReduced)

    var encodedR [32]byte
    R.ToBytes(&encodedR)

    // Calculate S = r + SHA2-512(R || A_ed || msg) * a (mod L)
    var hramDigest [64]byte
    h.Reset()
    h.Write(encodedR[:])
    h.Write(publicKey[:])
    h.Write(message)
    h.Sum(hramDigest[:0])
    var hramDigestReduced [32]byte
    edwards25519.ScReduce(&hramDigestReduced, &hramDigest)

    var s [32]byte
    edwards25519.ScMulAdd(&s, &hramDigestReduced, &privkey, &rReduced)

    copy(sig[:], encodedR[:])
    copy(sig[32:], s[:])
    sig[63] |= publicKey[31] & 0x80

    signature = sig[:]
    return signature, nil
}

// Verify checks whether the message has a valid signature.
//
// Returns true if the signature is valid, otherwise returns false.
func Verify(publicKey []byte, message []byte, signature []byte) bool {

    var sig [64]byte
    if n := copy(sig[:], signature); n != 64 {
        return false
    }
    var pubkey [32]byte
    if n := copy(pubkey[:], publicKey); n != 32 {
        return false
    }
    pubkey[31] &= 0x7F

    var edY, one, montX, montXMinusOne, montXPlusOne edwards25519.FieldElement
    edwards25519.FeFromBytes(&montX, &pubkey)
    edwards25519.FeOne(&one)
    edwards25519.FeSub(&montXMinusOne, &montX, &one)
    edwards25519.FeAdd(&montXPlusOne, &montX, &one)
    edwards25519.FeInvert(&montXPlusOne, &montXPlusOne)
    edwards25519.FeMul(&edY, &montXMinusOne, &montXPlusOne)

    var A_ed [32]byte
    edwards25519.FeToBytes(&A_ed, &edY)

    A_ed[31] |= sig[63] & 0x80
    sig[63] &= 0x7F

    return ed25519.Verify(&A_ed, message, &sig)
}
59  xpaseto/curve25519_test.go  Normal file
@@ -0,0 +1,59 @@
package xpaseto_test

import (
    "crypto/rand"
    "io"
    "testing"

    "github.com/b177y/starship/nebutils"
    "github.com/b177y/starship/xpaseto"
    "github.com/stretchr/testify/assert"
    "golang.org/x/crypto/curve25519"
)

func randBytes(data []byte) {
    if _, err := io.ReadFull(rand.Reader, data); err != nil {
        panic(err)
    }
}

func TestSign(t *testing.T) {
    msg := make([]byte, 200)

    var priv, pub [32]byte
    var random [64]byte

    // Test for random values of the keys, nonce and message
    for i := 0; i < 100; i++ {
        randBytes(priv[:])
        priv[0] &= 248
        priv[31] &= 63
        priv[31] |= 64
        curve25519.ScalarBaseMult(&pub, &priv)
        pub := pub[:]
        priv := priv[:]
        randBytes(random[:])
        randBytes(msg)
        sig, err := xpaseto.Sign(priv, msg, random)
        assert.True(t, err == nil, "Sign must work")
        v := xpaseto.Verify(pub, msg, sig)
        assert.True(t, v, "Verify must work")
    }
}

func TestSignNebkey(t *testing.T) {
    msg := make([]byte, 200)

    var random [64]byte

    // Test for random values of the keys, nonce and message
    for i := 0; i < 100; i++ {
        pub, priv := nebutils.X25519KeyPair()
        randBytes(random[:])
        randBytes(msg)
        sig, err := xpaseto.Sign(priv, msg, random)
        assert.True(t, err == nil, "Sign must work")
        v := xpaseto.Verify(pub, msg, sig)
        assert.True(t, v, "Verify must work")
    }
}
84  xpaseto/signer.go  Normal file
@@ -0,0 +1,84 @@
package xpaseto

import (
    "time"

    paseto "github.com/o1egl/paseto/v2"
    "github.com/slackhq/nebula/cert"
)

// Signer is a struct which is used to Sign and Verify XPASETO tokens
// Privkey is a montgomery x25519 private key
// Pubkey is the public counterpart to the Privkey
type Signer struct {
    paseto  *XV2
    Privkey []byte
    Pubkey  []byte
}

// NewSigner returns a new Signer, initialised with the given
// private and public x25519 keys
func NewSigner(privkey []byte,
    pubkey []byte) Signer {
    return Signer{
        paseto:  NewXV2(),
        Privkey: privkey,
        Pubkey:  pubkey,
    }
}

// SignPaseto returns a token from the given paseto.JSONToken
func (s *Signer) SignPaseto(jsonToken paseto.JSONToken) (token string, err error) {
    token, err = s.paseto.Sign(s.Privkey, jsonToken, "STARSHIP")
    if err != nil {
        return "", err
    }
    return token, nil
}

// ParsePaseto returns a paseto.JSONToken from a raw token string
// If the token signature is invalid, an error will be returned
func (s *Signer) ParsePaseto(token string) (jsonToken paseto.JSONToken, err error) {
    var payload paseto.JSONToken
    var footer string
    err = s.paseto.Verify(token, s.Pubkey, &payload, &footer)
    if err != nil {
        return payload, err
    }
    err = payload.Validate(paseto.ValidAt(time.Now()))
    if err != nil {
        return payload, err
    }
    return payload, nil
}

// SelfSignPaseto sets the "pubkey" additional claim to the value of the public part
// of the keypair used to sign the XPASETO token
func (s *Signer) SelfSignPaseto(jsonToken paseto.JSONToken) (token string, err error) {
    pubpem := string(cert.MarshalX25519PublicKey(s.Pubkey))
    jsonToken.Set("pubkey", pubpem)
    return s.SignPaseto(jsonToken)
}

// ParseSelfSigned parses and validates an XPASETO token against the 'pubkey'
// in the additional claims.
func (s *Signer) ParseSelfSigned(token string) (jsonToken paseto.JSONToken,
    err error) {
    data, _, err := splitToken([]byte(token), headerXV2Public)
    if err != nil {
        return jsonToken, err
    }
    payloadBytes := data[:len(data)-64]
    err = fillValue(payloadBytes, &jsonToken)
    if err != nil {
        return jsonToken, err
    }
    var pubkey string
    err = jsonToken.Get("pubkey", &pubkey)
    if err != nil {
        return jsonToken, err
    }
    s.Pubkey, _, err = cert.UnmarshalX25519PublicKey([]byte(pubkey))
    if err != nil {
        return jsonToken, err
    }
    return s.ParsePaseto(token)
}
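A sketch of the self-signed flow, assuming a receive-only verifier can be built with NewSigner(nil, nil) (ParseSelfSigned fills in Pubkey from the token's claims before verifying, and never touches Privkey); the issuer name and claim values are illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/b177y/starship/nebutils"
	"github.com/b177y/starship/xpaseto"
	paseto "github.com/o1egl/paseto/v2"
)

func main() {
	// Sender: sign a token that carries its own public key as a claim.
	pub, priv := nebutils.X25519KeyPair()
	sender := xpaseto.NewSigner(priv, pub)
	jt := paseto.JSONToken{
		Issuer:     "node1",
		IssuedAt:   time.Now(),
		Expiration: time.Now().Add(time.Minute),
	}
	token, err := sender.SelfSignPaseto(jt)
	if err != nil {
		panic(err)
	}

	// Receiver: no prior key material; the "pubkey" claim is pulled out of
	// the token before the signature is checked.
	receiver := xpaseto.NewSigner(nil, nil)
	parsed, err := receiver.ParseSelfSigned(token)
	if err != nil {
		panic(err)
	}
	fmt.Println("issuer:", parsed.Issuer)
}
```

A self-signed token only proves possession of the embedded key, so in practice the recovered public key still has to be matched against a trusted fingerprint (such as the node fingerprints shown in the frontend) before it is acted on.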
41  xpaseto/signer_test.go  Normal file
@@ -0,0 +1,41 @@
package xpaseto_test

import (
    "testing"

    "github.com/b177y/starship/nebutils"
    "github.com/b177y/starship/xpaseto"
    paseto "github.com/o1egl/paseto/v2"
    "github.com/stretchr/testify/assert"
)

func TestNewSigner(t *testing.T) {
    pub, priv := nebutils.X25519KeyPair()
    signer := xpaseto.NewSigner(priv, pub)
    assert.IsType(t, *new(xpaseto.Signer), signer, "signer should be of type xpaseto.Signer")
}

func TestSignAndVerifyToken(t *testing.T) {
    jsonToken := paseto.JSONToken{
        Audience: "example_audience",
        Issuer:   "example_issuer",
    }
    jsonToken.Set("example", "value")
    jsonToken.Set("anotherexample", "anothervalue")

    pub, priv := nebutils.X25519KeyPair()
    signer := xpaseto.NewSigner(priv, pub)

    // Test that we can sign
    token, err := signer.SignPaseto(jsonToken)
    assert.NoError(t, err)
    assert.IsType(t, *new(string), token, "token should be a string")

    // Test that we can parse the token
    jt, err := signer.ParsePaseto(token)
    assert.NoError(t, err)
    assert.IsType(t, *new(paseto.JSONToken), jt, "parsed object should be paseto.JSONToken")

    assert.Equal(t, jsonToken, jt, "Parsed json token should be the same as the original json")

}
126  xpaseto/utils.go  Normal file
@@ -0,0 +1,126 @@
// Copied from https://github.com/o1egl/paseto/blob/master/utils.go

package xpaseto

import (
    "bytes"
    "encoding/base64"
    "encoding/binary"
    "encoding/json"

    "github.com/pkg/errors"
)

var tokenEncoder = base64.RawURLEncoding

/*
Format the Additional Associated Data.
Prefix with the length (64-bit unsigned little-endian integer)
followed by each message. This provides a more explicit domain
separation between each piece of the message.
*/
func preAuthEncode(pieces ...[]byte) []byte {
    buf := new(bytes.Buffer)
    byteOrder := binary.LittleEndian
    _ = binary.Write(buf, byteOrder, int64(len(pieces)))
    for _, p := range pieces {
        _ = binary.Write(buf, byteOrder, int64(len(p)))
        buf.Write(p)
    }
    return buf.Bytes()
}

func infToByteArr(i interface{}) ([]byte, error) {
    switch v := i.(type) {
    case string:
        return []byte(v), nil
    case *string:
        if v != nil {
            return []byte(*v), nil
        }
    case []byte:
        return v, nil
    case *[]byte:
        if v != nil {
            return *v, nil
        }
    default:
        return json.Marshal(v)
    }

    return nil, nil
}

func createToken(header, body, footer []byte) string {
    encodedPayload := make([]byte, tokenEncoder.EncodedLen(len(body)))
    tokenEncoder.Encode(encodedPayload, body)

    footerLen := 0
    var encodedFooter []byte
    if len(footer) > 0 {
        encodedFooter = make([]byte, tokenEncoder.EncodedLen(len(footer)))
        tokenEncoder.Encode(encodedFooter, footer)
        footerLen = len(encodedFooter) + 1
    }

    token := make([]byte, len(header)+len(encodedPayload)+footerLen)

    offset := 0
    offset += copy(token[offset:], header)
    offset += copy(token[offset:], encodedPayload)
    if encodedFooter != nil {
        offset += copy(token[offset:], ".")
        copy(token[offset:], encodedFooter)
    }
    return string(token)
}

func fillValue(data []byte, i interface{}) error {
    switch f := i.(type) {
    case *string:
        *f = string(data)
    case *[]byte:
        *f = append(*f, data...)
    default:
        if err := json.Unmarshal(data, i); err != nil {
            return err
        }
    }
    return nil
}

func splitToken(token, header []byte) (payload, footer []byte, err error) {
    var (
        encodedPayload []byte
        encodedFooter  []byte
    )

    if !bytes.HasPrefix(token, header) {
        return nil, nil, errors.Errorf("Incorrect token header")
    }

    parts := bytes.Split(token[len(header):], []byte("."))
    switch len(parts) {
    case 1:
        encodedPayload = parts[0]
    case 2:
        encodedPayload = parts[0]
        encodedFooter = parts[1]
    default:
        return nil, nil, errors.Errorf("Incorrect token format")
    }

    payload = make([]byte, tokenEncoder.DecodedLen(len(encodedPayload)))
    if _, err = tokenEncoder.Decode(payload, encodedPayload); err != nil {
        return nil, nil, errors.Errorf("failed to decode payload: %s", err.Error())
    }

    if encodedFooter != nil {
        footer = make([]byte, tokenEncoder.DecodedLen(len(encodedFooter)))
        if _, err = tokenEncoder.Decode(footer, encodedFooter); err != nil {
            return nil, nil, errors.Errorf("failed to decode footer: %s", err.Error())
        }
    }

    return payload, footer, nil
}
103  xpaseto/v2.go  Normal file
@@ -0,0 +1,103 @@
// Package xpaseto provides basic abilities to sign and verify PASETO-style tokens
// using XEdDSA signatures. This allows the same keys to be used for X25519
// Diffie-Hellman exchanges, and for payload signing and verification.
// This package is based off https://github.com/o1egl/paseto and uses imports from
// this library in places.
// o1egl/paseto license: MIT - https://github.com/o1egl/paseto/blob/master/LICENSE
//
// The tokens generated by this package do NOT conform with the PASETO standard.
// See https://paseto.io/rfc/ Section 5.2
//
// Private PASETO tokens have not been implemented in this package.
package xpaseto

import (
    "crypto/rand"
    "io"

    "github.com/pkg/errors"
)

// This defines the PASETO header value to be used for XPASETO Public tokens.
var headerXV2Public = []byte("xv2.public.")

// This defines an XV2 struct which can be used to sign and verify payloads
// This is currently a redundant struct, and methods could be converted to standard functions
// However the XV2 struct could be built upon in the future to satisfy the
// 'Protocol' interface defined by "o1egl/paseto"
type XV2 struct{}

// NewXV2 returns an instance of an XV2 struct
func NewXV2() *XV2 {
    return &XV2{}
}

// Sign creates a signature of a payload using a private key and encodes it
// to an XPASETO token.
// An error will be returned if the payload cannot be signed or encoded
func (x *XV2) Sign(privkey []byte,
    payload, footer interface{}) (token string, err error) {
    payloadBytes, err := infToByteArr(payload)
    if err != nil {
        return "", errors.Errorf("Failed to encode payload to byte array: %s", err.Error())
    }
    footerBytes, err := infToByteArr(footer)
    if err != nil {
        return "", errors.Errorf("Failed to encode footer to byte array: %s", err.Error())
    }

    msgBytes := preAuthEncode(headerXV2Public, payloadBytes, footerBytes)
    var randomBytes [64]byte
    if _, err := io.ReadFull(rand.Reader, randomBytes[:]); err != nil {
        return "", err
    }
    sig, err := Sign(privkey, msgBytes, randomBytes)
    if err != nil {
        return "", err
    }

    token = createToken(headerXV2Public, append(payloadBytes, sig...), footerBytes)

    return token, nil
}

// Verify checks if an XPASETO token is valid
// An error will be returned if the token cannot be decoded, or if the
// signature is invalid.
// If the token is valid, this function will return 'nil'
func (x *XV2) Verify(token string, pubkey []byte,
    payload, footer interface{}) error {

    data, footerBytes, err := splitToken([]byte(token), headerXV2Public)
    if err != nil {
        return errors.Errorf("Failed to decode token: %s", err.Error())
    }

    if len(data) < 64 {
        return errors.Errorf("Incorrect token size: %d", len(data))
    }

    payloadBytes := data[:len(data)-64]
    sig := data[len(data)-64:]

    msgBytes := preAuthEncode(headerXV2Public, payloadBytes, footerBytes)

    valid := Verify(pubkey, msgBytes, sig)
    if !valid {
        return errors.Errorf("Invalid signature!")
    }

    if payload != nil {
        if err := fillValue(payloadBytes, payload); err != nil {
            return errors.Errorf("Failed to decode payload: %s", err.Error())
        }
    }
    if footer != nil {
        if err := fillValue(footerBytes, footer); err != nil {
            return errors.Errorf("Failed to decode footer: %s", err.Error())
        }
    }

    return nil
}
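A short usage sketch of XV2 on its own, showing that non-string payloads are JSON-encoded before signing and decoded again on verification; the `ping` struct and footer value are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/b177y/starship/nebutils"
	"github.com/b177y/starship/xpaseto"
)

// ping is a hypothetical payload type; anything JSON-encodable works because
// non-string payloads fall back to json.Marshal before signing.
type ping struct {
	Seq int    `json:"seq"`
	Msg string `json:"msg"`
}

func main() {
	xv2 := xpaseto.NewXV2()
	pub, priv := nebutils.X25519KeyPair()

	token, err := xv2.Sign(priv, ping{Seq: 1, Msg: "hello"}, "example-footer")
	if err != nil {
		panic(err)
	}
	fmt.Println(token[:11]) // every token starts with the "xv2.public." header

	var got ping
	var footer string
	if err := xv2.Verify(token, pub, &got, &footer); err != nil {
		panic(err)
	}
	fmt.Printf("payload=%+v footer=%q\n", got, footer)
}
```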
63  xpaseto/v2_test.go  Normal file
@@ -0,0 +1,63 @@
package xpaseto_test

import (
    "crypto/rand"
    "io"
    "testing"

    "github.com/b177y/starship/nebutils"
    "github.com/b177y/starship/xpaseto"
    "github.com/stretchr/testify/assert"
)

func TestVerifyAndSign(t *testing.T) {
    xv2 := xpaseto.NewXV2()
    pubkey, privkey := nebutils.X25519KeyPair()

    testMsg := "Hello World!"

    // Test Sign
    token, err := xv2.Sign(privkey, testMsg, "test_footer")
    assert.NoError(t, err, "XV2.Sign should not return an error")
    assert.IsType(t, *new(string), token, "token should be a string")

    // Test Verify
    var payload string
    var footer string
    err = xv2.Verify(token, pubkey, &payload, &footer)
    assert.NoError(t, err, "XV2.Verify should succeed.")
    assert.Equal(t, testMsg, payload)
}

func TestIncorrectPubkey(t *testing.T) {
    xv2 := xpaseto.NewXV2()
    _, privkey := nebutils.X25519KeyPair()
    differentPubkey, _ := nebutils.X25519KeyPair()

    testMsg := "Hello World!"

    // Sign
    token, err := xv2.Sign(privkey, testMsg, "test_footer")
    assert.NoError(t, err, "XV2.Sign should not return an error")

    // Test Verify
    var payload string
    var footer string
    err = xv2.Verify(token, differentPubkey, &payload, &footer)
    assert.Error(t, err, "XV2.Verify should fail with incorrect public key.")
}

func TestInvalidPrivKey(t *testing.T) {
    xv2 := xpaseto.NewXV2()
    var privkey [33]byte
    io.ReadFull(rand.Reader, privkey[:])
    var privkey2 [31]byte
    io.ReadFull(rand.Reader, privkey2[:])
    testMsg := "Hello World!"

    _, err := xv2.Sign(privkey[:], testMsg, "test_footer")
    assert.Error(t, err, "XV2.Sign should fail with invalid private key")
    _, err = xv2.Sign(privkey2[:], testMsg, "test_footer")
    assert.Error(t, err, "XV2.Sign should fail with invalid private key")
}