Compare commits
6 commits
master
...
add-http-p
Author | SHA1 | Date | |
---|---|---|---|
|
0af7e6a1dd | ||
|
c14ad0f27b | ||
|
af958676b8 | ||
|
70779b26ad | ||
|
90aa8efb29 | ||
|
dd7f12f7c0 |
197 changed files with 10253 additions and 17181 deletions
22
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
22
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
|
@ -8,13 +8,13 @@ body:
|
|||
attributes:
|
||||
value: |
|
||||
### Thank you for taking the time to file a bug report!
|
||||
|
||||
|
||||
Please fill out this form as completely as possible.
|
||||
|
||||
- type: input
|
||||
id: version
|
||||
attributes:
|
||||
label: What version of `nebula` are you using? (`nebula -version`)
|
||||
label: What version of `nebula` are you using?
|
||||
placeholder: 0.0.0
|
||||
validations:
|
||||
required: true
|
||||
|
@ -41,17 +41,10 @@ body:
|
|||
attributes:
|
||||
label: Logs from affected hosts
|
||||
description: |
|
||||
Please provide logs from ALL affected hosts during the time of the issue. If you do not provide logs we will be unable to assist you!
|
||||
|
||||
[Learn how to find Nebula logs here.](https://nebula.defined.net/docs/guides/viewing-nebula-logs/)
|
||||
|
||||
Provide logs from all affected hosts during the time of the issue.
|
||||
Improve formatting by using <code>```</code> at the beginning and end of each log block.
|
||||
value: |
|
||||
```
|
||||
|
||||
```
|
||||
validations:
|
||||
required: true
|
||||
required: false
|
||||
|
||||
- type: textarea
|
||||
id: configs
|
||||
|
@ -59,11 +52,6 @@ body:
|
|||
label: Config files from affected hosts
|
||||
description: |
|
||||
Provide config files for all affected hosts.
|
||||
|
||||
Improve formatting by using <code>```</code> at the beginning and end of each config file.
|
||||
value: |
|
||||
```
|
||||
|
||||
```
|
||||
validations:
|
||||
required: true
|
||||
required: false
|
||||
|
|
22
.github/dependabot.yml
vendored
22
.github/dependabot.yml
vendored
|
@ -1,22 +0,0 @@
|
|||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
|
||||
- package-ecosystem: "gomod"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
groups:
|
||||
golang-x-dependencies:
|
||||
patterns:
|
||||
- "golang.org/x/*"
|
||||
zx2c4-dependencies:
|
||||
patterns:
|
||||
- "golang.zx2c4.com/*"
|
||||
protobuf-dependencies:
|
||||
patterns:
|
||||
- "github.com/golang/protobuf"
|
||||
- "google.golang.org/protobuf"
|
6
.github/workflows/gofmt.yml
vendored
6
.github/workflows/gofmt.yml
vendored
|
@ -14,11 +14,11 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.24'
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
|
||||
- name: Install goimports
|
||||
|
|
82
.github/workflows/release.yml
vendored
82
.github/workflows/release.yml
vendored
|
@ -7,24 +7,24 @@ name: Create release and upload binaries
|
|||
|
||||
jobs:
|
||||
build-linux:
|
||||
name: Build Linux/BSD All
|
||||
name: Build Linux All
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.24'
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" release-linux release-freebsd release-openbsd release-netbsd
|
||||
make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" release-linux release-freebsd
|
||||
mkdir release
|
||||
mv build/*.tar.gz release
|
||||
|
||||
- name: Upload artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: linux-latest
|
||||
path: release
|
||||
|
@ -33,11 +33,11 @@ jobs:
|
|||
name: Build Windows
|
||||
runs-on: windows-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.24'
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
|
||||
- name: Build
|
||||
|
@ -55,7 +55,7 @@ jobs:
|
|||
mv dist\windows\wintun build\dist\windows\
|
||||
|
||||
- name: Upload artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: windows-latest
|
||||
path: build
|
||||
|
@ -64,18 +64,18 @@ jobs:
|
|||
name: Build Universal Darwin
|
||||
env:
|
||||
HAS_SIGNING_CREDS: ${{ secrets.AC_USERNAME != '' }}
|
||||
runs-on: macos-latest
|
||||
runs-on: macos-11
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.24'
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
|
||||
- name: Import certificates
|
||||
if: env.HAS_SIGNING_CREDS == 'true'
|
||||
uses: Apple-Actions/import-codesign-certs@v3
|
||||
uses: Apple-Actions/import-codesign-certs@v1
|
||||
with:
|
||||
p12-file-base64: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_P12_BASE64 }}
|
||||
p12-password: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_PASSWORD }}
|
||||
|
@ -104,66 +104,20 @@ jobs:
|
|||
fi
|
||||
|
||||
- name: Upload artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: darwin-latest
|
||||
path: ./release/*
|
||||
|
||||
build-docker:
|
||||
name: Create and Upload Docker Images
|
||||
# Technically we only need build-linux to succeed, but if any platforms fail we'll
|
||||
# want to investigate and restart the build
|
||||
needs: [build-linux, build-darwin, build-windows]
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
HAS_DOCKER_CREDS: ${{ vars.DOCKERHUB_USERNAME != '' && secrets.DOCKERHUB_TOKEN != '' }}
|
||||
# XXX It's not possible to write a conditional here, so instead we do it on every step
|
||||
#if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
|
||||
steps:
|
||||
# Be sure to checkout the code before downloading artifacts, or they will
|
||||
# be overwritten
|
||||
- name: Checkout code
|
||||
if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Download artifacts
|
||||
if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: linux-latest
|
||||
path: artifacts
|
||||
|
||||
- name: Login to Docker Hub
|
||||
if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ vars.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Build and push images
|
||||
if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
|
||||
env:
|
||||
DOCKER_IMAGE_REPO: ${{ vars.DOCKER_IMAGE_REPO || 'nebulaoss/nebula' }}
|
||||
DOCKER_IMAGE_TAG: ${{ vars.DOCKER_IMAGE_TAG || 'latest' }}
|
||||
run: |
|
||||
mkdir -p build/linux-{amd64,arm64}
|
||||
tar -zxvf artifacts/nebula-linux-amd64.tar.gz -C build/linux-amd64/
|
||||
tar -zxvf artifacts/nebula-linux-arm64.tar.gz -C build/linux-arm64/
|
||||
docker buildx build . --push -f docker/Dockerfile --platform linux/amd64,linux/arm64 --tag "${DOCKER_IMAGE_REPO}:${DOCKER_IMAGE_TAG}" --tag "${DOCKER_IMAGE_REPO}:${GITHUB_REF#refs/tags/v}"
|
||||
|
||||
release:
|
||||
name: Create and Upload Release
|
||||
needs: [build-linux, build-darwin, build-windows]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Download artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
path: artifacts
|
||||
|
||||
|
|
51
.github/workflows/smoke-extra.yml
vendored
51
.github/workflows/smoke-extra.yml
vendored
|
@ -1,51 +0,0 @@
|
|||
name: smoke-extra
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
types: [opened, synchronize, labeled, reopened]
|
||||
paths:
|
||||
- '.github/workflows/smoke**'
|
||||
- '**Makefile'
|
||||
- '**.go'
|
||||
- '**.proto'
|
||||
- 'go.mod'
|
||||
- 'go.sum'
|
||||
jobs:
|
||||
|
||||
smoke-extra:
|
||||
if: github.ref == 'refs/heads/master' || contains(github.event.pull_request.labels.*.name, 'smoke-test-extra')
|
||||
name: Run extra smoke tests
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
|
||||
- name: add hashicorp source
|
||||
run: wget -O- https://apt.releases.hashicorp.com/gpg | gpg --dearmor | sudo tee /usr/share/keyrings/hashicorp-archive-keyring.gpg && echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
|
||||
|
||||
- name: install vagrant
|
||||
run: sudo apt-get update && sudo apt-get install -y vagrant virtualbox
|
||||
|
||||
- name: freebsd-amd64
|
||||
run: make smoke-vagrant/freebsd-amd64
|
||||
|
||||
- name: openbsd-amd64
|
||||
run: make smoke-vagrant/openbsd-amd64
|
||||
|
||||
- name: netbsd-amd64
|
||||
run: make smoke-vagrant/netbsd-amd64
|
||||
|
||||
- name: linux-386
|
||||
run: make smoke-vagrant/linux-386
|
||||
|
||||
- name: linux-amd64-ipv6disable
|
||||
run: make smoke-vagrant/linux-amd64-ipv6disable
|
||||
|
||||
timeout-minutes: 30
|
8
.github/workflows/smoke.yml
vendored
8
.github/workflows/smoke.yml
vendored
|
@ -18,15 +18,15 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.24'
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
|
||||
- name: build
|
||||
run: make bin-docker CGO_ENABLED=1 BUILD_ARGS=-race
|
||||
run: make bin-docker
|
||||
|
||||
- name: setup docker image
|
||||
working-directory: ./.github/workflows/smoke
|
||||
|
|
2
.github/workflows/smoke/build-relay.sh
vendored
2
.github/workflows/smoke/build-relay.sh
vendored
|
@ -41,4 +41,4 @@ EOF
|
|||
../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
|
||||
)
|
||||
|
||||
docker build -t nebula:smoke-relay .
|
||||
sudo docker build -t nebula:smoke-relay .
|
||||
|
|
17
.github/workflows/smoke/build.sh
vendored
17
.github/workflows/smoke/build.sh
vendored
|
@ -5,36 +5,27 @@ set -e -x
|
|||
rm -rf ./build
|
||||
mkdir ./build
|
||||
|
||||
# TODO: Assumes your docker bridge network is a /24, and the first container that launches will be .1
|
||||
# - We could make this better by launching the lighthouse first and then fetching what IP it is.
|
||||
NET="$(docker network inspect bridge -f '{{ range .IPAM.Config }}{{ .Subnet }}{{ end }}' | cut -d. -f1-3)"
|
||||
|
||||
(
|
||||
cd build
|
||||
|
||||
cp ../../../../build/linux-amd64/nebula .
|
||||
cp ../../../../build/linux-amd64/nebula-cert .
|
||||
|
||||
if [ "$1" ]
|
||||
then
|
||||
cp "../../../../build/$1/nebula" "$1-nebula"
|
||||
fi
|
||||
|
||||
HOST="lighthouse1" \
|
||||
AM_LIGHTHOUSE=true \
|
||||
../genconfig.sh >lighthouse1.yml
|
||||
|
||||
HOST="host2" \
|
||||
LIGHTHOUSES="192.168.100.1 $NET.2:4242" \
|
||||
LIGHTHOUSES="192.168.100.1 172.17.0.2:4242" \
|
||||
../genconfig.sh >host2.yml
|
||||
|
||||
HOST="host3" \
|
||||
LIGHTHOUSES="192.168.100.1 $NET.2:4242" \
|
||||
LIGHTHOUSES="192.168.100.1 172.17.0.2:4242" \
|
||||
INBOUND='[{"port": "any", "proto": "icmp", "group": "lighthouse"}]' \
|
||||
../genconfig.sh >host3.yml
|
||||
|
||||
HOST="host4" \
|
||||
LIGHTHOUSES="192.168.100.1 $NET.2:4242" \
|
||||
LIGHTHOUSES="192.168.100.1 172.17.0.2:4242" \
|
||||
OUTBOUND='[{"port": "any", "proto": "icmp", "group": "lighthouse"}]' \
|
||||
../genconfig.sh >host4.yml
|
||||
|
||||
|
@ -45,4 +36,4 @@ NET="$(docker network inspect bridge -f '{{ range .IPAM.Config }}{{ .Subnet }}{{
|
|||
../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
|
||||
)
|
||||
|
||||
docker build -t "nebula:${NAME:-smoke}" .
|
||||
sudo docker build -t "nebula:${NAME:-smoke}" .
|
||||
|
|
2
.github/workflows/smoke/genconfig.sh
vendored
2
.github/workflows/smoke/genconfig.sh
vendored
|
@ -47,7 +47,7 @@ listen:
|
|||
port: ${LISTEN_PORT:-4242}
|
||||
|
||||
tun:
|
||||
dev: ${TUN_DEV:-tun0}
|
||||
dev: ${TUN_DEV:-nebula1}
|
||||
|
||||
firewall:
|
||||
inbound_action: reject
|
||||
|
|
52
.github/workflows/smoke/smoke-relay.sh
vendored
52
.github/workflows/smoke/smoke-relay.sh
vendored
|
@ -14,24 +14,24 @@ cleanup() {
|
|||
set +e
|
||||
if [ "$(jobs -r)" ]
|
||||
then
|
||||
docker kill lighthouse1 host2 host3 host4
|
||||
sudo docker kill lighthouse1 host2 host3 host4
|
||||
fi
|
||||
}
|
||||
|
||||
trap cleanup EXIT
|
||||
|
||||
docker run --name lighthouse1 --rm nebula:smoke-relay -config lighthouse1.yml -test
|
||||
docker run --name host2 --rm nebula:smoke-relay -config host2.yml -test
|
||||
docker run --name host3 --rm nebula:smoke-relay -config host3.yml -test
|
||||
docker run --name host4 --rm nebula:smoke-relay -config host4.yml -test
|
||||
sudo docker run --name lighthouse1 --rm nebula:smoke-relay -config lighthouse1.yml -test
|
||||
sudo docker run --name host2 --rm nebula:smoke-relay -config host2.yml -test
|
||||
sudo docker run --name host3 --rm nebula:smoke-relay -config host3.yml -test
|
||||
sudo docker run --name host4 --rm nebula:smoke-relay -config host4.yml -test
|
||||
|
||||
docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
|
||||
sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
|
||||
sleep 1
|
||||
docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
|
||||
sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
|
||||
sleep 1
|
||||
docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
|
||||
sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
|
||||
sleep 1
|
||||
docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
|
||||
sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
|
||||
sleep 1
|
||||
|
||||
set +x
|
||||
|
@ -39,44 +39,44 @@ echo
|
|||
echo " *** Testing ping from lighthouse1"
|
||||
echo
|
||||
set -x
|
||||
docker exec lighthouse1 ping -c1 192.168.100.2
|
||||
docker exec lighthouse1 ping -c1 192.168.100.3
|
||||
docker exec lighthouse1 ping -c1 192.168.100.4
|
||||
sudo docker exec lighthouse1 ping -c1 192.168.100.2
|
||||
sudo docker exec lighthouse1 ping -c1 192.168.100.3
|
||||
sudo docker exec lighthouse1 ping -c1 192.168.100.4
|
||||
|
||||
set +x
|
||||
echo
|
||||
echo " *** Testing ping from host2"
|
||||
echo
|
||||
set -x
|
||||
docker exec host2 ping -c1 192.168.100.1
|
||||
sudo docker exec host2 ping -c1 192.168.100.1
|
||||
# Should fail because no relay configured in this direction
|
||||
! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
|
||||
! docker exec host2 ping -c1 192.168.100.4 -w5 || exit 1
|
||||
! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
|
||||
! sudo docker exec host2 ping -c1 192.168.100.4 -w5 || exit 1
|
||||
|
||||
set +x
|
||||
echo
|
||||
echo " *** Testing ping from host3"
|
||||
echo
|
||||
set -x
|
||||
docker exec host3 ping -c1 192.168.100.1
|
||||
docker exec host3 ping -c1 192.168.100.2
|
||||
docker exec host3 ping -c1 192.168.100.4
|
||||
sudo docker exec host3 ping -c1 192.168.100.1
|
||||
sudo docker exec host3 ping -c1 192.168.100.2
|
||||
sudo docker exec host3 ping -c1 192.168.100.4
|
||||
|
||||
set +x
|
||||
echo
|
||||
echo " *** Testing ping from host4"
|
||||
echo
|
||||
set -x
|
||||
docker exec host4 ping -c1 192.168.100.1
|
||||
sudo docker exec host4 ping -c1 192.168.100.1
|
||||
# Should fail because relays not allowed
|
||||
! docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
|
||||
docker exec host4 ping -c1 192.168.100.3
|
||||
! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
|
||||
sudo docker exec host4 ping -c1 192.168.100.3
|
||||
|
||||
docker exec host4 sh -c 'kill 1'
|
||||
docker exec host3 sh -c 'kill 1'
|
||||
docker exec host2 sh -c 'kill 1'
|
||||
docker exec lighthouse1 sh -c 'kill 1'
|
||||
sleep 5
|
||||
sudo docker exec host4 sh -c 'kill 1'
|
||||
sudo docker exec host3 sh -c 'kill 1'
|
||||
sudo docker exec host2 sh -c 'kill 1'
|
||||
sudo docker exec lighthouse1 sh -c 'kill 1'
|
||||
sleep 1
|
||||
|
||||
if [ "$(jobs -r)" ]
|
||||
then
|
||||
|
|
105
.github/workflows/smoke/smoke-vagrant.sh
vendored
105
.github/workflows/smoke/smoke-vagrant.sh
vendored
|
@ -1,105 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -e -x
|
||||
|
||||
set -o pipefail
|
||||
|
||||
export VAGRANT_CWD="$PWD/vagrant-$1"
|
||||
|
||||
mkdir -p logs
|
||||
|
||||
cleanup() {
|
||||
echo
|
||||
echo " *** cleanup"
|
||||
echo
|
||||
|
||||
set +e
|
||||
if [ "$(jobs -r)" ]
|
||||
then
|
||||
docker kill lighthouse1 host2
|
||||
fi
|
||||
vagrant destroy -f
|
||||
}
|
||||
|
||||
trap cleanup EXIT
|
||||
|
||||
CONTAINER="nebula:${NAME:-smoke}"
|
||||
|
||||
docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
|
||||
docker run --name host2 --rm "$CONTAINER" -config host2.yml -test
|
||||
|
||||
vagrant up
|
||||
vagrant ssh -c "cd /nebula && /nebula/$1-nebula -config host3.yml -test" -- -T
|
||||
|
||||
docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
|
||||
sleep 1
|
||||
docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
|
||||
sleep 1
|
||||
vagrant ssh -c "cd /nebula && sudo sh -c 'echo \$\$ >/nebula/pid && exec /nebula/$1-nebula -config host3.yml'" 2>&1 -- -T | tee logs/host3 | sed -u 's/^/ [host3] /' &
|
||||
sleep 15
|
||||
|
||||
# grab tcpdump pcaps for debugging
|
||||
docker exec lighthouse1 tcpdump -i nebula1 -q -w - -U 2>logs/lighthouse1.inside.log >logs/lighthouse1.inside.pcap &
|
||||
docker exec lighthouse1 tcpdump -i eth0 -q -w - -U 2>logs/lighthouse1.outside.log >logs/lighthouse1.outside.pcap &
|
||||
docker exec host2 tcpdump -i nebula1 -q -w - -U 2>logs/host2.inside.log >logs/host2.inside.pcap &
|
||||
docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host2.outside.pcap &
|
||||
# vagrant ssh -c "tcpdump -i nebula1 -q -w - -U" 2>logs/host3.inside.log >logs/host3.inside.pcap &
|
||||
# vagrant ssh -c "tcpdump -i eth0 -q -w - -U" 2>logs/host3.outside.log >logs/host3.outside.pcap &
|
||||
|
||||
#docker exec host2 ncat -nklv 0.0.0.0 2000 &
|
||||
#vagrant ssh -c "ncat -nklv 0.0.0.0 2000" &
|
||||
#docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 &
|
||||
#vagrant ssh -c "ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000" &
|
||||
|
||||
set +x
|
||||
echo
|
||||
echo " *** Testing ping from lighthouse1"
|
||||
echo
|
||||
set -x
|
||||
docker exec lighthouse1 ping -c1 192.168.100.2
|
||||
docker exec lighthouse1 ping -c1 192.168.100.3
|
||||
|
||||
set +x
|
||||
echo
|
||||
echo " *** Testing ping from host2"
|
||||
echo
|
||||
set -x
|
||||
docker exec host2 ping -c1 192.168.100.1
|
||||
# Should fail because not allowed by host3 inbound firewall
|
||||
! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
|
||||
|
||||
#set +x
|
||||
#echo
|
||||
#echo " *** Testing ncat from host2"
|
||||
#echo
|
||||
#set -x
|
||||
# Should fail because not allowed by host3 inbound firewall
|
||||
#! docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1
|
||||
#! docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
|
||||
|
||||
set +x
|
||||
echo
|
||||
echo " *** Testing ping from host3"
|
||||
echo
|
||||
set -x
|
||||
vagrant ssh -c "ping -c1 192.168.100.1" -- -T
|
||||
vagrant ssh -c "ping -c1 192.168.100.2" -- -T
|
||||
|
||||
#set +x
|
||||
#echo
|
||||
#echo " *** Testing ncat from host3"
|
||||
#echo
|
||||
#set -x
|
||||
#vagrant ssh -c "ncat -nzv -w5 192.168.100.2 2000"
|
||||
#vagrant ssh -c "ncat -nzuv -w5 192.168.100.2 3000" | grep -q host2
|
||||
|
||||
vagrant ssh -c "sudo xargs kill </nebula/pid" -- -T
|
||||
docker exec host2 sh -c 'kill 1'
|
||||
docker exec lighthouse1 sh -c 'kill 1'
|
||||
sleep 1
|
||||
|
||||
if [ "$(jobs -r)" ]
|
||||
then
|
||||
echo "nebula still running after SIGTERM sent" >&2
|
||||
exit 1
|
||||
fi
|
92
.github/workflows/smoke/smoke.sh
vendored
92
.github/workflows/smoke/smoke.sh
vendored
|
@ -14,7 +14,7 @@ cleanup() {
|
|||
set +e
|
||||
if [ "$(jobs -r)" ]
|
||||
then
|
||||
docker kill lighthouse1 host2 host3 host4
|
||||
sudo docker kill lighthouse1 host2 host3 host4
|
||||
fi
|
||||
}
|
||||
|
||||
|
@ -22,51 +22,51 @@ trap cleanup EXIT
|
|||
|
||||
CONTAINER="nebula:${NAME:-smoke}"
|
||||
|
||||
docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
|
||||
docker run --name host2 --rm "$CONTAINER" -config host2.yml -test
|
||||
docker run --name host3 --rm "$CONTAINER" -config host3.yml -test
|
||||
docker run --name host4 --rm "$CONTAINER" -config host4.yml -test
|
||||
sudo docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
|
||||
sudo docker run --name host2 --rm "$CONTAINER" -config host2.yml -test
|
||||
sudo docker run --name host3 --rm "$CONTAINER" -config host3.yml -test
|
||||
sudo docker run --name host4 --rm "$CONTAINER" -config host4.yml -test
|
||||
|
||||
docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
|
||||
sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
|
||||
sleep 1
|
||||
docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
|
||||
sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
|
||||
sleep 1
|
||||
docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
|
||||
sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
|
||||
sleep 1
|
||||
docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
|
||||
sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
|
||||
sleep 1
|
||||
|
||||
# grab tcpdump pcaps for debugging
|
||||
docker exec lighthouse1 tcpdump -i nebula1 -q -w - -U 2>logs/lighthouse1.inside.log >logs/lighthouse1.inside.pcap &
|
||||
docker exec lighthouse1 tcpdump -i eth0 -q -w - -U 2>logs/lighthouse1.outside.log >logs/lighthouse1.outside.pcap &
|
||||
docker exec host2 tcpdump -i nebula1 -q -w - -U 2>logs/host2.inside.log >logs/host2.inside.pcap &
|
||||
docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host2.outside.pcap &
|
||||
docker exec host3 tcpdump -i nebula1 -q -w - -U 2>logs/host3.inside.log >logs/host3.inside.pcap &
|
||||
docker exec host3 tcpdump -i eth0 -q -w - -U 2>logs/host3.outside.log >logs/host3.outside.pcap &
|
||||
docker exec host4 tcpdump -i nebula1 -q -w - -U 2>logs/host4.inside.log >logs/host4.inside.pcap &
|
||||
docker exec host4 tcpdump -i eth0 -q -w - -U 2>logs/host4.outside.log >logs/host4.outside.pcap &
|
||||
sudo docker exec lighthouse1 tcpdump -i nebula1 -q -w - -U 2>logs/lighthouse1.inside.log >logs/lighthouse1.inside.pcap &
|
||||
sudo docker exec lighthouse1 tcpdump -i eth0 -q -w - -U 2>logs/lighthouse1.outside.log >logs/lighthouse1.outside.pcap &
|
||||
sudo docker exec host2 tcpdump -i nebula1 -q -w - -U 2>logs/host2.inside.log >logs/host2.inside.pcap &
|
||||
sudo docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host2.outside.pcap &
|
||||
sudo docker exec host3 tcpdump -i nebula1 -q -w - -U 2>logs/host3.inside.log >logs/host3.inside.pcap &
|
||||
sudo docker exec host3 tcpdump -i eth0 -q -w - -U 2>logs/host3.outside.log >logs/host3.outside.pcap &
|
||||
sudo docker exec host4 tcpdump -i nebula1 -q -w - -U 2>logs/host4.inside.log >logs/host4.inside.pcap &
|
||||
sudo docker exec host4 tcpdump -i eth0 -q -w - -U 2>logs/host4.outside.log >logs/host4.outside.pcap &
|
||||
|
||||
docker exec host2 ncat -nklv 0.0.0.0 2000 &
|
||||
docker exec host3 ncat -nklv 0.0.0.0 2000 &
|
||||
docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 &
|
||||
docker exec host3 ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000 &
|
||||
sudo docker exec host2 ncat -nklv 0.0.0.0 2000 &
|
||||
sudo docker exec host3 ncat -nklv 0.0.0.0 2000 &
|
||||
sudo docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 &
|
||||
sudo docker exec host3 ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000 &
|
||||
|
||||
set +x
|
||||
echo
|
||||
echo " *** Testing ping from lighthouse1"
|
||||
echo
|
||||
set -x
|
||||
docker exec lighthouse1 ping -c1 192.168.100.2
|
||||
docker exec lighthouse1 ping -c1 192.168.100.3
|
||||
sudo docker exec lighthouse1 ping -c1 192.168.100.2
|
||||
sudo docker exec lighthouse1 ping -c1 192.168.100.3
|
||||
|
||||
set +x
|
||||
echo
|
||||
echo " *** Testing ping from host2"
|
||||
echo
|
||||
set -x
|
||||
docker exec host2 ping -c1 192.168.100.1
|
||||
sudo docker exec host2 ping -c1 192.168.100.1
|
||||
# Should fail because not allowed by host3 inbound firewall
|
||||
! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
|
||||
! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
|
||||
|
||||
set +x
|
||||
echo
|
||||
|
@ -74,34 +74,34 @@ echo " *** Testing ncat from host2"
|
|||
echo
|
||||
set -x
|
||||
# Should fail because not allowed by host3 inbound firewall
|
||||
! docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1
|
||||
! docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
|
||||
! sudo docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1
|
||||
! sudo docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
|
||||
|
||||
set +x
|
||||
echo
|
||||
echo " *** Testing ping from host3"
|
||||
echo
|
||||
set -x
|
||||
docker exec host3 ping -c1 192.168.100.1
|
||||
docker exec host3 ping -c1 192.168.100.2
|
||||
sudo docker exec host3 ping -c1 192.168.100.1
|
||||
sudo docker exec host3 ping -c1 192.168.100.2
|
||||
|
||||
set +x
|
||||
echo
|
||||
echo " *** Testing ncat from host3"
|
||||
echo
|
||||
set -x
|
||||
docker exec host3 ncat -nzv -w5 192.168.100.2 2000
|
||||
docker exec host3 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2
|
||||
sudo docker exec host3 ncat -nzv -w5 192.168.100.2 2000
|
||||
sudo docker exec host3 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2
|
||||
|
||||
set +x
|
||||
echo
|
||||
echo " *** Testing ping from host4"
|
||||
echo
|
||||
set -x
|
||||
docker exec host4 ping -c1 192.168.100.1
|
||||
sudo docker exec host4 ping -c1 192.168.100.1
|
||||
# Should fail because not allowed by host4 outbound firewall
|
||||
! docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
|
||||
! docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1
|
||||
! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
|
||||
! sudo docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1
|
||||
|
||||
set +x
|
||||
echo
|
||||
|
@ -109,10 +109,10 @@ echo " *** Testing ncat from host4"
|
|||
echo
|
||||
set -x
|
||||
# Should fail because not allowed by host4 outbound firewall
|
||||
! docker exec host4 ncat -nzv -w5 192.168.100.2 2000 || exit 1
|
||||
! docker exec host4 ncat -nzv -w5 192.168.100.3 2000 || exit 1
|
||||
! docker exec host4 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2 || exit 1
|
||||
! docker exec host4 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
|
||||
! sudo docker exec host4 ncat -nzv -w5 192.168.100.2 2000 || exit 1
|
||||
! sudo docker exec host4 ncat -nzv -w5 192.168.100.3 2000 || exit 1
|
||||
! sudo docker exec host4 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2 || exit 1
|
||||
! sudo docker exec host4 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
|
||||
|
||||
set +x
|
||||
echo
|
||||
|
@ -120,16 +120,16 @@ echo " *** Testing conntrack"
|
|||
echo
|
||||
set -x
|
||||
# host2 can ping host3 now that host3 pinged it first
|
||||
docker exec host2 ping -c1 192.168.100.3
|
||||
sudo docker exec host2 ping -c1 192.168.100.3
|
||||
# host4 can ping host2 once conntrack established
|
||||
docker exec host2 ping -c1 192.168.100.4
|
||||
docker exec host4 ping -c1 192.168.100.2
|
||||
sudo docker exec host2 ping -c1 192.168.100.4
|
||||
sudo docker exec host4 ping -c1 192.168.100.2
|
||||
|
||||
docker exec host4 sh -c 'kill 1'
|
||||
docker exec host3 sh -c 'kill 1'
|
||||
docker exec host2 sh -c 'kill 1'
|
||||
docker exec lighthouse1 sh -c 'kill 1'
|
||||
sleep 5
|
||||
sudo docker exec host4 sh -c 'kill 1'
|
||||
sudo docker exec host3 sh -c 'kill 1'
|
||||
sudo docker exec host2 sh -c 'kill 1'
|
||||
sudo docker exec lighthouse1 sh -c 'kill 1'
|
||||
sleep 1
|
||||
|
||||
if [ "$(jobs -r)" ]
|
||||
then
|
||||
|
|
|
@ -1,7 +0,0 @@
|
|||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
Vagrant.configure("2") do |config|
|
||||
config.vm.box = "generic/freebsd14"
|
||||
|
||||
config.vm.synced_folder "../build", "/nebula", type: "rsync"
|
||||
end
|
|
@ -1,7 +0,0 @@
|
|||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
Vagrant.configure("2") do |config|
|
||||
config.vm.box = "ubuntu/xenial32"
|
||||
|
||||
config.vm.synced_folder "../build", "/nebula"
|
||||
end
|
|
@ -1,16 +0,0 @@
|
|||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
Vagrant.configure("2") do |config|
|
||||
config.vm.box = "ubuntu/jammy64"
|
||||
|
||||
config.vm.synced_folder "../build", "/nebula"
|
||||
|
||||
config.vm.provision :shell do |shell|
|
||||
shell.inline = <<-EOF
|
||||
sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="ipv6.disable=1"/' /etc/default/grub
|
||||
update-grub
|
||||
EOF
|
||||
shell.privileged = true
|
||||
shell.reboot = true
|
||||
end
|
||||
end
|
|
@ -1,7 +0,0 @@
|
|||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
Vagrant.configure("2") do |config|
|
||||
config.vm.box = "generic/netbsd9"
|
||||
|
||||
config.vm.synced_folder "../build", "/nebula", type: "rsync"
|
||||
end
|
|
@ -1,7 +0,0 @@
|
|||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
Vagrant.configure("2") do |config|
|
||||
config.vm.box = "generic/openbsd7"
|
||||
|
||||
config.vm.synced_folder "../build", "/nebula", type: "rsync"
|
||||
end
|
65
.github/workflows/test.yml
vendored
65
.github/workflows/test.yml
vendored
|
@ -18,11 +18,11 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.24'
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
|
||||
- name: Build
|
||||
|
@ -31,24 +31,16 @@ jobs:
|
|||
- name: Vet
|
||||
run: make vet
|
||||
|
||||
- name: golangci-lint
|
||||
uses: golangci/golangci-lint-action@v6
|
||||
with:
|
||||
version: v1.64
|
||||
|
||||
- name: Test
|
||||
run: make test
|
||||
|
||||
- name: End 2 end
|
||||
run: make e2evv
|
||||
|
||||
- name: Build test mobile
|
||||
run: make build-test-mobile
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: e2e packet flow linux-latest
|
||||
path: e2e/mermaid/linux-latest
|
||||
name: e2e packet flow
|
||||
path: e2e/mermaid/
|
||||
if-no-files-found: warn
|
||||
|
||||
test-linux-boringcrypto:
|
||||
|
@ -56,11 +48,11 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.24'
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
|
||||
- name: Build
|
||||
|
@ -70,39 +62,21 @@ jobs:
|
|||
run: make test-boringcrypto
|
||||
|
||||
- name: End 2 end
|
||||
run: make e2e GOEXPERIMENT=boringcrypto CGO_ENABLED=1 TEST_ENV="TEST_LOGS=1" TEST_FLAGS="-v -ldflags -checklinkname=0"
|
||||
|
||||
test-linux-pkcs11:
|
||||
name: Build and test on linux with pkcs11
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.22'
|
||||
check-latest: true
|
||||
|
||||
- name: Build
|
||||
run: make bin-pkcs11
|
||||
|
||||
- name: Test
|
||||
run: make test-pkcs11
|
||||
run: make e2evv GOEXPERIMENT=boringcrypto CGO_ENABLED=1
|
||||
|
||||
test:
|
||||
name: Build and test on ${{ matrix.os }}
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [windows-latest, macos-latest]
|
||||
os: [windows-latest, macos-11]
|
||||
steps:
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.24'
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
|
||||
- name: Build nebula
|
||||
|
@ -114,19 +88,14 @@ jobs:
|
|||
- name: Vet
|
||||
run: make vet
|
||||
|
||||
- name: golangci-lint
|
||||
uses: golangci/golangci-lint-action@v6
|
||||
with:
|
||||
version: v1.64
|
||||
|
||||
- name: Test
|
||||
run: make test
|
||||
|
||||
- name: End 2 end
|
||||
run: make e2evv
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: e2e packet flow ${{ matrix.os }}
|
||||
path: e2e/mermaid/${{ matrix.os }}
|
||||
name: e2e packet flow
|
||||
path: e2e/mermaid/
|
||||
if-no-files-found: warn
|
||||
|
|
4
.gitignore
vendored
4
.gitignore
vendored
|
@ -5,8 +5,7 @@
|
|||
/nebula-darwin
|
||||
/nebula.exe
|
||||
/nebula-cert.exe
|
||||
**/coverage.out
|
||||
**/cover.out
|
||||
/coverage.out
|
||||
/cpu.pprof
|
||||
/build
|
||||
/*.tar.gz
|
||||
|
@ -14,6 +13,5 @@
|
|||
**.crt
|
||||
**.key
|
||||
**.pem
|
||||
**.pub
|
||||
!/examples/quickstart-vagrant/ansible/roles/nebula/files/vagrant-test-ca.key
|
||||
!/examples/quickstart-vagrant/ansible/roles/nebula/files/vagrant-test-ca.crt
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
# yaml-language-server: $schema=https://golangci-lint.run/jsonschema/golangci.jsonschema.json
|
||||
linters:
|
||||
# Disable all linters.
|
||||
# Default: false
|
||||
disable-all: true
|
||||
# Enable specific linter
|
||||
# https://golangci-lint.run/usage/linters/#enabled-by-default
|
||||
enable:
|
||||
- testifylint
|
186
CHANGELOG.md
186
CHANGELOG.md
|
@ -7,182 +7,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||
|
||||
## [Unreleased]
|
||||
|
||||
## [1.9.4] - 2024-09-09
|
||||
|
||||
### Added
|
||||
|
||||
- Support UDP dialing with gVisor. (#1181)
|
||||
|
||||
### Changed
|
||||
|
||||
- Make some Nebula state programmatically available via control object. (#1188)
|
||||
- Switch internal representation of IPs to netip, to prepare for IPv6 support
|
||||
in the overlay. (#1173)
|
||||
- Minor build and cleanup changes. (#1171, #1164, #1162)
|
||||
- Various dependency updates. (#1195, #1190, #1174, #1168, #1167, #1161, #1147, #1146)
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fix a bug on big endian hosts, like mips. (#1194)
|
||||
- Fix a rare panic if a local index collision happens. (#1191)
|
||||
- Fix integer wraparound in the calculation of handshake timeouts on 32-bit targets. (#1185)
|
||||
|
||||
## [1.9.3] - 2024-06-06
|
||||
|
||||
### Fixed
|
||||
|
||||
- Initialize messageCounter to 2 instead of verifying later. (#1156)
|
||||
|
||||
## [1.9.2] - 2024-06-03
|
||||
|
||||
### Fixed
|
||||
|
||||
- Ensure messageCounter is set before handshake is complete. (#1154)
|
||||
|
||||
## [1.9.1] - 2024-05-29
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fixed a potential deadlock in GetOrHandshake. (#1151)
|
||||
|
||||
## [1.9.0] - 2024-05-07
|
||||
|
||||
### Deprecated
|
||||
|
||||
- This release adds a new setting `default_local_cidr_any` that defaults to
|
||||
true to match previous behavior, but will default to false in the next
|
||||
release (1.10). When set to false, `local_cidr` is matched correctly for
|
||||
firewall rules on hosts acting as unsafe routers, and should be set for any
|
||||
firewall rules you want to allow unsafe route hosts to access. See the issue
|
||||
and example config for more details. (#1071, #1099)
|
||||
|
||||
### Added
|
||||
|
||||
- Nebula now has an official Docker image `nebulaoss/nebula` that is
|
||||
distroless and contains just the `nebula` and `nebula-cert` binaries. You
|
||||
can find it here: https://hub.docker.com/r/nebulaoss/nebula (#1037)
|
||||
|
||||
- Experimental binaries for `loong64` are now provided. (#1003)
|
||||
|
||||
- Added example service script for OpenRC. (#711)
|
||||
|
||||
- The SSH daemon now supports inlined host keys. (#1054)
|
||||
|
||||
- The SSH daemon now supports certificates with `sshd.trusted_cas`. (#1098)
|
||||
|
||||
### Changed
|
||||
|
||||
- Config setting `tun.unsafe_routes` is now reloadable. (#1083)
|
||||
|
||||
- Small documentation and internal improvements. (#1065, #1067, #1069, #1108,
|
||||
#1109, #1111, #1135)
|
||||
|
||||
- Various dependency updates. (#1139, #1138, #1134, #1133, #1126, #1123, #1110,
|
||||
#1094, #1092, #1087, #1086, #1085, #1072, #1063, #1059, #1055, #1053, #1047,
|
||||
#1046, #1034, #1022)
|
||||
|
||||
### Removed
|
||||
|
||||
- Support for the deprecated `local_range` option has been removed. Please
|
||||
change to `preferred_ranges` (which is also now reloadable). (#1043)
|
||||
|
||||
- We are now building with go1.22, which means that for Windows you need at
|
||||
least Windows 10 or Windows Server 2016. This is because support for earlier
|
||||
versions was removed in Go 1.21. See https://go.dev/doc/go1.21#windows (#981)
|
||||
|
||||
- Removed vagrant example, as it was unmaintained. (#1129)
|
||||
|
||||
- Removed Fedora and Arch nebula.service files, as they are maintained in the
|
||||
upstream repos. (#1128, #1132)
|
||||
|
||||
- Remove the TCP round trip tracking metrics, as they never had correct data
|
||||
and were an experiment to begin with. (#1114)
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fixed a potential deadlock introduced in 1.8.1. (#1112)
|
||||
|
||||
- Fixed support for Linux when IPv6 has been disabled at the OS level. (#787)
|
||||
|
||||
- DNS will return NXDOMAIN now when there are no results. (#845)
|
||||
|
||||
- Allow `::` in `lighthouse.dns.host`. (#1115)
|
||||
|
||||
- Capitalization of `NotAfter` fixed in DNS TXT response. (#1127)
|
||||
|
||||
- Don't log invalid certificates. It is untrusted data and can cause a large
|
||||
volume of logs. (#1116)
|
||||
|
||||
## [1.8.2] - 2024-01-08
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fix multiple routines when listen.port is zero. This was a regression
|
||||
introduced in v1.6.0. (#1057)
|
||||
|
||||
### Changed
|
||||
|
||||
- Small dependency update for Noise. (#1038)
|
||||
|
||||
## [1.8.1] - 2023-12-19
|
||||
|
||||
### Security
|
||||
|
||||
- Update `golang.org/x/crypto`, which includes a fix for CVE-2023-48795. (#1048)
|
||||
|
||||
### Fixed
|
||||
|
||||
- Fix a deadlock introduced in v1.8.0 that could occur during handshakes. (#1044)
|
||||
|
||||
- Fix mobile builds. (#1035)
|
||||
|
||||
## [1.8.0] - 2023-12-06
|
||||
|
||||
### Deprecated
|
||||
|
||||
- The next minor release of Nebula, 1.9.0, will require at least Windows 10 or
|
||||
Windows Server 2016. This is because support for earlier versions was removed
|
||||
in Go 1.21. See https://go.dev/doc/go1.21#windows
|
||||
|
||||
### Added
|
||||
|
||||
- Linux: Notify systemd of service readiness. This should resolve timing issues
|
||||
with services that depend on Nebula being active. For an example of how to
|
||||
enable this, see: `examples/service_scripts/nebula.service`. (#929)
|
||||
|
||||
- Windows: Use Registered IO (RIO) when possible. Testing on a Windows 11
|
||||
machine shows ~50x improvement in throughput. (#905)
|
||||
|
||||
- NetBSD, OpenBSD: Added rudimentary support. (#916, #812)
|
||||
|
||||
- FreeBSD: Add support for naming tun devices. (#903)
|
||||
|
||||
### Changed
|
||||
|
||||
- `pki.disconnect_invalid` will now default to true. This means that once a
|
||||
certificate expires, the tunnel will be disconnected. If you use SIGHUP to
|
||||
reload certificates without restarting Nebula, you should ensure all of your
|
||||
clients are on 1.7.0 or newer before you enable this feature. (#859)
|
||||
|
||||
- Limit how often a busy tunnel can requery the lighthouse. The new config
|
||||
option `timers.requery_wait_duration` defaults to `60s`. (#940)
|
||||
|
||||
- The internal structures for hostmaps were refactored to reduce memory usage
|
||||
and the potential for subtle bugs. (#843, #938, #953, #954, #955)
|
||||
|
||||
- Lots of dependency updates.
|
||||
|
||||
### Fixed
|
||||
|
||||
- Windows: Retry wintun device creation if it fails the first time. (#985)
|
||||
|
||||
- Fix issues with firewall reject packets that could cause panics. (#957)
|
||||
|
||||
- Fix relay migration during re-handshakes. (#964)
|
||||
|
||||
- Various other refactors and fixes. (#935, #952, #972, #961, #996, #1002,
|
||||
#987, #1004, #1030, #1032, ...)
|
||||
|
||||
## [1.7.2] - 2023-06-01
|
||||
|
||||
### Fixed
|
||||
|
@ -664,15 +488,7 @@ created.)
|
|||
|
||||
- Initial public release.
|
||||
|
||||
[Unreleased]: https://github.com/slackhq/nebula/compare/v1.9.4...HEAD
|
||||
[1.9.4]: https://github.com/slackhq/nebula/releases/tag/v1.9.4
|
||||
[1.9.3]: https://github.com/slackhq/nebula/releases/tag/v1.9.3
|
||||
[1.9.2]: https://github.com/slackhq/nebula/releases/tag/v1.9.2
|
||||
[1.9.1]: https://github.com/slackhq/nebula/releases/tag/v1.9.1
|
||||
[1.9.0]: https://github.com/slackhq/nebula/releases/tag/v1.9.0
|
||||
[1.8.2]: https://github.com/slackhq/nebula/releases/tag/v1.8.2
|
||||
[1.8.1]: https://github.com/slackhq/nebula/releases/tag/v1.8.1
|
||||
[1.8.0]: https://github.com/slackhq/nebula/releases/tag/v1.8.0
|
||||
[Unreleased]: https://github.com/slackhq/nebula/compare/v1.7.2...HEAD
|
||||
[1.7.2]: https://github.com/slackhq/nebula/releases/tag/v1.7.2
|
||||
[1.7.1]: https://github.com/slackhq/nebula/releases/tag/v1.7.1
|
||||
[1.7.0]: https://github.com/slackhq/nebula/releases/tag/v1.7.0
|
||||
|
|
|
@ -33,5 +33,6 @@ l.WithError(err).
|
|||
WithField("vpnIp", IntIp(hostinfo.hostId)).
|
||||
WithField("udpAddr", addr).
|
||||
WithField("handshake", m{"stage": 1, "style": "ix"}).
|
||||
WithField("cert", remoteCert).
|
||||
Info("Invalid certificate from host")
|
||||
```
|
67
Makefile
67
Makefile
|
@ -1,14 +1,20 @@
|
|||
GOMINVERSION = 1.20
|
||||
NEBULA_CMD_PATH = "./cmd/nebula"
|
||||
GO111MODULE = on
|
||||
export GO111MODULE
|
||||
CGO_ENABLED = 0
|
||||
export CGO_ENABLED
|
||||
|
||||
# Set up OS specific bits
|
||||
ifeq ($(OS),Windows_NT)
|
||||
#TODO: we should be able to ditch awk as well
|
||||
GOVERSION := $(shell go version | awk "{print substr($$3, 3)}")
|
||||
GOISMIN := $(shell IF "$(GOVERSION)" GEQ "$(GOMINVERSION)" ECHO 1)
|
||||
NEBULA_CMD_SUFFIX = .exe
|
||||
NULL_FILE = nul
|
||||
# RIO on windows does pointer stuff that makes go vet angry
|
||||
VET_FLAGS = -unsafeptr=false
|
||||
else
|
||||
GOVERSION := $(shell go version | awk '{print substr($$3, 3)}')
|
||||
GOISMIN := $(shell expr "$(GOVERSION)" ">=" "$(GOMINVERSION)")
|
||||
NEBULA_CMD_SUFFIX =
|
||||
NULL_FILE = /dev/null
|
||||
endif
|
||||
|
@ -22,9 +28,6 @@ ifndef BUILD_NUMBER
|
|||
endif
|
||||
endif
|
||||
|
||||
DOCKER_IMAGE_REPO ?= nebulaoss/nebula
|
||||
DOCKER_IMAGE_TAG ?= latest
|
||||
|
||||
LDFLAGS = -X main.Build=$(BUILD_NUMBER)
|
||||
|
||||
ALL_LINUX = linux-amd64 \
|
||||
|
@ -39,22 +42,13 @@ ALL_LINUX = linux-amd64 \
|
|||
linux-mips64 \
|
||||
linux-mips64le \
|
||||
linux-mips-softfloat \
|
||||
linux-riscv64 \
|
||||
linux-loong64
|
||||
linux-riscv64
|
||||
|
||||
ALL_FREEBSD = freebsd-amd64 \
|
||||
freebsd-arm64
|
||||
|
||||
ALL_OPENBSD = openbsd-amd64 \
|
||||
openbsd-arm64
|
||||
|
||||
ALL_NETBSD = netbsd-amd64 \
|
||||
netbsd-arm64
|
||||
|
||||
ALL = $(ALL_LINUX) \
|
||||
$(ALL_FREEBSD) \
|
||||
$(ALL_OPENBSD) \
|
||||
$(ALL_NETBSD) \
|
||||
darwin-amd64 \
|
||||
darwin-arm64 \
|
||||
windows-amd64 \
|
||||
|
@ -63,7 +57,7 @@ ALL = $(ALL_LINUX) \
|
|||
e2e:
|
||||
$(TEST_ENV) go test -tags=e2e_testing -count=1 $(TEST_FLAGS) ./e2e
|
||||
|
||||
e2ev: TEST_FLAGS += -v
|
||||
e2ev: TEST_FLAGS = -v
|
||||
e2ev: e2e
|
||||
|
||||
e2evv: TEST_ENV += TEST_LOGS=1
|
||||
|
@ -78,25 +72,17 @@ e2evvvv: e2ev
|
|||
e2e-bench: TEST_FLAGS = -bench=. -benchmem -run=^$
|
||||
e2e-bench: e2e
|
||||
|
||||
DOCKER_BIN = build/linux-amd64/nebula build/linux-amd64/nebula-cert
|
||||
|
||||
all: $(ALL:%=build/%/nebula) $(ALL:%=build/%/nebula-cert)
|
||||
|
||||
docker: docker/linux-$(shell go env GOARCH)
|
||||
|
||||
release: $(ALL:%=build/nebula-%.tar.gz)
|
||||
|
||||
release-linux: $(ALL_LINUX:%=build/nebula-%.tar.gz)
|
||||
|
||||
release-freebsd: $(ALL_FREEBSD:%=build/nebula-%.tar.gz)
|
||||
|
||||
release-openbsd: $(ALL_OPENBSD:%=build/nebula-%.tar.gz)
|
||||
|
||||
release-netbsd: $(ALL_NETBSD:%=build/nebula-%.tar.gz)
|
||||
|
||||
release-boringcrypto: build/nebula-linux-$(shell go env GOARCH)-boringcrypto.tar.gz
|
||||
|
||||
BUILD_ARGS += -trimpath
|
||||
BUILD_ARGS = -trimpath
|
||||
|
||||
bin-windows: build/windows-amd64/nebula.exe build/windows-amd64/nebula-cert.exe
|
||||
mv $? .
|
||||
|
@ -116,10 +102,6 @@ bin-freebsd-arm64: build/freebsd-arm64/nebula build/freebsd-arm64/nebula-cert
|
|||
bin-boringcrypto: build/linux-$(shell go env GOARCH)-boringcrypto/nebula build/linux-$(shell go env GOARCH)-boringcrypto/nebula-cert
|
||||
mv $? .
|
||||
|
||||
bin-pkcs11: BUILD_ARGS += -tags pkcs11
|
||||
bin-pkcs11: CGO_ENABLED = 1
|
||||
bin-pkcs11: bin
|
||||
|
||||
bin:
|
||||
go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula${NEBULA_CMD_SUFFIX} ${NEBULA_CMD_PATH}
|
||||
go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula-cert${NEBULA_CMD_SUFFIX} ./cmd/nebula-cert
|
||||
|
@ -137,8 +119,6 @@ build/linux-mips-softfloat/%: LDFLAGS += -s -w
|
|||
# boringcrypto
|
||||
build/linux-amd64-boringcrypto/%: GOENV += GOEXPERIMENT=boringcrypto CGO_ENABLED=1
|
||||
build/linux-arm64-boringcrypto/%: GOENV += GOEXPERIMENT=boringcrypto CGO_ENABLED=1
|
||||
build/linux-amd64-boringcrypto/%: LDFLAGS += -checklinkname=0
|
||||
build/linux-arm64-boringcrypto/%: LDFLAGS += -checklinkname=0
|
||||
|
||||
build/%/nebula: .FORCE
|
||||
GOOS=$(firstword $(subst -, , $*)) \
|
||||
|
@ -162,31 +142,19 @@ build/nebula-%.tar.gz: build/%/nebula build/%/nebula-cert
|
|||
build/nebula-%.zip: build/%/nebula.exe build/%/nebula-cert.exe
|
||||
cd build/$* && zip ../nebula-$*.zip nebula.exe nebula-cert.exe
|
||||
|
||||
docker/%: build/%/nebula build/%/nebula-cert
|
||||
docker build . $(DOCKER_BUILD_ARGS) -f docker/Dockerfile --platform "$(subst -,/,$*)" --tag "${DOCKER_IMAGE_REPO}:${DOCKER_IMAGE_TAG}" --tag "${DOCKER_IMAGE_REPO}:$(BUILD_NUMBER)"
|
||||
|
||||
vet:
|
||||
go vet $(VET_FLAGS) -v ./...
|
||||
go vet -v ./...
|
||||
|
||||
test:
|
||||
go test -v ./...
|
||||
|
||||
test-boringcrypto:
|
||||
GOEXPERIMENT=boringcrypto CGO_ENABLED=1 go test -ldflags "-checklinkname=0" -v ./...
|
||||
|
||||
test-pkcs11:
|
||||
CGO_ENABLED=1 go test -v -tags pkcs11 ./...
|
||||
GOEXPERIMENT=boringcrypto CGO_ENABLED=1 go test -v ./...
|
||||
|
||||
test-cov-html:
|
||||
go test -coverprofile=coverage.out
|
||||
go tool cover -html=coverage.out
|
||||
|
||||
build-test-mobile:
|
||||
GOARCH=amd64 GOOS=ios go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
|
||||
GOARCH=arm64 GOOS=ios go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
|
||||
GOARCH=amd64 GOOS=android go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
|
||||
GOARCH=arm64 GOOS=android go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
|
||||
|
||||
bench:
|
||||
go test -bench=.
|
||||
|
||||
|
@ -198,7 +166,7 @@ bench-cpu-long:
|
|||
go test -bench=. -benchtime=60s -cpuprofile=cpu.pprof
|
||||
go tool pprof go-audit.test cpu.pprof
|
||||
|
||||
proto: nebula.pb.go cert/cert_v1.pb.go
|
||||
proto: nebula.pb.go cert/cert.pb.go
|
||||
|
||||
nebula.pb.go: nebula.proto .FORCE
|
||||
go build github.com/gogo/protobuf/protoc-gen-gogofaster
|
||||
|
@ -228,13 +196,8 @@ smoke-relay-docker: bin-docker
|
|||
cd .github/workflows/smoke/ && ./smoke-relay.sh
|
||||
|
||||
smoke-docker-race: BUILD_ARGS = -race
|
||||
smoke-docker-race: CGO_ENABLED = 1
|
||||
smoke-docker-race: smoke-docker
|
||||
|
||||
smoke-vagrant/%: bin-docker build/%/nebula
|
||||
cd .github/workflows/smoke/ && ./build.sh $*
|
||||
cd .github/workflows/smoke/ && ./smoke-vagrant.sh $*
|
||||
|
||||
.FORCE:
|
||||
.PHONY: bench bench-cpu bench-cpu-long bin build-test-mobile e2e e2ev e2evv e2evvv e2evvvv proto release service smoke-docker smoke-docker-race test test-cov-html smoke-vagrant/%
|
||||
.PHONY: e2e e2ev e2evv e2evvv e2evvvv test test-cov-html bench bench-cpu bench-cpu-long bin proto release service smoke-docker smoke-docker-race
|
||||
.DEFAULT_GOAL := bin
|
||||
|
|
24
README.md
24
README.md
|
@ -12,7 +12,7 @@ Further documentation can be found [here](https://nebula.defined.net/docs/).
|
|||
|
||||
You can read more about Nebula [here](https://medium.com/p/884110a5579).
|
||||
|
||||
You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/zt-2xqe6e7vn-k_KGi8s13nsr7cvHVvHvuQ).
|
||||
You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU).
|
||||
|
||||
## Supported Platforms
|
||||
|
||||
|
@ -27,36 +27,20 @@ Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for
|
|||
|
||||
#### Distribution Packages
|
||||
|
||||
- [Arch Linux](https://archlinux.org/packages/extra/x86_64/nebula/)
|
||||
- [Arch Linux](https://archlinux.org/packages/community/x86_64/nebula/)
|
||||
```
|
||||
$ sudo pacman -S nebula
|
||||
```
|
||||
|
||||
- [Fedora Linux](https://src.fedoraproject.org/rpms/nebula)
|
||||
```
|
||||
$ sudo dnf install nebula
|
||||
```
|
||||
|
||||
- [Debian Linux](https://packages.debian.org/source/stable/nebula)
|
||||
```
|
||||
$ sudo apt install nebula
|
||||
```
|
||||
|
||||
- [Alpine Linux](https://pkgs.alpinelinux.org/packages?name=nebula)
|
||||
```
|
||||
$ sudo apk add nebula
|
||||
```
|
||||
|
||||
- [macOS Homebrew](https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/n/nebula.rb)
|
||||
- [macOS Homebrew](https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/nebula.rb)
|
||||
```
|
||||
$ brew install nebula
|
||||
```
|
||||
|
||||
- [Docker](https://hub.docker.com/r/nebulaoss/nebula)
|
||||
```
|
||||
$ docker pull nebulaoss/nebula
|
||||
```
|
||||
|
||||
#### Mobile
|
||||
|
||||
- [iOS](https://apps.apple.com/us/app/mobile-nebula/id1509587936?itsct=apps_box&itscg=30200)
|
||||
|
@ -124,7 +108,7 @@ For each host, copy the nebula binary to the host, along with `config.yml` from
|
|||
|
||||
## Building Nebula from source
|
||||
|
||||
Make sure you have [go](https://go.dev/doc/install) installed and clone this repo. Change to the nebula directory.
|
||||
Download go and clone this repo. Change to the nebula directory.
|
||||
|
||||
To build nebula for all platforms:
|
||||
`make all`
|
||||
|
|
127
allow_list.go
127
allow_list.go
|
@ -2,16 +2,17 @@ package nebula
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"net"
|
||||
"regexp"
|
||||
|
||||
"github.com/gaissmai/bart"
|
||||
"github.com/slackhq/nebula/cidr"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/iputil"
|
||||
)
|
||||
|
||||
type AllowList struct {
|
||||
// The values of this cidrTree are `bool`, signifying allow/deny
|
||||
cidrTree *bart.Table[bool]
|
||||
cidrTree *cidr.Tree6
|
||||
}
|
||||
|
||||
type RemoteAllowList struct {
|
||||
|
@ -19,7 +20,7 @@ type RemoteAllowList struct {
|
|||
|
||||
// Inside Range Specific, keys of this tree are inside CIDRs and values
|
||||
// are *AllowList
|
||||
insideAllowLists *bart.Table[*AllowList]
|
||||
insideAllowLists *cidr.Tree6
|
||||
}
|
||||
|
||||
type LocalAllowList struct {
|
||||
|
@ -87,7 +88,7 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in
|
|||
return nil, fmt.Errorf("config `%s` has invalid type: %T", k, raw)
|
||||
}
|
||||
|
||||
tree := new(bart.Table[bool])
|
||||
tree := cidr.NewTree6()
|
||||
|
||||
// Keep track of the rules we have added for both ipv4 and ipv6
|
||||
type allowListRules struct {
|
||||
|
@ -121,19 +122,18 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in
|
|||
return nil, fmt.Errorf("config `%s` has invalid value (type %T): %v", k, rawValue, rawValue)
|
||||
}
|
||||
|
||||
ipNet, err := netip.ParsePrefix(rawCIDR)
|
||||
_, ipNet, err := net.ParseCIDR(rawCIDR)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s. %w", k, rawCIDR, err)
|
||||
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
|
||||
}
|
||||
|
||||
ipNet = netip.PrefixFrom(ipNet.Addr().Unmap(), ipNet.Bits())
|
||||
// TODO: should we error on duplicate CIDRs in the config?
|
||||
tree.AddCIDR(ipNet, value)
|
||||
|
||||
tree.Insert(ipNet, value)
|
||||
|
||||
maskBits := ipNet.Bits()
|
||||
maskBits, maskSize := ipNet.Mask.Size()
|
||||
|
||||
var rules *allowListRules
|
||||
if ipNet.Addr().Is4() {
|
||||
if maskSize == 32 {
|
||||
rules = &rules4
|
||||
} else {
|
||||
rules = &rules6
|
||||
|
@ -156,7 +156,8 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in
|
|||
|
||||
if !rules4.defaultSet {
|
||||
if rules4.allValuesMatch {
|
||||
tree.Insert(netip.PrefixFrom(netip.IPv4Unspecified(), 0), !rules4.allValues)
|
||||
_, zeroCIDR, _ := net.ParseCIDR("0.0.0.0/0")
|
||||
tree.AddCIDR(zeroCIDR, !rules4.allValues)
|
||||
} else {
|
||||
return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for 0.0.0.0/0", k)
|
||||
}
|
||||
|
@ -164,7 +165,8 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in
|
|||
|
||||
if !rules6.defaultSet {
|
||||
if rules6.allValuesMatch {
|
||||
tree.Insert(netip.PrefixFrom(netip.IPv6Unspecified(), 0), !rules6.allValues)
|
||||
_, zeroCIDR, _ := net.ParseCIDR("::/0")
|
||||
tree.AddCIDR(zeroCIDR, !rules6.allValues)
|
||||
} else {
|
||||
return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for ::/0", k)
|
||||
}
|
||||
|
@ -216,13 +218,13 @@ func getAllowListInterfaces(k string, v interface{}) ([]AllowListNameRule, error
|
|||
return nameRules, nil
|
||||
}
|
||||
|
||||
func getRemoteAllowRanges(c *config.C, k string) (*bart.Table[*AllowList], error) {
|
||||
func getRemoteAllowRanges(c *config.C, k string) (*cidr.Tree6, error) {
|
||||
value := c.Get(k)
|
||||
if value == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
remoteAllowRanges := new(bart.Table[*AllowList])
|
||||
remoteAllowRanges := cidr.NewTree6()
|
||||
|
||||
rawMap, ok := value.(map[interface{}]interface{})
|
||||
if !ok {
|
||||
|
@ -239,31 +241,64 @@ func getRemoteAllowRanges(c *config.C, k string) (*bart.Table[*AllowList], error
|
|||
return nil, err
|
||||
}
|
||||
|
||||
ipNet, err := netip.ParsePrefix(rawCIDR)
|
||||
_, ipNet, err := net.ParseCIDR(rawCIDR)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s. %w", k, rawCIDR, err)
|
||||
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
|
||||
}
|
||||
|
||||
remoteAllowRanges.Insert(netip.PrefixFrom(ipNet.Addr().Unmap(), ipNet.Bits()), allowList)
|
||||
remoteAllowRanges.AddCIDR(ipNet, allowList)
|
||||
}
|
||||
|
||||
return remoteAllowRanges, nil
|
||||
}
|
||||
|
||||
func (al *AllowList) Allow(addr netip.Addr) bool {
|
||||
func (al *AllowList) Allow(ip net.IP) bool {
|
||||
if al == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
result, _ := al.cidrTree.Lookup(addr)
|
||||
return result
|
||||
result := al.cidrTree.MostSpecificContains(ip)
|
||||
switch v := result.(type) {
|
||||
case bool:
|
||||
return v
|
||||
default:
|
||||
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
|
||||
}
|
||||
}
|
||||
|
||||
func (al *LocalAllowList) Allow(udpAddr netip.Addr) bool {
|
||||
func (al *AllowList) AllowIpV4(ip iputil.VpnIp) bool {
|
||||
if al == nil {
|
||||
return true
|
||||
}
|
||||
return al.AllowList.Allow(udpAddr)
|
||||
|
||||
result := al.cidrTree.MostSpecificContainsIpV4(ip)
|
||||
switch v := result.(type) {
|
||||
case bool:
|
||||
return v
|
||||
default:
|
||||
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
|
||||
}
|
||||
}
|
||||
|
||||
func (al *AllowList) AllowIpV6(hi, lo uint64) bool {
|
||||
if al == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
result := al.cidrTree.MostSpecificContainsIpV6(hi, lo)
|
||||
switch v := result.(type) {
|
||||
case bool:
|
||||
return v
|
||||
default:
|
||||
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
|
||||
}
|
||||
}
|
||||
|
||||
func (al *LocalAllowList) Allow(ip net.IP) bool {
|
||||
if al == nil {
|
||||
return true
|
||||
}
|
||||
return al.AllowList.Allow(ip)
|
||||
}
|
||||
|
||||
func (al *LocalAllowList) AllowName(name string) bool {
|
||||
|
@ -281,39 +316,45 @@ func (al *LocalAllowList) AllowName(name string) bool {
|
|||
return !al.nameRules[0].Allow
|
||||
}
|
||||
|
||||
func (al *RemoteAllowList) AllowUnknownVpnAddr(vpnAddr netip.Addr) bool {
|
||||
func (al *RemoteAllowList) AllowUnknownVpnIp(ip net.IP) bool {
|
||||
if al == nil {
|
||||
return true
|
||||
}
|
||||
return al.AllowList.Allow(vpnAddr)
|
||||
return al.AllowList.Allow(ip)
|
||||
}
|
||||
|
||||
func (al *RemoteAllowList) Allow(vpnAddr netip.Addr, udpAddr netip.Addr) bool {
|
||||
if !al.getInsideAllowList(vpnAddr).Allow(udpAddr) {
|
||||
func (al *RemoteAllowList) Allow(vpnIp iputil.VpnIp, ip net.IP) bool {
|
||||
if !al.getInsideAllowList(vpnIp).Allow(ip) {
|
||||
return false
|
||||
}
|
||||
return al.AllowList.Allow(udpAddr)
|
||||
return al.AllowList.Allow(ip)
|
||||
}
|
||||
|
||||
func (al *RemoteAllowList) AllowAll(vpnAddrs []netip.Addr, udpAddr netip.Addr) bool {
|
||||
if !al.AllowList.Allow(udpAddr) {
|
||||
func (al *RemoteAllowList) AllowIpV4(vpnIp iputil.VpnIp, ip iputil.VpnIp) bool {
|
||||
if al == nil {
|
||||
return true
|
||||
}
|
||||
if !al.getInsideAllowList(vpnIp).AllowIpV4(ip) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, vpnAddr := range vpnAddrs {
|
||||
if !al.getInsideAllowList(vpnAddr).Allow(udpAddr) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
return al.AllowList.AllowIpV4(ip)
|
||||
}
|
||||
|
||||
func (al *RemoteAllowList) getInsideAllowList(vpnAddr netip.Addr) *AllowList {
|
||||
func (al *RemoteAllowList) AllowIpV6(vpnIp iputil.VpnIp, hi, lo uint64) bool {
|
||||
if al == nil {
|
||||
return true
|
||||
}
|
||||
if !al.getInsideAllowList(vpnIp).AllowIpV6(hi, lo) {
|
||||
return false
|
||||
}
|
||||
return al.AllowList.AllowIpV6(hi, lo)
|
||||
}
|
||||
|
||||
func (al *RemoteAllowList) getInsideAllowList(vpnIp iputil.VpnIp) *AllowList {
|
||||
if al.insideAllowLists != nil {
|
||||
inside, ok := al.insideAllowLists.Lookup(vpnAddr)
|
||||
if ok {
|
||||
return inside
|
||||
inside := al.insideAllowLists.MostSpecificContainsIpV4(vpnIp)
|
||||
if inside != nil {
|
||||
return inside.(*AllowList)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -1,15 +1,14 @@
|
|||
package nebula
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"net"
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"github.com/gaissmai/bart"
|
||||
"github.com/slackhq/nebula/cidr"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNewAllowListFromConfig(t *testing.T) {
|
||||
|
@ -19,21 +18,21 @@ func TestNewAllowListFromConfig(t *testing.T) {
|
|||
"192.168.0.0": true,
|
||||
}
|
||||
r, err := newAllowListFromConfig(c, "allowlist", nil)
|
||||
require.EqualError(t, err, "config `allowlist` has invalid CIDR: 192.168.0.0. netip.ParsePrefix(\"192.168.0.0\"): no '/'")
|
||||
assert.EqualError(t, err, "config `allowlist` has invalid CIDR: 192.168.0.0")
|
||||
assert.Nil(t, r)
|
||||
|
||||
c.Settings["allowlist"] = map[interface{}]interface{}{
|
||||
"192.168.0.0/16": "abc",
|
||||
}
|
||||
r, err = newAllowListFromConfig(c, "allowlist", nil)
|
||||
require.EqualError(t, err, "config `allowlist` has invalid value (type string): abc")
|
||||
assert.EqualError(t, err, "config `allowlist` has invalid value (type string): abc")
|
||||
|
||||
c.Settings["allowlist"] = map[interface{}]interface{}{
|
||||
"192.168.0.0/16": true,
|
||||
"10.0.0.0/8": false,
|
||||
}
|
||||
r, err = newAllowListFromConfig(c, "allowlist", nil)
|
||||
require.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for 0.0.0.0/0")
|
||||
assert.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for 0.0.0.0/0")
|
||||
|
||||
c.Settings["allowlist"] = map[interface{}]interface{}{
|
||||
"0.0.0.0/0": true,
|
||||
|
@ -43,7 +42,7 @@ func TestNewAllowListFromConfig(t *testing.T) {
|
|||
"fd00:fd00::/16": false,
|
||||
}
|
||||
r, err = newAllowListFromConfig(c, "allowlist", nil)
|
||||
require.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for ::/0")
|
||||
assert.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for ::/0")
|
||||
|
||||
c.Settings["allowlist"] = map[interface{}]interface{}{
|
||||
"0.0.0.0/0": true,
|
||||
|
@ -76,7 +75,7 @@ func TestNewAllowListFromConfig(t *testing.T) {
|
|||
},
|
||||
}
|
||||
lr, err := NewLocalAllowListFromConfig(c, "allowlist")
|
||||
require.EqualError(t, err, "config `allowlist.interfaces` has invalid value (type string): foo")
|
||||
assert.EqualError(t, err, "config `allowlist.interfaces` has invalid value (type string): foo")
|
||||
|
||||
c.Settings["allowlist"] = map[interface{}]interface{}{
|
||||
"interfaces": map[interface{}]interface{}{
|
||||
|
@ -85,7 +84,7 @@ func TestNewAllowListFromConfig(t *testing.T) {
|
|||
},
|
||||
}
|
||||
lr, err = NewLocalAllowListFromConfig(c, "allowlist")
|
||||
require.EqualError(t, err, "config `allowlist.interfaces` values must all be the same true/false value")
|
||||
assert.EqualError(t, err, "config `allowlist.interfaces` values must all be the same true/false value")
|
||||
|
||||
c.Settings["allowlist"] = map[interface{}]interface{}{
|
||||
"interfaces": map[interface{}]interface{}{
|
||||
|
@ -99,30 +98,30 @@ func TestNewAllowListFromConfig(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllowList_Allow(t *testing.T) {
|
||||
assert.True(t, ((*AllowList)(nil)).Allow(netip.MustParseAddr("1.1.1.1")))
|
||||
assert.Equal(t, true, ((*AllowList)(nil)).Allow(net.ParseIP("1.1.1.1")))
|
||||
|
||||
tree := new(bart.Table[bool])
|
||||
tree.Insert(netip.MustParsePrefix("0.0.0.0/0"), true)
|
||||
tree.Insert(netip.MustParsePrefix("10.0.0.0/8"), false)
|
||||
tree.Insert(netip.MustParsePrefix("10.42.42.42/32"), true)
|
||||
tree.Insert(netip.MustParsePrefix("10.42.0.0/16"), true)
|
||||
tree.Insert(netip.MustParsePrefix("10.42.42.0/24"), true)
|
||||
tree.Insert(netip.MustParsePrefix("10.42.42.0/24"), false)
|
||||
tree.Insert(netip.MustParsePrefix("::1/128"), true)
|
||||
tree.Insert(netip.MustParsePrefix("::2/128"), false)
|
||||
tree := cidr.NewTree6()
|
||||
tree.AddCIDR(cidr.Parse("0.0.0.0/0"), true)
|
||||
tree.AddCIDR(cidr.Parse("10.0.0.0/8"), false)
|
||||
tree.AddCIDR(cidr.Parse("10.42.42.42/32"), true)
|
||||
tree.AddCIDR(cidr.Parse("10.42.0.0/16"), true)
|
||||
tree.AddCIDR(cidr.Parse("10.42.42.0/24"), true)
|
||||
tree.AddCIDR(cidr.Parse("10.42.42.0/24"), false)
|
||||
tree.AddCIDR(cidr.Parse("::1/128"), true)
|
||||
tree.AddCIDR(cidr.Parse("::2/128"), false)
|
||||
al := &AllowList{cidrTree: tree}
|
||||
|
||||
assert.True(t, al.Allow(netip.MustParseAddr("1.1.1.1")))
|
||||
assert.False(t, al.Allow(netip.MustParseAddr("10.0.0.4")))
|
||||
assert.True(t, al.Allow(netip.MustParseAddr("10.42.42.42")))
|
||||
assert.False(t, al.Allow(netip.MustParseAddr("10.42.42.41")))
|
||||
assert.True(t, al.Allow(netip.MustParseAddr("10.42.0.1")))
|
||||
assert.True(t, al.Allow(netip.MustParseAddr("::1")))
|
||||
assert.False(t, al.Allow(netip.MustParseAddr("::2")))
|
||||
assert.Equal(t, true, al.Allow(net.ParseIP("1.1.1.1")))
|
||||
assert.Equal(t, false, al.Allow(net.ParseIP("10.0.0.4")))
|
||||
assert.Equal(t, true, al.Allow(net.ParseIP("10.42.42.42")))
|
||||
assert.Equal(t, false, al.Allow(net.ParseIP("10.42.42.41")))
|
||||
assert.Equal(t, true, al.Allow(net.ParseIP("10.42.0.1")))
|
||||
assert.Equal(t, true, al.Allow(net.ParseIP("::1")))
|
||||
assert.Equal(t, false, al.Allow(net.ParseIP("::2")))
|
||||
}
|
||||
|
||||
func TestLocalAllowList_AllowName(t *testing.T) {
|
||||
assert.True(t, ((*LocalAllowList)(nil)).AllowName("docker0"))
|
||||
assert.Equal(t, true, ((*LocalAllowList)(nil)).AllowName("docker0"))
|
||||
|
||||
rules := []AllowListNameRule{
|
||||
{Name: regexp.MustCompile("^docker.*$"), Allow: false},
|
||||
|
@ -130,9 +129,9 @@ func TestLocalAllowList_AllowName(t *testing.T) {
|
|||
}
|
||||
al := &LocalAllowList{nameRules: rules}
|
||||
|
||||
assert.False(t, al.AllowName("docker0"))
|
||||
assert.False(t, al.AllowName("tun0"))
|
||||
assert.True(t, al.AllowName("eth0"))
|
||||
assert.Equal(t, false, al.AllowName("docker0"))
|
||||
assert.Equal(t, false, al.AllowName("tun0"))
|
||||
assert.Equal(t, true, al.AllowName("eth0"))
|
||||
|
||||
rules = []AllowListNameRule{
|
||||
{Name: regexp.MustCompile("^eth.*$"), Allow: true},
|
||||
|
@ -140,7 +139,7 @@ func TestLocalAllowList_AllowName(t *testing.T) {
|
|||
}
|
||||
al = &LocalAllowList{nameRules: rules}
|
||||
|
||||
assert.False(t, al.AllowName("docker0"))
|
||||
assert.True(t, al.AllowName("eth0"))
|
||||
assert.True(t, al.AllowName("ens5"))
|
||||
assert.Equal(t, false, al.AllowName("docker0"))
|
||||
assert.Equal(t, true, al.AllowName("eth0"))
|
||||
assert.Equal(t, true, al.AllowName("ens5"))
|
||||
}
|
||||
|
|
|
@ -1,40 +1,41 @@
|
|||
package nebula
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
"net/netip"
|
||||
"strconv"
|
||||
|
||||
"github.com/gaissmai/bart"
|
||||
"github.com/slackhq/nebula/cidr"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/iputil"
|
||||
)
|
||||
|
||||
// This allows us to "guess" what the remote might be for a host while we wait
|
||||
// for the lighthouse response. See "lighthouse.calculated_remotes" in the
|
||||
// example config file.
|
||||
type calculatedRemote struct {
|
||||
ipNet netip.Prefix
|
||||
mask netip.Prefix
|
||||
port uint32
|
||||
ipNet net.IPNet
|
||||
maskIP iputil.VpnIp
|
||||
mask iputil.VpnIp
|
||||
port uint32
|
||||
}
|
||||
|
||||
func newCalculatedRemote(cidr, maskCidr netip.Prefix, port int) (*calculatedRemote, error) {
|
||||
if maskCidr.Addr().BitLen() != cidr.Addr().BitLen() {
|
||||
return nil, fmt.Errorf("invalid mask: %s for cidr: %s", maskCidr, cidr)
|
||||
func newCalculatedRemote(ipNet *net.IPNet, port int) (*calculatedRemote, error) {
|
||||
// Ensure this is an IPv4 mask that we expect
|
||||
ones, bits := ipNet.Mask.Size()
|
||||
if ones == 0 || bits != 32 {
|
||||
return nil, fmt.Errorf("invalid mask: %v", ipNet)
|
||||
}
|
||||
|
||||
masked := maskCidr.Masked()
|
||||
if port < 0 || port > math.MaxUint16 {
|
||||
return nil, fmt.Errorf("invalid port: %d", port)
|
||||
}
|
||||
|
||||
return &calculatedRemote{
|
||||
ipNet: maskCidr,
|
||||
mask: masked,
|
||||
port: uint32(port),
|
||||
ipNet: *ipNet,
|
||||
maskIP: iputil.Ip2VpnIp(ipNet.IP),
|
||||
mask: iputil.Ip2VpnIp(ipNet.Mask),
|
||||
port: uint32(port),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -42,47 +43,21 @@ func (c *calculatedRemote) String() string {
|
|||
return fmt.Sprintf("CalculatedRemote(mask=%v port=%d)", c.ipNet, c.port)
|
||||
}
|
||||
|
||||
func (c *calculatedRemote) ApplyV4(addr netip.Addr) *V4AddrPort {
|
||||
// Combine the masked bytes of the "mask" IP with the unmasked bytes of the overlay IP
|
||||
maskb := net.CIDRMask(c.mask.Bits(), c.mask.Addr().BitLen())
|
||||
mask := binary.BigEndian.Uint32(maskb[:])
|
||||
func (c *calculatedRemote) Apply(ip iputil.VpnIp) *Ip4AndPort {
|
||||
// Combine the masked bytes of the "mask" IP with the unmasked bytes
|
||||
// of the overlay IP
|
||||
masked := (c.maskIP & c.mask) | (ip & ^c.mask)
|
||||
|
||||
b := c.mask.Addr().As4()
|
||||
maskAddr := binary.BigEndian.Uint32(b[:])
|
||||
|
||||
b = addr.As4()
|
||||
intAddr := binary.BigEndian.Uint32(b[:])
|
||||
|
||||
return &V4AddrPort{(maskAddr & mask) | (intAddr & ^mask), c.port}
|
||||
return &Ip4AndPort{Ip: uint32(masked), Port: c.port}
|
||||
}
|
||||
|
||||
func (c *calculatedRemote) ApplyV6(addr netip.Addr) *V6AddrPort {
|
||||
mask := net.CIDRMask(c.mask.Bits(), c.mask.Addr().BitLen())
|
||||
maskAddr := c.mask.Addr().As16()
|
||||
calcAddr := addr.As16()
|
||||
|
||||
ap := V6AddrPort{Port: c.port}
|
||||
|
||||
maskb := binary.BigEndian.Uint64(mask[:8])
|
||||
maskAddrb := binary.BigEndian.Uint64(maskAddr[:8])
|
||||
calcAddrb := binary.BigEndian.Uint64(calcAddr[:8])
|
||||
ap.Hi = (maskAddrb & maskb) | (calcAddrb & ^maskb)
|
||||
|
||||
maskb = binary.BigEndian.Uint64(mask[8:])
|
||||
maskAddrb = binary.BigEndian.Uint64(maskAddr[8:])
|
||||
calcAddrb = binary.BigEndian.Uint64(calcAddr[8:])
|
||||
ap.Lo = (maskAddrb & maskb) | (calcAddrb & ^maskb)
|
||||
|
||||
return &ap
|
||||
}
|
||||
|
||||
func NewCalculatedRemotesFromConfig(c *config.C, k string) (*bart.Table[[]*calculatedRemote], error) {
|
||||
func NewCalculatedRemotesFromConfig(c *config.C, k string) (*cidr.Tree4, error) {
|
||||
value := c.Get(k)
|
||||
if value == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
calculatedRemotes := new(bart.Table[[]*calculatedRemote])
|
||||
calculatedRemotes := cidr.NewTree4()
|
||||
|
||||
rawMap, ok := value.(map[any]any)
|
||||
if !ok {
|
||||
|
@ -94,23 +69,23 @@ func NewCalculatedRemotesFromConfig(c *config.C, k string) (*bart.Table[[]*calcu
|
|||
return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
|
||||
}
|
||||
|
||||
cidr, err := netip.ParsePrefix(rawCIDR)
|
||||
_, ipNet, err := net.ParseCIDR(rawCIDR)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
|
||||
}
|
||||
|
||||
entry, err := newCalculatedRemotesListFromConfig(cidr, rawValue)
|
||||
entry, err := newCalculatedRemotesListFromConfig(rawValue)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("config '%s.%s': %w", k, rawCIDR, err)
|
||||
}
|
||||
|
||||
calculatedRemotes.Insert(cidr, entry)
|
||||
calculatedRemotes.AddCIDR(ipNet, entry)
|
||||
}
|
||||
|
||||
return calculatedRemotes, nil
|
||||
}
|
||||
|
||||
func newCalculatedRemotesListFromConfig(cidr netip.Prefix, raw any) ([]*calculatedRemote, error) {
|
||||
func newCalculatedRemotesListFromConfig(raw any) ([]*calculatedRemote, error) {
|
||||
rawList, ok := raw.([]any)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("calculated_remotes entry has invalid type: %T", raw)
|
||||
|
@ -118,7 +93,7 @@ func newCalculatedRemotesListFromConfig(cidr netip.Prefix, raw any) ([]*calculat
|
|||
|
||||
var l []*calculatedRemote
|
||||
for _, e := range rawList {
|
||||
c, err := newCalculatedRemotesEntryFromConfig(cidr, e)
|
||||
c, err := newCalculatedRemotesEntryFromConfig(e)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("calculated_remotes entry: %w", err)
|
||||
}
|
||||
|
@ -128,7 +103,7 @@ func newCalculatedRemotesListFromConfig(cidr netip.Prefix, raw any) ([]*calculat
|
|||
return l, nil
|
||||
}
|
||||
|
||||
func newCalculatedRemotesEntryFromConfig(cidr netip.Prefix, raw any) (*calculatedRemote, error) {
|
||||
func newCalculatedRemotesEntryFromConfig(raw any) (*calculatedRemote, error) {
|
||||
rawMap, ok := raw.(map[any]any)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid type: %T", raw)
|
||||
|
@ -142,7 +117,7 @@ func newCalculatedRemotesEntryFromConfig(cidr netip.Prefix, raw any) (*calculate
|
|||
if !ok {
|
||||
return nil, fmt.Errorf("invalid mask (type %T): %v", rawValue, rawValue)
|
||||
}
|
||||
maskCidr, err := netip.ParsePrefix(rawMask)
|
||||
_, ipNet, err := net.ParseCIDR(rawMask)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid mask: %s", rawMask)
|
||||
}
|
||||
|
@ -164,5 +139,5 @@ func newCalculatedRemotesEntryFromConfig(cidr netip.Prefix, raw any) (*calculate
|
|||
return nil, fmt.Errorf("invalid port (type %T): %v", rawValue, rawValue)
|
||||
}
|
||||
|
||||
return newCalculatedRemote(cidr, maskCidr, port)
|
||||
return newCalculatedRemote(ipNet, port)
|
||||
}
|
||||
|
|
|
@ -1,81 +1,27 @@
|
|||
package nebula
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/slackhq/nebula/iputil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestCalculatedRemoteApply(t *testing.T) {
|
||||
// Test v4 addresses
|
||||
ipNet := netip.MustParsePrefix("192.168.1.0/24")
|
||||
c, err := newCalculatedRemote(ipNet, ipNet, 4242)
|
||||
_, ipNet, err := net.ParseCIDR("192.168.1.0/24")
|
||||
require.NoError(t, err)
|
||||
|
||||
input, err := netip.ParseAddr("10.0.10.182")
|
||||
c, err := newCalculatedRemote(ipNet, 4242)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected, err := netip.ParseAddr("192.168.1.182")
|
||||
require.NoError(t, err)
|
||||
input := iputil.Ip2VpnIp([]byte{10, 0, 10, 182})
|
||||
|
||||
assert.Equal(t, netAddrToProtoV4AddrPort(expected, 4242), c.ApplyV4(input))
|
||||
expected := &Ip4AndPort{
|
||||
Ip: uint32(iputil.Ip2VpnIp([]byte{192, 168, 1, 182})),
|
||||
Port: 4242,
|
||||
}
|
||||
|
||||
// Test v6 addresses
|
||||
ipNet = netip.MustParsePrefix("ffff:ffff:ffff:ffff::0/64")
|
||||
c, err = newCalculatedRemote(ipNet, ipNet, 4242)
|
||||
require.NoError(t, err)
|
||||
|
||||
input, err = netip.ParseAddr("beef:beef:beef:beef:beef:beef:beef:beef")
|
||||
require.NoError(t, err)
|
||||
|
||||
expected, err = netip.ParseAddr("ffff:ffff:ffff:ffff:beef:beef:beef:beef")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, netAddrToProtoV6AddrPort(expected, 4242), c.ApplyV6(input))
|
||||
|
||||
// Test v6 addresses part 2
|
||||
ipNet = netip.MustParsePrefix("ffff:ffff:ffff:ffff:ffff::0/80")
|
||||
c, err = newCalculatedRemote(ipNet, ipNet, 4242)
|
||||
require.NoError(t, err)
|
||||
|
||||
input, err = netip.ParseAddr("beef:beef:beef:beef:beef:beef:beef:beef")
|
||||
require.NoError(t, err)
|
||||
|
||||
expected, err = netip.ParseAddr("ffff:ffff:ffff:ffff:ffff:beef:beef:beef")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, netAddrToProtoV6AddrPort(expected, 4242), c.ApplyV6(input))
|
||||
|
||||
// Test v6 addresses part 2
|
||||
ipNet = netip.MustParsePrefix("ffff:ffff:ffff::0/48")
|
||||
c, err = newCalculatedRemote(ipNet, ipNet, 4242)
|
||||
require.NoError(t, err)
|
||||
|
||||
input, err = netip.ParseAddr("beef:beef:beef:beef:beef:beef:beef:beef")
|
||||
require.NoError(t, err)
|
||||
|
||||
expected, err = netip.ParseAddr("ffff:ffff:ffff:beef:beef:beef:beef:beef")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, netAddrToProtoV6AddrPort(expected, 4242), c.ApplyV6(input))
|
||||
}
|
||||
|
||||
func Test_newCalculatedRemote(t *testing.T) {
|
||||
c, err := newCalculatedRemote(netip.MustParsePrefix("1::1/128"), netip.MustParsePrefix("1.0.0.0/32"), 4242)
|
||||
require.EqualError(t, err, "invalid mask: 1.0.0.0/32 for cidr: 1::1/128")
|
||||
require.Nil(t, c)
|
||||
|
||||
c, err = newCalculatedRemote(netip.MustParsePrefix("1.0.0.0/32"), netip.MustParsePrefix("1::1/128"), 4242)
|
||||
require.EqualError(t, err, "invalid mask: 1::1/128 for cidr: 1.0.0.0/32")
|
||||
require.Nil(t, c)
|
||||
|
||||
c, err = newCalculatedRemote(netip.MustParsePrefix("1.0.0.0/32"), netip.MustParsePrefix("1.0.0.0/32"), 4242)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, c)
|
||||
|
||||
c, err = newCalculatedRemote(netip.MustParsePrefix("1::1/128"), netip.MustParsePrefix("1::1/128"), 4242)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, c)
|
||||
assert.Equal(t, expected, c.Apply(input))
|
||||
}
|
||||
|
|
163
cert.go
Normal file
163
cert.go
Normal file
|
@ -0,0 +1,163 @@
|
|||
package nebula
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"github.com/slackhq/nebula/config"
|
||||
)
|
||||
|
||||
type CertState struct {
|
||||
certificate *cert.NebulaCertificate
|
||||
rawCertificate []byte
|
||||
rawCertificateNoKey []byte
|
||||
publicKey []byte
|
||||
privateKey []byte
|
||||
}
|
||||
|
||||
func NewCertState(certificate *cert.NebulaCertificate, privateKey []byte) (*CertState, error) {
|
||||
// Marshal the certificate to ensure it is valid
|
||||
rawCertificate, err := certificate.Marshal()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid nebula certificate on interface: %s", err)
|
||||
}
|
||||
|
||||
publicKey := certificate.Details.PublicKey
|
||||
cs := &CertState{
|
||||
rawCertificate: rawCertificate,
|
||||
certificate: certificate, // PublicKey has been set to nil above
|
||||
privateKey: privateKey,
|
||||
publicKey: publicKey,
|
||||
}
|
||||
|
||||
cs.certificate.Details.PublicKey = nil
|
||||
rawCertNoKey, err := cs.certificate.Marshal()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error marshalling certificate no key: %s", err)
|
||||
}
|
||||
cs.rawCertificateNoKey = rawCertNoKey
|
||||
// put public key back
|
||||
cs.certificate.Details.PublicKey = cs.publicKey
|
||||
return cs, nil
|
||||
}
|
||||
|
||||
func NewCertStateFromConfig(c *config.C) (*CertState, error) {
|
||||
var pemPrivateKey []byte
|
||||
var err error
|
||||
|
||||
privPathOrPEM := c.GetString("pki.key", "")
|
||||
|
||||
if privPathOrPEM == "" {
|
||||
return nil, errors.New("no pki.key path or PEM data provided")
|
||||
}
|
||||
|
||||
if strings.Contains(privPathOrPEM, "-----BEGIN") {
|
||||
pemPrivateKey = []byte(privPathOrPEM)
|
||||
privPathOrPEM = "<inline>"
|
||||
} else {
|
||||
pemPrivateKey, err = ioutil.ReadFile(privPathOrPEM)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read pki.key file %s: %s", privPathOrPEM, err)
|
||||
}
|
||||
}
|
||||
|
||||
rawKey, _, curve, err := cert.UnmarshalPrivateKey(pemPrivateKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error while unmarshaling pki.key %s: %s", privPathOrPEM, err)
|
||||
}
|
||||
|
||||
var rawCert []byte
|
||||
|
||||
pubPathOrPEM := c.GetString("pki.cert", "")
|
||||
|
||||
if pubPathOrPEM == "" {
|
||||
return nil, errors.New("no pki.cert path or PEM data provided")
|
||||
}
|
||||
|
||||
if strings.Contains(pubPathOrPEM, "-----BEGIN") {
|
||||
rawCert = []byte(pubPathOrPEM)
|
||||
pubPathOrPEM = "<inline>"
|
||||
} else {
|
||||
rawCert, err = ioutil.ReadFile(pubPathOrPEM)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read pki.cert file %s: %s", pubPathOrPEM, err)
|
||||
}
|
||||
}
|
||||
|
||||
nebulaCert, _, err := cert.UnmarshalNebulaCertificateFromPEM(rawCert)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error while unmarshaling pki.cert %s: %s", pubPathOrPEM, err)
|
||||
}
|
||||
|
||||
if nebulaCert.Expired(time.Now()) {
|
||||
return nil, fmt.Errorf("nebula certificate for this host is expired")
|
||||
}
|
||||
|
||||
if len(nebulaCert.Details.Ips) == 0 {
|
||||
return nil, fmt.Errorf("no IPs encoded in certificate")
|
||||
}
|
||||
|
||||
if err = nebulaCert.VerifyPrivateKey(curve, rawKey); err != nil {
|
||||
return nil, fmt.Errorf("private key is not a pair with public key in nebula cert")
|
||||
}
|
||||
|
||||
return NewCertState(nebulaCert, rawKey)
|
||||
}
|
||||
|
||||
func loadCAFromConfig(l *logrus.Logger, c *config.C) (*cert.NebulaCAPool, error) {
|
||||
var rawCA []byte
|
||||
var err error
|
||||
|
||||
caPathOrPEM := c.GetString("pki.ca", "")
|
||||
if caPathOrPEM == "" {
|
||||
return nil, errors.New("no pki.ca path or PEM data provided")
|
||||
}
|
||||
|
||||
if strings.Contains(caPathOrPEM, "-----BEGIN") {
|
||||
rawCA = []byte(caPathOrPEM)
|
||||
|
||||
} else {
|
||||
rawCA, err = ioutil.ReadFile(caPathOrPEM)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read pki.ca file %s: %s", caPathOrPEM, err)
|
||||
}
|
||||
}
|
||||
|
||||
CAs, err := cert.NewCAPoolFromBytes(rawCA)
|
||||
if errors.Is(err, cert.ErrExpired) {
|
||||
var expired int
|
||||
for _, cert := range CAs.CAs {
|
||||
if cert.Expired(time.Now()) {
|
||||
expired++
|
||||
l.WithField("cert", cert).Warn("expired certificate present in CA pool")
|
||||
}
|
||||
}
|
||||
|
||||
if expired >= len(CAs.CAs) {
|
||||
return nil, errors.New("no valid CA certificates present")
|
||||
}
|
||||
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("error while adding CA certificate to CA trust store: %s", err)
|
||||
}
|
||||
|
||||
for _, fp := range c.GetStringSlice("pki.blocklist", []string{}) {
|
||||
l.WithField("fingerprint", fp).Info("Blocklisting cert")
|
||||
CAs.BlocklistFingerprint(fp)
|
||||
}
|
||||
|
||||
// Support deprecated config for at least one minor release to allow for migrations
|
||||
//TODO: remove in 2022 or later
|
||||
for _, fp := range c.GetStringSlice("pki.blacklist", []string{}) {
|
||||
l.WithField("fingerprint", fp).Info("Blocklisting cert")
|
||||
l.Warn("pki.blacklist is deprecated and will not be supported in a future release. Please migrate your config to use pki.blocklist")
|
||||
CAs.BlocklistFingerprint(fp)
|
||||
}
|
||||
|
||||
return CAs, nil
|
||||
}
|
|
@ -1,7 +1,7 @@
|
|||
GO111MODULE = on
|
||||
export GO111MODULE
|
||||
|
||||
cert_v1.pb.go: cert_v1.proto .FORCE
|
||||
cert.pb.go: cert.proto .FORCE
|
||||
go build google.golang.org/protobuf/cmd/protoc-gen-go
|
||||
PATH="$(CURDIR):$(PATH)" protoc --go_out=. --go_opt=paths=source_relative $<
|
||||
rm protoc-gen-go
|
||||
|
|
|
@ -2,25 +2,14 @@
|
|||
|
||||
This is a library for interacting with `nebula` style certificates and authorities.
|
||||
|
||||
There are now 2 versions of `nebula` certificates:
|
||||
A `protobuf` definition of the certificate format is also included
|
||||
|
||||
## v1
|
||||
### Compiling the protobuf definition
|
||||
|
||||
This version is deprecated.
|
||||
|
||||
A `protobuf` definition of the certificate format is included at `cert_v1.proto`
|
||||
|
||||
To compile the definition you will need `protoc` installed.
|
||||
Make sure you have `protoc` installed.
|
||||
|
||||
To compile for `go` with the same version of protobuf specified in go.mod:
|
||||
|
||||
```bash
|
||||
make proto
|
||||
make
|
||||
```
|
||||
|
||||
## v2
|
||||
|
||||
This is the latest version which uses asn.1 DER encoding. It can support ipv4 and ipv6 and tolerate
|
||||
future certificate changes better than v1.
|
||||
|
||||
`cert_v2.asn1` defines the wire format and can be used to compile marshalers.
|
52
cert/asn1.go
52
cert/asn1.go
|
@ -1,52 +0,0 @@
|
|||
package cert
|
||||
|
||||
import (
|
||||
"golang.org/x/crypto/cryptobyte"
|
||||
"golang.org/x/crypto/cryptobyte/asn1"
|
||||
)
|
||||
|
||||
// readOptionalASN1Boolean reads an asn.1 boolean with a specific tag instead of a asn.1 tag wrapping a boolean with a value
|
||||
// https://github.com/golang/go/issues/64811#issuecomment-1944446920
|
||||
func readOptionalASN1Boolean(b *cryptobyte.String, out *bool, tag asn1.Tag, defaultValue bool) bool {
|
||||
var present bool
|
||||
var child cryptobyte.String
|
||||
if !b.ReadOptionalASN1(&child, &present, tag) {
|
||||
return false
|
||||
}
|
||||
|
||||
if !present {
|
||||
*out = defaultValue
|
||||
return true
|
||||
}
|
||||
|
||||
// Ensure we have 1 byte
|
||||
if len(child) == 1 {
|
||||
*out = child[0] > 0
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// readOptionalASN1Byte reads an asn.1 uint8 with a specific tag instead of a asn.1 tag wrapping a uint8 with a value
|
||||
// Similar issue as with readOptionalASN1Boolean
|
||||
func readOptionalASN1Byte(b *cryptobyte.String, out *byte, tag asn1.Tag, defaultValue byte) bool {
|
||||
var present bool
|
||||
var child cryptobyte.String
|
||||
if !b.ReadOptionalASN1(&child, &present, tag) {
|
||||
return false
|
||||
}
|
||||
|
||||
if !present {
|
||||
*out = defaultValue
|
||||
return true
|
||||
}
|
||||
|
||||
// Ensure we have 1 byte
|
||||
if len(child) == 1 {
|
||||
*out = child[0]
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
140
cert/ca.go
Normal file
140
cert/ca.go
Normal file
|
@ -0,0 +1,140 @@
|
|||
package cert
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type NebulaCAPool struct {
|
||||
CAs map[string]*NebulaCertificate
|
||||
certBlocklist map[string]struct{}
|
||||
}
|
||||
|
||||
// NewCAPool creates a CAPool
|
||||
func NewCAPool() *NebulaCAPool {
|
||||
ca := NebulaCAPool{
|
||||
CAs: make(map[string]*NebulaCertificate),
|
||||
certBlocklist: make(map[string]struct{}),
|
||||
}
|
||||
|
||||
return &ca
|
||||
}
|
||||
|
||||
// NewCAPoolFromBytes will create a new CA pool from the provided
|
||||
// input bytes, which must be a PEM-encoded set of nebula certificates.
|
||||
// If the pool contains any expired certificates, an ErrExpired will be
|
||||
// returned along with the pool. The caller must handle any such errors.
|
||||
func NewCAPoolFromBytes(caPEMs []byte) (*NebulaCAPool, error) {
|
||||
pool := NewCAPool()
|
||||
var err error
|
||||
var expired bool
|
||||
for {
|
||||
caPEMs, err = pool.AddCACertificate(caPEMs)
|
||||
if errors.Is(err, ErrExpired) {
|
||||
expired = true
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(caPEMs) == 0 || strings.TrimSpace(string(caPEMs)) == "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if expired {
|
||||
return pool, ErrExpired
|
||||
}
|
||||
|
||||
return pool, nil
|
||||
}
|
||||
|
||||
// AddCACertificate verifies a Nebula CA certificate and adds it to the pool
|
||||
// Only the first pem encoded object will be consumed, any remaining bytes are returned.
|
||||
// Parsed certificates will be verified and must be a CA
|
||||
func (ncp *NebulaCAPool) AddCACertificate(pemBytes []byte) ([]byte, error) {
|
||||
c, pemBytes, err := UnmarshalNebulaCertificateFromPEM(pemBytes)
|
||||
if err != nil {
|
||||
return pemBytes, err
|
||||
}
|
||||
|
||||
if !c.Details.IsCA {
|
||||
return pemBytes, fmt.Errorf("%s: %w", c.Details.Name, ErrNotCA)
|
||||
}
|
||||
|
||||
if !c.CheckSignature(c.Details.PublicKey) {
|
||||
return pemBytes, fmt.Errorf("%s: %w", c.Details.Name, ErrNotSelfSigned)
|
||||
}
|
||||
|
||||
sum, err := c.Sha256Sum()
|
||||
if err != nil {
|
||||
return pemBytes, fmt.Errorf("could not calculate shasum for provided CA; error: %s; %s", err, c.Details.Name)
|
||||
}
|
||||
|
||||
ncp.CAs[sum] = c
|
||||
if c.Expired(time.Now()) {
|
||||
return pemBytes, fmt.Errorf("%s: %w", c.Details.Name, ErrExpired)
|
||||
}
|
||||
|
||||
return pemBytes, nil
|
||||
}
|
||||
|
||||
// BlocklistFingerprint adds a cert fingerprint to the blocklist
|
||||
func (ncp *NebulaCAPool) BlocklistFingerprint(f string) {
|
||||
ncp.certBlocklist[f] = struct{}{}
|
||||
}
|
||||
|
||||
// ResetCertBlocklist removes all previously blocklisted cert fingerprints
|
||||
func (ncp *NebulaCAPool) ResetCertBlocklist() {
|
||||
ncp.certBlocklist = make(map[string]struct{})
|
||||
}
|
||||
|
||||
// NOTE: This uses an internal cache for Sha256Sum() that will not be invalidated
|
||||
// automatically if you manually change any fields in the NebulaCertificate.
|
||||
func (ncp *NebulaCAPool) IsBlocklisted(c *NebulaCertificate) bool {
|
||||
return ncp.isBlocklistedWithCache(c, false)
|
||||
}
|
||||
|
||||
// IsBlocklisted returns true if the fingerprint fails to generate or has been explicitly blocklisted
|
||||
func (ncp *NebulaCAPool) isBlocklistedWithCache(c *NebulaCertificate, useCache bool) bool {
|
||||
h, err := c.sha256SumWithCache(useCache)
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
|
||||
if _, ok := ncp.certBlocklist[h]; ok {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// GetCAForCert attempts to return the signing certificate for the provided certificate.
|
||||
// No signature validation is performed
|
||||
func (ncp *NebulaCAPool) GetCAForCert(c *NebulaCertificate) (*NebulaCertificate, error) {
|
||||
if c.Details.Issuer == "" {
|
||||
return nil, fmt.Errorf("no issuer in certificate")
|
||||
}
|
||||
|
||||
signer, ok := ncp.CAs[c.Details.Issuer]
|
||||
if ok {
|
||||
return signer, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("could not find ca for the certificate")
|
||||
}
|
||||
|
||||
// GetFingerprints returns an array of trusted CA fingerprints
|
||||
func (ncp *NebulaCAPool) GetFingerprints() []string {
|
||||
fp := make([]string, len(ncp.CAs))
|
||||
|
||||
i := 0
|
||||
for k := range ncp.CAs {
|
||||
fp[i] = k
|
||||
i++
|
||||
}
|
||||
|
||||
return fp
|
||||
}
|
296
cert/ca_pool.go
296
cert/ca_pool.go
|
@ -1,296 +0,0 @@
|
|||
package cert
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type CAPool struct {
|
||||
CAs map[string]*CachedCertificate
|
||||
certBlocklist map[string]struct{}
|
||||
}
|
||||
|
||||
// NewCAPool creates an empty CAPool
|
||||
func NewCAPool() *CAPool {
|
||||
ca := CAPool{
|
||||
CAs: make(map[string]*CachedCertificate),
|
||||
certBlocklist: make(map[string]struct{}),
|
||||
}
|
||||
|
||||
return &ca
|
||||
}
|
||||
|
||||
// NewCAPoolFromPEM will create a new CA pool from the provided
|
||||
// input bytes, which must be a PEM-encoded set of nebula certificates.
|
||||
// If the pool contains any expired certificates, an ErrExpired will be
|
||||
// returned along with the pool. The caller must handle any such errors.
|
||||
func NewCAPoolFromPEM(caPEMs []byte) (*CAPool, error) {
|
||||
pool := NewCAPool()
|
||||
var err error
|
||||
var expired bool
|
||||
for {
|
||||
caPEMs, err = pool.AddCAFromPEM(caPEMs)
|
||||
if errors.Is(err, ErrExpired) {
|
||||
expired = true
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(caPEMs) == 0 || strings.TrimSpace(string(caPEMs)) == "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if expired {
|
||||
return pool, ErrExpired
|
||||
}
|
||||
|
||||
return pool, nil
|
||||
}
|
||||
|
||||
// AddCAFromPEM verifies a Nebula CA certificate and adds it to the pool.
|
||||
// Only the first pem encoded object will be consumed, any remaining bytes are returned.
|
||||
// Parsed certificates will be verified and must be a CA
|
||||
func (ncp *CAPool) AddCAFromPEM(pemBytes []byte) ([]byte, error) {
|
||||
c, pemBytes, err := UnmarshalCertificateFromPEM(pemBytes)
|
||||
if err != nil {
|
||||
return pemBytes, err
|
||||
}
|
||||
|
||||
err = ncp.AddCA(c)
|
||||
if err != nil {
|
||||
return pemBytes, err
|
||||
}
|
||||
|
||||
return pemBytes, nil
|
||||
}
|
||||
|
||||
// AddCA verifies a Nebula CA certificate and adds it to the pool.
|
||||
func (ncp *CAPool) AddCA(c Certificate) error {
|
||||
if !c.IsCA() {
|
||||
return fmt.Errorf("%s: %w", c.Name(), ErrNotCA)
|
||||
}
|
||||
|
||||
if !c.CheckSignature(c.PublicKey()) {
|
||||
return fmt.Errorf("%s: %w", c.Name(), ErrNotSelfSigned)
|
||||
}
|
||||
|
||||
sum, err := c.Fingerprint()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not calculate fingerprint for provided CA; error: %w; %s", err, c.Name())
|
||||
}
|
||||
|
||||
cc := &CachedCertificate{
|
||||
Certificate: c,
|
||||
Fingerprint: sum,
|
||||
InvertedGroups: make(map[string]struct{}),
|
||||
}
|
||||
|
||||
for _, g := range c.Groups() {
|
||||
cc.InvertedGroups[g] = struct{}{}
|
||||
}
|
||||
|
||||
ncp.CAs[sum] = cc
|
||||
|
||||
if c.Expired(time.Now()) {
|
||||
return fmt.Errorf("%s: %w", c.Name(), ErrExpired)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// BlocklistFingerprint adds a cert fingerprint to the blocklist
|
||||
func (ncp *CAPool) BlocklistFingerprint(f string) {
|
||||
ncp.certBlocklist[f] = struct{}{}
|
||||
}
|
||||
|
||||
// ResetCertBlocklist removes all previously blocklisted cert fingerprints
|
||||
func (ncp *CAPool) ResetCertBlocklist() {
|
||||
ncp.certBlocklist = make(map[string]struct{})
|
||||
}
|
||||
|
||||
// IsBlocklisted tests the provided fingerprint against the pools blocklist.
|
||||
// Returns true if the fingerprint is blocked.
|
||||
func (ncp *CAPool) IsBlocklisted(fingerprint string) bool {
|
||||
if _, ok := ncp.certBlocklist[fingerprint]; ok {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// VerifyCertificate verifies the certificate is valid and is signed by a trusted CA in the pool.
|
||||
// If the certificate is valid then the returned CachedCertificate can be used in subsequent verification attempts
|
||||
// to increase performance.
|
||||
func (ncp *CAPool) VerifyCertificate(now time.Time, c Certificate) (*CachedCertificate, error) {
|
||||
if c == nil {
|
||||
return nil, fmt.Errorf("no certificate")
|
||||
}
|
||||
fp, err := c.Fingerprint()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not calculate fingerprint to verify: %w", err)
|
||||
}
|
||||
|
||||
signer, err := ncp.verify(c, now, fp, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cc := CachedCertificate{
|
||||
Certificate: c,
|
||||
InvertedGroups: make(map[string]struct{}),
|
||||
Fingerprint: fp,
|
||||
signerFingerprint: signer.Fingerprint,
|
||||
}
|
||||
|
||||
for _, g := range c.Groups() {
|
||||
cc.InvertedGroups[g] = struct{}{}
|
||||
}
|
||||
|
||||
return &cc, nil
|
||||
}
|
||||
|
||||
// VerifyCachedCertificate is the same as VerifyCertificate other than it operates on a pre-verified structure and
// is a cheaper operation to perform as a result.
// Passing the cached signer fingerprint lets verify skip the signature and
// CA-constraint checks unless the trusted root has changed.
func (ncp *CAPool) VerifyCachedCertificate(now time.Time, c *CachedCertificate) error {
	_, err := ncp.verify(c.Certificate, now, c.Fingerprint, c.signerFingerprint)
	return err
}
|
||||
|
||||
// verify is the shared validation path behind VerifyCertificate and
// VerifyCachedCertificate. The check order below is deliberate and fixes
// which error a caller sees when several conditions hold at once:
// blocklist, CA lookup, root expiry, cert expiry, then (for uncached
// certs only) signature and CA constraints.
func (ncp *CAPool) verify(c Certificate, now time.Time, certFp string, signerFp string) (*CachedCertificate, error) {
	if ncp.IsBlocklisted(certFp) {
		return nil, ErrBlockListed
	}

	signer, err := ncp.GetCAForCert(c)
	if err != nil {
		return nil, err
	}

	if signer.Certificate.Expired(now) {
		return nil, ErrRootExpired
	}

	if c.Expired(now) {
		return nil, ErrExpired
	}

	// If we are checking a cached certificate then we can bail early here
	// Either the root is no longer trusted or everything is fine.
	// A non-empty signerFp marks the cached path: the signature and
	// constraints were already validated when the cache entry was built.
	if len(signerFp) > 0 {
		if signerFp != signer.Fingerprint {
			return nil, ErrFingerprintMismatch
		}
		return signer, nil
	}
	if !c.CheckSignature(signer.Certificate.PublicKey()) {
		return nil, ErrSignatureMismatch
	}

	err = CheckCAConstraints(signer.Certificate, c)
	if err != nil {
		return nil, err
	}

	return signer, nil
}
|
||||
|
||||
// GetCAForCert attempts to return the signing certificate for the provided certificate.
|
||||
// No signature validation is performed
|
||||
func (ncp *CAPool) GetCAForCert(c Certificate) (*CachedCertificate, error) {
|
||||
issuer := c.Issuer()
|
||||
if issuer == "" {
|
||||
return nil, fmt.Errorf("no issuer in certificate")
|
||||
}
|
||||
|
||||
signer, ok := ncp.CAs[issuer]
|
||||
if ok {
|
||||
return signer, nil
|
||||
}
|
||||
|
||||
return nil, ErrCaNotFound
|
||||
}
|
||||
|
||||
// GetFingerprints returns an array of trusted CA fingerprints
|
||||
func (ncp *CAPool) GetFingerprints() []string {
|
||||
fp := make([]string, len(ncp.CAs))
|
||||
|
||||
i := 0
|
||||
for k := range ncp.CAs {
|
||||
fp[i] = k
|
||||
i++
|
||||
}
|
||||
|
||||
return fp
|
||||
}
|
||||
|
||||
// CheckCAConstraints returns an error if the sub certificate violates constraints present in the signer certificate.
// Thin forwarder that extracts the sub certificate's validity window,
// groups, networks and unsafe networks for checkCAConstraints.
func CheckCAConstraints(signer Certificate, sub Certificate) error {
	return checkCAConstraints(signer, sub.NotBefore(), sub.NotAfter(), sub.Groups(), sub.Networks(), sub.UnsafeNetworks())
}
|
||||
|
||||
// checkCAConstraints is a very generic function allowing both Certificates and TBSCertificates to be tested.
|
||||
func checkCAConstraints(signer Certificate, notBefore, notAfter time.Time, groups []string, networks, unsafeNetworks []netip.Prefix) error {
|
||||
// Make sure this cert isn't valid after the root
|
||||
if notAfter.After(signer.NotAfter()) {
|
||||
return fmt.Errorf("certificate expires after signing certificate")
|
||||
}
|
||||
|
||||
// Make sure this cert wasn't valid before the root
|
||||
if notBefore.Before(signer.NotBefore()) {
|
||||
return fmt.Errorf("certificate is valid before the signing certificate")
|
||||
}
|
||||
|
||||
// If the signer has a limited set of groups make sure the cert only contains a subset
|
||||
signerGroups := signer.Groups()
|
||||
if len(signerGroups) > 0 {
|
||||
for _, g := range groups {
|
||||
if !slices.Contains(signerGroups, g) {
|
||||
return fmt.Errorf("certificate contained a group not present on the signing ca: %s", g)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If the signer has a limited set of ip ranges to issue from make sure the cert only contains a subset
|
||||
signingNetworks := signer.Networks()
|
||||
if len(signingNetworks) > 0 {
|
||||
for _, certNetwork := range networks {
|
||||
found := false
|
||||
for _, signingNetwork := range signingNetworks {
|
||||
if signingNetwork.Contains(certNetwork.Addr()) && signingNetwork.Bits() <= certNetwork.Bits() {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return fmt.Errorf("certificate contained a network assignment outside the limitations of the signing ca: %s", certNetwork.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If the signer has a limited set of subnet ranges to issue from make sure the cert only contains a subset
|
||||
signingUnsafeNetworks := signer.UnsafeNetworks()
|
||||
if len(signingUnsafeNetworks) > 0 {
|
||||
for _, certUnsafeNetwork := range unsafeNetworks {
|
||||
found := false
|
||||
for _, caNetwork := range signingUnsafeNetworks {
|
||||
if caNetwork.Contains(certUnsafeNetwork.Addr()) && caNetwork.Bits() <= certUnsafeNetwork.Bits() {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return fmt.Errorf("certificate contained an unsafe network assignment outside the limitations of the signing ca: %s", certUnsafeNetwork.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,560 +0,0 @@
|
|||
package cert
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestNewCAPoolFromBytes exercises CA pool construction from PEM bundles:
// multiple certs with and without separating blank lines, an expired cert
// (alone and mixed with valid certs), and a P256 cert. Map keys in the
// assertions are the SHA-256 fingerprints of the respective certificates.
func TestNewCAPoolFromBytes(t *testing.T) {
	noNewLines := `
# Current provisional, Remove once everything moves over to the real root.
-----BEGIN NEBULA CERTIFICATE-----
Cj4KDm5lYnVsYSByb290IGNhKM0cMM24zPCvBzogV24YEw5YiqeI/oYo8XXFsoo+
PBmiOafNJhLacf9rsspAARJAz9OAnh8TKAUKix1kKVMyQU4iM3LsFfZRf6ODWXIf
2qWMpB6fpd3PSoVYziPoOt2bIHIFLlgRLPJz3I3xBEdBCQ==
-----END NEBULA CERTIFICATE-----
# root-ca01
-----BEGIN NEBULA CERTIFICATE-----
CkEKEW5lYnVsYSByb290IGNhIDAxKM0cMM24zPCvBzogPzbWTxt8ZgXPQEwup7Br
BrtIt1O0q5AuTRT3+t2x1VJAARJAZ+2ib23qBXjdy49oU1YysrwuKkWWKrtJ7Jye
rFBQpDXikOukhQD/mfkloFwJ+Yjsfru7IpTN4ZfjXL+kN/2sCA==
-----END NEBULA CERTIFICATE-----
`

	withNewLines := `
# Current provisional, Remove once everything moves over to the real root.

-----BEGIN NEBULA CERTIFICATE-----
Cj4KDm5lYnVsYSByb290IGNhKM0cMM24zPCvBzogV24YEw5YiqeI/oYo8XXFsoo+
PBmiOafNJhLacf9rsspAARJAz9OAnh8TKAUKix1kKVMyQU4iM3LsFfZRf6ODWXIf
2qWMpB6fpd3PSoVYziPoOt2bIHIFLlgRLPJz3I3xBEdBCQ==
-----END NEBULA CERTIFICATE-----

# root-ca01


-----BEGIN NEBULA CERTIFICATE-----
CkEKEW5lYnVsYSByb290IGNhIDAxKM0cMM24zPCvBzogPzbWTxt8ZgXPQEwup7Br
BrtIt1O0q5AuTRT3+t2x1VJAARJAZ+2ib23qBXjdy49oU1YysrwuKkWWKrtJ7Jye
rFBQpDXikOukhQD/mfkloFwJ+Yjsfru7IpTN4ZfjXL+kN/2sCA==
-----END NEBULA CERTIFICATE-----

`

	expired := `
# expired certificate
-----BEGIN NEBULA CERTIFICATE-----
CjMKB2V4cGlyZWQozRwwzRw6ICJSG94CqX8wn5I65Pwn25V6HftVfWeIySVtp2DA
7TY/QAESQMaAk5iJT5EnQwK524ZaaHGEJLUqqbh5yyOHhboIGiVTWkFeH3HccTW8
Tq5a8AyWDQdfXbtEZ1FwabeHfH5Asw0=
-----END NEBULA CERTIFICATE-----
`

	p256 := `
# p256 certificate
-----BEGIN NEBULA CERTIFICATE-----
CmQKEG5lYnVsYSBQMjU2IHRlc3QozRwwzbjM8K8HOkEEdrmmg40zQp44AkMq6DZp
k+coOv04r+zh33ISyhbsafnYduN17p2eD7CmHvHuerguXD9f32gcxo/KsFCKEjMe
+0ABoAYBEkcwRQIgVoTg38L7uWku9xQgsr06kxZ/viQLOO/w1Qj1vFUEnhcCIQCq
75SjTiV92kv/1GcbT3wWpAZQQDBiUHVMVmh1822szA==
-----END NEBULA CERTIFICATE-----
`

	// Only the details.name field of these fixtures is consulted below.
	rootCA := certificateV1{
		details: detailsV1{
			name: "nebula root ca",
		},
	}

	rootCA01 := certificateV1{
		details: detailsV1{
			name: "nebula root ca 01",
		},
	}

	rootCAP256 := certificateV1{
		details: detailsV1{
			name: "nebula P256 test",
		},
	}

	p, err := NewCAPoolFromPEM([]byte(noNewLines))
	require.NoError(t, err)
	assert.Equal(t, p.CAs["ce4e6c7a596996eb0d82a8875f0f0137a4b53ce22d2421c9fd7150e7a26f6300"].Certificate.Name(), rootCA.details.name)
	assert.Equal(t, p.CAs["04c585fcd9a49b276df956a22b7ebea3bf23f1fca5a17c0b56ce2e626631969e"].Certificate.Name(), rootCA01.details.name)

	pp, err := NewCAPoolFromPEM([]byte(withNewLines))
	require.NoError(t, err)
	assert.Equal(t, pp.CAs["ce4e6c7a596996eb0d82a8875f0f0137a4b53ce22d2421c9fd7150e7a26f6300"].Certificate.Name(), rootCA.details.name)
	assert.Equal(t, pp.CAs["04c585fcd9a49b276df956a22b7ebea3bf23f1fca5a17c0b56ce2e626631969e"].Certificate.Name(), rootCA01.details.name)

	// expired cert, no valid certs
	// NOTE: the pool still contains the expired CA even though an error is returned.
	ppp, err := NewCAPoolFromPEM([]byte(expired))
	assert.Equal(t, ErrExpired, err)
	assert.Equal(t, "expired", ppp.CAs["c39b35a0e8f246203fe4f32b9aa8bfd155f1ae6a6be9d78370641e43397f48f5"].Certificate.Name())

	// expired cert, with valid certs
	pppp, err := NewCAPoolFromPEM(append([]byte(expired), noNewLines...))
	assert.Equal(t, ErrExpired, err)
	assert.Equal(t, pppp.CAs["ce4e6c7a596996eb0d82a8875f0f0137a4b53ce22d2421c9fd7150e7a26f6300"].Certificate.Name(), rootCA.details.name)
	assert.Equal(t, pppp.CAs["04c585fcd9a49b276df956a22b7ebea3bf23f1fca5a17c0b56ce2e626631969e"].Certificate.Name(), rootCA01.details.name)
	assert.Equal(t, "expired", pppp.CAs["c39b35a0e8f246203fe4f32b9aa8bfd155f1ae6a6be9d78370641e43397f48f5"].Certificate.Name())
	assert.Len(t, pppp.CAs, 3)

	ppppp, err := NewCAPoolFromPEM([]byte(p256))
	require.NoError(t, err)
	assert.Equal(t, ppppp.CAs["552bf7d99bec1fc775a0e4c324bf6d8f789b3078f1919c7960d2e5e0c351ee97"].Certificate.Name(), rootCAP256.details.name)
	assert.Len(t, ppppp.CAs, 1)
}
|
||||
|
||||
// TestCertificateV1_Verify covers blocklisting, blocklist reset, root
// expiry, validity-window constraints and group constraints for v1 certs
// signed with Curve25519.
func TestCertificateV1_Verify(t *testing.T) {
	ca, _, caKey, _ := NewTestCaCert(Version1, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, nil)
	c, _, _, _ := NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test cert", time.Now(), time.Now().Add(5*time.Minute), nil, nil, nil)

	caPool := NewCAPool()
	require.NoError(t, caPool.AddCA(ca))

	f, err := c.Fingerprint()
	require.NoError(t, err)
	caPool.BlocklistFingerprint(f)

	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.EqualError(t, err, "certificate is in the block list")

	caPool.ResetCertBlocklist()
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	_, err = caPool.VerifyCertificate(time.Now().Add(time.Hour*1000), c)
	require.EqualError(t, err, "root certificate is expired")

	// NewTestCert panics when the requested cert violates CA constraints.
	assert.PanicsWithError(t, "certificate is valid before the signing certificate", func() {
		NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test cert2", time.Time{}, time.Time{}, nil, nil, nil)
	})

	// Test group assertion
	ca, _, caKey, _ = NewTestCaCert(Version1, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{"test1", "test2"})
	caPem, err := ca.MarshalPEM()
	require.NoError(t, err)

	caPool = NewCAPool()
	b, err := caPool.AddCAFromPEM(caPem)
	require.NoError(t, err)
	assert.Empty(t, b)

	assert.PanicsWithError(t, "certificate contained a group not present on the signing ca: bad", func() {
		NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, []string{"test1", "bad"})
	})

	c, _, _, _ = NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test2", time.Now(), time.Now().Add(5*time.Minute), nil, nil, []string{"test1"})
	// NOTE(review): re-checks the earlier err; NewTestCert's error return is discarded above.
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)
}
|
||||
|
||||
// TestCertificateV1_VerifyP256 mirrors TestCertificateV1_Verify for v1
// certificates signed with the P256 curve.
func TestCertificateV1_VerifyP256(t *testing.T) {
	ca, _, caKey, _ := NewTestCaCert(Version1, Curve_P256, time.Now(), time.Now().Add(10*time.Minute), nil, nil, nil)
	c, _, _, _ := NewTestCert(Version1, Curve_P256, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, nil)

	caPool := NewCAPool()
	require.NoError(t, caPool.AddCA(ca))

	f, err := c.Fingerprint()
	require.NoError(t, err)
	caPool.BlocklistFingerprint(f)

	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.EqualError(t, err, "certificate is in the block list")

	caPool.ResetCertBlocklist()
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	_, err = caPool.VerifyCertificate(time.Now().Add(time.Hour*1000), c)
	require.EqualError(t, err, "root certificate is expired")

	// NewTestCert panics when the requested cert violates CA constraints.
	assert.PanicsWithError(t, "certificate is valid before the signing certificate", func() {
		NewTestCert(Version1, Curve_P256, ca, caKey, "test", time.Time{}, time.Time{}, nil, nil, nil)
	})

	// Test group assertion
	ca, _, caKey, _ = NewTestCaCert(Version1, Curve_P256, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{"test1", "test2"})
	caPem, err := ca.MarshalPEM()
	require.NoError(t, err)

	caPool = NewCAPool()
	b, err := caPool.AddCAFromPEM(caPem)
	require.NoError(t, err)
	assert.Empty(t, b)

	assert.PanicsWithError(t, "certificate contained a group not present on the signing ca: bad", func() {
		NewTestCert(Version1, Curve_P256, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, []string{"test1", "bad"})
	})

	c, _, _, _ = NewTestCert(Version1, Curve_P256, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, []string{"test1"})
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)
}
|
||||
|
||||
// TestCertificateV1_Verify_IPs checks enforcement of the CA's network (IP)
// constraints for v1 certs: addresses outside the CA ranges or with wider
// masks panic at cert creation, while contained and exact-match assignments
// verify cleanly.
func TestCertificateV1_Verify_IPs(t *testing.T) {
	caIp1 := mustParsePrefixUnmapped("10.0.0.0/16")
	caIp2 := mustParsePrefixUnmapped("192.168.0.0/24")
	ca, _, caKey, _ := NewTestCaCert(Version1, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), []netip.Prefix{caIp1, caIp2}, nil, []string{"test"})

	caPem, err := ca.MarshalPEM()
	require.NoError(t, err)

	caPool := NewCAPool()
	b, err := caPool.AddCAFromPEM(caPem)
	require.NoError(t, err)
	assert.Empty(t, b)

	// ip is outside the network
	cIp1 := mustParsePrefixUnmapped("10.1.0.0/24")
	cIp2 := mustParsePrefixUnmapped("192.168.0.1/16")
	assert.PanicsWithError(t, "certificate contained a network assignment outside the limitations of the signing ca: 10.1.0.0/24", func() {
		NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
	})

	// ip is outside the network reversed order of above
	cIp1 = mustParsePrefixUnmapped("192.168.0.1/24")
	cIp2 = mustParsePrefixUnmapped("10.1.0.0/24")
	assert.PanicsWithError(t, "certificate contained a network assignment outside the limitations of the signing ca: 10.1.0.0/24", func() {
		NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
	})

	// ip is within the network but mask is outside
	cIp1 = mustParsePrefixUnmapped("10.0.1.0/15")
	cIp2 = mustParsePrefixUnmapped("192.168.0.1/24")
	assert.PanicsWithError(t, "certificate contained a network assignment outside the limitations of the signing ca: 10.0.1.0/15", func() {
		NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
	})

	// ip is within the network but mask is outside reversed order of above
	cIp1 = mustParsePrefixUnmapped("192.168.0.1/24")
	cIp2 = mustParsePrefixUnmapped("10.0.1.0/15")
	assert.PanicsWithError(t, "certificate contained a network assignment outside the limitations of the signing ca: 10.0.1.0/15", func() {
		NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
	})

	// ip and mask are within the network
	cIp1 = mustParsePrefixUnmapped("10.0.1.0/16")
	cIp2 = mustParsePrefixUnmapped("192.168.0.1/25")
	c, _, _, _ := NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	// Exact matches
	c, _, _, _ = NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{caIp1, caIp2}, nil, []string{"test"})
	// NOTE(review): re-checks the earlier err; NewTestCert's error return is discarded above.
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	// Exact matches reversed
	c, _, _, _ = NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{caIp2, caIp1}, nil, []string{"test"})
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	// Exact matches reversed with just 1
	c, _, _, _ = NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{caIp1}, nil, []string{"test"})
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)
}
|
||||
|
||||
// TestCertificateV1_Verify_Subnets is the unsafe-network (subnet) analogue
// of TestCertificateV1_Verify_IPs: out-of-range or wider-mask unsafe
// networks panic at cert creation, contained/exact assignments verify.
func TestCertificateV1_Verify_Subnets(t *testing.T) {
	caIp1 := mustParsePrefixUnmapped("10.0.0.0/16")
	caIp2 := mustParsePrefixUnmapped("192.168.0.0/24")
	ca, _, caKey, _ := NewTestCaCert(Version1, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, []netip.Prefix{caIp1, caIp2}, []string{"test"})

	caPem, err := ca.MarshalPEM()
	require.NoError(t, err)

	caPool := NewCAPool()
	b, err := caPool.AddCAFromPEM(caPem)
	require.NoError(t, err)
	assert.Empty(t, b)

	// ip is outside the network
	cIp1 := mustParsePrefixUnmapped("10.1.0.0/24")
	cIp2 := mustParsePrefixUnmapped("192.168.0.1/16")
	assert.PanicsWithError(t, "certificate contained an unsafe network assignment outside the limitations of the signing ca: 10.1.0.0/24", func() {
		NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
	})

	// ip is outside the network reversed order of above
	cIp1 = mustParsePrefixUnmapped("192.168.0.1/24")
	cIp2 = mustParsePrefixUnmapped("10.1.0.0/24")
	assert.PanicsWithError(t, "certificate contained an unsafe network assignment outside the limitations of the signing ca: 10.1.0.0/24", func() {
		NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
	})

	// ip is within the network but mask is outside
	cIp1 = mustParsePrefixUnmapped("10.0.1.0/15")
	cIp2 = mustParsePrefixUnmapped("192.168.0.1/24")
	assert.PanicsWithError(t, "certificate contained an unsafe network assignment outside the limitations of the signing ca: 10.0.1.0/15", func() {
		NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
	})

	// ip is within the network but mask is outside reversed order of above
	cIp1 = mustParsePrefixUnmapped("192.168.0.1/24")
	cIp2 = mustParsePrefixUnmapped("10.0.1.0/15")
	assert.PanicsWithError(t, "certificate contained an unsafe network assignment outside the limitations of the signing ca: 10.0.1.0/15", func() {
		NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
	})

	// ip and mask are within the network
	cIp1 = mustParsePrefixUnmapped("10.0.1.0/16")
	cIp2 = mustParsePrefixUnmapped("192.168.0.1/25")
	c, _, _, _ := NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
	// NOTE(review): re-checks the earlier err; NewTestCert's error return is discarded above.
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	// Exact matches
	c, _, _, _ = NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{caIp1, caIp2}, []string{"test"})
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	// Exact matches reversed
	c, _, _, _ = NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{caIp2, caIp1}, []string{"test"})
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	// Exact matches reversed with just 1
	c, _, _, _ = NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{caIp1}, []string{"test"})
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)
}
|
||||
|
||||
// TestCertificateV2_Verify mirrors TestCertificateV1_Verify for v2
// certificates signed with Curve25519.
func TestCertificateV2_Verify(t *testing.T) {
	ca, _, caKey, _ := NewTestCaCert(Version2, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, nil)
	c, _, _, _ := NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test cert", time.Now(), time.Now().Add(5*time.Minute), nil, nil, nil)

	caPool := NewCAPool()
	require.NoError(t, caPool.AddCA(ca))

	f, err := c.Fingerprint()
	require.NoError(t, err)
	caPool.BlocklistFingerprint(f)

	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.EqualError(t, err, "certificate is in the block list")

	caPool.ResetCertBlocklist()
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	_, err = caPool.VerifyCertificate(time.Now().Add(time.Hour*1000), c)
	require.EqualError(t, err, "root certificate is expired")

	// NewTestCert panics when the requested cert violates CA constraints.
	assert.PanicsWithError(t, "certificate is valid before the signing certificate", func() {
		NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test cert2", time.Time{}, time.Time{}, nil, nil, nil)
	})

	// Test group assertion
	ca, _, caKey, _ = NewTestCaCert(Version2, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{"test1", "test2"})
	caPem, err := ca.MarshalPEM()
	require.NoError(t, err)

	caPool = NewCAPool()
	b, err := caPool.AddCAFromPEM(caPem)
	require.NoError(t, err)
	assert.Empty(t, b)

	assert.PanicsWithError(t, "certificate contained a group not present on the signing ca: bad", func() {
		NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, []string{"test1", "bad"})
	})

	c, _, _, _ = NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test2", time.Now(), time.Now().Add(5*time.Minute), nil, nil, []string{"test1"})
	// NOTE(review): re-checks the earlier err; NewTestCert's error return is discarded above.
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)
}
|
||||
|
||||
// TestCertificateV2_VerifyP256 mirrors TestCertificateV1_VerifyP256 for v2
// certificates signed with the P256 curve.
func TestCertificateV2_VerifyP256(t *testing.T) {
	ca, _, caKey, _ := NewTestCaCert(Version2, Curve_P256, time.Now(), time.Now().Add(10*time.Minute), nil, nil, nil)
	c, _, _, _ := NewTestCert(Version2, Curve_P256, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, nil)

	caPool := NewCAPool()
	require.NoError(t, caPool.AddCA(ca))

	f, err := c.Fingerprint()
	require.NoError(t, err)
	caPool.BlocklistFingerprint(f)

	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.EqualError(t, err, "certificate is in the block list")

	caPool.ResetCertBlocklist()
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	_, err = caPool.VerifyCertificate(time.Now().Add(time.Hour*1000), c)
	require.EqualError(t, err, "root certificate is expired")

	// NewTestCert panics when the requested cert violates CA constraints.
	assert.PanicsWithError(t, "certificate is valid before the signing certificate", func() {
		NewTestCert(Version2, Curve_P256, ca, caKey, "test", time.Time{}, time.Time{}, nil, nil, nil)
	})

	// Test group assertion
	ca, _, caKey, _ = NewTestCaCert(Version2, Curve_P256, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{"test1", "test2"})
	caPem, err := ca.MarshalPEM()
	require.NoError(t, err)

	caPool = NewCAPool()
	b, err := caPool.AddCAFromPEM(caPem)
	require.NoError(t, err)
	assert.Empty(t, b)

	assert.PanicsWithError(t, "certificate contained a group not present on the signing ca: bad", func() {
		NewTestCert(Version2, Curve_P256, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, []string{"test1", "bad"})
	})

	c, _, _, _ = NewTestCert(Version2, Curve_P256, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, []string{"test1"})
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)
}
|
||||
|
||||
// TestCertificateV2_Verify_IPs mirrors TestCertificateV1_Verify_IPs for v2
// certificates: network constraint violations panic at cert creation,
// contained and exact-match assignments verify cleanly.
func TestCertificateV2_Verify_IPs(t *testing.T) {
	caIp1 := mustParsePrefixUnmapped("10.0.0.0/16")
	caIp2 := mustParsePrefixUnmapped("192.168.0.0/24")
	ca, _, caKey, _ := NewTestCaCert(Version2, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), []netip.Prefix{caIp1, caIp2}, nil, []string{"test"})

	caPem, err := ca.MarshalPEM()
	require.NoError(t, err)

	caPool := NewCAPool()
	b, err := caPool.AddCAFromPEM(caPem)
	require.NoError(t, err)
	assert.Empty(t, b)

	// ip is outside the network
	cIp1 := mustParsePrefixUnmapped("10.1.0.0/24")
	cIp2 := mustParsePrefixUnmapped("192.168.0.1/16")
	assert.PanicsWithError(t, "certificate contained a network assignment outside the limitations of the signing ca: 10.1.0.0/24", func() {
		NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
	})

	// ip is outside the network reversed order of above
	cIp1 = mustParsePrefixUnmapped("192.168.0.1/24")
	cIp2 = mustParsePrefixUnmapped("10.1.0.0/24")
	assert.PanicsWithError(t, "certificate contained a network assignment outside the limitations of the signing ca: 10.1.0.0/24", func() {
		NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
	})

	// ip is within the network but mask is outside
	cIp1 = mustParsePrefixUnmapped("10.0.1.0/15")
	cIp2 = mustParsePrefixUnmapped("192.168.0.1/24")
	assert.PanicsWithError(t, "certificate contained a network assignment outside the limitations of the signing ca: 10.0.1.0/15", func() {
		NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
	})

	// ip is within the network but mask is outside reversed order of above
	cIp1 = mustParsePrefixUnmapped("192.168.0.1/24")
	cIp2 = mustParsePrefixUnmapped("10.0.1.0/15")
	assert.PanicsWithError(t, "certificate contained a network assignment outside the limitations of the signing ca: 10.0.1.0/15", func() {
		NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
	})

	// ip and mask are within the network
	cIp1 = mustParsePrefixUnmapped("10.0.1.0/16")
	cIp2 = mustParsePrefixUnmapped("192.168.0.1/25")
	c, _, _, _ := NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{cIp1, cIp2}, nil, []string{"test"})
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	// Exact matches
	c, _, _, _ = NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{caIp1, caIp2}, nil, []string{"test"})
	// NOTE(review): re-checks the earlier err; NewTestCert's error return is discarded above.
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	// Exact matches reversed
	c, _, _, _ = NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{caIp2, caIp1}, nil, []string{"test"})
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)

	// Exact matches reversed with just 1
	c, _, _, _ = NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{caIp1}, nil, []string{"test"})
	require.NoError(t, err)
	_, err = caPool.VerifyCertificate(time.Now(), c)
	require.NoError(t, err)
}
|
||||
|
||||
func TestCertificateV2_Verify_Subnets(t *testing.T) {
|
||||
caIp1 := mustParsePrefixUnmapped("10.0.0.0/16")
|
||||
caIp2 := mustParsePrefixUnmapped("192.168.0.0/24")
|
||||
ca, _, caKey, _ := NewTestCaCert(Version2, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, []netip.Prefix{caIp1, caIp2}, []string{"test"})
|
||||
|
||||
caPem, err := ca.MarshalPEM()
|
||||
require.NoError(t, err)
|
||||
|
||||
caPool := NewCAPool()
|
||||
b, err := caPool.AddCAFromPEM(caPem)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, b)
|
||||
|
||||
// ip is outside the network
|
||||
cIp1 := mustParsePrefixUnmapped("10.1.0.0/24")
|
||||
cIp2 := mustParsePrefixUnmapped("192.168.0.1/16")
|
||||
assert.PanicsWithError(t, "certificate contained an unsafe network assignment outside the limitations of the signing ca: 10.1.0.0/24", func() {
|
||||
NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
|
||||
})
|
||||
|
||||
// ip is outside the network reversed order of above
|
||||
cIp1 = mustParsePrefixUnmapped("192.168.0.1/24")
|
||||
cIp2 = mustParsePrefixUnmapped("10.1.0.0/24")
|
||||
assert.PanicsWithError(t, "certificate contained an unsafe network assignment outside the limitations of the signing ca: 10.1.0.0/24", func() {
|
||||
NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
|
||||
})
|
||||
|
||||
// ip is within the network but mask is outside
|
||||
cIp1 = mustParsePrefixUnmapped("10.0.1.0/15")
|
||||
cIp2 = mustParsePrefixUnmapped("192.168.0.1/24")
|
||||
assert.PanicsWithError(t, "certificate contained an unsafe network assignment outside the limitations of the signing ca: 10.0.1.0/15", func() {
|
||||
NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
|
||||
})
|
||||
|
||||
// ip is within the network but mask is outside reversed order of above
|
||||
cIp1 = mustParsePrefixUnmapped("192.168.0.1/24")
|
||||
cIp2 = mustParsePrefixUnmapped("10.0.1.0/15")
|
||||
assert.PanicsWithError(t, "certificate contained an unsafe network assignment outside the limitations of the signing ca: 10.0.1.0/15", func() {
|
||||
NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
|
||||
})
|
||||
|
||||
// ip and mask are within the network
|
||||
cIp1 = mustParsePrefixUnmapped("10.0.1.0/16")
|
||||
cIp2 = mustParsePrefixUnmapped("192.168.0.1/25")
|
||||
c, _, _, _ := NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{cIp1, cIp2}, []string{"test"})
|
||||
require.NoError(t, err)
|
||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Exact matches
|
||||
c, _, _, _ = NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{caIp1, caIp2}, []string{"test"})
|
||||
require.NoError(t, err)
|
||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Exact matches reversed
|
||||
c, _, _, _ = NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{caIp2, caIp1}, []string{"test"})
|
||||
require.NoError(t, err)
|
||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Exact matches reversed with just 1
|
||||
c, _, _, _ = NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, []netip.Prefix{caIp1}, []string{"test"})
|
||||
require.NoError(t, err)
|
||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
||||
require.NoError(t, err)
|
||||
}
|
1123
cert/cert.go
1123
cert/cert.go
File diff suppressed because it is too large
Load diff
|
@ -1,8 +1,8 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.34.2
|
||||
// protoc-gen-go v1.30.0
|
||||
// protoc v3.21.5
|
||||
// source: cert_v1.proto
|
||||
// source: cert.proto
|
||||
|
||||
package cert
|
||||
|
||||
|
@ -50,11 +50,11 @@ func (x Curve) String() string {
|
|||
}
|
||||
|
||||
func (Curve) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_cert_v1_proto_enumTypes[0].Descriptor()
|
||||
return file_cert_proto_enumTypes[0].Descriptor()
|
||||
}
|
||||
|
||||
func (Curve) Type() protoreflect.EnumType {
|
||||
return &file_cert_v1_proto_enumTypes[0]
|
||||
return &file_cert_proto_enumTypes[0]
|
||||
}
|
||||
|
||||
func (x Curve) Number() protoreflect.EnumNumber {
|
||||
|
@ -63,7 +63,7 @@ func (x Curve) Number() protoreflect.EnumNumber {
|
|||
|
||||
// Deprecated: Use Curve.Descriptor instead.
|
||||
func (Curve) EnumDescriptor() ([]byte, []int) {
|
||||
return file_cert_v1_proto_rawDescGZIP(), []int{0}
|
||||
return file_cert_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
type RawNebulaCertificate struct {
|
||||
|
@ -78,7 +78,7 @@ type RawNebulaCertificate struct {
|
|||
func (x *RawNebulaCertificate) Reset() {
|
||||
*x = RawNebulaCertificate{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cert_v1_proto_msgTypes[0]
|
||||
mi := &file_cert_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -91,7 +91,7 @@ func (x *RawNebulaCertificate) String() string {
|
|||
func (*RawNebulaCertificate) ProtoMessage() {}
|
||||
|
||||
func (x *RawNebulaCertificate) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cert_v1_proto_msgTypes[0]
|
||||
mi := &file_cert_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -104,7 +104,7 @@ func (x *RawNebulaCertificate) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use RawNebulaCertificate.ProtoReflect.Descriptor instead.
|
||||
func (*RawNebulaCertificate) Descriptor() ([]byte, []int) {
|
||||
return file_cert_v1_proto_rawDescGZIP(), []int{0}
|
||||
return file_cert_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *RawNebulaCertificate) GetDetails() *RawNebulaCertificateDetails {
|
||||
|
@ -143,7 +143,7 @@ type RawNebulaCertificateDetails struct {
|
|||
func (x *RawNebulaCertificateDetails) Reset() {
|
||||
*x = RawNebulaCertificateDetails{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cert_v1_proto_msgTypes[1]
|
||||
mi := &file_cert_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -156,7 +156,7 @@ func (x *RawNebulaCertificateDetails) String() string {
|
|||
func (*RawNebulaCertificateDetails) ProtoMessage() {}
|
||||
|
||||
func (x *RawNebulaCertificateDetails) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cert_v1_proto_msgTypes[1]
|
||||
mi := &file_cert_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -169,7 +169,7 @@ func (x *RawNebulaCertificateDetails) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use RawNebulaCertificateDetails.ProtoReflect.Descriptor instead.
|
||||
func (*RawNebulaCertificateDetails) Descriptor() ([]byte, []int) {
|
||||
return file_cert_v1_proto_rawDescGZIP(), []int{1}
|
||||
return file_cert_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *RawNebulaCertificateDetails) GetName() string {
|
||||
|
@ -254,7 +254,7 @@ type RawNebulaEncryptedData struct {
|
|||
func (x *RawNebulaEncryptedData) Reset() {
|
||||
*x = RawNebulaEncryptedData{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cert_v1_proto_msgTypes[2]
|
||||
mi := &file_cert_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -267,7 +267,7 @@ func (x *RawNebulaEncryptedData) String() string {
|
|||
func (*RawNebulaEncryptedData) ProtoMessage() {}
|
||||
|
||||
func (x *RawNebulaEncryptedData) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cert_v1_proto_msgTypes[2]
|
||||
mi := &file_cert_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -280,7 +280,7 @@ func (x *RawNebulaEncryptedData) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use RawNebulaEncryptedData.ProtoReflect.Descriptor instead.
|
||||
func (*RawNebulaEncryptedData) Descriptor() ([]byte, []int) {
|
||||
return file_cert_v1_proto_rawDescGZIP(), []int{2}
|
||||
return file_cert_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *RawNebulaEncryptedData) GetEncryptionMetadata() *RawNebulaEncryptionMetadata {
|
||||
|
@ -309,7 +309,7 @@ type RawNebulaEncryptionMetadata struct {
|
|||
func (x *RawNebulaEncryptionMetadata) Reset() {
|
||||
*x = RawNebulaEncryptionMetadata{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cert_v1_proto_msgTypes[3]
|
||||
mi := &file_cert_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -322,7 +322,7 @@ func (x *RawNebulaEncryptionMetadata) String() string {
|
|||
func (*RawNebulaEncryptionMetadata) ProtoMessage() {}
|
||||
|
||||
func (x *RawNebulaEncryptionMetadata) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cert_v1_proto_msgTypes[3]
|
||||
mi := &file_cert_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -335,7 +335,7 @@ func (x *RawNebulaEncryptionMetadata) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use RawNebulaEncryptionMetadata.ProtoReflect.Descriptor instead.
|
||||
func (*RawNebulaEncryptionMetadata) Descriptor() ([]byte, []int) {
|
||||
return file_cert_v1_proto_rawDescGZIP(), []int{3}
|
||||
return file_cert_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *RawNebulaEncryptionMetadata) GetEncryptionAlgorithm() string {
|
||||
|
@ -367,7 +367,7 @@ type RawNebulaArgon2Parameters struct {
|
|||
func (x *RawNebulaArgon2Parameters) Reset() {
|
||||
*x = RawNebulaArgon2Parameters{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cert_v1_proto_msgTypes[4]
|
||||
mi := &file_cert_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -380,7 +380,7 @@ func (x *RawNebulaArgon2Parameters) String() string {
|
|||
func (*RawNebulaArgon2Parameters) ProtoMessage() {}
|
||||
|
||||
func (x *RawNebulaArgon2Parameters) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cert_v1_proto_msgTypes[4]
|
||||
mi := &file_cert_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -393,7 +393,7 @@ func (x *RawNebulaArgon2Parameters) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use RawNebulaArgon2Parameters.ProtoReflect.Descriptor instead.
|
||||
func (*RawNebulaArgon2Parameters) Descriptor() ([]byte, []int) {
|
||||
return file_cert_v1_proto_rawDescGZIP(), []int{4}
|
||||
return file_cert_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *RawNebulaArgon2Parameters) GetVersion() int32 {
|
||||
|
@ -431,87 +431,87 @@ func (x *RawNebulaArgon2Parameters) GetSalt() []byte {
|
|||
return nil
|
||||
}
|
||||
|
||||
var File_cert_v1_proto protoreflect.FileDescriptor
|
||||
var File_cert_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_cert_v1_proto_rawDesc = []byte{
|
||||
0x0a, 0x0d, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x76, 0x31, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
|
||||
0x04, 0x63, 0x65, 0x72, 0x74, 0x22, 0x71, 0x0a, 0x14, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75,
|
||||
0x6c, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a,
|
||||
0x07, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21,
|
||||
0x2e, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x43,
|
||||
0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c,
|
||||
0x73, 0x52, 0x07, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x69,
|
||||
0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x53,
|
||||
0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x9c, 0x02, 0x0a, 0x1b, 0x52, 0x61, 0x77,
|
||||
0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
|
||||
0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03,
|
||||
0x49, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x03, 0x49, 0x70, 0x73, 0x12, 0x18,
|
||||
0x0a, 0x07, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52,
|
||||
0x07, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x47, 0x72, 0x6f, 0x75,
|
||||
0x70, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73,
|
||||
0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x6f, 0x74, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x05, 0x20,
|
||||
0x01, 0x28, 0x03, 0x52, 0x09, 0x4e, 0x6f, 0x74, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1a,
|
||||
0x0a, 0x08, 0x4e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03,
|
||||
0x52, 0x08, 0x4e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x75,
|
||||
0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x50,
|
||||
0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x49, 0x73, 0x43, 0x41,
|
||||
0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x49, 0x73, 0x43, 0x41, 0x12, 0x16, 0x0a, 0x06,
|
||||
0x49, 0x73, 0x73, 0x75, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x49, 0x73,
|
||||
0x73, 0x75, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x05, 0x63, 0x75, 0x72, 0x76, 0x65, 0x18, 0x64, 0x20,
|
||||
0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x43, 0x75, 0x72, 0x76, 0x65,
|
||||
0x52, 0x05, 0x63, 0x75, 0x72, 0x76, 0x65, 0x22, 0x8b, 0x01, 0x0a, 0x16, 0x52, 0x61, 0x77, 0x4e,
|
||||
0x65, 0x62, 0x75, 0x6c, 0x61, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x44, 0x61,
|
||||
0x74, 0x61, 0x12, 0x51, 0x0a, 0x12, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21,
|
||||
0x2e, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x45,
|
||||
0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
|
||||
0x61, 0x52, 0x12, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74,
|
||||
0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1e, 0x0a, 0x0a, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74,
|
||||
0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x43, 0x69, 0x70, 0x68, 0x65,
|
||||
0x72, 0x74, 0x65, 0x78, 0x74, 0x22, 0x9c, 0x01, 0x0a, 0x1b, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62,
|
||||
0x75, 0x6c, 0x61, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74,
|
||||
0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x30, 0x0a, 0x13, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x13, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c,
|
||||
0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x4b, 0x0a, 0x10, 0x41, 0x72, 0x67, 0x6f, 0x6e,
|
||||
0x32, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x1f, 0x2e, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75,
|
||||
0x6c, 0x61, 0x41, 0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
|
||||
0x72, 0x73, 0x52, 0x10, 0x41, 0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65,
|
||||
0x74, 0x65, 0x72, 0x73, 0x22, 0xa3, 0x01, 0x0a, 0x19, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75,
|
||||
0x6c, 0x61, 0x41, 0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
|
||||
0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06,
|
||||
0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6d, 0x65,
|
||||
0x6d, 0x6f, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c,
|
||||
0x69, 0x73, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6c,
|
||||
0x6c, 0x65, 0x6c, 0x69, 0x73, 0x6d, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x69, 0x74, 0x65, 0x72,
|
||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x18, 0x05,
|
||||
0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x2a, 0x21, 0x0a, 0x05, 0x43, 0x75,
|
||||
0x72, 0x76, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x55, 0x52, 0x56, 0x45, 0x32, 0x35, 0x35, 0x31,
|
||||
0x39, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x32, 0x35, 0x36, 0x10, 0x01, 0x42, 0x20, 0x5a,
|
||||
0x1e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6c, 0x61, 0x63,
|
||||
0x6b, 0x68, 0x71, 0x2f, 0x6e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x62,
|
||||
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
var file_cert_proto_rawDesc = []byte{
|
||||
0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x63, 0x65,
|
||||
0x72, 0x74, 0x22, 0x71, 0x0a, 0x14, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x43,
|
||||
0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x44, 0x65,
|
||||
0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65,
|
||||
0x72, 0x74, 0x2e, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x43, 0x65, 0x72, 0x74,
|
||||
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07,
|
||||
0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61,
|
||||
0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x53, 0x69, 0x67, 0x6e,
|
||||
0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x9c, 0x02, 0x0a, 0x1b, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62,
|
||||
0x75, 0x6c, 0x61, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65,
|
||||
0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x49, 0x70, 0x73,
|
||||
0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x03, 0x49, 0x70, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x53,
|
||||
0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x53, 0x75,
|
||||
0x62, 0x6e, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18,
|
||||
0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x1c, 0x0a,
|
||||
0x09, 0x4e, 0x6f, 0x74, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03,
|
||||
0x52, 0x09, 0x4e, 0x6f, 0x74, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x4e,
|
||||
0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x4e,
|
||||
0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69,
|
||||
0x63, 0x4b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x50, 0x75, 0x62, 0x6c,
|
||||
0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x49, 0x73, 0x43, 0x41, 0x18, 0x08, 0x20,
|
||||
0x01, 0x28, 0x08, 0x52, 0x04, 0x49, 0x73, 0x43, 0x41, 0x12, 0x16, 0x0a, 0x06, 0x49, 0x73, 0x73,
|
||||
0x75, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x49, 0x73, 0x73, 0x75, 0x65,
|
||||
0x72, 0x12, 0x21, 0x0a, 0x05, 0x63, 0x75, 0x72, 0x76, 0x65, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0e,
|
||||
0x32, 0x0b, 0x2e, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x43, 0x75, 0x72, 0x76, 0x65, 0x52, 0x05, 0x63,
|
||||
0x75, 0x72, 0x76, 0x65, 0x22, 0x8b, 0x01, 0x0a, 0x16, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75,
|
||||
0x6c, 0x61, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12,
|
||||
0x51, 0x0a, 0x12, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74,
|
||||
0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65,
|
||||
0x72, 0x74, 0x2e, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x45, 0x6e, 0x63, 0x72,
|
||||
0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x12,
|
||||
0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
|
||||
0x74, 0x61, 0x12, 0x1e, 0x0a, 0x0a, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65,
|
||||
0x78, 0x74, 0x22, 0x9c, 0x01, 0x0a, 0x1b, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61,
|
||||
0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
|
||||
0x74, 0x61, 0x12, 0x30, 0x0a, 0x13, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x13, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72,
|
||||
0x69, 0x74, 0x68, 0x6d, 0x12, 0x4b, 0x0a, 0x10, 0x41, 0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61,
|
||||
0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
|
||||
0x2e, 0x63, 0x65, 0x72, 0x74, 0x2e, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x41,
|
||||
0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52,
|
||||
0x10, 0x41, 0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72,
|
||||
0x73, 0x22, 0xa3, 0x01, 0x0a, 0x19, 0x52, 0x61, 0x77, 0x4e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x41,
|
||||
0x72, 0x67, 0x6f, 0x6e, 0x32, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12,
|
||||
0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
|
||||
0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x6d,
|
||||
0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72,
|
||||
0x79, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x69, 0x73, 0x6d,
|
||||
0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c,
|
||||
0x69, 0x73, 0x6d, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28,
|
||||
0x0c, 0x52, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x2a, 0x21, 0x0a, 0x05, 0x43, 0x75, 0x72, 0x76, 0x65,
|
||||
0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x55, 0x52, 0x56, 0x45, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x00,
|
||||
0x12, 0x08, 0x0a, 0x04, 0x50, 0x32, 0x35, 0x36, 0x10, 0x01, 0x42, 0x20, 0x5a, 0x1e, 0x67, 0x69,
|
||||
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6c, 0x61, 0x63, 0x6b, 0x68, 0x71,
|
||||
0x2f, 0x6e, 0x65, 0x62, 0x75, 0x6c, 0x61, 0x2f, 0x63, 0x65, 0x72, 0x74, 0x62, 0x06, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_cert_v1_proto_rawDescOnce sync.Once
|
||||
file_cert_v1_proto_rawDescData = file_cert_v1_proto_rawDesc
|
||||
file_cert_proto_rawDescOnce sync.Once
|
||||
file_cert_proto_rawDescData = file_cert_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_cert_v1_proto_rawDescGZIP() []byte {
|
||||
file_cert_v1_proto_rawDescOnce.Do(func() {
|
||||
file_cert_v1_proto_rawDescData = protoimpl.X.CompressGZIP(file_cert_v1_proto_rawDescData)
|
||||
func file_cert_proto_rawDescGZIP() []byte {
|
||||
file_cert_proto_rawDescOnce.Do(func() {
|
||||
file_cert_proto_rawDescData = protoimpl.X.CompressGZIP(file_cert_proto_rawDescData)
|
||||
})
|
||||
return file_cert_v1_proto_rawDescData
|
||||
return file_cert_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_cert_v1_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
|
||||
var file_cert_v1_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
|
||||
var file_cert_v1_proto_goTypes = []any{
|
||||
var file_cert_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
|
||||
var file_cert_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
|
||||
var file_cert_proto_goTypes = []interface{}{
|
||||
(Curve)(0), // 0: cert.Curve
|
||||
(*RawNebulaCertificate)(nil), // 1: cert.RawNebulaCertificate
|
||||
(*RawNebulaCertificateDetails)(nil), // 2: cert.RawNebulaCertificateDetails
|
||||
|
@ -519,7 +519,7 @@ var file_cert_v1_proto_goTypes = []any{
|
|||
(*RawNebulaEncryptionMetadata)(nil), // 4: cert.RawNebulaEncryptionMetadata
|
||||
(*RawNebulaArgon2Parameters)(nil), // 5: cert.RawNebulaArgon2Parameters
|
||||
}
|
||||
var file_cert_v1_proto_depIdxs = []int32{
|
||||
var file_cert_proto_depIdxs = []int32{
|
||||
2, // 0: cert.RawNebulaCertificate.Details:type_name -> cert.RawNebulaCertificateDetails
|
||||
0, // 1: cert.RawNebulaCertificateDetails.curve:type_name -> cert.Curve
|
||||
4, // 2: cert.RawNebulaEncryptedData.EncryptionMetadata:type_name -> cert.RawNebulaEncryptionMetadata
|
||||
|
@ -531,13 +531,13 @@ var file_cert_v1_proto_depIdxs = []int32{
|
|||
0, // [0:4] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_cert_v1_proto_init() }
|
||||
func file_cert_v1_proto_init() {
|
||||
if File_cert_v1_proto != nil {
|
||||
func init() { file_cert_proto_init() }
|
||||
func file_cert_proto_init() {
|
||||
if File_cert_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_cert_v1_proto_msgTypes[0].Exporter = func(v any, i int) any {
|
||||
file_cert_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RawNebulaCertificate); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
|
@ -549,7 +549,7 @@ func file_cert_v1_proto_init() {
|
|||
return nil
|
||||
}
|
||||
}
|
||||
file_cert_v1_proto_msgTypes[1].Exporter = func(v any, i int) any {
|
||||
file_cert_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RawNebulaCertificateDetails); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
|
@ -561,7 +561,7 @@ func file_cert_v1_proto_init() {
|
|||
return nil
|
||||
}
|
||||
}
|
||||
file_cert_v1_proto_msgTypes[2].Exporter = func(v any, i int) any {
|
||||
file_cert_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RawNebulaEncryptedData); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
|
@ -573,7 +573,7 @@ func file_cert_v1_proto_init() {
|
|||
return nil
|
||||
}
|
||||
}
|
||||
file_cert_v1_proto_msgTypes[3].Exporter = func(v any, i int) any {
|
||||
file_cert_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RawNebulaEncryptionMetadata); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
|
@ -585,7 +585,7 @@ func file_cert_v1_proto_init() {
|
|||
return nil
|
||||
}
|
||||
}
|
||||
file_cert_v1_proto_msgTypes[4].Exporter = func(v any, i int) any {
|
||||
file_cert_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RawNebulaArgon2Parameters); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
|
@ -602,19 +602,19 @@ func file_cert_v1_proto_init() {
|
|||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_cert_v1_proto_rawDesc,
|
||||
RawDescriptor: file_cert_proto_rawDesc,
|
||||
NumEnums: 1,
|
||||
NumMessages: 5,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_cert_v1_proto_goTypes,
|
||||
DependencyIndexes: file_cert_v1_proto_depIdxs,
|
||||
EnumInfos: file_cert_v1_proto_enumTypes,
|
||||
MessageInfos: file_cert_v1_proto_msgTypes,
|
||||
GoTypes: file_cert_proto_goTypes,
|
||||
DependencyIndexes: file_cert_proto_depIdxs,
|
||||
EnumInfos: file_cert_proto_enumTypes,
|
||||
MessageInfos: file_cert_proto_msgTypes,
|
||||
}.Build()
|
||||
File_cert_v1_proto = out.File
|
||||
file_cert_v1_proto_rawDesc = nil
|
||||
file_cert_v1_proto_goTypes = nil
|
||||
file_cert_v1_proto_depIdxs = nil
|
||||
File_cert_proto = out.File
|
||||
file_cert_proto_rawDesc = nil
|
||||
file_cert_proto_goTypes = nil
|
||||
file_cert_proto_depIdxs = nil
|
||||
}
|
1230
cert/cert_test.go
Normal file
1230
cert/cert_test.go
Normal file
File diff suppressed because it is too large
Load diff
489
cert/cert_v1.go
489
cert/cert_v1.go
|
@ -1,489 +0,0 @@
|
|||
package cert
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ecdh"
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/elliptic"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/curve25519"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
const publicKeyLen = 32
|
||||
|
||||
type certificateV1 struct {
|
||||
details detailsV1
|
||||
signature []byte
|
||||
}
|
||||
|
||||
type detailsV1 struct {
|
||||
name string
|
||||
networks []netip.Prefix
|
||||
unsafeNetworks []netip.Prefix
|
||||
groups []string
|
||||
notBefore time.Time
|
||||
notAfter time.Time
|
||||
publicKey []byte
|
||||
isCA bool
|
||||
issuer string
|
||||
|
||||
curve Curve
|
||||
}
|
||||
|
||||
type m map[string]interface{}
|
||||
|
||||
func (c *certificateV1) Version() Version {
|
||||
return Version1
|
||||
}
|
||||
|
||||
func (c *certificateV1) Curve() Curve {
|
||||
return c.details.curve
|
||||
}
|
||||
|
||||
func (c *certificateV1) Groups() []string {
|
||||
return c.details.groups
|
||||
}
|
||||
|
||||
func (c *certificateV1) IsCA() bool {
|
||||
return c.details.isCA
|
||||
}
|
||||
|
||||
func (c *certificateV1) Issuer() string {
|
||||
return c.details.issuer
|
||||
}
|
||||
|
||||
func (c *certificateV1) Name() string {
|
||||
return c.details.name
|
||||
}
|
||||
|
||||
func (c *certificateV1) Networks() []netip.Prefix {
|
||||
return c.details.networks
|
||||
}
|
||||
|
||||
func (c *certificateV1) NotAfter() time.Time {
|
||||
return c.details.notAfter
|
||||
}
|
||||
|
||||
func (c *certificateV1) NotBefore() time.Time {
|
||||
return c.details.notBefore
|
||||
}
|
||||
|
||||
func (c *certificateV1) PublicKey() []byte {
|
||||
return c.details.publicKey
|
||||
}
|
||||
|
||||
func (c *certificateV1) Signature() []byte {
|
||||
return c.signature
|
||||
}
|
||||
|
||||
func (c *certificateV1) UnsafeNetworks() []netip.Prefix {
|
||||
return c.details.unsafeNetworks
|
||||
}
|
||||
|
||||
func (c *certificateV1) Fingerprint() (string, error) {
|
||||
b, err := c.Marshal()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
sum := sha256.Sum256(b)
|
||||
return hex.EncodeToString(sum[:]), nil
|
||||
}
|
||||
|
||||
func (c *certificateV1) CheckSignature(key []byte) bool {
|
||||
b, err := proto.Marshal(c.getRawDetails())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
switch c.details.curve {
|
||||
case Curve_CURVE25519:
|
||||
return ed25519.Verify(key, b, c.signature)
|
||||
case Curve_P256:
|
||||
x, y := elliptic.Unmarshal(elliptic.P256(), key)
|
||||
pubKey := &ecdsa.PublicKey{Curve: elliptic.P256(), X: x, Y: y}
|
||||
hashed := sha256.Sum256(b)
|
||||
return ecdsa.VerifyASN1(pubKey, hashed[:], c.signature)
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (c *certificateV1) Expired(t time.Time) bool {
|
||||
return c.details.notBefore.After(t) || c.details.notAfter.Before(t)
|
||||
}
|
||||
|
||||
// VerifyPrivateKey checks that key is a valid private key for the given
// curve and that its derived public key matches the public key embedded in
// this certificate. For CA certificates the key is interpreted as a signing
// key (ed25519 / ECDSA P256); for non-CA certificates it is interpreted as a
// key exchange key (X25519 / ECDH P256).
func (c *certificateV1) VerifyPrivateKey(curve Curve, key []byte) error {
	if curve != c.details.curve {
		return fmt.Errorf("curve in cert and private key supplied don't match")
	}
	if c.details.isCA {
		switch curve {
		case Curve_CURVE25519:
			// the call to PublicKey below will panic slice bounds out of range otherwise
			if len(key) != ed25519.PrivateKeySize {
				return fmt.Errorf("key was not 64 bytes, is invalid ed25519 private key")
			}

			if !ed25519.PublicKey(c.details.publicKey).Equal(ed25519.PrivateKey(key).Public()) {
				return fmt.Errorf("public key in cert and private key supplied don't match")
			}
		case Curve_P256:
			privkey, err := ecdh.P256().NewPrivateKey(key)
			if err != nil {
				return fmt.Errorf("cannot parse private key as P256: %w", err)
			}
			pub := privkey.PublicKey().Bytes()
			if !bytes.Equal(pub, c.details.publicKey) {
				return fmt.Errorf("public key in cert and private key supplied don't match")
			}
		default:
			return fmt.Errorf("invalid curve: %s", curve)
		}
		return nil
	}

	// Non-CA path: derive the key exchange public key and compare.
	var pub []byte
	switch curve {
	case Curve_CURVE25519:
		var err error
		// X25519 with the basepoint yields the public key for this scalar
		pub, err = curve25519.X25519(key, curve25519.Basepoint)
		if err != nil {
			return err
		}
	case Curve_P256:
		privkey, err := ecdh.P256().NewPrivateKey(key)
		if err != nil {
			return err
		}
		pub = privkey.PublicKey().Bytes()
	default:
		return fmt.Errorf("invalid curve: %s", curve)
	}
	if !bytes.Equal(pub, c.details.publicKey) {
		return fmt.Errorf("public key in cert and private key supplied don't match")
	}

	return nil
}
|
||||
|
||||
// getRawDetails marshals the raw details into protobuf ready struct
|
||||
func (c *certificateV1) getRawDetails() *RawNebulaCertificateDetails {
|
||||
rd := &RawNebulaCertificateDetails{
|
||||
Name: c.details.name,
|
||||
Groups: c.details.groups,
|
||||
NotBefore: c.details.notBefore.Unix(),
|
||||
NotAfter: c.details.notAfter.Unix(),
|
||||
PublicKey: make([]byte, len(c.details.publicKey)),
|
||||
IsCA: c.details.isCA,
|
||||
Curve: c.details.curve,
|
||||
}
|
||||
|
||||
for _, ipNet := range c.details.networks {
|
||||
mask := net.CIDRMask(ipNet.Bits(), ipNet.Addr().BitLen())
|
||||
rd.Ips = append(rd.Ips, addr2int(ipNet.Addr()), ip2int(mask))
|
||||
}
|
||||
|
||||
for _, ipNet := range c.details.unsafeNetworks {
|
||||
mask := net.CIDRMask(ipNet.Bits(), ipNet.Addr().BitLen())
|
||||
rd.Subnets = append(rd.Subnets, addr2int(ipNet.Addr()), ip2int(mask))
|
||||
}
|
||||
|
||||
copy(rd.PublicKey, c.details.publicKey[:])
|
||||
|
||||
// I know, this is terrible
|
||||
rd.Issuer, _ = hex.DecodeString(c.details.issuer)
|
||||
|
||||
return rd
|
||||
}
|
||||
|
||||
func (c *certificateV1) String() string {
|
||||
b, err := json.MarshalIndent(c.marshalJSON(), "", "\t")
|
||||
if err != nil {
|
||||
return fmt.Sprintf("<error marshalling certificate: %v>", err)
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (c *certificateV1) MarshalForHandshakes() ([]byte, error) {
|
||||
pubKey := c.details.publicKey
|
||||
c.details.publicKey = nil
|
||||
rawCertNoKey, err := c.Marshal()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.details.publicKey = pubKey
|
||||
return rawCertNoKey, nil
|
||||
}
|
||||
|
||||
func (c *certificateV1) Marshal() ([]byte, error) {
|
||||
rc := RawNebulaCertificate{
|
||||
Details: c.getRawDetails(),
|
||||
Signature: c.signature,
|
||||
}
|
||||
|
||||
return proto.Marshal(&rc)
|
||||
}
|
||||
|
||||
func (c *certificateV1) MarshalPEM() ([]byte, error) {
|
||||
b, err := c.Marshal()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pem.EncodeToMemory(&pem.Block{Type: CertificateBanner, Bytes: b}), nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler for the v1 certificate.
func (c *certificateV1) MarshalJSON() ([]byte, error) {
	return json.Marshal(c.marshalJSON())
}
|
||||
|
||||
// marshalJSON builds the generic map representation used by both
// MarshalJSON and String.
func (c *certificateV1) marshalJSON() m {
	// The fingerprint error is deliberately ignored; on failure the
	// "fingerprint" field is simply the empty string.
	fp, _ := c.Fingerprint()
	return m{
		"version": Version1,
		"details": m{
			"name":           c.details.name,
			"networks":       c.details.networks,
			"unsafeNetworks": c.details.unsafeNetworks,
			"groups":         c.details.groups,
			"notBefore":      c.details.notBefore,
			"notAfter":       c.details.notAfter,
			"publicKey":      fmt.Sprintf("%x", c.details.publicKey),
			"isCa":           c.details.isCA,
			"issuer":         c.details.issuer,
			"curve":          c.details.curve.String(),
		},
		"fingerprint": fp,
		"signature":   fmt.Sprintf("%x", c.Signature()),
	}
}
|
||||
|
||||
func (c *certificateV1) Copy() Certificate {
|
||||
nc := &certificateV1{
|
||||
details: detailsV1{
|
||||
name: c.details.name,
|
||||
notBefore: c.details.notBefore,
|
||||
notAfter: c.details.notAfter,
|
||||
publicKey: make([]byte, len(c.details.publicKey)),
|
||||
isCA: c.details.isCA,
|
||||
issuer: c.details.issuer,
|
||||
curve: c.details.curve,
|
||||
},
|
||||
signature: make([]byte, len(c.signature)),
|
||||
}
|
||||
|
||||
if c.details.groups != nil {
|
||||
nc.details.groups = make([]string, len(c.details.groups))
|
||||
copy(nc.details.groups, c.details.groups)
|
||||
}
|
||||
|
||||
if c.details.networks != nil {
|
||||
nc.details.networks = make([]netip.Prefix, len(c.details.networks))
|
||||
copy(nc.details.networks, c.details.networks)
|
||||
}
|
||||
|
||||
if c.details.unsafeNetworks != nil {
|
||||
nc.details.unsafeNetworks = make([]netip.Prefix, len(c.details.unsafeNetworks))
|
||||
copy(nc.details.unsafeNetworks, c.details.unsafeNetworks)
|
||||
}
|
||||
|
||||
copy(nc.signature, c.signature)
|
||||
copy(nc.details.publicKey, c.details.publicKey)
|
||||
|
||||
return nc
|
||||
}
|
||||
|
||||
// fromTBSCertificate populates this certificate from a to-be-signed
// certificate and validates the result.
// NOTE: slice fields (networks, unsafeNetworks, groups, publicKey) are
// aliased from t, not copied.
func (c *certificateV1) fromTBSCertificate(t *TBSCertificate) error {
	c.details = detailsV1{
		name:           t.Name,
		networks:       t.Networks,
		unsafeNetworks: t.UnsafeNetworks,
		groups:         t.Groups,
		notBefore:      t.NotBefore,
		notAfter:       t.NotAfter,
		publicKey:      t.PublicKey,
		isCA:           t.IsCA,
		curve:          t.Curve,
		issuer:         t.issuer,
	}

	return c.validate()
}
|
||||
|
||||
// validate enforces the v1 certificate invariants: a public key must be
// present, non-CA certificates need at least one network, and all networks
// and unsafe networks must be valid, IPv4 only, and zone free.
func (c *certificateV1) validate() error {
	// Empty names are allowed

	if len(c.details.publicKey) == 0 {
		return ErrInvalidPublicKey
	}

	// Original v1 rules allowed multiple networks to be present but ignored all but the first one.
	// Continue to allow this behavior
	if !c.details.isCA && len(c.details.networks) == 0 {
		return NewErrInvalidCertificateProperties("non-CA certificates must contain exactly one network")
	}

	for _, network := range c.details.networks {
		if !network.IsValid() || !network.Addr().IsValid() {
			return NewErrInvalidCertificateProperties("invalid network: %s", network)
		}

		if network.Addr().Is6() {
			return NewErrInvalidCertificateProperties("certificate may not contain IPv6 networks: %v", network)
		}

		if network.Addr().IsUnspecified() {
			return NewErrInvalidCertificateProperties("non-CA certificates must not use the zero address as a network: %s", network)
		}

		if network.Addr().Zone() != "" {
			return NewErrInvalidCertificateProperties("networks may not contain zones: %s", network)
		}
	}

	// NOTE(review): unsafe networks are not checked for the zero address,
	// unlike networks above — presumably to allow default routes; confirm.
	for _, network := range c.details.unsafeNetworks {
		if !network.IsValid() || !network.Addr().IsValid() {
			return NewErrInvalidCertificateProperties("invalid unsafe network: %s", network)
		}

		if network.Addr().Is6() {
			return NewErrInvalidCertificateProperties("certificate may not contain IPv6 unsafe networks: %v", network)
		}

		if network.Addr().Zone() != "" {
			return NewErrInvalidCertificateProperties("unsafe networks may not contain zones: %s", network)
		}
	}

	// v1 doesn't bother with sort order or uniqueness of networks or unsafe networks.
	// We can't modify the unmarshalled data because verification requires re-marshalling and a re-ordered
	// unsafe networks would result in a different signature.

	return nil
}
|
||||
|
||||
func (c *certificateV1) marshalForSigning() ([]byte, error) {
|
||||
b, err := proto.Marshal(c.getRawDetails())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// setSignature stores the signature bytes, rejecting an empty signature.
// The provided slice is retained, not copied.
func (c *certificateV1) setSignature(b []byte) error {
	if len(b) == 0 {
		return ErrEmptySignature
	}
	c.signature = b
	return nil
}
|
||||
|
||||
// unmarshalCertificateV1 will unmarshal a protobuf byte representation of a nebula cert
|
||||
// if the publicKey is provided here then it is not required to be present in `b`
|
||||
func unmarshalCertificateV1(b []byte, publicKey []byte) (*certificateV1, error) {
|
||||
if len(b) == 0 {
|
||||
return nil, fmt.Errorf("nil byte array")
|
||||
}
|
||||
var rc RawNebulaCertificate
|
||||
err := proto.Unmarshal(b, &rc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if rc.Details == nil {
|
||||
return nil, fmt.Errorf("encoded Details was nil")
|
||||
}
|
||||
|
||||
if len(rc.Details.Ips)%2 != 0 {
|
||||
return nil, fmt.Errorf("encoded IPs should be in pairs, an odd number was found")
|
||||
}
|
||||
|
||||
if len(rc.Details.Subnets)%2 != 0 {
|
||||
return nil, fmt.Errorf("encoded Subnets should be in pairs, an odd number was found")
|
||||
}
|
||||
|
||||
nc := certificateV1{
|
||||
details: detailsV1{
|
||||
name: rc.Details.Name,
|
||||
groups: make([]string, len(rc.Details.Groups)),
|
||||
networks: make([]netip.Prefix, len(rc.Details.Ips)/2),
|
||||
unsafeNetworks: make([]netip.Prefix, len(rc.Details.Subnets)/2),
|
||||
notBefore: time.Unix(rc.Details.NotBefore, 0),
|
||||
notAfter: time.Unix(rc.Details.NotAfter, 0),
|
||||
publicKey: make([]byte, len(rc.Details.PublicKey)),
|
||||
isCA: rc.Details.IsCA,
|
||||
curve: rc.Details.Curve,
|
||||
},
|
||||
signature: make([]byte, len(rc.Signature)),
|
||||
}
|
||||
|
||||
copy(nc.signature, rc.Signature)
|
||||
copy(nc.details.groups, rc.Details.Groups)
|
||||
nc.details.issuer = hex.EncodeToString(rc.Details.Issuer)
|
||||
|
||||
if len(publicKey) > 0 {
|
||||
nc.details.publicKey = publicKey
|
||||
}
|
||||
|
||||
copy(nc.details.publicKey, rc.Details.PublicKey)
|
||||
|
||||
var ip netip.Addr
|
||||
for i, rawIp := range rc.Details.Ips {
|
||||
if i%2 == 0 {
|
||||
ip = int2addr(rawIp)
|
||||
} else {
|
||||
ones, _ := net.IPMask(int2ip(rawIp)).Size()
|
||||
nc.details.networks[i/2] = netip.PrefixFrom(ip, ones)
|
||||
}
|
||||
}
|
||||
|
||||
for i, rawIp := range rc.Details.Subnets {
|
||||
if i%2 == 0 {
|
||||
ip = int2addr(rawIp)
|
||||
} else {
|
||||
ones, _ := net.IPMask(int2ip(rawIp)).Size()
|
||||
nc.details.unsafeNetworks[i/2] = netip.PrefixFrom(ip, ones)
|
||||
}
|
||||
}
|
||||
|
||||
err = nc.validate()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &nc, nil
|
||||
}
|
||||
|
||||
// ip2int converts a 4 or 16 byte IP (or mask) to a big endian uint32,
// using only the low 4 bytes of a 16 byte value.
func ip2int(ip []byte) uint32 {
	b := ip
	if len(b) == 16 {
		b = b[12:16]
	}
	return binary.BigEndian.Uint32(b)
}
|
||||
|
||||
func int2ip(nn uint32) net.IP {
|
||||
ip := make(net.IP, net.IPv4len)
|
||||
binary.BigEndian.PutUint32(ip, nn)
|
||||
return ip
|
||||
}
|
||||
|
||||
func addr2int(addr netip.Addr) uint32 {
|
||||
b := addr.Unmap().As4()
|
||||
return binary.BigEndian.Uint32(b[:])
|
||||
}
|
||||
|
||||
func int2addr(nn uint32) netip.Addr {
|
||||
ip := [4]byte{}
|
||||
binary.BigEndian.PutUint32(ip[:], nn)
|
||||
return netip.AddrFrom4(ip).Unmap()
|
||||
}
|
|
@ -1,218 +0,0 @@
|
|||
package cert
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/slackhq/nebula/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// TestCertificateV1_Marshal round-trips a fully populated v1 certificate
// through Marshal and unmarshalCertificateV1 and asserts every accessor
// survives intact.
func TestCertificateV1_Marshal(t *testing.T) {
	before := time.Now().Add(time.Second * -60).Round(time.Second)
	after := time.Now().Add(time.Second * 60).Round(time.Second)
	pubKey := []byte("1234567890abcedfghij1234567890ab")

	nc := certificateV1{
		details: detailsV1{
			name: "testing",
			networks: []netip.Prefix{
				mustParsePrefixUnmapped("10.1.1.1/24"),
				mustParsePrefixUnmapped("10.1.1.2/16"),
			},
			unsafeNetworks: []netip.Prefix{
				mustParsePrefixUnmapped("9.1.1.2/24"),
				mustParsePrefixUnmapped("9.1.1.3/16"),
			},
			groups:    []string{"test-group1", "test-group2", "test-group3"},
			notBefore: before,
			notAfter:  after,
			publicKey: pubKey,
			isCA:      false,
			issuer:    "1234567890abcedfghij1234567890ab",
		},
		signature: []byte("1234567890abcedfghij1234567890ab"),
	}

	b, err := nc.Marshal()
	require.NoError(t, err)
	//t.Log("Cert size:", len(b))

	nc2, err := unmarshalCertificateV1(b, nil)
	require.NoError(t, err)

	assert.Equal(t, Version1, nc.Version())
	assert.Equal(t, Curve_CURVE25519, nc.Curve())
	assert.Equal(t, nc.Signature(), nc2.Signature())
	assert.Equal(t, nc.Name(), nc2.Name())
	assert.Equal(t, nc.NotBefore(), nc2.NotBefore())
	assert.Equal(t, nc.NotAfter(), nc2.NotAfter())
	assert.Equal(t, nc.PublicKey(), nc2.PublicKey())
	assert.Equal(t, nc.IsCA(), nc2.IsCA())

	assert.Equal(t, nc.Networks(), nc2.Networks())
	assert.Equal(t, nc.UnsafeNetworks(), nc2.UnsafeNetworks())

	assert.Equal(t, nc.Groups(), nc2.Groups())
}
|
||||
|
||||
// TestCertificateV1_Expired checks Expired for times after notAfter, before
// notBefore, and inside the validity window.
func TestCertificateV1_Expired(t *testing.T) {
	nc := certificateV1{
		details: detailsV1{
			notBefore: time.Now().Add(time.Second * -60).Round(time.Second),
			notAfter:  time.Now().Add(time.Second * 60).Round(time.Second),
		},
	}

	assert.True(t, nc.Expired(time.Now().Add(time.Hour)))
	assert.True(t, nc.Expired(time.Now().Add(-time.Hour)))
	assert.False(t, nc.Expired(time.Now()))
}
|
||||
|
||||
// TestCertificateV1_MarshalJSON pins the exact JSON representation of a
// fixed v1 certificate, including the computed fingerprint.
func TestCertificateV1_MarshalJSON(t *testing.T) {
	time.Local = time.UTC
	pubKey := []byte("1234567890abcedfghij1234567890ab")

	nc := certificateV1{
		details: detailsV1{
			name: "testing",
			networks: []netip.Prefix{
				mustParsePrefixUnmapped("10.1.1.1/24"),
				mustParsePrefixUnmapped("10.1.1.2/16"),
			},
			unsafeNetworks: []netip.Prefix{
				mustParsePrefixUnmapped("9.1.1.2/24"),
				mustParsePrefixUnmapped("9.1.1.3/16"),
			},
			groups: []string{"test-group1", "test-group2", "test-group3"},
			// month 0 is normalized by time.Date; these yield 0000-11-30
			notBefore: time.Date(1, 0, 0, 1, 0, 0, 0, time.UTC),
			notAfter:  time.Date(1, 0, 0, 2, 0, 0, 0, time.UTC),
			publicKey: pubKey,
			isCA:      false,
			issuer:    "1234567890abcedfghij1234567890ab",
		},
		signature: []byte("1234567890abcedfghij1234567890ab"),
	}

	b, err := nc.MarshalJSON()
	require.NoError(t, err)
	assert.JSONEq(
		t,
		"{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"test-group1\",\"test-group2\",\"test-group3\"],\"isCa\":false,\"issuer\":\"1234567890abcedfghij1234567890ab\",\"name\":\"testing\",\"networks\":[\"10.1.1.1/24\",\"10.1.1.2/16\"],\"notAfter\":\"0000-11-30T02:00:00Z\",\"notBefore\":\"0000-11-30T01:00:00Z\",\"publicKey\":\"313233343536373839306162636564666768696a313233343536373839306162\",\"unsafeNetworks\":[\"9.1.1.2/24\",\"9.1.1.3/16\"]},\"fingerprint\":\"3944c53d4267a229295b56cb2d27d459164c010ac97d655063ba421e0670f4ba\",\"signature\":\"313233343536373839306162636564666768696a313233343536373839306162\",\"version\":1}",
		string(b),
	)
}
|
||||
|
||||
// TestCertificateV1_VerifyPrivateKey checks key/cert matching for
// CURVE25519: a CA against its own and a foreign key, and a leaf cert
// against its own and a freshly generated key.
func TestCertificateV1_VerifyPrivateKey(t *testing.T) {
	ca, _, caKey, _ := NewTestCaCert(Version1, Curve_CURVE25519, time.Time{}, time.Time{}, nil, nil, nil)
	err := ca.VerifyPrivateKey(Curve_CURVE25519, caKey)
	require.NoError(t, err)

	_, _, caKey2, _ := NewTestCaCert(Version1, Curve_CURVE25519, time.Time{}, time.Time{}, nil, nil, nil)
	require.NoError(t, err)
	err = ca.VerifyPrivateKey(Curve_CURVE25519, caKey2)
	require.Error(t, err)

	c, _, priv, _ := NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Time{}, time.Time{}, nil, nil, nil)
	rawPriv, b, curve, err := UnmarshalPrivateKeyFromPEM(priv)
	require.NoError(t, err)
	assert.Empty(t, b)
	assert.Equal(t, Curve_CURVE25519, curve)
	err = c.VerifyPrivateKey(Curve_CURVE25519, rawPriv)
	require.NoError(t, err)

	_, priv2 := X25519Keypair()
	err = c.VerifyPrivateKey(Curve_CURVE25519, priv2)
	require.Error(t, err)
}
|
||||
|
||||
// TestCertificateV1_VerifyPrivateKeyP256 mirrors
// TestCertificateV1_VerifyPrivateKey for the P256 curve.
func TestCertificateV1_VerifyPrivateKeyP256(t *testing.T) {
	ca, _, caKey, _ := NewTestCaCert(Version1, Curve_P256, time.Time{}, time.Time{}, nil, nil, nil)
	err := ca.VerifyPrivateKey(Curve_P256, caKey)
	require.NoError(t, err)

	_, _, caKey2, _ := NewTestCaCert(Version1, Curve_P256, time.Time{}, time.Time{}, nil, nil, nil)
	require.NoError(t, err)
	err = ca.VerifyPrivateKey(Curve_P256, caKey2)
	require.Error(t, err)

	c, _, priv, _ := NewTestCert(Version1, Curve_P256, ca, caKey, "test", time.Time{}, time.Time{}, nil, nil, nil)
	rawPriv, b, curve, err := UnmarshalPrivateKeyFromPEM(priv)
	require.NoError(t, err)
	assert.Empty(t, b)
	assert.Equal(t, Curve_P256, curve)
	err = c.VerifyPrivateKey(Curve_P256, rawPriv)
	require.NoError(t, err)

	_, priv2 := P256Keypair()
	err = c.VerifyPrivateKey(Curve_P256, priv2)
	require.Error(t, err)
}
|
||||
|
||||
// Ensure that upgrading the protobuf library does not change how certificates
// are marshalled, since this would break signature verification.
// The networks/unsafeNetworks are deliberately out of sorted order to pin
// that ordering is preserved as-is in the encoding.
func TestMarshalingCertificateV1Consistency(t *testing.T) {
	before := time.Date(1970, time.January, 1, 1, 1, 1, 1, time.UTC)
	after := time.Date(9999, time.January, 1, 1, 1, 1, 1, time.UTC)
	pubKey := []byte("1234567890abcedfghij1234567890ab")

	nc := certificateV1{
		details: detailsV1{
			name: "testing",
			networks: []netip.Prefix{
				mustParsePrefixUnmapped("10.1.1.2/16"),
				mustParsePrefixUnmapped("10.1.1.1/24"),
			},
			unsafeNetworks: []netip.Prefix{
				mustParsePrefixUnmapped("9.1.1.3/16"),
				mustParsePrefixUnmapped("9.1.1.2/24"),
			},
			groups:    []string{"test-group1", "test-group2", "test-group3"},
			notBefore: before,
			notAfter:  after,
			publicKey: pubKey,
			isCA:      false,
			issuer:    "1234567890abcedfghij1234567890ab",
		},
		signature: []byte("1234567890abcedfghij1234567890ab"),
	}

	// Golden bytes for the full certificate encoding.
	b, err := nc.Marshal()
	require.NoError(t, err)
	assert.Equal(t, "0a8e010a0774657374696e671212828284508080fcff0f8182845080feffff0f1a12838284488080fcff0f8282844880feffff0f220b746573742d67726f757031220b746573742d67726f757032220b746573742d67726f75703328cd1c30cdb8ccf0af073a20313233343536373839306162636564666768696a3132333435363738393061624a081234567890abcedf1220313233343536373839306162636564666768696a313233343536373839306162", fmt.Sprintf("%x", b))

	// Golden bytes for the details-only (signed) encoding.
	b, err = proto.Marshal(nc.getRawDetails())
	require.NoError(t, err)
	assert.Equal(t, "0a0774657374696e671212828284508080fcff0f8182845080feffff0f1a12838284488080fcff0f8282844880feffff0f220b746573742d67726f757031220b746573742d67726f757032220b746573742d67726f75703328cd1c30cdb8ccf0af073a20313233343536373839306162636564666768696a3132333435363738393061624a081234567890abcedf", fmt.Sprintf("%x", b))
}
|
||||
|
||||
// TestCertificateV1_Copy verifies Copy produces a structurally equal deep
// copy of a signed leaf certificate.
func TestCertificateV1_Copy(t *testing.T) {
	ca, _, caKey, _ := NewTestCaCert(Version1, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, nil)
	c, _, _, _ := NewTestCert(Version1, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, nil)
	cc := c.Copy()
	test.AssertDeepCopyEqual(t, c, cc)
}
|
||||
|
||||
// TestUnmarshalCertificateV1 feeds a truncated/invalid byte string to the
// unmarshaller and expects a clean error rather than a panic.
func TestUnmarshalCertificateV1(t *testing.T) {
	// Test that we don't panic with an invalid certificate (#332)
	data := []byte("\x98\x00\x00")
	_, err := unmarshalCertificateV1(data, nil)
	require.EqualError(t, err, "encoded Details was nil")
}
|
||||
|
||||
// appendByteSlices concatenates any number of byte slices into a single
// newly allocated slice.
func appendByteSlices(b ...[]byte) []byte {
	out := []byte{}
	for _, chunk := range b {
		out = append(out, chunk...)
	}
	return out
}
|
||||
|
||||
func mustParsePrefixUnmapped(s string) netip.Prefix {
|
||||
prefix := netip.MustParsePrefix(s)
|
||||
return netip.PrefixFrom(prefix.Addr().Unmap(), prefix.Bits())
|
||||
}
|
|
@ -1,37 +0,0 @@
|
|||
-- ASN.1 schema for the version 2 Nebula certificate format.
Nebula DEFINITIONS AUTOMATIC TAGS ::= BEGIN

Name ::= UTF8String (SIZE (1..253))
Time ::= INTEGER (0..18446744073709551615) -- Seconds since unix epoch, uint64 maximum
Network ::= OCTET STRING (SIZE (5,17)) -- IP addresses are 4 or 16 bytes + 1 byte for the prefix length
Curve ::= ENUMERATED {
    curve25519 (0),
    p256 (1)
}

-- The maximum size of a certificate must not exceed 65536 bytes
Certificate ::= SEQUENCE {
    details OCTET STRING,
    curve Curve DEFAULT curve25519,
    publicKey OCTET STRING,
    -- signature(details + curve + publicKey) using the appropriate method for curve
    signature OCTET STRING
}

Details ::= SEQUENCE {
    name Name,

    -- At least 1 ipv4 or ipv6 address must be present if isCA is false
    networks SEQUENCE OF Network OPTIONAL,
    unsafeNetworks SEQUENCE OF Network OPTIONAL,
    groups SEQUENCE OF Name OPTIONAL,
    isCA BOOLEAN DEFAULT false,
    notBefore Time,
    notAfter Time,

    -- issuer is only required if isCA is false, if isCA is true then it must not be present
    issuer OCTET STRING OPTIONAL,
    ...
    -- New fields can be added below here
}

END
|
730
cert/cert_v2.go
730
cert/cert_v2.go
|
@ -1,730 +0,0 @@
|
|||
package cert
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ecdh"
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/elliptic"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/cryptobyte"
|
||||
"golang.org/x/crypto/cryptobyte/asn1"
|
||||
"golang.org/x/crypto/curve25519"
|
||||
)
|
||||
|
||||
const (
	// ASN.1 class bits used to compose the context specific tags below.
	classConstructed     = 0x20
	classContextSpecific = 0x80

	// Tags for the fields of the outermost certificate SEQUENCE.
	TagCertDetails   = 0 | classConstructed | classContextSpecific
	TagCertCurve     = 1 | classContextSpecific
	TagCertPublicKey = 2 | classContextSpecific
	TagCertSignature = 3 | classContextSpecific

	// Tags for the fields inside the Details SEQUENCE.
	TagDetailsName           = 0 | classContextSpecific
	TagDetailsNetworks       = 1 | classConstructed | classContextSpecific
	TagDetailsUnsafeNetworks = 2 | classConstructed | classContextSpecific
	TagDetailsGroups         = 3 | classConstructed | classContextSpecific
	TagDetailsIsCA           = 4 | classContextSpecific
	TagDetailsNotBefore      = 5 | classContextSpecific
	TagDetailsNotAfter       = 6 | classContextSpecific
	TagDetailsIssuer         = 7 | classContextSpecific
)

const (
	// MaxCertificateSize is the maximum length a valid certificate can be
	MaxCertificateSize = 65536

	// MaxNameLength is limited to a maximum realistic DNS domain name to help facilitate DNS systems
	MaxNameLength = 253

	// MaxNetworkLength is the maximum length a network value can be.
	// 16 bytes for an ipv6 address + 1 byte for the prefix length
	MaxNetworkLength = 17
)
|
||||
|
||||
// certificateV2 is the ASN.1 DER backed v2 certificate implementation.
type certificateV2 struct {
	details detailsV2

	// RawDetails contains the entire asn.1 DER encoded Details struct
	// This is to benefit forwards compatibility in signature checking.
	// signature(RawDetails + Curve + PublicKey) == Signature
	rawDetails []byte
	curve      Curve
	publicKey  []byte
	signature  []byte
}

// detailsV2 is the decoded, in-memory view of the Details portion of a v2
// certificate; rawDetails above remains the authoritative signed bytes.
type detailsV2 struct {
	name           string
	networks       []netip.Prefix // MUST BE SORTED
	unsafeNetworks []netip.Prefix // MUST BE SORTED
	groups         []string
	isCA           bool
	notBefore      time.Time
	notAfter       time.Time
	issuer         string
}
|
||||
|
||||
// Version always reports Version2 for this implementation.
func (c *certificateV2) Version() Version {
	return Version2
}

// Curve returns the curve associated with this certificate's key material.
func (c *certificateV2) Curve() Curve {
	return c.curve
}

// Groups returns the group names embedded in the certificate details.
// NOTE: the returned slice is not a copy.
func (c *certificateV2) Groups() []string {
	return c.details.groups
}

// IsCA reports whether this certificate is a certificate authority.
func (c *certificateV2) IsCA() bool {
	return c.details.isCA
}

// Issuer returns the issuer value stored in the details.
func (c *certificateV2) Issuer() string {
	return c.details.issuer
}

// Name returns the name assigned to this certificate.
func (c *certificateV2) Name() string {
	return c.details.name
}

// Networks returns the networks this certificate is bound to.
// NOTE: the returned slice is not a copy.
func (c *certificateV2) Networks() []netip.Prefix {
	return c.details.networks
}

// NotAfter returns the end of the certificate's validity window.
func (c *certificateV2) NotAfter() time.Time {
	return c.details.notAfter
}

// NotBefore returns the start of the certificate's validity window.
func (c *certificateV2) NotBefore() time.Time {
	return c.details.notBefore
}

// PublicKey returns the raw public key bytes; unlike v1 this lives on the
// certificate itself, not in the details.
func (c *certificateV2) PublicKey() []byte {
	return c.publicKey
}

// Signature returns the raw signature bytes.
func (c *certificateV2) Signature() []byte {
	return c.signature
}

// UnsafeNetworks returns the unsafe (routed) networks listed in the
// certificate. NOTE: the returned slice is not a copy.
func (c *certificateV2) UnsafeNetworks() []netip.Prefix {
	return c.details.unsafeNetworks
}
|
||||
|
||||
func (c *certificateV2) Fingerprint() (string, error) {
|
||||
if len(c.rawDetails) == 0 {
|
||||
return "", ErrMissingDetails
|
||||
}
|
||||
|
||||
b := make([]byte, len(c.rawDetails)+1+len(c.publicKey)+len(c.signature))
|
||||
copy(b, c.rawDetails)
|
||||
b[len(c.rawDetails)] = byte(c.curve)
|
||||
copy(b[len(c.rawDetails)+1:], c.publicKey)
|
||||
copy(b[len(c.rawDetails)+1+len(c.publicKey):], c.signature)
|
||||
sum := sha256.Sum256(b)
|
||||
return hex.EncodeToString(sum[:]), nil
|
||||
}
|
||||
|
||||
// CheckSignature reports whether the signature verifies over
// rawDetails || curve byte || publicKey using the given raw public key.
// Returns false if the raw details are missing or the curve is unknown.
func (c *certificateV2) CheckSignature(key []byte) bool {
	if len(c.rawDetails) == 0 {
		return false
	}
	// Rebuild the exact byte string that was signed.
	b := make([]byte, len(c.rawDetails)+1+len(c.publicKey))
	copy(b, c.rawDetails)
	b[len(c.rawDetails)] = byte(c.curve)
	copy(b[len(c.rawDetails)+1:], c.publicKey)

	switch c.curve {
	case Curve_CURVE25519:
		// key is a raw ed25519 public key
		return ed25519.Verify(key, b, c.signature)
	case Curve_P256:
		// key is an uncompressed P256 point; signature is ASN.1 DER over
		// the SHA-256 digest of b
		x, y := elliptic.Unmarshal(elliptic.P256(), key)
		pubKey := &ecdsa.PublicKey{Curve: elliptic.P256(), X: x, Y: y}
		hashed := sha256.Sum256(b)
		return ecdsa.VerifyASN1(pubKey, hashed[:], c.signature)
	default:
		return false
	}
}
|
||||
|
||||
// Expired reports whether t falls outside the certificate's validity window.
// As with v1, this also returns true for a certificate not yet valid at t.
func (c *certificateV2) Expired(t time.Time) bool {
	return c.details.notBefore.After(t) || c.details.notAfter.Before(t)
}
|
||||
|
||||
// VerifyPrivateKey checks that key is a valid private key for the given
// curve and that its derived public key matches this certificate's public
// key. CA certificates treat the key as a signing key (ed25519 / ECDSA
// P256); non-CA certificates treat it as a key exchange key (X25519 / ECDH
// P256). Unlike v1, failures are reported via sentinel errors.
func (c *certificateV2) VerifyPrivateKey(curve Curve, key []byte) error {
	if curve != c.curve {
		return ErrPublicPrivateCurveMismatch
	}
	if c.details.isCA {
		switch curve {
		case Curve_CURVE25519:
			// the call to PublicKey below will panic slice bounds out of range otherwise
			if len(key) != ed25519.PrivateKeySize {
				return ErrInvalidPrivateKey
			}

			if !ed25519.PublicKey(c.publicKey).Equal(ed25519.PrivateKey(key).Public()) {
				return ErrPublicPrivateKeyMismatch
			}
		case Curve_P256:
			privkey, err := ecdh.P256().NewPrivateKey(key)
			if err != nil {
				return ErrInvalidPrivateKey
			}
			pub := privkey.PublicKey().Bytes()
			if !bytes.Equal(pub, c.publicKey) {
				return ErrPublicPrivateKeyMismatch
			}
		default:
			return fmt.Errorf("invalid curve: %s", curve)
		}
		return nil
	}

	// Non-CA path: derive the key exchange public key and compare.
	var pub []byte
	switch curve {
	case Curve_CURVE25519:
		var err error
		// X25519 with the basepoint yields the public key for this scalar
		pub, err = curve25519.X25519(key, curve25519.Basepoint)
		if err != nil {
			return ErrInvalidPrivateKey
		}
	case Curve_P256:
		privkey, err := ecdh.P256().NewPrivateKey(key)
		if err != nil {
			return ErrInvalidPrivateKey
		}
		pub = privkey.PublicKey().Bytes()
	default:
		return fmt.Errorf("invalid curve: %s", curve)
	}
	if !bytes.Equal(pub, c.publicKey) {
		return ErrPublicPrivateKeyMismatch
	}

	return nil
}
|
||||
|
||||
func (c *certificateV2) String() string {
|
||||
mb, err := c.marshalJSON()
|
||||
if err != nil {
|
||||
return fmt.Sprintf("<error marshalling certificate: %v>", err)
|
||||
}
|
||||
|
||||
b, err := json.MarshalIndent(mb, "", "\t")
|
||||
if err != nil {
|
||||
return fmt.Sprintf("<error marshalling certificate: %v>", err)
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// MarshalForHandshakes encodes the certificate for transmission during a
// handshake: the already-encoded details plus the signature, with curve and
// public key omitted since those travel in a different part of the
// handshake. Returns ErrEmptyRawDetails if details were never encoded.
func (c *certificateV2) MarshalForHandshakes() ([]byte, error) {
	if c.rawDetails == nil {
		return nil, ErrEmptyRawDetails
	}
	var b cryptobyte.Builder
	// Outermost certificate
	b.AddASN1(asn1.SEQUENCE, func(b *cryptobyte.Builder) {

		// Add the cert details which is already marshalled
		b.AddBytes(c.rawDetails)

		// Skipping the curve and public key since those come across in a different part of the handshake

		// Add the signature
		b.AddASN1(TagCertSignature, func(b *cryptobyte.Builder) {
			b.AddBytes(c.signature)
		})
	})

	return b.Bytes()
}
|
||||
|
||||
// Marshal encodes the full certificate as ASN.1 DER: the raw details,
// then optionally the curve (omitted for the default CURVE25519) and the
// public key (omitted when nil), then the signature.
// Returns ErrEmptyRawDetails if details were never encoded.
func (c *certificateV2) Marshal() ([]byte, error) {
	if c.rawDetails == nil {
		return nil, ErrEmptyRawDetails
	}
	var b cryptobyte.Builder
	// Outermost certificate
	b.AddASN1(asn1.SEQUENCE, func(b *cryptobyte.Builder) {

		// Add the cert details which is already marshalled
		b.AddBytes(c.rawDetails)

		// Add the curve only if its not the default value
		if c.curve != Curve_CURVE25519 {
			b.AddASN1(TagCertCurve, func(b *cryptobyte.Builder) {
				b.AddBytes([]byte{byte(c.curve)})
			})
		}

		// Add the public key if it is not empty
		if c.publicKey != nil {
			b.AddASN1(TagCertPublicKey, func(b *cryptobyte.Builder) {
				b.AddBytes(c.publicKey)
			})
		}

		// Add the signature
		b.AddASN1(TagCertSignature, func(b *cryptobyte.Builder) {
			b.AddBytes(c.signature)
		})
	})

	return b.Bytes()
}
|
||||
|
||||
func (c *certificateV2) MarshalPEM() ([]byte, error) {
|
||||
b, err := c.Marshal()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pem.EncodeToMemory(&pem.Block{Type: CertificateV2Banner, Bytes: b}), nil
|
||||
}
|
||||
|
||||
func (c *certificateV2) MarshalJSON() ([]byte, error) {
|
||||
b, err := c.marshalJSON()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.Marshal(b)
|
||||
}
|
||||
|
||||
// marshalJSON builds the generic map representation shared by MarshalJSON
// and String. Unlike v1, the fingerprint error is propagated rather than
// ignored, and publicKey/curve live at the top level rather than in details.
func (c *certificateV2) marshalJSON() (m, error) {
	fp, err := c.Fingerprint()
	if err != nil {
		return nil, err
	}

	return m{
		"details": m{
			"name":           c.details.name,
			"networks":       c.details.networks,
			"unsafeNetworks": c.details.unsafeNetworks,
			"groups":         c.details.groups,
			"notBefore":      c.details.notBefore,
			"notAfter":       c.details.notAfter,
			"isCa":           c.details.isCA,
			"issuer":         c.details.issuer,
		},
		"version":     Version2,
		"publicKey":   fmt.Sprintf("%x", c.publicKey),
		"curve":       c.curve.String(),
		"fingerprint": fp,
		"signature":   fmt.Sprintf("%x", c.Signature()),
	}, nil
}
|
||||
|
||||
func (c *certificateV2) Copy() Certificate {
|
||||
nc := &certificateV2{
|
||||
details: detailsV2{
|
||||
name: c.details.name,
|
||||
notBefore: c.details.notBefore,
|
||||
notAfter: c.details.notAfter,
|
||||
isCA: c.details.isCA,
|
||||
issuer: c.details.issuer,
|
||||
},
|
||||
curve: c.curve,
|
||||
publicKey: make([]byte, len(c.publicKey)),
|
||||
signature: make([]byte, len(c.signature)),
|
||||
rawDetails: make([]byte, len(c.rawDetails)),
|
||||
}
|
||||
|
||||
if c.details.groups != nil {
|
||||
nc.details.groups = make([]string, len(c.details.groups))
|
||||
copy(nc.details.groups, c.details.groups)
|
||||
}
|
||||
|
||||
if c.details.networks != nil {
|
||||
nc.details.networks = make([]netip.Prefix, len(c.details.networks))
|
||||
copy(nc.details.networks, c.details.networks)
|
||||
}
|
||||
|
||||
if c.details.unsafeNetworks != nil {
|
||||
nc.details.unsafeNetworks = make([]netip.Prefix, len(c.details.unsafeNetworks))
|
||||
copy(nc.details.unsafeNetworks, c.details.unsafeNetworks)
|
||||
}
|
||||
|
||||
copy(nc.rawDetails, c.rawDetails)
|
||||
copy(nc.signature, c.signature)
|
||||
copy(nc.publicKey, c.publicKey)
|
||||
|
||||
return nc
|
||||
}
|
||||
|
||||
func (c *certificateV2) fromTBSCertificate(t *TBSCertificate) error {
|
||||
c.details = detailsV2{
|
||||
name: t.Name,
|
||||
networks: t.Networks,
|
||||
unsafeNetworks: t.UnsafeNetworks,
|
||||
groups: t.Groups,
|
||||
isCA: t.IsCA,
|
||||
notBefore: t.NotBefore,
|
||||
notAfter: t.NotAfter,
|
||||
issuer: t.issuer,
|
||||
}
|
||||
c.curve = t.Curve
|
||||
c.publicKey = t.PublicKey
|
||||
return c.validate()
|
||||
}
|
||||
|
||||
// validate enforces the structural rules for a v2 certificate:
//   - a public key must be present
//   - a non-CA certificate must carry at least one network
//   - every network / unsafe network must be a valid, zoneless, non-4in6 prefix
//   - non-CA unsafe networks require a matching address family assignment
//
// As a side effect, both c.details.networks and c.details.unsafeNetworks are
// sorted in place and checked for duplicates.
func (c *certificateV2) validate() error {
	// Empty names are allowed

	if len(c.publicKey) == 0 {
		return ErrInvalidPublicKey
	}

	if !c.details.isCA && len(c.details.networks) == 0 {
		return NewErrInvalidCertificateProperties("non-CA certificate must contain at least 1 network")
	}

	// Track which address families are assigned; used below to gate the
	// unsafe-network checks for non-CA certificates.
	hasV4Networks := false
	hasV6Networks := false
	for _, network := range c.details.networks {
		if !network.IsValid() || !network.Addr().IsValid() {
			return NewErrInvalidCertificateProperties("invalid network: %s", network)
		}

		// NOTE(review): this check runs for CA certificates too, although the
		// message says "non-CA" — confirm whether CA certs should be exempt.
		if network.Addr().IsUnspecified() {
			return NewErrInvalidCertificateProperties("non-CA certificates must not use the zero address as a network: %s", network)
		}

		if network.Addr().Zone() != "" {
			return NewErrInvalidCertificateProperties("networks may not contain zones: %s", network)
		}

		if network.Addr().Is4In6() {
			return NewErrInvalidCertificateProperties("4in6 networks are not allowed: %s", network)
		}

		hasV4Networks = hasV4Networks || network.Addr().Is4()
		hasV6Networks = hasV6Networks || network.Addr().Is6()
	}

	// Canonical order is required before duplicate detection.
	slices.SortFunc(c.details.networks, comparePrefix)
	err := findDuplicatePrefix(c.details.networks)
	if err != nil {
		return err
	}

	for _, network := range c.details.unsafeNetworks {
		if !network.IsValid() || !network.Addr().IsValid() {
			return NewErrInvalidCertificateProperties("invalid unsafe network: %s", network)
		}

		if network.Addr().Zone() != "" {
			return NewErrInvalidCertificateProperties("unsafe networks may not contain zones: %s", network)
		}

		// A non-CA cert may only route an unsafe network of a family it has
		// an address assignment for.
		if !c.details.isCA {
			if network.Addr().Is6() {
				if !hasV6Networks {
					return NewErrInvalidCertificateProperties("IPv6 unsafe networks require an IPv6 address assignment: %s", network)
				}
			} else if network.Addr().Is4() {
				if !hasV4Networks {
					return NewErrInvalidCertificateProperties("IPv4 unsafe networks require an IPv4 address assignment: %s", network)
				}
			}
		}
	}

	slices.SortFunc(c.details.unsafeNetworks, comparePrefix)
	err = findDuplicatePrefix(c.details.unsafeNetworks)
	if err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
func (c *certificateV2) marshalForSigning() ([]byte, error) {
|
||||
d, err := c.details.Marshal()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshalling certificate details failed: %w", err)
|
||||
}
|
||||
c.rawDetails = d
|
||||
|
||||
b := make([]byte, len(c.rawDetails)+1+len(c.publicKey))
|
||||
copy(b, c.rawDetails)
|
||||
b[len(c.rawDetails)] = byte(c.curve)
|
||||
copy(b[len(c.rawDetails)+1:], c.publicKey)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (c *certificateV2) setSignature(b []byte) error {
|
||||
if len(b) == 0 {
|
||||
return ErrEmptySignature
|
||||
}
|
||||
c.signature = b
|
||||
return nil
|
||||
}
|
||||
|
||||
// Marshal encodes the certificate details as a TagCertDetails ASN.1 structure.
// Field order is part of the wire format and must not change: name, networks,
// unsafe networks, groups, isCA, notBefore, notAfter, issuer. Optional fields
// are omitted entirely when empty/false.
func (d *detailsV2) Marshal() ([]byte, error) {
	var b cryptobyte.Builder
	// Errors raised inside the builder callbacks are captured here because the
	// callbacks cannot return them directly.
	var err error

	// Details are a structure
	b.AddASN1(TagCertDetails, func(b *cryptobyte.Builder) {
		// Add the name
		b.AddASN1(TagDetailsName, func(b *cryptobyte.Builder) {
			b.AddBytes([]byte(d.name))
		})

		// Add the networks if any exist
		if len(d.networks) > 0 {
			b.AddASN1(TagDetailsNetworks, func(b *cryptobyte.Builder) {
				for _, n := range d.networks {
					sb, innerErr := n.MarshalBinary()
					if innerErr != nil {
						// MarshalBinary never returns an error
						err = fmt.Errorf("unable to marshal network: %w", innerErr)
						return
					}
					b.AddASN1OctetString(sb)
				}
			})
		}

		// Add the unsafe networks if any exist
		if len(d.unsafeNetworks) > 0 {
			b.AddASN1(TagDetailsUnsafeNetworks, func(b *cryptobyte.Builder) {
				for _, n := range d.unsafeNetworks {
					sb, innerErr := n.MarshalBinary()
					if innerErr != nil {
						// MarshalBinary never returns an error
						err = fmt.Errorf("unable to marshal unsafe network: %w", innerErr)
						return
					}
					b.AddASN1OctetString(sb)
				}
			})
		}

		// Add groups if any exist
		if len(d.groups) > 0 {
			b.AddASN1(TagDetailsGroups, func(b *cryptobyte.Builder) {
				for _, group := range d.groups {
					b.AddASN1(asn1.UTF8String, func(b *cryptobyte.Builder) {
						b.AddBytes([]byte(group))
					})
				}
			})
		}

		// Add IsCA only if true (0xff is DER TRUE)
		if d.isCA {
			b.AddASN1(TagDetailsIsCA, func(b *cryptobyte.Builder) {
				b.AddUint8(0xff)
			})
		}

		// Add not before (Unix seconds)
		b.AddASN1Int64WithTag(d.notBefore.Unix(), TagDetailsNotBefore)

		// Add not after (Unix seconds)
		b.AddASN1Int64WithTag(d.notAfter.Unix(), TagDetailsNotAfter)

		// Add the issuer if present; stored as a hex string in memory but
		// written as raw bytes on the wire.
		if d.issuer != "" {
			issuerBytes, innerErr := hex.DecodeString(d.issuer)
			if innerErr != nil {
				err = fmt.Errorf("failed to decode issuer: %w", innerErr)
				return
			}
			b.AddASN1(TagDetailsIssuer, func(b *cryptobyte.Builder) {
				b.AddBytes(issuerBytes)
			})
		}
	})

	if err != nil {
		return nil, err
	}

	return b.Bytes()
}
|
||||
|
||||
// unmarshalCertificateV2 decodes a v2 certificate from its ASN.1 wire form.
// publicKey and curve act as fallbacks for certificates that omit those
// optional fields (e.g. when the key is transported out of band); an explicit
// field in the wire data takes precedence. The decoded certificate is run
// through validate() before being returned.
func unmarshalCertificateV2(b []byte, publicKey []byte, curve Curve) (*certificateV2, error) {
	l := len(b)
	if l == 0 || l > MaxCertificateSize {
		return nil, ErrBadFormat
	}

	input := cryptobyte.String(b)
	// Open the envelope
	if !input.ReadASN1(&input, asn1.SEQUENCE) || input.Empty() {
		return nil, ErrBadFormat
	}

	// Grab the cert details, we need to preserve the tag and length
	// (ReadASN1Element keeps the full TLV so rawDetails can be re-signed/hashed
	// byte-for-byte)
	var rawDetails cryptobyte.String
	if !input.ReadASN1Element(&rawDetails, TagCertDetails) || rawDetails.Empty() {
		return nil, ErrBadFormat
	}

	// Maybe grab the curve; falls back to the caller-supplied curve when the
	// field is absent
	var rawCurve byte
	if !readOptionalASN1Byte(&input, &rawCurve, TagCertCurve, byte(curve)) {
		return nil, ErrBadFormat
	}
	curve = Curve(rawCurve)

	// Maybe grab the public key; a caller-supplied key wins over the wire field
	var rawPublicKey cryptobyte.String
	if len(publicKey) > 0 {
		rawPublicKey = publicKey
	} else if !input.ReadOptionalASN1(&rawPublicKey, nil, TagCertPublicKey) {
		return nil, ErrBadFormat
	}

	// A public key must come from somewhere
	if len(rawPublicKey) == 0 {
		return nil, ErrBadFormat
	}

	// Grab the signature
	var rawSignature cryptobyte.String
	if !input.ReadASN1(&rawSignature, TagCertSignature) || rawSignature.Empty() {
		return nil, ErrBadFormat
	}

	// Finally unmarshal the details
	details, err := unmarshalDetails(rawDetails)
	if err != nil {
		return nil, err
	}

	c := &certificateV2{
		details:    details,
		rawDetails: rawDetails,
		curve:      curve,
		publicKey:  rawPublicKey,
		signature:  rawSignature,
	}

	err = c.validate()
	if err != nil {
		return nil, err
	}

	return c, nil
}
|
||||
|
||||
// unmarshalDetails decodes the TagCertDetails structure produced by
// detailsV2.Marshal. Fields are read in wire order (name, networks, unsafe
// networks, groups, isCA, notBefore, notAfter, issuer); any structural
// deviation yields ErrBadFormat.
func unmarshalDetails(b cryptobyte.String) (detailsV2, error) {
	// Open the envelope
	if !b.ReadASN1(&b, TagCertDetails) || b.Empty() {
		return detailsV2{}, ErrBadFormat
	}

	// Read the name; it must be non-empty and bounded by MaxNameLength
	var name cryptobyte.String
	if !b.ReadASN1(&name, TagDetailsName) || name.Empty() || len(name) > MaxNameLength {
		return detailsV2{}, ErrBadFormat
	}

	// Read the network addresses
	var subString cryptobyte.String
	var found bool

	if !b.ReadOptionalASN1(&subString, &found, TagDetailsNetworks) {
		return detailsV2{}, ErrBadFormat
	}

	var networks []netip.Prefix
	var val cryptobyte.String
	if found {
		// Each network is an OCTET STRING holding netip.Prefix binary form
		for !subString.Empty() {
			if !subString.ReadASN1(&val, asn1.OCTET_STRING) || val.Empty() || len(val) > MaxNetworkLength {
				return detailsV2{}, ErrBadFormat
			}

			var n netip.Prefix
			if err := n.UnmarshalBinary(val); err != nil {
				return detailsV2{}, ErrBadFormat
			}
			networks = append(networks, n)
		}
	}

	// Read out any unsafe networks
	if !b.ReadOptionalASN1(&subString, &found, TagDetailsUnsafeNetworks) {
		return detailsV2{}, ErrBadFormat
	}

	var unsafeNetworks []netip.Prefix
	if found {
		for !subString.Empty() {
			if !subString.ReadASN1(&val, asn1.OCTET_STRING) || val.Empty() || len(val) > MaxNetworkLength {
				return detailsV2{}, ErrBadFormat
			}

			var n netip.Prefix
			if err := n.UnmarshalBinary(val); err != nil {
				return detailsV2{}, ErrBadFormat
			}
			unsafeNetworks = append(unsafeNetworks, n)
		}
	}

	// Read out any groups (UTF8String entries)
	if !b.ReadOptionalASN1(&subString, &found, TagDetailsGroups) {
		return detailsV2{}, ErrBadFormat
	}

	var groups []string
	if found {
		for !subString.Empty() {
			if !subString.ReadASN1(&val, asn1.UTF8String) || val.Empty() {
				return detailsV2{}, ErrBadFormat
			}
			groups = append(groups, string(val))
		}
	}

	// Read out IsCA; absent means false
	var isCa bool
	if !readOptionalASN1Boolean(&b, &isCa, TagDetailsIsCA, false) {
		return detailsV2{}, ErrBadFormat
	}

	// Read not before and not after (Unix seconds)
	var notBefore int64
	if !b.ReadASN1Int64WithTag(&notBefore, TagDetailsNotBefore) {
		return detailsV2{}, ErrBadFormat
	}

	var notAfter int64
	if !b.ReadASN1Int64WithTag(&notAfter, TagDetailsNotAfter) {
		return detailsV2{}, ErrBadFormat
	}

	// Read issuer; stored in memory as a hex string
	var issuer cryptobyte.String
	if !b.ReadOptionalASN1(&issuer, nil, TagDetailsIssuer) {
		return detailsV2{}, ErrBadFormat
	}

	return detailsV2{
		name:           string(name),
		networks:       networks,
		unsafeNetworks: unsafeNetworks,
		groups:         groups,
		isCA:           isCa,
		notBefore:      time.Unix(notBefore, 0),
		notAfter:       time.Unix(notAfter, 0),
		issuer:         hex.EncodeToString(issuer),
	}, nil
}
|
|
@ -1,267 +0,0 @@
|
|||
package cert
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"net/netip"
|
||||
"slices"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/slackhq/nebula/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestCertificateV2_Marshal round-trips a fully-populated certificate through
// Marshal/unmarshalCertificateV2 and checks every accessor survives; it also
// confirms that unmarshalling canonically sorts the network lists.
func TestCertificateV2_Marshal(t *testing.T) {
	before := time.Now().Add(time.Second * -60).Round(time.Second)
	after := time.Now().Add(time.Second * 60).Round(time.Second)
	pubKey := []byte("1234567890abcedfghij1234567890ab")

	// Networks are deliberately out of canonical order so the sort performed
	// by unmarshalling is observable below.
	nc := certificateV2{
		details: detailsV2{
			name: "testing",
			networks: []netip.Prefix{
				mustParsePrefixUnmapped("10.1.1.2/16"),
				mustParsePrefixUnmapped("10.1.1.1/24"),
			},
			unsafeNetworks: []netip.Prefix{
				mustParsePrefixUnmapped("9.1.1.3/16"),
				mustParsePrefixUnmapped("9.1.1.2/24"),
			},
			groups:    []string{"test-group1", "test-group2", "test-group3"},
			notBefore: before,
			notAfter:  after,
			isCA:      false,
			issuer:    "1234567890abcdef1234567890abcdef",
		},
		signature: []byte("1234567890abcdef1234567890abcdef"),
		publicKey: pubKey,
	}

	db, err := nc.details.Marshal()
	require.NoError(t, err)
	nc.rawDetails = db

	b, err := nc.Marshal()
	require.NoError(t, err)
	//t.Log("Cert size:", len(b))

	nc2, err := unmarshalCertificateV2(b, nil, Curve_CURVE25519)
	require.NoError(t, err)

	assert.Equal(t, Version2, nc.Version())
	assert.Equal(t, Curve_CURVE25519, nc.Curve())
	assert.Equal(t, nc.Signature(), nc2.Signature())
	assert.Equal(t, nc.Name(), nc2.Name())
	assert.Equal(t, nc.NotBefore(), nc2.NotBefore())
	assert.Equal(t, nc.NotAfter(), nc2.NotAfter())
	assert.Equal(t, nc.PublicKey(), nc2.PublicKey())
	assert.Equal(t, nc.IsCA(), nc2.IsCA())
	assert.Equal(t, nc.Issuer(), nc2.Issuer())

	// unmarshalling will sort networks and unsafeNetworks, we need to do the same
	// but first make sure it fails
	assert.NotEqual(t, nc.Networks(), nc2.Networks())
	assert.NotEqual(t, nc.UnsafeNetworks(), nc2.UnsafeNetworks())

	slices.SortFunc(nc.details.networks, comparePrefix)
	slices.SortFunc(nc.details.unsafeNetworks, comparePrefix)

	assert.Equal(t, nc.Networks(), nc2.Networks())
	assert.Equal(t, nc.UnsafeNetworks(), nc2.UnsafeNetworks())

	assert.Equal(t, nc.Groups(), nc2.Groups())
}
|
||||
|
||||
func TestCertificateV2_Expired(t *testing.T) {
|
||||
nc := certificateV2{
|
||||
details: detailsV2{
|
||||
notBefore: time.Now().Add(time.Second * -60).Round(time.Second),
|
||||
notAfter: time.Now().Add(time.Second * 60).Round(time.Second),
|
||||
},
|
||||
}
|
||||
|
||||
assert.True(t, nc.Expired(time.Now().Add(time.Hour)))
|
||||
assert.True(t, nc.Expired(time.Now().Add(-time.Hour)))
|
||||
assert.False(t, nc.Expired(time.Now()))
|
||||
}
|
||||
|
||||
// TestCertificateV2_MarshalJSON verifies that marshalling without cached raw
// details fails with ErrMissingDetails, and that a fully-populated certificate
// produces the exact expected JSON document.
func TestCertificateV2_MarshalJSON(t *testing.T) {
	time.Local = time.UTC
	pubKey := []byte("1234567890abcedf1234567890abcedf")

	nc := certificateV2{
		details: detailsV2{
			name: "testing",
			networks: []netip.Prefix{
				mustParsePrefixUnmapped("10.1.1.1/24"),
				mustParsePrefixUnmapped("10.1.1.2/16"),
			},
			unsafeNetworks: []netip.Prefix{
				mustParsePrefixUnmapped("9.1.1.2/24"),
				mustParsePrefixUnmapped("9.1.1.3/16"),
			},
			groups: []string{"test-group1", "test-group2", "test-group3"},
			// Month 0 is intentionally out of range; time.Date normalizes it,
			// which is why the expected JSON shows 0000-11-30.
			notBefore: time.Date(1, 0, 0, 1, 0, 0, 0, time.UTC),
			notAfter:  time.Date(1, 0, 0, 2, 0, 0, 0, time.UTC),
			isCA:      false,
			issuer:    "1234567890abcedf1234567890abcedf",
		},
		publicKey: pubKey,
		signature: []byte("1234567890abcedf1234567890abcedf1234567890abcedf1234567890abcedf"),
	}

	// Without rawDetails the fingerprint cannot be computed.
	b, err := nc.MarshalJSON()
	require.ErrorIs(t, err, ErrMissingDetails)

	rd, err := nc.details.Marshal()
	require.NoError(t, err)

	nc.rawDetails = rd
	b, err = nc.MarshalJSON()
	require.NoError(t, err)
	assert.JSONEq(
		t,
		"{\"curve\":\"CURVE25519\",\"details\":{\"groups\":[\"test-group1\",\"test-group2\",\"test-group3\"],\"isCa\":false,\"issuer\":\"1234567890abcedf1234567890abcedf\",\"name\":\"testing\",\"networks\":[\"10.1.1.1/24\",\"10.1.1.2/16\"],\"notAfter\":\"0000-11-30T02:00:00Z\",\"notBefore\":\"0000-11-30T01:00:00Z\",\"unsafeNetworks\":[\"9.1.1.2/24\",\"9.1.1.3/16\"]},\"fingerprint\":\"152d9a7400c1e001cb76cffd035215ebb351f69eeb797f7f847dd086e15e56dd\",\"publicKey\":\"3132333435363738393061626365646631323334353637383930616263656466\",\"signature\":\"31323334353637383930616263656466313233343536373839306162636564663132333435363738393061626365646631323334353637383930616263656466\",\"version\":2}",
		string(b),
	)
}
|
||||
|
||||
func TestCertificateV2_VerifyPrivateKey(t *testing.T) {
|
||||
ca, _, caKey, _ := NewTestCaCert(Version2, Curve_CURVE25519, time.Time{}, time.Time{}, nil, nil, nil)
|
||||
err := ca.VerifyPrivateKey(Curve_CURVE25519, caKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ca.VerifyPrivateKey(Curve_CURVE25519, caKey[:16])
|
||||
require.ErrorIs(t, err, ErrInvalidPrivateKey)
|
||||
|
||||
_, caKey2, err := ed25519.GenerateKey(rand.Reader)
|
||||
require.NoError(t, err)
|
||||
err = ca.VerifyPrivateKey(Curve_CURVE25519, caKey2)
|
||||
require.ErrorIs(t, err, ErrPublicPrivateKeyMismatch)
|
||||
|
||||
c, _, priv, _ := NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Time{}, time.Time{}, nil, nil, nil)
|
||||
rawPriv, b, curve, err := UnmarshalPrivateKeyFromPEM(priv)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, b)
|
||||
assert.Equal(t, Curve_CURVE25519, curve)
|
||||
err = c.VerifyPrivateKey(Curve_CURVE25519, rawPriv)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, priv2 := X25519Keypair()
|
||||
err = c.VerifyPrivateKey(Curve_P256, priv2)
|
||||
require.ErrorIs(t, err, ErrPublicPrivateCurveMismatch)
|
||||
|
||||
err = c.VerifyPrivateKey(Curve_CURVE25519, priv2)
|
||||
require.ErrorIs(t, err, ErrPublicPrivateKeyMismatch)
|
||||
|
||||
err = c.VerifyPrivateKey(Curve_CURVE25519, priv2[:16])
|
||||
require.ErrorIs(t, err, ErrInvalidPrivateKey)
|
||||
|
||||
ac, ok := c.(*certificateV2)
|
||||
require.True(t, ok)
|
||||
ac.curve = Curve(99)
|
||||
err = c.VerifyPrivateKey(Curve(99), priv2)
|
||||
require.EqualError(t, err, "invalid curve: 99")
|
||||
|
||||
ca2, _, caKey2, _ := NewTestCaCert(Version2, Curve_P256, time.Time{}, time.Time{}, nil, nil, nil)
|
||||
err = ca.VerifyPrivateKey(Curve_CURVE25519, caKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ca2.VerifyPrivateKey(Curve_P256, caKey2[:16])
|
||||
require.ErrorIs(t, err, ErrInvalidPrivateKey)
|
||||
|
||||
c, _, priv, _ = NewTestCert(Version2, Curve_P256, ca2, caKey2, "test", time.Time{}, time.Time{}, nil, nil, nil)
|
||||
rawPriv, b, curve, err = UnmarshalPrivateKeyFromPEM(priv)
|
||||
|
||||
err = c.VerifyPrivateKey(Curve_P256, priv[:16])
|
||||
require.ErrorIs(t, err, ErrInvalidPrivateKey)
|
||||
|
||||
err = c.VerifyPrivateKey(Curve_P256, priv)
|
||||
require.ErrorIs(t, err, ErrInvalidPrivateKey)
|
||||
|
||||
aCa, ok := ca2.(*certificateV2)
|
||||
require.True(t, ok)
|
||||
aCa.curve = Curve(99)
|
||||
err = aCa.VerifyPrivateKey(Curve(99), priv2)
|
||||
require.EqualError(t, err, "invalid curve: 99")
|
||||
|
||||
}
|
||||
|
||||
// TestCertificateV2_VerifyPrivateKeyP256 covers the P256 happy path plus the
// mismatched-key failure for both CA and leaf certificates.
func TestCertificateV2_VerifyPrivateKeyP256(t *testing.T) {
	ca, _, caKey, _ := NewTestCaCert(Version2, Curve_P256, time.Time{}, time.Time{}, nil, nil, nil)
	err := ca.VerifyPrivateKey(Curve_P256, caKey)
	require.NoError(t, err)

	_, _, caKey2, _ := NewTestCaCert(Version2, Curve_P256, time.Time{}, time.Time{}, nil, nil, nil)
	// NOTE(review): this re-checks the err from the VerifyPrivateKey call
	// above, not from NewTestCaCert (which returns no error) — confirm intent.
	require.NoError(t, err)
	err = ca.VerifyPrivateKey(Curve_P256, caKey2)
	require.Error(t, err)

	c, _, priv, _ := NewTestCert(Version2, Curve_P256, ca, caKey, "test", time.Time{}, time.Time{}, nil, nil, nil)
	rawPriv, b, curve, err := UnmarshalPrivateKeyFromPEM(priv)
	require.NoError(t, err)
	assert.Empty(t, b)
	assert.Equal(t, Curve_P256, curve)
	err = c.VerifyPrivateKey(Curve_P256, rawPriv)
	require.NoError(t, err)

	// An unrelated P256 key must not verify against the cert's public key.
	_, priv2 := P256Keypair()
	err = c.VerifyPrivateKey(Curve_P256, priv2)
	require.Error(t, err)
}
|
||||
|
||||
func TestCertificateV2_Copy(t *testing.T) {
|
||||
ca, _, caKey, _ := NewTestCaCert(Version2, Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, nil)
|
||||
c, _, _, _ := NewTestCert(Version2, Curve_CURVE25519, ca, caKey, "test", time.Now(), time.Now().Add(5*time.Minute), nil, nil, nil)
|
||||
cc := c.Copy()
|
||||
test.AssertDeepCopyEqual(t, c, cc)
|
||||
}
|
||||
|
||||
func TestUnmarshalCertificateV2(t *testing.T) {
|
||||
data := []byte("\x98\x00\x00")
|
||||
_, err := unmarshalCertificateV2(data, nil, Curve_CURVE25519)
|
||||
require.EqualError(t, err, "bad wire format")
|
||||
}
|
||||
|
||||
func TestCertificateV2_marshalForSigningStability(t *testing.T) {
|
||||
before := time.Date(1996, time.May, 5, 0, 0, 0, 0, time.UTC)
|
||||
after := before.Add(time.Second * 60).Round(time.Second)
|
||||
pubKey := []byte("1234567890abcedfghij1234567890ab")
|
||||
|
||||
nc := certificateV2{
|
||||
details: detailsV2{
|
||||
name: "testing",
|
||||
networks: []netip.Prefix{
|
||||
mustParsePrefixUnmapped("10.1.1.2/16"),
|
||||
mustParsePrefixUnmapped("10.1.1.1/24"),
|
||||
},
|
||||
unsafeNetworks: []netip.Prefix{
|
||||
mustParsePrefixUnmapped("9.1.1.3/16"),
|
||||
mustParsePrefixUnmapped("9.1.1.2/24"),
|
||||
},
|
||||
groups: []string{"test-group1", "test-group2", "test-group3"},
|
||||
notBefore: before,
|
||||
notAfter: after,
|
||||
isCA: false,
|
||||
issuer: "1234567890abcdef1234567890abcdef",
|
||||
},
|
||||
signature: []byte("1234567890abcdef1234567890abcdef"),
|
||||
publicKey: pubKey,
|
||||
}
|
||||
|
||||
const expectedRawDetailsStr = "a070800774657374696e67a10e04050a0101021004050a01010118a20e0405090101031004050901010218a3270c0b746573742d67726f7570310c0b746573742d67726f7570320c0b746573742d67726f7570338504318bef808604318befbc87101234567890abcdef1234567890abcdef"
|
||||
expectedRawDetails, err := hex.DecodeString(expectedRawDetailsStr)
|
||||
require.NoError(t, err)
|
||||
|
||||
db, err := nc.details.Marshal()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedRawDetails, db)
|
||||
|
||||
expectedForSigning, err := hex.DecodeString(expectedRawDetailsStr + "00313233343536373839306162636564666768696a313233343536373839306162")
|
||||
b, err := nc.marshalForSigning()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expectedForSigning, b)
|
||||
}
|
164
cert/crypto.go
164
cert/crypto.go
|
@ -3,28 +3,14 @@ package cert
|
|||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/ed25519"
|
||||
"crypto/rand"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
|
||||
"golang.org/x/crypto/argon2"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// NebulaEncryptedData pairs the ciphertext of an encrypted private key with
// the metadata needed to decrypt it.
type NebulaEncryptedData struct {
	// EncryptionMetadata records the cipher name and KDF parameters used.
	EncryptionMetadata NebulaEncryptionMetadata
	// Ciphertext is the encrypted payload (nonce handling is defined by the
	// cipher-specific helpers elsewhere in this file).
	Ciphertext []byte
}
|
||||
|
||||
// NebulaEncryptionMetadata describes how a NebulaEncryptedData ciphertext was
// produced: the symmetric algorithm name and the Argon2 KDF parameters used
// to derive the key from a passphrase.
type NebulaEncryptionMetadata struct {
	// EncryptionAlgorithm names the cipher, e.g. "AES-256-GCM".
	EncryptionAlgorithm string
	// Argon2Parameters are the KDF factors for passphrase key derivation.
	Argon2Parameters Argon2Parameters
}
|
||||
|
||||
// Argon2Parameters KDF factors
|
||||
// KDF factors
|
||||
type Argon2Parameters struct {
|
||||
version rune
|
||||
Memory uint32 // KiB
|
||||
|
@ -33,7 +19,7 @@ type Argon2Parameters struct {
|
|||
salt []byte
|
||||
}
|
||||
|
||||
// NewArgon2Parameters Returns a new Argon2Parameters object with current version set
|
||||
// Returns a new Argon2Parameters object with current version set
|
||||
func NewArgon2Parameters(memory uint32, parallelism uint8, iterations uint32) *Argon2Parameters {
|
||||
return &Argon2Parameters{
|
||||
version: argon2.Version,
|
||||
|
@ -91,9 +77,6 @@ func aes256Decrypt(passphrase []byte, kdfParams *Argon2Parameters, data []byte)
|
|||
}
|
||||
|
||||
gcm, err := cipher.NewGCM(block)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nonce, ciphertext, err := splitNonceCiphertext(data, gcm.NonceSize())
|
||||
if err != nil {
|
||||
|
@ -155,146 +138,3 @@ func splitNonceCiphertext(blob []byte, nonceSize int) ([]byte, []byte, error) {
|
|||
|
||||
return blob[:nonceSize], blob[nonceSize:], nil
|
||||
}
|
||||
|
||||
// EncryptAndMarshalSigningPrivateKey is a simple helper to encrypt and PEM encode a private key.
// The key bytes b are AES-256-GCM encrypted under a key derived from
// passphrase via kdfParams, wrapped in the RawNebulaEncryptedData protobuf,
// and PEM-encoded with a curve-specific banner. Unknown curves are rejected.
func EncryptAndMarshalSigningPrivateKey(curve Curve, b []byte, passphrase []byte, kdfParams *Argon2Parameters) ([]byte, error) {
	ciphertext, err := aes256Encrypt(passphrase, kdfParams, b)
	if err != nil {
		return nil, err
	}

	// Wrap the ciphertext with the KDF parameters so the key can be re-derived
	// at decryption time.
	b, err = proto.Marshal(&RawNebulaEncryptedData{
		EncryptionMetadata: &RawNebulaEncryptionMetadata{
			EncryptionAlgorithm: "AES-256-GCM",
			Argon2Parameters: &RawNebulaArgon2Parameters{
				Version:     kdfParams.version,
				Memory:      kdfParams.Memory,
				Parallelism: uint32(kdfParams.Parallelism),
				Iterations:  kdfParams.Iterations,
				Salt:        kdfParams.salt,
			},
		},
		Ciphertext: ciphertext,
	})
	if err != nil {
		return nil, err
	}

	// The PEM banner encodes the curve so decryption knows what to expect.
	switch curve {
	case Curve_CURVE25519:
		return pem.EncodeToMemory(&pem.Block{Type: EncryptedEd25519PrivateKeyBanner, Bytes: b}), nil
	case Curve_P256:
		return pem.EncodeToMemory(&pem.Block{Type: EncryptedECDSAP256PrivateKeyBanner, Bytes: b}), nil
	default:
		return nil, fmt.Errorf("invalid curve: %v", curve)
	}
}
|
||||
|
||||
// UnmarshalNebulaEncryptedData will unmarshal a protobuf byte representation of a nebula cert into its
|
||||
// protobuf-generated struct.
|
||||
func UnmarshalNebulaEncryptedData(b []byte) (*NebulaEncryptedData, error) {
|
||||
if len(b) == 0 {
|
||||
return nil, fmt.Errorf("nil byte array")
|
||||
}
|
||||
var rned RawNebulaEncryptedData
|
||||
err := proto.Unmarshal(b, &rned)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if rned.EncryptionMetadata == nil {
|
||||
return nil, fmt.Errorf("encoded EncryptionMetadata was nil")
|
||||
}
|
||||
|
||||
if rned.EncryptionMetadata.Argon2Parameters == nil {
|
||||
return nil, fmt.Errorf("encoded Argon2Parameters was nil")
|
||||
}
|
||||
|
||||
params, err := unmarshalArgon2Parameters(rned.EncryptionMetadata.Argon2Parameters)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ned := NebulaEncryptedData{
|
||||
EncryptionMetadata: NebulaEncryptionMetadata{
|
||||
EncryptionAlgorithm: rned.EncryptionMetadata.EncryptionAlgorithm,
|
||||
Argon2Parameters: *params,
|
||||
},
|
||||
Ciphertext: rned.Ciphertext,
|
||||
}
|
||||
|
||||
return &ned, nil
|
||||
}
|
||||
|
||||
func unmarshalArgon2Parameters(params *RawNebulaArgon2Parameters) (*Argon2Parameters, error) {
|
||||
if params.Version < math.MinInt32 || params.Version > math.MaxInt32 {
|
||||
return nil, fmt.Errorf("Argon2Parameters Version must be at least %d and no more than %d", math.MinInt32, math.MaxInt32)
|
||||
}
|
||||
if params.Memory <= 0 || params.Memory > math.MaxUint32 {
|
||||
return nil, fmt.Errorf("Argon2Parameters Memory must be be greater than 0 and no more than %d KiB", uint32(math.MaxUint32))
|
||||
}
|
||||
if params.Parallelism <= 0 || params.Parallelism > math.MaxUint8 {
|
||||
return nil, fmt.Errorf("Argon2Parameters Parallelism must be be greater than 0 and no more than %d", math.MaxUint8)
|
||||
}
|
||||
if params.Iterations <= 0 || params.Iterations > math.MaxUint32 {
|
||||
return nil, fmt.Errorf("-argon-iterations must be be greater than 0 and no more than %d", uint32(math.MaxUint32))
|
||||
}
|
||||
|
||||
return &Argon2Parameters{
|
||||
version: params.Version,
|
||||
Memory: params.Memory,
|
||||
Parallelism: uint8(params.Parallelism),
|
||||
Iterations: params.Iterations,
|
||||
salt: params.Salt,
|
||||
}, nil
|
||||
|
||||
}
|
||||
|
||||
// DecryptAndUnmarshalSigningPrivateKey will try to pem decode and decrypt an Ed25519/ECDSA private key with
// the given passphrase, returning any other bytes b or an error on failure.
// The PEM banner selects the curve, the embedded metadata selects the cipher,
// and the decrypted key length is checked against the curve's expected size.
func DecryptAndUnmarshalSigningPrivateKey(passphrase, b []byte) (Curve, []byte, []byte, error) {
	var curve Curve

	k, r := pem.Decode(b)
	if k == nil {
		return curve, nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
	}

	// The banner tells us which curve the encrypted key is for.
	switch k.Type {
	case EncryptedEd25519PrivateKeyBanner:
		curve = Curve_CURVE25519
	case EncryptedECDSAP256PrivateKeyBanner:
		curve = Curve_P256
	default:
		return curve, nil, r, fmt.Errorf("bytes did not contain a proper nebula encrypted Ed25519/ECDSA private key banner")
	}

	ned, err := UnmarshalNebulaEncryptedData(k.Bytes)
	if err != nil {
		return curve, nil, r, err
	}

	// Decrypt with the algorithm recorded in the metadata; only AES-256-GCM
	// is currently supported.
	var bytes []byte
	switch ned.EncryptionMetadata.EncryptionAlgorithm {
	case "AES-256-GCM":
		bytes, err = aes256Decrypt(passphrase, &ned.EncryptionMetadata.Argon2Parameters, ned.Ciphertext)
		if err != nil {
			return curve, nil, r, err
		}
	default:
		return curve, nil, r, fmt.Errorf("unsupported encryption algorithm: %s", ned.EncryptionMetadata.EncryptionAlgorithm)
	}

	// Sanity-check the decrypted key length for the declared curve.
	switch curve {
	case Curve_CURVE25519:
		if len(bytes) != ed25519.PrivateKeySize {
			return curve, nil, r, fmt.Errorf("key was not %d bytes, is invalid ed25519 private key", ed25519.PrivateKeySize)
		}
	case Curve_P256:
		if len(bytes) != 32 {
			return curve, nil, r, fmt.Errorf("key was not 32 bytes, is invalid ECDSA P256 private key")
		}
	}

	return curve, bytes, r, nil
}
|
||||
|
|
|
@ -4,7 +4,6 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/crypto/argon2"
|
||||
)
|
||||
|
||||
|
@ -24,90 +23,3 @@ func TestNewArgon2Parameters(t *testing.T) {
|
|||
Iterations: 1,
|
||||
}, p)
|
||||
}
|
||||
|
||||
// TestDecryptAndUnmarshalSigningPrivateKey walks a PEM bundle containing a
// good key followed by three malformed entries, asserting the error and the
// remaining ("rest") bytes at each step, then checks the wrong-passphrase path.
func TestDecryptAndUnmarshalSigningPrivateKey(t *testing.T) {
	passphrase := []byte("DO NOT USE THIS KEY")
	privKey := []byte(`# A good key
-----BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCognnjujd67Vsv99p22wfAjQaDT
oCMW1mdjkU3gACKNW4MSXOWR9Sts4C81yk1RUku2gvGKs3TB9LYoklLsIizSYOLl
+Vs//O1T0I1Xbml2XBAROsb/VSoDln/6LMqR4B6fn6B3GOsLBBqRI8daDl9lRMPB
qrlJ69wer3ZUHFXA
-----END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
`)
	shortKey := []byte(`# A key which, once decrypted, is too short
-----BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCoga5h8owMEBWRSMMJKzuUvWce7
k0qlBkQmCxiuLh80MuASW70YcKt8jeEIS2axo2V6zAKA9TSMcCsJW1kDDXEtL/xe
GLF5T7sDl5COp4LU3pGxpV+KoeQ/S3gQCAAcnaOtnJQX+aSDnbO3jCHyP7U9CHbs
rQr3bdH3Oy/WiYU=
-----END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
`)
	invalidBanner := []byte(`# Invalid banner (not encrypted)
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
bWRp2CTVFhW9HD/qCd28ltDgK3w8VXSeaEYczDWos8sMUBqDb9jP3+NYwcS4lURG
XgLvodMXZJuaFPssp+WwtA==
-----END NEBULA ED25519 PRIVATE KEY-----
`)
	invalidPem := []byte(`# Not a valid PEM format
-BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCognnjujd67Vsv99p22wfAjQaDT
oCMW1mdjkU3gACKNW4MSXOWR9Sts4C81yk1RUku2gvGKs3TB9LYoklLsIizSYOLl
+Vs//O1T0I1Xbml2XBAROsb/VSoDln/6LMqR4B6fn6B3GOsLBBqRI8daDl9lRMPB
qrlJ69wer3ZUHFXA
-END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
`)

	keyBundle := appendByteSlices(privKey, shortKey, invalidBanner, invalidPem)

	// Success test case
	curve, k, rest, err := DecryptAndUnmarshalSigningPrivateKey(passphrase, keyBundle)
	require.NoError(t, err)
	assert.Equal(t, Curve_CURVE25519, curve)
	assert.Len(t, k, 64)
	assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))

	// Fail due to short key
	curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey(passphrase, rest)
	require.EqualError(t, err, "key was not 64 bytes, is invalid ed25519 private key")
	assert.Nil(t, k)
	assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))

	// Fail due to invalid banner
	curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey(passphrase, rest)
	require.EqualError(t, err, "bytes did not contain a proper nebula encrypted Ed25519/ECDSA private key banner")
	assert.Nil(t, k)
	assert.Equal(t, rest, invalidPem)

	// Fail due to invalid PEM format, because
	// it's missing the requisite pre-encapsulation boundary.
	curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey(passphrase, rest)
	require.EqualError(t, err, "input did not contain a valid PEM encoded block")
	assert.Nil(t, k)
	assert.Equal(t, rest, invalidPem)

	// Fail due to invalid passphrase
	curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey([]byte("invalid passphrase"), privKey)
	require.EqualError(t, err, "invalid passphrase or corrupt private key")
	assert.Nil(t, k)
	assert.Equal(t, []byte{}, rest)
}
|
||||
|
||||
func TestEncryptAndMarshalSigningPrivateKey(t *testing.T) {
|
||||
// Having proved that decryption works correctly above, we can test the
|
||||
// encryption function produces a value which can be decrypted
|
||||
passphrase := []byte("passphrase")
|
||||
bytes := []byte("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
|
||||
kdfParams := NewArgon2Parameters(64*1024, 4, 3)
|
||||
key, err := EncryptAndMarshalSigningPrivateKey(Curve_CURVE25519, bytes, passphrase, kdfParams)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the "key" can be decrypted successfully
|
||||
curve, k, rest, err := DecryptAndUnmarshalSigningPrivateKey(passphrase, key)
|
||||
assert.Len(t, k, 64)
|
||||
assert.Equal(t, Curve_CURVE25519, curve)
|
||||
assert.Equal(t, []byte{}, rest)
|
||||
require.NoError(t, err)
|
||||
|
||||
// EncryptAndMarshalEd25519PrivateKey does not create any errors itself
|
||||
}
|
||||
|
|
|
@ -2,48 +2,13 @@ package cert
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrBadFormat = errors.New("bad wire format")
|
||||
ErrRootExpired = errors.New("root certificate is expired")
|
||||
ErrExpired = errors.New("certificate is expired")
|
||||
ErrNotCA = errors.New("certificate is not a CA")
|
||||
ErrNotSelfSigned = errors.New("certificate is not self-signed")
|
||||
ErrBlockListed = errors.New("certificate is in the block list")
|
||||
ErrFingerprintMismatch = errors.New("certificate fingerprint did not match")
|
||||
ErrSignatureMismatch = errors.New("certificate signature did not match")
|
||||
ErrInvalidPublicKey = errors.New("invalid public key")
|
||||
ErrInvalidPrivateKey = errors.New("invalid private key")
|
||||
ErrPublicPrivateCurveMismatch = errors.New("public key does not match private key curve")
|
||||
ErrPublicPrivateKeyMismatch = errors.New("public key and private key are not a pair")
|
||||
ErrPrivateKeyEncrypted = errors.New("private key must be decrypted")
|
||||
ErrCaNotFound = errors.New("could not find ca for the certificate")
|
||||
|
||||
ErrInvalidPEMBlock = errors.New("input did not contain a valid PEM encoded block")
|
||||
ErrInvalidPEMCertificateBanner = errors.New("bytes did not contain a proper certificate banner")
|
||||
ErrInvalidPEMX25519PublicKeyBanner = errors.New("bytes did not contain a proper X25519 public key banner")
|
||||
ErrInvalidPEMX25519PrivateKeyBanner = errors.New("bytes did not contain a proper X25519 private key banner")
|
||||
ErrInvalidPEMEd25519PublicKeyBanner = errors.New("bytes did not contain a proper Ed25519 public key banner")
|
||||
ErrInvalidPEMEd25519PrivateKeyBanner = errors.New("bytes did not contain a proper Ed25519 private key banner")
|
||||
|
||||
ErrNoPeerStaticKey = errors.New("no peer static key was present")
|
||||
ErrNoPayload = errors.New("provided payload was empty")
|
||||
|
||||
ErrMissingDetails = errors.New("certificate did not contain details")
|
||||
ErrEmptySignature = errors.New("empty signature")
|
||||
ErrEmptyRawDetails = errors.New("empty rawDetails not allowed")
|
||||
ErrRootExpired = errors.New("root certificate is expired")
|
||||
ErrExpired = errors.New("certificate is expired")
|
||||
ErrNotCA = errors.New("certificate is not a CA")
|
||||
ErrNotSelfSigned = errors.New("certificate is not self-signed")
|
||||
ErrBlockListed = errors.New("certificate is in the block list")
|
||||
ErrSignatureMismatch = errors.New("certificate signature did not match")
|
||||
)
|
||||
|
||||
type ErrInvalidCertificateProperties struct {
|
||||
str string
|
||||
}
|
||||
|
||||
func NewErrInvalidCertificateProperties(format string, a ...any) error {
|
||||
return &ErrInvalidCertificateProperties{fmt.Sprintf(format, a...)}
|
||||
}
|
||||
|
||||
func (e *ErrInvalidCertificateProperties) Error() string {
|
||||
return e.str
|
||||
}
|
||||
|
|
|
@ -1,141 +0,0 @@
|
|||
package cert
|
||||
|
||||
import (
|
||||
"crypto/ecdh"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"io"
|
||||
"net/netip"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/curve25519"
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
// NewTestCaCert will create a new ca certificate
|
||||
func NewTestCaCert(version Version, curve Curve, before, after time.Time, networks, unsafeNetworks []netip.Prefix, groups []string) (Certificate, []byte, []byte, []byte) {
|
||||
var err error
|
||||
var pub, priv []byte
|
||||
|
||||
switch curve {
|
||||
case Curve_CURVE25519:
|
||||
pub, priv, err = ed25519.GenerateKey(rand.Reader)
|
||||
case Curve_P256:
|
||||
privk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
pub = elliptic.Marshal(elliptic.P256(), privk.PublicKey.X, privk.PublicKey.Y)
|
||||
priv = privk.D.FillBytes(make([]byte, 32))
|
||||
default:
|
||||
// There is no default to allow the underlying lib to respond with an error
|
||||
}
|
||||
|
||||
if before.IsZero() {
|
||||
before = time.Now().Add(time.Second * -60).Round(time.Second)
|
||||
}
|
||||
if after.IsZero() {
|
||||
after = time.Now().Add(time.Second * 60).Round(time.Second)
|
||||
}
|
||||
|
||||
t := &TBSCertificate{
|
||||
Curve: curve,
|
||||
Version: version,
|
||||
Name: "test ca",
|
||||
NotBefore: time.Unix(before.Unix(), 0),
|
||||
NotAfter: time.Unix(after.Unix(), 0),
|
||||
PublicKey: pub,
|
||||
Networks: networks,
|
||||
UnsafeNetworks: unsafeNetworks,
|
||||
Groups: groups,
|
||||
IsCA: true,
|
||||
}
|
||||
|
||||
c, err := t.Sign(nil, curve, priv)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
pem, err := c.MarshalPEM()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return c, pub, priv, pem
|
||||
}
|
||||
|
||||
// NewTestCert will generate a signed certificate with the provided details.
|
||||
// Expiry times are defaulted if you do not pass them in
|
||||
func NewTestCert(v Version, curve Curve, ca Certificate, key []byte, name string, before, after time.Time, networks, unsafeNetworks []netip.Prefix, groups []string) (Certificate, []byte, []byte, []byte) {
|
||||
if before.IsZero() {
|
||||
before = time.Now().Add(time.Second * -60).Round(time.Second)
|
||||
}
|
||||
|
||||
if after.IsZero() {
|
||||
after = time.Now().Add(time.Second * 60).Round(time.Second)
|
||||
}
|
||||
|
||||
if len(networks) == 0 {
|
||||
networks = []netip.Prefix{netip.MustParsePrefix("10.0.0.123/8")}
|
||||
}
|
||||
|
||||
var pub, priv []byte
|
||||
switch curve {
|
||||
case Curve_CURVE25519:
|
||||
pub, priv = X25519Keypair()
|
||||
case Curve_P256:
|
||||
pub, priv = P256Keypair()
|
||||
default:
|
||||
panic("unknown curve")
|
||||
}
|
||||
|
||||
nc := &TBSCertificate{
|
||||
Version: v,
|
||||
Curve: curve,
|
||||
Name: name,
|
||||
Networks: networks,
|
||||
UnsafeNetworks: unsafeNetworks,
|
||||
Groups: groups,
|
||||
NotBefore: time.Unix(before.Unix(), 0),
|
||||
NotAfter: time.Unix(after.Unix(), 0),
|
||||
PublicKey: pub,
|
||||
IsCA: false,
|
||||
}
|
||||
|
||||
c, err := nc.Sign(ca, ca.Curve(), key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
pem, err := c.MarshalPEM()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return c, pub, MarshalPrivateKeyToPEM(curve, priv), pem
|
||||
}
|
||||
|
||||
func X25519Keypair() ([]byte, []byte) {
|
||||
privkey := make([]byte, 32)
|
||||
if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return pubkey, privkey
|
||||
}
|
||||
|
||||
func P256Keypair() ([]byte, []byte) {
|
||||
privkey, err := ecdh.P256().GenerateKey(rand.Reader)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
pubkey := privkey.PublicKey()
|
||||
return pubkey.Bytes(), privkey.Bytes()
|
||||
}
|
161
cert/pem.go
161
cert/pem.go
|
@ -1,161 +0,0 @@
|
|||
package cert
|
||||
|
||||
import (
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
const (
|
||||
CertificateBanner = "NEBULA CERTIFICATE"
|
||||
CertificateV2Banner = "NEBULA CERTIFICATE V2"
|
||||
X25519PrivateKeyBanner = "NEBULA X25519 PRIVATE KEY"
|
||||
X25519PublicKeyBanner = "NEBULA X25519 PUBLIC KEY"
|
||||
EncryptedEd25519PrivateKeyBanner = "NEBULA ED25519 ENCRYPTED PRIVATE KEY"
|
||||
Ed25519PrivateKeyBanner = "NEBULA ED25519 PRIVATE KEY"
|
||||
Ed25519PublicKeyBanner = "NEBULA ED25519 PUBLIC KEY"
|
||||
|
||||
P256PrivateKeyBanner = "NEBULA P256 PRIVATE KEY"
|
||||
P256PublicKeyBanner = "NEBULA P256 PUBLIC KEY"
|
||||
EncryptedECDSAP256PrivateKeyBanner = "NEBULA ECDSA P256 ENCRYPTED PRIVATE KEY"
|
||||
ECDSAP256PrivateKeyBanner = "NEBULA ECDSA P256 PRIVATE KEY"
|
||||
)
|
||||
|
||||
// UnmarshalCertificateFromPEM will try to unmarshal the first pem block in a byte array, returning any non consumed
|
||||
// data or an error on failure
|
||||
func UnmarshalCertificateFromPEM(b []byte) (Certificate, []byte, error) {
|
||||
p, r := pem.Decode(b)
|
||||
if p == nil {
|
||||
return nil, r, ErrInvalidPEMBlock
|
||||
}
|
||||
|
||||
var c Certificate
|
||||
var err error
|
||||
|
||||
switch p.Type {
|
||||
// Implementations must validate the resulting certificate contains valid information
|
||||
case CertificateBanner:
|
||||
c, err = unmarshalCertificateV1(p.Bytes, nil)
|
||||
case CertificateV2Banner:
|
||||
c, err = unmarshalCertificateV2(p.Bytes, nil, Curve_CURVE25519)
|
||||
default:
|
||||
return nil, r, ErrInvalidPEMCertificateBanner
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, r, err
|
||||
}
|
||||
|
||||
return c, r, nil
|
||||
|
||||
}
|
||||
|
||||
func MarshalPublicKeyToPEM(curve Curve, b []byte) []byte {
|
||||
switch curve {
|
||||
case Curve_CURVE25519:
|
||||
return pem.EncodeToMemory(&pem.Block{Type: X25519PublicKeyBanner, Bytes: b})
|
||||
case Curve_P256:
|
||||
return pem.EncodeToMemory(&pem.Block{Type: P256PublicKeyBanner, Bytes: b})
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func UnmarshalPublicKeyFromPEM(b []byte) ([]byte, []byte, Curve, error) {
|
||||
k, r := pem.Decode(b)
|
||||
if k == nil {
|
||||
return nil, r, 0, fmt.Errorf("input did not contain a valid PEM encoded block")
|
||||
}
|
||||
var expectedLen int
|
||||
var curve Curve
|
||||
switch k.Type {
|
||||
case X25519PublicKeyBanner, Ed25519PublicKeyBanner:
|
||||
expectedLen = 32
|
||||
curve = Curve_CURVE25519
|
||||
case P256PublicKeyBanner:
|
||||
// Uncompressed
|
||||
expectedLen = 65
|
||||
curve = Curve_P256
|
||||
default:
|
||||
return nil, r, 0, fmt.Errorf("bytes did not contain a proper public key banner")
|
||||
}
|
||||
if len(k.Bytes) != expectedLen {
|
||||
return nil, r, 0, fmt.Errorf("key was not %d bytes, is invalid %s public key", expectedLen, curve)
|
||||
}
|
||||
return k.Bytes, r, curve, nil
|
||||
}
|
||||
|
||||
func MarshalPrivateKeyToPEM(curve Curve, b []byte) []byte {
|
||||
switch curve {
|
||||
case Curve_CURVE25519:
|
||||
return pem.EncodeToMemory(&pem.Block{Type: X25519PrivateKeyBanner, Bytes: b})
|
||||
case Curve_P256:
|
||||
return pem.EncodeToMemory(&pem.Block{Type: P256PrivateKeyBanner, Bytes: b})
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func MarshalSigningPrivateKeyToPEM(curve Curve, b []byte) []byte {
|
||||
switch curve {
|
||||
case Curve_CURVE25519:
|
||||
return pem.EncodeToMemory(&pem.Block{Type: Ed25519PrivateKeyBanner, Bytes: b})
|
||||
case Curve_P256:
|
||||
return pem.EncodeToMemory(&pem.Block{Type: ECDSAP256PrivateKeyBanner, Bytes: b})
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalPrivateKeyFromPEM will try to unmarshal the first pem block in a byte array, returning any non
|
||||
// consumed data or an error on failure
|
||||
func UnmarshalPrivateKeyFromPEM(b []byte) ([]byte, []byte, Curve, error) {
|
||||
k, r := pem.Decode(b)
|
||||
if k == nil {
|
||||
return nil, r, 0, fmt.Errorf("input did not contain a valid PEM encoded block")
|
||||
}
|
||||
var expectedLen int
|
||||
var curve Curve
|
||||
switch k.Type {
|
||||
case X25519PrivateKeyBanner:
|
||||
expectedLen = 32
|
||||
curve = Curve_CURVE25519
|
||||
case P256PrivateKeyBanner:
|
||||
expectedLen = 32
|
||||
curve = Curve_P256
|
||||
default:
|
||||
return nil, r, 0, fmt.Errorf("bytes did not contain a proper private key banner")
|
||||
}
|
||||
if len(k.Bytes) != expectedLen {
|
||||
return nil, r, 0, fmt.Errorf("key was not %d bytes, is invalid %s private key", expectedLen, curve)
|
||||
}
|
||||
return k.Bytes, r, curve, nil
|
||||
}
|
||||
|
||||
func UnmarshalSigningPrivateKeyFromPEM(b []byte) ([]byte, []byte, Curve, error) {
|
||||
k, r := pem.Decode(b)
|
||||
if k == nil {
|
||||
return nil, r, 0, fmt.Errorf("input did not contain a valid PEM encoded block")
|
||||
}
|
||||
var curve Curve
|
||||
switch k.Type {
|
||||
case EncryptedEd25519PrivateKeyBanner:
|
||||
return nil, nil, Curve_CURVE25519, ErrPrivateKeyEncrypted
|
||||
case EncryptedECDSAP256PrivateKeyBanner:
|
||||
return nil, nil, Curve_P256, ErrPrivateKeyEncrypted
|
||||
case Ed25519PrivateKeyBanner:
|
||||
curve = Curve_CURVE25519
|
||||
if len(k.Bytes) != ed25519.PrivateKeySize {
|
||||
return nil, r, 0, fmt.Errorf("key was not %d bytes, is invalid Ed25519 private key", ed25519.PrivateKeySize)
|
||||
}
|
||||
case ECDSAP256PrivateKeyBanner:
|
||||
curve = Curve_P256
|
||||
if len(k.Bytes) != 32 {
|
||||
return nil, r, 0, fmt.Errorf("key was not 32 bytes, is invalid ECDSA P256 private key")
|
||||
}
|
||||
default:
|
||||
return nil, r, 0, fmt.Errorf("bytes did not contain a proper Ed25519/ECDSA private key banner")
|
||||
}
|
||||
return k.Bytes, r, curve, nil
|
||||
}
|
293
cert/pem_test.go
293
cert/pem_test.go
|
@ -1,293 +0,0 @@
|
|||
package cert
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestUnmarshalCertificateFromPEM(t *testing.T) {
|
||||
goodCert := []byte(`
|
||||
# A good cert
|
||||
-----BEGIN NEBULA CERTIFICATE-----
|
||||
CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL
|
||||
vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv
|
||||
bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB
|
||||
-----END NEBULA CERTIFICATE-----
|
||||
`)
|
||||
badBanner := []byte(`# A bad banner
|
||||
-----BEGIN NOT A NEBULA CERTIFICATE-----
|
||||
CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL
|
||||
vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv
|
||||
bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB
|
||||
-----END NOT A NEBULA CERTIFICATE-----
|
||||
`)
|
||||
invalidPem := []byte(`# Not a valid PEM format
|
||||
-BEGIN NEBULA CERTIFICATE-----
|
||||
CkAKDm5lYnVsYSByb290IGNhKJfap9AFMJfg1+YGOiCUQGByMuNRhIlQBOyzXWbL
|
||||
vcKBwDhov900phEfJ5DN3kABEkDCq5R8qBiu8sl54yVfgRcQXEDt3cHr8UTSLszv
|
||||
bzBEr00kERQxxTzTsH8cpYEgRoipvmExvg8WP8NdAJEYJosB
|
||||
-END NEBULA CERTIFICATE----`)
|
||||
|
||||
certBundle := appendByteSlices(goodCert, badBanner, invalidPem)
|
||||
|
||||
// Success test case
|
||||
cert, rest, err := UnmarshalCertificateFromPEM(certBundle)
|
||||
assert.NotNil(t, cert)
|
||||
assert.Equal(t, rest, append(badBanner, invalidPem...))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Fail due to invalid banner.
|
||||
cert, rest, err = UnmarshalCertificateFromPEM(rest)
|
||||
assert.Nil(t, cert)
|
||||
assert.Equal(t, rest, invalidPem)
|
||||
require.EqualError(t, err, "bytes did not contain a proper certificate banner")
|
||||
|
||||
// Fail due to ivalid PEM format, because
|
||||
// it's missing the requisite pre-encapsulation boundary.
|
||||
cert, rest, err = UnmarshalCertificateFromPEM(rest)
|
||||
assert.Nil(t, cert)
|
||||
assert.Equal(t, rest, invalidPem)
|
||||
require.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
||||
}
|
||||
|
||||
func TestUnmarshalSigningPrivateKeyFromPEM(t *testing.T) {
|
||||
privKey := []byte(`# A good key
|
||||
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
|
||||
-----END NEBULA ED25519 PRIVATE KEY-----
|
||||
`)
|
||||
privP256Key := []byte(`# A good key
|
||||
-----BEGIN NEBULA ECDSA P256 PRIVATE KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||
-----END NEBULA ECDSA P256 PRIVATE KEY-----
|
||||
`)
|
||||
shortKey := []byte(`# A short key
|
||||
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
-----END NEBULA ED25519 PRIVATE KEY-----
|
||||
`)
|
||||
invalidBanner := []byte(`# Invalid banner
|
||||
-----BEGIN NOT A NEBULA PRIVATE KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
|
||||
-----END NOT A NEBULA PRIVATE KEY-----
|
||||
`)
|
||||
invalidPem := []byte(`# Not a valid PEM format
|
||||
-BEGIN NEBULA ED25519 PRIVATE KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
|
||||
-END NEBULA ED25519 PRIVATE KEY-----`)
|
||||
|
||||
keyBundle := appendByteSlices(privKey, privP256Key, shortKey, invalidBanner, invalidPem)
|
||||
|
||||
// Success test case
|
||||
k, rest, curve, err := UnmarshalSigningPrivateKeyFromPEM(keyBundle)
|
||||
assert.Len(t, k, 64)
|
||||
assert.Equal(t, rest, appendByteSlices(privP256Key, shortKey, invalidBanner, invalidPem))
|
||||
assert.Equal(t, Curve_CURVE25519, curve)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Success test case
|
||||
k, rest, curve, err = UnmarshalSigningPrivateKeyFromPEM(rest)
|
||||
assert.Len(t, k, 32)
|
||||
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
|
||||
assert.Equal(t, Curve_P256, curve)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Fail due to short key
|
||||
k, rest, curve, err = UnmarshalSigningPrivateKeyFromPEM(rest)
|
||||
assert.Nil(t, k)
|
||||
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
|
||||
require.EqualError(t, err, "key was not 64 bytes, is invalid Ed25519 private key")
|
||||
|
||||
// Fail due to invalid banner
|
||||
k, rest, curve, err = UnmarshalSigningPrivateKeyFromPEM(rest)
|
||||
assert.Nil(t, k)
|
||||
assert.Equal(t, rest, invalidPem)
|
||||
require.EqualError(t, err, "bytes did not contain a proper Ed25519/ECDSA private key banner")
|
||||
|
||||
// Fail due to ivalid PEM format, because
|
||||
// it's missing the requisite pre-encapsulation boundary.
|
||||
k, rest, curve, err = UnmarshalSigningPrivateKeyFromPEM(rest)
|
||||
assert.Nil(t, k)
|
||||
assert.Equal(t, rest, invalidPem)
|
||||
require.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
||||
}
|
||||
|
||||
func TestUnmarshalPrivateKeyFromPEM(t *testing.T) {
|
||||
privKey := []byte(`# A good key
|
||||
-----BEGIN NEBULA X25519 PRIVATE KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||
-----END NEBULA X25519 PRIVATE KEY-----
|
||||
`)
|
||||
privP256Key := []byte(`# A good key
|
||||
-----BEGIN NEBULA P256 PRIVATE KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||
-----END NEBULA P256 PRIVATE KEY-----
|
||||
`)
|
||||
shortKey := []byte(`# A short key
|
||||
-----BEGIN NEBULA X25519 PRIVATE KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
|
||||
-----END NEBULA X25519 PRIVATE KEY-----
|
||||
`)
|
||||
invalidBanner := []byte(`# Invalid banner
|
||||
-----BEGIN NOT A NEBULA PRIVATE KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||
-----END NOT A NEBULA PRIVATE KEY-----
|
||||
`)
|
||||
invalidPem := []byte(`# Not a valid PEM format
|
||||
-BEGIN NEBULA X25519 PRIVATE KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||
-END NEBULA X25519 PRIVATE KEY-----`)
|
||||
|
||||
keyBundle := appendByteSlices(privKey, privP256Key, shortKey, invalidBanner, invalidPem)
|
||||
|
||||
// Success test case
|
||||
k, rest, curve, err := UnmarshalPrivateKeyFromPEM(keyBundle)
|
||||
assert.Len(t, k, 32)
|
||||
assert.Equal(t, rest, appendByteSlices(privP256Key, shortKey, invalidBanner, invalidPem))
|
||||
assert.Equal(t, Curve_CURVE25519, curve)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Success test case
|
||||
k, rest, curve, err = UnmarshalPrivateKeyFromPEM(rest)
|
||||
assert.Len(t, k, 32)
|
||||
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
|
||||
assert.Equal(t, Curve_P256, curve)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Fail due to short key
|
||||
k, rest, curve, err = UnmarshalPrivateKeyFromPEM(rest)
|
||||
assert.Nil(t, k)
|
||||
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
|
||||
require.EqualError(t, err, "key was not 32 bytes, is invalid CURVE25519 private key")
|
||||
|
||||
// Fail due to invalid banner
|
||||
k, rest, curve, err = UnmarshalPrivateKeyFromPEM(rest)
|
||||
assert.Nil(t, k)
|
||||
assert.Equal(t, rest, invalidPem)
|
||||
require.EqualError(t, err, "bytes did not contain a proper private key banner")
|
||||
|
||||
// Fail due to ivalid PEM format, because
|
||||
// it's missing the requisite pre-encapsulation boundary.
|
||||
k, rest, curve, err = UnmarshalPrivateKeyFromPEM(rest)
|
||||
assert.Nil(t, k)
|
||||
assert.Equal(t, rest, invalidPem)
|
||||
require.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
||||
}
|
||||
|
||||
func TestUnmarshalPublicKeyFromPEM(t *testing.T) {
|
||||
pubKey := []byte(`# A good key
|
||||
-----BEGIN NEBULA ED25519 PUBLIC KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||
-----END NEBULA ED25519 PUBLIC KEY-----
|
||||
`)
|
||||
shortKey := []byte(`# A short key
|
||||
-----BEGIN NEBULA ED25519 PUBLIC KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
|
||||
-----END NEBULA ED25519 PUBLIC KEY-----
|
||||
`)
|
||||
invalidBanner := []byte(`# Invalid banner
|
||||
-----BEGIN NOT A NEBULA PUBLIC KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||
-----END NOT A NEBULA PUBLIC KEY-----
|
||||
`)
|
||||
invalidPem := []byte(`# Not a valid PEM format
|
||||
-BEGIN NEBULA ED25519 PUBLIC KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||
-END NEBULA ED25519 PUBLIC KEY-----`)
|
||||
|
||||
keyBundle := appendByteSlices(pubKey, shortKey, invalidBanner, invalidPem)
|
||||
|
||||
// Success test case
|
||||
k, rest, curve, err := UnmarshalPublicKeyFromPEM(keyBundle)
|
||||
assert.Len(t, k, 32)
|
||||
assert.Equal(t, Curve_CURVE25519, curve)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
|
||||
|
||||
// Fail due to short key
|
||||
k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
|
||||
assert.Nil(t, k)
|
||||
assert.Equal(t, Curve_CURVE25519, curve)
|
||||
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
|
||||
require.EqualError(t, err, "key was not 32 bytes, is invalid CURVE25519 public key")
|
||||
|
||||
// Fail due to invalid banner
|
||||
k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
|
||||
assert.Nil(t, k)
|
||||
assert.Equal(t, Curve_CURVE25519, curve)
|
||||
require.EqualError(t, err, "bytes did not contain a proper public key banner")
|
||||
assert.Equal(t, rest, invalidPem)
|
||||
|
||||
// Fail due to ivalid PEM format, because
|
||||
// it's missing the requisite pre-encapsulation boundary.
|
||||
k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
|
||||
assert.Nil(t, k)
|
||||
assert.Equal(t, Curve_CURVE25519, curve)
|
||||
assert.Equal(t, rest, invalidPem)
|
||||
require.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
||||
}
|
||||
|
||||
func TestUnmarshalX25519PublicKey(t *testing.T) {
|
||||
pubKey := []byte(`# A good key
|
||||
-----BEGIN NEBULA X25519 PUBLIC KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||
-----END NEBULA X25519 PUBLIC KEY-----
|
||||
`)
|
||||
pubP256Key := []byte(`# A good key
|
||||
-----BEGIN NEBULA P256 PUBLIC KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAA=
|
||||
-----END NEBULA P256 PUBLIC KEY-----
|
||||
`)
|
||||
shortKey := []byte(`# A short key
|
||||
-----BEGIN NEBULA X25519 PUBLIC KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
|
||||
-----END NEBULA X25519 PUBLIC KEY-----
|
||||
`)
|
||||
invalidBanner := []byte(`# Invalid banner
|
||||
-----BEGIN NOT A NEBULA PUBLIC KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||
-----END NOT A NEBULA PUBLIC KEY-----
|
||||
`)
|
||||
invalidPem := []byte(`# Not a valid PEM format
|
||||
-BEGIN NEBULA X25519 PUBLIC KEY-----
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
|
||||
-END NEBULA X25519 PUBLIC KEY-----`)
|
||||
|
||||
keyBundle := appendByteSlices(pubKey, pubP256Key, shortKey, invalidBanner, invalidPem)
|
||||
|
||||
// Success test case
|
||||
k, rest, curve, err := UnmarshalPublicKeyFromPEM(keyBundle)
|
||||
assert.Len(t, k, 32)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, rest, appendByteSlices(pubP256Key, shortKey, invalidBanner, invalidPem))
|
||||
assert.Equal(t, Curve_CURVE25519, curve)
|
||||
|
||||
// Success test case
|
||||
k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
|
||||
assert.Len(t, k, 65)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, rest, appendByteSlices(shortKey, invalidBanner, invalidPem))
|
||||
assert.Equal(t, Curve_P256, curve)
|
||||
|
||||
// Fail due to short key
|
||||
k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
|
||||
assert.Nil(t, k)
|
||||
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
|
||||
require.EqualError(t, err, "key was not 32 bytes, is invalid CURVE25519 public key")
|
||||
|
||||
// Fail due to invalid banner
|
||||
k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
|
||||
assert.Nil(t, k)
|
||||
require.EqualError(t, err, "bytes did not contain a proper public key banner")
|
||||
assert.Equal(t, rest, invalidPem)
|
||||
|
||||
// Fail due to ivalid PEM format, because
|
||||
// it's missing the requisite pre-encapsulation boundary.
|
||||
k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
|
||||
assert.Nil(t, k)
|
||||
assert.Equal(t, rest, invalidPem)
|
||||
require.EqualError(t, err, "input did not contain a valid PEM encoded block")
|
||||
}
|
167
cert/sign.go
167
cert/sign.go
|
@ -1,167 +0,0 @@
|
|||
package cert
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net/netip"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TBSCertificate represents a certificate intended to be signed.
|
||||
// It is invalid to use this structure as a Certificate.
|
||||
type TBSCertificate struct {
|
||||
Version Version
|
||||
Name string
|
||||
Networks []netip.Prefix
|
||||
UnsafeNetworks []netip.Prefix
|
||||
Groups []string
|
||||
IsCA bool
|
||||
NotBefore time.Time
|
||||
NotAfter time.Time
|
||||
PublicKey []byte
|
||||
Curve Curve
|
||||
issuer string
|
||||
}
|
||||
|
||||
type beingSignedCertificate interface {
|
||||
// fromTBSCertificate copies the values from the TBSCertificate to this versions internal representation
|
||||
// Implementations must validate the resulting certificate contains valid information
|
||||
fromTBSCertificate(*TBSCertificate) error
|
||||
|
||||
// marshalForSigning returns the bytes that should be signed
|
||||
marshalForSigning() ([]byte, error)
|
||||
|
||||
// setSignature sets the signature for the certificate that has just been signed. The signature must not be blank.
|
||||
setSignature([]byte) error
|
||||
}
|
||||
|
||||
type SignerLambda func(certBytes []byte) ([]byte, error)
|
||||
|
||||
// Sign will create a sealed certificate using details provided by the TBSCertificate as long as those
|
||||
// details do not violate constraints of the signing certificate.
|
||||
// If the TBSCertificate is a CA then signer must be nil.
|
||||
func (t *TBSCertificate) Sign(signer Certificate, curve Curve, key []byte) (Certificate, error) {
|
||||
switch t.Curve {
|
||||
case Curve_CURVE25519:
|
||||
pk := ed25519.PrivateKey(key)
|
||||
sp := func(certBytes []byte) ([]byte, error) {
|
||||
sig := ed25519.Sign(pk, certBytes)
|
||||
return sig, nil
|
||||
}
|
||||
return t.SignWith(signer, curve, sp)
|
||||
case Curve_P256:
|
||||
pk := &ecdsa.PrivateKey{
|
||||
PublicKey: ecdsa.PublicKey{
|
||||
Curve: elliptic.P256(),
|
||||
},
|
||||
// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L95
|
||||
D: new(big.Int).SetBytes(key),
|
||||
}
|
||||
// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L119
|
||||
pk.X, pk.Y = pk.Curve.ScalarBaseMult(key)
|
||||
sp := func(certBytes []byte) ([]byte, error) {
|
||||
// We need to hash first for ECDSA
|
||||
// - https://pkg.go.dev/crypto/ecdsa#SignASN1
|
||||
hashed := sha256.Sum256(certBytes)
|
||||
return ecdsa.SignASN1(rand.Reader, pk, hashed[:])
|
||||
}
|
||||
return t.SignWith(signer, curve, sp)
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid curve: %s", t.Curve)
|
||||
}
|
||||
}
|
||||
|
||||
// SignWith does the same thing as sign, but uses the function in `sp` to calculate the signature.
// You should only use SignWith if you do not have direct access to your private key.
//
// If signer is non-nil the certificate is signed by that CA: CA constraints are
// checked and the issuer fingerprint is recorded. If signer is nil the
// certificate must be a self-signed CA (IsCA == true).
// Returns the finished Certificate or an error.
func (t *TBSCertificate) SignWith(signer Certificate, curve Curve, sp SignerLambda) (Certificate, error) {
	// The caller-supplied curve must match the curve recorded in the TBS data,
	// otherwise the signature would not verify against the embedded public key.
	if curve != t.Curve {
		return nil, fmt.Errorf("curve in cert and private key supplied don't match")
	}

	if signer != nil {
		// A CA certificate can only ever be self-signed.
		if t.IsCA {
			return nil, fmt.Errorf("can not sign a CA certificate with another")
		}

		// Ensure the requested validity window, groups, and networks are all
		// permitted by the signing CA.
		err := checkCAConstraints(signer, t.NotBefore, t.NotAfter, t.Groups, t.Networks, t.UnsafeNetworks)
		if err != nil {
			return nil, err
		}

		// Record the CA fingerprint as the issuer of this certificate.
		issuer, err := signer.Fingerprint()
		if err != nil {
			return nil, fmt.Errorf("error computing issuer: %v", err)
		}
		t.issuer = issuer
	} else {
		if !t.IsCA {
			return nil, fmt.Errorf("self signed certificates must have IsCA set to true")
		}
	}

	// Build the version-specific in-memory representation to be signed.
	var c beingSignedCertificate
	switch t.Version {
	case Version1:
		c = &certificateV1{}
		err := c.fromTBSCertificate(t)
		if err != nil {
			return nil, err
		}
	case Version2:
		c = &certificateV2{}
		err := c.fromTBSCertificate(t)
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("unknown cert version %d", t.Version)
	}

	// Serialize the certificate body, sign those bytes with the caller's
	// signing lambda, then attach the signature.
	certBytes, err := c.marshalForSigning()
	if err != nil {
		return nil, err
	}

	sig, err := sp(certBytes)
	if err != nil {
		return nil, err
	}

	err = c.setSignature(sig)
	if err != nil {
		return nil, err
	}

	// The concrete certificate types are expected to satisfy Certificate once
	// signed; failure here indicates an internal inconsistency.
	sc, ok := c.(Certificate)
	if !ok {
		return nil, fmt.Errorf("invalid certificate")
	}

	return sc, nil
}
|
||||
|
||||
func comparePrefix(a, b netip.Prefix) int {
|
||||
addr := a.Addr().Compare(b.Addr())
|
||||
if addr == 0 {
|
||||
return a.Bits() - b.Bits()
|
||||
}
|
||||
return addr
|
||||
}
|
||||
|
||||
// findDuplicatePrefix returns an error if there is a duplicate prefix in the pre-sorted input slice sortedPrefixes
|
||||
func findDuplicatePrefix(sortedPrefixes []netip.Prefix) error {
|
||||
if len(sortedPrefixes) < 2 {
|
||||
return nil
|
||||
}
|
||||
for i := 1; i < len(sortedPrefixes); i++ {
|
||||
if comparePrefix(sortedPrefixes[i], sortedPrefixes[i-1]) == 0 {
|
||||
return NewErrInvalidCertificateProperties("duplicate network detected: %v", sortedPrefixes[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,91 +0,0 @@
|
|||
package cert
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"net/netip"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestCertificateV1_Sign(t *testing.T) {
|
||||
before := time.Now().Add(time.Second * -60).Round(time.Second)
|
||||
after := time.Now().Add(time.Second * 60).Round(time.Second)
|
||||
pubKey := []byte("1234567890abcedfghij1234567890ab")
|
||||
|
||||
tbs := TBSCertificate{
|
||||
Version: Version1,
|
||||
Name: "testing",
|
||||
Networks: []netip.Prefix{
|
||||
mustParsePrefixUnmapped("10.1.1.1/24"),
|
||||
mustParsePrefixUnmapped("10.1.1.2/16"),
|
||||
},
|
||||
UnsafeNetworks: []netip.Prefix{
|
||||
mustParsePrefixUnmapped("9.1.1.2/24"),
|
||||
mustParsePrefixUnmapped("9.1.1.3/24"),
|
||||
},
|
||||
Groups: []string{"test-group1", "test-group2", "test-group3"},
|
||||
NotBefore: before,
|
||||
NotAfter: after,
|
||||
PublicKey: pubKey,
|
||||
IsCA: false,
|
||||
}
|
||||
|
||||
pub, priv, err := ed25519.GenerateKey(rand.Reader)
|
||||
c, err := tbs.Sign(&certificateV1{details: detailsV1{notBefore: before, notAfter: after}}, Curve_CURVE25519, priv)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, c)
|
||||
assert.True(t, c.CheckSignature(pub))
|
||||
|
||||
b, err := c.Marshal()
|
||||
require.NoError(t, err)
|
||||
uc, err := unmarshalCertificateV1(b, nil)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, uc)
|
||||
}
|
||||
|
||||
// TestCertificateV1_SignP256 exercises the sign -> verify -> marshal ->
// unmarshal round trip for a v1 certificate using a P-256 ECDSA key.
func TestCertificateV1_SignP256(t *testing.T) {
	before := time.Now().Add(time.Second * -60).Round(time.Second)
	after := time.Now().Add(time.Second * 60).Round(time.Second)
	pubKey := []byte("01234567890abcedfghij1234567890ab1234567890abcedfghij1234567890ab")

	tbs := TBSCertificate{
		Version: Version1,
		Name:    "testing",
		Networks: []netip.Prefix{
			mustParsePrefixUnmapped("10.1.1.1/24"),
			mustParsePrefixUnmapped("10.1.1.2/16"),
		},
		UnsafeNetworks: []netip.Prefix{
			mustParsePrefixUnmapped("9.1.1.2/24"),
			mustParsePrefixUnmapped("9.1.1.3/16"),
		},
		Groups:    []string{"test-group1", "test-group2", "test-group3"},
		NotBefore: before,
		NotAfter:  after,
		PublicKey: pubKey,
		IsCA:      false,
		Curve:     Curve_P256,
	}

	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	require.NoError(t, err)
	// Uncompressed point encoding of the public key.
	// NOTE(review): elliptic.Marshal is deprecated in newer Go releases;
	// consider ecdh.PublicKey.Bytes when the toolchain allows.
	pub := elliptic.Marshal(elliptic.P256(), priv.PublicKey.X, priv.PublicKey.Y)
	// Fixed-width 32-byte big-endian scalar, as Sign expects for P256.
	rawPriv := priv.D.FillBytes(make([]byte, 32))

	c, err := tbs.Sign(&certificateV1{details: detailsV1{notBefore: before, notAfter: after}}, Curve_P256, rawPriv)
	require.NoError(t, err)
	assert.NotNil(t, c)
	assert.True(t, c.CheckSignature(pub))

	b, err := c.Marshal()
	require.NoError(t, err)
	uc, err := unmarshalCertificateV1(b, nil)
	require.NoError(t, err)
	assert.NotNil(t, uc)
}
|
|
@ -1,138 +0,0 @@
|
|||
package cert_test
|
||||
|
||||
import (
|
||||
"crypto/ecdh"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"io"
|
||||
"net/netip"
|
||||
"time"
|
||||
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"golang.org/x/crypto/curve25519"
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
// NewTestCaCert will create a new ca certificate
|
||||
func NewTestCaCert(version cert.Version, curve cert.Curve, before, after time.Time, networks, unsafeNetworks []netip.Prefix, groups []string) (cert.Certificate, []byte, []byte, []byte) {
|
||||
var err error
|
||||
var pub, priv []byte
|
||||
|
||||
switch curve {
|
||||
case cert.Curve_CURVE25519:
|
||||
pub, priv, err = ed25519.GenerateKey(rand.Reader)
|
||||
case cert.Curve_P256:
|
||||
privk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
pub = elliptic.Marshal(elliptic.P256(), privk.PublicKey.X, privk.PublicKey.Y)
|
||||
priv = privk.D.FillBytes(make([]byte, 32))
|
||||
default:
|
||||
// There is no default to allow the underlying lib to respond with an error
|
||||
}
|
||||
|
||||
if before.IsZero() {
|
||||
before = time.Now().Add(time.Second * -60).Round(time.Second)
|
||||
}
|
||||
if after.IsZero() {
|
||||
after = time.Now().Add(time.Second * 60).Round(time.Second)
|
||||
}
|
||||
|
||||
t := &cert.TBSCertificate{
|
||||
Curve: curve,
|
||||
Version: version,
|
||||
Name: "test ca",
|
||||
NotBefore: time.Unix(before.Unix(), 0),
|
||||
NotAfter: time.Unix(after.Unix(), 0),
|
||||
PublicKey: pub,
|
||||
Networks: networks,
|
||||
UnsafeNetworks: unsafeNetworks,
|
||||
Groups: groups,
|
||||
IsCA: true,
|
||||
}
|
||||
|
||||
c, err := t.Sign(nil, curve, priv)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
pem, err := c.MarshalPEM()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return c, pub, priv, pem
|
||||
}
|
||||
|
||||
// NewTestCert will generate a signed certificate with the provided details.
// Expiry times are defaulted if you do not pass them in
//
// The certificate is signed by ca using key (the CA private key). Returns the
// signed certificate, the raw public key, the PEM-encoded private key, and the
// PEM-encoded certificate. Panics on any failure; this is a test helper.
func NewTestCert(v cert.Version, curve cert.Curve, ca cert.Certificate, key []byte, name string, before, after time.Time, networks, unsafeNetworks []netip.Prefix, groups []string) (cert.Certificate, []byte, []byte, []byte) {
	if before.IsZero() {
		before = time.Now().Add(time.Second * -60).Round(time.Second)
	}

	if after.IsZero() {
		after = time.Now().Add(time.Second * 60).Round(time.Second)
	}

	// Generate a keypair matching the requested curve.
	var pub, priv []byte
	switch curve {
	case cert.Curve_CURVE25519:
		pub, priv = X25519Keypair()
	case cert.Curve_P256:
		pub, priv = P256Keypair()
	default:
		panic("unknown curve")
	}

	nc := &cert.TBSCertificate{
		Version:        v,
		Curve:          curve,
		Name:           name,
		Networks:       networks,
		UnsafeNetworks: unsafeNetworks,
		Groups:         groups,
		NotBefore:      time.Unix(before.Unix(), 0),
		NotAfter:       time.Unix(after.Unix(), 0),
		PublicKey:      pub,
		IsCA:           false,
	}

	c, err := nc.Sign(ca, ca.Curve(), key)
	if err != nil {
		panic(err)
	}

	pem, err := c.MarshalPEM()
	if err != nil {
		panic(err)
	}

	return c, pub, cert.MarshalPrivateKeyToPEM(curve, priv), pem
}
|
||||
|
||||
func X25519Keypair() ([]byte, []byte) {
|
||||
privkey := make([]byte, 32)
|
||||
if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return pubkey, privkey
|
||||
}
|
||||
|
||||
// P256Keypair generates a random P-256 keypair, returning the uncompressed
// public key bytes followed by the private scalar bytes. Panics on failure;
// test helper.
func P256Keypair() ([]byte, []byte) {
	key, err := ecdh.P256().GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	return key.PublicKey().Bytes(), key.Bytes()
}
|
10
cidr/parse.go
Normal file
10
cidr/parse.go
Normal file
|
@ -0,0 +1,10 @@
|
|||
package cidr
|
||||
|
||||
import "net"
|
||||
|
||||
// Parse is a convenience function that returns only the IPNet
// This function ignores errors since it is primarily a test helper, the result could be nil
func Parse(s string) *net.IPNet {
	_, ipNet, _ := net.ParseCIDR(s)
	return ipNet
}
|
167
cidr/tree4.go
Normal file
167
cidr/tree4.go
Normal file
|
@ -0,0 +1,167 @@
|
|||
package cidr
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/slackhq/nebula/iputil"
|
||||
)
|
||||
|
||||
// Node is a single node of the binary trie: left is followed for a 0 bit,
// right for a 1 bit. A non-nil value marks the end of an inserted CIDR.
type Node struct {
	left   *Node
	right  *Node
	parent *Node
	value  interface{}
}

// entry pairs an inserted CIDR with a pointer to the value stored for it.
type entry struct {
	CIDR  *net.IPNet
	Value *interface{}
}

// Tree4 is a binary trie over IPv4 addresses, most significant bit first.
// list retains every inserted CIDR in insertion order for List().
type Tree4 struct {
	root *Node
	list []entry
}

const (
	// startbit is the most significant bit of a 32-bit IPv4 address.
	startbit = iputil.VpnIp(0x80000000)
)
|
||||
|
||||
func NewTree4() *Tree4 {
|
||||
tree := new(Tree4)
|
||||
tree.root = &Node{}
|
||||
tree.list = []entry{}
|
||||
return tree
|
||||
}
|
||||
|
||||
// AddCIDR inserts cidr into the trie with the given value, replacing the value
// (and the corresponding List() entry) if the exact same range already exists.
func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) {
	bit := startbit
	node := tree.root
	next := tree.root

	ip := iputil.Ip2VpnIp(cidr.IP)
	mask := iputil.Ip2VpnIp(cidr.Mask)

	// Find our last ancestor in the tree
	for bit&mask != 0 {
		if ip&bit != 0 {
			next = node.right
		} else {
			next = node.left
		}

		if next == nil {
			break
		}

		bit = bit >> 1
		node = next
	}

	// We already have this range so update the value
	if next != nil {
		// Drop any prior list entry for the same CIDR string so the new
		// entry (appended below) is the only one List() reports.
		addCIDR := cidr.String()
		for i, v := range tree.list {
			if addCIDR == v.CIDR.String() {
				tree.list = append(tree.list[:i], tree.list[i+1:]...)
				break
			}
		}

		tree.list = append(tree.list, entry{CIDR: cidr, Value: &val})
		node.value = val
		return
	}

	// Build up the rest of the tree we don't already have
	for bit&mask != 0 {
		next = &Node{}
		next.parent = node

		if ip&bit != 0 {
			node.right = next
		} else {
			node.left = next
		}

		bit >>= 1
		node = next
	}

	// Final node marks our cidr, set the value
	node.value = val
	tree.list = append(tree.list, entry{CIDR: cidr, Value: &val})
}
|
||||
|
||||
// Contains finds the first match, which may be the least specific
|
||||
func (tree *Tree4) Contains(ip iputil.VpnIp) (value interface{}) {
|
||||
bit := startbit
|
||||
node := tree.root
|
||||
|
||||
for node != nil {
|
||||
if node.value != nil {
|
||||
return node.value
|
||||
}
|
||||
|
||||
if ip&bit != 0 {
|
||||
node = node.right
|
||||
} else {
|
||||
node = node.left
|
||||
}
|
||||
|
||||
bit >>= 1
|
||||
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
// MostSpecificContains finds the most specific match
|
||||
func (tree *Tree4) MostSpecificContains(ip iputil.VpnIp) (value interface{}) {
|
||||
bit := startbit
|
||||
node := tree.root
|
||||
|
||||
for node != nil {
|
||||
if node.value != nil {
|
||||
value = node.value
|
||||
}
|
||||
|
||||
if ip&bit != 0 {
|
||||
node = node.right
|
||||
} else {
|
||||
node = node.left
|
||||
}
|
||||
|
||||
bit >>= 1
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
// Match finds the most specific match
// NOTE(review): a value is only returned when the walk consumed all 32 bits
// (bit == 0), so in practice this only reports values stored at full depth
// (e.g. /32 routes, or a /0 which marks the root and is never reached by the
// final check). Confirm that shallower matches are intentionally excluded.
func (tree *Tree4) Match(ip iputil.VpnIp) (value interface{}) {
	bit := startbit
	node := tree.root
	lastNode := node

	for node != nil {
		lastNode = node
		if ip&bit != 0 {
			node = node.right
		} else {
			node = node.left
		}

		bit >>= 1
	}

	// Only accept the final node if the full address was walked.
	if bit == 0 && lastNode != nil {
		value = lastNode.value
	}
	return value
}
|
||||
|
||||
// List will return all CIDRs and their current values. Do not modify the contents!
func (tree *Tree4) List() []entry {
	// Returns the internal slice directly; callers must treat it as read-only.
	return tree.list
}
|
167
cidr/tree4_test.go
Normal file
167
cidr/tree4_test.go
Normal file
|
@ -0,0 +1,167 @@
|
|||
package cidr
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/slackhq/nebula/iputil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestCIDRTree_List verifies that re-adding the same CIDR replaces its list
// entry (last value wins) rather than accumulating duplicates.
func TestCIDRTree_List(t *testing.T) {
	tree := NewTree4()
	tree.AddCIDR(Parse("1.0.0.0/16"), "1")
	tree.AddCIDR(Parse("1.0.0.0/8"), "2")
	tree.AddCIDR(Parse("1.0.0.0/16"), "3")
	tree.AddCIDR(Parse("1.0.0.0/16"), "4")
	list := tree.List()
	assert.Len(t, list, 2)
	assert.Equal(t, "1.0.0.0/8", list[0].CIDR.String())
	assert.Equal(t, "2", *list[0].Value)
	assert.Equal(t, "1.0.0.0/16", list[1].CIDR.String())
	assert.Equal(t, "4", *list[1].Value)
}
|
||||
|
||||
// TestCIDRTree_Contains covers first-match lookups, including boundary
// addresses of each range, misses, and a /0 catch-all.
func TestCIDRTree_Contains(t *testing.T) {
	tree := NewTree4()
	tree.AddCIDR(Parse("1.0.0.0/8"), "1")
	tree.AddCIDR(Parse("2.1.0.0/16"), "2")
	tree.AddCIDR(Parse("3.1.1.0/24"), "3")
	tree.AddCIDR(Parse("4.1.1.0/24"), "4a")
	tree.AddCIDR(Parse("4.1.1.1/32"), "4b")
	tree.AddCIDR(Parse("4.1.2.1/32"), "4c")
	tree.AddCIDR(Parse("254.0.0.0/4"), "5")

	tests := []struct {
		Result interface{}
		IP     string
	}{
		{"1", "1.0.0.0"},
		{"1", "1.255.255.255"},
		{"2", "2.1.0.0"},
		{"2", "2.1.255.255"},
		{"3", "3.1.1.0"},
		{"3", "3.1.1.255"},
		{"4a", "4.1.1.255"},
		// Contains is first-match: the /24 wins over the /32 here.
		{"4a", "4.1.1.1"},
		{"5", "240.0.0.0"},
		{"5", "255.255.255.255"},
		{nil, "239.0.0.0"},
		{nil, "4.1.2.2"},
	}

	for _, tt := range tests {
		assert.Equal(t, tt.Result, tree.Contains(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
	}

	// A /0 entry matches every address.
	tree = NewTree4()
	tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
	assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
	assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
}
|
||||
|
||||
// TestCIDRTree_MostSpecificContains covers longest-prefix lookups with nested
// ranges, misses, and a /0 catch-all.
func TestCIDRTree_MostSpecificContains(t *testing.T) {
	tree := NewTree4()
	tree.AddCIDR(Parse("1.0.0.0/8"), "1")
	tree.AddCIDR(Parse("2.1.0.0/16"), "2")
	tree.AddCIDR(Parse("3.1.1.0/24"), "3")
	tree.AddCIDR(Parse("4.1.1.0/24"), "4a")
	tree.AddCIDR(Parse("4.1.1.0/30"), "4b")
	tree.AddCIDR(Parse("4.1.1.1/32"), "4c")
	tree.AddCIDR(Parse("254.0.0.0/4"), "5")

	tests := []struct {
		Result interface{}
		IP     string
	}{
		{"1", "1.0.0.0"},
		{"1", "1.255.255.255"},
		{"2", "2.1.0.0"},
		{"2", "2.1.255.255"},
		{"3", "3.1.1.0"},
		{"3", "3.1.1.255"},
		{"4a", "4.1.1.255"},
		// Nested ranges: the deepest matching prefix wins.
		{"4b", "4.1.1.2"},
		{"4c", "4.1.1.1"},
		{"5", "240.0.0.0"},
		{"5", "255.255.255.255"},
		{nil, "239.0.0.0"},
		{nil, "4.1.2.2"},
	}

	for _, tt := range tests {
		assert.Equal(t, tt.Result, tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
	}

	// A /0 entry matches every address.
	tree = NewTree4()
	tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
	assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
	assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
}
|
||||
|
||||
// TestCIDRTree_Match covers exact full-depth lookups for /32 routes.
func TestCIDRTree_Match(t *testing.T) {
	tree := NewTree4()
	tree.AddCIDR(Parse("4.1.1.0/32"), "1a")
	tree.AddCIDR(Parse("4.1.1.1/32"), "1b")

	tests := []struct {
		Result interface{}
		IP     string
	}{
		{"1a", "4.1.1.0"},
		{"1b", "4.1.1.1"},
	}

	for _, tt := range tests {
		assert.Equal(t, tt.Result, tree.Match(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
	}

	tree = NewTree4()
	tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
	// NOTE(review): these final assertions call Contains, not Match, inside a
	// Match test. Match on a /0-only tree would not return "cool" (the walk
	// ends at the root before consuming 32 bits), so switching to Match would
	// change the outcome — confirm whether this Contains usage is deliberate.
	assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
	assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
}
|
||||
|
||||
// BenchmarkCIDRTree_Contains measures first-match lookups for an address that
// hits a stored route and one that walks the trie without a match.
func BenchmarkCIDRTree_Contains(b *testing.B) {
	tree := NewTree4()
	tree.AddCIDR(Parse("1.1.0.0/16"), "1")
	tree.AddCIDR(Parse("1.2.1.1/32"), "1")
	tree.AddCIDR(Parse("192.2.1.1/32"), "1")
	tree.AddCIDR(Parse("172.2.1.1/32"), "1")

	ip := iputil.Ip2VpnIp(net.ParseIP("1.2.1.1"))
	b.Run("found", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			tree.Contains(ip)
		}
	})

	ip = iputil.Ip2VpnIp(net.ParseIP("1.2.1.255"))
	b.Run("not found", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			tree.Contains(ip)
		}
	})
}
|
||||
|
||||
// BenchmarkCIDRTree_Match measures full-depth lookups for an address that hits
// a stored /32 route and one that misses.
func BenchmarkCIDRTree_Match(b *testing.B) {
	tree := NewTree4()
	tree.AddCIDR(Parse("1.1.0.0/16"), "1")
	tree.AddCIDR(Parse("1.2.1.1/32"), "1")
	tree.AddCIDR(Parse("192.2.1.1/32"), "1")
	tree.AddCIDR(Parse("172.2.1.1/32"), "1")

	ip := iputil.Ip2VpnIp(net.ParseIP("1.2.1.1"))
	b.Run("found", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			tree.Match(ip)
		}
	})

	ip = iputil.Ip2VpnIp(net.ParseIP("1.2.1.255"))
	b.Run("not found", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			tree.Match(ip)
		}
	})
}
|
185
cidr/tree6.go
Normal file
185
cidr/tree6.go
Normal file
|
@ -0,0 +1,185 @@
|
|||
package cidr
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/slackhq/nebula/iputil"
|
||||
)
|
||||
|
||||
// startbit6 is the most significant bit of one 64-bit half of an IPv6 address.
const startbit6 = uint64(1 << 63)

// Tree6 keeps separate binary tries for IPv4 and IPv6 lookups.
type Tree6 struct {
	root4 *Node
	root6 *Node
}
|
||||
|
||||
func NewTree6() *Tree6 {
|
||||
tree := new(Tree6)
|
||||
tree.root4 = &Node{}
|
||||
tree.root6 = &Node{}
|
||||
return tree
|
||||
}
|
||||
|
||||
// AddCIDR inserts cidr into the v4 or v6 trie (chosen by address family) with
// the given value. The address is consumed in 4-byte words; the node cursor
// carries over between words so a v6 prefix spans multiple word walks.
// Unlike Tree4, no entry list is maintained.
func (tree *Tree6) AddCIDR(cidr *net.IPNet, val interface{}) {
	var node, next *Node

	// v4-mapped v6 addresses are narrowed to 4 bytes and stored in root4.
	cidrIP, ipv4 := isIPV4(cidr.IP)
	if ipv4 {
		node = tree.root4
		next = tree.root4

	} else {
		node = tree.root6
		next = tree.root6
	}

	for i := 0; i < len(cidrIP); i += 4 {
		ip := iputil.Ip2VpnIp(cidrIP[i : i+4])
		mask := iputil.Ip2VpnIp(cidr.Mask[i : i+4])
		bit := startbit

		// Find our last ancestor in the tree
		for bit&mask != 0 {
			if ip&bit != 0 {
				next = node.right
			} else {
				next = node.left
			}

			if next == nil {
				break
			}

			bit = bit >> 1
			node = next
		}

		// Build up the rest of the tree we don't already have
		for bit&mask != 0 {
			next = &Node{}
			next.parent = node

			if ip&bit != 0 {
				node.right = next
			} else {
				node.left = next
			}

			bit >>= 1
			node = next
		}
	}

	// Final node marks our cidr, set the value
	node.value = val
}
|
||||
|
||||
// Finds the most specific match
//
// MostSpecificContains walks the v4 or v6 trie (chosen by address family) in
// 4-byte words, remembering the deepest value seen. The node cursor carries
// over between words so multi-word v6 prefixes are followed end to end.
func (tree *Tree6) MostSpecificContains(ip net.IP) (value interface{}) {
	var node *Node

	wholeIP, ipv4 := isIPV4(ip)
	if ipv4 {
		node = tree.root4
	} else {
		node = tree.root6
	}

	for i := 0; i < len(wholeIP); i += 4 {
		// Shadows the parameter deliberately: this is the current 32-bit word.
		ip := iputil.Ip2VpnIp(wholeIP[i : i+4])
		bit := startbit

		for node != nil {
			if node.value != nil {
				value = node.value
			}

			// Word exhausted; continue from this node with the next word.
			if bit == 0 {
				break
			}

			if ip&bit != 0 {
				node = node.right
			} else {
				node = node.left
			}

			bit >>= 1
		}
	}

	return value
}
|
||||
|
||||
func (tree *Tree6) MostSpecificContainsIpV4(ip iputil.VpnIp) (value interface{}) {
|
||||
bit := startbit
|
||||
node := tree.root4
|
||||
|
||||
for node != nil {
|
||||
if node.value != nil {
|
||||
value = node.value
|
||||
}
|
||||
|
||||
if ip&bit != 0 {
|
||||
node = node.right
|
||||
} else {
|
||||
node = node.left
|
||||
}
|
||||
|
||||
bit >>= 1
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
// MostSpecificContainsIpV6 finds the most specific match for an IPv6 address
// supplied as its high and low 64-bit halves, walking the v6 trie one half at
// a time and remembering the deepest value seen.
func (tree *Tree6) MostSpecificContainsIpV6(hi, lo uint64) (value interface{}) {
	ip := hi
	node := tree.root6

	// First pass consumes hi, second pass consumes lo.
	for i := 0; i < 2; i++ {
		bit := startbit6

		for node != nil {
			if node.value != nil {
				value = node.value
			}

			// This 64-bit half is exhausted; continue with the next one.
			if bit == 0 {
				break
			}

			if ip&bit != 0 {
				node = node.right
			} else {
				node = node.left
			}

			bit >>= 1
		}

		ip = lo
	}

	return value
}
|
||||
|
||||
func isIPV4(ip net.IP) (net.IP, bool) {
|
||||
if len(ip) == net.IPv4len {
|
||||
return ip, true
|
||||
}
|
||||
|
||||
if len(ip) == net.IPv6len && isZeros(ip[0:10]) && ip[10] == 0xff && ip[11] == 0xff {
|
||||
return ip[12:16], true
|
||||
}
|
||||
|
||||
return ip, false
|
||||
}
|
||||
|
||||
func isZeros(p net.IP) bool {
|
||||
for i := 0; i < len(p); i++ {
|
||||
if p[i] != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
81
cidr/tree6_test.go
Normal file
81
cidr/tree6_test.go
Normal file
|
@ -0,0 +1,81 @@
|
|||
package cidr
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestCIDR6Tree_MostSpecificContains covers longest-prefix lookups across
// mixed v4 and v6 routes, nested v6 prefixes spanning word boundaries, misses,
// and /0 catch-alls in both families.
func TestCIDR6Tree_MostSpecificContains(t *testing.T) {
	tree := NewTree6()
	tree.AddCIDR(Parse("1.0.0.0/8"), "1")
	tree.AddCIDR(Parse("2.1.0.0/16"), "2")
	tree.AddCIDR(Parse("3.1.1.0/24"), "3")
	tree.AddCIDR(Parse("4.1.1.1/24"), "4a")
	tree.AddCIDR(Parse("4.1.1.1/30"), "4b")
	tree.AddCIDR(Parse("4.1.1.1/32"), "4c")
	tree.AddCIDR(Parse("254.0.0.0/4"), "5")
	tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a")
	tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b")
	tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")

	tests := []struct {
		Result interface{}
		IP     string
	}{
		{"1", "1.0.0.0"},
		{"1", "1.255.255.255"},
		{"2", "2.1.0.0"},
		{"2", "2.1.255.255"},
		{"3", "3.1.1.0"},
		{"3", "3.1.1.255"},
		{"4a", "4.1.1.255"},
		{"4b", "4.1.1.2"},
		{"4c", "4.1.1.1"},
		{"5", "240.0.0.0"},
		{"5", "255.255.255.255"},
		{"6a", "1:2:0:4:1:1:1:1"},
		{"6b", "1:2:0:4:5:1:1:1"},
		{"6c", "1:2:0:4:5:0:0:0"},
		{nil, "239.0.0.0"},
		{nil, "4.1.2.2"},
	}

	for _, tt := range tests {
		assert.Equal(t, tt.Result, tree.MostSpecificContains(net.ParseIP(tt.IP)))
	}

	// /0 catch-alls: the v4 and v6 tries are independent.
	tree = NewTree6()
	tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
	tree.AddCIDR(Parse("::/0"), "cool6")
	assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("0.0.0.0")))
	assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("255.255.255.255")))
	assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("::")))
	assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("1:2:3:4:5:6:7:8")))
}
|
||||
|
||||
// TestCIDR6Tree_MostSpecificContainsIpV6 verifies the split hi/lo uint64 entry
// point agrees with lookups through nested v6 prefixes.
func TestCIDR6Tree_MostSpecificContainsIpV6(t *testing.T) {
	tree := NewTree6()
	tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a")
	tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b")
	tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")

	tests := []struct {
		Result interface{}
		IP     string
	}{
		{"6a", "1:2:0:4:1:1:1:1"},
		{"6b", "1:2:0:4:5:1:1:1"},
		{"6c", "1:2:0:4:5:0:0:0"},
	}

	for _, tt := range tests {
		// Split the 16-byte address into its big-endian 64-bit halves.
		ip := net.ParseIP(tt.IP)
		hi := binary.BigEndian.Uint64(ip[:8])
		lo := binary.BigEndian.Uint64(ip[8:])

		assert.Equal(t, tt.Result, tree.MostSpecificContainsIpV6(hi, lo))
	}
}
|
|
@ -7,15 +7,15 @@ import (
|
|||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"net/netip"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/skip2/go-qrcode"
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"github.com/slackhq/nebula/pkclient"
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
|
@ -27,43 +27,32 @@ type caFlags struct {
|
|||
outCertPath *string
|
||||
outQRPath *string
|
||||
groups *string
|
||||
networks *string
|
||||
unsafeNetworks *string
|
||||
ips *string
|
||||
subnets *string
|
||||
argonMemory *uint
|
||||
argonIterations *uint
|
||||
argonParallelism *uint
|
||||
encryption *bool
|
||||
version *uint
|
||||
|
||||
curve *string
|
||||
p11url *string
|
||||
|
||||
// Deprecated options
|
||||
ips *string
|
||||
subnets *string
|
||||
curve *string
|
||||
}
|
||||
|
||||
func newCaFlags() *caFlags {
|
||||
cf := caFlags{set: flag.NewFlagSet("ca", flag.ContinueOnError)}
|
||||
cf.set.Usage = func() {}
|
||||
cf.name = cf.set.String("name", "", "Required: name of the certificate authority")
|
||||
cf.version = cf.set.Uint("version", uint(cert.Version2), "Optional: version of the certificate format to use")
|
||||
cf.duration = cf.set.Duration("duration", time.Duration(time.Hour*8760), "Optional: amount of time the certificate should be valid for. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\"")
|
||||
cf.outKeyPath = cf.set.String("out-key", "ca.key", "Optional: path to write the private key to")
|
||||
cf.outCertPath = cf.set.String("out-crt", "ca.crt", "Optional: path to write the certificate to")
|
||||
cf.outQRPath = cf.set.String("out-qr", "", "Optional: output a qr code image (png) of the certificate")
|
||||
cf.groups = cf.set.String("groups", "", "Optional: comma separated list of groups. This will limit which groups subordinate certs can use")
|
||||
cf.networks = cf.set.String("networks", "", "Optional: comma separated list of ip address and network in CIDR notation. This will limit which ip addresses and networks subordinate certs can use in networks")
|
||||
cf.unsafeNetworks = cf.set.String("unsafe-networks", "", "Optional: comma separated list of ip address and network in CIDR notation. This will limit which ip addresses and networks subordinate certs can use in unsafe networks")
|
||||
cf.ips = cf.set.String("ips", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use for ip addresses")
|
||||
cf.subnets = cf.set.String("subnets", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use in subnets")
|
||||
cf.argonMemory = cf.set.Uint("argon-memory", 2*1024*1024, "Optional: Argon2 memory parameter (in KiB) used for encrypted private key passphrase")
|
||||
cf.argonParallelism = cf.set.Uint("argon-parallelism", 4, "Optional: Argon2 parallelism parameter used for encrypted private key passphrase")
|
||||
cf.argonIterations = cf.set.Uint("argon-iterations", 1, "Optional: Argon2 iterations parameter used for encrypted private key passphrase")
|
||||
cf.encryption = cf.set.Bool("encrypt", false, "Optional: prompt for passphrase and write out-key in an encrypted format")
|
||||
cf.curve = cf.set.String("curve", "25519", "EdDSA/ECDSA Curve (25519, P256)")
|
||||
cf.p11url = p11Flag(cf.set)
|
||||
|
||||
cf.ips = cf.set.String("ips", "", "Deprecated, see -networks")
|
||||
cf.subnets = cf.set.String("subnets", "", "Deprecated, see -unsafe-networks")
|
||||
return &cf
|
||||
}
|
||||
|
||||
|
@ -88,21 +77,17 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
|
|||
return err
|
||||
}
|
||||
|
||||
isP11 := len(*cf.p11url) > 0
|
||||
|
||||
if err := mustFlagString("name", cf.name); err != nil {
|
||||
return err
|
||||
}
|
||||
if !isP11 {
|
||||
if err = mustFlagString("out-key", cf.outKeyPath); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := mustFlagString("out-key", cf.outKeyPath); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := mustFlagString("out-crt", cf.outCertPath); err != nil {
|
||||
return err
|
||||
}
|
||||
var kdfParams *cert.Argon2Parameters
|
||||
if !isP11 && *cf.encryption {
|
||||
if *cf.encryption {
|
||||
if kdfParams, err = parseArgonParameters(*cf.argonMemory, *cf.argonParallelism, *cf.argonIterations); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -122,57 +107,44 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
|
|||
}
|
||||
}
|
||||
|
||||
version := cert.Version(*cf.version)
|
||||
if version != cert.Version1 && version != cert.Version2 {
|
||||
return newHelpErrorf("-version must be either %v or %v", cert.Version1, cert.Version2)
|
||||
}
|
||||
|
||||
var networks []netip.Prefix
|
||||
if *cf.networks == "" && *cf.ips != "" {
|
||||
// Pull up deprecated -ips flag if needed
|
||||
*cf.networks = *cf.ips
|
||||
}
|
||||
|
||||
if *cf.networks != "" {
|
||||
for _, rs := range strings.Split(*cf.networks, ",") {
|
||||
var ips []*net.IPNet
|
||||
if *cf.ips != "" {
|
||||
for _, rs := range strings.Split(*cf.ips, ",") {
|
||||
rs := strings.Trim(rs, " ")
|
||||
if rs != "" {
|
||||
n, err := netip.ParsePrefix(rs)
|
||||
ip, ipNet, err := net.ParseCIDR(rs)
|
||||
if err != nil {
|
||||
return newHelpErrorf("invalid -networks definition: %s", rs)
|
||||
return newHelpErrorf("invalid ip definition: %s", err)
|
||||
}
|
||||
if version == cert.Version1 && !n.Addr().Is4() {
|
||||
return newHelpErrorf("invalid -networks definition: v1 certificates can only be ipv4, have %s", rs)
|
||||
if ip.To4() == nil {
|
||||
return newHelpErrorf("invalid ip definition: can only be ipv4, have %s", rs)
|
||||
}
|
||||
networks = append(networks, n)
|
||||
|
||||
ipNet.IP = ip
|
||||
ips = append(ips, ipNet)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var unsafeNetworks []netip.Prefix
|
||||
if *cf.unsafeNetworks == "" && *cf.subnets != "" {
|
||||
// Pull up deprecated -subnets flag if needed
|
||||
*cf.unsafeNetworks = *cf.subnets
|
||||
}
|
||||
|
||||
if *cf.unsafeNetworks != "" {
|
||||
for _, rs := range strings.Split(*cf.unsafeNetworks, ",") {
|
||||
var subnets []*net.IPNet
|
||||
if *cf.subnets != "" {
|
||||
for _, rs := range strings.Split(*cf.subnets, ",") {
|
||||
rs := strings.Trim(rs, " ")
|
||||
if rs != "" {
|
||||
n, err := netip.ParsePrefix(rs)
|
||||
_, s, err := net.ParseCIDR(rs)
|
||||
if err != nil {
|
||||
return newHelpErrorf("invalid -unsafe-networks definition: %s", rs)
|
||||
return newHelpErrorf("invalid subnet definition: %s", err)
|
||||
}
|
||||
if version == cert.Version1 && !n.Addr().Is4() {
|
||||
return newHelpErrorf("invalid -unsafe-networks definition: v1 certificates can only be ipv4, have %s", rs)
|
||||
if s.IP.To4() == nil {
|
||||
return newHelpErrorf("invalid subnet definition: can only be ipv4, have %s", rs)
|
||||
}
|
||||
unsafeNetworks = append(unsafeNetworks, n)
|
||||
subnets = append(subnets, s)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var passphrase []byte
|
||||
if !isP11 && *cf.encryption {
|
||||
if *cf.encryption {
|
||||
for i := 0; i < 5; i++ {
|
||||
out.Write([]byte("Enter passphrase: "))
|
||||
passphrase, err = pr.ReadPassword()
|
||||
|
@ -195,114 +167,73 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
|
|||
|
||||
var curve cert.Curve
|
||||
var pub, rawPriv []byte
|
||||
var p11Client *pkclient.PKClient
|
||||
|
||||
if isP11 {
|
||||
switch *cf.curve {
|
||||
case "P256":
|
||||
curve = cert.Curve_P256
|
||||
default:
|
||||
return fmt.Errorf("invalid curve for PKCS#11: %s", *cf.curve)
|
||||
}
|
||||
|
||||
p11Client, err = pkclient.FromUrl(*cf.p11url)
|
||||
switch *cf.curve {
|
||||
case "25519", "X25519", "Curve25519", "CURVE25519":
|
||||
curve = cert.Curve_CURVE25519
|
||||
pub, rawPriv, err = ed25519.GenerateKey(rand.Reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while creating PKCS#11 client: %w", err)
|
||||
return fmt.Errorf("error while generating ed25519 keys: %s", err)
|
||||
}
|
||||
defer func(client *pkclient.PKClient) {
|
||||
_ = client.Close()
|
||||
}(p11Client)
|
||||
pub, err = p11Client.GetPubKey()
|
||||
case "P256":
|
||||
var key *ecdsa.PrivateKey
|
||||
curve = cert.Curve_P256
|
||||
key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while getting public key with PKCS#11: %w", err)
|
||||
}
|
||||
} else {
|
||||
switch *cf.curve {
|
||||
case "25519", "X25519", "Curve25519", "CURVE25519":
|
||||
curve = cert.Curve_CURVE25519
|
||||
pub, rawPriv, err = ed25519.GenerateKey(rand.Reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while generating ed25519 keys: %s", err)
|
||||
}
|
||||
case "P256":
|
||||
var key *ecdsa.PrivateKey
|
||||
curve = cert.Curve_P256
|
||||
key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while generating ecdsa keys: %s", err)
|
||||
}
|
||||
|
||||
// ecdh.PrivateKey lets us get at the encoded bytes, even though
|
||||
// we aren't using ECDH here.
|
||||
eKey, err := key.ECDH()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while converting ecdsa key: %s", err)
|
||||
}
|
||||
rawPriv = eKey.Bytes()
|
||||
pub = eKey.PublicKey().Bytes()
|
||||
default:
|
||||
return fmt.Errorf("invalid curve: %s", *cf.curve)
|
||||
return fmt.Errorf("error while generating ecdsa keys: %s", err)
|
||||
}
|
||||
// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L60
|
||||
rawPriv = key.D.FillBytes(make([]byte, 32))
|
||||
pub = elliptic.Marshal(elliptic.P256(), key.X, key.Y)
|
||||
}
|
||||
|
||||
t := &cert.TBSCertificate{
|
||||
Version: version,
|
||||
Name: *cf.name,
|
||||
Groups: groups,
|
||||
Networks: networks,
|
||||
UnsafeNetworks: unsafeNetworks,
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().Add(*cf.duration),
|
||||
PublicKey: pub,
|
||||
IsCA: true,
|
||||
Curve: curve,
|
||||
nc := cert.NebulaCertificate{
|
||||
Details: cert.NebulaCertificateDetails{
|
||||
Name: *cf.name,
|
||||
Groups: groups,
|
||||
Ips: ips,
|
||||
Subnets: subnets,
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().Add(*cf.duration),
|
||||
PublicKey: pub,
|
||||
IsCA: true,
|
||||
Curve: curve,
|
||||
},
|
||||
}
|
||||
|
||||
if !isP11 {
|
||||
if _, err := os.Stat(*cf.outKeyPath); err == nil {
|
||||
return fmt.Errorf("refusing to overwrite existing CA key: %s", *cf.outKeyPath)
|
||||
}
|
||||
if _, err := os.Stat(*cf.outKeyPath); err == nil {
|
||||
return fmt.Errorf("refusing to overwrite existing CA key: %s", *cf.outKeyPath)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(*cf.outCertPath); err == nil {
|
||||
return fmt.Errorf("refusing to overwrite existing CA cert: %s", *cf.outCertPath)
|
||||
}
|
||||
|
||||
var c cert.Certificate
|
||||
var b []byte
|
||||
|
||||
if isP11 {
|
||||
c, err = t.SignWith(nil, curve, p11Client.SignASN1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while signing with PKCS#11: %w", err)
|
||||
}
|
||||
} else {
|
||||
c, err = t.Sign(nil, curve, rawPriv)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while signing: %s", err)
|
||||
}
|
||||
|
||||
if *cf.encryption {
|
||||
b, err = cert.EncryptAndMarshalSigningPrivateKey(curve, rawPriv, passphrase, kdfParams)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while encrypting out-key: %s", err)
|
||||
}
|
||||
} else {
|
||||
b = cert.MarshalSigningPrivateKeyToPEM(curve, rawPriv)
|
||||
}
|
||||
|
||||
err = os.WriteFile(*cf.outKeyPath, b, 0600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while writing out-key: %s", err)
|
||||
}
|
||||
err = nc.Sign(curve, rawPriv)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while signing: %s", err)
|
||||
}
|
||||
|
||||
b, err = c.MarshalPEM()
|
||||
if *cf.encryption {
|
||||
b, err := cert.EncryptAndMarshalSigningPrivateKey(curve, rawPriv, passphrase, kdfParams)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while encrypting out-key: %s", err)
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(*cf.outKeyPath, b, 0600)
|
||||
} else {
|
||||
err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalSigningPrivateKey(curve, rawPriv), 0600)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while writing out-key: %s", err)
|
||||
}
|
||||
|
||||
b, err := nc.MarshalToPEM()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while marshalling certificate: %s", err)
|
||||
}
|
||||
|
||||
err = os.WriteFile(*cf.outCertPath, b, 0600)
|
||||
err = ioutil.WriteFile(*cf.outCertPath, b, 0600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while writing out-crt: %s", err)
|
||||
}
|
||||
|
@ -313,7 +244,7 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
|
|||
return fmt.Errorf("error while generating qr code: %s", err)
|
||||
}
|
||||
|
||||
err = os.WriteFile(*cf.outQRPath, b, 0600)
|
||||
err = ioutil.WriteFile(*cf.outQRPath, b, 0600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while writing out-qr: %s", err)
|
||||
}
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"bytes"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
@ -14,9 +15,10 @@ import (
|
|||
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
//TODO: test file permissions
|
||||
|
||||
func Test_caSummary(t *testing.T) {
|
||||
assert.Equal(t, "ca <flags>: create a self signed certificate authority", caSummary())
|
||||
}
|
||||
|
@ -42,24 +44,17 @@ func Test_caHelp(t *testing.T) {
|
|||
" -groups string\n"+
|
||||
" \tOptional: comma separated list of groups. This will limit which groups subordinate certs can use\n"+
|
||||
" -ips string\n"+
|
||||
" Deprecated, see -networks\n"+
|
||||
" \tOptional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use for ip addresses\n"+
|
||||
" -name string\n"+
|
||||
" \tRequired: name of the certificate authority\n"+
|
||||
" -networks string\n"+
|
||||
" \tOptional: comma separated list of ip address and network in CIDR notation. This will limit which ip addresses and networks subordinate certs can use in networks\n"+
|
||||
" -out-crt string\n"+
|
||||
" \tOptional: path to write the certificate to (default \"ca.crt\")\n"+
|
||||
" -out-key string\n"+
|
||||
" \tOptional: path to write the private key to (default \"ca.key\")\n"+
|
||||
" -out-qr string\n"+
|
||||
" \tOptional: output a qr code image (png) of the certificate\n"+
|
||||
optionalPkcs11String(" -pkcs11 string\n \tOptional: PKCS#11 URI to an existing private key\n")+
|
||||
" -subnets string\n"+
|
||||
" \tDeprecated, see -unsafe-networks\n"+
|
||||
" -unsafe-networks string\n"+
|
||||
" \tOptional: comma separated list of ip address and network in CIDR notation. This will limit which ip addresses and networks subordinate certs can use in unsafe networks\n"+
|
||||
" -version uint\n"+
|
||||
" \tOptional: version of the certificate format to use (default 2)\n",
|
||||
" \tOptional: comma separated list of ipv4 address and network in CIDR notation. This will limit which ipv4 addresses and networks subordinate certs can use in subnets\n",
|
||||
ob.String(),
|
||||
)
|
||||
}
|
||||
|
@ -88,94 +83,93 @@ func Test_ca(t *testing.T) {
|
|||
|
||||
// required args
|
||||
assertHelpError(t, ca(
|
||||
[]string{"-version", "1", "-out-key", "nope", "-out-crt", "nope", "duration", "100m"}, ob, eb, nopw,
|
||||
[]string{"-out-key", "nope", "-out-crt", "nope", "duration", "100m"}, ob, eb, nopw,
|
||||
), "-name is required")
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
// ipv4 only ips
|
||||
assertHelpError(t, ca([]string{"-version", "1", "-name", "ipv6", "-ips", "100::100/100"}, ob, eb, nopw), "invalid -networks definition: v1 certificates can only be ipv4, have 100::100/100")
|
||||
assertHelpError(t, ca([]string{"-name", "ipv6", "-ips", "100::100/100"}, ob, eb, nopw), "invalid ip definition: can only be ipv4, have 100::100/100")
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
// ipv4 only subnets
|
||||
assertHelpError(t, ca([]string{"-version", "1", "-name", "ipv6", "-subnets", "100::100/100"}, ob, eb, nopw), "invalid -unsafe-networks definition: v1 certificates can only be ipv4, have 100::100/100")
|
||||
assertHelpError(t, ca([]string{"-name", "ipv6", "-subnets", "100::100/100"}, ob, eb, nopw), "invalid subnet definition: can only be ipv4, have 100::100/100")
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
// failed key write
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args := []string{"-version", "1", "-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey"}
|
||||
require.EqualError(t, ca(args, ob, eb, nopw), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
|
||||
args := []string{"-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey"}
|
||||
assert.EqualError(t, ca(args, ob, eb, nopw), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
// create temp key file
|
||||
keyF, err := os.CreateTemp("", "test.key")
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, os.Remove(keyF.Name()))
|
||||
keyF, err := ioutil.TempFile("", "test.key")
|
||||
assert.Nil(t, err)
|
||||
os.Remove(keyF.Name())
|
||||
|
||||
// failed cert write
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name()}
|
||||
require.EqualError(t, ca(args, ob, eb, nopw), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
|
||||
args = []string{"-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name()}
|
||||
assert.EqualError(t, ca(args, ob, eb, nopw), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
// create temp cert file
|
||||
crtF, err := os.CreateTemp("", "test.crt")
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, os.Remove(crtF.Name()))
|
||||
require.NoError(t, os.Remove(keyF.Name()))
|
||||
crtF, err := ioutil.TempFile("", "test.crt")
|
||||
assert.Nil(t, err)
|
||||
os.Remove(crtF.Name())
|
||||
os.Remove(keyF.Name())
|
||||
|
||||
// test proper cert with removed empty groups and subnets
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||
require.NoError(t, ca(args, ob, eb, nopw))
|
||||
args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||
assert.Nil(t, ca(args, ob, eb, nopw))
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
// read cert and key files
|
||||
rb, _ := os.ReadFile(keyF.Name())
|
||||
lKey, b, c, err := cert.UnmarshalSigningPrivateKeyFromPEM(rb)
|
||||
assert.Equal(t, cert.Curve_CURVE25519, c)
|
||||
assert.Empty(t, b)
|
||||
require.NoError(t, err)
|
||||
rb, _ := ioutil.ReadFile(keyF.Name())
|
||||
lKey, b, err := cert.UnmarshalEd25519PrivateKey(rb)
|
||||
assert.Len(t, b, 0)
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, lKey, 64)
|
||||
|
||||
rb, _ = os.ReadFile(crtF.Name())
|
||||
lCrt, b, err := cert.UnmarshalCertificateFromPEM(rb)
|
||||
assert.Empty(t, b)
|
||||
require.NoError(t, err)
|
||||
rb, _ = ioutil.ReadFile(crtF.Name())
|
||||
lCrt, b, err := cert.UnmarshalNebulaCertificateFromPEM(rb)
|
||||
assert.Len(t, b, 0)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, "test", lCrt.Name())
|
||||
assert.Empty(t, lCrt.Networks())
|
||||
assert.True(t, lCrt.IsCA())
|
||||
assert.Equal(t, []string{"1", "2", "3", "4", "5"}, lCrt.Groups())
|
||||
assert.Empty(t, lCrt.UnsafeNetworks())
|
||||
assert.Len(t, lCrt.PublicKey(), 32)
|
||||
assert.Equal(t, time.Duration(time.Minute*100), lCrt.NotAfter().Sub(lCrt.NotBefore()))
|
||||
assert.Equal(t, "", lCrt.Issuer())
|
||||
assert.True(t, lCrt.CheckSignature(lCrt.PublicKey()))
|
||||
assert.Equal(t, "test", lCrt.Details.Name)
|
||||
assert.Len(t, lCrt.Details.Ips, 0)
|
||||
assert.True(t, lCrt.Details.IsCA)
|
||||
assert.Equal(t, []string{"1", "2", "3", "4", "5"}, lCrt.Details.Groups)
|
||||
assert.Len(t, lCrt.Details.Subnets, 0)
|
||||
assert.Len(t, lCrt.Details.PublicKey, 32)
|
||||
assert.Equal(t, time.Duration(time.Minute*100), lCrt.Details.NotAfter.Sub(lCrt.Details.NotBefore))
|
||||
assert.Equal(t, "", lCrt.Details.Issuer)
|
||||
assert.True(t, lCrt.CheckSignature(lCrt.Details.PublicKey))
|
||||
|
||||
// test encrypted key
|
||||
os.Remove(keyF.Name())
|
||||
os.Remove(crtF.Name())
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||
require.NoError(t, ca(args, ob, eb, testpw))
|
||||
args = []string{"-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||
assert.Nil(t, ca(args, ob, eb, testpw))
|
||||
assert.Equal(t, pwPromptOb, ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
// read encrypted key file and verify default params
|
||||
rb, _ = os.ReadFile(keyF.Name())
|
||||
rb, _ = ioutil.ReadFile(keyF.Name())
|
||||
k, _ := pem.Decode(rb)
|
||||
ned, err := cert.UnmarshalNebulaEncryptedData(k.Bytes)
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, err)
|
||||
// we won't know salt in advance, so just check start of string
|
||||
assert.Equal(t, uint32(2*1024*1024), ned.EncryptionMetadata.Argon2Parameters.Memory)
|
||||
assert.Equal(t, uint8(4), ned.EncryptionMetadata.Argon2Parameters.Parallelism)
|
||||
|
@ -185,8 +179,8 @@ func Test_ca(t *testing.T) {
|
|||
var curve cert.Curve
|
||||
curve, lKey, b, err = cert.DecryptAndUnmarshalSigningPrivateKey(passphrase, rb)
|
||||
assert.Equal(t, cert.Curve_CURVE25519, curve)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, b)
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, b, 0)
|
||||
assert.Len(t, lKey, 64)
|
||||
|
||||
// test when reading passsword results in an error
|
||||
|
@ -194,8 +188,8 @@ func Test_ca(t *testing.T) {
|
|||
os.Remove(crtF.Name())
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||
require.Error(t, ca(args, ob, eb, errpw))
|
||||
args = []string{"-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||
assert.Error(t, ca(args, ob, eb, errpw))
|
||||
assert.Equal(t, pwPromptOb, ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
|
@ -204,8 +198,8 @@ func Test_ca(t *testing.T) {
|
|||
os.Remove(crtF.Name())
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||
require.EqualError(t, ca(args, ob, eb, nopw), "no passphrase specified, remove -encrypt flag to write out-key in plaintext")
|
||||
args = []string{"-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||
assert.EqualError(t, ca(args, ob, eb, nopw), "no passphrase specified, remove -encrypt flag to write out-key in plaintext")
|
||||
assert.Equal(t, strings.Repeat(pwPromptOb, 5), ob.String()) // prompts 5 times before giving up
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
|
@ -214,14 +208,14 @@ func Test_ca(t *testing.T) {
|
|||
os.Remove(crtF.Name())
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||
require.NoError(t, ca(args, ob, eb, nopw))
|
||||
args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||
assert.Nil(t, ca(args, ob, eb, nopw))
|
||||
|
||||
// test that we won't overwrite existing certificate file
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||
require.EqualError(t, ca(args, ob, eb, nopw), "refusing to overwrite existing CA key: "+keyF.Name())
|
||||
args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||
assert.EqualError(t, ca(args, ob, eb, nopw), "refusing to overwrite existing CA key: "+keyF.Name())
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
|
@ -229,8 +223,8 @@ func Test_ca(t *testing.T) {
|
|||
os.Remove(keyF.Name())
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||
require.EqualError(t, ca(args, ob, eb, nopw), "refusing to overwrite existing CA cert: "+crtF.Name())
|
||||
args = []string{"-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
|
||||
assert.EqualError(t, ca(args, ob, eb, nopw), "refusing to overwrite existing CA cert: "+crtF.Name())
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
os.Remove(keyF.Name())
|
||||
|
|
|
@ -4,10 +4,9 @@ import (
|
|||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/slackhq/nebula/pkclient"
|
||||
|
||||
"github.com/slackhq/nebula/cert"
|
||||
)
|
||||
|
||||
|
@ -15,8 +14,8 @@ type keygenFlags struct {
|
|||
set *flag.FlagSet
|
||||
outKeyPath *string
|
||||
outPubPath *string
|
||||
curve *string
|
||||
p11url *string
|
||||
|
||||
curve *string
|
||||
}
|
||||
|
||||
func newKeygenFlags() *keygenFlags {
|
||||
|
@ -25,7 +24,6 @@ func newKeygenFlags() *keygenFlags {
|
|||
cf.outPubPath = cf.set.String("out-pub", "", "Required: path to write the public key to")
|
||||
cf.outKeyPath = cf.set.String("out-key", "", "Required: path to write the private key to")
|
||||
cf.curve = cf.set.String("curve", "25519", "ECDH Curve (25519, P256)")
|
||||
cf.p11url = p11Flag(cf.set)
|
||||
return &cf
|
||||
}
|
||||
|
||||
|
@ -36,58 +34,32 @@ func keygen(args []string, out io.Writer, errOut io.Writer) error {
|
|||
return err
|
||||
}
|
||||
|
||||
isP11 := len(*cf.p11url) > 0
|
||||
|
||||
if !isP11 {
|
||||
if err = mustFlagString("out-key", cf.outKeyPath); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := mustFlagString("out-key", cf.outKeyPath); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = mustFlagString("out-pub", cf.outPubPath); err != nil {
|
||||
if err := mustFlagString("out-pub", cf.outPubPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var pub, rawPriv []byte
|
||||
var curve cert.Curve
|
||||
if isP11 {
|
||||
switch *cf.curve {
|
||||
case "P256":
|
||||
curve = cert.Curve_P256
|
||||
default:
|
||||
return fmt.Errorf("invalid curve for PKCS#11: %s", *cf.curve)
|
||||
}
|
||||
} else {
|
||||
switch *cf.curve {
|
||||
case "25519", "X25519", "Curve25519", "CURVE25519":
|
||||
pub, rawPriv = x25519Keypair()
|
||||
curve = cert.Curve_CURVE25519
|
||||
case "P256":
|
||||
pub, rawPriv = p256Keypair()
|
||||
curve = cert.Curve_P256
|
||||
default:
|
||||
return fmt.Errorf("invalid curve: %s", *cf.curve)
|
||||
}
|
||||
switch *cf.curve {
|
||||
case "25519", "X25519", "Curve25519", "CURVE25519":
|
||||
pub, rawPriv = x25519Keypair()
|
||||
curve = cert.Curve_CURVE25519
|
||||
case "P256":
|
||||
pub, rawPriv = p256Keypair()
|
||||
curve = cert.Curve_P256
|
||||
default:
|
||||
return fmt.Errorf("invalid curve: %s", *cf.curve)
|
||||
}
|
||||
|
||||
if isP11 {
|
||||
p11Client, err := pkclient.FromUrl(*cf.p11url)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while creating PKCS#11 client: %w", err)
|
||||
}
|
||||
defer func(client *pkclient.PKClient) {
|
||||
_ = client.Close()
|
||||
}(p11Client)
|
||||
pub, err = p11Client.GetPubKey()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while getting public key: %w", err)
|
||||
}
|
||||
} else {
|
||||
err = os.WriteFile(*cf.outKeyPath, cert.MarshalPrivateKeyToPEM(curve, rawPriv), 0600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while writing out-key: %s", err)
|
||||
}
|
||||
err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while writing out-key: %s", err)
|
||||
}
|
||||
err = os.WriteFile(*cf.outPubPath, cert.MarshalPublicKeyToPEM(curve, pub), 0600)
|
||||
|
||||
err = ioutil.WriteFile(*cf.outPubPath, cert.MarshalPublicKey(curve, pub), 0600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while writing out-pub: %s", err)
|
||||
}
|
||||
|
@ -101,7 +73,7 @@ func keygenSummary() string {
|
|||
|
||||
func keygenHelp(out io.Writer) {
|
||||
cf := newKeygenFlags()
|
||||
_, _ = out.Write([]byte("Usage of " + os.Args[0] + " " + keygenSummary() + "\n"))
|
||||
out.Write([]byte("Usage of " + os.Args[0] + " " + keygenSummary() + "\n"))
|
||||
cf.set.SetOutput(out)
|
||||
cf.set.PrintDefaults()
|
||||
}
|
||||
|
|
|
@ -2,14 +2,16 @@ package main
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
//TODO: test file permissions
|
||||
|
||||
func Test_keygenSummary(t *testing.T) {
|
||||
assert.Equal(t, "keygen <flags>: create a public/private key pair. the public key can be passed to `nebula-cert sign`", keygenSummary())
|
||||
}
|
||||
|
@ -25,8 +27,7 @@ func Test_keygenHelp(t *testing.T) {
|
|||
" -out-key string\n"+
|
||||
" \tRequired: path to write the private key to\n"+
|
||||
" -out-pub string\n"+
|
||||
" \tRequired: path to write the public key to\n"+
|
||||
optionalPkcs11String(" -pkcs11 string\n \tOptional: PKCS#11 URI to an existing private key\n"),
|
||||
" \tRequired: path to write the public key to\n",
|
||||
ob.String(),
|
||||
)
|
||||
}
|
||||
|
@ -48,48 +49,46 @@ func Test_keygen(t *testing.T) {
|
|||
ob.Reset()
|
||||
eb.Reset()
|
||||
args := []string{"-out-pub", "/do/not/write/pleasepub", "-out-key", "/do/not/write/pleasekey"}
|
||||
require.EqualError(t, keygen(args, ob, eb), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
|
||||
assert.EqualError(t, keygen(args, ob, eb), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
// create temp key file
|
||||
keyF, err := os.CreateTemp("", "test.key")
|
||||
require.NoError(t, err)
|
||||
keyF, err := ioutil.TempFile("", "test.key")
|
||||
assert.Nil(t, err)
|
||||
defer os.Remove(keyF.Name())
|
||||
|
||||
// failed pub write
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-out-pub", "/do/not/write/pleasepub", "-out-key", keyF.Name()}
|
||||
require.EqualError(t, keygen(args, ob, eb), "error while writing out-pub: open /do/not/write/pleasepub: "+NoSuchDirError)
|
||||
assert.EqualError(t, keygen(args, ob, eb), "error while writing out-pub: open /do/not/write/pleasepub: "+NoSuchDirError)
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
// create temp pub file
|
||||
pubF, err := os.CreateTemp("", "test.pub")
|
||||
require.NoError(t, err)
|
||||
pubF, err := ioutil.TempFile("", "test.pub")
|
||||
assert.Nil(t, err)
|
||||
defer os.Remove(pubF.Name())
|
||||
|
||||
// test proper keygen
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-out-pub", pubF.Name(), "-out-key", keyF.Name()}
|
||||
require.NoError(t, keygen(args, ob, eb))
|
||||
assert.Nil(t, keygen(args, ob, eb))
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
// read cert and key files
|
||||
rb, _ := os.ReadFile(keyF.Name())
|
||||
lKey, b, curve, err := cert.UnmarshalPrivateKeyFromPEM(rb)
|
||||
assert.Equal(t, cert.Curve_CURVE25519, curve)
|
||||
assert.Empty(t, b)
|
||||
require.NoError(t, err)
|
||||
rb, _ := ioutil.ReadFile(keyF.Name())
|
||||
lKey, b, err := cert.UnmarshalX25519PrivateKey(rb)
|
||||
assert.Len(t, b, 0)
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, lKey, 32)
|
||||
|
||||
rb, _ = os.ReadFile(pubF.Name())
|
||||
lPub, b, curve, err := cert.UnmarshalPublicKeyFromPEM(rb)
|
||||
assert.Equal(t, cert.Curve_CURVE25519, curve)
|
||||
assert.Empty(t, b)
|
||||
require.NoError(t, err)
|
||||
rb, _ = ioutil.ReadFile(pubF.Name())
|
||||
lPub, b, err := cert.UnmarshalX25519PublicKey(rb)
|
||||
assert.Len(t, b, 0)
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, lPub, 32)
|
||||
}
|
||||
|
|
|
@ -3,15 +3,15 @@ package main
|
|||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
//TODO: all flag parsing continueOnError will print to stderr on its own currently
|
||||
|
||||
func Test_help(t *testing.T) {
|
||||
expected := "Usage of " + os.Args[0] + " <global flags> <mode>:\n" +
|
||||
" Global flags:\n" +
|
||||
|
@ -77,16 +77,8 @@ func assertHelpError(t *testing.T, err error, msg string) {
|
|||
case *helpError:
|
||||
// good
|
||||
default:
|
||||
t.Fatal(fmt.Sprintf("err was not a helpError: %q, expected %q", err, msg))
|
||||
t.Fatal("err was not a helpError")
|
||||
}
|
||||
|
||||
require.EqualError(t, err, msg)
|
||||
}
|
||||
|
||||
func optionalPkcs11String(msg string) string {
|
||||
if p11Supported() {
|
||||
return msg
|
||||
} else {
|
||||
return ""
|
||||
}
|
||||
assert.EqualError(t, err, msg)
|
||||
}
|
||||
|
|
|
@ -1,15 +0,0 @@
|
|||
//go:build cgo && pkcs11
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
)
|
||||
|
||||
func p11Supported() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func p11Flag(set *flag.FlagSet) *string {
|
||||
return set.String("pkcs11", "", "Optional: PKCS#11 URI to an existing private key")
|
||||
}
|
|
@ -1,16 +0,0 @@
|
|||
//go:build !cgo || !pkcs11
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
)
|
||||
|
||||
func p11Supported() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func p11Flag(set *flag.FlagSet) *string {
|
||||
var ret = ""
|
||||
return &ret
|
||||
}
|
|
@ -5,6 +5,7 @@ import (
|
|||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
|
@ -40,32 +41,33 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
|
|||
return err
|
||||
}
|
||||
|
||||
rawCert, err := os.ReadFile(*pf.path)
|
||||
rawCert, err := ioutil.ReadFile(*pf.path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to read cert; %s", err)
|
||||
}
|
||||
|
||||
var c cert.Certificate
|
||||
var c *cert.NebulaCertificate
|
||||
var qrBytes []byte
|
||||
part := 0
|
||||
|
||||
var jsonCerts []cert.Certificate
|
||||
|
||||
for {
|
||||
c, rawCert, err = cert.UnmarshalCertificateFromPEM(rawCert)
|
||||
c, rawCert, err = cert.UnmarshalNebulaCertificateFromPEM(rawCert)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while unmarshaling cert: %s", err)
|
||||
}
|
||||
|
||||
if *pf.json {
|
||||
jsonCerts = append(jsonCerts, c)
|
||||
b, _ := json.Marshal(c)
|
||||
out.Write(b)
|
||||
out.Write([]byte("\n"))
|
||||
|
||||
} else {
|
||||
_, _ = out.Write([]byte(c.String()))
|
||||
_, _ = out.Write([]byte("\n"))
|
||||
out.Write([]byte(c.String()))
|
||||
out.Write([]byte("\n"))
|
||||
}
|
||||
|
||||
if *pf.outQRPath != "" {
|
||||
b, err := c.MarshalPEM()
|
||||
b, err := c.MarshalToPEM()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while marshalling cert to PEM: %s", err)
|
||||
}
|
||||
|
@ -79,19 +81,13 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
|
|||
part++
|
||||
}
|
||||
|
||||
if *pf.json {
|
||||
b, _ := json.Marshal(jsonCerts)
|
||||
_, _ = out.Write(b)
|
||||
_, _ = out.Write([]byte("\n"))
|
||||
}
|
||||
|
||||
if *pf.outQRPath != "" {
|
||||
b, err := qrcode.Encode(string(qrBytes), qrcode.Medium, -5)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while generating qr code: %s", err)
|
||||
}
|
||||
|
||||
err = os.WriteFile(*pf.outQRPath, b, 0600)
|
||||
err = ioutil.WriteFile(*pf.outQRPath, b, 0600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while writing out-qr: %s", err)
|
||||
}
|
||||
|
|
|
@ -2,17 +2,13 @@ package main
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ed25519"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"net/netip"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func Test_printSummary(t *testing.T) {
|
||||
|
@ -53,106 +49,45 @@ func Test_printCert(t *testing.T) {
|
|||
err = printCert([]string{"-path", "does_not_exist"}, ob, eb)
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
require.EqualError(t, err, "unable to read cert; open does_not_exist: "+NoSuchFileError)
|
||||
assert.EqualError(t, err, "unable to read cert; open does_not_exist: "+NoSuchFileError)
|
||||
|
||||
// invalid cert at path
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
tf, err := os.CreateTemp("", "print-cert")
|
||||
require.NoError(t, err)
|
||||
tf, err := ioutil.TempFile("", "print-cert")
|
||||
assert.Nil(t, err)
|
||||
defer os.Remove(tf.Name())
|
||||
|
||||
tf.WriteString("-----BEGIN NOPE-----")
|
||||
err = printCert([]string{"-path", tf.Name()}, ob, eb)
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
require.EqualError(t, err, "error while unmarshaling cert: input did not contain a valid PEM encoded block")
|
||||
assert.EqualError(t, err, "error while unmarshaling cert: input did not contain a valid PEM encoded block")
|
||||
|
||||
// test multiple certs
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
tf.Truncate(0)
|
||||
tf.Seek(0, 0)
|
||||
ca, caKey := NewTestCaCert("test ca", nil, nil, time.Time{}, time.Time{}, nil, nil, nil)
|
||||
c, _ := NewTestCert(ca, caKey, "test", time.Time{}, time.Time{}, []netip.Prefix{netip.MustParsePrefix("10.0.0.123/8")}, nil, []string{"hi"})
|
||||
c := cert.NebulaCertificate{
|
||||
Details: cert.NebulaCertificateDetails{
|
||||
Name: "test",
|
||||
Groups: []string{"hi"},
|
||||
PublicKey: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2},
|
||||
},
|
||||
Signature: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2},
|
||||
}
|
||||
|
||||
p, _ := c.MarshalPEM()
|
||||
p, _ := c.MarshalToPEM()
|
||||
tf.Write(p)
|
||||
tf.Write(p)
|
||||
tf.Write(p)
|
||||
|
||||
err = printCert([]string{"-path", tf.Name()}, ob, eb)
|
||||
fp, _ := c.Fingerprint()
|
||||
pk := hex.EncodeToString(c.PublicKey())
|
||||
sig := hex.EncodeToString(c.Signature())
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(
|
||||
t,
|
||||
//"NebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: "+c.Issuer()+"\n\t\tPublic key: "+pk+"\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: "+fp+"\n\tSignature: "+sig+"\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: "+c.Issuer()+"\n\t\tPublic key: "+pk+"\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: "+fp+"\n\tSignature: "+sig+"\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: "+c.Issuer()+"\n\t\tPublic key: "+pk+"\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: "+fp+"\n\tSignature: "+sig+"\n}\n",
|
||||
`{
|
||||
"details": {
|
||||
"curve": "CURVE25519",
|
||||
"groups": [
|
||||
"hi"
|
||||
],
|
||||
"isCa": false,
|
||||
"issuer": "`+c.Issuer()+`",
|
||||
"name": "test",
|
||||
"networks": [
|
||||
"10.0.0.123/8"
|
||||
],
|
||||
"notAfter": "0001-01-01T00:00:00Z",
|
||||
"notBefore": "0001-01-01T00:00:00Z",
|
||||
"publicKey": "`+pk+`",
|
||||
"unsafeNetworks": []
|
||||
},
|
||||
"fingerprint": "`+fp+`",
|
||||
"signature": "`+sig+`",
|
||||
"version": 1
|
||||
}
|
||||
{
|
||||
"details": {
|
||||
"curve": "CURVE25519",
|
||||
"groups": [
|
||||
"hi"
|
||||
],
|
||||
"isCa": false,
|
||||
"issuer": "`+c.Issuer()+`",
|
||||
"name": "test",
|
||||
"networks": [
|
||||
"10.0.0.123/8"
|
||||
],
|
||||
"notAfter": "0001-01-01T00:00:00Z",
|
||||
"notBefore": "0001-01-01T00:00:00Z",
|
||||
"publicKey": "`+pk+`",
|
||||
"unsafeNetworks": []
|
||||
},
|
||||
"fingerprint": "`+fp+`",
|
||||
"signature": "`+sig+`",
|
||||
"version": 1
|
||||
}
|
||||
{
|
||||
"details": {
|
||||
"curve": "CURVE25519",
|
||||
"groups": [
|
||||
"hi"
|
||||
],
|
||||
"isCa": false,
|
||||
"issuer": "`+c.Issuer()+`",
|
||||
"name": "test",
|
||||
"networks": [
|
||||
"10.0.0.123/8"
|
||||
],
|
||||
"notAfter": "0001-01-01T00:00:00Z",
|
||||
"notBefore": "0001-01-01T00:00:00Z",
|
||||
"publicKey": "`+pk+`",
|
||||
"unsafeNetworks": []
|
||||
},
|
||||
"fingerprint": "`+fp+`",
|
||||
"signature": "`+sig+`",
|
||||
"version": 1
|
||||
}
|
||||
`,
|
||||
"NebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\nNebulaCertificate {\n\tDetails {\n\t\tName: test\n\t\tIps: []\n\t\tSubnets: []\n\t\tGroups: [\n\t\t\t\"hi\"\n\t\t]\n\t\tNot before: 0001-01-01 00:00:00 +0000 UTC\n\t\tNot After: 0001-01-01 00:00:00 +0000 UTC\n\t\tIs CA: false\n\t\tIssuer: \n\t\tPublic key: 0102030405060708090001020304050607080900010203040506070809000102\n\t\tCurve: CURVE25519\n\t}\n\tFingerprint: cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\n\tSignature: 0102030405060708090001020304050607080900010203040506070809000102\n}\n",
|
||||
ob.String(),
|
||||
)
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
@ -162,84 +97,26 @@ func Test_printCert(t *testing.T) {
|
|||
eb.Reset()
|
||||
tf.Truncate(0)
|
||||
tf.Seek(0, 0)
|
||||
c = cert.NebulaCertificate{
|
||||
Details: cert.NebulaCertificateDetails{
|
||||
Name: "test",
|
||||
Groups: []string{"hi"},
|
||||
PublicKey: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2},
|
||||
},
|
||||
Signature: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2},
|
||||
}
|
||||
|
||||
p, _ = c.MarshalToPEM()
|
||||
tf.Write(p)
|
||||
tf.Write(p)
|
||||
tf.Write(p)
|
||||
|
||||
err = printCert([]string{"-json", "-path", tf.Name()}, ob, eb)
|
||||
fp, _ = c.Fingerprint()
|
||||
pk = hex.EncodeToString(c.PublicKey())
|
||||
sig = hex.EncodeToString(c.Signature())
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(
|
||||
t,
|
||||
`[{"details":{"curve":"CURVE25519","groups":["hi"],"isCa":false,"issuer":"`+c.Issuer()+`","name":"test","networks":["10.0.0.123/8"],"notAfter":"0001-01-01T00:00:00Z","notBefore":"0001-01-01T00:00:00Z","publicKey":"`+pk+`","unsafeNetworks":[]},"fingerprint":"`+fp+`","signature":"`+sig+`","version":1},{"details":{"curve":"CURVE25519","groups":["hi"],"isCa":false,"issuer":"`+c.Issuer()+`","name":"test","networks":["10.0.0.123/8"],"notAfter":"0001-01-01T00:00:00Z","notBefore":"0001-01-01T00:00:00Z","publicKey":"`+pk+`","unsafeNetworks":[]},"fingerprint":"`+fp+`","signature":"`+sig+`","version":1},{"details":{"curve":"CURVE25519","groups":["hi"],"isCa":false,"issuer":"`+c.Issuer()+`","name":"test","networks":["10.0.0.123/8"],"notAfter":"0001-01-01T00:00:00Z","notBefore":"0001-01-01T00:00:00Z","publicKey":"`+pk+`","unsafeNetworks":[]},"fingerprint":"`+fp+`","signature":"`+sig+`","version":1}]
|
||||
`,
|
||||
"{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n{\"details\":{\"curve\":\"CURVE25519\",\"groups\":[\"hi\"],\"ips\":[],\"isCa\":false,\"issuer\":\"\",\"name\":\"test\",\"notAfter\":\"0001-01-01T00:00:00Z\",\"notBefore\":\"0001-01-01T00:00:00Z\",\"publicKey\":\"0102030405060708090001020304050607080900010203040506070809000102\",\"subnets\":[]},\"fingerprint\":\"cc3492c0e9c48f17547f5987ea807462ebb3451e622590a10bb3763c344c82bd\",\"signature\":\"0102030405060708090001020304050607080900010203040506070809000102\"}\n",
|
||||
ob.String(),
|
||||
)
|
||||
assert.Equal(t, "", eb.String())
|
||||
}
|
||||
|
||||
// NewTestCaCert will generate a CA cert
|
||||
func NewTestCaCert(name string, pubKey, privKey []byte, before, after time.Time, networks, unsafeNetworks []netip.Prefix, groups []string) (cert.Certificate, []byte) {
|
||||
var err error
|
||||
if pubKey == nil || privKey == nil {
|
||||
pubKey, privKey, err = ed25519.GenerateKey(rand.Reader)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
t := &cert.TBSCertificate{
|
||||
Version: cert.Version1,
|
||||
Name: name,
|
||||
NotBefore: time.Unix(before.Unix(), 0),
|
||||
NotAfter: time.Unix(after.Unix(), 0),
|
||||
PublicKey: pubKey,
|
||||
Networks: networks,
|
||||
UnsafeNetworks: unsafeNetworks,
|
||||
Groups: groups,
|
||||
IsCA: true,
|
||||
}
|
||||
|
||||
c, err := t.Sign(nil, cert.Curve_CURVE25519, privKey)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return c, privKey
|
||||
}
|
||||
|
||||
func NewTestCert(ca cert.Certificate, signerKey []byte, name string, before, after time.Time, networks, unsafeNetworks []netip.Prefix, groups []string) (cert.Certificate, []byte) {
|
||||
if before.IsZero() {
|
||||
before = ca.NotBefore()
|
||||
}
|
||||
|
||||
if after.IsZero() {
|
||||
after = ca.NotAfter()
|
||||
}
|
||||
|
||||
if len(networks) == 0 {
|
||||
networks = []netip.Prefix{netip.MustParsePrefix("10.0.0.123/8")}
|
||||
}
|
||||
|
||||
pub, rawPriv := x25519Keypair()
|
||||
nc := &cert.TBSCertificate{
|
||||
Version: cert.Version1,
|
||||
Name: name,
|
||||
Networks: networks,
|
||||
UnsafeNetworks: unsafeNetworks,
|
||||
Groups: groups,
|
||||
NotBefore: time.Unix(before.Unix(), 0),
|
||||
NotAfter: time.Unix(after.Unix(), 0),
|
||||
PublicKey: pub,
|
||||
IsCA: false,
|
||||
}
|
||||
|
||||
c, err := nc.Sign(ca, ca.Curve(), signerKey)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return c, rawPriv
|
||||
}
|
||||
|
|
|
@ -3,63 +3,51 @@ package main
|
|||
import (
|
||||
"crypto/ecdh"
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/netip"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/skip2/go-qrcode"
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"github.com/slackhq/nebula/pkclient"
|
||||
"golang.org/x/crypto/curve25519"
|
||||
)
|
||||
|
||||
type signFlags struct {
|
||||
set *flag.FlagSet
|
||||
version *uint
|
||||
caKeyPath *string
|
||||
caCertPath *string
|
||||
name *string
|
||||
networks *string
|
||||
unsafeNetworks *string
|
||||
duration *time.Duration
|
||||
inPubPath *string
|
||||
outKeyPath *string
|
||||
outCertPath *string
|
||||
outQRPath *string
|
||||
groups *string
|
||||
|
||||
p11url *string
|
||||
|
||||
// Deprecated options
|
||||
ip *string
|
||||
subnets *string
|
||||
set *flag.FlagSet
|
||||
caKeyPath *string
|
||||
caCertPath *string
|
||||
name *string
|
||||
ip *string
|
||||
duration *time.Duration
|
||||
inPubPath *string
|
||||
outKeyPath *string
|
||||
outCertPath *string
|
||||
outQRPath *string
|
||||
groups *string
|
||||
subnets *string
|
||||
}
|
||||
|
||||
func newSignFlags() *signFlags {
|
||||
sf := signFlags{set: flag.NewFlagSet("sign", flag.ContinueOnError)}
|
||||
sf.set.Usage = func() {}
|
||||
sf.version = sf.set.Uint("version", 0, "Optional: version of the certificate format to use, the default is to create both v1 and v2 certificates.")
|
||||
sf.caKeyPath = sf.set.String("ca-key", "ca.key", "Optional: path to the signing CA key")
|
||||
sf.caCertPath = sf.set.String("ca-crt", "ca.crt", "Optional: path to the signing CA cert")
|
||||
sf.name = sf.set.String("name", "", "Required: name of the cert, usually a hostname")
|
||||
sf.networks = sf.set.String("networks", "", "Required: comma separated list of ip address and network in CIDR notation to assign to this cert")
|
||||
sf.unsafeNetworks = sf.set.String("unsafe-networks", "", "Optional: comma separated list of ip address and network in CIDR notation. Unsafe networks this cert can route for")
|
||||
sf.ip = sf.set.String("ip", "", "Required: ipv4 address and network in CIDR notation to assign the cert")
|
||||
sf.duration = sf.set.Duration("duration", 0, "Optional: how long the cert should be valid for. The default is 1 second before the signing cert expires. Valid time units are seconds: \"s\", minutes: \"m\", hours: \"h\"")
|
||||
sf.inPubPath = sf.set.String("in-pub", "", "Optional (if out-key not set): path to read a previously generated public key")
|
||||
sf.outKeyPath = sf.set.String("out-key", "", "Optional (if in-pub not set): path to write the private key to")
|
||||
sf.outCertPath = sf.set.String("out-crt", "", "Optional: path to write the certificate to")
|
||||
sf.outQRPath = sf.set.String("out-qr", "", "Optional: output a qr code image (png) of the certificate")
|
||||
sf.groups = sf.set.String("groups", "", "Optional: comma separated list of groups")
|
||||
sf.p11url = p11Flag(sf.set)
|
||||
|
||||
sf.ip = sf.set.String("ip", "", "Deprecated, see -networks")
|
||||
sf.subnets = sf.set.String("subnets", "", "Deprecated, see -unsafe-networks")
|
||||
sf.subnets = sf.set.String("subnets", "", "Optional: comma separated list of ipv4 address and network in CIDR notation. Subnets this cert can serve for")
|
||||
return &sf
|
||||
|
||||
}
|
||||
|
||||
func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error {
|
||||
|
@ -69,12 +57,8 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
|
|||
return err
|
||||
}
|
||||
|
||||
isP11 := len(*sf.p11url) > 0
|
||||
|
||||
if !isP11 {
|
||||
if err := mustFlagString("ca-key", sf.caKeyPath); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := mustFlagString("ca-key", sf.caKeyPath); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := mustFlagString("ca-crt", sf.caCertPath); err != nil {
|
||||
return err
|
||||
|
@ -82,83 +66,69 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
|
|||
if err := mustFlagString("name", sf.name); err != nil {
|
||||
return err
|
||||
}
|
||||
if !isP11 && *sf.inPubPath != "" && *sf.outKeyPath != "" {
|
||||
if err := mustFlagString("ip", sf.ip); err != nil {
|
||||
return err
|
||||
}
|
||||
if *sf.inPubPath != "" && *sf.outKeyPath != "" {
|
||||
return newHelpErrorf("cannot set both -in-pub and -out-key")
|
||||
}
|
||||
|
||||
var v4Networks []netip.Prefix
|
||||
var v6Networks []netip.Prefix
|
||||
if *sf.networks == "" && *sf.ip != "" {
|
||||
// Pull up deprecated -ip flag if needed
|
||||
*sf.networks = *sf.ip
|
||||
}
|
||||
|
||||
if len(*sf.networks) == 0 {
|
||||
return newHelpErrorf("-networks is required")
|
||||
}
|
||||
|
||||
version := cert.Version(*sf.version)
|
||||
if version != 0 && version != cert.Version1 && version != cert.Version2 {
|
||||
return newHelpErrorf("-version must be either %v or %v", cert.Version1, cert.Version2)
|
||||
rawCAKey, err := ioutil.ReadFile(*sf.caKeyPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while reading ca-key: %s", err)
|
||||
}
|
||||
|
||||
var curve cert.Curve
|
||||
var caKey []byte
|
||||
|
||||
if !isP11 {
|
||||
var rawCAKey []byte
|
||||
rawCAKey, err := os.ReadFile(*sf.caKeyPath)
|
||||
// naively attempt to decode the private key as though it is not encrypted
|
||||
caKey, _, curve, err = cert.UnmarshalSigningPrivateKey(rawCAKey)
|
||||
if err == cert.ErrPrivateKeyEncrypted {
|
||||
// ask for a passphrase until we get one
|
||||
var passphrase []byte
|
||||
for i := 0; i < 5; i++ {
|
||||
out.Write([]byte("Enter passphrase: "))
|
||||
passphrase, err = pr.ReadPassword()
|
||||
|
||||
if err == ErrNoTerminal {
|
||||
return fmt.Errorf("ca-key is encrypted and must be decrypted interactively")
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("error reading password: %s", err)
|
||||
}
|
||||
|
||||
if len(passphrase) > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(passphrase) == 0 {
|
||||
return fmt.Errorf("cannot open encrypted ca-key without passphrase")
|
||||
}
|
||||
|
||||
curve, caKey, _, err = cert.DecryptAndUnmarshalSigningPrivateKey(passphrase, rawCAKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while reading ca-key: %s", err)
|
||||
}
|
||||
|
||||
// naively attempt to decode the private key as though it is not encrypted
|
||||
caKey, _, curve, err = cert.UnmarshalSigningPrivateKeyFromPEM(rawCAKey)
|
||||
if errors.Is(err, cert.ErrPrivateKeyEncrypted) {
|
||||
// ask for a passphrase until we get one
|
||||
var passphrase []byte
|
||||
for i := 0; i < 5; i++ {
|
||||
out.Write([]byte("Enter passphrase: "))
|
||||
passphrase, err = pr.ReadPassword()
|
||||
|
||||
if errors.Is(err, ErrNoTerminal) {
|
||||
return fmt.Errorf("ca-key is encrypted and must be decrypted interactively")
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("error reading password: %s", err)
|
||||
}
|
||||
|
||||
if len(passphrase) > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(passphrase) == 0 {
|
||||
return fmt.Errorf("cannot open encrypted ca-key without passphrase")
|
||||
}
|
||||
|
||||
curve, caKey, _, err = cert.DecryptAndUnmarshalSigningPrivateKey(passphrase, rawCAKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while parsing encrypted ca-key: %s", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("error while parsing ca-key: %s", err)
|
||||
return fmt.Errorf("error while parsing encrypted ca-key: %s", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("error while parsing ca-key: %s", err)
|
||||
}
|
||||
|
||||
rawCACert, err := os.ReadFile(*sf.caCertPath)
|
||||
rawCACert, err := ioutil.ReadFile(*sf.caCertPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while reading ca-crt: %s", err)
|
||||
}
|
||||
|
||||
caCert, _, err := cert.UnmarshalCertificateFromPEM(rawCACert)
|
||||
caCert, _, err := cert.UnmarshalNebulaCertificateFromPEM(rawCACert)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while parsing ca-crt: %s", err)
|
||||
}
|
||||
|
||||
if !isP11 {
|
||||
if err := caCert.VerifyPrivateKey(curve, caKey); err != nil {
|
||||
return fmt.Errorf("refusing to sign, root certificate does not match private key")
|
||||
}
|
||||
if err := caCert.VerifyPrivateKey(curve, caKey); err != nil {
|
||||
return fmt.Errorf("refusing to sign, root certificate does not match private key")
|
||||
}
|
||||
|
||||
issuer, err := caCert.Sha256Sum()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while getting -ca-crt fingerprint: %s", err)
|
||||
}
|
||||
|
||||
if caCert.Expired(time.Now()) {
|
||||
|
@ -167,53 +137,19 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
|
|||
|
||||
// if no duration is given, expire one second before the root expires
|
||||
if *sf.duration <= 0 {
|
||||
*sf.duration = time.Until(caCert.NotAfter()) - time.Second*1
|
||||
*sf.duration = time.Until(caCert.Details.NotAfter) - time.Second*1
|
||||
}
|
||||
|
||||
if *sf.networks != "" {
|
||||
for _, rs := range strings.Split(*sf.networks, ",") {
|
||||
rs := strings.Trim(rs, " ")
|
||||
if rs != "" {
|
||||
n, err := netip.ParsePrefix(rs)
|
||||
if err != nil {
|
||||
return newHelpErrorf("invalid -networks definition: %s", rs)
|
||||
}
|
||||
|
||||
if n.Addr().Is4() {
|
||||
v4Networks = append(v4Networks, n)
|
||||
} else {
|
||||
v6Networks = append(v6Networks, n)
|
||||
}
|
||||
}
|
||||
}
|
||||
ip, ipNet, err := net.ParseCIDR(*sf.ip)
|
||||
if err != nil {
|
||||
return newHelpErrorf("invalid ip definition: %s", err)
|
||||
}
|
||||
|
||||
var v4UnsafeNetworks []netip.Prefix
|
||||
var v6UnsafeNetworks []netip.Prefix
|
||||
if *sf.unsafeNetworks == "" && *sf.subnets != "" {
|
||||
// Pull up deprecated -subnets flag if needed
|
||||
*sf.unsafeNetworks = *sf.subnets
|
||||
if ip.To4() == nil {
|
||||
return newHelpErrorf("invalid ip definition: can only be ipv4, have %s", *sf.ip)
|
||||
}
|
||||
ipNet.IP = ip
|
||||
|
||||
if *sf.unsafeNetworks != "" {
|
||||
for _, rs := range strings.Split(*sf.unsafeNetworks, ",") {
|
||||
rs := strings.Trim(rs, " ")
|
||||
if rs != "" {
|
||||
n, err := netip.ParsePrefix(rs)
|
||||
if err != nil {
|
||||
return newHelpErrorf("invalid -unsafe-networks definition: %s", rs)
|
||||
}
|
||||
|
||||
if n.Addr().Is4() {
|
||||
v4UnsafeNetworks = append(v4UnsafeNetworks, n)
|
||||
} else {
|
||||
v6UnsafeNetworks = append(v6UnsafeNetworks, n)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var groups []string
|
||||
groups := []string{}
|
||||
if *sf.groups != "" {
|
||||
for _, rg := range strings.Split(*sf.groups, ",") {
|
||||
g := strings.TrimSpace(rg)
|
||||
|
@ -223,43 +159,60 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
|
|||
}
|
||||
}
|
||||
|
||||
var pub, rawPriv []byte
|
||||
var p11Client *pkclient.PKClient
|
||||
|
||||
if isP11 {
|
||||
curve = cert.Curve_P256
|
||||
p11Client, err = pkclient.FromUrl(*sf.p11url)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while creating PKCS#11 client: %w", err)
|
||||
subnets := []*net.IPNet{}
|
||||
if *sf.subnets != "" {
|
||||
for _, rs := range strings.Split(*sf.subnets, ",") {
|
||||
rs := strings.Trim(rs, " ")
|
||||
if rs != "" {
|
||||
_, s, err := net.ParseCIDR(rs)
|
||||
if err != nil {
|
||||
return newHelpErrorf("invalid subnet definition: %s", err)
|
||||
}
|
||||
if s.IP.To4() == nil {
|
||||
return newHelpErrorf("invalid subnet definition: can only be ipv4, have %s", rs)
|
||||
}
|
||||
subnets = append(subnets, s)
|
||||
}
|
||||
}
|
||||
defer func(client *pkclient.PKClient) {
|
||||
_ = client.Close()
|
||||
}(p11Client)
|
||||
}
|
||||
|
||||
var pub, rawPriv []byte
|
||||
if *sf.inPubPath != "" {
|
||||
var pubCurve cert.Curve
|
||||
rawPub, err := os.ReadFile(*sf.inPubPath)
|
||||
rawPub, err := ioutil.ReadFile(*sf.inPubPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while reading in-pub: %s", err)
|
||||
}
|
||||
|
||||
pub, _, pubCurve, err = cert.UnmarshalPublicKeyFromPEM(rawPub)
|
||||
var pubCurve cert.Curve
|
||||
pub, _, pubCurve, err = cert.UnmarshalPublicKey(rawPub)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while parsing in-pub: %s", err)
|
||||
}
|
||||
if pubCurve != curve {
|
||||
return fmt.Errorf("curve of in-pub does not match ca")
|
||||
}
|
||||
} else if isP11 {
|
||||
pub, err = p11Client.GetPubKey()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while getting public key with PKCS#11: %w", err)
|
||||
}
|
||||
} else {
|
||||
pub, rawPriv = newKeypair(curve)
|
||||
}
|
||||
|
||||
nc := cert.NebulaCertificate{
|
||||
Details: cert.NebulaCertificateDetails{
|
||||
Name: *sf.name,
|
||||
Ips: []*net.IPNet{ipNet},
|
||||
Groups: groups,
|
||||
Subnets: subnets,
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().Add(*sf.duration),
|
||||
PublicKey: pub,
|
||||
IsCA: false,
|
||||
Issuer: issuer,
|
||||
Curve: curve,
|
||||
},
|
||||
}
|
||||
|
||||
if err := nc.CheckRootConstrains(caCert); err != nil {
|
||||
return fmt.Errorf("refusing to sign, root certificate constraints violated: %s", err)
|
||||
}
|
||||
|
||||
if *sf.outKeyPath == "" {
|
||||
*sf.outKeyPath = *sf.name + ".key"
|
||||
}
|
||||
|
@ -272,108 +225,28 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
|
|||
return fmt.Errorf("refusing to overwrite existing cert: %s", *sf.outCertPath)
|
||||
}
|
||||
|
||||
var crts []cert.Certificate
|
||||
|
||||
notBefore := time.Now()
|
||||
notAfter := notBefore.Add(*sf.duration)
|
||||
|
||||
if version == 0 || version == cert.Version1 {
|
||||
// Make sure we at least have an ip
|
||||
if len(v4Networks) != 1 {
|
||||
return newHelpErrorf("invalid -networks definition: v1 certificates can only have a single ipv4 address")
|
||||
}
|
||||
|
||||
if version == cert.Version1 {
|
||||
// If we are asked to mint a v1 certificate only then we cant just ignore any v6 addresses
|
||||
if len(v6Networks) > 0 {
|
||||
return newHelpErrorf("invalid -networks definition: v1 certificates can only be ipv4")
|
||||
}
|
||||
|
||||
if len(v6UnsafeNetworks) > 0 {
|
||||
return newHelpErrorf("invalid -unsafe-networks definition: v1 certificates can only be ipv4")
|
||||
}
|
||||
}
|
||||
|
||||
t := &cert.TBSCertificate{
|
||||
Version: cert.Version1,
|
||||
Name: *sf.name,
|
||||
Networks: []netip.Prefix{v4Networks[0]},
|
||||
Groups: groups,
|
||||
UnsafeNetworks: v4UnsafeNetworks,
|
||||
NotBefore: notBefore,
|
||||
NotAfter: notAfter,
|
||||
PublicKey: pub,
|
||||
IsCA: false,
|
||||
Curve: curve,
|
||||
}
|
||||
|
||||
var nc cert.Certificate
|
||||
if p11Client == nil {
|
||||
nc, err = t.Sign(caCert, curve, caKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while signing: %w", err)
|
||||
}
|
||||
} else {
|
||||
nc, err = t.SignWith(caCert, curve, p11Client.SignASN1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while signing with PKCS#11: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
crts = append(crts, nc)
|
||||
err = nc.Sign(curve, caKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while signing: %s", err)
|
||||
}
|
||||
|
||||
if version == 0 || version == cert.Version2 {
|
||||
t := &cert.TBSCertificate{
|
||||
Version: cert.Version2,
|
||||
Name: *sf.name,
|
||||
Networks: append(v4Networks, v6Networks...),
|
||||
Groups: groups,
|
||||
UnsafeNetworks: append(v4UnsafeNetworks, v6UnsafeNetworks...),
|
||||
NotBefore: notBefore,
|
||||
NotAfter: notAfter,
|
||||
PublicKey: pub,
|
||||
IsCA: false,
|
||||
Curve: curve,
|
||||
}
|
||||
|
||||
var nc cert.Certificate
|
||||
if p11Client == nil {
|
||||
nc, err = t.Sign(caCert, curve, caKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while signing: %w", err)
|
||||
}
|
||||
} else {
|
||||
nc, err = t.SignWith(caCert, curve, p11Client.SignASN1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while signing with PKCS#11: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
crts = append(crts, nc)
|
||||
}
|
||||
|
||||
if !isP11 && *sf.inPubPath == "" {
|
||||
if *sf.inPubPath == "" {
|
||||
if _, err := os.Stat(*sf.outKeyPath); err == nil {
|
||||
return fmt.Errorf("refusing to overwrite existing key: %s", *sf.outKeyPath)
|
||||
}
|
||||
|
||||
err = os.WriteFile(*sf.outKeyPath, cert.MarshalPrivateKeyToPEM(curve, rawPriv), 0600)
|
||||
err = ioutil.WriteFile(*sf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while writing out-key: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
var b []byte
|
||||
for _, c := range crts {
|
||||
sb, err := c.MarshalPEM()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while marshalling certificate: %s", err)
|
||||
}
|
||||
b = append(b, sb...)
|
||||
b, err := nc.MarshalToPEM()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while marshalling certificate: %s", err)
|
||||
}
|
||||
|
||||
err = os.WriteFile(*sf.outCertPath, b, 0600)
|
||||
err = ioutil.WriteFile(*sf.outCertPath, b, 0600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while writing out-crt: %s", err)
|
||||
}
|
||||
|
@ -384,7 +257,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
|
|||
return fmt.Errorf("error while generating qr code: %s", err)
|
||||
}
|
||||
|
||||
err = os.WriteFile(*sf.outQRPath, b, 0600)
|
||||
err = ioutil.WriteFile(*sf.outQRPath, b, 0600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while writing out-qr: %s", err)
|
||||
}
|
||||
|
|
|
@ -7,16 +7,18 @@ import (
|
|||
"bytes"
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
//TODO: test file permissions
|
||||
|
||||
func Test_signSummary(t *testing.T) {
|
||||
assert.Equal(t, "sign <flags>: create and sign a certificate", signSummary())
|
||||
}
|
||||
|
@ -38,24 +40,17 @@ func Test_signHelp(t *testing.T) {
|
|||
" -in-pub string\n"+
|
||||
" \tOptional (if out-key not set): path to read a previously generated public key\n"+
|
||||
" -ip string\n"+
|
||||
" \tDeprecated, see -networks\n"+
|
||||
" \tRequired: ipv4 address and network in CIDR notation to assign the cert\n"+
|
||||
" -name string\n"+
|
||||
" \tRequired: name of the cert, usually a hostname\n"+
|
||||
" -networks string\n"+
|
||||
" \tRequired: comma separated list of ip address and network in CIDR notation to assign to this cert\n"+
|
||||
" -out-crt string\n"+
|
||||
" \tOptional: path to write the certificate to\n"+
|
||||
" -out-key string\n"+
|
||||
" \tOptional (if in-pub not set): path to write the private key to\n"+
|
||||
" -out-qr string\n"+
|
||||
" \tOptional: output a qr code image (png) of the certificate\n"+
|
||||
optionalPkcs11String(" -pkcs11 string\n \tOptional: PKCS#11 URI to an existing private key\n")+
|
||||
" -subnets string\n"+
|
||||
" \tDeprecated, see -unsafe-networks\n"+
|
||||
" -unsafe-networks string\n"+
|
||||
" \tOptional: comma separated list of ip address and network in CIDR notation. Unsafe networks this cert can route for\n"+
|
||||
" -version uint\n"+
|
||||
" \tOptional: version of the certificate format to use, the default is to create both v1 and v2 certificates.\n",
|
||||
" \tOptional: comma separated list of ipv4 address and network in CIDR notation. Subnets this cert can serve for\n",
|
||||
ob.String(),
|
||||
)
|
||||
}
|
||||
|
@ -82,20 +77,20 @@ func Test_signCert(t *testing.T) {
|
|||
|
||||
// required args
|
||||
assertHelpError(t, signCert(
|
||||
[]string{"-version", "1", "-ca-crt", "./nope", "-ca-key", "./nope", "-ip", "1.1.1.1/24", "-out-key", "nope", "-out-crt", "nope"}, ob, eb, nopw,
|
||||
[]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-ip", "1.1.1.1/24", "-out-key", "nope", "-out-crt", "nope"}, ob, eb, nopw,
|
||||
), "-name is required")
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
assertHelpError(t, signCert(
|
||||
[]string{"-version", "1", "-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-out-key", "nope", "-out-crt", "nope"}, ob, eb, nopw,
|
||||
), "-networks is required")
|
||||
[]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-out-key", "nope", "-out-crt", "nope"}, ob, eb, nopw,
|
||||
), "-ip is required")
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
// cannot set -in-pub and -out-key
|
||||
assertHelpError(t, signCert(
|
||||
[]string{"-version", "1", "-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-in-pub", "nope", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope"}, ob, eb, nopw,
|
||||
[]string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-in-pub", "nope", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope"}, ob, eb, nopw,
|
||||
), "cannot set both -in-pub and -out-key")
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
@ -103,18 +98,18 @@ func Test_signCert(t *testing.T) {
|
|||
// failed to read key
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args := []string{"-version", "1", "-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||
require.EqualError(t, signCert(args, ob, eb, nopw), "error while reading ca-key: open ./nope: "+NoSuchFileError)
|
||||
args := []string{"-ca-crt", "./nope", "-ca-key", "./nope", "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while reading ca-key: open ./nope: "+NoSuchFileError)
|
||||
|
||||
// failed to unmarshal key
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
caKeyF, err := os.CreateTemp("", "sign-cert.key")
|
||||
require.NoError(t, err)
|
||||
caKeyF, err := ioutil.TempFile("", "sign-cert.key")
|
||||
assert.Nil(t, err)
|
||||
defer os.Remove(caKeyF.Name())
|
||||
|
||||
args = []string{"-version", "1", "-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||
require.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing ca-key: input did not contain a valid PEM encoded block")
|
||||
args = []string{"-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing ca-key: input did not contain a valid PEM encoded block")
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
|
@ -122,46 +117,54 @@ func Test_signCert(t *testing.T) {
|
|||
ob.Reset()
|
||||
eb.Reset()
|
||||
caPub, caPriv, _ := ed25519.GenerateKey(rand.Reader)
|
||||
caKeyF.Write(cert.MarshalSigningPrivateKeyToPEM(cert.Curve_CURVE25519, caPriv))
|
||||
caKeyF.Write(cert.MarshalEd25519PrivateKey(caPriv))
|
||||
|
||||
// failed to read cert
|
||||
args = []string{"-version", "1", "-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||
require.EqualError(t, signCert(args, ob, eb, nopw), "error while reading ca-crt: open ./nope: "+NoSuchFileError)
|
||||
args = []string{"-ca-crt", "./nope", "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while reading ca-crt: open ./nope: "+NoSuchFileError)
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
// failed to unmarshal cert
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
caCrtF, err := os.CreateTemp("", "sign-cert.crt")
|
||||
require.NoError(t, err)
|
||||
caCrtF, err := ioutil.TempFile("", "sign-cert.crt")
|
||||
assert.Nil(t, err)
|
||||
defer os.Remove(caCrtF.Name())
|
||||
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||
require.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing ca-crt: input did not contain a valid PEM encoded block")
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing ca-crt: input did not contain a valid PEM encoded block")
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
// write a proper ca cert for later
|
||||
ca, _ := NewTestCaCert("ca", caPub, caPriv, time.Now(), time.Now().Add(time.Minute*200), nil, nil, nil)
|
||||
b, _ := ca.MarshalPEM()
|
||||
ca := cert.NebulaCertificate{
|
||||
Details: cert.NebulaCertificateDetails{
|
||||
Name: "ca",
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().Add(time.Minute * 200),
|
||||
PublicKey: caPub,
|
||||
IsCA: true,
|
||||
},
|
||||
}
|
||||
b, _ := ca.MarshalToPEM()
|
||||
caCrtF.Write(b)
|
||||
|
||||
// failed to read pub
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", "./nope", "-duration", "100m"}
|
||||
require.EqualError(t, signCert(args, ob, eb, nopw), "error while reading in-pub: open ./nope: "+NoSuchFileError)
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", "./nope", "-duration", "100m"}
|
||||
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while reading in-pub: open ./nope: "+NoSuchFileError)
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
// failed to unmarshal pub
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
inPubF, err := os.CreateTemp("", "in.pub")
|
||||
require.NoError(t, err)
|
||||
inPubF, err := ioutil.TempFile("", "in.pub")
|
||||
assert.Nil(t, err)
|
||||
defer os.Remove(inPubF.Name())
|
||||
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", inPubF.Name(), "-duration", "100m"}
|
||||
require.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing in-pub: input did not contain a valid PEM encoded block")
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-in-pub", inPubF.Name(), "-duration", "100m"}
|
||||
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while parsing in-pub: input did not contain a valid PEM encoded block")
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
|
@ -169,124 +172,116 @@ func Test_signCert(t *testing.T) {
|
|||
ob.Reset()
|
||||
eb.Reset()
|
||||
inPub, _ := x25519Keypair()
|
||||
inPubF.Write(cert.MarshalPublicKeyToPEM(cert.Curve_CURVE25519, inPub))
|
||||
inPubF.Write(cert.MarshalX25519PublicKey(inPub))
|
||||
|
||||
// bad ip cidr
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "a1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid -networks definition: a1.1.1.1/24")
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "a1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid ip definition: invalid CIDR address: a1.1.1.1/24")
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "100::100/100", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid -networks definition: v1 certificates can only have a single ipv4 address")
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24,1.1.1.2/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid -networks definition: v1 certificates can only have a single ipv4 address")
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "100::100/100", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m"}
|
||||
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid ip definition: can only be ipv4, have 100::100/100")
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
// bad subnet cidr
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"}
|
||||
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid -unsafe-networks definition: a")
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"}
|
||||
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid subnet definition: invalid CIDR address: a")
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "100::100/100"}
|
||||
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid -unsafe-networks definition: v1 certificates can only be ipv4")
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "100::100/100"}
|
||||
assertHelpError(t, signCert(args, ob, eb, nopw), "invalid subnet definition: can only be ipv4, have 100::100/100")
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
// mismatched ca key
|
||||
_, caPriv2, _ := ed25519.GenerateKey(rand.Reader)
|
||||
caKeyF2, err := os.CreateTemp("", "sign-cert-2.key")
|
||||
require.NoError(t, err)
|
||||
caKeyF2, err := ioutil.TempFile("", "sign-cert-2.key")
|
||||
assert.Nil(t, err)
|
||||
defer os.Remove(caKeyF2.Name())
|
||||
caKeyF2.Write(cert.MarshalSigningPrivateKeyToPEM(cert.Curve_CURVE25519, caPriv2))
|
||||
caKeyF2.Write(cert.MarshalEd25519PrivateKey(caPriv2))
|
||||
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF2.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"}
|
||||
require.EqualError(t, signCert(args, ob, eb, nopw), "refusing to sign, root certificate does not match private key")
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF2.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "nope", "-out-key", "nope", "-duration", "100m", "-subnets", "a"}
|
||||
assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to sign, root certificate does not match private key")
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
// failed key write
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey", "-duration", "100m", "-subnets", "10.1.1.1/32"}
|
||||
require.EqualError(t, signCert(args, ob, eb, nopw), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey", "-duration", "100m", "-subnets", "10.1.1.1/32"}
|
||||
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
// create temp key file
|
||||
keyF, err := os.CreateTemp("", "test.key")
|
||||
require.NoError(t, err)
|
||||
keyF, err := ioutil.TempFile("", "test.key")
|
||||
assert.Nil(t, err)
|
||||
os.Remove(keyF.Name())
|
||||
|
||||
// failed cert write
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32"}
|
||||
require.EqualError(t, signCert(args, ob, eb, nopw), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32"}
|
||||
assert.EqualError(t, signCert(args, ob, eb, nopw), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
os.Remove(keyF.Name())
|
||||
|
||||
// create temp cert file
|
||||
crtF, err := os.CreateTemp("", "test.crt")
|
||||
require.NoError(t, err)
|
||||
crtF, err := ioutil.TempFile("", "test.crt")
|
||||
assert.Nil(t, err)
|
||||
os.Remove(crtF.Name())
|
||||
|
||||
// test proper cert with removed empty groups and subnets
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
require.NoError(t, signCert(args, ob, eb, nopw))
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
assert.Nil(t, signCert(args, ob, eb, nopw))
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
// read cert and key files
|
||||
rb, _ := os.ReadFile(keyF.Name())
|
||||
lKey, b, curve, err := cert.UnmarshalPrivateKeyFromPEM(rb)
|
||||
assert.Equal(t, cert.Curve_CURVE25519, curve)
|
||||
assert.Empty(t, b)
|
||||
require.NoError(t, err)
|
||||
rb, _ := ioutil.ReadFile(keyF.Name())
|
||||
lKey, b, err := cert.UnmarshalX25519PrivateKey(rb)
|
||||
assert.Len(t, b, 0)
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, lKey, 32)
|
||||
|
||||
rb, _ = os.ReadFile(crtF.Name())
|
||||
lCrt, b, err := cert.UnmarshalCertificateFromPEM(rb)
|
||||
assert.Empty(t, b)
|
||||
require.NoError(t, err)
|
||||
rb, _ = ioutil.ReadFile(crtF.Name())
|
||||
lCrt, b, err := cert.UnmarshalNebulaCertificateFromPEM(rb)
|
||||
assert.Len(t, b, 0)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, "test", lCrt.Name())
|
||||
assert.Equal(t, "1.1.1.1/24", lCrt.Networks()[0].String())
|
||||
assert.Len(t, lCrt.Networks(), 1)
|
||||
assert.False(t, lCrt.IsCA())
|
||||
assert.Equal(t, []string{"1", "2", "3", "4", "5"}, lCrt.Groups())
|
||||
assert.Len(t, lCrt.UnsafeNetworks(), 3)
|
||||
assert.Len(t, lCrt.PublicKey(), 32)
|
||||
assert.Equal(t, time.Duration(time.Minute*100), lCrt.NotAfter().Sub(lCrt.NotBefore()))
|
||||
assert.Equal(t, "test", lCrt.Details.Name)
|
||||
assert.Equal(t, "1.1.1.1/24", lCrt.Details.Ips[0].String())
|
||||
assert.Len(t, lCrt.Details.Ips, 1)
|
||||
assert.False(t, lCrt.Details.IsCA)
|
||||
assert.Equal(t, []string{"1", "2", "3", "4", "5"}, lCrt.Details.Groups)
|
||||
assert.Len(t, lCrt.Details.Subnets, 3)
|
||||
assert.Len(t, lCrt.Details.PublicKey, 32)
|
||||
assert.Equal(t, time.Duration(time.Minute*100), lCrt.Details.NotAfter.Sub(lCrt.Details.NotBefore))
|
||||
|
||||
sns := []string{}
|
||||
for _, sn := range lCrt.UnsafeNetworks() {
|
||||
for _, sn := range lCrt.Details.Subnets {
|
||||
sns = append(sns, sn.String())
|
||||
}
|
||||
assert.Equal(t, []string{"10.1.1.1/32", "10.2.2.2/32", "10.5.5.5/32"}, sns)
|
||||
|
||||
issuer, _ := ca.Fingerprint()
|
||||
assert.Equal(t, issuer, lCrt.Issuer())
|
||||
issuer, _ := ca.Sha256Sum()
|
||||
assert.Equal(t, issuer, lCrt.Details.Issuer)
|
||||
|
||||
assert.True(t, lCrt.CheckSignature(caPub))
|
||||
|
||||
|
@ -295,55 +290,53 @@ func Test_signCert(t *testing.T) {
|
|||
os.Remove(crtF.Name())
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-in-pub", inPubF.Name(), "-duration", "100m", "-groups", "1"}
|
||||
require.NoError(t, signCert(args, ob, eb, nopw))
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-in-pub", inPubF.Name(), "-duration", "100m", "-groups", "1"}
|
||||
assert.Nil(t, signCert(args, ob, eb, nopw))
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
// read cert file and check pub key matches in-pub
|
||||
rb, _ = os.ReadFile(crtF.Name())
|
||||
lCrt, b, err = cert.UnmarshalCertificateFromPEM(rb)
|
||||
assert.Empty(t, b)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, lCrt.PublicKey(), inPub)
|
||||
rb, _ = ioutil.ReadFile(crtF.Name())
|
||||
lCrt, b, err = cert.UnmarshalNebulaCertificateFromPEM(rb)
|
||||
assert.Len(t, b, 0)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, lCrt.Details.PublicKey, inPub)
|
||||
|
||||
// test refuse to sign cert with duration beyond root
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
os.Remove(keyF.Name())
|
||||
os.Remove(crtF.Name())
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "1000m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
require.EqualError(t, signCert(args, ob, eb, nopw), "error while signing: certificate expires after signing certificate")
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "1000m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to sign, root certificate constraints violated: certificate expires after signing certificate")
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
// create valid cert/key for overwrite tests
|
||||
os.Remove(keyF.Name())
|
||||
os.Remove(crtF.Name())
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
require.NoError(t, signCert(args, ob, eb, nopw))
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
assert.Nil(t, signCert(args, ob, eb, nopw))
|
||||
|
||||
// test that we won't overwrite existing key file
|
||||
os.Remove(crtF.Name())
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
require.EqualError(t, signCert(args, ob, eb, nopw), "refusing to overwrite existing key: "+keyF.Name())
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to overwrite existing key: "+keyF.Name())
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
// create valid cert/key for overwrite tests
|
||||
os.Remove(keyF.Name())
|
||||
os.Remove(crtF.Name())
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
require.NoError(t, signCert(args, ob, eb, nopw))
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
assert.Nil(t, signCert(args, ob, eb, nopw))
|
||||
|
||||
// test that we won't overwrite existing certificate file
|
||||
os.Remove(keyF.Name())
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
require.EqualError(t, signCert(args, ob, eb, nopw), "refusing to overwrite existing cert: "+crtF.Name())
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
assert.EqualError(t, signCert(args, ob, eb, nopw), "refusing to overwrite existing cert: "+crtF.Name())
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
|
@ -355,12 +348,12 @@ func Test_signCert(t *testing.T) {
|
|||
ob.Reset()
|
||||
eb.Reset()
|
||||
|
||||
caKeyF, err = os.CreateTemp("", "sign-cert.key")
|
||||
require.NoError(t, err)
|
||||
caKeyF, err = ioutil.TempFile("", "sign-cert.key")
|
||||
assert.Nil(t, err)
|
||||
defer os.Remove(caKeyF.Name())
|
||||
|
||||
caCrtF, err = os.CreateTemp("", "sign-cert.crt")
|
||||
require.NoError(t, err)
|
||||
caCrtF, err = ioutil.TempFile("", "sign-cert.crt")
|
||||
assert.Nil(t, err)
|
||||
defer os.Remove(caCrtF.Name())
|
||||
|
||||
// generate the encrypted key
|
||||
|
@ -369,13 +362,21 @@ func Test_signCert(t *testing.T) {
|
|||
b, _ = cert.EncryptAndMarshalSigningPrivateKey(cert.Curve_CURVE25519, caPriv, passphrase, kdfParams)
|
||||
caKeyF.Write(b)
|
||||
|
||||
ca, _ = NewTestCaCert("ca", caPub, caPriv, time.Now(), time.Now().Add(time.Minute*200), nil, nil, nil)
|
||||
b, _ = ca.MarshalPEM()
|
||||
ca = cert.NebulaCertificate{
|
||||
Details: cert.NebulaCertificateDetails{
|
||||
Name: "ca",
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().Add(time.Minute * 200),
|
||||
PublicKey: caPub,
|
||||
IsCA: true,
|
||||
},
|
||||
}
|
||||
b, _ = ca.MarshalToPEM()
|
||||
caCrtF.Write(b)
|
||||
|
||||
// test with the proper password
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
require.NoError(t, signCert(args, ob, eb, testpw))
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
assert.Nil(t, signCert(args, ob, eb, testpw))
|
||||
assert.Equal(t, "Enter passphrase: ", ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
|
@ -384,8 +385,8 @@ func Test_signCert(t *testing.T) {
|
|||
eb.Reset()
|
||||
|
||||
testpw.password = []byte("invalid password")
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
require.Error(t, signCert(args, ob, eb, testpw))
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
assert.Error(t, signCert(args, ob, eb, testpw))
|
||||
assert.Equal(t, "Enter passphrase: ", ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
||||
|
@ -393,8 +394,8 @@ func Test_signCert(t *testing.T) {
|
|||
ob.Reset()
|
||||
eb.Reset()
|
||||
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
require.Error(t, signCert(args, ob, eb, nopw))
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
assert.Error(t, signCert(args, ob, eb, nopw))
|
||||
// normally the user hitting enter on the prompt would add newlines between these
|
||||
assert.Equal(t, "Enter passphrase: Enter passphrase: Enter passphrase: Enter passphrase: Enter passphrase: ", ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
|
@ -403,8 +404,8 @@ func Test_signCert(t *testing.T) {
|
|||
ob.Reset()
|
||||
eb.Reset()
|
||||
|
||||
args = []string{"-version", "1", "-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
require.Error(t, signCert(args, ob, eb, errpw))
|
||||
args = []string{"-ca-crt", caCrtF.Name(), "-ca-key", caKeyF.Name(), "-name", "test", "-ip", "1.1.1.1/24", "-out-crt", crtF.Name(), "-out-key", keyF.Name(), "-duration", "100m", "-subnets", "10.1.1.1/32, , 10.2.2.2/32 , , ,, 10.5.5.5/32", "-groups", "1,, 2 , ,,,3,4,5"}
|
||||
assert.Error(t, signCert(args, ob, eb, errpw))
|
||||
assert.Equal(t, "Enter passphrase: ", ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
}
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -40,16 +40,16 @@ func verify(args []string, out io.Writer, errOut io.Writer) error {
|
|||
return err
|
||||
}
|
||||
|
||||
rawCACert, err := os.ReadFile(*vf.caPath)
|
||||
rawCACert, err := ioutil.ReadFile(*vf.caPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while reading ca: %w", err)
|
||||
return fmt.Errorf("error while reading ca: %s", err)
|
||||
}
|
||||
|
||||
caPool := cert.NewCAPool()
|
||||
for {
|
||||
rawCACert, err = caPool.AddCAFromPEM(rawCACert)
|
||||
rawCACert, err = caPool.AddCACertificate(rawCACert)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while adding ca cert to pool: %w", err)
|
||||
return fmt.Errorf("error while adding ca cert to pool: %s", err)
|
||||
}
|
||||
|
||||
if rawCACert == nil || len(rawCACert) == 0 || strings.TrimSpace(string(rawCACert)) == "" {
|
||||
|
@ -57,32 +57,22 @@ func verify(args []string, out io.Writer, errOut io.Writer) error {
|
|||
}
|
||||
}
|
||||
|
||||
rawCert, err := os.ReadFile(*vf.certPath)
|
||||
rawCert, err := ioutil.ReadFile(*vf.certPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to read crt: %w", err)
|
||||
}
|
||||
var errs []error
|
||||
for {
|
||||
if len(rawCert) == 0 {
|
||||
break
|
||||
}
|
||||
c, extra, err := cert.UnmarshalCertificateFromPEM(rawCert)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while parsing crt: %w", err)
|
||||
}
|
||||
rawCert = extra
|
||||
_, err = caPool.VerifyCertificate(time.Now(), c)
|
||||
if err != nil {
|
||||
switch {
|
||||
case errors.Is(err, cert.ErrCaNotFound):
|
||||
errs = append(errs, fmt.Errorf("error while verifying certificate v%d %s with issuer %s: %w", c.Version(), c.Name(), c.Issuer(), err))
|
||||
default:
|
||||
errs = append(errs, fmt.Errorf("error while verifying certificate %+v: %w", c, err))
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("unable to read crt; %s", err)
|
||||
}
|
||||
|
||||
return errors.Join(errs...)
|
||||
c, _, err := cert.UnmarshalNebulaCertificateFromPEM(rawCert)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while parsing crt: %s", err)
|
||||
}
|
||||
|
||||
good, err := c.Verify(time.Now(), caPool)
|
||||
if !good {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func verifySummary() string {
|
||||
|
@ -91,7 +81,7 @@ func verifySummary() string {
|
|||
|
||||
func verifyHelp(out io.Writer) {
|
||||
vf := newVerifyFlags()
|
||||
_, _ = out.Write([]byte("Usage of " + os.Args[0] + " " + verifySummary() + "\n"))
|
||||
out.Write([]byte("Usage of " + os.Args[0] + " " + verifySummary() + "\n"))
|
||||
vf.set.SetOutput(out)
|
||||
vf.set.PrintDefaults()
|
||||
}
|
||||
|
|
|
@ -3,13 +3,13 @@ package main
|
|||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
|
@ -51,25 +51,34 @@ func Test_verify(t *testing.T) {
|
|||
err := verify([]string{"-ca", "does_not_exist", "-crt", "does_not_exist"}, ob, eb)
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
require.EqualError(t, err, "error while reading ca: open does_not_exist: "+NoSuchFileError)
|
||||
assert.EqualError(t, err, "error while reading ca: open does_not_exist: "+NoSuchFileError)
|
||||
|
||||
// invalid ca at path
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
caFile, err := os.CreateTemp("", "verify-ca")
|
||||
require.NoError(t, err)
|
||||
caFile, err := ioutil.TempFile("", "verify-ca")
|
||||
assert.Nil(t, err)
|
||||
defer os.Remove(caFile.Name())
|
||||
|
||||
caFile.WriteString("-----BEGIN NOPE-----")
|
||||
err = verify([]string{"-ca", caFile.Name(), "-crt", "does_not_exist"}, ob, eb)
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
require.EqualError(t, err, "error while adding ca cert to pool: input did not contain a valid PEM encoded block")
|
||||
assert.EqualError(t, err, "error while adding ca cert to pool: input did not contain a valid PEM encoded block")
|
||||
|
||||
// make a ca for later
|
||||
caPub, caPriv, _ := ed25519.GenerateKey(rand.Reader)
|
||||
ca, _ := NewTestCaCert("test-ca", caPub, caPriv, time.Now().Add(time.Hour*-1), time.Now().Add(time.Hour*2), nil, nil, nil)
|
||||
b, _ := ca.MarshalPEM()
|
||||
ca := cert.NebulaCertificate{
|
||||
Details: cert.NebulaCertificateDetails{
|
||||
Name: "test-ca",
|
||||
NotBefore: time.Now().Add(time.Hour * -1),
|
||||
NotAfter: time.Now().Add(time.Hour * 2),
|
||||
PublicKey: caPub,
|
||||
IsCA: true,
|
||||
},
|
||||
}
|
||||
ca.Sign(cert.Curve_CURVE25519, caPriv)
|
||||
b, _ := ca.MarshalToPEM()
|
||||
caFile.Truncate(0)
|
||||
caFile.Seek(0, 0)
|
||||
caFile.Write(b)
|
||||
|
@ -78,29 +87,38 @@ func Test_verify(t *testing.T) {
|
|||
err = verify([]string{"-ca", caFile.Name(), "-crt", "does_not_exist"}, ob, eb)
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
require.EqualError(t, err, "unable to read crt: open does_not_exist: "+NoSuchFileError)
|
||||
assert.EqualError(t, err, "unable to read crt; open does_not_exist: "+NoSuchFileError)
|
||||
|
||||
// invalid crt at path
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
certFile, err := os.CreateTemp("", "verify-cert")
|
||||
require.NoError(t, err)
|
||||
certFile, err := ioutil.TempFile("", "verify-cert")
|
||||
assert.Nil(t, err)
|
||||
defer os.Remove(certFile.Name())
|
||||
|
||||
certFile.WriteString("-----BEGIN NOPE-----")
|
||||
err = verify([]string{"-ca", caFile.Name(), "-crt", certFile.Name()}, ob, eb)
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
require.EqualError(t, err, "error while parsing crt: input did not contain a valid PEM encoded block")
|
||||
assert.EqualError(t, err, "error while parsing crt: input did not contain a valid PEM encoded block")
|
||||
|
||||
// unverifiable cert at path
|
||||
crt, _ := NewTestCert(ca, caPriv, "test-cert", time.Now().Add(time.Hour*-1), time.Now().Add(time.Hour), nil, nil, nil)
|
||||
// Slightly evil hack to modify the certificate after it was sealed to generate an invalid signature
|
||||
pub := crt.PublicKey()
|
||||
for i, _ := range pub {
|
||||
pub[i] = 0
|
||||
_, badPriv, _ := ed25519.GenerateKey(rand.Reader)
|
||||
certPub, _ := x25519Keypair()
|
||||
signer, _ := ca.Sha256Sum()
|
||||
crt := cert.NebulaCertificate{
|
||||
Details: cert.NebulaCertificateDetails{
|
||||
Name: "test-cert",
|
||||
NotBefore: time.Now().Add(time.Hour * -1),
|
||||
NotAfter: time.Now().Add(time.Hour),
|
||||
PublicKey: certPub,
|
||||
IsCA: false,
|
||||
Issuer: signer,
|
||||
},
|
||||
}
|
||||
b, _ = crt.MarshalPEM()
|
||||
|
||||
crt.Sign(cert.Curve_CURVE25519, badPriv)
|
||||
b, _ = crt.MarshalToPEM()
|
||||
certFile.Truncate(0)
|
||||
certFile.Seek(0, 0)
|
||||
certFile.Write(b)
|
||||
|
@ -108,11 +126,11 @@ func Test_verify(t *testing.T) {
|
|||
err = verify([]string{"-ca", caFile.Name(), "-crt", certFile.Name()}, ob, eb)
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
require.ErrorIs(t, err, cert.ErrSignatureMismatch)
|
||||
assert.EqualError(t, err, "certificate signature did not match")
|
||||
|
||||
// verified cert at path
|
||||
crt, _ = NewTestCert(ca, caPriv, "test-cert", time.Now().Add(time.Hour*-1), time.Now().Add(time.Hour), nil, nil, nil)
|
||||
b, _ = crt.MarshalPEM()
|
||||
crt.Sign(cert.Curve_CURVE25519, caPriv)
|
||||
b, _ = crt.MarshalToPEM()
|
||||
certFile.Truncate(0)
|
||||
certFile.Seek(0, 0)
|
||||
certFile.Write(b)
|
||||
|
@ -120,5 +138,5 @@ func Test_verify(t *testing.T) {
|
|||
err = verify([]string{"-ca", caFile.Name(), "-crt", certFile.Name()}, ob, eb)
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
|
|
@ -59,8 +59,13 @@ func main() {
|
|||
}
|
||||
|
||||
ctrl, err := nebula.Main(c, *configTest, Build, l, nil)
|
||||
if err != nil {
|
||||
util.LogWithContextIfNeeded("Failed to start", err, l)
|
||||
|
||||
switch v := err.(type) {
|
||||
case util.ContextualError:
|
||||
v.Log(l)
|
||||
os.Exit(1)
|
||||
case error:
|
||||
l.WithError(err).Error("Failed to start")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
|
|
|
@ -53,14 +53,18 @@ func main() {
|
|||
}
|
||||
|
||||
ctrl, err := nebula.Main(c, *configTest, Build, l, nil)
|
||||
if err != nil {
|
||||
util.LogWithContextIfNeeded("Failed to start", err, l)
|
||||
|
||||
switch v := err.(type) {
|
||||
case util.ContextualError:
|
||||
v.Log(l)
|
||||
os.Exit(1)
|
||||
case error:
|
||||
l.WithError(err).Error("Failed to start")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if !*configTest {
|
||||
ctrl.Start()
|
||||
notifyReady(l)
|
||||
ctrl.ShutdownBlock()
|
||||
}
|
||||
|
||||
|
|
|
@ -1,42 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"net"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// SdNotifyReady tells systemd the service is ready and dependent services can now be started
|
||||
// https://www.freedesktop.org/software/systemd/man/sd_notify.html
|
||||
// https://www.freedesktop.org/software/systemd/man/systemd.service.html
|
||||
const SdNotifyReady = "READY=1"
|
||||
|
||||
func notifyReady(l *logrus.Logger) {
|
||||
sockName := os.Getenv("NOTIFY_SOCKET")
|
||||
if sockName == "" {
|
||||
l.Debugln("NOTIFY_SOCKET systemd env var not set, not sending ready signal")
|
||||
return
|
||||
}
|
||||
|
||||
conn, err := net.DialTimeout("unixgram", sockName, time.Second)
|
||||
if err != nil {
|
||||
l.WithError(err).Error("failed to connect to systemd notification socket")
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
err = conn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
if err != nil {
|
||||
l.WithError(err).Error("failed to set the write deadline for the systemd notification socket")
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = conn.Write([]byte(SdNotifyReady)); err != nil {
|
||||
l.WithError(err).Error("failed to signal the systemd notification socket")
|
||||
return
|
||||
}
|
||||
|
||||
l.Debugln("notified systemd the service is ready")
|
||||
}
|
|
@ -1,10 +0,0 @@
|
|||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
package main
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
func notifyReady(_ *logrus.Logger) {
|
||||
// No init service to notify
|
||||
}
|
|
@ -4,7 +4,7 @@ import (
|
|||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
|
@ -15,7 +15,7 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
"dario.cat/mergo"
|
||||
"github.com/imdario/mergo"
|
||||
"github.com/sirupsen/logrus"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
@ -121,10 +121,6 @@ func (c *C) HasChanged(k string) bool {
|
|||
// CatchHUP will listen for the HUP signal in a go routine and reload all configs found in the
|
||||
// original path provided to Load. The old settings are shallow copied for change detection after the reload.
|
||||
func (c *C) CatchHUP(ctx context.Context) {
|
||||
if c.path == "" {
|
||||
return
|
||||
}
|
||||
|
||||
ch := make(chan os.Signal, 1)
|
||||
signal.Notify(ch, syscall.SIGHUP)
|
||||
|
||||
|
@ -240,15 +236,6 @@ func (c *C) GetInt(k string, d int) int {
|
|||
return v
|
||||
}
|
||||
|
||||
// GetUint32 will get the uint32 for k or return the default d if not found or invalid
|
||||
func (c *C) GetUint32(k string, d uint32) uint32 {
|
||||
r := c.GetInt(k, int(d))
|
||||
if uint64(r) > uint64(math.MaxUint32) {
|
||||
return d
|
||||
}
|
||||
return uint32(r)
|
||||
}
|
||||
|
||||
// GetBool will get the bool for k or return the default d if not found or invalid
|
||||
func (c *C) GetBool(k string, d bool) bool {
|
||||
r := strings.ToLower(c.GetString(k, fmt.Sprintf("%v", d)))
|
||||
|
@ -361,7 +348,7 @@ func (c *C) parse() error {
|
|||
var m map[interface{}]interface{}
|
||||
|
||||
for _, path := range c.files {
|
||||
b, err := os.ReadFile(path)
|
||||
b, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -1,12 +1,13 @@
|
|||
package config
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"dario.cat/mergo"
|
||||
"github.com/imdario/mergo"
|
||||
"github.com/slackhq/nebula/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
@ -15,22 +16,22 @@ import (
|
|||
|
||||
func TestConfig_Load(t *testing.T) {
|
||||
l := test.NewLogger()
|
||||
dir, err := os.MkdirTemp("", "config-test")
|
||||
dir, err := ioutil.TempDir("", "config-test")
|
||||
// invalid yaml
|
||||
c := NewC(l)
|
||||
os.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644)
|
||||
require.EqualError(t, c.Load(dir), "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into map[interface {}]interface {}")
|
||||
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644)
|
||||
assert.EqualError(t, c.Load(dir), "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into map[interface {}]interface {}")
|
||||
|
||||
// simple multi config merge
|
||||
c = NewC(l)
|
||||
os.RemoveAll(dir)
|
||||
os.Mkdir(dir, 0755)
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, err)
|
||||
|
||||
os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
|
||||
os.WriteFile(filepath.Join(dir, "02.yml"), []byte("outer:\n inner: override\nnew: hi"), 0644)
|
||||
require.NoError(t, c.Load(dir))
|
||||
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
|
||||
ioutil.WriteFile(filepath.Join(dir, "02.yml"), []byte("outer:\n inner: override\nnew: hi"), 0644)
|
||||
assert.Nil(t, c.Load(dir))
|
||||
expected := map[interface{}]interface{}{
|
||||
"outer": map[interface{}]interface{}{
|
||||
"inner": "override",
|
||||
|
@ -38,6 +39,9 @@ func TestConfig_Load(t *testing.T) {
|
|||
"new": "hi",
|
||||
}
|
||||
assert.Equal(t, expected, c.Settings)
|
||||
|
||||
//TODO: test symlinked file
|
||||
//TODO: test symlinked directory
|
||||
}
|
||||
|
||||
func TestConfig_Get(t *testing.T) {
|
||||
|
@ -67,28 +71,28 @@ func TestConfig_GetBool(t *testing.T) {
|
|||
l := test.NewLogger()
|
||||
c := NewC(l)
|
||||
c.Settings["bool"] = true
|
||||
assert.True(t, c.GetBool("bool", false))
|
||||
assert.Equal(t, true, c.GetBool("bool", false))
|
||||
|
||||
c.Settings["bool"] = "true"
|
||||
assert.True(t, c.GetBool("bool", false))
|
||||
assert.Equal(t, true, c.GetBool("bool", false))
|
||||
|
||||
c.Settings["bool"] = false
|
||||
assert.False(t, c.GetBool("bool", true))
|
||||
assert.Equal(t, false, c.GetBool("bool", true))
|
||||
|
||||
c.Settings["bool"] = "false"
|
||||
assert.False(t, c.GetBool("bool", true))
|
||||
assert.Equal(t, false, c.GetBool("bool", true))
|
||||
|
||||
c.Settings["bool"] = "Y"
|
||||
assert.True(t, c.GetBool("bool", false))
|
||||
assert.Equal(t, true, c.GetBool("bool", false))
|
||||
|
||||
c.Settings["bool"] = "yEs"
|
||||
assert.True(t, c.GetBool("bool", false))
|
||||
assert.Equal(t, true, c.GetBool("bool", false))
|
||||
|
||||
c.Settings["bool"] = "N"
|
||||
assert.False(t, c.GetBool("bool", true))
|
||||
assert.Equal(t, false, c.GetBool("bool", true))
|
||||
|
||||
c.Settings["bool"] = "nO"
|
||||
assert.False(t, c.GetBool("bool", true))
|
||||
assert.Equal(t, false, c.GetBool("bool", true))
|
||||
}
|
||||
|
||||
func TestConfig_HasChanged(t *testing.T) {
|
||||
|
@ -116,18 +120,18 @@ func TestConfig_HasChanged(t *testing.T) {
|
|||
func TestConfig_ReloadConfig(t *testing.T) {
|
||||
l := test.NewLogger()
|
||||
done := make(chan bool, 1)
|
||||
dir, err := os.MkdirTemp("", "config-test")
|
||||
require.NoError(t, err)
|
||||
os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
|
||||
dir, err := ioutil.TempDir("", "config-test")
|
||||
assert.Nil(t, err)
|
||||
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
|
||||
|
||||
c := NewC(l)
|
||||
require.NoError(t, c.Load(dir))
|
||||
assert.Nil(t, c.Load(dir))
|
||||
|
||||
assert.False(t, c.HasChanged("outer.inner"))
|
||||
assert.False(t, c.HasChanged("outer"))
|
||||
assert.False(t, c.HasChanged(""))
|
||||
|
||||
os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: ho"), 0644)
|
||||
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: ho"), 0644)
|
||||
|
||||
c.RegisterReloadCallback(func(c *C) {
|
||||
done <- true
|
||||
|
|
|
@ -3,8 +3,6 @@ package nebula
|
|||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"net/netip"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
|
@ -12,6 +10,8 @@ import (
|
|||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"github.com/slackhq/nebula/header"
|
||||
"github.com/slackhq/nebula/iputil"
|
||||
"github.com/slackhq/nebula/udp"
|
||||
)
|
||||
|
||||
type trafficDecision int
|
||||
|
@ -23,7 +23,6 @@ const (
|
|||
swapPrimary trafficDecision = 3
|
||||
migrateRelays trafficDecision = 4
|
||||
tryRehandshake trafficDecision = 5
|
||||
sendTestPacket trafficDecision = 6
|
||||
)
|
||||
|
||||
type connectionManager struct {
|
||||
|
@ -177,13 +176,13 @@ func (n *connectionManager) Run(ctx context.Context) {
|
|||
}
|
||||
|
||||
func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
|
||||
decision, hostinfo, primary := n.makeTrafficDecision(localIndex, now)
|
||||
decision, hostinfo, primary := n.makeTrafficDecision(localIndex, p, nb, out, now)
|
||||
|
||||
switch decision {
|
||||
case deleteTunnel:
|
||||
if n.hostMap.DeleteHostInfo(hostinfo) {
|
||||
// Only clearing the lighthouse cache if this is the last hostinfo for this vpn ip in the hostmap
|
||||
n.intf.lightHouse.DeleteVpnAddrs(hostinfo.vpnAddrs)
|
||||
n.intf.lightHouse.DeleteVpnIp(hostinfo.vpnIp)
|
||||
}
|
||||
|
||||
case closeTunnel:
|
||||
|
@ -198,9 +197,6 @@ func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte,
|
|||
|
||||
case tryRehandshake:
|
||||
n.tryRehandshake(hostinfo)
|
||||
|
||||
case sendTestPacket:
|
||||
n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
|
||||
}
|
||||
|
||||
n.resetRelayTrafficCheck(hostinfo)
|
||||
|
@ -221,11 +217,11 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
|
|||
relayFor := oldhostinfo.relayState.CopyAllRelayFor()
|
||||
|
||||
for _, r := range relayFor {
|
||||
existing, ok := newhostinfo.relayState.QueryRelayForByIp(r.PeerAddr)
|
||||
existing, ok := newhostinfo.relayState.QueryRelayForByIp(r.PeerIp)
|
||||
|
||||
var index uint32
|
||||
var relayFrom netip.Addr
|
||||
var relayTo netip.Addr
|
||||
var relayFrom iputil.VpnIp
|
||||
var relayTo iputil.VpnIp
|
||||
switch {
|
||||
case ok && existing.State == Established:
|
||||
// This relay already exists in newhostinfo, then do nothing.
|
||||
|
@ -235,11 +231,11 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
|
|||
index = existing.LocalIndex
|
||||
switch r.Type {
|
||||
case TerminalType:
|
||||
relayFrom = n.intf.myVpnAddrs[0]
|
||||
relayTo = existing.PeerAddr
|
||||
relayFrom = newhostinfo.vpnIp
|
||||
relayTo = existing.PeerIp
|
||||
case ForwardingType:
|
||||
relayFrom = existing.PeerAddr
|
||||
relayTo = newhostinfo.vpnAddrs[0]
|
||||
relayFrom = existing.PeerIp
|
||||
relayTo = newhostinfo.vpnIp
|
||||
default:
|
||||
// should never happen
|
||||
}
|
||||
|
@ -253,18 +249,18 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
|
|||
n.relayUsedLock.RUnlock()
|
||||
// The relay doesn't exist at all; create some relay state and send the request.
|
||||
var err error
|
||||
index, err = AddRelay(n.l, newhostinfo, n.hostMap, r.PeerAddr, nil, r.Type, Requested)
|
||||
index, err = AddRelay(n.l, newhostinfo, n.hostMap, r.PeerIp, nil, r.Type, Requested)
|
||||
if err != nil {
|
||||
n.l.WithError(err).Error("failed to migrate relay to new hostinfo")
|
||||
continue
|
||||
}
|
||||
switch r.Type {
|
||||
case TerminalType:
|
||||
relayFrom = n.intf.myVpnAddrs[0]
|
||||
relayTo = r.PeerAddr
|
||||
relayFrom = newhostinfo.vpnIp
|
||||
relayTo = r.PeerIp
|
||||
case ForwardingType:
|
||||
relayFrom = r.PeerAddr
|
||||
relayTo = newhostinfo.vpnAddrs[0]
|
||||
relayFrom = r.PeerIp
|
||||
relayTo = newhostinfo.vpnIp
|
||||
default:
|
||||
// should never happen
|
||||
}
|
||||
|
@ -274,49 +270,26 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
|
|||
req := NebulaControl{
|
||||
Type: NebulaControl_CreateRelayRequest,
|
||||
InitiatorRelayIndex: index,
|
||||
RelayFromIp: uint32(relayFrom),
|
||||
RelayToIp: uint32(relayTo),
|
||||
}
|
||||
|
||||
switch newhostinfo.GetCert().Certificate.Version() {
|
||||
case cert.Version1:
|
||||
if !relayFrom.Is4() {
|
||||
n.l.Error("can not migrate v1 relay with a v6 network because the relay is not running a current nebula version")
|
||||
continue
|
||||
}
|
||||
|
||||
if !relayTo.Is4() {
|
||||
n.l.Error("can not migrate v1 relay with a v6 remote network because the relay is not running a current nebula version")
|
||||
continue
|
||||
}
|
||||
|
||||
b := relayFrom.As4()
|
||||
req.OldRelayFromAddr = binary.BigEndian.Uint32(b[:])
|
||||
b = relayTo.As4()
|
||||
req.OldRelayToAddr = binary.BigEndian.Uint32(b[:])
|
||||
case cert.Version2:
|
||||
req.RelayFromAddr = netAddrToProtoAddr(relayFrom)
|
||||
req.RelayToAddr = netAddrToProtoAddr(relayTo)
|
||||
default:
|
||||
newhostinfo.logger(n.l).Error("Unknown certificate version found while attempting to migrate relay")
|
||||
continue
|
||||
}
|
||||
|
||||
msg, err := req.Marshal()
|
||||
if err != nil {
|
||||
n.l.WithError(err).Error("failed to marshal Control message to migrate relay")
|
||||
} else {
|
||||
n.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
|
||||
n.l.WithFields(logrus.Fields{
|
||||
"relayFrom": req.RelayFromAddr,
|
||||
"relayTo": req.RelayToAddr,
|
||||
"relayFrom": iputil.VpnIp(req.RelayFromIp),
|
||||
"relayTo": iputil.VpnIp(req.RelayToIp),
|
||||
"initiatorRelayIndex": req.InitiatorRelayIndex,
|
||||
"responderRelayIndex": req.ResponderRelayIndex,
|
||||
"vpnAddrs": newhostinfo.vpnAddrs}).
|
||||
"vpnIp": newhostinfo.vpnIp}).
|
||||
Info("send CreateRelayRequest")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
|
||||
func (n *connectionManager) makeTrafficDecision(localIndex uint32, p, nb, out []byte, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
|
||||
n.hostMap.RLock()
|
||||
defer n.hostMap.RUnlock()
|
||||
|
||||
|
@ -332,7 +305,7 @@ func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time
|
|||
return closeTunnel, hostinfo, nil
|
||||
}
|
||||
|
||||
primary := n.hostMap.Hosts[hostinfo.vpnAddrs[0]]
|
||||
primary := n.hostMap.Hosts[hostinfo.vpnIp]
|
||||
mainHostInfo := true
|
||||
if primary != nil && primary != hostinfo {
|
||||
mainHostInfo = false
|
||||
|
@ -383,7 +356,6 @@ func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time
|
|||
return deleteTunnel, hostinfo, nil
|
||||
}
|
||||
|
||||
decision := doNothing
|
||||
if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo {
|
||||
if !outTraffic {
|
||||
// If we aren't sending or receiving traffic then its an unused tunnel and we don't to test the tunnel.
|
||||
|
@ -408,7 +380,7 @@ func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time
|
|||
}
|
||||
|
||||
// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
|
||||
decision = sendTestPacket
|
||||
n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
|
||||
|
||||
} else {
|
||||
if n.l.Level >= logrus.DebugLevel {
|
||||
|
@ -418,7 +390,7 @@ func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time
|
|||
|
||||
n.pendingDeletion[hostinfo.localIndexId] = struct{}{}
|
||||
n.trafficTimer.Add(hostinfo.localIndexId, n.pendingDeletionInterval)
|
||||
return decision, hostinfo, nil
|
||||
return doNothing, nil, nil
|
||||
}
|
||||
|
||||
func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
|
||||
|
@ -426,24 +398,21 @@ func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
|
|||
// If we are here then we have multiple tunnels for a host pair and neither side believes the same tunnel is primary.
|
||||
// Let's sort this out.
|
||||
|
||||
// Only one side should swap because if both swap then we may never resolve to a single tunnel.
|
||||
// vpn addr is static across all tunnels for this host pair so lets
|
||||
// use that to determine if we should consider swapping.
|
||||
if current.vpnAddrs[0].Compare(n.intf.myVpnAddrs[0]) < 0 {
|
||||
// Their primary vpn addr is less than mine. Do not swap.
|
||||
if current.vpnIp < n.intf.myVpnIp {
|
||||
// Only one side should flip primary because if both flip then we may never resolve to a single tunnel.
|
||||
// vpn ip is static across all tunnels for this host pair so lets use that to determine who is flipping.
|
||||
// The remotes vpn ip is lower than mine. I will not flip.
|
||||
return false
|
||||
}
|
||||
|
||||
crt := n.intf.pki.getCertState().getCertificate(current.ConnectionState.myCert.Version())
|
||||
// If this tunnel is using the latest certificate then we should swap it to primary for a bit and see if things
|
||||
// settle down.
|
||||
return bytes.Equal(current.ConnectionState.myCert.Signature(), crt.Signature())
|
||||
certState := n.intf.certState.Load()
|
||||
return bytes.Equal(current.ConnectionState.certState.certificate.Signature, certState.certificate.Signature)
|
||||
}
|
||||
|
||||
func (n *connectionManager) swapPrimary(current, primary *HostInfo) {
|
||||
n.hostMap.Lock()
|
||||
// Make sure the primary is still the same after the write lock. This avoids a race with a rehandshake.
|
||||
if n.hostMap.Hosts[current.vpnAddrs[0]] == primary {
|
||||
if n.hostMap.Hosts[current.vpnIp] == primary {
|
||||
n.hostMap.unlockedMakePrimary(current)
|
||||
}
|
||||
n.hostMap.Unlock()
|
||||
|
@ -458,19 +427,19 @@ func (n *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostIn
|
|||
return false
|
||||
}
|
||||
|
||||
caPool := n.intf.pki.GetCAPool()
|
||||
err := caPool.VerifyCachedCertificate(now, remoteCert)
|
||||
if err == nil {
|
||||
valid, err := remoteCert.VerifyWithCache(now, n.intf.caPool)
|
||||
if valid {
|
||||
return false
|
||||
}
|
||||
|
||||
if !n.intf.disconnectInvalid.Load() && err != cert.ErrBlockListed {
|
||||
if !n.intf.disconnectInvalid && err != cert.ErrBlockListed {
|
||||
// Block listed certificates should always be disconnected
|
||||
return false
|
||||
}
|
||||
|
||||
fingerprint, _ := remoteCert.Sha256Sum()
|
||||
hostinfo.logger(n.l).WithError(err).
|
||||
WithField("fingerprint", remoteCert.Fingerprint).
|
||||
WithField("fingerprint", fingerprint).
|
||||
Info("Remote certificate is no longer valid, tearing down the tunnel")
|
||||
|
||||
return true
|
||||
|
@ -483,29 +452,39 @@ func (n *connectionManager) sendPunch(hostinfo *HostInfo) {
|
|||
}
|
||||
|
||||
if n.punchy.GetTargetEverything() {
|
||||
hostinfo.remotes.ForEach(n.hostMap.GetPreferredRanges(), func(addr netip.AddrPort, preferred bool) {
|
||||
hostinfo.remotes.ForEach(n.hostMap.preferredRanges, func(addr *udp.Addr, preferred bool) {
|
||||
n.metricsTxPunchy.Inc(1)
|
||||
n.intf.outside.WriteTo([]byte{1}, addr)
|
||||
})
|
||||
|
||||
} else if hostinfo.remote.IsValid() {
|
||||
} else if hostinfo.remote != nil {
|
||||
n.metricsTxPunchy.Inc(1)
|
||||
n.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
|
||||
}
|
||||
}
|
||||
|
||||
func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
|
||||
cs := n.intf.pki.getCertState()
|
||||
curCrt := hostinfo.ConnectionState.myCert
|
||||
myCrt := cs.getCertificate(curCrt.Version())
|
||||
if curCrt.Version() >= cs.defaultVersion && bytes.Equal(curCrt.Signature(), myCrt.Signature()) == true {
|
||||
// The current tunnel is using the latest certificate and version, no need to rehandshake.
|
||||
certState := n.intf.certState.Load()
|
||||
if bytes.Equal(hostinfo.ConnectionState.certState.certificate.Signature, certState.certificate.Signature) {
|
||||
return
|
||||
}
|
||||
|
||||
n.l.WithField("vpnAddrs", hostinfo.vpnAddrs).
|
||||
n.l.WithField("vpnIp", hostinfo.vpnIp).
|
||||
WithField("reason", "local certificate is not current").
|
||||
Info("Re-handshaking with remote")
|
||||
|
||||
n.intf.handshakeManager.StartHandshake(hostinfo.vpnAddrs[0], nil)
|
||||
//TODO: this is copied from getOrHandshake to keep the extra checks out of the hot path, figure it out
|
||||
newHostinfo := n.intf.handshakeManager.AddVpnIp(hostinfo.vpnIp, n.intf.initHostInfo)
|
||||
if !newHostinfo.HandshakeReady {
|
||||
ixHandshakeStage0(n.intf, newHostinfo.vpnIp, newHostinfo)
|
||||
}
|
||||
|
||||
//If this is a static host, we don't need to wait for the HostQueryReply
|
||||
//We can trigger the handshake right now
|
||||
if _, ok := n.intf.lightHouse.GetStaticHostList()[hostinfo.vpnIp]; ok {
|
||||
select {
|
||||
case n.intf.handshakeManager.trigger <- hostinfo.vpnIp:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,27 +4,28 @@ import (
|
|||
"context"
|
||||
"crypto/ed25519"
|
||||
"crypto/rand"
|
||||
"net/netip"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/flynn/noise"
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/iputil"
|
||||
"github.com/slackhq/nebula/test"
|
||||
"github.com/slackhq/nebula/udp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var vpnIp iputil.VpnIp
|
||||
|
||||
func newTestLighthouse() *LightHouse {
|
||||
lh := &LightHouse{
|
||||
l: test.NewLogger(),
|
||||
addrMap: map[netip.Addr]*RemoteList{},
|
||||
queryChan: make(chan netip.Addr, 10),
|
||||
l: test.NewLogger(),
|
||||
addrMap: map[iputil.VpnIp]*RemoteList{},
|
||||
}
|
||||
lighthouses := map[netip.Addr]struct{}{}
|
||||
staticList := map[netip.Addr]struct{}{}
|
||||
lighthouses := map[iputil.VpnIp]struct{}{}
|
||||
staticList := map[iputil.VpnIp]struct{}{}
|
||||
|
||||
lh.lighthouses.Store(&lighthouses)
|
||||
lh.staticList.Store(&staticList)
|
||||
|
@ -35,19 +36,18 @@ func newTestLighthouse() *LightHouse {
|
|||
func Test_NewConnectionManagerTest(t *testing.T) {
|
||||
l := test.NewLogger()
|
||||
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
|
||||
localrange := netip.MustParsePrefix("10.1.1.1/24")
|
||||
vpnIp := netip.MustParseAddr("172.1.1.2")
|
||||
preferredRanges := []netip.Prefix{localrange}
|
||||
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
||||
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
|
||||
vpnIp = iputil.Ip2VpnIp(net.ParseIP("172.1.1.2"))
|
||||
preferredRanges := []*net.IPNet{localrange}
|
||||
|
||||
// Very incomplete mock objects
|
||||
hostMap := newHostMap(l)
|
||||
hostMap.preferredRanges.Store(&preferredRanges)
|
||||
|
||||
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
|
||||
cs := &CertState{
|
||||
defaultVersion: cert.Version1,
|
||||
privateKey: []byte{},
|
||||
v1Cert: &dummyCert{version: cert.Version1},
|
||||
v1HandshakeBytes: []byte{},
|
||||
rawCertificate: []byte{},
|
||||
privateKey: []byte{},
|
||||
certificate: &cert.NebulaCertificate{},
|
||||
rawCertificateNoKey: []byte{},
|
||||
}
|
||||
|
||||
lh := newTestLighthouse()
|
||||
|
@ -57,11 +57,10 @@ func Test_NewConnectionManagerTest(t *testing.T) {
|
|||
outside: &udp.NoopConn{},
|
||||
firewall: &Firewall{},
|
||||
lightHouse: lh,
|
||||
pki: &PKI{},
|
||||
handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
|
||||
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
|
||||
l: l,
|
||||
}
|
||||
ifce.pki.cs.Store(cs)
|
||||
ifce.certState.Store(cs)
|
||||
|
||||
// Create manager
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
@ -74,13 +73,13 @@ func Test_NewConnectionManagerTest(t *testing.T) {
|
|||
|
||||
// Add an ip we have established a connection w/ to hostmap
|
||||
hostinfo := &HostInfo{
|
||||
vpnAddrs: []netip.Addr{vpnIp},
|
||||
vpnIp: vpnIp,
|
||||
localIndexId: 1099,
|
||||
remoteIndexId: 9901,
|
||||
}
|
||||
hostinfo.ConnectionState = &ConnectionState{
|
||||
myCert: &dummyCert{version: cert.Version1},
|
||||
H: &noise.HandshakeState{},
|
||||
certState: cs,
|
||||
H: &noise.HandshakeState{},
|
||||
}
|
||||
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
|
||||
|
||||
|
@ -88,7 +87,7 @@ func Test_NewConnectionManagerTest(t *testing.T) {
|
|||
nc.Out(hostinfo.localIndexId)
|
||||
nc.In(hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
|
||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||
assert.Contains(t, nc.out, hostinfo.localIndexId)
|
||||
|
||||
|
@ -105,31 +104,29 @@ func Test_NewConnectionManagerTest(t *testing.T) {
|
|||
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
||||
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
|
||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||
|
||||
// Do a final traffic check tick, the host should now be removed
|
||||
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
|
||||
assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||
assert.NotContains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||
}
|
||||
|
||||
func Test_NewConnectionManagerTest2(t *testing.T) {
|
||||
l := test.NewLogger()
|
||||
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
|
||||
localrange := netip.MustParsePrefix("10.1.1.1/24")
|
||||
vpnIp := netip.MustParseAddr("172.1.1.2")
|
||||
preferredRanges := []netip.Prefix{localrange}
|
||||
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
||||
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
|
||||
preferredRanges := []*net.IPNet{localrange}
|
||||
|
||||
// Very incomplete mock objects
|
||||
hostMap := newHostMap(l)
|
||||
hostMap.preferredRanges.Store(&preferredRanges)
|
||||
|
||||
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
|
||||
cs := &CertState{
|
||||
defaultVersion: cert.Version1,
|
||||
privateKey: []byte{},
|
||||
v1Cert: &dummyCert{version: cert.Version1},
|
||||
v1HandshakeBytes: []byte{},
|
||||
rawCertificate: []byte{},
|
||||
privateKey: []byte{},
|
||||
certificate: &cert.NebulaCertificate{},
|
||||
rawCertificateNoKey: []byte{},
|
||||
}
|
||||
|
||||
lh := newTestLighthouse()
|
||||
|
@ -139,11 +136,10 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
|
|||
outside: &udp.NoopConn{},
|
||||
firewall: &Firewall{},
|
||||
lightHouse: lh,
|
||||
pki: &PKI{},
|
||||
handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
|
||||
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
|
||||
l: l,
|
||||
}
|
||||
ifce.pki.cs.Store(cs)
|
||||
ifce.certState.Store(cs)
|
||||
|
||||
// Create manager
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
@ -156,21 +152,21 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
|
|||
|
||||
// Add an ip we have established a connection w/ to hostmap
|
||||
hostinfo := &HostInfo{
|
||||
vpnAddrs: []netip.Addr{vpnIp},
|
||||
vpnIp: vpnIp,
|
||||
localIndexId: 1099,
|
||||
remoteIndexId: 9901,
|
||||
}
|
||||
hostinfo.ConnectionState = &ConnectionState{
|
||||
myCert: &dummyCert{version: cert.Version1},
|
||||
H: &noise.HandshakeState{},
|
||||
certState: cs,
|
||||
H: &noise.HandshakeState{},
|
||||
}
|
||||
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
|
||||
|
||||
// We saw traffic out to vpnIp
|
||||
nc.Out(hostinfo.localIndexId)
|
||||
nc.In(hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.pendingDeletion, hostinfo.vpnAddrs[0])
|
||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
|
||||
assert.NotContains(t, nc.pendingDeletion, hostinfo.vpnIp)
|
||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||
|
||||
// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
|
||||
|
@ -186,7 +182,7 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
|
|||
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
||||
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
|
||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||
|
||||
// We saw traffic, should no longer be pending deletion
|
||||
nc.In(hostinfo.localIndexId)
|
||||
|
@ -195,7 +191,7 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
|
|||
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
||||
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
|
||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||
}
|
||||
|
||||
// Check if we can disconnect the peer.
|
||||
|
@ -204,64 +200,67 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
|
|||
func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
|
||||
now := time.Now()
|
||||
l := test.NewLogger()
|
||||
|
||||
vpncidr := netip.MustParsePrefix("172.1.1.1/24")
|
||||
localrange := netip.MustParsePrefix("10.1.1.1/24")
|
||||
vpnIp := netip.MustParseAddr("172.1.1.2")
|
||||
preferredRanges := []netip.Prefix{localrange}
|
||||
hostMap := newHostMap(l)
|
||||
hostMap.preferredRanges.Store(&preferredRanges)
|
||||
ipNet := net.IPNet{
|
||||
IP: net.IPv4(172, 1, 1, 2),
|
||||
Mask: net.IPMask{255, 255, 255, 0},
|
||||
}
|
||||
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
||||
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
|
||||
preferredRanges := []*net.IPNet{localrange}
|
||||
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
|
||||
|
||||
// Generate keys for CA and peer's cert.
|
||||
pubCA, privCA, _ := ed25519.GenerateKey(rand.Reader)
|
||||
tbs := &cert.TBSCertificate{
|
||||
Version: 1,
|
||||
Name: "ca",
|
||||
IsCA: true,
|
||||
NotBefore: now,
|
||||
NotAfter: now.Add(1 * time.Hour),
|
||||
PublicKey: pubCA,
|
||||
caCert := cert.NebulaCertificate{
|
||||
Details: cert.NebulaCertificateDetails{
|
||||
Name: "ca",
|
||||
NotBefore: now,
|
||||
NotAfter: now.Add(1 * time.Hour),
|
||||
IsCA: true,
|
||||
PublicKey: pubCA,
|
||||
},
|
||||
}
|
||||
|
||||
caCert, err := tbs.Sign(nil, cert.Curve_CURVE25519, privCA)
|
||||
require.NoError(t, err)
|
||||
ncp := cert.NewCAPool()
|
||||
require.NoError(t, ncp.AddCA(caCert))
|
||||
caCert.Sign(cert.Curve_CURVE25519, privCA)
|
||||
ncp := &cert.NebulaCAPool{
|
||||
CAs: cert.NewCAPool().CAs,
|
||||
}
|
||||
ncp.CAs["ca"] = &caCert
|
||||
|
||||
pubCrt, _, _ := ed25519.GenerateKey(rand.Reader)
|
||||
tbs = &cert.TBSCertificate{
|
||||
Version: 1,
|
||||
Name: "host",
|
||||
Networks: []netip.Prefix{vpncidr},
|
||||
NotBefore: now,
|
||||
NotAfter: now.Add(60 * time.Second),
|
||||
PublicKey: pubCrt,
|
||||
peerCert := cert.NebulaCertificate{
|
||||
Details: cert.NebulaCertificateDetails{
|
||||
Name: "host",
|
||||
Ips: []*net.IPNet{&ipNet},
|
||||
Subnets: []*net.IPNet{},
|
||||
NotBefore: now,
|
||||
NotAfter: now.Add(60 * time.Second),
|
||||
PublicKey: pubCrt,
|
||||
IsCA: false,
|
||||
Issuer: "ca",
|
||||
},
|
||||
}
|
||||
peerCert, err := tbs.Sign(caCert, cert.Curve_CURVE25519, privCA)
|
||||
require.NoError(t, err)
|
||||
|
||||
cachedPeerCert, err := ncp.VerifyCertificate(now.Add(time.Second), peerCert)
|
||||
peerCert.Sign(cert.Curve_CURVE25519, privCA)
|
||||
|
||||
cs := &CertState{
|
||||
privateKey: []byte{},
|
||||
v1Cert: &dummyCert{},
|
||||
v1HandshakeBytes: []byte{},
|
||||
rawCertificate: []byte{},
|
||||
privateKey: []byte{},
|
||||
certificate: &cert.NebulaCertificate{},
|
||||
rawCertificateNoKey: []byte{},
|
||||
}
|
||||
|
||||
lh := newTestLighthouse()
|
||||
ifce := &Interface{
|
||||
hostMap: hostMap,
|
||||
inside: &test.NoopTun{},
|
||||
outside: &udp.NoopConn{},
|
||||
firewall: &Firewall{},
|
||||
lightHouse: lh,
|
||||
handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
|
||||
l: l,
|
||||
pki: &PKI{},
|
||||
hostMap: hostMap,
|
||||
inside: &test.NoopTun{},
|
||||
outside: &udp.NoopConn{},
|
||||
firewall: &Firewall{},
|
||||
lightHouse: lh,
|
||||
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
|
||||
l: l,
|
||||
disconnectInvalid: true,
|
||||
caPool: ncp,
|
||||
}
|
||||
ifce.pki.cs.Store(cs)
|
||||
ifce.pki.caPool.Store(ncp)
|
||||
ifce.disconnectInvalid.Store(true)
|
||||
ifce.certState.Store(cs)
|
||||
|
||||
// Create manager
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
@ -269,16 +268,12 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
|
|||
punchy := NewPunchyFromConfig(l, config.NewC(l))
|
||||
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
|
||||
ifce.connectionManager = nc
|
||||
|
||||
hostinfo := &HostInfo{
|
||||
vpnAddrs: []netip.Addr{vpnIp},
|
||||
ConnectionState: &ConnectionState{
|
||||
myCert: &dummyCert{},
|
||||
peerCert: cachedPeerCert,
|
||||
H: &noise.HandshakeState{},
|
||||
},
|
||||
hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil)
|
||||
hostinfo.ConnectionState = &ConnectionState{
|
||||
certState: cs,
|
||||
peerCert: &peerCert,
|
||||
H: &noise.HandshakeState{},
|
||||
}
|
||||
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
|
||||
|
||||
// Move ahead 45s.
|
||||
// Check if to disconnect with invalid certificate.
|
||||
|
@ -294,114 +289,3 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
|
|||
invalid = nc.isInvalidCertificate(nextTick, hostinfo)
|
||||
assert.True(t, invalid)
|
||||
}
|
||||
|
||||
type dummyCert struct {
|
||||
version cert.Version
|
||||
curve cert.Curve
|
||||
groups []string
|
||||
isCa bool
|
||||
issuer string
|
||||
name string
|
||||
networks []netip.Prefix
|
||||
notAfter time.Time
|
||||
notBefore time.Time
|
||||
publicKey []byte
|
||||
signature []byte
|
||||
unsafeNetworks []netip.Prefix
|
||||
}
|
||||
|
||||
func (d *dummyCert) Version() cert.Version {
|
||||
return d.version
|
||||
}
|
||||
|
||||
func (d *dummyCert) Curve() cert.Curve {
|
||||
return d.curve
|
||||
}
|
||||
|
||||
func (d *dummyCert) Groups() []string {
|
||||
return d.groups
|
||||
}
|
||||
|
||||
func (d *dummyCert) IsCA() bool {
|
||||
return d.isCa
|
||||
}
|
||||
|
||||
func (d *dummyCert) Issuer() string {
|
||||
return d.issuer
|
||||
}
|
||||
|
||||
func (d *dummyCert) Name() string {
|
||||
return d.name
|
||||
}
|
||||
|
||||
func (d *dummyCert) Networks() []netip.Prefix {
|
||||
return d.networks
|
||||
}
|
||||
|
||||
func (d *dummyCert) NotAfter() time.Time {
|
||||
return d.notAfter
|
||||
}
|
||||
|
||||
func (d *dummyCert) NotBefore() time.Time {
|
||||
return d.notBefore
|
||||
}
|
||||
|
||||
func (d *dummyCert) PublicKey() []byte {
|
||||
return d.publicKey
|
||||
}
|
||||
|
||||
func (d *dummyCert) Signature() []byte {
|
||||
return d.signature
|
||||
}
|
||||
|
||||
func (d *dummyCert) UnsafeNetworks() []netip.Prefix {
|
||||
return d.unsafeNetworks
|
||||
}
|
||||
|
||||
func (d *dummyCert) MarshalForHandshakes() ([]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (d *dummyCert) Sign(curve cert.Curve, key []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dummyCert) CheckSignature(key []byte) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *dummyCert) Expired(t time.Time) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (d *dummyCert) CheckRootConstraints(signer cert.Certificate) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dummyCert) VerifyPrivateKey(curve cert.Curve, key []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dummyCert) String() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (d *dummyCert) Marshal() ([]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (d *dummyCert) MarshalPEM() ([]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (d *dummyCert) Fingerprint() (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (d *dummyCert) MarshalJSON() ([]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (d *dummyCert) Copy() cert.Certificate {
|
||||
return d
|
||||
}
|
||||
|
|
|
@ -3,7 +3,6 @@ package nebula
|
|||
import (
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
|
@ -19,54 +18,51 @@ type ConnectionState struct {
|
|||
eKey *NebulaCipherState
|
||||
dKey *NebulaCipherState
|
||||
H *noise.HandshakeState
|
||||
myCert cert.Certificate
|
||||
peerCert *cert.CachedCertificate
|
||||
certState *CertState
|
||||
peerCert *cert.NebulaCertificate
|
||||
initiator bool
|
||||
messageCounter atomic.Uint64
|
||||
window *Bits
|
||||
queueLock sync.Mutex
|
||||
writeLock sync.Mutex
|
||||
ready bool
|
||||
}
|
||||
|
||||
func NewConnectionState(l *logrus.Logger, cs *CertState, crt cert.Certificate, initiator bool, pattern noise.HandshakePattern) (*ConnectionState, error) {
|
||||
func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState {
|
||||
var dhFunc noise.DHFunc
|
||||
switch crt.Curve() {
|
||||
curCertState := f.certState.Load()
|
||||
|
||||
switch curCertState.certificate.Details.Curve {
|
||||
case cert.Curve_CURVE25519:
|
||||
dhFunc = noise.DH25519
|
||||
case cert.Curve_P256:
|
||||
if cs.pkcs11Backed {
|
||||
dhFunc = noiseutil.DHP256PKCS11
|
||||
} else {
|
||||
dhFunc = noiseutil.DHP256
|
||||
}
|
||||
dhFunc = noiseutil.DHP256
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid curve: %s", crt.Curve())
|
||||
l.Errorf("invalid curve: %s", curCertState.certificate.Details.Curve)
|
||||
return nil
|
||||
}
|
||||
cs := noise.NewCipherSuite(dhFunc, noiseutil.CipherAESGCM, noise.HashSHA256)
|
||||
if f.cipher == "chachapoly" {
|
||||
cs = noise.NewCipherSuite(dhFunc, noise.CipherChaChaPoly, noise.HashSHA256)
|
||||
}
|
||||
|
||||
var ncs noise.CipherSuite
|
||||
if cs.cipher == "chachapoly" {
|
||||
ncs = noise.NewCipherSuite(dhFunc, noise.CipherChaChaPoly, noise.HashSHA256)
|
||||
} else {
|
||||
ncs = noise.NewCipherSuite(dhFunc, noiseutil.CipherAESGCM, noise.HashSHA256)
|
||||
}
|
||||
|
||||
static := noise.DHKey{Private: cs.privateKey, Public: crt.PublicKey()}
|
||||
static := noise.DHKey{Private: curCertState.privateKey, Public: curCertState.publicKey}
|
||||
|
||||
b := NewBits(ReplayWindow)
|
||||
// Clear out bit 0, we never transmit it, and we don't want it showing as packet loss
|
||||
// Clear out bit 0, we never transmit it and we don't want it showing as packet loss
|
||||
b.Update(l, 0)
|
||||
|
||||
hs, err := noise.NewHandshakeState(noise.Config{
|
||||
CipherSuite: ncs,
|
||||
Random: rand.Reader,
|
||||
Pattern: pattern,
|
||||
Initiator: initiator,
|
||||
StaticKeypair: static,
|
||||
//NOTE: These should come from CertState (pki.go) when we finally implement it
|
||||
PresharedKey: []byte{},
|
||||
PresharedKeyPlacement: 0,
|
||||
CipherSuite: cs,
|
||||
Random: rand.Reader,
|
||||
Pattern: pattern,
|
||||
Initiator: initiator,
|
||||
StaticKeypair: static,
|
||||
PresharedKey: psk,
|
||||
PresharedKeyPlacement: pskStage,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("NewConnectionState: %s", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
// The queue and ready params prevent a counter race that would happen when
|
||||
|
@ -75,12 +71,11 @@ func NewConnectionState(l *logrus.Logger, cs *CertState, crt cert.Certificate, i
|
|||
H: hs,
|
||||
initiator: initiator,
|
||||
window: b,
|
||||
myCert: crt,
|
||||
ready: false,
|
||||
certState: curCertState,
|
||||
}
|
||||
// always start the counter from 2, as packet 1 and packet 2 are handshake packets.
|
||||
ci.messageCounter.Add(2)
|
||||
|
||||
return ci, nil
|
||||
return ci
|
||||
}
|
||||
|
||||
func (cs *ConnectionState) MarshalJSON() ([]byte, error) {
|
||||
|
@ -88,9 +83,6 @@ func (cs *ConnectionState) MarshalJSON() ([]byte, error) {
|
|||
"certificate": cs.peerCert,
|
||||
"initiator": cs.initiator,
|
||||
"message_counter": cs.messageCounter.Load(),
|
||||
"ready": cs.ready,
|
||||
})
|
||||
}
|
||||
|
||||
func (cs *ConnectionState) Curve() cert.Curve {
|
||||
return cs.myCert.Curve()
|
||||
}
|
||||
|
|
208
control.go
208
control.go
|
@ -2,7 +2,7 @@ package nebula
|
|||
|
||||
import (
|
||||
"context"
|
||||
"net/netip"
|
||||
"net"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
@ -10,42 +10,33 @@ import (
|
|||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"github.com/slackhq/nebula/header"
|
||||
"github.com/slackhq/nebula/overlay"
|
||||
"github.com/slackhq/nebula/iputil"
|
||||
"github.com/slackhq/nebula/udp"
|
||||
)
|
||||
|
||||
// Every interaction here needs to take extra care to copy memory and not return or use arguments "as is" when touching
|
||||
// core. This means copying IP objects, slices, de-referencing pointers and taking the actual value, etc
|
||||
|
||||
type controlEach func(h *HostInfo)
|
||||
|
||||
type controlHostLister interface {
|
||||
QueryVpnAddr(vpnAddr netip.Addr) *HostInfo
|
||||
ForEachIndex(each controlEach)
|
||||
ForEachVpnAddr(each controlEach)
|
||||
GetPreferredRanges() []netip.Prefix
|
||||
}
|
||||
|
||||
type Control struct {
|
||||
f *Interface
|
||||
l *logrus.Logger
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
sshStart func()
|
||||
statsStart func()
|
||||
dnsStart func()
|
||||
lighthouseStart func()
|
||||
f *Interface
|
||||
l *logrus.Logger
|
||||
cancel context.CancelFunc
|
||||
sshStart func()
|
||||
httpStart func()
|
||||
dnsStart func()
|
||||
}
|
||||
|
||||
type ControlHostInfo struct {
|
||||
VpnAddrs []netip.Addr `json:"vpnAddrs"`
|
||||
LocalIndex uint32 `json:"localIndex"`
|
||||
RemoteIndex uint32 `json:"remoteIndex"`
|
||||
RemoteAddrs []netip.AddrPort `json:"remoteAddrs"`
|
||||
Cert cert.Certificate `json:"cert"`
|
||||
MessageCounter uint64 `json:"messageCounter"`
|
||||
CurrentRemote netip.AddrPort `json:"currentRemote"`
|
||||
CurrentRelaysToMe []netip.Addr `json:"currentRelaysToMe"`
|
||||
CurrentRelaysThroughMe []netip.Addr `json:"currentRelaysThroughMe"`
|
||||
VpnIp net.IP `json:"vpnIp"`
|
||||
LocalIndex uint32 `json:"localIndex"`
|
||||
RemoteIndex uint32 `json:"remoteIndex"`
|
||||
RemoteAddrs []*udp.Addr `json:"remoteAddrs"`
|
||||
CachedPackets int `json:"cachedPackets"`
|
||||
Cert *cert.NebulaCertificate `json:"cert"`
|
||||
MessageCounter uint64 `json:"messageCounter"`
|
||||
CurrentRemote *udp.Addr `json:"currentRemote"`
|
||||
CurrentRelaysToMe []iputil.VpnIp `json:"currentRelaysToMe"`
|
||||
CurrentRelaysThroughMe []iputil.VpnIp `json:"currentRelaysThroughMe"`
|
||||
}
|
||||
|
||||
// Start actually runs nebula, this is a nonblocking call. To block use Control.ShutdownBlock()
|
||||
|
@ -57,25 +48,18 @@ func (c *Control) Start() {
|
|||
if c.sshStart != nil {
|
||||
go c.sshStart()
|
||||
}
|
||||
if c.statsStart != nil {
|
||||
go c.statsStart()
|
||||
if c.httpStart != nil {
|
||||
go c.httpStart()
|
||||
}
|
||||
if c.dnsStart != nil {
|
||||
go c.dnsStart()
|
||||
}
|
||||
if c.lighthouseStart != nil {
|
||||
c.lighthouseStart()
|
||||
}
|
||||
|
||||
// Start reading packets.
|
||||
c.f.run()
|
||||
}
|
||||
|
||||
func (c *Control) Context() context.Context {
|
||||
return c.ctx
|
||||
}
|
||||
|
||||
// Stop signals nebula to shutdown and close all tunnels, returns after the shutdown is complete
|
||||
// Stop signals nebula to shutdown, returns after the shutdown is complete
|
||||
func (c *Control) Stop() {
|
||||
// Stop the handshakeManager (and other services), to prevent new tunnels from
|
||||
// being created while we're shutting them all down.
|
||||
|
@ -105,7 +89,7 @@ func (c *Control) RebindUDPServer() {
|
|||
_ = c.f.outside.Rebind()
|
||||
|
||||
// Trigger a lighthouse update, useful for mobile clients that should have an update interval of 0
|
||||
c.f.lightHouse.SendUpdate()
|
||||
c.f.lightHouse.SendUpdate(c.f)
|
||||
|
||||
// Let the main interface know that we rebound so that underlying tunnels know to trigger punches from their remotes
|
||||
c.f.rebindCount++
|
||||
|
@ -114,7 +98,7 @@ func (c *Control) RebindUDPServer() {
|
|||
// ListHostmapHosts returns details about the actual or pending (handshaking) hostmap by vpn ip
|
||||
func (c *Control) ListHostmapHosts(pendingMap bool) []ControlHostInfo {
|
||||
if pendingMap {
|
||||
return listHostMapHosts(c.f.handshakeManager)
|
||||
return listHostMapHosts(c.f.handshakeManager.pendingHostMap)
|
||||
} else {
|
||||
return listHostMapHosts(c.f.hostMap)
|
||||
}
|
||||
|
@ -123,88 +107,46 @@ func (c *Control) ListHostmapHosts(pendingMap bool) []ControlHostInfo {
|
|||
// ListHostmapIndexes returns details about the actual or pending (handshaking) hostmap by local index id
|
||||
func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo {
|
||||
if pendingMap {
|
||||
return listHostMapIndexes(c.f.handshakeManager)
|
||||
return listHostMapIndexes(c.f.handshakeManager.pendingHostMap)
|
||||
} else {
|
||||
return listHostMapIndexes(c.f.hostMap)
|
||||
}
|
||||
}
|
||||
|
||||
// GetCertByVpnIp returns the authenticated certificate of the given vpn IP, or nil if not found
|
||||
func (c *Control) GetCertByVpnIp(vpnIp netip.Addr) cert.Certificate {
|
||||
_, found := c.f.myVpnAddrsTable.Lookup(vpnIp)
|
||||
if found {
|
||||
// Only returning the default certificate since its impossible
|
||||
// for any other host but ourselves to have more than 1
|
||||
return c.f.pki.getCertState().GetDefaultCertificate().Copy()
|
||||
}
|
||||
hi := c.f.hostMap.QueryVpnAddr(vpnIp)
|
||||
if hi == nil {
|
||||
return nil
|
||||
}
|
||||
return hi.GetCert().Certificate.Copy()
|
||||
}
|
||||
|
||||
// CreateTunnel creates a new tunnel to the given vpn ip.
|
||||
func (c *Control) CreateTunnel(vpnIp netip.Addr) {
|
||||
c.f.handshakeManager.StartHandshake(vpnIp, nil)
|
||||
}
|
||||
|
||||
// PrintTunnel creates a new tunnel to the given vpn ip.
|
||||
func (c *Control) PrintTunnel(vpnIp netip.Addr) *ControlHostInfo {
|
||||
hi := c.f.hostMap.QueryVpnAddr(vpnIp)
|
||||
if hi == nil {
|
||||
return nil
|
||||
}
|
||||
chi := copyHostInfo(hi, c.f.hostMap.GetPreferredRanges())
|
||||
return &chi
|
||||
}
|
||||
|
||||
// QueryLighthouse queries the lighthouse.
|
||||
func (c *Control) QueryLighthouse(vpnIp netip.Addr) *CacheMap {
|
||||
hi := c.f.lightHouse.Query(vpnIp)
|
||||
if hi == nil {
|
||||
return nil
|
||||
}
|
||||
return hi.CopyCache()
|
||||
}
|
||||
|
||||
// GetHostInfoByVpnAddr returns a single tunnels hostInfo, or nil if not found
|
||||
// Caller should take care to Unmap() any 4in6 addresses prior to calling.
|
||||
func (c *Control) GetHostInfoByVpnAddr(vpnAddr netip.Addr, pending bool) *ControlHostInfo {
|
||||
var hl controlHostLister
|
||||
// GetHostInfoByVpnIp returns a single tunnels hostInfo, or nil if not found
|
||||
func (c *Control) GetHostInfoByVpnIp(vpnIp iputil.VpnIp, pending bool) *ControlHostInfo {
|
||||
var hm *HostMap
|
||||
if pending {
|
||||
hl = c.f.handshakeManager
|
||||
hm = c.f.handshakeManager.pendingHostMap
|
||||
} else {
|
||||
hl = c.f.hostMap
|
||||
hm = c.f.hostMap
|
||||
}
|
||||
|
||||
h := hl.QueryVpnAddr(vpnAddr)
|
||||
if h == nil {
|
||||
h, err := hm.QueryVpnIp(vpnIp)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
ch := copyHostInfo(h, c.f.hostMap.GetPreferredRanges())
|
||||
ch := copyHostInfo(h, c.f.hostMap.preferredRanges)
|
||||
return &ch
|
||||
}
|
||||
|
||||
// SetRemoteForTunnel forces a tunnel to use a specific remote
|
||||
// Caller should take care to Unmap() any 4in6 addresses prior to calling.
|
||||
func (c *Control) SetRemoteForTunnel(vpnIp netip.Addr, addr netip.AddrPort) *ControlHostInfo {
|
||||
hostInfo := c.f.hostMap.QueryVpnAddr(vpnIp)
|
||||
if hostInfo == nil {
|
||||
func (c *Control) SetRemoteForTunnel(vpnIp iputil.VpnIp, addr udp.Addr) *ControlHostInfo {
|
||||
hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
hostInfo.SetRemote(addr)
|
||||
ch := copyHostInfo(hostInfo, c.f.hostMap.GetPreferredRanges())
|
||||
hostInfo.SetRemote(addr.Copy())
|
||||
ch := copyHostInfo(hostInfo, c.f.hostMap.preferredRanges)
|
||||
return &ch
|
||||
}
|
||||
|
||||
// CloseTunnel closes a fully established tunnel. If localOnly is false it will notify the remote end as well.
|
||||
// Caller should take care to Unmap() any 4in6 addresses prior to calling.
|
||||
func (c *Control) CloseTunnel(vpnIp netip.Addr, localOnly bool) bool {
|
||||
hostInfo := c.f.hostMap.QueryVpnAddr(vpnIp)
|
||||
if hostInfo == nil {
|
||||
func (c *Control) CloseTunnel(vpnIp iputil.VpnIp, localOnly bool) bool {
|
||||
hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -227,24 +169,29 @@ func (c *Control) CloseTunnel(vpnIp netip.Addr, localOnly bool) bool {
|
|||
// CloseAllTunnels is just like CloseTunnel except it goes through and shuts them all down, optionally you can avoid shutting down lighthouse tunnels
|
||||
// the int returned is a count of tunnels closed
|
||||
func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
|
||||
//TODO: this is probably better as a function in ConnectionManager or HostMap directly
|
||||
lighthouses := c.f.lightHouse.GetLighthouses()
|
||||
|
||||
shutdown := func(h *HostInfo) {
|
||||
if excludeLighthouses && c.f.lightHouse.IsAnyLighthouseAddr(h.vpnAddrs) {
|
||||
return
|
||||
if excludeLighthouses {
|
||||
if _, ok := lighthouses[h.vpnIp]; ok {
|
||||
return
|
||||
}
|
||||
}
|
||||
c.f.send(header.CloseTunnel, 0, h.ConnectionState, h, []byte{}, make([]byte, 12, 12), make([]byte, mtu))
|
||||
c.f.closeTunnel(h)
|
||||
|
||||
c.l.WithField("vpnAddrs", h.vpnAddrs).WithField("udpAddr", h.remote).
|
||||
c.l.WithField("vpnIp", h.vpnIp).WithField("udpAddr", h.remote).
|
||||
Debug("Sending close tunnel message")
|
||||
closed++
|
||||
}
|
||||
|
||||
// Learn which hosts are being used as relays, so we can shut them down last.
|
||||
relayingHosts := map[netip.Addr]*HostInfo{}
|
||||
relayingHosts := map[iputil.VpnIp]*HostInfo{}
|
||||
// Grab the hostMap lock to access the Relays map
|
||||
c.f.hostMap.Lock()
|
||||
for _, relayingHost := range c.f.hostMap.Relays {
|
||||
relayingHosts[relayingHost.vpnAddrs[0]] = relayingHost
|
||||
relayingHosts[relayingHost.vpnIp] = relayingHost
|
||||
}
|
||||
c.f.hostMap.Unlock()
|
||||
|
||||
|
@ -252,7 +199,7 @@ func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
|
|||
// Grab the hostMap lock to access the Hosts map
|
||||
c.f.hostMap.Lock()
|
||||
for _, relayHost := range c.f.hostMap.Indexes {
|
||||
if _, ok := relayingHosts[relayHost.vpnAddrs[0]]; !ok {
|
||||
if _, ok := relayingHosts[relayHost.vpnIp]; !ok {
|
||||
hostInfos = append(hostInfos, relayHost)
|
||||
}
|
||||
}
|
||||
|
@ -267,23 +214,16 @@ func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
|
|||
return
|
||||
}
|
||||
|
||||
func (c *Control) Device() overlay.Device {
|
||||
return c.f.inside
|
||||
}
|
||||
func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
|
||||
|
||||
func copyHostInfo(h *HostInfo, preferredRanges []netip.Prefix) ControlHostInfo {
|
||||
chi := ControlHostInfo{
|
||||
VpnAddrs: make([]netip.Addr, len(h.vpnAddrs)),
|
||||
VpnIp: h.vpnIp.ToIP(),
|
||||
LocalIndex: h.localIndexId,
|
||||
RemoteIndex: h.remoteIndexId,
|
||||
RemoteAddrs: h.remotes.CopyAddrs(preferredRanges),
|
||||
CachedPackets: len(h.packetStore),
|
||||
CurrentRelaysToMe: h.relayState.CopyRelayIps(),
|
||||
CurrentRelaysThroughMe: h.relayState.CopyRelayForIps(),
|
||||
CurrentRemote: h.remote,
|
||||
}
|
||||
|
||||
for i, a := range h.vpnAddrs {
|
||||
chi.VpnAddrs[i] = a
|
||||
}
|
||||
|
||||
if h.ConnectionState != nil {
|
||||
|
@ -291,26 +231,38 @@ func copyHostInfo(h *HostInfo, preferredRanges []netip.Prefix) ControlHostInfo {
|
|||
}
|
||||
|
||||
if c := h.GetCert(); c != nil {
|
||||
chi.Cert = c.Certificate.Copy()
|
||||
chi.Cert = c.Copy()
|
||||
}
|
||||
|
||||
if h.remote != nil {
|
||||
chi.CurrentRemote = h.remote.Copy()
|
||||
}
|
||||
|
||||
return chi
|
||||
}
|
||||
|
||||
func listHostMapHosts(hl controlHostLister) []ControlHostInfo {
|
||||
hosts := make([]ControlHostInfo, 0)
|
||||
pr := hl.GetPreferredRanges()
|
||||
hl.ForEachVpnAddr(func(hostinfo *HostInfo) {
|
||||
hosts = append(hosts, copyHostInfo(hostinfo, pr))
|
||||
})
|
||||
func listHostMapHosts(hm *HostMap) []ControlHostInfo {
|
||||
hm.RLock()
|
||||
hosts := make([]ControlHostInfo, len(hm.Hosts))
|
||||
i := 0
|
||||
for _, v := range hm.Hosts {
|
||||
hosts[i] = copyHostInfo(v, hm.preferredRanges)
|
||||
i++
|
||||
}
|
||||
hm.RUnlock()
|
||||
|
||||
return hosts
|
||||
}
|
||||
|
||||
func listHostMapIndexes(hl controlHostLister) []ControlHostInfo {
|
||||
hosts := make([]ControlHostInfo, 0)
|
||||
pr := hl.GetPreferredRanges()
|
||||
hl.ForEachIndex(func(hostinfo *HostInfo) {
|
||||
hosts = append(hosts, copyHostInfo(hostinfo, pr))
|
||||
})
|
||||
func listHostMapIndexes(hm *HostMap) []ControlHostInfo {
|
||||
hm.RLock()
|
||||
hosts := make([]ControlHostInfo, len(hm.Indexes))
|
||||
i := 0
|
||||
for _, v := range hm.Indexes {
|
||||
hosts[i] = copyHostInfo(v, hm.preferredRanges)
|
||||
i++
|
||||
}
|
||||
hm.RUnlock()
|
||||
|
||||
return hosts
|
||||
}
|
||||
|
|
|
@ -2,67 +2,71 @@ package nebula
|
|||
|
||||
import (
|
||||
"net"
|
||||
"net/netip"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"github.com/slackhq/nebula/iputil"
|
||||
"github.com/slackhq/nebula/test"
|
||||
"github.com/slackhq/nebula/udp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestControl_GetHostInfoByVpnIp(t *testing.T) {
|
||||
//TODO: CERT-V2 with multiple certificate versions we have a problem with this test
|
||||
// Some certs versions have different characteristics and each version implements their own Copy() func
|
||||
// which means this is not a good place to test for exposing memory
|
||||
l := test.NewLogger()
|
||||
// Special care must be taken to re-use all objects provided to the hostmap and certificate in the expectedInfo object
|
||||
// To properly ensure we are not exposing core memory to the caller
|
||||
hm := newHostMap(l)
|
||||
hm.preferredRanges.Store(&[]netip.Prefix{})
|
||||
|
||||
remote1 := netip.MustParseAddrPort("0.0.0.100:4444")
|
||||
remote2 := netip.MustParseAddrPort("[1:2:3:4:5:6:7:8]:4444")
|
||||
|
||||
hm := NewHostMap(l, "test", &net.IPNet{}, make([]*net.IPNet, 0))
|
||||
remote1 := udp.NewAddr(net.ParseIP("0.0.0.100"), 4444)
|
||||
remote2 := udp.NewAddr(net.ParseIP("1:2:3:4:5:6:7:8"), 4444)
|
||||
ipNet := net.IPNet{
|
||||
IP: remote1.Addr().AsSlice(),
|
||||
IP: net.IPv4(1, 2, 3, 4),
|
||||
Mask: net.IPMask{255, 255, 255, 0},
|
||||
}
|
||||
|
||||
ipNet2 := net.IPNet{
|
||||
IP: remote2.Addr().AsSlice(),
|
||||
IP: net.ParseIP("1:2:3:4:5:6:7:8"),
|
||||
Mask: net.IPMask{255, 255, 255, 0},
|
||||
}
|
||||
|
||||
remotes := NewRemoteList([]netip.Addr{netip.IPv4Unspecified()}, nil)
|
||||
remotes.unlockedPrependV4(netip.IPv4Unspecified(), netAddrToProtoV4AddrPort(remote1.Addr(), remote1.Port()))
|
||||
remotes.unlockedPrependV6(netip.IPv4Unspecified(), netAddrToProtoV6AddrPort(remote2.Addr(), remote2.Port()))
|
||||
crt := &cert.NebulaCertificate{
|
||||
Details: cert.NebulaCertificateDetails{
|
||||
Name: "test",
|
||||
Ips: []*net.IPNet{&ipNet},
|
||||
Subnets: []*net.IPNet{},
|
||||
Groups: []string{"default-group"},
|
||||
NotBefore: time.Unix(1, 0),
|
||||
NotAfter: time.Unix(2, 0),
|
||||
PublicKey: []byte{5, 6, 7, 8},
|
||||
IsCA: false,
|
||||
Issuer: "the-issuer",
|
||||
InvertedGroups: map[string]struct{}{"default-group": {}},
|
||||
},
|
||||
Signature: []byte{1, 2, 1, 2, 1, 3},
|
||||
}
|
||||
|
||||
vpnIp, ok := netip.AddrFromSlice(ipNet.IP)
|
||||
assert.True(t, ok)
|
||||
|
||||
crt := &dummyCert{}
|
||||
hm.unlockedAddHostInfo(&HostInfo{
|
||||
remotes := NewRemoteList(nil)
|
||||
remotes.unlockedPrependV4(0, NewIp4AndPort(remote1.IP, uint32(remote1.Port)))
|
||||
remotes.unlockedPrependV6(0, NewIp6AndPort(remote2.IP, uint32(remote2.Port)))
|
||||
hm.Add(iputil.Ip2VpnIp(ipNet.IP), &HostInfo{
|
||||
remote: remote1,
|
||||
remotes: remotes,
|
||||
ConnectionState: &ConnectionState{
|
||||
peerCert: &cert.CachedCertificate{Certificate: crt},
|
||||
peerCert: crt,
|
||||
},
|
||||
remoteIndexId: 200,
|
||||
localIndexId: 201,
|
||||
vpnAddrs: []netip.Addr{vpnIp},
|
||||
vpnIp: iputil.Ip2VpnIp(ipNet.IP),
|
||||
relayState: RelayState{
|
||||
relays: map[netip.Addr]struct{}{},
|
||||
relayForByAddr: map[netip.Addr]*Relay{},
|
||||
relayForByIdx: map[uint32]*Relay{},
|
||||
relays: map[iputil.VpnIp]struct{}{},
|
||||
relayForByIp: map[iputil.VpnIp]*Relay{},
|
||||
relayForByIdx: map[uint32]*Relay{},
|
||||
},
|
||||
}, &Interface{})
|
||||
})
|
||||
|
||||
vpnIp2, ok := netip.AddrFromSlice(ipNet2.IP)
|
||||
assert.True(t, ok)
|
||||
|
||||
hm.unlockedAddHostInfo(&HostInfo{
|
||||
hm.Add(iputil.Ip2VpnIp(ipNet2.IP), &HostInfo{
|
||||
remote: remote1,
|
||||
remotes: remotes,
|
||||
ConnectionState: &ConnectionState{
|
||||
|
@ -70,13 +74,13 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
|
|||
},
|
||||
remoteIndexId: 200,
|
||||
localIndexId: 201,
|
||||
vpnAddrs: []netip.Addr{vpnIp2},
|
||||
vpnIp: iputil.Ip2VpnIp(ipNet2.IP),
|
||||
relayState: RelayState{
|
||||
relays: map[netip.Addr]struct{}{},
|
||||
relayForByAddr: map[netip.Addr]*Relay{},
|
||||
relayForByIdx: map[uint32]*Relay{},
|
||||
relays: map[iputil.VpnIp]struct{}{},
|
||||
relayForByIp: map[iputil.VpnIp]*Relay{},
|
||||
relayForByIdx: map[uint32]*Relay{},
|
||||
},
|
||||
}, &Interface{})
|
||||
})
|
||||
|
||||
c := Control{
|
||||
f: &Interface{
|
||||
|
@ -85,28 +89,28 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
|
|||
l: logrus.New(),
|
||||
}
|
||||
|
||||
thi := c.GetHostInfoByVpnAddr(vpnIp, false)
|
||||
thi := c.GetHostInfoByVpnIp(iputil.Ip2VpnIp(ipNet.IP), false)
|
||||
|
||||
expectedInfo := ControlHostInfo{
|
||||
VpnAddrs: []netip.Addr{vpnIp},
|
||||
VpnIp: net.IPv4(1, 2, 3, 4).To4(),
|
||||
LocalIndex: 201,
|
||||
RemoteIndex: 200,
|
||||
RemoteAddrs: []netip.AddrPort{remote2, remote1},
|
||||
RemoteAddrs: []*udp.Addr{remote2, remote1},
|
||||
CachedPackets: 0,
|
||||
Cert: crt.Copy(),
|
||||
MessageCounter: 0,
|
||||
CurrentRemote: remote1,
|
||||
CurrentRelaysToMe: []netip.Addr{},
|
||||
CurrentRelaysThroughMe: []netip.Addr{},
|
||||
CurrentRemote: udp.NewAddr(net.ParseIP("0.0.0.100"), 4444),
|
||||
CurrentRelaysToMe: []iputil.VpnIp{},
|
||||
CurrentRelaysThroughMe: []iputil.VpnIp{},
|
||||
}
|
||||
|
||||
// Make sure we don't have any unexpected fields
|
||||
assertFields(t, []string{"VpnAddrs", "LocalIndex", "RemoteIndex", "RemoteAddrs", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi)
|
||||
assert.EqualValues(t, &expectedInfo, thi)
|
||||
assertFields(t, []string{"VpnIp", "LocalIndex", "RemoteIndex", "RemoteAddrs", "CachedPackets", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi)
|
||||
test.AssertDeepCopyEqual(t, &expectedInfo, thi)
|
||||
|
||||
// Make sure we don't panic if the host info doesn't have a cert yet
|
||||
assert.NotPanics(t, func() {
|
||||
thi = c.GetHostInfoByVpnAddr(vpnIp2, false)
|
||||
thi = c.GetHostInfoByVpnIp(iputil.Ip2VpnIp(ipNet2.IP), false)
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
@ -4,11 +4,14 @@
|
|||
package nebula
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"net"
|
||||
|
||||
"github.com/slackhq/nebula/cert"
|
||||
|
||||
"github.com/google/gopacket"
|
||||
"github.com/google/gopacket/layers"
|
||||
"github.com/slackhq/nebula/header"
|
||||
"github.com/slackhq/nebula/iputil"
|
||||
"github.com/slackhq/nebula/overlay"
|
||||
"github.com/slackhq/nebula/udp"
|
||||
)
|
||||
|
@ -47,30 +50,37 @@ func (c *Control) WaitForTypeByIndex(toIndex uint32, msgType header.MessageType,
|
|||
|
||||
// InjectLightHouseAddr will push toAddr into the local lighthouse cache for the vpnIp
|
||||
// This is necessary if you did not configure static hosts or are not running a lighthouse
|
||||
func (c *Control) InjectLightHouseAddr(vpnIp netip.Addr, toAddr netip.AddrPort) {
|
||||
func (c *Control) InjectLightHouseAddr(vpnIp net.IP, toAddr *net.UDPAddr) {
|
||||
c.f.lightHouse.Lock()
|
||||
remoteList := c.f.lightHouse.unlockedGetRemoteList([]netip.Addr{vpnIp})
|
||||
remoteList := c.f.lightHouse.unlockedGetRemoteList(iputil.Ip2VpnIp(vpnIp))
|
||||
remoteList.Lock()
|
||||
defer remoteList.Unlock()
|
||||
c.f.lightHouse.Unlock()
|
||||
|
||||
if toAddr.Addr().Is4() {
|
||||
remoteList.unlockedPrependV4(vpnIp, netAddrToProtoV4AddrPort(toAddr.Addr(), toAddr.Port()))
|
||||
iVpnIp := iputil.Ip2VpnIp(vpnIp)
|
||||
if v4 := toAddr.IP.To4(); v4 != nil {
|
||||
remoteList.unlockedPrependV4(iVpnIp, NewIp4AndPort(v4, uint32(toAddr.Port)))
|
||||
} else {
|
||||
remoteList.unlockedPrependV6(vpnIp, netAddrToProtoV6AddrPort(toAddr.Addr(), toAddr.Port()))
|
||||
remoteList.unlockedPrependV6(iVpnIp, NewIp6AndPort(toAddr.IP, uint32(toAddr.Port)))
|
||||
}
|
||||
}
|
||||
|
||||
// InjectRelays will push relayVpnIps into the local lighthouse cache for the vpnIp
|
||||
// This is necessary to inform an initiator of possible relays for communicating with a responder
|
||||
func (c *Control) InjectRelays(vpnIp netip.Addr, relayVpnIps []netip.Addr) {
|
||||
func (c *Control) InjectRelays(vpnIp net.IP, relayVpnIps []net.IP) {
|
||||
c.f.lightHouse.Lock()
|
||||
remoteList := c.f.lightHouse.unlockedGetRemoteList([]netip.Addr{vpnIp})
|
||||
remoteList := c.f.lightHouse.unlockedGetRemoteList(iputil.Ip2VpnIp(vpnIp))
|
||||
remoteList.Lock()
|
||||
defer remoteList.Unlock()
|
||||
c.f.lightHouse.Unlock()
|
||||
|
||||
remoteList.unlockedSetRelay(vpnIp, relayVpnIps)
|
||||
iVpnIp := iputil.Ip2VpnIp(vpnIp)
|
||||
uVpnIp := []uint32{}
|
||||
for _, rVPnIp := range relayVpnIps {
|
||||
uVpnIp = append(uVpnIp, uint32(iputil.Ip2VpnIp(rVPnIp)))
|
||||
}
|
||||
|
||||
remoteList.unlockedSetRelay(iVpnIp, iVpnIp, uVpnIp)
|
||||
}
|
||||
|
||||
// GetFromTun will pull a packet off the tun side of nebula
|
||||
|
@ -97,42 +107,20 @@ func (c *Control) InjectUDPPacket(p *udp.Packet) {
|
|||
}
|
||||
|
||||
// InjectTunUDPPacket puts a udp packet on the tun interface. Using UDP here because it's a simpler protocol
|
||||
func (c *Control) InjectTunUDPPacket(toAddr netip.Addr, toPort uint16, fromAddr netip.Addr, fromPort uint16, data []byte) {
|
||||
serialize := make([]gopacket.SerializableLayer, 0)
|
||||
var netLayer gopacket.NetworkLayer
|
||||
if toAddr.Is6() {
|
||||
if !fromAddr.Is6() {
|
||||
panic("Cant send ipv6 to ipv4")
|
||||
}
|
||||
ip := &layers.IPv6{
|
||||
Version: 6,
|
||||
NextHeader: layers.IPProtocolUDP,
|
||||
SrcIP: fromAddr.Unmap().AsSlice(),
|
||||
DstIP: toAddr.Unmap().AsSlice(),
|
||||
}
|
||||
serialize = append(serialize, ip)
|
||||
netLayer = ip
|
||||
} else {
|
||||
if !fromAddr.Is4() {
|
||||
panic("Cant send ipv4 to ipv6")
|
||||
}
|
||||
|
||||
ip := &layers.IPv4{
|
||||
Version: 4,
|
||||
TTL: 64,
|
||||
Protocol: layers.IPProtocolUDP,
|
||||
SrcIP: fromAddr.Unmap().AsSlice(),
|
||||
DstIP: toAddr.Unmap().AsSlice(),
|
||||
}
|
||||
serialize = append(serialize, ip)
|
||||
netLayer = ip
|
||||
func (c *Control) InjectTunUDPPacket(toIp net.IP, toPort uint16, fromPort uint16, data []byte) {
|
||||
ip := layers.IPv4{
|
||||
Version: 4,
|
||||
TTL: 64,
|
||||
Protocol: layers.IPProtocolUDP,
|
||||
SrcIP: c.f.inside.Cidr().IP,
|
||||
DstIP: toIp,
|
||||
}
|
||||
|
||||
udp := layers.UDP{
|
||||
SrcPort: layers.UDPPort(fromPort),
|
||||
DstPort: layers.UDPPort(toPort),
|
||||
}
|
||||
err := udp.SetNetworkLayerForChecksum(netLayer)
|
||||
err := udp.SetNetworkLayerForChecksum(&ip)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -142,9 +130,7 @@ func (c *Control) InjectTunUDPPacket(toAddr netip.Addr, toPort uint16, fromAddr
|
|||
ComputeChecksums: true,
|
||||
FixLengths: true,
|
||||
}
|
||||
|
||||
serialize = append(serialize, &udp, gopacket.Payload(data))
|
||||
err = gopacket.SerializeLayers(buffer, opt, serialize...)
|
||||
err = gopacket.SerializeLayers(buffer, opt, &ip, &udp, gopacket.Payload(data))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -152,21 +138,21 @@ func (c *Control) InjectTunUDPPacket(toAddr netip.Addr, toPort uint16, fromAddr
|
|||
c.f.inside.(*overlay.TestTun).Send(buffer.Bytes())
|
||||
}
|
||||
|
||||
func (c *Control) GetVpnAddrs() []netip.Addr {
|
||||
return c.f.myVpnAddrs
|
||||
func (c *Control) GetVpnIp() iputil.VpnIp {
|
||||
return c.f.myVpnIp
|
||||
}
|
||||
|
||||
func (c *Control) GetUDPAddr() netip.AddrPort {
|
||||
return c.f.outside.(*udp.TesterConn).Addr
|
||||
func (c *Control) GetUDPAddr() string {
|
||||
return c.f.outside.(*udp.TesterConn).Addr.String()
|
||||
}
|
||||
|
||||
func (c *Control) KillPendingTunnel(vpnIp netip.Addr) bool {
|
||||
hostinfo := c.f.handshakeManager.QueryVpnAddr(vpnIp)
|
||||
if hostinfo == nil {
|
||||
func (c *Control) KillPendingTunnel(vpnIp net.IP) bool {
|
||||
hostinfo, ok := c.f.handshakeManager.pendingHostMap.Hosts[iputil.Ip2VpnIp(vpnIp)]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
c.f.handshakeManager.DeleteHostInfo(hostinfo)
|
||||
c.f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo)
|
||||
return true
|
||||
}
|
||||
|
||||
|
@ -174,10 +160,20 @@ func (c *Control) GetHostmap() *HostMap {
|
|||
return c.f.hostMap
|
||||
}
|
||||
|
||||
func (c *Control) GetCertState() *CertState {
|
||||
return c.f.pki.getCertState()
|
||||
func (c *Control) GetCert() *cert.NebulaCertificate {
|
||||
return c.f.certState.Load().certificate
|
||||
}
|
||||
|
||||
func (c *Control) ReHandshake(vpnIp netip.Addr) {
|
||||
c.f.handshakeManager.StartHandshake(vpnIp, nil)
|
||||
func (c *Control) ReHandshake(vpnIp iputil.VpnIp) {
|
||||
hostinfo := c.f.handshakeManager.AddVpnIp(vpnIp, c.f.initHostInfo)
|
||||
ixHandshakeStage0(c.f, vpnIp, hostinfo)
|
||||
|
||||
// If this is a static host, we don't need to wait for the HostQueryReply
|
||||
// We can trigger the handshake right now
|
||||
if _, ok := c.f.lightHouse.GetStaticHostList()[hostinfo.vpnIp]; ok {
|
||||
select {
|
||||
case c.f.handshakeManager.trigger <- hostinfo.vpnIp:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
13
dist/arch/nebula.service
vendored
Normal file
13
dist/arch/nebula.service
vendored
Normal file
|
@ -0,0 +1,13 @@
|
|||
[Unit]
|
||||
Description=Nebula overlay networking tool
|
||||
Wants=basic.target network-online.target nss-lookup.target time-sync.target
|
||||
After=basic.target network.target network-online.target
|
||||
|
||||
[Service]
|
||||
SyslogIdentifier=nebula
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
|
||||
Restart=always
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
14
dist/fedora/nebula.service
vendored
Normal file
14
dist/fedora/nebula.service
vendored
Normal file
|
@ -0,0 +1,14 @@
|
|||
[Unit]
|
||||
Description=Nebula overlay networking tool
|
||||
Wants=basic.target network-online.target nss-lookup.target time-sync.target
|
||||
After=basic.target network.target network-online.target
|
||||
Before=sshd.service
|
||||
|
||||
[Service]
|
||||
SyslogIdentifier=nebula
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
|
||||
Restart=always
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
139
dns_server.go
139
dns_server.go
|
@ -3,15 +3,14 @@ package nebula
|
|||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/gaissmai/bart"
|
||||
"github.com/miekg/dns"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/iputil"
|
||||
)
|
||||
|
||||
// This whole thing should be rewritten to use context
|
||||
|
@ -22,120 +21,73 @@ var dnsAddr string
|
|||
|
||||
type dnsRecords struct {
|
||||
sync.RWMutex
|
||||
l *logrus.Logger
|
||||
dnsMap4 map[string]netip.Addr
|
||||
dnsMap6 map[string]netip.Addr
|
||||
hostMap *HostMap
|
||||
myVpnAddrsTable *bart.Table[struct{}]
|
||||
dnsMap map[string]string
|
||||
hostMap *HostMap
|
||||
}
|
||||
|
||||
func newDnsRecords(l *logrus.Logger, cs *CertState, hostMap *HostMap) *dnsRecords {
|
||||
func newDnsRecords(hostMap *HostMap) *dnsRecords {
|
||||
return &dnsRecords{
|
||||
l: l,
|
||||
dnsMap4: make(map[string]netip.Addr),
|
||||
dnsMap6: make(map[string]netip.Addr),
|
||||
hostMap: hostMap,
|
||||
myVpnAddrsTable: cs.myVpnAddrsTable,
|
||||
dnsMap: make(map[string]string),
|
||||
hostMap: hostMap,
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dnsRecords) Query(q uint16, data string) netip.Addr {
|
||||
data = strings.ToLower(data)
|
||||
func (d *dnsRecords) Query(data string) string {
|
||||
d.RLock()
|
||||
defer d.RUnlock()
|
||||
switch q {
|
||||
case dns.TypeA:
|
||||
if r, ok := d.dnsMap4[data]; ok {
|
||||
return r
|
||||
}
|
||||
case dns.TypeAAAA:
|
||||
if r, ok := d.dnsMap6[data]; ok {
|
||||
return r
|
||||
}
|
||||
if r, ok := d.dnsMap[strings.ToLower(data)]; ok {
|
||||
return r
|
||||
}
|
||||
|
||||
return netip.Addr{}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (d *dnsRecords) QueryCert(data string) string {
|
||||
ip, err := netip.ParseAddr(data[:len(data)-1])
|
||||
ip := net.ParseIP(data[:len(data)-1])
|
||||
if ip == nil {
|
||||
return ""
|
||||
}
|
||||
iip := iputil.Ip2VpnIp(ip)
|
||||
hostinfo, err := d.hostMap.QueryVpnIp(iip)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
hostinfo := d.hostMap.QueryVpnAddr(ip)
|
||||
if hostinfo == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
q := hostinfo.GetCert()
|
||||
if q == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
b, err := q.Certificate.MarshalJSON()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return string(b)
|
||||
cert := q.Details
|
||||
c := fmt.Sprintf("\"Name: %s\" \"Ips: %s\" \"Subnets %s\" \"Groups %s\" \"NotBefore %s\" \"NotAFter %s\" \"PublicKey %x\" \"IsCA %t\" \"Issuer %s\"", cert.Name, cert.Ips, cert.Subnets, cert.Groups, cert.NotBefore, cert.NotAfter, cert.PublicKey, cert.IsCA, cert.Issuer)
|
||||
return c
|
||||
}
|
||||
|
||||
// Add adds the first IPv4 and IPv6 address that appears in `addresses` as the record for `host`
|
||||
func (d *dnsRecords) Add(host string, addresses []netip.Addr) {
|
||||
host = strings.ToLower(host)
|
||||
func (d *dnsRecords) Add(host, data string) {
|
||||
d.Lock()
|
||||
defer d.Unlock()
|
||||
haveV4 := false
|
||||
haveV6 := false
|
||||
for _, addr := range addresses {
|
||||
if addr.Is4() && !haveV4 {
|
||||
d.dnsMap4[host] = addr
|
||||
haveV4 = true
|
||||
} else if addr.Is6() && !haveV6 {
|
||||
d.dnsMap6[host] = addr
|
||||
haveV6 = true
|
||||
}
|
||||
if haveV4 && haveV6 {
|
||||
break
|
||||
}
|
||||
}
|
||||
d.dnsMap[strings.ToLower(host)] = data
|
||||
}
|
||||
|
||||
func (d *dnsRecords) isSelfNebulaOrLocalhost(addr string) bool {
|
||||
a, _, _ := net.SplitHostPort(addr)
|
||||
b, err := netip.ParseAddr(a)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if b.IsLoopback() {
|
||||
return true
|
||||
}
|
||||
|
||||
_, found := d.myVpnAddrsTable.Lookup(b)
|
||||
return found //if we found it in this table, it's good
|
||||
}
|
||||
|
||||
func (d *dnsRecords) parseQuery(m *dns.Msg, w dns.ResponseWriter) {
|
||||
func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) {
|
||||
for _, q := range m.Question {
|
||||
switch q.Qtype {
|
||||
case dns.TypeA, dns.TypeAAAA:
|
||||
qType := dns.TypeToString[q.Qtype]
|
||||
d.l.Debugf("Query for %s %s", qType, q.Name)
|
||||
ip := d.Query(q.Qtype, q.Name)
|
||||
if ip.IsValid() {
|
||||
rr, err := dns.NewRR(fmt.Sprintf("%s %s %s", q.Name, qType, ip))
|
||||
case dns.TypeA:
|
||||
l.Debugf("Query for A %s", q.Name)
|
||||
ip := dnsR.Query(q.Name)
|
||||
if ip != "" {
|
||||
rr, err := dns.NewRR(fmt.Sprintf("%s A %s", q.Name, ip))
|
||||
if err == nil {
|
||||
m.Answer = append(m.Answer, rr)
|
||||
}
|
||||
}
|
||||
case dns.TypeTXT:
|
||||
// We only answer these queries from nebula nodes or localhost
|
||||
if !d.isSelfNebulaOrLocalhost(w.RemoteAddr().String()) {
|
||||
a, _, _ := net.SplitHostPort(w.RemoteAddr().String())
|
||||
b := net.ParseIP(a)
|
||||
// We don't answer these queries from non nebula nodes or localhost
|
||||
//l.Debugf("Does %s contain %s", b, dnsR.hostMap.vpnCIDR)
|
||||
if !dnsR.hostMap.vpnCIDR.Contains(b) && a != "127.0.0.1" {
|
||||
return
|
||||
}
|
||||
d.l.Debugf("Query for TXT %s", q.Name)
|
||||
ip := d.QueryCert(q.Name)
|
||||
l.Debugf("Query for TXT %s", q.Name)
|
||||
ip := dnsR.QueryCert(q.Name)
|
||||
if ip != "" {
|
||||
rr, err := dns.NewRR(fmt.Sprintf("%s TXT %s", q.Name, ip))
|
||||
if err == nil {
|
||||
|
@ -144,30 +96,28 @@ func (d *dnsRecords) parseQuery(m *dns.Msg, w dns.ResponseWriter) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(m.Answer) == 0 {
|
||||
m.Rcode = dns.RcodeNameError
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dnsRecords) handleDnsRequest(w dns.ResponseWriter, r *dns.Msg) {
|
||||
func handleDnsRequest(l *logrus.Logger, w dns.ResponseWriter, r *dns.Msg) {
|
||||
m := new(dns.Msg)
|
||||
m.SetReply(r)
|
||||
m.Compress = false
|
||||
|
||||
switch r.Opcode {
|
||||
case dns.OpcodeQuery:
|
||||
d.parseQuery(m, w)
|
||||
parseQuery(l, m, w)
|
||||
}
|
||||
|
||||
w.WriteMsg(m)
|
||||
}
|
||||
|
||||
func dnsMain(l *logrus.Logger, cs *CertState, hostMap *HostMap, c *config.C) func() {
|
||||
dnsR = newDnsRecords(l, cs, hostMap)
|
||||
func dnsMain(l *logrus.Logger, hostMap *HostMap, c *config.C) func() {
|
||||
dnsR = newDnsRecords(hostMap)
|
||||
|
||||
// attach request handler func
|
||||
dns.HandleFunc(".", dnsR.handleDnsRequest)
|
||||
dns.HandleFunc(".", func(w dns.ResponseWriter, r *dns.Msg) {
|
||||
handleDnsRequest(l, w, r)
|
||||
})
|
||||
|
||||
c.RegisterReloadCallback(func(c *config.C) {
|
||||
reloadDns(l, c)
|
||||
|
@ -179,12 +129,7 @@ func dnsMain(l *logrus.Logger, cs *CertState, hostMap *HostMap, c *config.C) fun
|
|||
}
|
||||
|
||||
func getDnsServerAddr(c *config.C) string {
|
||||
dnsHost := strings.TrimSpace(c.GetString("lighthouse.dns.host", ""))
|
||||
// Old guidance was to provide the literal `[::]` in `lighthouse.dns.host` but that won't resolve.
|
||||
if dnsHost == "[::]" {
|
||||
dnsHost = "::"
|
||||
}
|
||||
return net.JoinHostPort(dnsHost, strconv.Itoa(c.GetInt("lighthouse.dns.port", 53)))
|
||||
return c.GetString("lighthouse.dns.host", "") + ":" + strconv.Itoa(c.GetInt("lighthouse.dns.port", 53))
|
||||
}
|
||||
|
||||
func startDns(l *logrus.Logger, c *config.C) {
|
||||
|
|
|
@ -1,73 +1,19 @@
|
|||
package nebula
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"testing"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestParsequery(t *testing.T) {
|
||||
l := logrus.New()
|
||||
//TODO: This test is basically pointless
|
||||
hostMap := &HostMap{}
|
||||
ds := newDnsRecords(l, &CertState{}, hostMap)
|
||||
addrs := []netip.Addr{
|
||||
netip.MustParseAddr("1.2.3.4"),
|
||||
netip.MustParseAddr("1.2.3.5"),
|
||||
netip.MustParseAddr("fd01::24"),
|
||||
netip.MustParseAddr("fd01::25"),
|
||||
}
|
||||
ds.Add("test.com.com", addrs)
|
||||
ds := newDnsRecords(hostMap)
|
||||
ds.Add("test.com.com", "1.2.3.4")
|
||||
|
||||
m := &dns.Msg{}
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion("test.com.com", dns.TypeA)
|
||||
ds.parseQuery(m, nil)
|
||||
assert.NotNil(t, m.Answer)
|
||||
assert.Equal(t, "1.2.3.4", m.Answer[0].(*dns.A).A.String())
|
||||
|
||||
m = &dns.Msg{}
|
||||
m.SetQuestion("test.com.com", dns.TypeAAAA)
|
||||
ds.parseQuery(m, nil)
|
||||
assert.NotNil(t, m.Answer)
|
||||
assert.Equal(t, "fd01::24", m.Answer[0].(*dns.AAAA).AAAA.String())
|
||||
}
|
||||
|
||||
func Test_getDnsServerAddr(t *testing.T) {
|
||||
c := config.NewC(nil)
|
||||
|
||||
c.Settings["lighthouse"] = map[interface{}]interface{}{
|
||||
"dns": map[interface{}]interface{}{
|
||||
"host": "0.0.0.0",
|
||||
"port": "1",
|
||||
},
|
||||
}
|
||||
assert.Equal(t, "0.0.0.0:1", getDnsServerAddr(c))
|
||||
|
||||
c.Settings["lighthouse"] = map[interface{}]interface{}{
|
||||
"dns": map[interface{}]interface{}{
|
||||
"host": "::",
|
||||
"port": "1",
|
||||
},
|
||||
}
|
||||
assert.Equal(t, "[::]:1", getDnsServerAddr(c))
|
||||
|
||||
c.Settings["lighthouse"] = map[interface{}]interface{}{
|
||||
"dns": map[interface{}]interface{}{
|
||||
"host": "[::]",
|
||||
"port": "1",
|
||||
},
|
||||
}
|
||||
assert.Equal(t, "[::]:1", getDnsServerAddr(c))
|
||||
|
||||
// Make sure whitespace doesn't mess us up
|
||||
c.Settings["lighthouse"] = map[interface{}]interface{}{
|
||||
"dns": map[interface{}]interface{}{
|
||||
"host": "[::] ",
|
||||
"port": "1",
|
||||
},
|
||||
}
|
||||
assert.Equal(t, "[::]:1", getDnsServerAddr(c))
|
||||
//parseQuery(m)
|
||||
}
|
||||
|
|
|
@ -1,11 +0,0 @@
|
|||
FROM gcr.io/distroless/static:latest
|
||||
|
||||
ARG TARGETOS TARGETARCH
|
||||
COPY build/$TARGETOS-$TARGETARCH/nebula /nebula
|
||||
COPY build/$TARGETOS-$TARGETARCH/nebula-cert /nebula-cert
|
||||
|
||||
VOLUME ["/config"]
|
||||
|
||||
ENTRYPOINT ["/nebula"]
|
||||
# Allow users to override the args passed to nebula
|
||||
CMD ["-config", "/config/config.yml"]
|
|
@ -1,24 +0,0 @@
|
|||
# NebulaOSS/nebula Docker Image
|
||||
|
||||
## Building
|
||||
|
||||
From the root of the repository, run `make docker`.
|
||||
|
||||
## Running
|
||||
|
||||
To run the built image, use the following command:
|
||||
|
||||
```
|
||||
docker run \
|
||||
--name nebula \
|
||||
--network host \
|
||||
--cap-add NET_ADMIN \
|
||||
--volume ./config:/config \
|
||||
--rm \
|
||||
nebulaoss/nebula
|
||||
```
|
||||
|
||||
A few notes:
|
||||
|
||||
- The `NET_ADMIN` capability is necessary to create the tun adapter on the host (this is unnecessary if the tun device is disabled.)
|
||||
- `--volume ./config:/config` should point to a directory that contains your `config.yml` and any other necessary files.
|
File diff suppressed because it is too large
Load diff
|
@ -4,61 +4,45 @@
|
|||
package e2e
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/netip"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"dario.cat/mergo"
|
||||
"github.com/google/gopacket"
|
||||
"github.com/google/gopacket/layers"
|
||||
"github.com/imdario/mergo"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula"
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"github.com/slackhq/nebula/cert_test"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/e2e/router"
|
||||
"github.com/slackhq/nebula/iputil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"golang.org/x/crypto/curve25519"
|
||||
"golang.org/x/crypto/ed25519"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type m map[string]interface{}
|
||||
|
||||
// newSimpleServer creates a nebula instance with many assumptions
|
||||
func newSimpleServer(v cert.Version, caCrt cert.Certificate, caKey []byte, name string, sVpnNetworks string, overrides m) (*nebula.Control, []netip.Prefix, netip.AddrPort, *config.C) {
|
||||
func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, overrides m) (*nebula.Control, *net.IPNet, *net.UDPAddr, *config.C) {
|
||||
l := NewTestLogger()
|
||||
|
||||
var vpnNetworks []netip.Prefix
|
||||
for _, sn := range strings.Split(sVpnNetworks, ",") {
|
||||
vpnIpNet, err := netip.ParsePrefix(strings.TrimSpace(sn))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
vpnNetworks = append(vpnNetworks, vpnIpNet)
|
||||
vpnIpNet := &net.IPNet{IP: make([]byte, len(udpIp)), Mask: net.IPMask{255, 255, 255, 0}}
|
||||
copy(vpnIpNet.IP, udpIp)
|
||||
vpnIpNet.IP[1] += 128
|
||||
udpAddr := net.UDPAddr{
|
||||
IP: udpIp,
|
||||
Port: 4242,
|
||||
}
|
||||
_, _, myPrivKey, myPEM := newTestCert(caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{})
|
||||
|
||||
if len(vpnNetworks) == 0 {
|
||||
panic("no vpn networks")
|
||||
}
|
||||
|
||||
var udpAddr netip.AddrPort
|
||||
if vpnNetworks[0].Addr().Is4() {
|
||||
budpIp := vpnNetworks[0].Addr().As4()
|
||||
budpIp[1] -= 128
|
||||
udpAddr = netip.AddrPortFrom(netip.AddrFrom4(budpIp), 4242)
|
||||
} else {
|
||||
budpIp := vpnNetworks[0].Addr().As16()
|
||||
// beef for funsies
|
||||
budpIp[2] = 190
|
||||
budpIp[3] = 239
|
||||
udpAddr = netip.AddrPortFrom(netip.AddrFrom16(budpIp), 4242)
|
||||
}
|
||||
_, _, myPrivKey, myPEM := cert_test.NewTestCert(v, cert.Curve_CURVE25519, caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnNetworks, nil, []string{})
|
||||
|
||||
caB, err := caCrt.MarshalPEM()
|
||||
caB, err := caCrt.MarshalToPEM()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -86,8 +70,8 @@ func newSimpleServer(v cert.Version, caCrt cert.Certificate, caKey []byte, name
|
|||
// "try_interval": "1s",
|
||||
//},
|
||||
"listen": m{
|
||||
"host": udpAddr.Addr().String(),
|
||||
"port": udpAddr.Port(),
|
||||
"host": udpAddr.IP.String(),
|
||||
"port": udpAddr.Port,
|
||||
},
|
||||
"logging": m{
|
||||
"timestamp_format": fmt.Sprintf("%v 15:04:05.000000", name),
|
||||
|
@ -100,16 +84,11 @@ func newSimpleServer(v cert.Version, caCrt cert.Certificate, caKey []byte, name
|
|||
}
|
||||
|
||||
if overrides != nil {
|
||||
final := m{}
|
||||
err = mergo.Merge(&final, overrides, mergo.WithAppendSlice)
|
||||
err = mergo.Merge(&overrides, mc, mergo.WithAppendSlice)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = mergo.Merge(&final, mc, mergo.WithAppendSlice)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
mc = final
|
||||
mc = overrides
|
||||
}
|
||||
|
||||
cb, err := yaml.Marshal(mc)
|
||||
|
@ -126,7 +105,113 @@ func newSimpleServer(v cert.Version, caCrt cert.Certificate, caKey []byte, name
|
|||
panic(err)
|
||||
}
|
||||
|
||||
return control, vpnNetworks, udpAddr, c
|
||||
return control, vpnIpNet, &udpAddr, c
|
||||
}
|
||||
|
||||
// newTestCaCert will generate a CA cert
|
||||
func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
|
||||
pub, priv, err := ed25519.GenerateKey(rand.Reader)
|
||||
if before.IsZero() {
|
||||
before = time.Now().Add(time.Second * -60).Round(time.Second)
|
||||
}
|
||||
if after.IsZero() {
|
||||
after = time.Now().Add(time.Second * 60).Round(time.Second)
|
||||
}
|
||||
|
||||
nc := &cert.NebulaCertificate{
|
||||
Details: cert.NebulaCertificateDetails{
|
||||
Name: "test ca",
|
||||
NotBefore: time.Unix(before.Unix(), 0),
|
||||
NotAfter: time.Unix(after.Unix(), 0),
|
||||
PublicKey: pub,
|
||||
IsCA: true,
|
||||
InvertedGroups: make(map[string]struct{}),
|
||||
},
|
||||
}
|
||||
|
||||
if len(ips) > 0 {
|
||||
nc.Details.Ips = ips
|
||||
}
|
||||
|
||||
if len(subnets) > 0 {
|
||||
nc.Details.Subnets = subnets
|
||||
}
|
||||
|
||||
if len(groups) > 0 {
|
||||
nc.Details.Groups = groups
|
||||
}
|
||||
|
||||
err = nc.Sign(cert.Curve_CURVE25519, priv)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
pem, err := nc.MarshalToPEM()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return nc, pub, priv, pem
|
||||
}
|
||||
|
||||
// newTestCert will generate a signed certificate with the provided details.
|
||||
// Expiry times are defaulted if you do not pass them in
|
||||
func newTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip *net.IPNet, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
|
||||
issuer, err := ca.Sha256Sum()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if before.IsZero() {
|
||||
before = time.Now().Add(time.Second * -60).Round(time.Second)
|
||||
}
|
||||
|
||||
if after.IsZero() {
|
||||
after = time.Now().Add(time.Second * 60).Round(time.Second)
|
||||
}
|
||||
|
||||
pub, rawPriv := x25519Keypair()
|
||||
|
||||
nc := &cert.NebulaCertificate{
|
||||
Details: cert.NebulaCertificateDetails{
|
||||
Name: name,
|
||||
Ips: []*net.IPNet{ip},
|
||||
Subnets: subnets,
|
||||
Groups: groups,
|
||||
NotBefore: time.Unix(before.Unix(), 0),
|
||||
NotAfter: time.Unix(after.Unix(), 0),
|
||||
PublicKey: pub,
|
||||
IsCA: false,
|
||||
Issuer: issuer,
|
||||
InvertedGroups: make(map[string]struct{}),
|
||||
},
|
||||
}
|
||||
|
||||
err = nc.Sign(ca.Details.Curve, key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
pem, err := nc.MarshalToPEM()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return nc, pub, cert.MarshalX25519PrivateKey(rawPriv), pem
|
||||
}
|
||||
|
||||
func x25519Keypair() ([]byte, []byte) {
|
||||
privkey := make([]byte, 32)
|
||||
if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return pubkey, privkey
|
||||
}
|
||||
|
||||
type doneCb func()
|
||||
|
@ -147,73 +232,64 @@ func deadline(t *testing.T, seconds time.Duration) doneCb {
|
|||
}
|
||||
}
|
||||
|
||||
func assertTunnel(t *testing.T, vpnIpA, vpnIpB netip.Addr, controlA, controlB *nebula.Control, r *router.R) {
|
||||
func assertTunnel(t *testing.T, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control, r *router.R) {
|
||||
// Send a packet from them to me
|
||||
controlB.InjectTunUDPPacket(vpnIpA, 80, vpnIpB, 90, []byte("Hi from B"))
|
||||
controlB.InjectTunUDPPacket(vpnIpA, 80, 90, []byte("Hi from B"))
|
||||
bPacket := r.RouteForAllUntilTxTun(controlA)
|
||||
assertUdpPacket(t, []byte("Hi from B"), bPacket, vpnIpB, vpnIpA, 90, 80)
|
||||
|
||||
// And once more from me to them
|
||||
controlA.InjectTunUDPPacket(vpnIpB, 80, vpnIpA, 90, []byte("Hello from A"))
|
||||
controlA.InjectTunUDPPacket(vpnIpB, 80, 90, []byte("Hello from A"))
|
||||
aPacket := r.RouteForAllUntilTxTun(controlB)
|
||||
assertUdpPacket(t, []byte("Hello from A"), aPacket, vpnIpA, vpnIpB, 90, 80)
|
||||
}
|
||||
|
||||
func assertHostInfoPair(t *testing.T, addrA, addrB netip.AddrPort, vpnNetsA, vpnNetsB []netip.Prefix, controlA, controlB *nebula.Control) {
|
||||
func assertHostInfoPair(t *testing.T, addrA, addrB *net.UDPAddr, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control) {
|
||||
// Get both host infos
|
||||
//TODO: CERT-V2 we may want to loop over each vpnAddr and assert all the things
|
||||
hBinA := controlA.GetHostInfoByVpnAddr(vpnNetsB[0].Addr(), false)
|
||||
assert.NotNil(t, hBinA, "Host B was not found by vpnAddr in controlA")
|
||||
hBinA := controlA.GetHostInfoByVpnIp(iputil.Ip2VpnIp(vpnIpB), false)
|
||||
assert.NotNil(t, hBinA, "Host B was not found by vpnIp in controlA")
|
||||
|
||||
hAinB := controlB.GetHostInfoByVpnAddr(vpnNetsA[0].Addr(), false)
|
||||
assert.NotNil(t, hAinB, "Host A was not found by vpnAddr in controlB")
|
||||
hAinB := controlB.GetHostInfoByVpnIp(iputil.Ip2VpnIp(vpnIpA), false)
|
||||
assert.NotNil(t, hAinB, "Host A was not found by vpnIp in controlB")
|
||||
|
||||
// Check that both vpn and real addr are correct
|
||||
assert.EqualValues(t, getAddrs(vpnNetsB), hBinA.VpnAddrs, "Host B VpnIp is wrong in control A")
|
||||
assert.EqualValues(t, getAddrs(vpnNetsA), hAinB.VpnAddrs, "Host A VpnIp is wrong in control B")
|
||||
assert.Equal(t, vpnIpB, hBinA.VpnIp, "Host B VpnIp is wrong in control A")
|
||||
assert.Equal(t, vpnIpA, hAinB.VpnIp, "Host A VpnIp is wrong in control B")
|
||||
|
||||
assert.Equal(t, addrB, hBinA.CurrentRemote, "Host B remote is wrong in control A")
|
||||
assert.Equal(t, addrA, hAinB.CurrentRemote, "Host A remote is wrong in control B")
|
||||
assert.Equal(t, addrB.IP.To16(), hBinA.CurrentRemote.IP.To16(), "Host B remote ip is wrong in control A")
|
||||
assert.Equal(t, addrA.IP.To16(), hAinB.CurrentRemote.IP.To16(), "Host A remote ip is wrong in control B")
|
||||
|
||||
assert.Equal(t, addrB.Port, int(hBinA.CurrentRemote.Port), "Host B remote port is wrong in control A")
|
||||
assert.Equal(t, addrA.Port, int(hAinB.CurrentRemote.Port), "Host A remote port is wrong in control B")
|
||||
|
||||
// Check that our indexes match
|
||||
assert.Equal(t, hBinA.LocalIndex, hAinB.RemoteIndex, "Host B local index does not match host A remote index")
|
||||
assert.Equal(t, hBinA.RemoteIndex, hAinB.LocalIndex, "Host B remote index does not match host A local index")
|
||||
|
||||
//TODO: Would be nice to assert this memory
|
||||
//checkIndexes := func(name string, hm *HostMap, hi *HostInfo) {
|
||||
// hBbyIndex := hmA.Indexes[hBinA.localIndexId]
|
||||
// assert.NotNil(t, hBbyIndex, "Could not host info by local index in %s", name)
|
||||
// assert.Equal(t, &hBbyIndex, &hBinA, "%s Indexes map did not point to the right host info", name)
|
||||
//
|
||||
// //TODO: remote indexes are susceptible to collision
|
||||
// hBbyRemoteIndex := hmA.RemoteIndexes[hBinA.remoteIndexId]
|
||||
// assert.NotNil(t, hBbyIndex, "Could not host info by remote index in %s", name)
|
||||
// assert.Equal(t, &hBbyRemoteIndex, &hBinA, "%s RemoteIndexes did not point to the right host info", name)
|
||||
//}
|
||||
//
|
||||
//// Check hostmap indexes too
|
||||
//checkIndexes("hmA", hmA, hBinA)
|
||||
//checkIndexes("hmB", hmB, hAinB)
|
||||
}
|
||||
|
||||
func assertUdpPacket(t *testing.T, expected, b []byte, fromIp, toIp netip.Addr, fromPort, toPort uint16) {
|
||||
if toIp.Is6() {
|
||||
assertUdpPacket6(t, expected, b, fromIp, toIp, fromPort, toPort)
|
||||
} else {
|
||||
assertUdpPacket4(t, expected, b, fromIp, toIp, fromPort, toPort)
|
||||
}
|
||||
}
|
||||
|
||||
func assertUdpPacket6(t *testing.T, expected, b []byte, fromIp, toIp netip.Addr, fromPort, toPort uint16) {
|
||||
packet := gopacket.NewPacket(b, layers.LayerTypeIPv6, gopacket.Lazy)
|
||||
v6 := packet.Layer(layers.LayerTypeIPv6).(*layers.IPv6)
|
||||
assert.NotNil(t, v6, "No ipv6 data found")
|
||||
|
||||
assert.Equal(t, fromIp.AsSlice(), []byte(v6.SrcIP), "Source ip was incorrect")
|
||||
assert.Equal(t, toIp.AsSlice(), []byte(v6.DstIP), "Dest ip was incorrect")
|
||||
|
||||
udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
|
||||
assert.NotNil(t, udp, "No udp data found")
|
||||
|
||||
assert.Equal(t, fromPort, uint16(udp.SrcPort), "Source port was incorrect")
|
||||
assert.Equal(t, toPort, uint16(udp.DstPort), "Dest port was incorrect")
|
||||
|
||||
data := packet.ApplicationLayer()
|
||||
assert.NotNil(t, data)
|
||||
assert.Equal(t, expected, data.Payload(), "Data was incorrect")
|
||||
}
|
||||
|
||||
func assertUdpPacket4(t *testing.T, expected, b []byte, fromIp, toIp netip.Addr, fromPort, toPort uint16) {
|
||||
func assertUdpPacket(t *testing.T, expected, b []byte, fromIp, toIp net.IP, fromPort, toPort uint16) {
|
||||
packet := gopacket.NewPacket(b, layers.LayerTypeIPv4, gopacket.Lazy)
|
||||
v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
|
||||
assert.NotNil(t, v4, "No ipv4 data found")
|
||||
|
||||
assert.Equal(t, fromIp.AsSlice(), []byte(v4.SrcIP), "Source ip was incorrect")
|
||||
assert.Equal(t, toIp.AsSlice(), []byte(v4.DstIP), "Dest ip was incorrect")
|
||||
assert.Equal(t, fromIp, v4.SrcIP, "Source ip was incorrect")
|
||||
assert.Equal(t, toIp, v4.DstIP, "Dest ip was incorrect")
|
||||
|
||||
udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
|
||||
assert.NotNil(t, udp, "No udp data found")
|
||||
|
@ -226,14 +302,6 @@ func assertUdpPacket4(t *testing.T, expected, b []byte, fromIp, toIp netip.Addr,
|
|||
assert.Equal(t, expected, data.Payload(), "Data was incorrect")
|
||||
}
|
||||
|
||||
func getAddrs(ns []netip.Prefix) []netip.Addr {
|
||||
var a []netip.Addr
|
||||
for _, n := range ns {
|
||||
a = append(a, n.Addr())
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
func NewTestLogger() *logrus.Logger {
|
||||
l := logrus.New()
|
||||
|
||||
|
|
|
@ -5,11 +5,11 @@ package router
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/slackhq/nebula"
|
||||
"github.com/slackhq/nebula/iputil"
|
||||
)
|
||||
|
||||
type edge struct {
|
||||
|
@ -58,9 +58,8 @@ func renderHostmap(c *nebula.Control) (string, []*edge) {
|
|||
var lines []string
|
||||
var globalLines []*edge
|
||||
|
||||
crt := c.GetCertState().GetDefaultCertificate()
|
||||
clusterName := strings.Trim(crt.Name(), " ")
|
||||
clusterVpnIp := crt.Networks()[0].Addr()
|
||||
clusterName := strings.Trim(c.GetCert().Details.Name, " ")
|
||||
clusterVpnIp := c.GetCert().Details.Ips[0].IP
|
||||
r := fmt.Sprintf("\tsubgraph %s[\"%s (%s)\"]\n", clusterName, clusterName, clusterVpnIp)
|
||||
|
||||
hm := c.GetHostmap()
|
||||
|
@ -102,8 +101,8 @@ func renderHostmap(c *nebula.Control) (string, []*edge) {
|
|||
for _, idx := range indexes {
|
||||
hi, ok := hm.Indexes[idx]
|
||||
if ok {
|
||||
r += fmt.Sprintf("\t\t\t%v.%v[\"%v (%v)\"]\n", clusterName, idx, idx, hi.GetVpnAddrs())
|
||||
remoteClusterName := strings.Trim(hi.GetCert().Certificate.Name(), " ")
|
||||
r += fmt.Sprintf("\t\t\t%v.%v[\"%v (%v)\"]\n", clusterName, idx, idx, hi.GetVpnIp())
|
||||
remoteClusterName := strings.Trim(hi.GetCert().Details.Name, " ")
|
||||
globalLines = append(globalLines, &edge{from: fmt.Sprintf("%v.%v", clusterName, idx), to: fmt.Sprintf("%v.%v", remoteClusterName, hi.GetRemoteIndex())})
|
||||
_ = hi
|
||||
}
|
||||
|
@ -119,14 +118,14 @@ func renderHostmap(c *nebula.Control) (string, []*edge) {
|
|||
return r, globalLines
|
||||
}
|
||||
|
||||
func sortedHosts(hosts map[netip.Addr]*nebula.HostInfo) []netip.Addr {
|
||||
keys := make([]netip.Addr, 0, len(hosts))
|
||||
func sortedHosts(hosts map[iputil.VpnIp]*nebula.HostInfo) []iputil.VpnIp {
|
||||
keys := make([]iputil.VpnIp, 0, len(hosts))
|
||||
for key := range hosts {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
|
||||
sort.SliceStable(keys, func(i, j int) bool {
|
||||
return keys[i].Compare(keys[j]) > 0
|
||||
return keys[i] > keys[j]
|
||||
})
|
||||
|
||||
return keys
|
||||
|
|
|
@ -6,12 +6,13 @@ package router
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -20,6 +21,7 @@ import (
|
|||
"github.com/google/gopacket/layers"
|
||||
"github.com/slackhq/nebula"
|
||||
"github.com/slackhq/nebula/header"
|
||||
"github.com/slackhq/nebula/iputil"
|
||||
"github.com/slackhq/nebula/udp"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
@ -27,18 +29,18 @@ import (
|
|||
type R struct {
|
||||
// Simple map of the ip:port registered on a control to the control
|
||||
// Basically a router, right?
|
||||
controls map[netip.AddrPort]*nebula.Control
|
||||
controls map[string]*nebula.Control
|
||||
|
||||
// A map for inbound packets for a control that doesn't know about this address
|
||||
inNat map[netip.AddrPort]*nebula.Control
|
||||
inNat map[string]*nebula.Control
|
||||
|
||||
// A last used map, if an inbound packet hit the inNat map then
|
||||
// all return packets should use the same last used inbound address for the outbound sender
|
||||
// map[from address + ":" + to address] => ip:port to rewrite in the udp packet to receiver
|
||||
outNat map[string]netip.AddrPort
|
||||
outNat map[string]net.UDPAddr
|
||||
|
||||
// A map of vpn ip to the nebula control it belongs to
|
||||
vpnControls map[netip.Addr]*nebula.Control
|
||||
vpnControls map[iputil.VpnIp]*nebula.Control
|
||||
|
||||
ignoreFlows []ignoreFlow
|
||||
flow []flowEntry
|
||||
|
@ -116,10 +118,10 @@ func NewR(t testing.TB, controls ...*nebula.Control) *R {
|
|||
}
|
||||
|
||||
r := &R{
|
||||
controls: make(map[netip.AddrPort]*nebula.Control),
|
||||
vpnControls: make(map[netip.Addr]*nebula.Control),
|
||||
inNat: make(map[netip.AddrPort]*nebula.Control),
|
||||
outNat: make(map[string]netip.AddrPort),
|
||||
controls: make(map[string]*nebula.Control),
|
||||
vpnControls: make(map[iputil.VpnIp]*nebula.Control),
|
||||
inNat: make(map[string]*nebula.Control),
|
||||
outNat: make(map[string]net.UDPAddr),
|
||||
flow: []flowEntry{},
|
||||
ignoreFlows: []ignoreFlow{},
|
||||
fn: filepath.Join("mermaid", fmt.Sprintf("%s.md", t.Name())),
|
||||
|
@ -133,13 +135,10 @@ func NewR(t testing.TB, controls ...*nebula.Control) *R {
|
|||
for _, c := range controls {
|
||||
addr := c.GetUDPAddr()
|
||||
if _, ok := r.controls[addr]; ok {
|
||||
panic("Duplicate listen address: " + addr.String())
|
||||
}
|
||||
|
||||
for _, vpnAddr := range c.GetVpnAddrs() {
|
||||
r.vpnControls[vpnAddr] = c
|
||||
panic("Duplicate listen address: " + addr)
|
||||
}
|
||||
|
||||
r.vpnControls[c.GetVpnIp()] = c
|
||||
r.controls[addr] = c
|
||||
}
|
||||
|
||||
|
@ -166,13 +165,13 @@ func NewR(t testing.TB, controls ...*nebula.Control) *R {
|
|||
// It does not look at the addr attached to the instance.
|
||||
// If a route is used, this will behave like a NAT for the return path.
|
||||
// Rewriting the source ip:port to what was last sent to from the origin
|
||||
func (r *R) AddRoute(ip netip.Addr, port uint16, c *nebula.Control) {
|
||||
func (r *R) AddRoute(ip net.IP, port uint16, c *nebula.Control) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
|
||||
inAddr := netip.AddrPortFrom(ip, port)
|
||||
inAddr := net.JoinHostPort(ip.String(), fmt.Sprintf("%v", port))
|
||||
if _, ok := r.inNat[inAddr]; ok {
|
||||
panic("Duplicate listen address inNat: " + inAddr.String())
|
||||
panic("Duplicate listen address inNat: " + inAddr)
|
||||
}
|
||||
r.inNat[inAddr] = c
|
||||
}
|
||||
|
@ -199,7 +198,7 @@ func (r *R) renderFlow() {
|
|||
panic(err)
|
||||
}
|
||||
|
||||
var participants = map[netip.AddrPort]struct{}{}
|
||||
var participants = map[string]struct{}{}
|
||||
var participantsVals []string
|
||||
|
||||
fmt.Fprintln(f, "```mermaid")
|
||||
|
@ -216,11 +215,11 @@ func (r *R) renderFlow() {
|
|||
continue
|
||||
}
|
||||
participants[addr] = struct{}{}
|
||||
sanAddr := normalizeName(addr.String())
|
||||
sanAddr := strings.Replace(addr, ":", "-", 1)
|
||||
participantsVals = append(participantsVals, sanAddr)
|
||||
fmt.Fprintf(
|
||||
f, " participant %s as Nebula: %s<br/>UDP: %s\n",
|
||||
sanAddr, e.packet.from.GetVpnAddrs(), sanAddr,
|
||||
sanAddr, e.packet.from.GetVpnIp(), sanAddr,
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -253,9 +252,9 @@ func (r *R) renderFlow() {
|
|||
|
||||
fmt.Fprintf(f,
|
||||
" %s%s%s: %s(%s), index %v, counter: %v\n",
|
||||
normalizeName(p.from.GetUDPAddr().String()),
|
||||
strings.Replace(p.from.GetUDPAddr(), ":", "-", 1),
|
||||
line,
|
||||
normalizeName(p.to.GetUDPAddr().String()),
|
||||
strings.Replace(p.to.GetUDPAddr(), ":", "-", 1),
|
||||
h.TypeName(), h.SubTypeName(), h.RemoteIndex, h.MessageCounter,
|
||||
)
|
||||
}
|
||||
|
@ -270,11 +269,6 @@ func (r *R) renderFlow() {
|
|||
}
|
||||
}
|
||||
|
||||
func normalizeName(s string) string {
|
||||
rx := regexp.MustCompile("[\\[\\]\\:]")
|
||||
return rx.ReplaceAllLiteralString(s, "_")
|
||||
}
|
||||
|
||||
// IgnoreFlow tells the router to stop recording future flows that matches the provided criteria.
|
||||
// messageType and subType will target nebula underlay packets while tun will target nebula overlay packets
|
||||
// NOTE: This is a very broad system, if you set tun to true then no more tun traffic will be rendered
|
||||
|
@ -311,7 +305,7 @@ func (r *R) RenderHostmaps(title string, controls ...*nebula.Control) {
|
|||
func (r *R) renderHostmaps(title string) {
|
||||
c := maps.Values(r.controls)
|
||||
sort.SliceStable(c, func(i, j int) bool {
|
||||
return c[i].GetVpnAddrs()[0].Compare(c[j].GetVpnAddrs()[0]) > 0
|
||||
return c[i].GetVpnIp() > c[j].GetVpnIp()
|
||||
})
|
||||
|
||||
s := renderHostmaps(c...)
|
||||
|
@ -426,12 +420,13 @@ func (r *R) RouteUntilTxTun(sender *nebula.Control, receiver *nebula.Control) []
|
|||
|
||||
// Nope, lets push the sender along
|
||||
case p := <-udpTx:
|
||||
outAddr := sender.GetUDPAddr()
|
||||
r.Lock()
|
||||
a := sender.GetUDPAddr()
|
||||
c := r.getControl(a, p.To, p)
|
||||
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
|
||||
c := r.getControl(outAddr, inAddr, p)
|
||||
if c == nil {
|
||||
r.Unlock()
|
||||
panic("No control for udp tx " + a.String())
|
||||
panic("No control for udp tx")
|
||||
}
|
||||
fp := r.unlockedInjectFlow(sender, c, p, false)
|
||||
c.InjectUDPPacket(p)
|
||||
|
@ -484,11 +479,13 @@ func (r *R) RouteForAllUntilTxTun(receiver *nebula.Control) []byte {
|
|||
} else {
|
||||
// we are a udp tx, route and continue
|
||||
p := rx.Interface().(*udp.Packet)
|
||||
a := cm[x].GetUDPAddr()
|
||||
c := r.getControl(a, p.To, p)
|
||||
outAddr := cm[x].GetUDPAddr()
|
||||
|
||||
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
|
||||
c := r.getControl(outAddr, inAddr, p)
|
||||
if c == nil {
|
||||
r.Unlock()
|
||||
panic(fmt.Sprintf("No control for udp tx %s", p.To))
|
||||
panic("No control for udp tx")
|
||||
}
|
||||
fp := r.unlockedInjectFlow(cm[x], c, p, false)
|
||||
c.InjectUDPPacket(p)
|
||||
|
@ -512,10 +509,12 @@ func (r *R) RouteExitFunc(sender *nebula.Control, whatDo ExitFunc) {
|
|||
panic(err)
|
||||
}
|
||||
|
||||
receiver := r.getControl(sender.GetUDPAddr(), p.To, p)
|
||||
outAddr := sender.GetUDPAddr()
|
||||
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
|
||||
receiver := r.getControl(outAddr, inAddr, p)
|
||||
if receiver == nil {
|
||||
r.Unlock()
|
||||
panic("Can't RouteExitFunc for host: " + p.To.String())
|
||||
panic("Can't route for host: " + inAddr)
|
||||
}
|
||||
|
||||
e := whatDo(p, receiver)
|
||||
|
@ -591,13 +590,13 @@ func (r *R) InjectUDPPacket(sender, receiver *nebula.Control, packet *udp.Packet
|
|||
// RouteForUntilAfterToAddr will route for sender and return only after it sees and sends a packet destined for toAddr
|
||||
// finish can be any of the exitType values except `keepRouting`, the default value is `routeAndExit`
|
||||
// If the router doesn't have the nebula controller for that address, we panic
|
||||
func (r *R) RouteForUntilAfterToAddr(sender *nebula.Control, toAddr netip.AddrPort, finish ExitType) {
|
||||
func (r *R) RouteForUntilAfterToAddr(sender *nebula.Control, toAddr *net.UDPAddr, finish ExitType) {
|
||||
if finish == KeepRouting {
|
||||
finish = RouteAndExit
|
||||
}
|
||||
|
||||
r.RouteExitFunc(sender, func(p *udp.Packet, r *nebula.Control) ExitType {
|
||||
if p.To == toAddr {
|
||||
if p.ToIp.Equal(toAddr.IP) && p.ToPort == uint16(toAddr.Port) {
|
||||
return finish
|
||||
}
|
||||
|
||||
|
@ -631,10 +630,13 @@ func (r *R) RouteForAllExitFunc(whatDo ExitFunc) {
|
|||
r.Lock()
|
||||
|
||||
p := rx.Interface().(*udp.Packet)
|
||||
receiver := r.getControl(cm[x].GetUDPAddr(), p.To, p)
|
||||
|
||||
outAddr := cm[x].GetUDPAddr()
|
||||
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
|
||||
receiver := r.getControl(outAddr, inAddr, p)
|
||||
if receiver == nil {
|
||||
r.Unlock()
|
||||
panic("Can't RouteForAllExitFunc for host: " + p.To.String())
|
||||
panic("Can't route for host: " + inAddr)
|
||||
}
|
||||
|
||||
e := whatDo(p, receiver)
|
||||
|
@ -695,10 +697,12 @@ func (r *R) FlushAll() {
|
|||
|
||||
p := rx.Interface().(*udp.Packet)
|
||||
|
||||
receiver := r.getControl(cm[x].GetUDPAddr(), p.To, p)
|
||||
outAddr := cm[x].GetUDPAddr()
|
||||
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
|
||||
receiver := r.getControl(outAddr, inAddr, p)
|
||||
if receiver == nil {
|
||||
r.Unlock()
|
||||
panic("Can't FlushAll for host: " + p.To.String())
|
||||
panic("Can't route for host: " + inAddr)
|
||||
}
|
||||
r.Unlock()
|
||||
}
|
||||
|
@ -706,14 +710,28 @@ func (r *R) FlushAll() {
|
|||
|
||||
// getControl performs or seeds NAT translation and returns the control for toAddr, p from fields may change
|
||||
// This is an internal router function, the caller must hold the lock
|
||||
func (r *R) getControl(fromAddr, toAddr netip.AddrPort, p *udp.Packet) *nebula.Control {
|
||||
if newAddr, ok := r.outNat[fromAddr.String()+":"+toAddr.String()]; ok {
|
||||
p.From = newAddr
|
||||
func (r *R) getControl(fromAddr, toAddr string, p *udp.Packet) *nebula.Control {
|
||||
if newAddr, ok := r.outNat[fromAddr+":"+toAddr]; ok {
|
||||
p.FromIp = newAddr.IP
|
||||
p.FromPort = uint16(newAddr.Port)
|
||||
}
|
||||
|
||||
c, ok := r.inNat[toAddr]
|
||||
if ok {
|
||||
r.outNat[c.GetUDPAddr().String()+":"+fromAddr.String()] = toAddr
|
||||
sHost, sPort, err := net.SplitHostPort(toAddr)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
port, err := strconv.Atoi(sPort)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
r.outNat[c.GetUDPAddr()+":"+fromAddr] = net.UDPAddr{
|
||||
IP: net.ParseIP(sHost),
|
||||
Port: port,
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
|
@ -721,42 +739,29 @@ func (r *R) getControl(fromAddr, toAddr netip.AddrPort, p *udp.Packet) *nebula.C
|
|||
}
|
||||
|
||||
func (r *R) formatUdpPacket(p *packet) string {
|
||||
var packet gopacket.Packet
|
||||
var srcAddr netip.Addr
|
||||
|
||||
packet = gopacket.NewPacket(p.packet.Data, layers.LayerTypeIPv6, gopacket.Lazy)
|
||||
if packet.ErrorLayer() == nil {
|
||||
v6 := packet.Layer(layers.LayerTypeIPv6).(*layers.IPv6)
|
||||
if v6 == nil {
|
||||
panic("not an ipv6 packet")
|
||||
}
|
||||
srcAddr, _ = netip.AddrFromSlice(v6.SrcIP)
|
||||
} else {
|
||||
packet = gopacket.NewPacket(p.packet.Data, layers.LayerTypeIPv4, gopacket.Lazy)
|
||||
v6 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
|
||||
if v6 == nil {
|
||||
panic("not an ipv6 packet")
|
||||
}
|
||||
srcAddr, _ = netip.AddrFromSlice(v6.SrcIP)
|
||||
packet := gopacket.NewPacket(p.packet.Data, layers.LayerTypeIPv4, gopacket.Lazy)
|
||||
v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
|
||||
if v4 == nil {
|
||||
panic("not an ipv4 packet")
|
||||
}
|
||||
|
||||
from := "unknown"
|
||||
if c, ok := r.vpnControls[srcAddr]; ok {
|
||||
from = c.GetUDPAddr().String()
|
||||
if c, ok := r.vpnControls[iputil.Ip2VpnIp(v4.SrcIP)]; ok {
|
||||
from = c.GetUDPAddr()
|
||||
}
|
||||
|
||||
udpLayer := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
|
||||
if udpLayer == nil {
|
||||
udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
|
||||
if udp == nil {
|
||||
panic("not a udp packet")
|
||||
}
|
||||
|
||||
data := packet.ApplicationLayer()
|
||||
return fmt.Sprintf(
|
||||
" %s-->>%s: src port: %v<br/>dest port: %v<br/>data: \"%v\"\n",
|
||||
normalizeName(from),
|
||||
normalizeName(p.to.GetUDPAddr().String()),
|
||||
udpLayer.SrcPort,
|
||||
udpLayer.DstPort,
|
||||
strings.Replace(from, ":", "-", 1),
|
||||
strings.Replace(p.to.GetUDPAddr(), ":", "-", 1),
|
||||
udp.SrcPort,
|
||||
udp.DstPort,
|
||||
string(data.Payload()),
|
||||
)
|
||||
}
|
||||
|
|
|
@ -11,13 +11,7 @@ pki:
|
|||
#blocklist:
|
||||
# - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
|
||||
# disconnect_invalid is a toggle to force a client to be disconnected if the certificate is expired or invalid.
|
||||
#disconnect_invalid: true
|
||||
|
||||
# default_version controls which certificate version is used in handshakes.
|
||||
# This setting only applies if both a v1 and a v2 certificate are configured, in which case it will default to `1`.
|
||||
# Once all hosts in the mesh are configured with both a v1 and v2 certificate then this should be changed to `2`.
|
||||
# After all hosts in the mesh are using a v2 certificate then v1 certificates are no longer needed.
|
||||
# default_version: 1
|
||||
#disconnect_invalid: false
|
||||
|
||||
# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
|
||||
# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
|
||||
|
@ -27,19 +21,6 @@ pki:
|
|||
static_host_map:
|
||||
"192.168.100.1": ["100.64.22.11:4242"]
|
||||
|
||||
# The static_map config stanza can be used to configure how the static_host_map behaves.
|
||||
#static_map:
|
||||
# cadence determines how frequently DNS is re-queried for updated IP addresses when a static_host_map entry contains
|
||||
# a DNS name.
|
||||
#cadence: 30s
|
||||
|
||||
# network determines the type of IP addresses to ask the DNS server for. The default is "ip4" because nodes typically
|
||||
# do not know their public IPv4 address. Connecting to the Lighthouse via IPv4 allows the Lighthouse to detect the
|
||||
# public address. Other valid options are "ip6" and "ip" (returns both.)
|
||||
#network: ip4
|
||||
|
||||
# lookup_timeout is the DNS query timeout.
|
||||
#lookup_timeout: 250ms
|
||||
|
||||
lighthouse:
|
||||
# am_lighthouse is used to enable lighthouse functionality for a node. This should ONLY be true on nodes
|
||||
|
@ -126,8 +107,8 @@ lighthouse:
|
|||
# Port Nebula will be listening on. The default here is 4242. For a lighthouse node, the port should be defined,
|
||||
# however using port 0 will dynamically assign a port and is recommended for roaming nodes.
|
||||
listen:
|
||||
# To listen on only ipv4, use "0.0.0.0"
|
||||
host: "::"
|
||||
# To listen on both any ipv4 and ipv6 use "::"
|
||||
host: 0.0.0.0
|
||||
port: 4242
|
||||
# Sets the max number of packets to pull from the kernel for each syscall (under systems that support recvmmsg)
|
||||
# default is 64, does not support reload
|
||||
|
@ -144,11 +125,6 @@ listen:
|
|||
# valid values: always, never, private
|
||||
# This setting is reloadable.
|
||||
#send_recv_error: always
|
||||
# The so_sock option is a Linux-specific feature that allows all outgoing Nebula packets to be tagged with a specific identifier.
|
||||
# This tagging enables IP rule-based filtering. For example, it supports 0.0.0.0/0 unsafe_routes,
|
||||
# allowing for more precise routing decisions based on the packet tags. Default is 0 meaning no mark is set.
|
||||
# This setting is reloadable.
|
||||
#so_mark: 0
|
||||
|
||||
# Routines is the number of thread pairs to run that consume from the tun and UDP queues.
|
||||
# Currently, this defaults to 1 which means we have 1 tun queue reader and 1
|
||||
|
@ -178,11 +154,11 @@ punchy:
|
|||
|
||||
# Preferred ranges is used to define a hint about the local network ranges, which speeds up discovering the fastest
|
||||
# path to a network adjacent nebula node.
|
||||
# This setting is reloadable.
|
||||
# NOTE: the previous option "local_range" only allowed definition of a single range
|
||||
# and has been deprecated for "preferred_ranges"
|
||||
#preferred_ranges: ["172.16.0.0/24"]
|
||||
|
||||
# sshd can expose informational and administrative functions via ssh. This can expose informational and administrative
|
||||
# functions, and allows manual tweaking of various network settings when debugging or testing.
|
||||
# sshd can expose informational and administrative functions via ssh this is a
|
||||
#sshd:
|
||||
# Toggles the feature
|
||||
#enabled: true
|
||||
|
@ -191,15 +167,12 @@ punchy:
|
|||
# A file containing the ssh host private key to use
|
||||
# A decent way to generate one: ssh-keygen -t ed25519 -f ssh_host_ed25519_key -N "" < /dev/null
|
||||
#host_key: ./ssh_host_ed25519_key
|
||||
# Authorized users and their public keys
|
||||
# A file containing a list of authorized public keys
|
||||
#authorized_users:
|
||||
#- user: steeeeve
|
||||
# keys can be an array of strings or single string
|
||||
#keys:
|
||||
#- "ssh public key string"
|
||||
# Trusted SSH CA public keys. These are the public keys of the CAs that are allowed to sign SSH keys for access.
|
||||
#trusted_cas:
|
||||
#- "ssh public key string"
|
||||
|
||||
# EXPERIMENTAL: relay support for networks that can't establish direct connections.
|
||||
relay:
|
||||
|
@ -221,7 +194,6 @@ tun:
|
|||
disabled: false
|
||||
# Name of the device. If not set, a default will be chosen by the OS.
|
||||
# For macOS: if set, must be in the form `utun[0-9]+`.
|
||||
# For NetBSD: Required to be set, must be in the form `tun[0-9]+`
|
||||
dev: nebula1
|
||||
# Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
|
||||
drop_local_broadcast: false
|
||||
|
@ -243,7 +215,6 @@ tun:
|
|||
# `mtu`: will default to tun mtu if this option is not specified
|
||||
# `metric`: will default to 0 if this option is not specified
|
||||
# `install`: will default to true, controls whether this route is installed in the systems routing table.
|
||||
# This setting is reloadable.
|
||||
unsafe_routes:
|
||||
#- route: 172.16.1.0/24
|
||||
# via: 192.168.100.99
|
||||
|
@ -255,12 +226,10 @@ tun:
|
|||
# in nebula configuration files. Default false, not reloadable.
|
||||
#use_system_route_table: false
|
||||
|
||||
# TODO
|
||||
# Configure logging level
|
||||
logging:
|
||||
# panic, fatal, error, warning, info, or debug. Default is info and is reloadable.
|
||||
#NOTE: Debug mode can log remotely controlled/untrusted data which can quickly fill a disk in some
|
||||
# scenarios. Debug logging is also CPU intensive and will decrease performance overall.
|
||||
# Only enable debug logging while actively investigating an issue.
|
||||
# panic, fatal, error, warning, info, or debug. Default is info
|
||||
level: info
|
||||
# json or text formats currently available. Default is text
|
||||
format: text
|
||||
|
@ -275,6 +244,9 @@ logging:
|
|||
# As an example, to log as RFC3339 with millisecond precision, set to:
|
||||
#timestamp_format: "2006-01-02T15:04:05.000Z07:00"
|
||||
|
||||
#http:
|
||||
#listen: 127.0.0.1:8080
|
||||
|
||||
#stats:
|
||||
#type: graphite
|
||||
#prefix: nebula
|
||||
|
@ -283,7 +255,6 @@ logging:
|
|||
#interval: 10s
|
||||
|
||||
#type: prometheus
|
||||
#listen: 127.0.0.1:8080
|
||||
#path: /metrics
|
||||
#namespace: prometheusns
|
||||
#subsystem: nebula
|
||||
|
@ -305,10 +276,6 @@ logging:
|
|||
# A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out
|
||||
#try_interval: 100ms
|
||||
#retries: 20
|
||||
|
||||
# query_buffer is the size of the buffer channel for querying lighthouses
|
||||
#query_buffer: 64
|
||||
|
||||
# trigger_buffer is the size of the buffer channel for quickly sending handshakes
|
||||
# after receiving the response for lighthouse queries
|
||||
#trigger_buffer: 64
|
||||
|
@ -325,13 +292,6 @@ firewall:
|
|||
outbound_action: drop
|
||||
inbound_action: drop
|
||||
|
||||
# Controls the default value for local_cidr. Default is true, will be deprecated after v1.9 and defaulted to false.
|
||||
# This setting only affects nebula hosts with subnets encoded in their certificate. A nebula host acting as an
|
||||
# unsafe router with `default_local_cidr_any: true` will expose their unsafe routes to every inbound rule regardless
|
||||
# of the actual destination for the packet. Setting this to false requires each inbound rule to contain a `local_cidr`
|
||||
# if the intention is to allow traffic to flow to an unsafe route.
|
||||
#default_local_cidr_any: false
|
||||
|
||||
conntrack:
|
||||
tcp_timeout: 12m
|
||||
udp_timeout: 3m
|
||||
|
@ -339,19 +299,15 @@ firewall:
|
|||
|
||||
# The firewall is default deny. There is no way to write a deny rule.
|
||||
# Rules are comprised of a protocol, port, and one or more of host, group, or CIDR
|
||||
# Logical evaluation is roughly: port AND proto AND (ca_sha OR ca_name) AND (host OR group OR groups OR cidr) AND (local cidr)
|
||||
# Logical evaluation is roughly: port AND proto AND (ca_sha OR ca_name) AND (host OR group OR groups OR cidr)
|
||||
# - port: Takes `0` or `any` as any, a single number `80`, a range `200-901`, or `fragment` to match second and further fragments of fragmented packets (since there is no port available).
|
||||
# code: same as port but makes more sense when talking about ICMP, TODO: this is not currently implemented in a way that works, use `any`
|
||||
# proto: `any`, `tcp`, `udp`, or `icmp`
|
||||
# host: `any` or a literal hostname, ie `test-host`
|
||||
# group: `any` or a literal group name, ie `default-group`
|
||||
# groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
|
||||
# cidr: a remote CIDR, `0.0.0.0/0` is any ipv4 and `::/0` is any ipv6.
|
||||
# local_cidr: a local CIDR, `0.0.0.0/0` is any ipv4 and `::/0` is any ipv6. This could be used to filter destinations when using unsafe_routes.
|
||||
# If no unsafe networks are present in the certificate(s) or `default_local_cidr_any` is true then the default is any ipv4 or ipv6 network.
|
||||
# Otherwise the default is any vpn network assigned to via the certificate.
|
||||
# `default_local_cidr_any` defaults to false and is deprecated, it will be removed in a future release.
|
||||
# If there are unsafe routes present its best to set `local_cidr` to whatever best fits the situation.
|
||||
# cidr: a remote CIDR, `0.0.0.0/0` is any.
|
||||
# local_cidr: a local CIDR, `0.0.0.0/0` is any. This could be used to filter destinations when using unsafe_routes.
|
||||
# ca_name: An issuing CA name
|
||||
# ca_sha: An issuing CA shasum
|
||||
|
||||
|
@ -373,10 +329,3 @@ firewall:
|
|||
groups:
|
||||
- laptop
|
||||
- home
|
||||
|
||||
# Expose a subnet (unsafe route) to hosts with the group remote_client
|
||||
# This example assume you have a subnet of 192.168.100.1/24 or larger encoded in the certificate
|
||||
- port: 8080
|
||||
proto: tcp
|
||||
group: remote_client
|
||||
local_cidr: 192.168.100.1/24
|
||||
|
|
|
@ -1,109 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/service"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if err := run(); err != nil {
|
||||
log.Fatalf("%+v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func run() error {
|
||||
configStr := `
|
||||
tun:
|
||||
user: true
|
||||
|
||||
static_host_map:
|
||||
'192.168.100.1': ['localhost:4242']
|
||||
|
||||
listen:
|
||||
host: 0.0.0.0
|
||||
port: 4241
|
||||
|
||||
lighthouse:
|
||||
am_lighthouse: false
|
||||
interval: 60
|
||||
hosts:
|
||||
- '192.168.100.1'
|
||||
|
||||
firewall:
|
||||
outbound:
|
||||
# Allow all outbound traffic from this node
|
||||
- port: any
|
||||
proto: any
|
||||
host: any
|
||||
|
||||
inbound:
|
||||
# Allow icmp between any nebula hosts
|
||||
- port: any
|
||||
proto: icmp
|
||||
host: any
|
||||
- port: any
|
||||
proto: any
|
||||
host: any
|
||||
|
||||
pki:
|
||||
ca: /home/rice/Developer/nebula-config/ca.crt
|
||||
cert: /home/rice/Developer/nebula-config/app.crt
|
||||
key: /home/rice/Developer/nebula-config/app.key
|
||||
`
|
||||
var cfg config.C
|
||||
if err := cfg.LoadString(configStr); err != nil {
|
||||
return err
|
||||
}
|
||||
svc, err := service.New(&cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ln, err := svc.Listen("tcp", ":1234")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
conn, err := ln.Accept()
|
||||
if err != nil {
|
||||
log.Printf("accept error: %s", err)
|
||||
break
|
||||
}
|
||||
defer func(conn net.Conn) {
|
||||
_ = conn.Close()
|
||||
}(conn)
|
||||
|
||||
log.Printf("got connection")
|
||||
|
||||
_, err = conn.Write([]byte("hello world\n"))
|
||||
if err != nil {
|
||||
log.Printf("write error: %s", err)
|
||||
}
|
||||
|
||||
scanner := bufio.NewScanner(conn)
|
||||
for scanner.Scan() {
|
||||
message := scanner.Text()
|
||||
_, err = fmt.Fprintf(conn, "echo: %q\n", message)
|
||||
if err != nil {
|
||||
log.Printf("write error: %s", err)
|
||||
}
|
||||
log.Printf("got message %q", message)
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
log.Printf("scanner error: %s", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
_ = svc.Close()
|
||||
if err := svc.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
138
examples/quickstart-vagrant/README.md
Normal file
138
examples/quickstart-vagrant/README.md
Normal file
|
@ -0,0 +1,138 @@
|
|||
# Quickstart Guide
|
||||
|
||||
This guide is intended to bring up a vagrant environment with 1 lighthouse and 2 generic hosts running nebula.
|
||||
|
||||
## Creating the virtualenv for ansible
|
||||
|
||||
Within the `quickstart/` directory, do the following
|
||||
|
||||
```
|
||||
# make a virtual environment
|
||||
virtualenv venv
|
||||
|
||||
# get into the virtualenv
|
||||
source venv/bin/activate
|
||||
|
||||
# install ansible
|
||||
pip install -r requirements.yml
|
||||
```
|
||||
|
||||
## Bringing up the vagrant environment
|
||||
|
||||
A plugin that is used for the Vagrant environment is `vagrant-hostmanager`
|
||||
|
||||
To install, run
|
||||
|
||||
```
|
||||
vagrant plugin install vagrant-hostmanager
|
||||
```
|
||||
|
||||
All hosts within the Vagrantfile are brought up with
|
||||
|
||||
`vagrant up`
|
||||
|
||||
Once the boxes are up, go into the `ansible/` directory and deploy the playbook by running
|
||||
|
||||
`ansible-playbook playbook.yml -i inventory -u vagrant`
|
||||
|
||||
## Testing within the vagrant env
|
||||
|
||||
Once the ansible run is done, hop onto a vagrant box
|
||||
|
||||
`vagrant ssh generic1.vagrant`
|
||||
|
||||
or specifically
|
||||
|
||||
`ssh vagrant@<ip-address-in-vagrant-file` (password for the vagrant user on the boxes is `vagrant`)
|
||||
|
||||
Some quick tests once the vagrant boxes are up are to ping from `generic1.vagrant` to `generic2.vagrant` using
|
||||
their respective nebula ip address.
|
||||
|
||||
```
|
||||
vagrant@generic1:~$ ping 10.168.91.220
|
||||
PING 10.168.91.220 (10.168.91.220) 56(84) bytes of data.
|
||||
64 bytes from 10.168.91.220: icmp_seq=1 ttl=64 time=241 ms
|
||||
64 bytes from 10.168.91.220: icmp_seq=2 ttl=64 time=0.704 ms
|
||||
```
|
||||
|
||||
You can further verify that the allowed nebula firewall rules work by ssh'ing from 1 generic box to the other.
|
||||
|
||||
`ssh vagrant@<nebula-ip-address>` (password for the vagrant user on the boxes is `vagrant`)
|
||||
|
||||
See `/etc/nebula/config.yml` on a box for firewall rules.
|
||||
|
||||
To see full handshakes and hostmaps, change the logging config of `/etc/nebula/config.yml` on the vagrant boxes from
|
||||
info to debug.
|
||||
|
||||
You can watch nebula logs by running
|
||||
|
||||
```
|
||||
sudo journalctl -fu nebula
|
||||
```
|
||||
|
||||
Refer to the nebula src code directory's README for further instructions on configuring nebula.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Is nebula up and running?
|
||||
|
||||
Run and verify that
|
||||
|
||||
```
|
||||
ifconfig
|
||||
```
|
||||
|
||||
shows you an interface with the name `nebula1` being up.
|
||||
|
||||
```
|
||||
vagrant@generic1:~$ ifconfig nebula1
|
||||
nebula1: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST> mtu 1300
|
||||
inet 10.168.91.210 netmask 255.128.0.0 destination 10.168.91.210
|
||||
inet6 fe80::aeaf:b105:e6dc:936c prefixlen 64 scopeid 0x20<link>
|
||||
unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00 txqueuelen 500 (UNSPEC)
|
||||
RX packets 2 bytes 168 (168.0 B)
|
||||
RX errors 0 dropped 0 overruns 0 frame 0
|
||||
TX packets 11 bytes 600 (600.0 B)
|
||||
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
|
||||
```
|
||||
|
||||
### Connectivity
|
||||
|
||||
Are you able to ping other boxes on the private nebula network?
|
||||
|
||||
The following are the private nebula ip addresses of the vagrant env
|
||||
|
||||
```
|
||||
generic1.vagrant [nebula_ip] 10.168.91.210
|
||||
generic2.vagrant [nebula_ip] 10.168.91.220
|
||||
lighthouse1.vagrant [nebula_ip] 10.168.91.230
|
||||
```
|
||||
|
||||
Try pinging generic1.vagrant to and from any other box using its nebula ip above.
|
||||
|
||||
Double check the nebula firewall rules under /etc/nebula/config.yml to make sure that connectivity is allowed for your use-case if on a specific port.
|
||||
|
||||
```
|
||||
vagrant@lighthouse1:~$ grep -A21 firewall /etc/nebula/config.yml
|
||||
firewall:
|
||||
conntrack:
|
||||
tcp_timeout: 12m
|
||||
udp_timeout: 3m
|
||||
default_timeout: 10m
|
||||
|
||||
inbound:
|
||||
- proto: icmp
|
||||
port: any
|
||||
host: any
|
||||
- proto: any
|
||||
port: 22
|
||||
host: any
|
||||
- proto: any
|
||||
port: 53
|
||||
host: any
|
||||
|
||||
outbound:
|
||||
- proto: any
|
||||
port: any
|
||||
host: any
|
||||
```
|
40
examples/quickstart-vagrant/Vagrantfile
vendored
Normal file
40
examples/quickstart-vagrant/Vagrantfile
vendored
Normal file
|
@ -0,0 +1,40 @@
|
|||
# Vagrant environment for the nebula quickstart: two generic nodes and one
# lighthouse node on a shared host-only private network.
Vagrant.require_version ">= 2.2.6"

# One entry per VM: hostname, private-network IP, base box, and VM sizing.
# The 172.11.91.x addresses are rewritten to 10.168.91.x nebula overlay
# addresses by the Ansible to_nebula_ip filter.
nodes = [
  { :hostname => 'generic1.vagrant', :ip => '172.11.91.210', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1},
  { :hostname => 'generic2.vagrant', :ip => '172.11.91.220', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1},
  { :hostname => 'lighthouse1.vagrant', :ip => '172.11.91.230', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1},
]

Vagrant.configure("2") do |config|

  # Keep Vagrant's shared insecure key instead of generating per-machine keys;
  # the accompanying ansible.cfg points at ~/.vagrant.d/insecure_private_key.
  config.ssh.insert_key = false

  # Cache apt downloads across VMs when the vagrant-cachier plugin is present.
  if Vagrant.has_plugin?('vagrant-cachier')
    config.cache.enable :apt
  else
    printf("** Install vagrant-cachier plugin to speedup deploy: `vagrant plugin install vagrant-cachier`.**\n")
  end

  # Keep /etc/hosts in sync on the host and all guests so the nodes can
  # resolve each other by hostname.
  if Vagrant.has_plugin?('vagrant-hostmanager')
    config.hostmanager.enabled = true
    config.hostmanager.manage_host = true
    config.hostmanager.include_offline = true
  else
    # Plugin missing: ask Vagrant to install it on the next run.
    config.vagrant.plugins = "vagrant-hostmanager"
  end

  # Define one VM per entry in `nodes`.
  nodes.each do |node|
    config.vm.define node[:hostname] do |node_config|
      node_config.vm.box = node[:box]
      node_config.vm.hostname = node[:hostname]
      node_config.vm.network :private_network, ip: node[:ip]
      node_config.vm.provider :virtualbox do |vb|
        vb.memory = node[:ram]
        vb.cpus = node[:cpus]
        # Resolve guest DNS queries through the host's resolver via NAT.
        vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
        # Force a guest clock resync when drift exceeds 10000 ms.
        vb.customize ['guestproperty', 'set', :id, '/VirtualBox/GuestAdd/VBoxService/--timesync-set-threshold', 10000]
      end
    end
  end
end
|
4
examples/quickstart-vagrant/ansible/ansible.cfg
Normal file
4
examples/quickstart-vagrant/ansible/ansible.cfg
Normal file
|
@ -0,0 +1,4 @@
|
|||
# Ansible defaults for the quickstart Vagrant environment.
[defaults]
# Vagrant boxes get fresh host keys on every `vagrant up`; skip known_hosts checks.
host_key_checking = False
# Vagrant's shared insecure key (the Vagrantfile sets config.ssh.insert_key = false).
private_key_file = ~/.vagrant.d/insecure_private_key
# Escalate to root on the guests for package installs and service management.
become = yes
|
|
@ -0,0 +1,21 @@
|
|||
#!/usr/bin/python
# Ansible filter plugin: derive nebula overlay addresses from the VMs'
# private-network addresses by rewriting the leading octets to 10.168.


class FilterModule(object):
    """Jinja2 filters that map dotted-quad IPs into the nebula address range."""

    def filters(self):
        # Filter names as used in templates, mapped to their implementations.
        return {
            'to_nebula_ip': self.to_nebula_ip,
            'map_to_nebula_ips': self.map_to_nebula_ips,
        }

    def to_nebula_ip(self, ip_str):
        """Return ip_str with its first two octets replaced by 10.168.

        Octets are round-tripped through int(), so a non-numeric octet
        raises ValueError.
        """
        octets = [int(part) for part in ip_str.split(".")]
        octets[0] = 10
        octets[1] = 168
        return ".".join(str(octet) for octet in octets)

    def map_to_nebula_ips(self, ip_strs):
        """Convert every address in ip_strs and join the results with ', '."""
        return ", ".join(self.to_nebula_ip(ip_str) for ip_str in ip_strs)
|
11
examples/quickstart-vagrant/ansible/inventory
Normal file
11
examples/quickstart-vagrant/ansible/inventory
Normal file
|
@ -0,0 +1,11 @@
|
|||
# Ansible inventory for the quickstart Vagrant environment.
[all]
generic1.vagrant
generic2.vagrant
lighthouse1.vagrant

# Regular nebula nodes.
[generic]
generic1.vagrant
generic2.vagrant

# Nebula lighthouse node.
[lighthouse]
lighthouse1.vagrant
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue