Compare commits

..

No commits in common. "master" and "v1.1.2" have entirely different histories.

149 changed files with 4162 additions and 16509 deletions

View file

@ -1,37 +1,14 @@
version: 2
updates:
- package-ecosystem: "docker"
directory: "/docker/janus"
schedule:
interval: "daily"
- package-ecosystem: "docker"
directory: "/docker/proxy"
schedule:
interval: "daily"
- package-ecosystem: "docker"
directory: "/docker/server"
schedule:
interval: "daily"
- package-ecosystem: gomod
directory: /
schedule:
interval: daily
groups:
etcd:
patterns:
- "go.etcd.io*"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
groups:
artifacts:
patterns:
- "actions/*-artifact"
- package-ecosystem: "pip"
directory: "/docs"

View file

@ -1,18 +1,6 @@
name: check-continentmap
on:
push:
branches: [ master ]
paths:
- '.github/workflows/check-continentmap.yml'
- 'scripts/get_continent_map.py'
- 'Makefile'
pull_request:
branches: [ master ]
paths:
- '.github/workflows/check-continentmap.yml'
- 'scripts/get_continent_map.py'
- 'Makefile'
schedule:
- cron: "0 2 * * SUN"
@ -24,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Check continentmap
run: make check-continentmap

View file

@ -36,15 +36,15 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@v3
uses: github/codeql-action/autobuild@v2
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
uses: github/codeql-action/analyze@v2

View file

@ -23,7 +23,7 @@ jobs:
steps:
- name: Add reaction on start
uses: peter-evans/create-or-update-comment@v4
uses: peter-evans/create-or-update-comment@v2
with:
token: ${{ secrets.COMMAND_BOT_PAT }}
repository: ${{ github.event.repository.full_name }}
@ -31,7 +31,7 @@ jobs:
reaction-type: "+1"
- name: Checkout the latest code
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
token: ${{ secrets.COMMAND_BOT_PAT }}
@ -42,7 +42,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.COMMAND_BOT_PAT }}
- name: Add reaction on failure
uses: peter-evans/create-or-update-comment@v4
uses: peter-evans/create-or-update-comment@v2
if: failure()
with:
token: ${{ secrets.COMMAND_BOT_PAT }}

View file

@ -2,15 +2,6 @@ name: Deploy to Docker Hub / GHCR
on:
pull_request:
branches: [ master ]
paths:
- '.github/workflows/deploydocker.yml'
- '**.go'
- 'go.*'
- 'Makefile'
- '*.conf.in'
- 'docker/server/*'
- 'docker/proxy/*'
push:
branches:
- master
@ -27,14 +18,14 @@ jobs:
steps:
- name: Check Out Repo
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@v2
- name: Generate Docker metadata
id: meta
uses: docker/metadata-action@v5
uses: docker/metadata-action@v4
with:
images: |
strukturag/nextcloud-spreed-signaling
@ -46,7 +37,7 @@ jobs:
type=semver,pattern={{major}}
type=sha,prefix=
- name: Cache Docker layers
uses: actions/cache@v4
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
@ -54,14 +45,14 @@ jobs:
${{ runner.os }}-buildx-
- name: Login to Docker Hub
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Login to GHCR
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
@ -69,11 +60,11 @@ jobs:
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
- name: Build and push
id: docker_build
uses: docker/build-push-action@v6
uses: docker/build-push-action@v4
with:
context: .
file: ./docker/server/Dockerfile
@ -92,14 +83,14 @@ jobs:
steps:
- name: Check Out Repo
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@v2
- name: Generate Docker metadata
id: meta
uses: docker/metadata-action@v5
uses: docker/metadata-action@v4
with:
images: |
strukturag/nextcloud-spreed-signaling
@ -107,16 +98,14 @@ jobs:
labels: |
org.opencontainers.image.title=nextcloud-spreed-signaling-proxy
org.opencontainers.image.description=Signaling proxy for the standalone signaling server for Nextcloud Talk.
flavor: |
suffix=-proxy,onlatest=true
tags: |
type=ref,event=branch
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=sha,prefix=
type=ref,event=branch,suffix=-proxy
type=semver,pattern={{version}},suffix=-proxy
type=semver,pattern={{major}}.{{minor}},suffix=-proxy
type=semver,pattern={{major}},suffix=-proxy
type=sha,prefix=,suffix=-proxy
- name: Cache Docker layers
uses: actions/cache@v4
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
@ -124,14 +113,14 @@ jobs:
${{ runner.os }}-buildx-
- name: Login to Docker Hub
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Login to GHCR
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
@ -139,11 +128,11 @@ jobs:
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
- name: Build and push
id: docker_build
uses: docker/build-push-action@v6
uses: docker/build-push-action@v4
with:
context: .
file: ./docker/proxy/Dockerfile

View file

@ -22,7 +22,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Update docker-compose
run: |
@ -36,7 +36,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Update docker-compose
run: |

View file

@ -23,14 +23,18 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
- name: Build Docker image
uses: docker/build-push-action@v6
uses: docker/build-push-action@v4
with:
context: docker/janus
load: true
tags: ${{ env.TEST_TAG }}
- name: Test Docker image
run: |
docker run --rm ${{ env.TEST_TAG }} /usr/local/bin/janus --version

View file

@ -3,24 +3,8 @@ name: Docker image
on:
pull_request:
branches: [ master ]
paths:
- '.github/workflows/docker.yml'
- '**.go'
- 'go.*'
- 'Makefile'
- '*.conf.in'
- 'docker/server/*'
- 'docker/proxy/*'
push:
branches: [ master ]
paths:
- '.github/workflows/docker.yml'
- '**.go'
- 'go.*'
- 'Makefile'
- '*.conf.in'
- 'docker/server/*'
- 'docker/proxy/*'
permissions:
contents: read
@ -33,16 +17,28 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
- name: Build Docker image for testing
uses: docker/build-push-action@v4
with:
context: .
file: docker/server/Dockerfile
load: true
tags: ${{ env.TEST_TAG }}
- name: Test Docker image
run: |
docker run --rm ${{ env.TEST_TAG }} /usr/bin/nextcloud-spreed-signaling --version
- name: Build Docker image
uses: docker/build-push-action@v6
uses: docker/build-push-action@v4
with:
context: .
file: docker/server/Dockerfile
@ -52,16 +48,28 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
- name: Build Docker image for testing
uses: docker/build-push-action@v4
with:
context: .
file: docker/proxy/Dockerfile
load: true
tags: ${{ env.TEST_TAG }}
- name: Test Docker image
run: |
docker run --rm ${{ env.TEST_TAG }} /usr/bin/nextcloud-spreed-signaling-proxy --version
- name: Build Docker image
uses: docker/build-push-action@v6
uses: docker/build-push-action@v4
with:
context: .
file: docker/proxy/Dockerfile

View file

@ -1,46 +0,0 @@
name: Go Vulnerability Checker
on:
push:
branches: [ master ]
paths:
- '.github/workflows/govuln.yml'
- '**.go'
- 'go.*'
pull_request:
branches: [ master ]
paths:
- '.github/workflows/govuln.yml'
- '**.go'
- 'go.*'
schedule:
- cron: "0 2 * * SUN"
permissions:
contents: read
jobs:
run:
runs-on: ubuntu-latest
strategy:
matrix:
go-version:
- "1.21"
- "1.22"
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- run: date
- name: Install dependencies
run: |
sudo apt -y update && sudo apt -y install protobuf-compiler
make common
- name: Install and run govulncheck
run: |
set -euo pipefail
go install golang.org/x/vuln/cmd/govulncheck@latest
govulncheck ./...

View file

@ -1,46 +0,0 @@
name: licensecheck
on:
push:
branches: [ master ]
paths:
- '.github/workflows/licensecheck.yml'
- '**.go'
pull_request:
branches: [ master ]
paths:
- '.github/workflows/licensecheck.yml'
- '**.go'
permissions:
contents: read
jobs:
golang:
name: golang
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install licensecheck
run: |
sudo apt-get -y update
sudo apt-get -y install licensecheck
- id: licensecheck
name: Check licenses
run: |
{
echo 'CHECK_RESULT<<EOF'
licensecheck *.go */*.go
echo EOF
} >> "$GITHUB_ENV"
- name: Check for missing licenses
run: |
MISSING=$(echo "$CHECK_RESULT" | grep UNKNOWN || true)
if [ -n "$MISSING" ]; then \
echo "$MISSING"; \
exit 1; \
fi

View file

@ -5,14 +5,12 @@ on:
branches: [ master ]
paths:
- '.github/workflows/lint.yml'
- '.golangci.yml'
- '**.go'
- 'go.*'
pull_request:
branches: [ master ]
paths:
- '.github/workflows/lint.yml'
- '.golangci.yml'
- '**.go'
- 'go.*'
@ -25,10 +23,28 @@ jobs:
runs-on: ubuntu-latest
continue-on-error: true
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: "1.21"
go-version: "1.20"
- id: go-cache-paths
run: |
echo "go-build=$(go env GOCACHE)" >> $GITHUB_OUTPUT
echo "go-mod=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
echo "go-version=$(go version | cut -d ' ' -f 3)" >> $GITHUB_OUTPUT
- name: Go build cache
uses: actions/cache@v3
with:
path: ${{ steps.go-cache-paths.outputs.go-build }}
key: ${{ runner.os }}-${{ steps.go-cache-paths.outputs.go-version }}-build-${{ hashFiles('**/go.mod', '**/go.sum') }}
- name: Go mod cache
uses: actions/cache@v3
with:
path: ${{ steps.go-cache-paths.outputs.go-mod }}
key: ${{ runner.os }}-${{ steps.go-cache-paths.outputs.go-version }}-mod-${{ hashFiles('**/go.mod', '**/go.sum') }}
- name: Install dependencies
run: |
@ -36,27 +52,7 @@ jobs:
make common
- name: lint
uses: golangci/golangci-lint-action@v6.0.1
uses: golangci/golangci-lint-action@v3.4.0
with:
version: latest
args: --timeout=2m0s
skip-cache: true
dependencies:
name: dependencies
runs-on: ubuntu-latest
continue-on-error: true
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: "stable"
- name: Check minimum supported version of Go
run: |
go mod tidy -go=1.21 -compat=1.21
- name: Check go.mod / go.sum
run: |
git add go.*
git diff --cached --exit-code go.*

View file

@ -1,27 +0,0 @@
name: shellcheck
on:
push:
branches: [ master ]
paths:
- '.github/workflows/shellcheck.yml'
- '**.sh'
pull_request:
branches: [ master ]
paths:
- '.github/workflows/shellcheck.yml'
- '**.sh'
permissions:
contents: read
jobs:
lint:
name: shellcheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: shellcheck
run: |
find -name "*.sh" | xargs shellcheck

View file

@ -24,15 +24,34 @@ jobs:
strategy:
matrix:
go-version:
- "1.21"
- "1.22"
- "1.18"
- "1.19"
- "1.20"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go-version }}
- id: go-cache-paths
run: |
echo "go-build=$(go env GOCACHE)" >> $GITHUB_OUTPUT
echo "go-mod=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
echo "go-version=$(go version | cut -d ' ' -f 3)" >> $GITHUB_OUTPUT
- name: Go build cache
uses: actions/cache@v3
with:
path: ${{ steps.go-cache-paths.outputs.go-build }}
key: ${{ runner.os }}-${{ steps.go-cache-paths.outputs.go-version }}-build-${{ hashFiles('**/go.mod', '**/go.sum') }}
- name: Go mod cache
uses: actions/cache@v3
with:
path: ${{ steps.go-cache-paths.outputs.go-mod }}
key: ${{ runner.os }}-${{ steps.go-cache-paths.outputs.go-version }}-mod-${{ hashFiles('**/go.mod', '**/go.sum') }}
- name: Install dependencies
run: |
sudo apt -y update && sudo apt -y install protobuf-compiler
@ -43,7 +62,7 @@ jobs:
make tarball
- name: Upload tarball
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: tarball-${{ matrix.go-version }}
path: nextcloud-spreed-signaling*.tar.gz
@ -52,12 +71,13 @@ jobs:
strategy:
matrix:
go-version:
- "1.21"
- "1.22"
- "1.18"
- "1.19"
- "1.20"
runs-on: ubuntu-latest
needs: [create]
steps:
- uses: actions/setup-go@v5
- uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go-version }}
@ -66,23 +86,25 @@ jobs:
sudo apt -y update && sudo apt -y install protobuf-compiler
- name: Download tarball
uses: actions/download-artifact@v4
uses: actions/download-artifact@v3
with:
name: tarball-${{ matrix.go-version }}
- name: Extract tarball
run: |
mkdir -p tmp
tar xvf nextcloud-spreed-signaling*.tar.gz --strip-components=1 -C tmp
tar xf nextcloud-spreed-signaling*.tar.gz --strip-components=1 -C tmp
[ -d "tmp/vendor" ] || exit 1
- name: Build
env:
GOPROXY: off
run: |
echo "Building with $(nproc) threads"
make -C tmp build -j$(nproc)
- name: Run tests
env:
USE_DB_IP_GEOIP_DATABASE: "1"
GOPROXY: off
run: |
make -C tmp test TIMEOUT=120s

View file

@ -23,19 +23,37 @@ jobs:
go:
env:
MAXMIND_GEOLITE2_LICENSE: ${{ secrets.MAXMIND_GEOLITE2_LICENSE }}
USE_DB_IP_GEOIP_DATABASE: "1"
strategy:
matrix:
go-version:
- "1.21"
- "1.22"
- "1.18"
- "1.19"
- "1.20"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go-version }}
- id: go-cache-paths
run: |
echo "go-build=$(go env GOCACHE)" >> $GITHUB_OUTPUT
echo "go-mod=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
echo "go-version=$(go version | cut -d ' ' -f 3)" >> $GITHUB_OUTPUT
- name: Go build cache
uses: actions/cache@v3
with:
path: ${{ steps.go-cache-paths.outputs.go-build }}
key: ${{ runner.os }}-${{ steps.go-cache-paths.outputs.go-version }}-build-${{ hashFiles('**/go.mod', '**/go.sum') }}
- name: Go mod cache
uses: actions/cache@v3
with:
path: ${{ steps.go-cache-paths.outputs.go-mod }}
key: ${{ runner.os }}-${{ steps.go-cache-paths.outputs.go-version }}-mod-${{ hashFiles('**/go.mod', '**/go.sum') }}
- name: Install dependencies
run: |
sudo apt -y update && sudo apt -y install protobuf-compiler
@ -63,7 +81,7 @@ jobs:
outfile: cover.lcov
- name: Coveralls Parallel
uses: coverallsapp/github-action@v2.3.0
uses: coverallsapp/github-action@v1.2.4
env:
COVERALLS_FLAG_NAME: run-${{ matrix.go-version }}
with:
@ -78,7 +96,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Coveralls Finished
uses: coverallsapp/github-action@v2.3.0
uses: coverallsapp/github-action@v1.2.4
with:
github-token: ${{ secrets.github_token }}
parallel-finished: true

View file

@ -25,7 +25,7 @@ linters-settings:
- name: receiver-naming
- name: time-naming
- name: unexported-return
#- name: indent-error-flow
- name: indent-error-flow
- name: errorf
- name: empty-block
- name: superfluous-else

View file

@ -2,523 +2,6 @@
All notable changes to this project will be documented in this file.
## 1.3.1 - 2024-05-23
### Changed
- Bump alpine from 3.19 to 3.20 in /docker/janus
[#746](https://github.com/strukturag/nextcloud-spreed-signaling/pull/746)
- CI: Remove deprecated options from lint workflow.
[#748](https://github.com/strukturag/nextcloud-spreed-signaling/pull/748)
- docker: Update Janus in example image to 1.2.2
[#749](https://github.com/strukturag/nextcloud-spreed-signaling/pull/749)
- Improve detection of actual client IP.
[#747](https://github.com/strukturag/nextcloud-spreed-signaling/pull/747)
### Fixed
- docker: Fix proxy entrypoint.
[#745](https://github.com/strukturag/nextcloud-spreed-signaling/pull/745)
## 1.3.0 - 2024-05-22
### Added
- Support resuming remote sessions
[#715](https://github.com/strukturag/nextcloud-spreed-signaling/pull/715)
- Gracefully shut down signaling server on SIGUSR1.
[#706](https://github.com/strukturag/nextcloud-spreed-signaling/pull/706)
- docker: Add helper scripts to gracefully stop / wait for server.
[#722](https://github.com/strukturag/nextcloud-spreed-signaling/pull/722)
- Support environment variables in some configuration.
[#721](https://github.com/strukturag/nextcloud-spreed-signaling/pull/721)
- Add Context to clients / sessions.
[#732](https://github.com/strukturag/nextcloud-spreed-signaling/pull/732)
- Drop support for Golang 1.20
[#737](https://github.com/strukturag/nextcloud-spreed-signaling/pull/737)
- CI: Run "govulncheck".
[#694](https://github.com/strukturag/nextcloud-spreed-signaling/pull/694)
- Make trusted proxies configurable and default to loopback / private IPs.
[#738](https://github.com/strukturag/nextcloud-spreed-signaling/pull/738)
- Add support for remote streams (preview)
[#708](https://github.com/strukturag/nextcloud-spreed-signaling/pull/708)
- Add throttler for backend requests
[#744](https://github.com/strukturag/nextcloud-spreed-signaling/pull/744)
### Changed
- build(deps): Bump github.com/nats-io/nats.go from 1.34.0 to 1.34.1
[#697](https://github.com/strukturag/nextcloud-spreed-signaling/pull/697)
- build(deps): Bump google.golang.org/grpc from 1.62.1 to 1.63.0
[#699](https://github.com/strukturag/nextcloud-spreed-signaling/pull/699)
- build(deps): Bump google.golang.org/grpc from 1.63.0 to 1.63.2
[#700](https://github.com/strukturag/nextcloud-spreed-signaling/pull/700)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.12 to 2.10.14
[#702](https://github.com/strukturag/nextcloud-spreed-signaling/pull/702)
- Include previous value with etcd watch events.
[#704](https://github.com/strukturag/nextcloud-spreed-signaling/pull/704)
- build(deps): Bump go.uber.org/zap from 1.17.0 to 1.27.0
[#705](https://github.com/strukturag/nextcloud-spreed-signaling/pull/705)
- Improve support for Janus 1.x
[#669](https://github.com/strukturag/nextcloud-spreed-signaling/pull/669)
- build(deps): Bump sphinx from 7.2.6 to 7.3.5 in /docs
[#709](https://github.com/strukturag/nextcloud-spreed-signaling/pull/709)
- build(deps): Bump sphinx from 7.3.5 to 7.3.7 in /docs
[#712](https://github.com/strukturag/nextcloud-spreed-signaling/pull/712)
- build(deps): Bump golang.org/x/net from 0.21.0 to 0.23.0
[#711](https://github.com/strukturag/nextcloud-spreed-signaling/pull/711)
- Don't keep expiration timestamp in each session.
[#713](https://github.com/strukturag/nextcloud-spreed-signaling/pull/713)
- build(deps): Bump mkdocs from 1.5.3 to 1.6.0 in /docs
[#714](https://github.com/strukturag/nextcloud-spreed-signaling/pull/714)
- Speedup tests by running in parallel
[#718](https://github.com/strukturag/nextcloud-spreed-signaling/pull/718)
- build(deps): Bump golangci/golangci-lint-action from 4.0.0 to 5.0.0
[#719](https://github.com/strukturag/nextcloud-spreed-signaling/pull/719)
- build(deps): Bump golangci/golangci-lint-action from 5.0.0 to 5.1.0
[#720](https://github.com/strukturag/nextcloud-spreed-signaling/pull/720)
- build(deps): Bump coverallsapp/github-action from 2.2.3 to 2.3.0
[#728](https://github.com/strukturag/nextcloud-spreed-signaling/pull/728)
- build(deps): Bump jinja2 from 3.1.3 to 3.1.4 in /docs
[#726](https://github.com/strukturag/nextcloud-spreed-signaling/pull/726)
- build(deps): Bump google.golang.org/protobuf from 1.33.0 to 1.34.1
[#725](https://github.com/strukturag/nextcloud-spreed-signaling/pull/725)
- build(deps): Bump github.com/prometheus/client_golang from 1.19.0 to 1.19.1
[#730](https://github.com/strukturag/nextcloud-spreed-signaling/pull/730)
- build(deps): Bump golangci/golangci-lint-action from 5.1.0 to 6.0.1
[#729](https://github.com/strukturag/nextcloud-spreed-signaling/pull/729)
- build(deps): Bump google.golang.org/grpc from 1.63.2 to 1.64.0
[#734](https://github.com/strukturag/nextcloud-spreed-signaling/pull/734)
- Validate received SDP earlier.
[#707](https://github.com/strukturag/nextcloud-spreed-signaling/pull/707)
- Log something if mcu publisher / subscriber was closed.
[#736](https://github.com/strukturag/nextcloud-spreed-signaling/pull/736)
- build(deps): Bump the etcd group with 4 updates
[#693](https://github.com/strukturag/nextcloud-spreed-signaling/pull/693)
- build(deps): Bump github.com/nats-io/nats.go from 1.34.1 to 1.35.0
[#740](https://github.com/strukturag/nextcloud-spreed-signaling/pull/740)
- Don't use unnecessary pointer to "json.RawMessage".
[#739](https://github.com/strukturag/nextcloud-spreed-signaling/pull/739)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.14 to 2.10.15
[#741](https://github.com/strukturag/nextcloud-spreed-signaling/pull/741)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.15 to 2.10.16
[#743](https://github.com/strukturag/nextcloud-spreed-signaling/pull/743)
### Fixed
- Improve detecting renames in file watcher.
[#698](https://github.com/strukturag/nextcloud-spreed-signaling/pull/698)
- Update etcd watch handling.
[#701](https://github.com/strukturag/nextcloud-spreed-signaling/pull/701)
- Prevent goroutine leaks in GRPC tests.
[#716](https://github.com/strukturag/nextcloud-spreed-signaling/pull/716)
- Fix potential race in capabilities test.
[#731](https://github.com/strukturag/nextcloud-spreed-signaling/pull/731)
- Don't log read error after we closed the connection.
[#735](https://github.com/strukturag/nextcloud-spreed-signaling/pull/735)
- Fix lock order inversion when leaving room / publishing room sessions.
[#742](https://github.com/strukturag/nextcloud-spreed-signaling/pull/742)
- Relax "MessageClientMessageData" validation.
[#733](https://github.com/strukturag/nextcloud-spreed-signaling/pull/733)
## 1.2.4 - 2024-04-03
### Added
- Add metrics for current number of HTTP client connections.
[#668](https://github.com/strukturag/nextcloud-spreed-signaling/pull/668)
- Support getting GeoIP DB from db-ip.com for tests.
[#689](https://github.com/strukturag/nextcloud-spreed-signaling/pull/689)
- Use fsnotify to detect file changes
[#680](https://github.com/strukturag/nextcloud-spreed-signaling/pull/680)
- CI: Check dependencies for minimum supported version.
[#692](https://github.com/strukturag/nextcloud-spreed-signaling/pull/692)
### Changed
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.9 to 2.10.10
[#650](https://github.com/strukturag/nextcloud-spreed-signaling/pull/650)
- CI: Also test with Golang 1.22
[#651](https://github.com/strukturag/nextcloud-spreed-signaling/pull/651)
- build(deps): Bump the etcd group with 4 updates
[#649](https://github.com/strukturag/nextcloud-spreed-signaling/pull/649)
- Improve Makefile
[#653](https://github.com/strukturag/nextcloud-spreed-signaling/pull/653)
- build(deps): Bump google.golang.org/grpc from 1.61.0 to 1.61.1
[#659](https://github.com/strukturag/nextcloud-spreed-signaling/pull/659)
- build(deps): Bump golangci/golangci-lint-action from 3.7.0 to 4.0.0
[#658](https://github.com/strukturag/nextcloud-spreed-signaling/pull/658)
- Minor improvements to DNS monitor
[#663](https://github.com/strukturag/nextcloud-spreed-signaling/pull/663)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.10 to 2.10.11
[#662](https://github.com/strukturag/nextcloud-spreed-signaling/pull/662)
- build(deps): Bump google.golang.org/grpc from 1.61.1 to 1.62.0
[#664](https://github.com/strukturag/nextcloud-spreed-signaling/pull/664)
- Support ports in full URLs for DNS monitor.
[#667](https://github.com/strukturag/nextcloud-spreed-signaling/pull/667)
- Calculate proxy load based on maximum bandwidth.
[#670](https://github.com/strukturag/nextcloud-spreed-signaling/pull/670)
- build(deps): Bump github.com/nats-io/nats.go from 1.32.0 to 1.33.1
[#661](https://github.com/strukturag/nextcloud-spreed-signaling/pull/661)
- build(deps): Bump golang from 1.21-alpine to 1.22-alpine in /docker/server
[#655](https://github.com/strukturag/nextcloud-spreed-signaling/pull/655)
- build(deps): Bump golang from 1.21-alpine to 1.22-alpine in /docker/proxy
[#656](https://github.com/strukturag/nextcloud-spreed-signaling/pull/656)
- docker: Update Janus from 0.11.8 to 0.14.1.
[#672](https://github.com/strukturag/nextcloud-spreed-signaling/pull/672)
- build(deps): Bump alpine from 3.18 to 3.19 in /docker/janus
[#613](https://github.com/strukturag/nextcloud-spreed-signaling/pull/613)
- Reuse backoff waiting code where possible
[#673](https://github.com/strukturag/nextcloud-spreed-signaling/pull/673)
- build(deps): Bump github.com/prometheus/client_golang from 1.18.0 to 1.19.0
[#674](https://github.com/strukturag/nextcloud-spreed-signaling/pull/674)
- Docker improvements
[#675](https://github.com/strukturag/nextcloud-spreed-signaling/pull/675)
- make: Don't update dependencies but use pinned versions.
[#679](https://github.com/strukturag/nextcloud-spreed-signaling/pull/679)
- build(deps): Bump github.com/pion/sdp/v3 from 3.0.6 to 3.0.7
[#678](https://github.com/strukturag/nextcloud-spreed-signaling/pull/678)
- build(deps): Bump google.golang.org/grpc from 1.62.0 to 1.62.1
[#677](https://github.com/strukturag/nextcloud-spreed-signaling/pull/677)
- build(deps): Bump google.golang.org/protobuf from 1.32.0 to 1.33.0
[#676](https://github.com/strukturag/nextcloud-spreed-signaling/pull/676)
- build(deps): Bump github.com/pion/sdp/v3 from 3.0.7 to 3.0.8
[#681](https://github.com/strukturag/nextcloud-spreed-signaling/pull/681)
- Update source of continentmap to original CSV file.
[#682](https://github.com/strukturag/nextcloud-spreed-signaling/pull/682)
- build(deps): Bump markdown from 3.5.2 to 3.6 in /docs
[#684](https://github.com/strukturag/nextcloud-spreed-signaling/pull/684)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.11 to 2.10.12
[#683](https://github.com/strukturag/nextcloud-spreed-signaling/pull/683)
- build(deps): Bump github.com/pion/sdp/v3 from 3.0.8 to 3.0.9
[#687](https://github.com/strukturag/nextcloud-spreed-signaling/pull/687)
- build(deps): Bump the etcd group with 4 updates
[#686](https://github.com/strukturag/nextcloud-spreed-signaling/pull/686)
- build(deps): Bump github.com/nats-io/nats.go from 1.33.1 to 1.34.0
[#685](https://github.com/strukturag/nextcloud-spreed-signaling/pull/685)
- Revert "build(deps): Bump the etcd group with 4 updates"
[#691](https://github.com/strukturag/nextcloud-spreed-signaling/pull/691)
- CI: Limit when to run Docker build jobs.
[#695](https://github.com/strukturag/nextcloud-spreed-signaling/pull/695)
- Remove deprecated section on multiple signaling servers from README.
[#696](https://github.com/strukturag/nextcloud-spreed-signaling/pull/696)
### Fixed
- Fix race condition when accessing "expected" in proxy_config tests.
[#652](https://github.com/strukturag/nextcloud-spreed-signaling/pull/652)
- Fix deadlock when entry is removed while receiver holds lock in lookup.
[#654](https://github.com/strukturag/nextcloud-spreed-signaling/pull/654)
- Fix flaky "TestProxyConfigStaticDNS".
[#671](https://github.com/strukturag/nextcloud-spreed-signaling/pull/671)
- Fix flaky DnsMonitor test.
[#690](https://github.com/strukturag/nextcloud-spreed-signaling/pull/690)
## 1.2.3 - 2024-01-31
### Added
- CI: Check license headers.
[#627](https://github.com/strukturag/nextcloud-spreed-signaling/pull/627)
- Add "welcome" endpoint to proxy.
[#644](https://github.com/strukturag/nextcloud-spreed-signaling/pull/644)
### Changed
- build(deps): Bump github/codeql-action from 2 to 3
[#619](https://github.com/strukturag/nextcloud-spreed-signaling/pull/619)
- build(deps): Bump github.com/google/uuid from 1.4.0 to 1.5.0
[#618](https://github.com/strukturag/nextcloud-spreed-signaling/pull/618)
- build(deps): Bump google.golang.org/grpc from 1.59.0 to 1.60.0
[#617](https://github.com/strukturag/nextcloud-spreed-signaling/pull/617)
- build(deps): Bump the artifacts group with 2 updates
[#622](https://github.com/strukturag/nextcloud-spreed-signaling/pull/622)
- build(deps): Bump golang.org/x/crypto from 0.16.0 to 0.17.0
[#623](https://github.com/strukturag/nextcloud-spreed-signaling/pull/623)
- build(deps): Bump google.golang.org/grpc from 1.60.0 to 1.60.1
[#624](https://github.com/strukturag/nextcloud-spreed-signaling/pull/624)
- Refactor proxy config
[#606](https://github.com/strukturag/nextcloud-spreed-signaling/pull/606)
- build(deps): Bump google.golang.org/protobuf from 1.31.0 to 1.32.0
[#629](https://github.com/strukturag/nextcloud-spreed-signaling/pull/629)
- build(deps): Bump github.com/prometheus/client_golang from 1.17.0 to 1.18.0
[#630](https://github.com/strukturag/nextcloud-spreed-signaling/pull/630)
- build(deps): Bump jinja2 from 3.1.2 to 3.1.3 in /docs
[#632](https://github.com/strukturag/nextcloud-spreed-signaling/pull/632)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.7 to 2.10.9
[#633](https://github.com/strukturag/nextcloud-spreed-signaling/pull/633)
- build(deps): Bump markdown from 3.5.1 to 3.5.2 in /docs
[#631](https://github.com/strukturag/nextcloud-spreed-signaling/pull/631)
- build(deps): Bump github.com/nats-io/nats.go from 1.31.0 to 1.32.0
[#634](https://github.com/strukturag/nextcloud-spreed-signaling/pull/634)
- build(deps): Bump readthedocs-sphinx-search from 0.3.1 to 0.3.2 in /docs
[#635](https://github.com/strukturag/nextcloud-spreed-signaling/pull/635)
- build(deps): Bump actions/cache from 3 to 4
[#638](https://github.com/strukturag/nextcloud-spreed-signaling/pull/638)
- build(deps): Bump github.com/google/uuid from 1.5.0 to 1.6.0
[#643](https://github.com/strukturag/nextcloud-spreed-signaling/pull/643)
- build(deps): Bump google.golang.org/grpc from 1.60.1 to 1.61.0
[#645](https://github.com/strukturag/nextcloud-spreed-signaling/pull/645)
- build(deps): Bump peter-evans/create-or-update-comment from 3 to 4
[#646](https://github.com/strukturag/nextcloud-spreed-signaling/pull/646)
- CI: No longer need to manually cache Go modules.
[#648](https://github.com/strukturag/nextcloud-spreed-signaling/pull/648)
- CI: Disable cache for linter to bring back annotations.
[#647](https://github.com/strukturag/nextcloud-spreed-signaling/pull/647)
- Refactor DNS monitoring
[#648](https://github.com/strukturag/nextcloud-spreed-signaling/pull/648)
### Fixed
- Fix link to NATS install docs
[#637](https://github.com/strukturag/nextcloud-spreed-signaling/pull/637)
- docker: Always need to set proxy token id / key for server.
[#641](https://github.com/strukturag/nextcloud-spreed-signaling/pull/641)
## 1.2.2 - 2023-12-11
### Added
- Include "~docker" in version if built on Docker.
[#602](https://github.com/strukturag/nextcloud-spreed-signaling/pull/602)
### Changed
- CI: No need to build docker images for testing, done internally.
[#603](https://github.com/strukturag/nextcloud-spreed-signaling/pull/603)
- build(deps): Bump sphinx-rtd-theme from 1.3.0 to 2.0.0 in /docs
[#604](https://github.com/strukturag/nextcloud-spreed-signaling/pull/604)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.5 to 2.10.6
[#605](https://github.com/strukturag/nextcloud-spreed-signaling/pull/605)
- build(deps): Bump actions/setup-go from 4 to 5
[#608](https://github.com/strukturag/nextcloud-spreed-signaling/pull/608)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.6 to 2.10.7
[#612](https://github.com/strukturag/nextcloud-spreed-signaling/pull/612)
- build(deps): Bump the etcd group with 4 updates
[#611](https://github.com/strukturag/nextcloud-spreed-signaling/pull/611)
### Fixed
- Skip options from default section when parsing "geoip-overrides".
[#609](https://github.com/strukturag/nextcloud-spreed-signaling/pull/609)
- Hangup virtual session if it gets disinvited.
[#610](https://github.com/strukturag/nextcloud-spreed-signaling/pull/610)
## 1.2.1 - 2023-11-15
### Added
- feat(scripts): Add a script to simplify the logs to make it more easily to trace a user/session
[#480](https://github.com/strukturag/nextcloud-spreed-signaling/pull/480)
### Changed
- build(deps): Bump markdown from 3.5 to 3.5.1 in /docs
[#594](https://github.com/strukturag/nextcloud-spreed-signaling/pull/594)
- build(deps): Bump github.com/gorilla/websocket from 1.5.0 to 1.5.1
[#595](https://github.com/strukturag/nextcloud-spreed-signaling/pull/595)
- build(deps): Bump github.com/gorilla/securecookie from 1.1.1 to 1.1.2
[#597](https://github.com/strukturag/nextcloud-spreed-signaling/pull/597)
- build(deps): Bump github.com/gorilla/mux from 1.8.0 to 1.8.1
[#596](https://github.com/strukturag/nextcloud-spreed-signaling/pull/596)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.4 to 2.10.5
[#599](https://github.com/strukturag/nextcloud-spreed-signaling/pull/599)
- Improve support for multiple backends with dialouts
[#592](https://github.com/strukturag/nextcloud-spreed-signaling/pull/592)
- build(deps): Bump go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc from 0.25.0 to 0.46.0
[#600](https://github.com/strukturag/nextcloud-spreed-signaling/pull/600)
## 1.2.0 - 2023-10-30
### Added
- Use GeoIP overrides if no GeoIP database is configured.
[#532](https://github.com/strukturag/nextcloud-spreed-signaling/pull/532)
- Log warning if no (static) backends have been configured.
[#533](https://github.com/strukturag/nextcloud-spreed-signaling/pull/533)
- Fallback to common shared secret if none is set for backends.
[#534](https://github.com/strukturag/nextcloud-spreed-signaling/pull/534)
- CI: Test with Golang 1.21
[#536](https://github.com/strukturag/nextcloud-spreed-signaling/pull/536)
- Return response if session tries to join room again.
[#547](https://github.com/strukturag/nextcloud-spreed-signaling/pull/547)
- Support TTL for transient data.
[#575](https://github.com/strukturag/nextcloud-spreed-signaling/pull/575)
- Implement message handler for dialout support.
[#563](https://github.com/strukturag/nextcloud-spreed-signaling/pull/563)
- No longer support Golang 1.19.
[#580](https://github.com/strukturag/nextcloud-spreed-signaling/pull/580)
### Changed
- build(deps): Bump google.golang.org/grpc from 1.56.1 to 1.57.0
[#520](https://github.com/strukturag/nextcloud-spreed-signaling/pull/520)
- build(deps): Bump coverallsapp/github-action from 2.2.0 to 2.2.1
[#514](https://github.com/strukturag/nextcloud-spreed-signaling/pull/514)
- build(deps): Bump github.com/nats-io/nats.go from 1.27.1 to 1.28.0
[#515](https://github.com/strukturag/nextcloud-spreed-signaling/pull/515)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.19 to 2.9.20
[#513](https://github.com/strukturag/nextcloud-spreed-signaling/pull/513)
- build(deps): Bump mkdocs from 1.4.3 to 1.5.1 in /docs
[#523](https://github.com/strukturag/nextcloud-spreed-signaling/pull/523)
- build(deps): Bump markdown from 3.3.7 to 3.4.4 in /docs
[#519](https://github.com/strukturag/nextcloud-spreed-signaling/pull/519)
- build(deps): Bump mkdocs from 1.5.1 to 1.5.2 in /docs
[#525](https://github.com/strukturag/nextcloud-spreed-signaling/pull/525)
- build(deps): Bump github.com/oschwald/maxminddb-golang from 1.11.0 to 1.12.0
[#524](https://github.com/strukturag/nextcloud-spreed-signaling/pull/524)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.20 to 2.9.21
[#530](https://github.com/strukturag/nextcloud-spreed-signaling/pull/530)
- build(deps): Bump sphinx from 6.2.1 to 7.2.4 in /docs
[#542](https://github.com/strukturag/nextcloud-spreed-signaling/pull/542)
- build(deps): Bump github.com/google/uuid from 1.3.0 to 1.3.1
[#539](https://github.com/strukturag/nextcloud-spreed-signaling/pull/539)
- build(deps): Bump sphinx from 7.2.4 to 7.2.5 in /docs
[#544](https://github.com/strukturag/nextcloud-spreed-signaling/pull/544)
- build(deps): Bump coverallsapp/github-action from 2.2.1 to 2.2.2
[#546](https://github.com/strukturag/nextcloud-spreed-signaling/pull/546)
- build(deps): Bump actions/checkout from 3 to 4
[#545](https://github.com/strukturag/nextcloud-spreed-signaling/pull/545)
- build(deps): Bump google.golang.org/grpc from 1.57.0 to 1.58.0
[#549](https://github.com/strukturag/nextcloud-spreed-signaling/pull/549)
- build(deps): Bump docker/metadata-action from 4 to 5
[#552](https://github.com/strukturag/nextcloud-spreed-signaling/pull/552)
- build(deps): Bump docker/setup-qemu-action from 2 to 3
[#553](https://github.com/strukturag/nextcloud-spreed-signaling/pull/553)
- build(deps): Bump docker/login-action from 2 to 3
[#554](https://github.com/strukturag/nextcloud-spreed-signaling/pull/554)
- build(deps): Bump docker/setup-buildx-action from 2 to 3
[#555](https://github.com/strukturag/nextcloud-spreed-signaling/pull/555)
- build(deps): Bump coverallsapp/github-action from 2.2.2 to 2.2.3
[#551](https://github.com/strukturag/nextcloud-spreed-signaling/pull/551)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.21 to 2.9.22
[#550](https://github.com/strukturag/nextcloud-spreed-signaling/pull/550)
- build(deps): Bump docker/build-push-action from 4 to 5
[#557](https://github.com/strukturag/nextcloud-spreed-signaling/pull/557)
- build(deps): Bump github.com/nats-io/nats.go from 1.28.0 to 1.29.0
[#558](https://github.com/strukturag/nextcloud-spreed-signaling/pull/558)
- build(deps): Bump google.golang.org/grpc from 1.58.0 to 1.58.1
[#559](https://github.com/strukturag/nextcloud-spreed-signaling/pull/559)
- build(deps): Bump sphinx from 7.2.5 to 7.2.6 in /docs
[#560](https://github.com/strukturag/nextcloud-spreed-signaling/pull/560)
- build(deps): Bump mkdocs from 1.5.2 to 1.5.3 in /docs
[#561](https://github.com/strukturag/nextcloud-spreed-signaling/pull/561)
- build(deps): Bump markdown from 3.4.4 to 3.5 in /docs
[#570](https://github.com/strukturag/nextcloud-spreed-signaling/pull/570)
- build(deps): Bump google.golang.org/grpc from 1.58.1 to 1.58.3
[#573](https://github.com/strukturag/nextcloud-spreed-signaling/pull/573)
- build(deps): Bump github.com/prometheus/client_golang from 1.16.0 to 1.17.0
[#569](https://github.com/strukturag/nextcloud-spreed-signaling/pull/569)
- build(deps): Bump golang.org/x/net from 0.12.0 to 0.17.0
[#574](https://github.com/strukturag/nextcloud-spreed-signaling/pull/574)
- build(deps): Bump github.com/nats-io/nats.go from 1.29.0 to 1.30.2
[#568](https://github.com/strukturag/nextcloud-spreed-signaling/pull/568)
- build(deps): Bump google.golang.org/grpc from 1.58.3 to 1.59.0
[#578](https://github.com/strukturag/nextcloud-spreed-signaling/pull/578)
- build(deps): Bump github.com/nats-io/nats.go from 1.30.2 to 1.31.0
[#577](https://github.com/strukturag/nextcloud-spreed-signaling/pull/577)
- dependabot: Check for updates in docker files.
- build(deps): Bump golang from 1.20-alpine to 1.21-alpine in /docker/proxy
[#581](https://github.com/strukturag/nextcloud-spreed-signaling/pull/581)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.22 to 2.10.3
[#576](https://github.com/strukturag/nextcloud-spreed-signaling/pull/576)
- build(deps): Bump alpine from 3.14 to 3.18 in /docker/janus
[#582](https://github.com/strukturag/nextcloud-spreed-signaling/pull/582)
- build(deps): Bump golang from 1.20-alpine to 1.21-alpine in /docker/server
[#583](https://github.com/strukturag/nextcloud-spreed-signaling/pull/583)
- Improve get-version.sh
[#584](https://github.com/strukturag/nextcloud-spreed-signaling/pull/584)
- build(deps): Bump go.etcd.io/etcd/client/pkg/v3 from 3.5.9 to 3.5.10
[#588](https://github.com/strukturag/nextcloud-spreed-signaling/pull/588)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.3 to 2.10.4
[#586](https://github.com/strukturag/nextcloud-spreed-signaling/pull/586)
- build(deps): Bump github.com/google/uuid from 1.3.1 to 1.4.0
[#585](https://github.com/strukturag/nextcloud-spreed-signaling/pull/585)
- dependabot: Group etcd updates.
- build(deps): Bump the etcd group with 3 updates
[#590](https://github.com/strukturag/nextcloud-spreed-signaling/pull/590)
- Switch to atomic types from Go 1.19
[#500](https://github.com/strukturag/nextcloud-spreed-signaling/pull/500)
- Move common flags code to own struct.
[#591](https://github.com/strukturag/nextcloud-spreed-signaling/pull/591)
## 1.1.3 - 2023-07-05
### Added
- stats: Support configuring subnets for allowed IPs.
[#448](https://github.com/strukturag/nextcloud-spreed-signaling/pull/448)
- Add common code to handle allowed IPs.
[#450](https://github.com/strukturag/nextcloud-spreed-signaling/pull/450)
- Add allowall to docker image
[#488](https://github.com/strukturag/nextcloud-spreed-signaling/pull/488)
- Follow the Go release policy by supporting only the last two versions.
This drops support for Golang 1.18.
[#499](https://github.com/strukturag/nextcloud-spreed-signaling/pull/499)
### Changed
- build(deps): Bump google.golang.org/protobuf from 1.29.0 to 1.29.1
[#446](https://github.com/strukturag/nextcloud-spreed-signaling/pull/446)
- build(deps): Bump actions/setup-go from 3 to 4
[#447](https://github.com/strukturag/nextcloud-spreed-signaling/pull/447)
- build(deps): Bump google.golang.org/protobuf from 1.29.1 to 1.30.0
[#449](https://github.com/strukturag/nextcloud-spreed-signaling/pull/449)
- build(deps): Bump coverallsapp/github-action from 1.2.4 to 2.0.0
[#451](https://github.com/strukturag/nextcloud-spreed-signaling/pull/451)
- build(deps): Bump readthedocs-sphinx-search from 0.2.0 to 0.3.1 in /docs
[#456](https://github.com/strukturag/nextcloud-spreed-signaling/pull/456)
- build(deps): Bump coverallsapp/github-action from 2.0.0 to 2.1.0
[#460](https://github.com/strukturag/nextcloud-spreed-signaling/pull/460)
- build(deps): Bump peter-evans/create-or-update-comment from 2 to 3
[#459](https://github.com/strukturag/nextcloud-spreed-signaling/pull/459)
- build(deps): Bump sphinx from 6.1.3 to 6.2.1 in /docs
[#468](https://github.com/strukturag/nextcloud-spreed-signaling/pull/468)
- build(deps): Bump mkdocs from 1.4.2 to 1.4.3 in /docs
[#471](https://github.com/strukturag/nextcloud-spreed-signaling/pull/471)
- build(deps): Bump sphinx-rtd-theme from 1.2.0 to 1.2.1 in /docs
[#479](https://github.com/strukturag/nextcloud-spreed-signaling/pull/479)
- build(deps): Bump coverallsapp/github-action from 2.1.0 to 2.1.2
[#466](https://github.com/strukturag/nextcloud-spreed-signaling/pull/466)
- build(deps): Bump golangci/golangci-lint-action from 3.4.0 to 3.5.0
[#481](https://github.com/strukturag/nextcloud-spreed-signaling/pull/481)
- Simplify vendoring.
[#482](https://github.com/strukturag/nextcloud-spreed-signaling/pull/482)
- build(deps): Bump sphinx-rtd-theme from 1.2.1 to 1.2.2 in /docs
[#485](https://github.com/strukturag/nextcloud-spreed-signaling/pull/485)
- build(deps): Bump coverallsapp/github-action from 2.1.2 to 2.2.0
[#484](https://github.com/strukturag/nextcloud-spreed-signaling/pull/484)
- build(deps): Bump google.golang.org/grpc from 1.53.0 to 1.55.0
[#472](https://github.com/strukturag/nextcloud-spreed-signaling/pull/472)
- build(deps): Bump go.etcd.io/etcd/client/v3 from 3.5.7 to 3.5.9
[#473](https://github.com/strukturag/nextcloud-spreed-signaling/pull/473)
- build(deps): Bump github.com/nats-io/nats.go from 1.24.0 to 1.26.0
[#478](https://github.com/strukturag/nextcloud-spreed-signaling/pull/478)
- build(deps): Bump golangci/golangci-lint-action from 3.5.0 to 3.6.0
[#492](https://github.com/strukturag/nextcloud-spreed-signaling/pull/492)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.15 to 2.9.17
[#495](https://github.com/strukturag/nextcloud-spreed-signaling/pull/495)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.17 to 2.9.18
[#496](https://github.com/strukturag/nextcloud-spreed-signaling/pull/496)
- build(deps): Bump github.com/prometheus/client_golang from 1.14.0 to 1.15.1
[#493](https://github.com/strukturag/nextcloud-spreed-signaling/pull/493)
- docker: Don't build concurrently.
[#498](https://github.com/strukturag/nextcloud-spreed-signaling/pull/498)
- Use "struct{}" channel if only used as signaling mechanism.
[#491](https://github.com/strukturag/nextcloud-spreed-signaling/pull/491)
- build(deps): Bump google.golang.org/grpc from 1.55.0 to 1.56.0
[#502](https://github.com/strukturag/nextcloud-spreed-signaling/pull/502)
- build(deps): Bump github.com/prometheus/client_golang from 1.15.1 to 1.16.0
[#501](https://github.com/strukturag/nextcloud-spreed-signaling/pull/501)
- build(deps): Bump github.com/oschwald/maxminddb-golang from 1.10.0 to 1.11.0
[#503](https://github.com/strukturag/nextcloud-spreed-signaling/pull/503)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.9.18 to 2.9.19
[#504](https://github.com/strukturag/nextcloud-spreed-signaling/pull/504)
- build(deps): Bump google.golang.org/grpc from 1.56.0 to 1.56.1
[#505](https://github.com/strukturag/nextcloud-spreed-signaling/pull/505)
- build(deps): Bump github.com/nats-io/nats.go from 1.27.0 to 1.27.1
[#506](https://github.com/strukturag/nextcloud-spreed-signaling/pull/506)
- build(deps): Bump google.golang.org/protobuf from 1.30.0 to 1.31.0
[#507](https://github.com/strukturag/nextcloud-spreed-signaling/pull/507)
### Fixed
- CI: Make sure proxy Docker image is never tagged as "latest".
[#445](https://github.com/strukturag/nextcloud-spreed-signaling/pull/445)
- Write backends comma-separated to config
[#487](https://github.com/strukturag/nextcloud-spreed-signaling/pull/487)
- Fix duplicate join events
[#490](https://github.com/strukturag/nextcloud-spreed-signaling/pull/490)
- Add missing lock for "roomSessionId" to avoid potential races.
[#497](https://github.com/strukturag/nextcloud-spreed-signaling/pull/497)
## 1.1.2 - 2023-03-13
### Added

View file

@ -7,20 +7,14 @@ GOFMT := "$(GODIR)/gofmt"
GOOS ?= linux
GOARCH ?= amd64
GOVERSION := $(shell "$(GO)" env GOVERSION | sed "s|go||" )
BINDIR := $(CURDIR)/bin
BINDIR := "$(CURDIR)/bin"
VENDORDIR := "$(CURDIR)/vendor"
VERSION := $(shell "$(CURDIR)/scripts/get-version.sh")
TARVERSION := $(shell "$(CURDIR)/scripts/get-version.sh" --tar)
PACKAGENAME := github.com/strukturag/nextcloud-spreed-signaling
ALL_PACKAGES := $(PACKAGENAME) $(PACKAGENAME)/client $(PACKAGENAME)/proxy $(PACKAGENAME)/server
PROTO_FILES := $(basename $(wildcard *.proto))
PROTO_GO_FILES := $(addsuffix .pb.go,$(PROTO_FILES)) $(addsuffix _grpc.pb.go,$(PROTO_FILES))
EASYJSON_GO_FILES := \
api_async_easyjson.go \
api_backend_easyjson.go \
api_grpc_easyjson.go \
api_proxy_easyjson.go \
api_signaling_easyjson.go
PROTOC_GEN_GRPC_VERSION := v1.3.0
ifneq ($(VERSION),)
INTERNALLDFLAGS := -X main.version=$(VERSION)
@ -45,21 +39,13 @@ TIMEOUT := 60s
endif
ifneq ($(TEST),)
TESTARGS := $(TESTARGS) -run "$(TEST)"
TESTARGS := $(TESTARGS) -run $(TEST)
endif
ifneq ($(COUNT),)
TESTARGS := $(TESTARGS) -count $(COUNT)
endif
ifneq ($(PARALLEL),)
TESTARGS := $(TESTARGS) -parallel $(PARALLEL)
endif
ifneq ($(VERBOSE),)
TESTARGS := $(TESTARGS) -v
endif
ifeq ($(GOARCH), amd64)
GOPATHBIN := $(GOPATH)/bin
else
@ -69,13 +55,15 @@ endif
hook:
[ ! -d "$(CURDIR)/.git/hooks" ] || ln -sf "$(CURDIR)/scripts/pre-commit.hook" "$(CURDIR)/.git/hooks/pre-commit"
$(GOPATHBIN)/easyjson: go.mod go.sum
$(GOPATHBIN)/easyjson:
[ "$(GOPROXY)" = "off" ] || $(GO) get -u -d github.com/mailru/easyjson/...
$(GO) install github.com/mailru/easyjson/...
$(GOPATHBIN)/protoc-gen-go: go.mod go.sum
$(GOPATHBIN)/protoc-gen-go:
$(GO) install google.golang.org/protobuf/cmd/protoc-gen-go
$(GOPATHBIN)/protoc-gen-go-grpc: go.mod go.sum
$(GOPATHBIN)/protoc-gen-go-grpc:
[ "$(GOPROXY)" = "off" ] || $(GO) get -u -d google.golang.org/grpc/cmd/protoc-gen-go-grpc
$(GO) install google.golang.org/grpc/cmd/protoc-gen-go-grpc
continentmap.go:
@ -92,68 +80,89 @@ check-continentmap:
get:
$(GO) get $(PACKAGE)
fmt: hook | $(PROTO_GO_FILES)
fmt: hook | common_proto
$(GOFMT) -s -w *.go client proxy server
vet: common
$(GO) vet $(ALL_PACKAGES)
test: vet common
$(GO) test -timeout $(TIMEOUT) $(TESTARGS) $(ALL_PACKAGES)
$(GO) test -v -timeout $(TIMEOUT) $(TESTARGS) $(ALL_PACKAGES)
cover: vet common
rm -f cover.out && \
$(GO) test -timeout $(TIMEOUT) -coverprofile cover.out $(ALL_PACKAGES) && \
$(GO) test -v -timeout $(TIMEOUT) -coverprofile cover.out $(ALL_PACKAGES) && \
sed -i "/_easyjson/d" cover.out && \
sed -i "/\.pb\.go/d" cover.out && \
$(GO) tool cover -func=cover.out
coverhtml: vet common
rm -f cover.out && \
$(GO) test -timeout $(TIMEOUT) -coverprofile cover.out $(ALL_PACKAGES) && \
$(GO) test -v -timeout $(TIMEOUT) -coverprofile cover.out $(ALL_PACKAGES) && \
sed -i "/_easyjson/d" cover.out && \
sed -i "/\.pb\.go/d" cover.out && \
$(GO) tool cover -html=cover.out -o coverage.html
%_easyjson.go: %.go $(GOPATHBIN)/easyjson | $(PROTO_GO_FILES)
rm -f easyjson-bootstrap*.go
%_easyjson.go: %.go $(GOPATHBIN)/easyjson | common_proto
PATH="$(GODIR)":$(PATH) "$(GOPATHBIN)/easyjson" -all $*.go
%.pb.go: %.proto $(GOPATHBIN)/protoc-gen-go $(GOPATHBIN)/protoc-gen-go-grpc
PATH="$(GODIR)":"$(GOPATHBIN)":$(PATH) protoc \
--go_out=. --go_opt=paths=source_relative \
$*.proto
%_grpc.pb.go: %.proto $(GOPATHBIN)/protoc-gen-go $(GOPATHBIN)/protoc-gen-go-grpc
PATH="$(GODIR)":"$(GOPATHBIN)":$(PATH) protoc \
PATH="$(GODIR)":"$(GOPATHBIN)":$(PATH) protoc --go_out=. --go_opt=paths=source_relative \
--go-grpc_out=. --go-grpc_opt=paths=source_relative \
$*.proto
common: $(EASYJSON_GO_FILES) $(PROTO_GO_FILES)
common: common_easyjson common_proto
common_easyjson: \
api_async_easyjson.go \
api_backend_easyjson.go \
api_grpc_easyjson.go \
api_proxy_easyjson.go \
api_signaling_easyjson.go
common_proto: \
grpc_backend.pb.go \
grpc_internal.pb.go \
grpc_mcu.pb.go \
grpc_sessions.pb.go
$(BINDIR):
mkdir -p "$(BINDIR)"
mkdir -p $(BINDIR)
client: common $(BINDIR)
$(GO) build $(BUILDARGS) -ldflags '$(INTERNALLDFLAGS)' -o "$(BINDIR)/client" ./client/...
$(GO) build $(BUILDARGS) -ldflags '$(INTERNALLDFLAGS)' -o $(BINDIR)/client ./client/...
server: common $(BINDIR)
$(GO) build $(BUILDARGS) -ldflags '$(INTERNALLDFLAGS)' -o "$(BINDIR)/signaling" ./server/...
$(GO) build $(BUILDARGS) -ldflags '$(INTERNALLDFLAGS)' -o $(BINDIR)/signaling ./server/...
proxy: common $(BINDIR)
$(GO) build $(BUILDARGS) -ldflags '$(INTERNALLDFLAGS)' -o "$(BINDIR)/proxy" ./proxy/...
$(GO) build $(BUILDARGS) -ldflags '$(INTERNALLDFLAGS)' -o $(BINDIR)/proxy ./proxy/...
clean:
rm -f $(EASYJSON_GO_FILES)
rm -f *_easyjson.go
rm -f easyjson-bootstrap*.go
rm -f $(PROTO_GO_FILES)
rm -f *.pb.go
build: server proxy
vendor: go.mod go.sum common
set -e ;\
rm -rf $(VENDORDIR)
$(GO) mod vendor
EASYJSON_DIR=$$($(GO) list -m -f '{{.Dir}}' github.com/mailru/easyjson); \
PROTOBUF_DIR=$$($(GO) list -m -f '{{.Dir}}' google.golang.org/protobuf); \
$(GO) mod tidy; \
$(GO) mod vendor; \
mkdir -p $(VENDORDIR)/github.com/mailru/easyjson/; \
cp -rf --no-preserve=mode $$EASYJSON_DIR/easyjson/ $(VENDORDIR)/github.com/mailru/easyjson/; \
mkdir -p $(VENDORDIR)/google.golang.org/grpc/cmd/protoc-gen-go-grpc/; \
[ -d "$(GOPATH)/pkg/mod/google.golang.org/grpc/cmd/protoc-gen-go-grpc@$(PROTOC_GEN_GRPC_VERSION)" ] || echo "Folder for protoc-gen-go-grpc command does not exist, please check Makefile."; \
[ -d "$(GOPATH)/pkg/mod/google.golang.org/grpc/cmd/protoc-gen-go-grpc@$(PROTOC_GEN_GRPC_VERSION)" ] || exit 1; \
cp -rf --no-preserve=mode $(GOPATH)/pkg/mod/google.golang.org/grpc/cmd/protoc-gen-go-grpc@$(PROTOC_GEN_GRPC_VERSION)/* $(VENDORDIR)/google.golang.org/grpc/cmd/protoc-gen-go-grpc/; \
cp -rf --no-preserve=mode $$PROTOBUF_DIR/cmd/ $(VENDORDIR)/google.golang.org/protobuf/; \
cp -rf --no-preserve=mode $$PROTOBUF_DIR/internal/ $(VENDORDIR)/google.golang.org/protobuf/; \
cp -rf --no-preserve=mode $$PROTOBUF_DIR/reflect/ $(VENDORDIR)/google.golang.org/protobuf/; \
find $(VENDORDIR)/google.golang.org/protobuf/ -name "*_test.go" -delete; \
find $(VENDORDIR)/google.golang.org/protobuf/ -name "testdata" | xargs rm -r; \
tarball: vendor
git archive \
@ -169,7 +178,5 @@ tarball: vendor
dist: tarball
.NOTPARALLEL: $(EASYJSON_GO_FILES)
.PHONY: continentmap.go common vendor
.SECONDARY: $(EASYJSON_GO_FILES) $(PROTO_GO_FILES)
.DELETE_ON_ERROR:
.NOTPARALLEL: %_easyjson.go
.PHONY: continentmap.go vendor

View file

@ -1,6 +1,6 @@
# Spreed standalone signaling server
![Build Status](https://github.com/strukturag/nextcloud-spreed-signaling/actions/workflows/test.yml/badge.svg)
![Build Status](https://github.com/strukturag/nextcloud-spreed-signaling/workflows/test/badge.svg)
[![Coverage Status](https://coveralls.io/repos/github/strukturag/nextcloud-spreed-signaling/badge.svg?branch=master)](https://coveralls.io/github/strukturag/nextcloud-spreed-signaling?branch=master)
[![Documentation Status](https://readthedocs.org/projects/nextcloud-spreed-signaling/badge/?version=latest)](https://nextcloud-spreed-signaling.readthedocs.io/en/latest/?badge=latest)
[![Go Report](https://goreportcard.com/badge/github.com/strukturag/nextcloud-spreed-signaling)](https://goreportcard.com/report/github.com/strukturag/nextcloud-spreed-signaling)
@ -17,13 +17,10 @@ information on the API of the signaling server.
The following tools are required for building the signaling server.
- git
- go >= 1.21
- go >= 1.18 (usually the last three versions of go are supported)
- make
- protobuf-compiler >= 3
Usually the last two versions of Go are supported. This follows the release
policy of Go: https://go.dev/doc/devel/release#policy
All other dependencies are fetched automatically while building.
$ make build
@ -118,7 +115,7 @@ https://docs.docker.com/compose/install/
## Setup of NATS server
There is a detailed description on how to install and run the NATS server
available at https://docs.nats.io/running-a-nats-service/introduction
available at http://nats.io/documentation/tutorials/gnatsd-install/
You can use the `gnatsd.conf` file as base for the configuration of the NATS
server.
@ -171,17 +168,7 @@ proxy process gracefully after all clients have been disconnected. No new
publishers will be accepted in this case.
### Remote streams (preview)
With Janus 1.1.0 or newer, remote streams are supported, i.e. a subscriber can
receive a published stream from any server. For this, you need to configure
`hostname`, `token_id` and `token_key` in the proxy configuration. Each proxy
server also supports configuring maximum `incoming` and `outgoing` bandwidth
settings, which will also be used to select remote streams.
See `proxy.conf.in` in section `app` for details.
## Clustering
### Clustering
The signaling server supports a clustering mode where multiple running servers
can be interconnected to form a single "virtual" server. This can be used to
@ -309,8 +296,6 @@ interface on port `8080` below):
# Enable proxying Websocket requests to the standalone signaling server.
ProxyPass "/standalone-signaling/" "ws://127.0.0.1:8080/"
RequestHeader set X-Real-IP %{REMOTE_ADDR}s
RewriteEngine On
# Websocket connections from the clients.
RewriteRule ^/standalone-signaling/spreed/$ - [L]
@ -346,7 +331,6 @@ myserver.domain.invalid {
route /standalone-signaling/* {
uri strip_prefix /standalone-signaling
reverse_proxy http://127.0.0.1:8080
header_up X-Real-IP {remote_host}
}
}
```
@ -393,3 +377,23 @@ Usage:
config file to use (default "server.conf")
-maxClients int
number of client connections (default 100)
## Running multiple signaling servers
IMPORTANT: This is considered experimental and might not work with all
functionality of the signaling server, especially when using the Janus
integration.
The signaling server uses the NATS server to send messages to peers that are
not connected locally. Therefore multiple signaling servers running on different
hosts can use the same NATS server to build a simple cluster, allowing more
simultaneous connections and distribute the load.
To set this up, make sure all signaling servers are using the same settings for
their `session` keys and the `secret` in the `backend` section. Also the URL to
the NATS server (option `url` in section `nats`) must point to the same NATS
server.
If all this is setup correctly, clients can connect to either of the signaling
servers and exchange messages between them.

View file

@ -1,134 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2023 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"bytes"
"fmt"
"net"
"strings"
)
// AllowedIps is a matcher for a configured list of allowed IP addresses
// and/or subnets.
type AllowedIps struct {
	// allowed holds the parsed networks; an IP is accepted if it is
	// contained in any of them (see "Allowed").
	allowed []*net.IPNet
}
// String returns a human-readable representation of the allowed networks,
// e.g. "[127.0.0.1/32, 192.168.1.0/24]".
func (a *AllowedIps) String() string {
	entries := make([]string, 0, len(a.allowed))
	for _, network := range a.allowed {
		entries = append(entries, network.String())
	}
	return "[" + strings.Join(entries, ", ") + "]"
}
// Empty returns true if no allowed IPs / subnets have been configured.
func (a *AllowedIps) Empty() bool {
	return len(a.allowed) == 0
}
// Allowed reports whether the given IP is contained in any of the
// configured networks.
func (a *AllowedIps) Allowed(ip net.IP) bool {
	for _, network := range a.allowed {
		if network.Contains(ip) {
			return true
		}
	}

	return false
}
func parseIPNet(s string) (*net.IPNet, error) {
var ipnet *net.IPNet
if strings.ContainsRune(s, '/') {
var err error
if _, ipnet, err = net.ParseCIDR(s); err != nil {
return nil, fmt.Errorf("invalid IP address/subnet %s: %w", s, err)
}
} else {
ip := net.ParseIP(s)
if ip == nil {
return nil, fmt.Errorf("invalid IP address %s", s)
}
ipnet = &net.IPNet{
IP: ip,
Mask: net.CIDRMask(len(ip)*8, len(ip)*8),
}
}
return ipnet, nil
}
// ParseAllowedIps parses a comma-separated list of IP addresses and/or
// CIDR subnets into an AllowedIps matcher. Empty entries (including
// surrounding whitespace) are ignored; any invalid entry aborts parsing
// with an error.
func ParseAllowedIps(allowed string) (*AllowedIps, error) {
	var networks []*net.IPNet
	for _, entry := range strings.Split(allowed, ",") {
		entry = strings.TrimSpace(entry)
		if entry == "" {
			continue
		}

		network, err := parseIPNet(entry)
		if err != nil {
			return nil, err
		}

		networks = append(networks, network)
	}

	return &AllowedIps{
		allowed: networks,
	}, nil
}
// DefaultAllowedIps returns the default matcher which only allows
// connections from localhost (127.0.0.1).
func DefaultAllowedIps() *AllowedIps {
	loopback := &net.IPNet{
		IP:   net.ParseIP("127.0.0.1"),
		Mask: net.CIDRMask(32, 32),
	}

	return &AllowedIps{
		allowed: []*net.IPNet{loopback},
	}
}
var (
	// privateIpNets lists the CIDR ranges treated as private / internal
	// networks: the loopback range plus the RFC 1918 address blocks.
	privateIpNets = []string{
		// Loopback addresses.
		"127.0.0.0/8",

		// Private addresses.
		"10.0.0.0/8",
		"172.16.0.0/12",
		"192.168.0.0/16",
	}
)
// DefaultPrivateIps returns a matcher for the well-known private /
// loopback IP ranges (see "privateIpNets").
//
// The ranges are compile-time constants, so a parse failure indicates a
// programming error and triggers a panic.
func DefaultPrivateIps() *AllowedIps {
	joined := strings.Join(privateIpNets, ",")
	allowed, err := ParseAllowedIps(joined)
	if err != nil {
		panic(fmt.Errorf("could not parse private ips %+v: %w", privateIpNets, err))
	}

	return allowed
}

View file

@ -1,73 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2023 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"net"
"testing"
)
// TestAllowedIps verifies parsing of a mixed list of plain addresses and
// CIDR subnets, the normalized String() output, and membership checks for
// both contained and non-contained addresses.
func TestAllowedIps(t *testing.T) {
	a, err := ParseAllowedIps("127.0.0.1, 192.168.0.1, 192.168.1.1/24")
	if err != nil {
		t.Fatal(err)
	}

	if a.Empty() {
		t.Fatal("should not be empty")
	}

	// Plain addresses become /32 host networks; the subnet entry is
	// normalized to its network address (192.168.1.1/24 -> 192.168.1.0/24).
	if expected := `[127.0.0.1/32, 192.168.0.1/32, 192.168.1.0/24]`; a.String() != expected {
		t.Errorf("expected %s, got %s", expected, a.String())
	}

	allowed := []string{
		"127.0.0.1",
		"192.168.0.1",
		"192.168.1.1",
		"192.168.1.100",
	}
	notAllowed := []string{
		"192.168.0.2",
		"10.1.2.3",
	}

	for _, addr := range allowed {
		t.Run(addr, func(t *testing.T) {
			ip := net.ParseIP(addr)
			if ip == nil {
				t.Errorf("error parsing %s", addr)
			} else if !a.Allowed(ip) {
				t.Errorf("should allow %s", addr)
			}
		})
	}

	for _, addr := range notAllowed {
		t.Run(addr, func(t *testing.T) {
			ip := net.ParseIP(addr)
			if ip == nil {
				t.Errorf("error parsing %s", addr)
			} else if a.Allowed(ip) {
				t.Errorf("should not allow %s", addr)
			}
		})
	}
}

View file

@ -21,11 +21,7 @@
*/
package signaling
import (
"encoding/json"
"fmt"
"time"
)
import "time"
type AsyncMessage struct {
SendTime time.Time `json:"sendtime"`
@ -45,14 +41,6 @@ type AsyncMessage struct {
Id string `json:"id"`
}
func (m *AsyncMessage) String() string {
data, err := json.Marshal(m)
if err != nil {
return fmt.Sprintf("Could not serialize %#v: %s", m, err)
}
return string(data)
}
type AsyncRoomMessage struct {
Type string `json:"type"`

View file

@ -31,9 +31,7 @@ import (
"fmt"
"net/http"
"net/url"
"regexp"
"strings"
"time"
)
const (
@ -106,10 +104,6 @@ type BackendServerRoomRequest struct {
SwitchTo *BackendRoomSwitchToMessageRequest `json:"switchto,omitempty"`
Dialout *BackendRoomDialoutRequest `json:"dialout,omitempty"`
Transient *BackendRoomTransientRequest `json:"transient,omitempty"`
// Internal properties
ReceivedTime int64 `json:"received,omitempty"`
}
@ -118,8 +112,8 @@ type BackendRoomInviteRequest struct {
UserIds []string `json:"userids,omitempty"`
// TODO(jojo): We should get rid of "AllUserIds" and find a better way to
// notify existing users the room has changed and they need to update it.
AllUserIds []string `json:"alluserids,omitempty"`
Properties json.RawMessage `json:"properties,omitempty"`
AllUserIds []string `json:"alluserids,omitempty"`
Properties *json.RawMessage `json:"properties,omitempty"`
}
type BackendRoomDisinviteRequest struct {
@ -127,13 +121,13 @@ type BackendRoomDisinviteRequest struct {
SessionIds []string `json:"sessionids,omitempty"`
// TODO(jojo): We should get rid of "AllUserIds" and find a better way to
// notify existing users the room has changed and they need to update it.
AllUserIds []string `json:"alluserids,omitempty"`
Properties json.RawMessage `json:"properties,omitempty"`
AllUserIds []string `json:"alluserids,omitempty"`
Properties *json.RawMessage `json:"properties,omitempty"`
}
type BackendRoomUpdateRequest struct {
UserIds []string `json:"userids,omitempty"`
Properties json.RawMessage `json:"properties,omitempty"`
UserIds []string `json:"userids,omitempty"`
Properties *json.RawMessage `json:"properties,omitempty"`
}
type BackendRoomDeleteRequest struct {
@ -154,7 +148,7 @@ type BackendRoomParticipantsRequest struct {
}
type BackendRoomMessageRequest struct {
Data json.RawMessage `json:"data,omitempty"`
Data *json.RawMessage `json:"data,omitempty"`
}
type BackendRoomSwitchToSessionsList []string
@ -169,76 +163,18 @@ type BackendRoomSwitchToMessageRequest struct {
// In the map, the key is the session id, the value additional details
// (or null) for the session. The details will be included in the request
// to the connected client.
Sessions json.RawMessage `json:"sessions,omitempty"`
Sessions *json.RawMessage `json:"sessions,omitempty"`
// Internal properties
SessionsList BackendRoomSwitchToSessionsList `json:"sessionslist,omitempty"`
SessionsMap BackendRoomSwitchToSessionsMap `json:"sessionsmap,omitempty"`
}
type BackendRoomDialoutRequest struct {
// E.164 number to dial (e.g. "+1234567890")
Number string `json:"number"`
Options json.RawMessage `json:"options,omitempty"`
}
var (
checkE164Number = regexp.MustCompile(`^\+\d{2,}$`)
)
func isValidNumber(s string) bool {
return checkE164Number.MatchString(s)
}
func (r *BackendRoomDialoutRequest) ValidateNumber() *Error {
if r.Number == "" {
return NewError("number_missing", "No number provided")
}
if !isValidNumber(r.Number) {
return NewError("invalid_number", "Expected E.164 number.")
}
return nil
}
type TransientAction string
const (
TransientActionSet TransientAction = "set"
TransientActionDelete TransientAction = "delete"
)
type BackendRoomTransientRequest struct {
Action TransientAction `json:"action"`
Key string `json:"key"`
Value interface{} `json:"value,omitempty"`
TTL time.Duration `json:"ttl,omitempty"`
}
type BackendServerRoomResponse struct {
Type string `json:"type"`
Dialout *BackendRoomDialoutResponse `json:"dialout,omitempty"`
}
type BackendRoomDialoutError struct {
Code string `json:"code"`
Message string `json:"message,omitempty"`
}
type BackendRoomDialoutResponse struct {
CallId string `json:"callid,omitempty"`
Error *Error `json:"error,omitempty"`
}
// Requests from the signaling server to the Nextcloud backend.
type BackendClientAuthRequest struct {
Version string `json:"version"`
Params json.RawMessage `json:"params"`
Version string `json:"version"`
Params *json.RawMessage `json:"params"`
}
type BackendClientRequest struct {
@ -256,7 +192,7 @@ type BackendClientRequest struct {
Session *BackendClientSessionRequest `json:"session,omitempty"`
}
func NewBackendClientAuthRequest(params json.RawMessage) *BackendClientRequest {
func NewBackendClientAuthRequest(params *json.RawMessage) *BackendClientRequest {
return &BackendClientRequest{
Type: "auth",
Auth: &BackendClientAuthRequest{
@ -284,9 +220,9 @@ type BackendClientResponse struct {
}
type BackendClientAuthResponse struct {
Version string `json:"version"`
UserId string `json:"userid"`
User json.RawMessage `json:"user"`
Version string `json:"version"`
UserId string `json:"userid"`
User *json.RawMessage `json:"user"`
}
type BackendClientRoomRequest struct {
@ -315,14 +251,14 @@ func NewBackendClientRoomRequest(roomid string, userid string, sessionid string)
}
type BackendClientRoomResponse struct {
Version string `json:"version"`
RoomId string `json:"roomid"`
Properties json.RawMessage `json:"properties"`
Version string `json:"version"`
RoomId string `json:"roomid"`
Properties *json.RawMessage `json:"properties"`
// Optional information about the Nextcloud Talk session. Can be used for
// example to define a "userid" for otherwise anonymous users.
// See "RoomSessionData" for a possible content.
Session json.RawMessage `json:"session,omitempty"`
Session *json.RawMessage `json:"session,omitempty"`
Permissions *[]Permission `json:"permissions,omitempty"`
}
@ -359,12 +295,12 @@ type BackendClientRingResponse struct {
}
type BackendClientSessionRequest struct {
Version string `json:"version"`
RoomId string `json:"roomid"`
Action string `json:"action"`
SessionId string `json:"sessionid"`
UserId string `json:"userid,omitempty"`
User json.RawMessage `json:"user,omitempty"`
Version string `json:"version"`
RoomId string `json:"roomid"`
Action string `json:"action"`
SessionId string `json:"sessionid"`
UserId string `json:"userid,omitempty"`
User *json.RawMessage `json:"user,omitempty"`
}
type BackendClientSessionResponse struct {
@ -396,8 +332,8 @@ type OcsMeta struct {
}
type OcsBody struct {
Meta OcsMeta `json:"meta"`
Data json.RawMessage `json:"data"`
Meta OcsMeta `json:"meta"`
Data *json.RawMessage `json:"data"`
}
type OcsResponse struct {

View file

@ -27,7 +27,6 @@ import (
)
func TestBackendChecksum(t *testing.T) {
t.Parallel()
rnd := newRandomString(32)
body := []byte{1, 2, 3, 4, 5}
secret := []byte("shared-secret")
@ -57,28 +56,3 @@ func TestBackendChecksum(t *testing.T) {
t.Errorf("Checksum %s could not be validated from request", check1)
}
}
func TestValidNumbers(t *testing.T) {
t.Parallel()
valid := []string{
"+12",
"+12345",
}
invalid := []string{
"+1",
"12345",
" +12345",
" +12345 ",
"+123-45",
}
for _, number := range valid {
if !isValidNumber(number) {
t.Errorf("number %s should be valid", number)
}
}
for _, number := range invalid {
if isValidNumber(number) {
t.Errorf("number %s should not be valid", number)
}
}
}

View file

@ -24,7 +24,6 @@ package signaling
import (
"encoding/json"
"fmt"
"net/url"
"github.com/golang-jwt/jwt/v4"
)
@ -49,14 +48,6 @@ type ProxyClientMessage struct {
Payload *PayloadProxyClientMessage `json:"payload,omitempty"`
}
func (m *ProxyClientMessage) String() string {
data, err := json.Marshal(m)
if err != nil {
return fmt.Sprintf("Could not serialize %#v: %s", m, err)
}
return string(data)
}
func (m *ProxyClientMessage) CheckValid() error {
switch m.Type {
case "":
@ -124,14 +115,6 @@ type ProxyServerMessage struct {
Event *EventProxyServerMessage `json:"event,omitempty"`
}
func (r *ProxyServerMessage) String() string {
data, err := json.Marshal(r)
if err != nil {
return fmt.Sprintf("Could not serialize %#v: %s", r, err)
}
return string(data)
}
func (r *ProxyServerMessage) CloseAfterSend(session Session) bool {
switch r.Type {
case "bye":
@ -196,20 +179,12 @@ type ByeProxyServerMessage struct {
type CommandProxyClientMessage struct {
Type string `json:"type"`
Sid string `json:"sid,omitempty"`
StreamType StreamType `json:"streamType,omitempty"`
PublisherId string `json:"publisherId,omitempty"`
ClientId string `json:"clientId,omitempty"`
Bitrate int `json:"bitrate,omitempty"`
MediaTypes MediaType `json:"mediatypes,omitempty"`
RemoteUrl string `json:"remoteUrl,omitempty"`
remoteUrl *url.URL
RemoteToken string `json:"remoteToken,omitempty"`
Hostname string `json:"hostname,omitempty"`
Port int `json:"port,omitempty"`
RtcpPort int `json:"rtcpPort,omitempty"`
Sid string `json:"sid,omitempty"`
StreamType string `json:"streamType,omitempty"`
PublisherId string `json:"publisherId,omitempty"`
ClientId string `json:"clientId,omitempty"`
Bitrate int `json:"bitrate,omitempty"`
MediaTypes MediaType `json:"mediatypes,omitempty"`
}
func (m *CommandProxyClientMessage) CheckValid() error {
@ -227,17 +202,6 @@ func (m *CommandProxyClientMessage) CheckValid() error {
if m.StreamType == "" {
return fmt.Errorf("stream type missing")
}
if m.RemoteUrl != "" {
if m.RemoteToken == "" {
return fmt.Errorf("remote token missing")
}
remoteUrl, err := url.Parse(m.RemoteUrl)
if err != nil {
return fmt.Errorf("invalid remote url: %w", err)
}
m.remoteUrl = remoteUrl
}
case "delete-publisher":
fallthrough
case "delete-subscriber":
@ -251,10 +215,6 @@ func (m *CommandProxyClientMessage) CheckValid() error {
type CommandProxyServerMessage struct {
Id string `json:"id,omitempty"`
Sid string `json:"sid,omitempty"`
Bitrate int `json:"bitrate,omitempty"`
Streams []PublisherStream `json:"streams,omitempty"`
}
// Type "payload"
@ -299,41 +259,12 @@ type PayloadProxyServerMessage struct {
// Type "event"
type EventProxyServerBandwidth struct {
// Incoming is the bandwidth utilization for publishers in percent.
Incoming *float64 `json:"incoming,omitempty"`
// Outgoing is the bandwidth utilization for subscribers in percent.
Outgoing *float64 `json:"outgoing,omitempty"`
}
func (b *EventProxyServerBandwidth) String() string {
if b.Incoming != nil && b.Outgoing != nil {
return fmt.Sprintf("bandwidth: incoming=%.3f%%, outgoing=%.3f%%", *b.Incoming, *b.Outgoing)
} else if b.Incoming != nil {
return fmt.Sprintf("bandwidth: incoming=%.3f%%, outgoing=unlimited", *b.Incoming)
} else if b.Outgoing != nil {
return fmt.Sprintf("bandwidth: incoming=unlimited, outgoing=%.3f%%", *b.Outgoing)
} else {
return "bandwidth: incoming=unlimited, outgoing=unlimited"
}
}
func (b EventProxyServerBandwidth) AllowIncoming() bool {
return b.Incoming == nil || *b.Incoming < 100
}
func (b EventProxyServerBandwidth) AllowOutgoing() bool {
return b.Outgoing == nil || *b.Outgoing < 100
}
type EventProxyServerMessage struct {
Type string `json:"type"`
ClientId string `json:"clientId,omitempty"`
Load int64 `json:"load,omitempty"`
Sid string `json:"sid,omitempty"`
Bandwidth *EventProxyServerBandwidth `json:"bandwidth,omitempty"`
}
// Information on a proxy in the etcd cluster.

View file

@ -23,16 +23,12 @@ package signaling
import (
"encoding/json"
"errors"
"fmt"
"log"
"net/url"
"sort"
"strings"
"time"
"github.com/golang-jwt/jwt/v4"
"github.com/pion/sdp/v3"
)
const (
@ -43,11 +39,6 @@ const (
HelloVersionV2 = "2.0"
)
var (
ErrNoSdp = NewError("no_sdp", "Payload does not contain a SDP.")
ErrInvalidSdp = NewError("invalid_sdp", "Payload does not contain a valid SDP.")
)
// ClientMessage is a message that is sent from a client to the server.
type ClientMessage struct {
json.Marshaler
@ -171,10 +162,6 @@ type ServerMessage struct {
Event *EventServerMessage `json:"event,omitempty"`
TransientData *TransientDataServerMessage `json:"transient,omitempty"`
Internal *InternalServerMessage `json:"internal,omitempty"`
Dialout *DialoutInternalClientMessage `json:"dialout,omitempty"`
}
func (r *ServerMessage) CloseAfterSend(session Session) bool {
@ -198,12 +185,12 @@ func (r *ServerMessage) CloseAfterSend(session Session) bool {
}
func (r *ServerMessage) IsChatRefresh() bool {
if r.Type != "message" || r.Message == nil || len(r.Message.Data) == 0 {
if r.Type != "message" || r.Message == nil || r.Message.Data == nil || len(*r.Message.Data) == 0 {
return false
}
var data MessageServerMessageData
if err := json.Unmarshal(r.Message.Data, &data); err != nil {
if err := json.Unmarshal(*r.Message.Data, &data); err != nil {
return false
}
@ -233,9 +220,9 @@ func (r *ServerMessage) String() string {
}
type Error struct {
Code string `json:"code"`
Message string `json:"message"`
Details json.RawMessage `json:"details,omitempty"`
Code string `json:"code"`
Message string `json:"message"`
Details interface{} `json:"details,omitempty"`
}
func NewError(code string, message string) *Error {
@ -243,19 +230,10 @@ func NewError(code string, message string) *Error {
}
func NewErrorDetail(code string, message string, details interface{}) *Error {
var rawDetails json.RawMessage
if details != nil {
var err error
if rawDetails, err = json.Marshal(details); err != nil {
log.Printf("Could not marshal details %+v for error %s with %s: %s", details, code, message, err)
return NewError("internal_error", "Could not marshal error details")
}
}
return &Error{
Code: code,
Message: message,
Details: rawDetails,
Details: details,
}
}
@ -366,7 +344,7 @@ func (p *HelloV2AuthParams) CheckValid() error {
type HelloV2TokenClaims struct {
jwt.RegisteredClaims
UserData json.RawMessage `json:"userdata,omitempty"`
UserData *json.RawMessage `json:"userdata,omitempty"`
}
type HelloClientMessageAuth struct {
@ -374,7 +352,7 @@ type HelloClientMessageAuth struct {
// "HelloClientTypeClient"
Type string `json:"type,omitempty"`
Params json.RawMessage `json:"params"`
Params *json.RawMessage `json:"params"`
Url string `json:"url"`
parsedUrl *url.URL
@ -393,7 +371,7 @@ type HelloClientMessage struct {
Features []string `json:"features,omitempty"`
// The authentication credentials.
Auth *HelloClientMessageAuth `json:"auth,omitempty"`
Auth HelloClientMessageAuth `json:"auth"`
}
func (m *HelloClientMessage) CheckValid() error {
@ -401,7 +379,7 @@ func (m *HelloClientMessage) CheckValid() error {
return InvalidHelloVersion
}
if m.ResumeId == "" {
if m.Auth == nil || len(m.Auth.Params) == 0 {
if m.Auth.Params == nil || len(*m.Auth.Params) == 0 {
return fmt.Errorf("params missing")
}
if m.Auth.Type == "" {
@ -425,14 +403,14 @@ func (m *HelloClientMessage) CheckValid() error {
case HelloVersionV1:
// No additional validation necessary.
case HelloVersionV2:
if err := json.Unmarshal(m.Auth.Params, &m.Auth.helloV2Params); err != nil {
if err := json.Unmarshal(*m.Auth.Params, &m.Auth.helloV2Params); err != nil {
return err
} else if err := m.Auth.helloV2Params.CheckValid(); err != nil {
return err
}
}
case HelloClientTypeInternal:
if err := json.Unmarshal(m.Auth.Params, &m.Auth.internalParams); err != nil {
if err := json.Unmarshal(*m.Auth.Params, &m.Auth.internalParams); err != nil {
return err
} else if err := m.Auth.internalParams.CheckValid(); err != nil {
return err
@ -455,14 +433,12 @@ const (
ServerFeatureWelcome = "welcome"
ServerFeatureHelloV2 = "hello-v2"
ServerFeatureSwitchTo = "switchto"
ServerFeatureDialout = "dialout"
// Features to send to internal clients only.
ServerFeatureInternalVirtualSessions = "virtual-sessions"
// Possible client features from the "hello" request.
ClientFeatureInternalInCall = "internal-incall"
ClientFeatureStartDialout = "start-dialout"
)
var (
@ -473,7 +449,6 @@ var (
ServerFeatureWelcome,
ServerFeatureHelloV2,
ServerFeatureSwitchTo,
ServerFeatureDialout,
}
DefaultFeaturesInternal = []string{
ServerFeatureInternalVirtualSessions,
@ -482,7 +457,6 @@ var (
ServerFeatureWelcome,
ServerFeatureHelloV2,
ServerFeatureSwitchTo,
ServerFeatureDialout,
}
DefaultWelcomeFeatures = []string{
ServerFeatureAudioVideoPermissions,
@ -492,7 +466,6 @@ var (
ServerFeatureWelcome,
ServerFeatureHelloV2,
ServerFeatureSwitchTo,
ServerFeatureDialout,
}
)
@ -534,12 +507,8 @@ func (m *RoomClientMessage) CheckValid() error {
}
type RoomServerMessage struct {
RoomId string `json:"roomid"`
Properties json.RawMessage `json:"properties,omitempty"`
}
type RoomErrorDetails struct {
Room *RoomServerMessage `json:"room"`
RoomId string `json:"roomid"`
Properties *json.RawMessage `json:"properties,omitempty"`
}
// Type "message"
@ -560,7 +529,7 @@ type MessageClientMessageRecipient struct {
type MessageClientMessage struct {
Recipient MessageClientMessageRecipient `json:"recipient"`
Data json.RawMessage `json:"data"`
Data *json.RawMessage `json:"data"`
}
type MessageClientMessageData struct {
@ -569,44 +538,10 @@ type MessageClientMessageData struct {
RoomType string `json:"roomType"`
Bitrate int `json:"bitrate,omitempty"`
Payload map[string]interface{} `json:"payload"`
offerSdp *sdp.SessionDescription // Only set if Type == "offer"
answerSdp *sdp.SessionDescription // Only set if Type == "answer"
}
func (m *MessageClientMessageData) CheckValid() error {
if m.RoomType != "" && !IsValidStreamType(m.RoomType) {
return fmt.Errorf("invalid room type: %s", m.RoomType)
}
if m.Type == "offer" || m.Type == "answer" {
sdpValue, found := m.Payload["sdp"]
if !found {
return ErrNoSdp
}
sdpText, ok := sdpValue.(string)
if !ok {
return ErrInvalidSdp
}
var sdp sdp.SessionDescription
if err := sdp.Unmarshal([]byte(sdpText)); err != nil {
return NewErrorDetail("invalid_sdp", "Error parsing SDP from payload.", map[string]interface{}{
"error": err.Error(),
})
}
switch m.Type {
case "offer":
m.offerSdp = &sdp
case "answer":
m.answerSdp = &sdp
}
}
return nil
}
func (m *MessageClientMessage) CheckValid() error {
if len(m.Data) == 0 {
if m.Data == nil || len(*m.Data) == 0 {
return fmt.Errorf("message empty")
}
switch m.Recipient.Type {
@ -647,7 +582,7 @@ type MessageServerMessage struct {
Sender *MessageServerMessageSender `json:"sender"`
Recipient *MessageClientMessageRecipient `json:"recipient,omitempty"`
Data json.RawMessage `json:"data"`
Data *json.RawMessage `json:"data"`
}
// Type "control"
@ -664,7 +599,7 @@ type ControlServerMessage struct {
Sender *MessageServerMessageSender `json:"sender"`
Recipient *MessageClientMessageRecipient `json:"recipient,omitempty"`
Data json.RawMessage `json:"data"`
Data *json.RawMessage `json:"data"`
}
// Type "internal"
@ -693,10 +628,10 @@ type AddSessionOptions struct {
type AddSessionInternalClientMessage struct {
CommonSessionInternalClientMessage
UserId string `json:"userid,omitempty"`
User json.RawMessage `json:"user,omitempty"`
Flags uint32 `json:"flags,omitempty"`
InCall *int `json:"incall,omitempty"`
UserId string `json:"userid,omitempty"`
User *json.RawMessage `json:"user,omitempty"`
Flags uint32 `json:"flags,omitempty"`
InCall *int `json:"incall,omitempty"`
Options *AddSessionOptions `json:"options,omitempty"`
}
@ -734,52 +669,6 @@ func (m *InCallInternalClientMessage) CheckValid() error {
return nil
}
type DialoutStatus string
var (
DialoutStatusAccepted DialoutStatus = "accepted"
DialoutStatusRinging DialoutStatus = "ringing"
DialoutStatusConnected DialoutStatus = "connected"
DialoutStatusRejected DialoutStatus = "rejected"
DialoutStatusCleared DialoutStatus = "cleared"
)
type DialoutStatusInternalClientMessage struct {
CallId string `json:"callid"`
Status DialoutStatus `json:"status"`
// Cause is set if Status is "cleared" or "rejected".
Cause string `json:"cause,omitempty"`
Code int `json:"code,omitempty"`
Message string `json:"message,omitempty"`
}
type DialoutInternalClientMessage struct {
Type string `json:"type"`
RoomId string `json:"roomid,omitempty"`
Error *Error `json:"error,omitempty"`
Status *DialoutStatusInternalClientMessage `json:"status,omitempty"`
}
func (m *DialoutInternalClientMessage) CheckValid() error {
switch m.Type {
case "":
return errors.New("type missing")
case "error":
if m.Error == nil {
return errors.New("error missing")
}
case "status":
if m.Status == nil {
return errors.New("status missing")
}
}
return nil
}
type InternalClientMessage struct {
Type string `json:"type"`
@ -790,14 +679,10 @@ type InternalClientMessage struct {
RemoveSession *RemoveSessionInternalClientMessage `json:"removesession,omitempty"`
InCall *InCallInternalClientMessage `json:"incall,omitempty"`
Dialout *DialoutInternalClientMessage `json:"dialout,omitempty"`
}
func (m *InternalClientMessage) CheckValid() error {
switch m.Type {
case "":
return errors.New("type missing")
case "addsession":
if m.AddSession == nil {
return fmt.Errorf("addsession missing")
@ -822,36 +707,17 @@ func (m *InternalClientMessage) CheckValid() error {
} else if err := m.InCall.CheckValid(); err != nil {
return err
}
case "dialout":
if m.Dialout == nil {
return fmt.Errorf("dialout missing")
} else if err := m.Dialout.CheckValid(); err != nil {
return err
}
}
return nil
}
type InternalServerDialoutRequest struct {
RoomId string `json:"roomid"`
Backend string `json:"backend"`
Request *BackendRoomDialoutRequest `json:"request"`
}
type InternalServerMessage struct {
Type string `json:"type"`
Dialout *InternalServerDialoutRequest `json:"dialout,omitempty"`
}
// Type "event"
type RoomEventServerMessage struct {
RoomId string `json:"roomid"`
Properties json.RawMessage `json:"properties,omitempty"`
RoomId string `json:"roomid"`
Properties *json.RawMessage `json:"properties,omitempty"`
// TODO(jojo): Change "InCall" to "int" when #914 has landed in NC Talk.
InCall json.RawMessage `json:"incall,omitempty"`
InCall *json.RawMessage `json:"incall,omitempty"`
Changed []map[string]interface{} `json:"changed,omitempty"`
Users []map[string]interface{} `json:"users,omitempty"`
@ -878,8 +744,8 @@ type RoomDisinviteEventServerMessage struct {
}
type RoomEventMessage struct {
RoomId string `json:"roomid"`
Data json.RawMessage `json:"data,omitempty"`
RoomId string `json:"roomid"`
Data *json.RawMessage `json:"data,omitempty"`
}
type RoomFlagsServerMessage struct {
@ -929,10 +795,10 @@ func (m *EventServerMessage) String() string {
}
type EventServerMessageSessionEntry struct {
SessionId string `json:"sessionid"`
UserId string `json:"userid"`
User json.RawMessage `json:"user,omitempty"`
RoomSessionId string `json:"roomsessionid,omitempty"`
SessionId string `json:"sessionid"`
UserId string `json:"userid"`
User *json.RawMessage `json:"user,omitempty"`
RoomSessionId string `json:"roomsessionid,omitempty"`
}
func (e *EventServerMessageSessionEntry) Clone() *EventServerMessageSessionEntry {
@ -965,9 +831,8 @@ type AnswerOfferMessage struct {
type TransientDataClientMessage struct {
Type string `json:"type"`
Key string `json:"key,omitempty"`
Value json.RawMessage `json:"value,omitempty"`
TTL time.Duration `json:"ttl,omitempty"`
Key string `json:"key,omitempty"`
Value *json.RawMessage `json:"value,omitempty"`
}
func (m *TransientDataClientMessage) CheckValid() error {

View file

@ -81,7 +81,6 @@ func testMessages(t *testing.T, messageType string, valid_messages []testCheckVa
}
func TestClientMessage(t *testing.T) {
t.Parallel()
// The message needs a type.
msg := ClientMessage{}
if err := msg.CheckValid(); err == nil {
@ -90,31 +89,30 @@ func TestClientMessage(t *testing.T) {
}
func TestHelloClientMessage(t *testing.T) {
t.Parallel()
internalAuthParams := []byte("{\"backend\":\"https://domain.invalid\"}")
tokenAuthParams := []byte("{\"token\":\"invalid-token\"}")
valid_messages := []testCheckValid{
// Hello version 1
&HelloClientMessage{
Version: HelloVersionV1,
Auth: &HelloClientMessageAuth{
Params: json.RawMessage("{}"),
Auth: HelloClientMessageAuth{
Params: &json.RawMessage{'{', '}'},
Url: "https://domain.invalid",
},
},
&HelloClientMessage{
Version: HelloVersionV1,
Auth: &HelloClientMessageAuth{
Auth: HelloClientMessageAuth{
Type: "client",
Params: json.RawMessage("{}"),
Params: &json.RawMessage{'{', '}'},
Url: "https://domain.invalid",
},
},
&HelloClientMessage{
Version: HelloVersionV1,
Auth: &HelloClientMessageAuth{
Auth: HelloClientMessageAuth{
Type: "internal",
Params: internalAuthParams,
Params: (*json.RawMessage)(&internalAuthParams),
},
},
&HelloClientMessage{
@ -124,16 +122,16 @@ func TestHelloClientMessage(t *testing.T) {
// Hello version 2
&HelloClientMessage{
Version: HelloVersionV2,
Auth: &HelloClientMessageAuth{
Params: tokenAuthParams,
Auth: HelloClientMessageAuth{
Params: (*json.RawMessage)(&tokenAuthParams),
Url: "https://domain.invalid",
},
},
&HelloClientMessage{
Version: HelloVersionV2,
Auth: &HelloClientMessageAuth{
Auth: HelloClientMessageAuth{
Type: "client",
Params: tokenAuthParams,
Params: (*json.RawMessage)(&tokenAuthParams),
Url: "https://domain.invalid",
},
},
@ -149,75 +147,75 @@ func TestHelloClientMessage(t *testing.T) {
&HelloClientMessage{Version: HelloVersionV1},
&HelloClientMessage{
Version: HelloVersionV1,
Auth: &HelloClientMessageAuth{
Params: json.RawMessage("{}"),
Auth: HelloClientMessageAuth{
Params: &json.RawMessage{'{', '}'},
Type: "invalid-type",
},
},
&HelloClientMessage{
Version: HelloVersionV1,
Auth: &HelloClientMessageAuth{
Auth: HelloClientMessageAuth{
Url: "https://domain.invalid",
},
},
&HelloClientMessage{
Version: HelloVersionV1,
Auth: &HelloClientMessageAuth{
Params: json.RawMessage("{}"),
Auth: HelloClientMessageAuth{
Params: &json.RawMessage{'{', '}'},
},
},
&HelloClientMessage{
Version: HelloVersionV1,
Auth: &HelloClientMessageAuth{
Params: json.RawMessage("{}"),
Auth: HelloClientMessageAuth{
Params: &json.RawMessage{'{', '}'},
Url: "invalid-url",
},
},
&HelloClientMessage{
Version: HelloVersionV1,
Auth: &HelloClientMessageAuth{
Auth: HelloClientMessageAuth{
Type: "internal",
Params: json.RawMessage("{}"),
Params: &json.RawMessage{'{', '}'},
},
},
&HelloClientMessage{
Version: HelloVersionV1,
Auth: &HelloClientMessageAuth{
Auth: HelloClientMessageAuth{
Type: "internal",
Params: json.RawMessage("xyz"), // Invalid JSON.
Params: &json.RawMessage{'x', 'y', 'z'}, // Invalid JSON.
},
},
// Hello version 2
&HelloClientMessage{
Version: HelloVersionV2,
Auth: &HelloClientMessageAuth{
Auth: HelloClientMessageAuth{
Url: "https://domain.invalid",
},
},
&HelloClientMessage{
Version: HelloVersionV2,
Auth: &HelloClientMessageAuth{
Params: tokenAuthParams,
Auth: HelloClientMessageAuth{
Params: (*json.RawMessage)(&tokenAuthParams),
},
},
&HelloClientMessage{
Version: HelloVersionV2,
Auth: &HelloClientMessageAuth{
Params: tokenAuthParams,
Auth: HelloClientMessageAuth{
Params: (*json.RawMessage)(&tokenAuthParams),
Url: "invalid-url",
},
},
&HelloClientMessage{
Version: HelloVersionV2,
Auth: &HelloClientMessageAuth{
Params: internalAuthParams,
Auth: HelloClientMessageAuth{
Params: (*json.RawMessage)(&internalAuthParams),
Url: "https://domain.invalid",
},
},
&HelloClientMessage{
Version: HelloVersionV2,
Auth: &HelloClientMessageAuth{
Params: json.RawMessage("xyz"), // Invalid JSON.
Auth: HelloClientMessageAuth{
Params: &json.RawMessage{'x', 'y', 'z'}, // Invalid JSON.
Url: "https://domain.invalid",
},
},
@ -235,27 +233,26 @@ func TestHelloClientMessage(t *testing.T) {
}
func TestMessageClientMessage(t *testing.T) {
t.Parallel()
valid_messages := []testCheckValid{
&MessageClientMessage{
Recipient: MessageClientMessageRecipient{
Type: "session",
SessionId: "the-session-id",
},
Data: json.RawMessage("{}"),
Data: &json.RawMessage{'{', '}'},
},
&MessageClientMessage{
Recipient: MessageClientMessageRecipient{
Type: "user",
UserId: "the-user-id",
},
Data: json.RawMessage("{}"),
Data: &json.RawMessage{'{', '}'},
},
&MessageClientMessage{
Recipient: MessageClientMessageRecipient{
Type: "room",
},
Data: json.RawMessage("{}"),
Data: &json.RawMessage{'{', '}'},
},
}
invalid_messages := []testCheckValid{
@ -270,20 +267,20 @@ func TestMessageClientMessage(t *testing.T) {
Recipient: MessageClientMessageRecipient{
Type: "session",
},
Data: json.RawMessage("{}"),
Data: &json.RawMessage{'{', '}'},
},
&MessageClientMessage{
Recipient: MessageClientMessageRecipient{
Type: "session",
UserId: "the-user-id",
},
Data: json.RawMessage("{}"),
Data: &json.RawMessage{'{', '}'},
},
&MessageClientMessage{
Recipient: MessageClientMessageRecipient{
Type: "user",
},
Data: json.RawMessage("{}"),
Data: &json.RawMessage{'{', '}'},
},
&MessageClientMessage{
Recipient: MessageClientMessageRecipient{
@ -296,13 +293,13 @@ func TestMessageClientMessage(t *testing.T) {
Type: "user",
SessionId: "the-user-id",
},
Data: json.RawMessage("{}"),
Data: &json.RawMessage{'{', '}'},
},
&MessageClientMessage{
Recipient: MessageClientMessageRecipient{
Type: "unknown-type",
},
Data: json.RawMessage("{}"),
Data: &json.RawMessage{'{', '}'},
},
}
testMessages(t, "message", valid_messages, invalid_messages)
@ -317,7 +314,6 @@ func TestMessageClientMessage(t *testing.T) {
}
func TestByeClientMessage(t *testing.T) {
t.Parallel()
// Any "bye" message is valid.
valid_messages := []testCheckValid{
&ByeClientMessage{},
@ -336,7 +332,6 @@ func TestByeClientMessage(t *testing.T) {
}
func TestRoomClientMessage(t *testing.T) {
t.Parallel()
// Any "room" message is valid.
valid_messages := []testCheckValid{
&RoomClientMessage{},
@ -355,7 +350,6 @@ func TestRoomClientMessage(t *testing.T) {
}
func TestErrorMessages(t *testing.T) {
t.Parallel()
id := "request-id"
msg := ClientMessage{
Id: id,
@ -388,13 +382,12 @@ func TestErrorMessages(t *testing.T) {
}
func TestIsChatRefresh(t *testing.T) {
t.Parallel()
var msg ServerMessage
data_true := []byte("{\"type\":\"chat\",\"chat\":{\"refresh\":true}}")
msg = ServerMessage{
Type: "message",
Message: &MessageServerMessage{
Data: data_true,
Data: (*json.RawMessage)(&data_true),
},
}
if !msg.IsChatRefresh() {
@ -405,7 +398,7 @@ func TestIsChatRefresh(t *testing.T) {
msg = ServerMessage{
Type: "message",
Message: &MessageServerMessage{
Data: data_false,
Data: (*json.RawMessage)(&data_false),
},
}
if msg.IsChatRefresh() {
@ -433,7 +426,6 @@ func assertEqualStrings(t *testing.T, expected, result []string) {
}
func Test_Welcome_AddRemoveFeature(t *testing.T) {
t.Parallel()
var msg WelcomeServerMessage
assertEqualStrings(t, []string{}, msg.Features)

View file

@ -280,8 +280,6 @@ func (e *asyncEventsNats) Close() {
sub.close()
}
}(e.sessionSubscriptions)
// Can't use clear(...) here as the maps are processed asynchronously by the
// goroutines above.
e.backendRoomSubscriptions = make(map[string]*asyncBackendRoomSubscriberNats)
e.roomSubscriptions = make(map[string]*asyncRoomSubscriberNats)
e.userSubscriptions = make(map[string]*asyncUserSubscriberNats)

View file

@ -194,7 +194,7 @@ func (b *BackendClient) PerformJSONRequest(ctx context.Context, u *url.URL, requ
if err := json.Unmarshal(body, &ocs); err != nil {
log.Printf("Could not decode OCS response %s from %s: %s", string(body), req.URL, err)
return err
} else if ocs.Ocs == nil || len(ocs.Ocs.Data) == 0 {
} else if ocs.Ocs == nil || ocs.Ocs.Data == nil {
log.Printf("Incomplete OCS response %s from %s", string(body), req.URL)
return ErrIncompleteResponse
}
@ -205,8 +205,8 @@ func (b *BackendClient) PerformJSONRequest(ctx context.Context, u *url.URL, requ
return ErrThrottledResponse
}
if err := json.Unmarshal(ocs.Ocs.Data, response); err != nil {
log.Printf("Could not decode OCS response body %s from %s: %s", string(ocs.Ocs.Data), req.URL, err)
if err := json.Unmarshal(*ocs.Ocs.Data, response); err != nil {
log.Printf("Could not decode OCS response body %s from %s: %s", string(*ocs.Ocs.Data), req.URL, err)
return err
}
} else if err := json.Unmarshal(body, response); err != nil {

View file

@ -45,7 +45,7 @@ func returnOCS(t *testing.T, w http.ResponseWriter, body []byte) {
StatusCode: http.StatusOK,
Message: "OK",
},
Data: body,
Data: (*json.RawMessage)(&body),
},
}
if strings.Contains(t.Name(), "Throttled") {
@ -70,8 +70,6 @@ func returnOCS(t *testing.T, w http.ResponseWriter, body []byte) {
}
func TestPostOnRedirect(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
r := mux.NewRouter()
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/ocs/v2.php/two", http.StatusTemporaryRedirect)
@ -127,8 +125,6 @@ func TestPostOnRedirect(t *testing.T) {
}
func TestPostOnRedirectDifferentHost(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
r := mux.NewRouter()
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "http://domain.invalid/ocs/v2.php/two", http.StatusTemporaryRedirect)
@ -169,8 +165,6 @@ func TestPostOnRedirectDifferentHost(t *testing.T) {
}
func TestPostOnRedirectStatusFound(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
r := mux.NewRouter()
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/ocs/v2.php/two", http.StatusFound)
@ -223,8 +217,6 @@ func TestPostOnRedirectStatusFound(t *testing.T) {
}
func TestHandleThrottled(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
r := mux.NewRouter()
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
returnOCS(t, w, []byte("[]"))

View file

@ -92,7 +92,6 @@ func testBackends(t *testing.T, config *BackendConfiguration, valid_urls [][]str
}
func TestIsUrlAllowed_Compat(t *testing.T) {
CatchLogForTest(t)
// Old-style configuration
valid_urls := []string{
"http://domain.invalid",
@ -115,7 +114,6 @@ func TestIsUrlAllowed_Compat(t *testing.T) {
}
func TestIsUrlAllowed_CompatForceHttps(t *testing.T) {
CatchLogForTest(t)
// Old-style configuration, force HTTPS
valid_urls := []string{
"https://domain.invalid",
@ -137,7 +135,6 @@ func TestIsUrlAllowed_CompatForceHttps(t *testing.T) {
}
func TestIsUrlAllowed(t *testing.T) {
CatchLogForTest(t)
valid_urls := [][]string{
{"https://domain.invalid/foo", string(testBackendSecret) + "-foo"},
{"https://domain.invalid/foo/", string(testBackendSecret) + "-foo"},
@ -183,7 +180,6 @@ func TestIsUrlAllowed(t *testing.T) {
}
func TestIsUrlAllowed_EmptyAllowlist(t *testing.T) {
CatchLogForTest(t)
valid_urls := []string{}
invalid_urls := []string{
"http://domain.invalid",
@ -201,7 +197,6 @@ func TestIsUrlAllowed_EmptyAllowlist(t *testing.T) {
}
func TestIsUrlAllowed_AllowAll(t *testing.T) {
CatchLogForTest(t)
valid_urls := []string{
"http://domain.invalid",
"https://domain.invalid",
@ -227,7 +222,6 @@ type ParseBackendIdsTestcase struct {
}
func TestParseBackendIds(t *testing.T) {
CatchLogForTest(t)
testcases := []ParseBackendIdsTestcase{
{"", nil},
{"backend1", []string{"backend1"}},
@ -247,7 +241,6 @@ func TestParseBackendIds(t *testing.T) {
}
func TestBackendReloadNoChange(t *testing.T) {
CatchLogForTest(t)
current := testutil.ToFloat64(statsBackendsCurrent)
original_config := goconf.NewConfigFile()
original_config.AddOption("backend", "backends", "backend1, backend2")
@ -283,7 +276,6 @@ func TestBackendReloadNoChange(t *testing.T) {
}
func TestBackendReloadChangeExistingURL(t *testing.T) {
CatchLogForTest(t)
current := testutil.ToFloat64(statsBackendsCurrent)
original_config := goconf.NewConfigFile()
original_config.AddOption("backend", "backends", "backend1, backend2")
@ -324,7 +316,6 @@ func TestBackendReloadChangeExistingURL(t *testing.T) {
}
func TestBackendReloadChangeSecret(t *testing.T) {
CatchLogForTest(t)
current := testutil.ToFloat64(statsBackendsCurrent)
original_config := goconf.NewConfigFile()
original_config.AddOption("backend", "backends", "backend1, backend2")
@ -363,7 +354,6 @@ func TestBackendReloadChangeSecret(t *testing.T) {
}
func TestBackendReloadAddBackend(t *testing.T) {
CatchLogForTest(t)
current := testutil.ToFloat64(statsBackendsCurrent)
original_config := goconf.NewConfigFile()
original_config.AddOption("backend", "backends", "backend1")
@ -404,7 +394,6 @@ func TestBackendReloadAddBackend(t *testing.T) {
}
func TestBackendReloadRemoveHost(t *testing.T) {
CatchLogForTest(t)
current := testutil.ToFloat64(statsBackendsCurrent)
original_config := goconf.NewConfigFile()
original_config.AddOption("backend", "backends", "backend1, backend2")
@ -442,7 +431,6 @@ func TestBackendReloadRemoveHost(t *testing.T) {
}
func TestBackendReloadRemoveBackendFromSharedHost(t *testing.T) {
CatchLogForTest(t)
current := testutil.ToFloat64(statsBackendsCurrent)
original_config := goconf.NewConfigFile()
original_config.AddOption("backend", "backends", "backend1, backend2")
@ -498,8 +486,6 @@ func mustParse(s string) *url.URL {
}
func TestBackendConfiguration_Etcd(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
etcd, client := NewEtcdClientForTest(t)
url1 := "https://domain1.invalid/foo"
@ -519,7 +505,8 @@ func TestBackendConfiguration_Etcd(t *testing.T) {
defer cfg.Close()
storage := cfg.storage.(*backendStorageEtcd)
ch := storage.getWakeupChannelForTesting()
ch := make(chan bool, 1)
storage.SetWakeupForTesting(ch)
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
@ -631,56 +618,3 @@ func TestBackendConfiguration_Etcd(t *testing.T) {
t.Errorf("Should have removed host information for %s", "domain1.invalid")
}
}
func TestBackendCommonSecret(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
u1, err := url.Parse("http://domain1.invalid")
if err != nil {
t.Fatal(err)
}
u2, err := url.Parse("http://domain2.invalid")
if err != nil {
t.Fatal(err)
}
original_config := goconf.NewConfigFile()
original_config.AddOption("backend", "backends", "backend1, backend2")
original_config.AddOption("backend", "secret", string(testBackendSecret))
original_config.AddOption("backend1", "url", u1.String())
original_config.AddOption("backend2", "url", u2.String())
original_config.AddOption("backend2", "secret", string(testBackendSecret)+"-backend2")
cfg, err := NewBackendConfiguration(original_config, nil)
if err != nil {
t.Fatal(err)
}
if b1 := cfg.GetBackend(u1); b1 == nil {
t.Error("didn't get backend")
} else if !bytes.Equal(b1.Secret(), testBackendSecret) {
t.Errorf("expected secret %s, got %s", string(testBackendSecret), string(b1.Secret()))
}
if b2 := cfg.GetBackend(u2); b2 == nil {
t.Error("didn't get backend")
} else if !bytes.Equal(b2.Secret(), []byte(string(testBackendSecret)+"-backend2")) {
t.Errorf("expected secret %s, got %s", string(testBackendSecret)+"-backend2", string(b2.Secret()))
}
updated_config := goconf.NewConfigFile()
updated_config.AddOption("backend", "backends", "backend1, backend2")
updated_config.AddOption("backend", "secret", string(testBackendSecret))
updated_config.AddOption("backend1", "url", u1.String())
updated_config.AddOption("backend1", "secret", string(testBackendSecret)+"-backend1")
updated_config.AddOption("backend2", "url", u2.String())
cfg.Reload(updated_config)
if b1 := cfg.GetBackend(u1); b1 == nil {
t.Error("didn't get backend")
} else if !bytes.Equal(b1.Secret(), []byte(string(testBackendSecret)+"-backend1")) {
t.Errorf("expected secret %s, got %s", string(testBackendSecret)+"-backend1", string(b1.Secret()))
}
if b2 := cfg.GetBackend(u2); b2 == nil {
t.Error("didn't get backend")
} else if !bytes.Equal(b2.Secret(), testBackendSecret) {
t.Errorf("expected secret %s, got %s", string(testBackendSecret), string(b2.Secret()))
}
}

View file

@ -28,7 +28,6 @@ import (
"crypto/sha1"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"log"
@ -36,10 +35,8 @@ import (
"net/http"
"net/url"
"reflect"
"regexp"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/dlintw/goconf"
@ -68,7 +65,7 @@ type BackendServer struct {
turnvalid time.Duration
turnservers []string
statsAllowedIps atomic.Pointer[AllowedIps]
statsAllowedIps map[string]bool
invalidSecret []byte
}
@ -103,16 +100,21 @@ func NewBackendServer(config *goconf.ConfigFile, hub *Hub, version string) (*Bac
}
statsAllowed, _ := config.GetString("stats", "allowed_ips")
statsAllowedIps, err := ParseAllowedIps(statsAllowed)
if err != nil {
return nil, err
}
if !statsAllowedIps.Empty() {
log.Printf("Only allowing access to the stats endpoint from %s", statsAllowed)
} else {
var statsAllowedIps map[string]bool
if statsAllowed == "" {
log.Printf("No IPs configured for the stats endpoint, only allowing access from 127.0.0.1")
statsAllowedIps = DefaultAllowedIps()
statsAllowedIps = map[string]bool{
"127.0.0.1": true,
}
} else {
log.Printf("Only allowing access to the stats endpoing from %s", statsAllowed)
statsAllowedIps = make(map[string]bool)
for _, ip := range strings.Split(statsAllowed, ",") {
ip = strings.TrimSpace(ip)
if ip != "" {
statsAllowedIps[ip] = true
}
}
}
invalidSecret := make([]byte, 32)
@ -120,7 +122,7 @@ func NewBackendServer(config *goconf.ConfigFile, hub *Hub, version string) (*Bac
return nil, err
}
result := &BackendServer{
return &BackendServer{
hub: hub,
events: hub.events,
roomSessions: hub.roomSessions,
@ -131,27 +133,9 @@ func NewBackendServer(config *goconf.ConfigFile, hub *Hub, version string) (*Bac
turnvalid: turnvalid,
turnservers: turnserverslist,
invalidSecret: invalidSecret,
}
result.statsAllowedIps.Store(statsAllowedIps)
return result, nil
}
func (b *BackendServer) Reload(config *goconf.ConfigFile) {
statsAllowed, _ := config.GetString("stats", "allowed_ips")
if statsAllowedIps, err := ParseAllowedIps(statsAllowed); err == nil {
if !statsAllowedIps.Empty() {
log.Printf("Only allowing access to the stats endpoint from %s", statsAllowed)
} else {
log.Printf("No IPs configured for the stats endpoint, only allowing access from 127.0.0.1")
statsAllowedIps = DefaultAllowedIps()
}
b.statsAllowedIps.Store(statsAllowedIps)
} else {
log.Printf("Error parsing allowed stats ips from \"%s\": %s", statsAllowedIps, err)
}
statsAllowedIps: statsAllowedIps,
invalidSecret: invalidSecret,
}, nil
}
func (b *BackendServer) Start(r *mux.Router) error {
@ -295,7 +279,7 @@ func (b *BackendServer) parseRequestBody(f func(http.ResponseWriter, *http.Reque
}
}
func (b *BackendServer) sendRoomInvite(roomid string, backend *Backend, userids []string, properties json.RawMessage) {
func (b *BackendServer) sendRoomInvite(roomid string, backend *Backend, userids []string, properties *json.RawMessage) {
msg := &AsyncMessage{
Type: "message",
Message: &ServerMessage{
@ -365,7 +349,7 @@ func (b *BackendServer) sendRoomDisinvite(roomid string, backend *Backend, reaso
wg.Wait()
}
func (b *BackendServer) sendRoomUpdate(roomid string, backend *Backend, notified_userids []string, all_userids []string, properties json.RawMessage) {
func (b *BackendServer) sendRoomUpdate(roomid string, backend *Backend, notified_userids []string, all_userids []string, properties *json.RawMessage) {
msg := &AsyncMessage{
Type: "message",
Message: &ServerMessage{
@ -571,11 +555,11 @@ func (b *BackendServer) sendRoomSwitchTo(roomid string, backend *Backend, reques
var wg sync.WaitGroup
var mu sync.Mutex
if len(request.SwitchTo.Sessions) > 0 {
if request.SwitchTo.Sessions != nil {
// We support both a list of sessions or a map with additional details per session.
if request.SwitchTo.Sessions[0] == '[' {
if (*request.SwitchTo.Sessions)[0] == '[' {
var sessionsList BackendRoomSwitchToSessionsList
if err := json.Unmarshal(request.SwitchTo.Sessions, &sessionsList); err != nil {
if err := json.Unmarshal(*request.SwitchTo.Sessions, &sessionsList); err != nil {
return err
}
@ -613,7 +597,7 @@ func (b *BackendServer) sendRoomSwitchTo(roomid string, backend *Backend, reques
request.SwitchTo.SessionsMap = nil
} else {
var sessionsMap BackendRoomSwitchToSessionsMap
if err := json.Unmarshal(request.SwitchTo.Sessions, &sessionsMap); err != nil {
if err := json.Unmarshal(*request.SwitchTo.Sessions, &sessionsMap); err != nil {
return err
}
@ -660,135 +644,7 @@ func (b *BackendServer) sendRoomSwitchTo(roomid string, backend *Backend, reques
return b.events.PublishBackendRoomMessage(roomid, backend, message)
}
type BackendResponseWithStatus interface {
Status() int
}
type DialoutErrorResponse struct {
BackendServerRoomResponse
status int
}
func (r *DialoutErrorResponse) Status() int {
return r.status
}
func returnDialoutError(status int, err *Error) (any, error) {
response := &DialoutErrorResponse{
BackendServerRoomResponse: BackendServerRoomResponse{
Type: "dialout",
Dialout: &BackendRoomDialoutResponse{
Error: err,
},
},
status: status,
}
return response, nil
}
var checkNumeric = regexp.MustCompile(`^[0-9]+$`)
func isNumeric(s string) bool {
return checkNumeric.MatchString(s)
}
func (b *BackendServer) startDialout(roomid string, backend *Backend, backendUrl string, request *BackendServerRoomRequest) (any, error) {
if err := request.Dialout.ValidateNumber(); err != nil {
return returnDialoutError(http.StatusBadRequest, err)
}
if !isNumeric(roomid) {
return returnDialoutError(http.StatusBadRequest, NewError("invalid_roomid", "The room id must be numeric."))
}
session := b.hub.GetDialoutSession(roomid, backend)
if session == nil {
return returnDialoutError(http.StatusNotFound, NewError("no_client_available", "No available client found to trigger dialout."))
}
url := backend.Url()
if url == "" {
// Old-style compat backend, use client-provided URL.
url = backendUrl
if url != "" && url[len(url)-1] != '/' {
url += "/"
}
}
id := newRandomString(32)
msg := &ServerMessage{
Id: id,
Type: "internal",
Internal: &InternalServerMessage{
Type: "dialout",
Dialout: &InternalServerDialoutRequest{
RoomId: roomid,
Backend: url,
Request: request.Dialout,
},
},
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
var response atomic.Pointer[DialoutInternalClientMessage]
session.HandleResponse(id, func(message *ClientMessage) bool {
response.Store(message.Internal.Dialout)
cancel()
// Don't send error to other sessions in the room.
return message.Internal.Dialout.Error != nil
})
defer session.ClearResponseHandler(id)
if !session.SendMessage(msg) {
return returnDialoutError(http.StatusBadGateway, NewError("error_notify", "Could not notify about new dialout."))
}
<-ctx.Done()
if err := ctx.Err(); err != nil && !errors.Is(err, context.Canceled) {
return returnDialoutError(http.StatusGatewayTimeout, NewError("timeout", "Timeout while waiting for dialout to start."))
}
dialout := response.Load()
if dialout == nil {
return returnDialoutError(http.StatusBadGateway, NewError("error_notify", "No dialout response received."))
}
switch dialout.Type {
case "error":
return returnDialoutError(http.StatusBadGateway, dialout.Error)
case "status":
if dialout.Status.Status != DialoutStatusAccepted {
log.Printf("Received unsupported dialout status when triggering dialout: %+v", dialout)
return returnDialoutError(http.StatusBadGateway, NewError("unsupported_status", "Unsupported dialout status received."))
}
return &BackendServerRoomResponse{
Type: "dialout",
Dialout: &BackendRoomDialoutResponse{
CallId: dialout.Status.CallId,
},
}, nil
}
log.Printf("Received unsupported dialout type when triggering dialout: %+v", dialout)
return returnDialoutError(http.StatusBadGateway, NewError("unsupported_type", "Unsupported dialout type received."))
}
func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body []byte) {
throttle, err := b.hub.throttler.CheckBruteforce(r.Context(), b.hub.getRealUserIP(r), "BackendRoomAuth")
if err == ErrBruteforceDetected {
http.Error(w, "Too many requests", http.StatusTooManyRequests)
return
} else if err != nil {
log.Printf("Error checking for bruteforce: %s", err)
http.Error(w, "Could not check for bruteforce", http.StatusInternalServerError)
return
}
v := mux.Vars(r)
roomid := v["roomid"]
@ -801,7 +657,6 @@ func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body
if backend == nil {
// Unknown backend URL passed, return immediately.
throttle(r.Context())
http.Error(w, "Authentication check failed", http.StatusForbidden)
return
}
@ -823,14 +678,12 @@ func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body
}
if backend == nil {
throttle(r.Context())
http.Error(w, "Authentication check failed", http.StatusForbidden)
return
}
}
if !ValidateBackendChecksum(r, body, backend.Secret()) {
throttle(r.Context())
http.Error(w, "Authentication check failed", http.StatusForbidden)
return
}
@ -844,7 +697,7 @@ func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body
request.ReceivedTime = time.Now().UnixNano()
var response any
var err error
switch request.Type {
case "invite":
b.sendRoomInvite(roomid, backend, request.Invite.UserIds, request.Invite.Properties)
@ -874,8 +727,6 @@ func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body
err = b.sendRoomMessage(roomid, backend, &request)
case "switchto":
err = b.sendRoomSwitchTo(roomid, backend, &request)
case "dialout":
response, err = b.startDialout(roomid, backend, backendUrl, &request)
default:
http.Error(w, "Unsupported request type: "+request.Type, http.StatusBadRequest)
return
@ -887,43 +738,22 @@ func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body
return
}
var responseData []byte
responseStatus := http.StatusOK
if response == nil {
// TODO(jojo): Return better response struct.
responseData = []byte("{}")
} else {
if s, ok := response.(BackendResponseWithStatus); ok {
responseStatus = s.Status()
}
responseData, err = json.Marshal(response)
if err != nil {
log.Printf("Could not serialize backend response %+v: %s", response, err)
responseStatus = http.StatusInternalServerError
responseData = []byte("{\"error\":\"could_not_serialize\"}")
}
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.WriteHeader(responseStatus)
w.Write(responseData) // nolint
}
func (b *BackendServer) allowStatsAccess(r *http.Request) bool {
addr := b.hub.getRealUserIP(r)
ip := net.ParseIP(addr)
if len(ip) == 0 {
return false
}
allowed := b.statsAllowedIps.Load()
return allowed != nil && allowed.Allowed(ip)
w.WriteHeader(http.StatusOK)
// TODO(jojo): Return better response struct.
w.Write([]byte("{}")) // nolint
}
func (b *BackendServer) validateStatsRequest(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
if !b.allowStatsAccess(r) {
addr := getRealUserIP(r)
if strings.Contains(addr, ":") {
if host, _, err := net.SplitHostPort(addr); err == nil {
addr = host
}
}
if !b.statsAllowedIps[addr] {
http.Error(w, "Authentication check failed", http.StatusForbidden)
return
}

View file

@ -30,10 +30,8 @@ import (
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/http/httptest"
"net/textproto"
"net/url"
"reflect"
"strings"
@ -80,18 +78,11 @@ func CreateBackendServerForTestFromConfig(t *testing.T, config *goconf.ConfigFil
if err != nil {
t.Fatal(err)
}
if strings.Contains(t.Name(), "Compat") {
config.AddOption("backend", "allowed", u.Host)
config.AddOption("backend", "secret", string(testBackendSecret))
} else {
backendId := "backend1"
config.AddOption("backend", "backends", backendId)
config.AddOption(backendId, "url", server.URL)
config.AddOption(backendId, "secret", string(testBackendSecret))
}
config.AddOption("backend", "allowed", u.Host)
if u.Scheme == "http" {
config.AddOption("backend", "allowhttp", "true")
}
config.AddOption("backend", "secret", string(testBackendSecret))
config.AddOption("sessions", "hashkey", "12345678901234567890123456789012")
config.AddOption("sessions", "blockkey", "09876543210987654321098765432109")
config.AddOption("clients", "internalsecret", string(testInternalSecret))
@ -170,7 +161,7 @@ func CreateBackendServerWithClusteringForTestFromConfig(t *testing.T, config1 *g
t.Cleanup(func() {
events1.Close()
})
client1, _ := NewGrpcClientsForTest(t, addr2)
client1 := NewGrpcClientsForTest(t, addr2)
hub1, err := NewHub(config1, events1, grpcServer1, client1, nil, r1, "no-version")
if err != nil {
t.Fatal(err)
@ -199,7 +190,7 @@ func CreateBackendServerWithClusteringForTestFromConfig(t *testing.T, config1 *g
t.Cleanup(func() {
events2.Close()
})
client2, _ := NewGrpcClientsForTest(t, addr1)
client2 := NewGrpcClientsForTest(t, addr1)
hub2, err := NewHub(config2, events2, grpcServer2, client2, nil, r2, "no-version")
if err != nil {
t.Fatal(err)
@ -234,8 +225,8 @@ func CreateBackendServerWithClusteringForTestFromConfig(t *testing.T, config1 *g
return b1, b2, hub1, hub2, server1, server2
}
func performBackendRequest(requestUrl string, body []byte) (*http.Response, error) {
request, err := http.NewRequest("POST", requestUrl, bytes.NewReader(body))
func performBackendRequest(url string, body []byte) (*http.Response, error) {
request, err := http.NewRequest("POST", url, bytes.NewReader(body))
if err != nil {
return nil, err
}
@ -244,11 +235,7 @@ func performBackendRequest(requestUrl string, body []byte) (*http.Response, erro
check := CalculateBackendChecksum(rnd, body, testBackendSecret)
request.Header.Set("Spreed-Signaling-Random", rnd)
request.Header.Set("Spreed-Signaling-Checksum", check)
u, err := url.Parse(requestUrl)
if err != nil {
return nil, err
}
request.Header.Set("Spreed-Signaling-Backend", u.Scheme+"://"+u.Host)
request.Header.Set("Spreed-Signaling-Backend", url)
client := &http.Client{}
return client.Do(request)
}
@ -276,8 +263,6 @@ func expectRoomlistEvent(ch chan *AsyncMessage, msgType string) (*EventServerMes
}
func TestBackendServer_NoAuth(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, _, _, server := CreateBackendServerForTest(t)
roomId := "the-room-id"
@ -304,8 +289,6 @@ func TestBackendServer_NoAuth(t *testing.T) {
}
func TestBackendServer_InvalidAuth(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, _, _, server := CreateBackendServerForTest(t)
roomId := "the-room-id"
@ -334,8 +317,6 @@ func TestBackendServer_InvalidAuth(t *testing.T) {
}
func TestBackendServer_OldCompatAuth(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, _, _, server := CreateBackendServerForTest(t)
roomId := "the-room-id"
@ -350,7 +331,7 @@ func TestBackendServer_OldCompatAuth(t *testing.T) {
AllUserIds: []string{
userid,
},
Properties: roomProperties,
Properties: &roomProperties,
},
}
@ -385,8 +366,6 @@ func TestBackendServer_OldCompatAuth(t *testing.T) {
}
func TestBackendServer_InvalidBody(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, _, _, server := CreateBackendServerForTest(t)
roomId := "the-room-id"
@ -406,8 +385,6 @@ func TestBackendServer_InvalidBody(t *testing.T) {
}
func TestBackendServer_UnsupportedRequest(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, _, _, server := CreateBackendServerForTest(t)
msg := &BackendServerRoomRequest{
@ -434,10 +411,8 @@ func TestBackendServer_UnsupportedRequest(t *testing.T) {
}
func TestBackendServer_RoomInvite(t *testing.T) {
CatchLogForTest(t)
for _, backend := range eventBackendsForTest {
t.Run(backend, func(t *testing.T) {
t.Parallel()
RunTestBackendServer_RoomInvite(t)
})
}
@ -481,7 +456,7 @@ func RunTestBackendServer_RoomInvite(t *testing.T) {
AllUserIds: []string{
userid,
},
Properties: roomProperties,
Properties: &roomProperties,
},
}
@ -510,16 +485,14 @@ func RunTestBackendServer_RoomInvite(t *testing.T) {
t.Errorf("Expected invite, got %+v", event)
} else if event.Invite.RoomId != roomId {
t.Errorf("Expected room %s, got %+v", roomId, event)
} else if !bytes.Equal(event.Invite.Properties, roomProperties) {
t.Errorf("Room properties don't match: expected %s, got %s", string(roomProperties), string(event.Invite.Properties))
} else if event.Invite.Properties == nil || !bytes.Equal(*event.Invite.Properties, roomProperties) {
t.Errorf("Room properties don't match: expected %s, got %s", string(roomProperties), string(*event.Invite.Properties))
}
}
func TestBackendServer_RoomDisinvite(t *testing.T) {
CatchLogForTest(t)
for _, backend := range eventBackendsForTest {
t.Run(backend, func(t *testing.T) {
t.Parallel()
RunTestBackendServer_RoomDisinvite(t)
})
}
@ -583,7 +556,7 @@ func RunTestBackendServer_RoomDisinvite(t *testing.T) {
roomId + "-" + hello.Hello.SessionId,
},
AllUserIds: []string{},
Properties: roomProperties,
Properties: &roomProperties,
},
}
@ -611,8 +584,8 @@ func RunTestBackendServer_RoomDisinvite(t *testing.T) {
t.Errorf("Expected disinvite, got %+v", event)
} else if event.Disinvite.RoomId != roomId {
t.Errorf("Expected room %s, got %+v", roomId, event)
} else if len(event.Disinvite.Properties) > 0 {
t.Errorf("Room properties should be omitted, got %s", string(event.Disinvite.Properties))
} else if event.Disinvite.Properties != nil {
t.Errorf("Room properties should be omitted, got %s", string(*event.Disinvite.Properties))
} else if event.Disinvite.Reason != "disinvited" {
t.Errorf("Reason should be disinvited, got %s", event.Disinvite.Reason)
}
@ -631,8 +604,6 @@ func RunTestBackendServer_RoomDisinvite(t *testing.T) {
}
func TestBackendServer_RoomDisinviteDifferentRooms(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, hub, _, server := CreateBackendServerForTest(t)
client1 := NewTestClient(t, server, hub)
@ -654,8 +625,7 @@ func TestBackendServer_RoomDisinviteDifferentRooms(t *testing.T) {
if err != nil {
t.Fatal(err)
}
hello2, err := client2.RunUntilHello(ctx)
if err != nil {
if _, err := client2.RunUntilHello(ctx); err != nil {
t.Fatal(err)
}
@ -664,14 +634,16 @@ func TestBackendServer_RoomDisinviteDifferentRooms(t *testing.T) {
if _, err := client1.JoinRoom(ctx, roomId1); err != nil {
t.Fatal(err)
}
if err := client1.RunUntilJoined(ctx, hello1.Hello); err != nil {
t.Error(err)
}
roomId2 := "test-room2"
if _, err := client2.JoinRoom(ctx, roomId2); err != nil {
t.Fatal(err)
}
if err := client2.RunUntilJoined(ctx, hello2.Hello); err != nil {
// Ignore "join" events.
if err := client1.DrainMessages(ctx); err != nil {
t.Error(err)
}
if err := client2.DrainMessages(ctx); err != nil {
t.Error(err)
}
@ -729,7 +701,6 @@ func TestBackendServer_RoomDisinviteDifferentRooms(t *testing.T) {
UserIds: []string{
testDefaultUserId,
},
Properties: testRoomProperties,
},
}
@ -758,10 +729,8 @@ func TestBackendServer_RoomDisinviteDifferentRooms(t *testing.T) {
}
func TestBackendServer_RoomUpdate(t *testing.T) {
CatchLogForTest(t)
for _, backend := range eventBackendsForTest {
t.Run(backend, func(t *testing.T) {
t.Parallel()
RunTestBackendServer_RoomUpdate(t)
})
}
@ -781,7 +750,7 @@ func RunTestBackendServer_RoomUpdate(t *testing.T) {
if backend == nil {
t.Fatalf("Did not find backend")
}
room, err := hub.createRoom(roomId, emptyProperties, backend)
room, err := hub.createRoom(roomId, &emptyProperties, backend)
if err != nil {
t.Fatalf("Could not create room: %s", err)
}
@ -805,7 +774,7 @@ func RunTestBackendServer_RoomUpdate(t *testing.T) {
UserIds: []string{
userid,
},
Properties: roomProperties,
Properties: &roomProperties,
},
}
@ -833,8 +802,8 @@ func RunTestBackendServer_RoomUpdate(t *testing.T) {
t.Errorf("Expected update, got %+v", event)
} else if event.Update.RoomId != roomId {
t.Errorf("Expected room %s, got %+v", roomId, event)
} else if !bytes.Equal(event.Update.Properties, roomProperties) {
t.Errorf("Room properties don't match: expected %s, got %s", string(roomProperties), string(event.Update.Properties))
} else if event.Update.Properties == nil || !bytes.Equal(*event.Update.Properties, roomProperties) {
t.Errorf("Room properties don't match: expected %s, got %s", string(roomProperties), string(*event.Update.Properties))
}
// TODO: Use event to wait for asynchronous messages.
@ -844,16 +813,14 @@ func RunTestBackendServer_RoomUpdate(t *testing.T) {
if room == nil {
t.Fatalf("Room %s does not exist", roomId)
}
if string(room.Properties()) != string(roomProperties) {
t.Errorf("Expected properties %s for room %s, got %s", string(roomProperties), room.Id(), string(room.Properties()))
if string(*room.Properties()) != string(roomProperties) {
t.Errorf("Expected properties %s for room %s, got %s", string(roomProperties), room.Id(), string(*room.Properties()))
}
}
func TestBackendServer_RoomDelete(t *testing.T) {
CatchLogForTest(t)
for _, backend := range eventBackendsForTest {
t.Run(backend, func(t *testing.T) {
t.Parallel()
RunTestBackendServer_RoomDelete(t)
})
}
@ -873,7 +840,7 @@ func RunTestBackendServer_RoomDelete(t *testing.T) {
if backend == nil {
t.Fatalf("Did not find backend")
}
if _, err := hub.createRoom(roomId, emptyProperties, backend); err != nil {
if _, err := hub.createRoom(roomId, &emptyProperties, backend); err != nil {
t.Fatalf("Could not create room: %s", err)
}
@ -921,8 +888,8 @@ func RunTestBackendServer_RoomDelete(t *testing.T) {
t.Errorf("Expected disinvite, got %+v", event)
} else if event.Disinvite.RoomId != roomId {
t.Errorf("Expected room %s, got %+v", roomId, event)
} else if len(event.Disinvite.Properties) > 0 {
t.Errorf("Room properties should be omitted, got %s", string(event.Disinvite.Properties))
} else if event.Disinvite.Properties != nil {
t.Errorf("Room properties should be omitted, got %s", string(*event.Disinvite.Properties))
} else if event.Disinvite.Reason != "deleted" {
t.Errorf("Reason should be deleted, got %s", event.Disinvite.Reason)
}
@ -937,10 +904,8 @@ func RunTestBackendServer_RoomDelete(t *testing.T) {
}
func TestBackendServer_ParticipantsUpdatePermissions(t *testing.T) {
CatchLogForTest(t)
for _, subtest := range clusteredTests {
t.Run(subtest, func(t *testing.T) {
t.Parallel()
var hub1 *Hub
var hub2 *Hub
var server1 *httptest.Server
@ -1070,8 +1035,6 @@ func TestBackendServer_ParticipantsUpdatePermissions(t *testing.T) {
}
func TestBackendServer_ParticipantsUpdateEmptyPermissions(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, hub, _, server := CreateBackendServerForTest(t)
client := NewTestClient(t, server, hub)
@ -1157,8 +1120,6 @@ func TestBackendServer_ParticipantsUpdateEmptyPermissions(t *testing.T) {
}
func TestBackendServer_ParticipantsUpdateTimeout(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, hub, _, server := CreateBackendServerForTest(t)
client1 := NewTestClient(t, server, hub)
@ -1372,10 +1333,8 @@ func TestBackendServer_ParticipantsUpdateTimeout(t *testing.T) {
}
func TestBackendServer_InCallAll(t *testing.T) {
CatchLogForTest(t)
for _, subtest := range clusteredTests {
t.Run(subtest, func(t *testing.T) {
t.Parallel()
var hub1 *Hub
var hub2 *Hub
var server1 *httptest.Server
@ -1500,8 +1459,8 @@ func TestBackendServer_InCallAll(t *testing.T) {
t.Error(err)
} else if !in_call_1.All {
t.Errorf("All flag not set in message %+v", in_call_1)
} else if !bytes.Equal(in_call_1.InCall, []byte("7")) {
t.Errorf("Expected inCall flag 7, got %s", string(in_call_1.InCall))
} else if !bytes.Equal(*in_call_1.InCall, []byte("7")) {
t.Errorf("Expected inCall flag 7, got %s", string(*in_call_1.InCall))
}
if msg2_a, err := client2.RunUntilMessage(ctx); err != nil {
@ -1510,8 +1469,8 @@ func TestBackendServer_InCallAll(t *testing.T) {
t.Error(err)
} else if !in_call_1.All {
t.Errorf("All flag not set in message %+v", in_call_1)
} else if !bytes.Equal(in_call_1.InCall, []byte("7")) {
t.Errorf("Expected inCall flag 7, got %s", string(in_call_1.InCall))
} else if !bytes.Equal(*in_call_1.InCall, []byte("7")) {
t.Errorf("Expected inCall flag 7, got %s", string(*in_call_1.InCall))
}
if !room1.IsSessionInCall(session1) {
@ -1581,8 +1540,8 @@ func TestBackendServer_InCallAll(t *testing.T) {
t.Error(err)
} else if !in_call_1.All {
t.Errorf("All flag not set in message %+v", in_call_1)
} else if !bytes.Equal(in_call_1.InCall, []byte("0")) {
t.Errorf("Expected inCall flag 0, got %s", string(in_call_1.InCall))
} else if !bytes.Equal(*in_call_1.InCall, []byte("0")) {
t.Errorf("Expected inCall flag 0, got %s", string(*in_call_1.InCall))
}
if msg2_a, err := client2.RunUntilMessage(ctx); err != nil {
@ -1591,8 +1550,8 @@ func TestBackendServer_InCallAll(t *testing.T) {
t.Error(err)
} else if !in_call_1.All {
t.Errorf("All flag not set in message %+v", in_call_1)
} else if !bytes.Equal(in_call_1.InCall, []byte("0")) {
t.Errorf("Expected inCall flag 0, got %s", string(in_call_1.InCall))
} else if !bytes.Equal(*in_call_1.InCall, []byte("0")) {
t.Errorf("Expected inCall flag 0, got %s", string(*in_call_1.InCall))
}
if room1.IsSessionInCall(session1) {
@ -1624,8 +1583,6 @@ func TestBackendServer_InCallAll(t *testing.T) {
}
func TestBackendServer_RoomMessage(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, hub, _, server := CreateBackendServerForTest(t)
client := NewTestClient(t, server, hub)
@ -1659,7 +1616,7 @@ func TestBackendServer_RoomMessage(t *testing.T) {
msg := &BackendServerRoomRequest{
Type: "message",
Message: &BackendRoomMessageRequest{
Data: messageData,
Data: &messageData,
},
}
@ -1685,14 +1642,12 @@ func TestBackendServer_RoomMessage(t *testing.T) {
t.Error(err)
} else if message.RoomId != roomId {
t.Errorf("Expected message for room %s, got %s", roomId, message.RoomId)
} else if !bytes.Equal(messageData, message.Data) {
t.Errorf("Expected message data %s, got %s", string(messageData), string(message.Data))
} else if !bytes.Equal(messageData, *message.Data) {
t.Errorf("Expected message data %s, got %s", string(messageData), string(*message.Data))
}
}
func TestBackendServer_TurnCredentials(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, _, _, server := CreateBackendServerForTestWithTurn(t)
q := make(url.Values)
@ -1734,497 +1689,3 @@ func TestBackendServer_TurnCredentials(t *testing.T) {
t.Errorf("Expected the list of servers as %s, got %s", turnServers, cred.URIs)
}
}
func TestBackendServer_StatsAllowedIps(t *testing.T) {
CatchLogForTest(t)
config := goconf.NewConfigFile()
config.AddOption("app", "trustedproxies", "1.2.3.4")
config.AddOption("stats", "allowed_ips", "127.0.0.1, 192.168.0.1, 192.168.1.1/24")
_, backend, _, _, _, _ := CreateBackendServerForTestFromConfig(t, config)
allowed := []string{
"127.0.0.1",
"127.0.0.1:1234",
"192.168.0.1:1234",
"192.168.1.1:1234",
"192.168.1.100:1234",
}
notAllowed := []string{
"192.168.0.2:1234",
"10.1.2.3:1234",
}
for _, addr := range allowed {
addr := addr
t.Run(addr, func(t *testing.T) {
t.Parallel()
r1 := &http.Request{
RemoteAddr: addr,
}
if !backend.allowStatsAccess(r1) {
t.Errorf("should allow %s", addr)
}
if host, _, err := net.SplitHostPort(addr); err == nil {
addr = host
}
r2 := &http.Request{
RemoteAddr: "1.2.3.4:12345",
Header: http.Header{
textproto.CanonicalMIMEHeaderKey("x-real-ip"): []string{addr},
},
}
if !backend.allowStatsAccess(r2) {
t.Errorf("should allow %s", addr)
}
r3 := &http.Request{
RemoteAddr: "1.2.3.4:12345",
Header: http.Header{
textproto.CanonicalMIMEHeaderKey("x-forwarded-for"): []string{addr},
},
}
if !backend.allowStatsAccess(r3) {
t.Errorf("should allow %s", addr)
}
r4 := &http.Request{
RemoteAddr: "1.2.3.4:12345",
Header: http.Header{
textproto.CanonicalMIMEHeaderKey("x-forwarded-for"): []string{addr + ", 1.2.3.4:23456"},
},
}
if !backend.allowStatsAccess(r4) {
t.Errorf("should allow %s", addr)
}
})
}
for _, addr := range notAllowed {
addr := addr
t.Run(addr, func(t *testing.T) {
t.Parallel()
r := &http.Request{
RemoteAddr: addr,
}
if backend.allowStatsAccess(r) {
t.Errorf("should not allow %s", addr)
}
})
}
}
// Test_IsNumeric checks that isNumeric accepts plain unsigned decimal
// strings and rejects everything else (whitespace, signs, decimals,
// letters, empty input).
func Test_IsNumeric(t *testing.T) {
	t.Parallel()
	positives := []string{
		"0",
		"1",
		"12345",
	}
	negatives := []string{
		"",
		" ",
		" 0",
		"0 ",
		" 0 ",
		"-1",
		"1.2",
		"1a",
		"a1",
	}
	for _, candidate := range positives {
		if isNumeric(candidate) {
			continue
		}
		t.Errorf("%s should be numeric", candidate)
	}
	for _, candidate := range negatives {
		if !isNumeric(candidate) {
			continue
		}
		t.Errorf("%s should not be numeric", candidate)
	}
}
// TestBackendServer_DialoutNoSipBridge verifies that a backend "dialout"
// room request fails with "no_client_available" (HTTP 404) when the only
// connected internal client did not announce the "start-dialout" feature,
// i.e. no SIP bridge is available to place the call.
func TestBackendServer_DialoutNoSipBridge(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, hub, _, server := CreateBackendServerForTest(t)
client := NewTestClient(t, server, hub)
defer client.CloseWithBye()
// Plain internal hello, without the "start-dialout" feature.
if err := client.SendHelloInternal(); err != nil {
t.Fatal(err)
}
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
_, err := client.RunUntilHello(ctx)
if err != nil {
t.Fatal(err)
}
roomId := "12345"
msg := &BackendServerRoomRequest{
Type: "dialout",
Dialout: &BackendRoomDialoutRequest{
Number: "+1234567890",
},
}
data, err := json.Marshal(msg)
if err != nil {
t.Fatal(err)
}
res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data)
if err != nil {
t.Fatal(err)
}
defer res.Body.Close()
body, err := io.ReadAll(res.Body)
if err != nil {
t.Error(err)
}
// Without a dialout-capable client the server reports 404.
if res.StatusCode != http.StatusNotFound {
t.Fatalf("Expected error %d, got %s: %s", http.StatusNotFound, res.Status, string(body))
}
var response BackendServerRoomResponse
if err := json.Unmarshal(body, &response); err != nil {
t.Fatal(err)
}
if response.Type != "dialout" || response.Dialout == nil {
t.Fatalf("expected type dialout, got %s", string(body))
}
if response.Dialout.Error == nil {
t.Fatalf("expected dialout error, got %s", string(body))
}
if expected := "no_client_available"; response.Dialout.Error.Code != expected {
t.Errorf("expected error code %s, got %s", expected, string(body))
}
}
// TestBackendServer_DialoutAccepted verifies the happy path of a backend
// "dialout" room request: an internal client announcing "start-dialout"
// receives the dialout message, replies with an "accepted" status carrying
// a call id, and the HTTP response reports success (200) with that call id.
func TestBackendServer_DialoutAccepted(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, hub, _, server := CreateBackendServerForTest(t)
client := NewTestClient(t, server, hub)
defer client.CloseWithBye()
if err := client.SendHelloInternalWithFeatures([]string{"start-dialout"}); err != nil {
t.Fatal(err)
}
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
_, err := client.RunUntilHello(ctx)
if err != nil {
t.Fatal(err)
}
roomId := "12345"
callId := "call-123"
stopped := make(chan struct{})
// Simulate the SIP bridge: wait for the internal dialout request and
// answer it with an "accepted" status.
go func() {
defer close(stopped)
msg, err := client.RunUntilMessage(ctx)
if err != nil {
t.Error(err)
return
}
if msg.Type != "internal" || msg.Internal.Type != "dialout" {
t.Errorf("expected internal dialout message, got %+v", msg)
return
}
if msg.Internal.Dialout.RoomId != roomId {
t.Errorf("expected room id %s, got %+v", roomId, msg)
}
if url := server.URL + "/"; msg.Internal.Dialout.Backend != url {
t.Errorf("expected backend %s, got %+v", url, msg)
}
// Reply referencing the request id so the server can correlate it.
response := &ClientMessage{
Id: msg.Id,
Type: "internal",
Internal: &InternalClientMessage{
Type: "dialout",
Dialout: &DialoutInternalClientMessage{
Type: "status",
RoomId: msg.Internal.Dialout.RoomId,
Status: &DialoutStatusInternalClientMessage{
Status: "accepted",
CallId: callId,
},
},
},
}
if err := client.WriteJSON(response); err != nil {
t.Error(err)
}
}()
// Make sure the simulated bridge goroutine finished before the test ends.
defer func() {
<-stopped
}()
msg := &BackendServerRoomRequest{
Type: "dialout",
Dialout: &BackendRoomDialoutRequest{
Number: "+1234567890",
},
}
data, err := json.Marshal(msg)
if err != nil {
t.Fatal(err)
}
res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data)
if err != nil {
t.Fatal(err)
}
defer res.Body.Close()
body, err := io.ReadAll(res.Body)
if err != nil {
t.Error(err)
}
if res.StatusCode != http.StatusOK {
t.Fatalf("Expected error %d, got %s: %s", http.StatusOK, res.Status, string(body))
}
var response BackendServerRoomResponse
if err := json.Unmarshal(body, &response); err != nil {
t.Fatal(err)
}
if response.Type != "dialout" || response.Dialout == nil {
t.Fatalf("expected type dialout, got %s", string(body))
}
if response.Dialout.Error != nil {
t.Fatalf("expected dialout success, got %s", string(body))
}
if response.Dialout.CallId != callId {
t.Errorf("expected call id %s, got %s", callId, string(body))
}
}
// TestBackendServer_DialoutAcceptedCompat mirrors
// TestBackendServer_DialoutAccepted for the compatibility code path.
// NOTE(review): in this view the body is identical to the non-Compat test;
// presumably the compat variant should answer with an older/legacy status
// message format — confirm against the intended upstream version.
func TestBackendServer_DialoutAcceptedCompat(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, hub, _, server := CreateBackendServerForTest(t)
client := NewTestClient(t, server, hub)
defer client.CloseWithBye()
if err := client.SendHelloInternalWithFeatures([]string{"start-dialout"}); err != nil {
t.Fatal(err)
}
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
_, err := client.RunUntilHello(ctx)
if err != nil {
t.Fatal(err)
}
roomId := "12345"
callId := "call-123"
stopped := make(chan struct{})
// Simulated SIP bridge: accept the dialout request with a call id.
go func() {
defer close(stopped)
msg, err := client.RunUntilMessage(ctx)
if err != nil {
t.Error(err)
return
}
if msg.Type != "internal" || msg.Internal.Type != "dialout" {
t.Errorf("expected internal dialout message, got %+v", msg)
return
}
if msg.Internal.Dialout.RoomId != roomId {
t.Errorf("expected room id %s, got %+v", roomId, msg)
}
if url := server.URL + "/"; msg.Internal.Dialout.Backend != url {
t.Errorf("expected backend %s, got %+v", url, msg)
}
response := &ClientMessage{
Id: msg.Id,
Type: "internal",
Internal: &InternalClientMessage{
Type: "dialout",
Dialout: &DialoutInternalClientMessage{
Type: "status",
RoomId: msg.Internal.Dialout.RoomId,
Status: &DialoutStatusInternalClientMessage{
Status: "accepted",
CallId: callId,
},
},
},
}
if err := client.WriteJSON(response); err != nil {
t.Error(err)
}
}()
defer func() {
<-stopped
}()
msg := &BackendServerRoomRequest{
Type: "dialout",
Dialout: &BackendRoomDialoutRequest{
Number: "+1234567890",
},
}
data, err := json.Marshal(msg)
if err != nil {
t.Fatal(err)
}
res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data)
if err != nil {
t.Fatal(err)
}
defer res.Body.Close()
body, err := io.ReadAll(res.Body)
if err != nil {
t.Error(err)
}
if res.StatusCode != http.StatusOK {
t.Fatalf("Expected error %d, got %s: %s", http.StatusOK, res.Status, string(body))
}
var response BackendServerRoomResponse
if err := json.Unmarshal(body, &response); err != nil {
t.Fatal(err)
}
if response.Type != "dialout" || response.Dialout == nil {
t.Fatalf("expected type dialout, got %s", string(body))
}
if response.Dialout.Error != nil {
t.Fatalf("expected dialout success, got %s", string(body))
}
if response.Dialout.CallId != callId {
t.Errorf("expected call id %s, got %s", callId, string(body))
}
}
// TestBackendServer_DialoutRejected verifies the error path of a backend
// "dialout" room request: the dialout-capable internal client rejects the
// call with an error, and the HTTP response propagates that error code and
// message with status 502 (Bad Gateway).
func TestBackendServer_DialoutRejected(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, hub, _, server := CreateBackendServerForTest(t)
client := NewTestClient(t, server, hub)
defer client.CloseWithBye()
if err := client.SendHelloInternalWithFeatures([]string{"start-dialout"}); err != nil {
t.Fatal(err)
}
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
_, err := client.RunUntilHello(ctx)
if err != nil {
t.Fatal(err)
}
roomId := "12345"
errorCode := "error-code"
errorMessage := "rejected call"
stopped := make(chan struct{})
// Simulated SIP bridge: reject the dialout request with an error reply.
go func() {
defer close(stopped)
msg, err := client.RunUntilMessage(ctx)
if err != nil {
t.Error(err)
return
}
if msg.Type != "internal" || msg.Internal.Type != "dialout" {
t.Errorf("expected internal dialout message, got %+v", msg)
return
}
if msg.Internal.Dialout.RoomId != roomId {
t.Errorf("expected room id %s, got %+v", roomId, msg)
}
if url := server.URL + "/"; msg.Internal.Dialout.Backend != url {
t.Errorf("expected backend %s, got %+v", url, msg)
}
response := &ClientMessage{
Id: msg.Id,
Type: "internal",
Internal: &InternalClientMessage{
Type: "dialout",
Dialout: &DialoutInternalClientMessage{
Type: "error",
Error: NewError(errorCode, errorMessage),
},
},
}
if err := client.WriteJSON(response); err != nil {
t.Error(err)
}
}()
defer func() {
<-stopped
}()
msg := &BackendServerRoomRequest{
Type: "dialout",
Dialout: &BackendRoomDialoutRequest{
Number: "+1234567890",
},
}
data, err := json.Marshal(msg)
if err != nil {
t.Fatal(err)
}
res, err := performBackendRequest(server.URL+"/api/v1/room/"+roomId, data)
if err != nil {
t.Fatal(err)
}
defer res.Body.Close()
body, err := io.ReadAll(res.Body)
if err != nil {
t.Error(err)
}
// A rejected call surfaces as 502 from the backend API.
if res.StatusCode != http.StatusBadGateway {
t.Fatalf("Expected error %d, got %s: %s", http.StatusBadGateway, res.Status, string(body))
}
var response BackendServerRoomResponse
if err := json.Unmarshal(body, &response); err != nil {
t.Fatal(err)
}
if response.Type != "dialout" || response.Dialout == nil {
t.Fatalf("expected type dialout, got %s", string(body))
}
if response.Dialout.Error == nil {
t.Fatalf("expected dialout error, got %s", string(body))
}
if response.Dialout.Error.Code != errorCode {
t.Errorf("expected error code %s, got %s", errorCode, string(body))
}
if response.Dialout.Error.Message != errorMessage {
t.Errorf("expected error message %s, got %s", errorMessage, string(body))
}
}

View file

@ -24,10 +24,10 @@ package signaling
import (
"context"
"encoding/json"
"errors"
"fmt"
"log"
"net/url"
"sync"
"time"
"github.com/dlintw/goconf"
@ -43,10 +43,8 @@ type backendStorageEtcd struct {
initializedCtx context.Context
initializedFunc context.CancelFunc
wakeupChanForTesting chan struct{}
closeCtx context.Context
closeFunc context.CancelFunc
initializedWg sync.WaitGroup
wakeupChanForTesting chan bool
}
func NewBackendStorageEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient) (BackendStorage, error) {
@ -60,7 +58,6 @@ func NewBackendStorageEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient) (B
}
initializedCtx, initializedFunc := context.WithCancel(context.Background())
closeCtx, closeFunc := context.WithCancel(context.Background())
result := &backendStorageEtcd{
backendStorageCommon: backendStorageCommon{
backends: make(map[string][]*Backend),
@ -71,8 +68,6 @@ func NewBackendStorageEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient) (B
initializedCtx: initializedCtx,
initializedFunc: initializedFunc,
closeCtx: closeCtx,
closeFunc: closeFunc,
}
etcdClient.AddListener(result)
@ -88,86 +83,75 @@ func (s *backendStorageEtcd) WaitForInitialized(ctx context.Context) error {
}
}
func (s *backendStorageEtcd) SetWakeupForTesting(ch chan bool) {
s.mu.Lock()
defer s.mu.Unlock()
s.wakeupChanForTesting = ch
}
func (s *backendStorageEtcd) wakeupForTesting() {
if s.wakeupChanForTesting == nil {
return
}
select {
case s.wakeupChanForTesting <- struct{}{}:
case s.wakeupChanForTesting <- true:
default:
}
}
func (s *backendStorageEtcd) EtcdClientCreated(client *EtcdClient) {
s.initializedWg.Add(1)
go func() {
if err := client.WaitForConnection(s.closeCtx); err != nil {
if errors.Is(err, context.Canceled) {
return
}
panic(err)
if err := client.Watch(context.Background(), s.keyPrefix, s, clientv3.WithPrefix()); err != nil {
log.Printf("Error processing watch for %s: %s", s.keyPrefix, err)
}
}()
backoff, err := NewExponentialBackoff(initialWaitDelay, maxWaitDelay)
if err != nil {
panic(err)
}
for s.closeCtx.Err() == nil {
response, err := s.getBackends(s.closeCtx, client, s.keyPrefix)
go func() {
client.WaitForConnection()
waitDelay := initialWaitDelay
for {
response, err := s.getBackends(client, s.keyPrefix)
if err != nil {
if errors.Is(err, context.Canceled) {
return
} else if errors.Is(err, context.DeadlineExceeded) {
log.Printf("Timeout getting initial list of backends, retry in %s", backoff.NextWait())
if err == context.DeadlineExceeded {
log.Printf("Timeout getting initial list of backends, retry in %s", waitDelay)
} else {
log.Printf("Could not get initial list of backends, retry in %s: %s", backoff.NextWait(), err)
log.Printf("Could not get initial list of backends, retry in %s: %s", waitDelay, err)
}
backoff.Wait(s.closeCtx)
time.Sleep(waitDelay)
waitDelay = waitDelay * 2
if waitDelay > maxWaitDelay {
waitDelay = maxWaitDelay
}
continue
}
for _, ev := range response.Kvs {
s.EtcdKeyUpdated(client, string(ev.Key), ev.Value, nil)
s.EtcdKeyUpdated(client, string(ev.Key), ev.Value)
}
s.initializedWg.Wait()
s.initializedFunc()
nextRevision := response.Header.Revision + 1
prevRevision := nextRevision
backoff.Reset()
for s.closeCtx.Err() == nil {
var err error
if nextRevision, err = client.Watch(s.closeCtx, s.keyPrefix, nextRevision, s, clientv3.WithPrefix()); err != nil {
log.Printf("Error processing watch for %s (%s), retry in %s", s.keyPrefix, err, backoff.NextWait())
backoff.Wait(s.closeCtx)
continue
}
if nextRevision != prevRevision {
backoff.Reset()
prevRevision = nextRevision
} else {
log.Printf("Processing watch for %s interrupted, retry in %s", s.keyPrefix, backoff.NextWait())
backoff.Wait(s.closeCtx)
}
}
return
}
}()
}
func (s *backendStorageEtcd) EtcdWatchCreated(client *EtcdClient, key string) {
s.initializedWg.Done()
}
func (s *backendStorageEtcd) getBackends(ctx context.Context, client *EtcdClient, keyPrefix string) (*clientv3.GetResponse, error) {
ctx, cancel := context.WithTimeout(ctx, time.Second)
func (s *backendStorageEtcd) getBackends(client *EtcdClient, keyPrefix string) (*clientv3.GetResponse, error) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
return client.Get(ctx, keyPrefix, clientv3.WithPrefix())
}
func (s *backendStorageEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data []byte, prevValue []byte) {
func (s *backendStorageEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data []byte) {
var info BackendInformationEtcd
if err := json.Unmarshal(data, &info); err != nil {
log.Printf("Could not decode backend information %s: %s", string(data), err)
@ -227,7 +211,7 @@ func (s *backendStorageEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data
s.wakeupForTesting()
}
func (s *backendStorageEtcd) EtcdKeyDeleted(client *EtcdClient, key string, prevValue []byte) {
func (s *backendStorageEtcd) EtcdKeyDeleted(client *EtcdClient, key string) {
s.mu.Lock()
defer s.mu.Unlock()
@ -263,7 +247,6 @@ func (s *backendStorageEtcd) EtcdKeyDeleted(client *EtcdClient, key string, prev
func (s *backendStorageEtcd) Close() {
s.etcdClient.RemoveListener(s)
s.closeFunc()
}
func (s *backendStorageEtcd) Reload(config *goconf.ConfigFile) {

View file

@ -1,77 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2023 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"testing"
"github.com/dlintw/goconf"
"go.etcd.io/etcd/server/v3/embed"
)
// getWakeupChannelForTesting lazily creates (under s.mu) and returns the
// buffered channel that tests use to observe storage wakeups. Repeated
// calls return the same channel.
func (s *backendStorageEtcd) getWakeupChannelForTesting() <-chan struct{} {
s.mu.Lock()
defer s.mu.Unlock()
if s.wakeupChanForTesting != nil {
return s.wakeupChanForTesting
}
// Buffer of 1 so a notifier never blocks if the test is not receiving.
ch := make(chan struct{}, 1)
s.wakeupChanForTesting = ch
return ch
}
// testListener is an etcd client listener used in tests: as soon as the
// client is created it stops the embedded etcd server and signals "closed",
// letting the test exercise shutdown/leak behavior.
type testListener struct {
etcd *embed.Etcd
closed chan struct{}
}
// EtcdClientCreated stops the embedded etcd server and closes the "closed"
// channel to notify the waiting test.
func (tl *testListener) EtcdClientCreated(client *EtcdClient) {
tl.etcd.Server.Stop()
close(tl.closed)
}
// Test_BackendStorageEtcdNoLeak creates an etcd-backed backend configuration
// whose etcd server is stopped immediately (via testListener) and asserts,
// through ensureNoGoroutinesLeak, that closing the configuration leaves no
// goroutines behind.
func Test_BackendStorageEtcdNoLeak(t *testing.T) {
CatchLogForTest(t)
ensureNoGoroutinesLeak(t, func(t *testing.T) {
etcd, client := NewEtcdClientForTest(t)
tl := &testListener{
etcd: etcd,
closed: make(chan struct{}),
}
client.AddListener(tl)
defer client.RemoveListener(tl)
config := goconf.NewConfigFile()
config.AddOption("backend", "backendtype", "etcd")
config.AddOption("backend", "backendprefix", "/backends")
cfg, err := NewBackendConfiguration(config, client)
if err != nil {
t.Fatal(err)
}
// Wait until the listener has stopped the etcd server before closing.
<-tl.closed
cfg.Close()
})
}

View file

@ -66,7 +66,7 @@ func NewBackendStorageStatic(config *goconf.ConfigFile) (BackendStorage, error)
}
numBackends++
} else if backendIds, _ := config.GetString("backend", "backends"); backendIds != "" {
for host, configuredBackends := range getConfiguredHosts(backendIds, config, commonSecret) {
for host, configuredBackends := range getConfiguredHosts(backendIds, config) {
backends[host] = append(backends[host], configuredBackends...)
for _, be := range configuredBackends {
log.Printf("Backend %s added for %s", be.id, be.url)
@ -115,10 +115,6 @@ func NewBackendStorageStatic(config *goconf.ConfigFile) (BackendStorage, error)
}
}
if numBackends == 0 {
log.Printf("WARNING: No backends configured, client connections will not be possible.")
}
statsBackendsCurrent.Add(float64(numBackends))
return &backendStorageStatic{
backendStorageCommon: backendStorageCommon{
@ -196,7 +192,7 @@ func getConfiguredBackendIDs(backendIds string) (ids []string) {
return ids
}
func getConfiguredHosts(backendIds string, config *goconf.ConfigFile, commonSecret string) (hosts map[string][]*Backend) {
func getConfiguredHosts(backendIds string, config *goconf.ConfigFile) (hosts map[string][]*Backend) {
hosts = make(map[string][]*Backend)
for _, id := range getConfiguredBackendIDs(backendIds) {
u, _ := config.GetString(id, "url")
@ -220,10 +216,6 @@ func getConfiguredHosts(backendIds string, config *goconf.ConfigFile, commonSecr
}
secret, _ := config.GetString(id, "secret")
if secret == "" && commonSecret != "" {
log.Printf("Backend %s has no own shared secret set, using common shared secret", id)
secret = commonSecret
}
if u == "" || secret == "" {
log.Printf("Backend %s is missing or incomplete, skipping", id)
continue
@ -273,10 +265,8 @@ func (s *backendStorageStatic) Reload(config *goconf.ConfigFile) {
return
}
commonSecret, _ := config.GetString("backend", "secret")
if backendIds, _ := config.GetString("backend", "backends"); backendIds != "" {
configuredHosts := getConfiguredHosts(backendIds, config, commonSecret)
configuredHosts := getConfiguredHosts(backendIds, config)
// remove backends that are no longer configured
for hostname := range s.backends {

View file

@ -28,7 +28,6 @@ import (
)
func TestBackoff_Exponential(t *testing.T) {
t.Parallel()
backoff, err := NewExponentialBackoff(100*time.Millisecond, 500*time.Millisecond)
if err != nil {
t.Fatal(err)

View file

@ -48,6 +48,9 @@ const (
maxInvalidateInterval = time.Minute
)
// Can be overwritten by tests.
var getCapabilitiesNow = time.Now
type capabilitiesEntry struct {
nextUpdate time.Time
capabilities map[string]interface{}
@ -56,9 +59,6 @@ type capabilitiesEntry struct {
type Capabilities struct {
mu sync.RWMutex
// Can be overwritten by tests.
getNow func() time.Time
version string
pool *HttpClientPool
entries map[string]*capabilitiesEntry
@ -67,8 +67,6 @@ type Capabilities struct {
func NewCapabilities(version string, pool *HttpClientPool) (*Capabilities, error) {
result := &Capabilities{
getNow: time.Now,
version: version,
pool: pool,
entries: make(map[string]*capabilitiesEntry),
@ -88,15 +86,15 @@ type CapabilitiesVersion struct {
}
type CapabilitiesResponse struct {
Version CapabilitiesVersion `json:"version"`
Capabilities map[string]json.RawMessage `json:"capabilities"`
Version CapabilitiesVersion `json:"version"`
Capabilities map[string]*json.RawMessage `json:"capabilities"`
}
func (c *Capabilities) getCapabilities(key string) (map[string]interface{}, bool) {
c.mu.RLock()
defer c.mu.RUnlock()
now := c.getNow()
now := getCapabilitiesNow()
if entry, found := c.entries[key]; found && entry.nextUpdate.After(now) {
return entry.capabilities, true
}
@ -105,15 +103,14 @@ func (c *Capabilities) getCapabilities(key string) (map[string]interface{}, bool
}
func (c *Capabilities) setCapabilities(key string, capabilities map[string]interface{}) {
c.mu.Lock()
defer c.mu.Unlock()
now := c.getNow()
now := getCapabilitiesNow()
entry := &capabilitiesEntry{
nextUpdate: now.Add(CapabilitiesCacheDuration),
capabilities: capabilities,
}
c.mu.Lock()
defer c.mu.Unlock()
c.entries[key] = entry
}
@ -121,7 +118,7 @@ func (c *Capabilities) invalidateCapabilities(key string) {
c.mu.Lock()
defer c.mu.Unlock()
now := c.getNow()
now := getCapabilitiesNow()
if entry, found := c.nextInvalidate[key]; found && entry.After(now) {
return
}
@ -191,25 +188,25 @@ func (c *Capabilities) loadCapabilities(ctx context.Context, u *url.URL) (map[st
if err := json.Unmarshal(body, &ocs); err != nil {
log.Printf("Could not decode OCS response %s from %s: %s", string(body), capUrl.String(), err)
return nil, false, err
} else if ocs.Ocs == nil || len(ocs.Ocs.Data) == 0 {
} else if ocs.Ocs == nil || ocs.Ocs.Data == nil {
log.Printf("Incomplete OCS response %s from %s", string(body), u)
return nil, false, fmt.Errorf("incomplete OCS response")
}
var response CapabilitiesResponse
if err := json.Unmarshal(ocs.Ocs.Data, &response); err != nil {
log.Printf("Could not decode OCS response body %s from %s: %s", string(ocs.Ocs.Data), capUrl.String(), err)
if err := json.Unmarshal(*ocs.Ocs.Data, &response); err != nil {
log.Printf("Could not decode OCS response body %s from %s: %s", string(*ocs.Ocs.Data), capUrl.String(), err)
return nil, false, err
}
capaObj, found := response.Capabilities[AppNameSpreed]
if !found || len(capaObj) == 0 {
if !found || capaObj == nil {
log.Printf("No capabilities received for app spreed from %s: %+v", capUrl.String(), response)
return nil, false, nil
}
var capa map[string]interface{}
if err := json.Unmarshal(capaObj, &capa); err != nil {
if err := json.Unmarshal(*capaObj, &capa); err != nil {
log.Printf("Unsupported capabilities received for app spreed from %s: %+v", capUrl.String(), response)
return nil, false, nil
}

View file

@ -80,9 +80,9 @@ func NewCapabilitiesForTestWithCallback(t *testing.T, callback func(*Capabilitie
Version: CapabilitiesVersion{
Major: 20,
},
Capabilities: map[string]json.RawMessage{
"anotherApp": emptyArray,
"spreed": spreedCapa,
Capabilities: map[string]*json.RawMessage{
"anotherApp": (*json.RawMessage)(&emptyArray),
"spreed": (*json.RawMessage)(&spreedCapa),
},
}
@ -102,7 +102,7 @@ func NewCapabilitiesForTestWithCallback(t *testing.T, callback func(*Capabilitie
StatusCode: http.StatusOK,
Message: http.StatusText(http.StatusOK),
},
Data: data,
Data: (*json.RawMessage)(&data),
}
if data, err = json.Marshal(ocs); err != nil {
t.Fatal(err)
@ -120,25 +120,16 @@ func NewCapabilitiesForTest(t *testing.T) (*url.URL, *Capabilities) {
return NewCapabilitiesForTestWithCallback(t, nil)
}
func SetCapabilitiesGetNow(t *testing.T, capabilities *Capabilities, f func() time.Time) {
capabilities.mu.Lock()
defer capabilities.mu.Unlock()
old := capabilities.getNow
func SetCapabilitiesGetNow(t *testing.T, f func() time.Time) {
old := getCapabilitiesNow
t.Cleanup(func() {
capabilities.mu.Lock()
defer capabilities.mu.Unlock()
capabilities.getNow = old
getCapabilitiesNow = old
})
capabilities.getNow = f
getCapabilitiesNow = f
}
func TestCapabilities(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
url, capabilities := NewCapabilitiesForTest(t)
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
@ -201,11 +192,9 @@ func TestCapabilities(t *testing.T) {
}
func TestInvalidateCapabilities(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
var called atomic.Uint32
var called uint32
url, capabilities := NewCapabilitiesForTestWithCallback(t, func(cr *CapabilitiesResponse) {
called.Add(1)
atomic.AddUint32(&called, 1)
})
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
@ -220,7 +209,7 @@ func TestInvalidateCapabilities(t *testing.T) {
t.Errorf("expected direct response")
}
if value := called.Load(); value != 1 {
if value := atomic.LoadUint32(&called); value != 1 {
t.Errorf("expected called %d, got %d", 1, value)
}
@ -235,7 +224,7 @@ func TestInvalidateCapabilities(t *testing.T) {
t.Errorf("expected direct response")
}
if value := called.Load(); value != 2 {
if value := atomic.LoadUint32(&called); value != 2 {
t.Errorf("expected called %d, got %d", 2, value)
}
@ -250,12 +239,12 @@ func TestInvalidateCapabilities(t *testing.T) {
t.Errorf("expected cached response")
}
if value := called.Load(); value != 2 {
if value := atomic.LoadUint32(&called); value != 2 {
t.Errorf("expected called %d, got %d", 2, value)
}
// At a later time, invalidating can be done again.
SetCapabilitiesGetNow(t, capabilities, func() time.Time {
SetCapabilitiesGetNow(t, func() time.Time {
return time.Now().Add(2 * time.Minute)
})
@ -269,7 +258,7 @@ func TestInvalidateCapabilities(t *testing.T) {
t.Errorf("expected direct response")
}
if value := called.Load(); value != 3 {
if value := atomic.LoadUint32(&called); value != 3 {
t.Errorf("expected called %d, got %d", 3, value)
}
}

View file

@ -27,19 +27,26 @@ import (
"fmt"
"log"
"os"
"sync/atomic"
"sync"
"time"
)
var (
// CertificateCheckInterval defines the interval in which certificate files
// are checked for modifications.
CertificateCheckInterval = time.Minute
)
type CertificateReloader struct {
certFile string
certWatcher *FileWatcher
mu sync.Mutex
keyFile string
keyWatcher *FileWatcher
certFile string
keyFile string
certificate atomic.Pointer[tls.Certificate]
certificate *tls.Certificate
lastModified time.Time
reloadCounter atomic.Uint64
nextCheck time.Time
}
func NewCertificateReloader(certFile string, keyFile string) (*CertificateReloader, error) {
@ -48,43 +55,52 @@ func NewCertificateReloader(certFile string, keyFile string) (*CertificateReload
return nil, fmt.Errorf("could not load certificate / key: %w", err)
}
reloader := &CertificateReloader{
stat, err := os.Stat(certFile)
if err != nil {
return nil, fmt.Errorf("could not stat %s: %w", certFile, err)
}
return &CertificateReloader{
certFile: certFile,
keyFile: keyFile,
}
reloader.certificate.Store(&pair)
reloader.certWatcher, err = NewFileWatcher(certFile, reloader.reload)
if err != nil {
return nil, err
}
reloader.keyWatcher, err = NewFileWatcher(keyFile, reloader.reload)
if err != nil {
reloader.certWatcher.Close() // nolint
return nil, err
}
return reloader, nil
}
certificate: &pair,
lastModified: stat.ModTime(),
func (r *CertificateReloader) Close() {
r.keyWatcher.Close()
r.certWatcher.Close()
}
func (r *CertificateReloader) reload(filename string) {
log.Printf("reloading certificate from %s with %s", r.certFile, r.keyFile)
pair, err := tls.LoadX509KeyPair(r.certFile, r.keyFile)
if err != nil {
log.Printf("could not load certificate / key: %s", err)
return
}
r.certificate.Store(&pair)
r.reloadCounter.Add(1)
nextCheck: time.Now().Add(CertificateCheckInterval),
}, nil
}
func (r *CertificateReloader) getCertificate() (*tls.Certificate, error) {
return r.certificate.Load(), nil
r.mu.Lock()
defer r.mu.Unlock()
now := time.Now()
if now.Before(r.nextCheck) {
return r.certificate, nil
}
r.nextCheck = now.Add(CertificateCheckInterval)
stat, err := os.Stat(r.certFile)
if err != nil {
log.Printf("could not stat %s: %s", r.certFile, err)
return r.certificate, nil
}
if !stat.ModTime().Equal(r.lastModified) {
log.Printf("reloading certificate from %s with %s", r.certFile, r.keyFile)
pair, err := tls.LoadX509KeyPair(r.certFile, r.keyFile)
if err != nil {
log.Printf("could not load certificate / key: %s", err)
return r.certificate, nil
}
r.certificate = &pair
r.lastModified = stat.ModTime()
}
return r.certificate, nil
}
func (r *CertificateReloader) GetCertificate(h *tls.ClientHelloInfo) (*tls.Certificate, error) {
@ -95,17 +111,15 @@ func (r *CertificateReloader) GetClientCertificate(i *tls.CertificateRequestInfo
return r.getCertificate()
}
func (r *CertificateReloader) GetReloadCounter() uint64 {
return r.reloadCounter.Load()
}
type CertPoolReloader struct {
certFile string
certWatcher *FileWatcher
mu sync.Mutex
pool atomic.Pointer[x509.CertPool]
certFile string
reloadCounter atomic.Uint64
pool *x509.CertPool
lastModified time.Time
nextCheck time.Time
}
func loadCertPool(filename string) (*x509.CertPool, error) {
@ -128,38 +142,49 @@ func NewCertPoolReloader(certFile string) (*CertPoolReloader, error) {
return nil, err
}
reloader := &CertPoolReloader{
stat, err := os.Stat(certFile)
if err != nil {
return nil, fmt.Errorf("could not stat %s: %w", certFile, err)
}
return &CertPoolReloader{
certFile: certFile,
}
reloader.pool.Store(pool)
reloader.certWatcher, err = NewFileWatcher(certFile, reloader.reload)
if err != nil {
return nil, err
}
return reloader, nil
}
pool: pool,
lastModified: stat.ModTime(),
func (r *CertPoolReloader) Close() {
r.certWatcher.Close()
}
func (r *CertPoolReloader) reload(filename string) {
log.Printf("reloading certificate pool from %s", r.certFile)
pool, err := loadCertPool(r.certFile)
if err != nil {
log.Printf("could not load certificate pool: %s", err)
return
}
r.pool.Store(pool)
r.reloadCounter.Add(1)
nextCheck: time.Now().Add(CertificateCheckInterval),
}, nil
}
func (r *CertPoolReloader) GetCertPool() *x509.CertPool {
return r.pool.Load()
}
r.mu.Lock()
defer r.mu.Unlock()
func (r *CertPoolReloader) GetReloadCounter() uint64 {
return r.reloadCounter.Load()
now := time.Now()
if now.Before(r.nextCheck) {
return r.pool
}
r.nextCheck = now.Add(CertificateCheckInterval)
stat, err := os.Stat(r.certFile)
if err != nil {
log.Printf("could not stat %s: %s", r.certFile, err)
return r.pool
}
if !stat.ModTime().Equal(r.lastModified) {
log.Printf("reloading certificate pool from %s", r.certFile)
pool, err := loadCertPool(r.certFile)
if err != nil {
log.Printf("could not load certificate pool: %s", err)
return r.pool
}
r.pool = pool
r.lastModified = stat.ModTime()
}
return r.pool
}

View file

@ -22,41 +22,15 @@
package signaling
import (
"context"
"testing"
"time"
)
func UpdateCertificateCheckIntervalForTest(t *testing.T, interval time.Duration) {
t.Helper()
// Make sure test is not executed with "t.Parallel()"
t.Setenv("PARALLEL_CHECK", "1")
old := deduplicateWatchEvents.Load()
old := CertificateCheckInterval
t.Cleanup(func() {
deduplicateWatchEvents.Store(old)
CertificateCheckInterval = old
})
deduplicateWatchEvents.Store(int64(interval))
}
func (r *CertificateReloader) WaitForReload(ctx context.Context) error {
counter := r.GetReloadCounter()
for counter == r.GetReloadCounter() {
if err := ctx.Err(); err != nil {
return err
}
time.Sleep(time.Millisecond)
}
return nil
}
func (r *CertPoolReloader) WaitForReload(ctx context.Context) error {
counter := r.GetReloadCounter()
for counter == r.GetReloadCounter() {
if err := ctx.Err(); err != nil {
return err
}
time.Sleep(time.Millisecond)
}
return nil
CertificateCheckInterval = interval
}

155
client.go
View file

@ -23,16 +23,14 @@ package signaling
import (
"bytes"
"context"
"encoding/json"
"errors"
"log"
"net"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/gorilla/websocket"
"github.com/mailru/easyjson"
@ -95,49 +93,26 @@ type WritableClientMessage interface {
CloseAfterSend(session Session) bool
}
type HandlerClient interface {
Context() context.Context
RemoteAddr() string
Country() string
UserAgent() string
IsConnected() bool
IsAuthenticated() bool
GetSession() Session
SetSession(session Session)
SendError(e *Error) bool
SendByeResponse(message *ClientMessage) bool
SendByeResponseWithReason(message *ClientMessage, reason string) bool
SendMessage(message WritableClientMessage) bool
Close()
}
type ClientHandler interface {
OnClosed(HandlerClient)
OnMessageReceived(HandlerClient, []byte)
OnRTTReceived(HandlerClient, time.Duration)
OnClosed(*Client)
OnMessageReceived(*Client, []byte)
OnRTTReceived(*Client, time.Duration)
}
type ClientGeoIpHandler interface {
OnLookupCountry(HandlerClient) string
OnLookupCountry(*Client) string
}
type Client struct {
ctx context.Context
conn *websocket.Conn
addr string
handler ClientHandler
agent string
closed atomic.Int32
closed uint32
country *string
logRTT bool
handlerMu sync.RWMutex
handler ClientHandler
session atomic.Pointer[Session]
sessionId atomic.Pointer[string]
session unsafe.Pointer
mu sync.Mutex
@ -147,7 +122,7 @@ type Client struct {
messageChan chan *bytes.Buffer
}
func NewClient(ctx context.Context, conn *websocket.Conn, remoteAddress string, agent string, handler ClientHandler) (*Client, error) {
func NewClient(conn *websocket.Conn, remoteAddress string, agent string, handler ClientHandler) (*Client, error) {
remoteAddress = strings.TrimSpace(remoteAddress)
if remoteAddress == "" {
remoteAddress = "unknown remote address"
@ -158,7 +133,6 @@ func NewClient(ctx context.Context, conn *websocket.Conn, remoteAddress string,
}
client := &Client{
ctx: ctx,
agent: agent,
logRTT: true,
}
@ -169,69 +143,26 @@ func NewClient(ctx context.Context, conn *websocket.Conn, remoteAddress string,
func (c *Client) SetConn(conn *websocket.Conn, remoteAddress string, handler ClientHandler) {
c.conn = conn
c.addr = remoteAddress
c.SetHandler(handler)
c.handler = handler
c.closer = NewCloser()
c.messageChan = make(chan *bytes.Buffer, 16)
c.messagesDone = make(chan struct{})
}
func (c *Client) SetHandler(handler ClientHandler) {
c.handlerMu.Lock()
defer c.handlerMu.Unlock()
c.handler = handler
}
func (c *Client) getHandler() ClientHandler {
c.handlerMu.RLock()
defer c.handlerMu.RUnlock()
return c.handler
}
func (c *Client) Context() context.Context {
return c.ctx
}
func (c *Client) IsConnected() bool {
return c.closed.Load() == 0
return atomic.LoadUint32(&c.closed) == 0
}
func (c *Client) IsAuthenticated() bool {
return c.GetSession() != nil
}
func (c *Client) GetSession() Session {
session := c.session.Load()
if session == nil {
return nil
}
return *session
func (c *Client) GetSession() *ClientSession {
return (*ClientSession)(atomic.LoadPointer(&c.session))
}
func (c *Client) SetSession(session Session) {
if session == nil {
c.session.Store(nil)
} else {
c.session.Store(&session)
}
}
func (c *Client) SetSessionId(sessionId string) {
c.sessionId.Store(&sessionId)
}
func (c *Client) GetSessionId() string {
sessionId := c.sessionId.Load()
if sessionId == nil {
session := c.GetSession()
if session == nil {
return ""
}
return session.PublicId()
}
return *sessionId
func (c *Client) SetSession(session *ClientSession) {
atomic.StorePointer(&c.session, unsafe.Pointer(session))
}
func (c *Client) RemoteAddr() string {
@ -245,7 +176,7 @@ func (c *Client) UserAgent() string {
func (c *Client) Country() string {
if c.country == nil {
var country string
if handler, ok := c.getHandler().(ClientGeoIpHandler); ok {
if handler, ok := c.handler.(ClientGeoIpHandler); ok {
country = handler.OnLookupCountry(c)
} else {
country = unknownCountry
@ -257,7 +188,7 @@ func (c *Client) Country() string {
}
func (c *Client) Close() {
if c.closed.Load() >= 2 {
if atomic.LoadUint32(&c.closed) >= 2 {
// Prevent reentrant call in case this was the second closing
// step. Would otherwise deadlock in the "Once.Do" call path
// through "Hub.processUnregister" (which calls "Close" again).
@ -270,7 +201,7 @@ func (c *Client) Close() {
}
func (c *Client) doClose() {
closed := c.closed.Add(1)
closed := atomic.AddUint32(&c.closed, 1)
if closed == 1 {
c.mu.Lock()
defer c.mu.Unlock()
@ -284,7 +215,7 @@ func (c *Client) doClose() {
c.closer.Close()
<-c.messagesDone
c.getHandler().OnClosed(c)
c.handler.OnClosed(c)
c.SetSession(nil)
}
}
@ -304,14 +235,12 @@ func (c *Client) SendByeResponse(message *ClientMessage) bool {
func (c *Client) SendByeResponseWithReason(message *ClientMessage, reason string) bool {
response := &ServerMessage{
Type: "bye",
Bye: &ByeServerMessage{},
}
if message != nil {
response.Id = message.Id
}
if reason != "" {
if response.Bye == nil {
response.Bye = &ByeServerMessage{}
}
response.Bye.Reason = reason
}
return c.SendMessage(response)
@ -349,13 +278,13 @@ func (c *Client) ReadPump() {
rtt := now.Sub(time.Unix(0, ts))
if c.logRTT {
rtt_ms := rtt.Nanoseconds() / time.Millisecond.Nanoseconds()
if sessionId := c.GetSessionId(); sessionId != "" {
log.Printf("Client %s has RTT of %d ms (%s)", sessionId, rtt_ms, rtt)
if session := c.GetSession(); session != nil {
log.Printf("Client %s has RTT of %d ms (%s)", session.PublicId(), rtt_ms, rtt)
} else {
log.Printf("Client from %s has RTT of %d ms (%s)", addr, rtt_ms, rtt)
}
}
c.getHandler().OnRTTReceived(c, rtt)
c.handler.OnRTTReceived(c, rtt)
}
return nil
})
@ -364,15 +293,12 @@ func (c *Client) ReadPump() {
conn.SetReadDeadline(time.Now().Add(pongWait)) // nolint
messageType, reader, err := conn.NextReader()
if err != nil {
// Gorilla websocket hides the original net.Error, so also compare error messages
if errors.Is(err, net.ErrClosed) || strings.Contains(err.Error(), net.ErrClosed.Error()) {
break
} else if _, ok := err.(*websocket.CloseError); !ok || websocket.IsUnexpectedCloseError(err,
if _, ok := err.(*websocket.CloseError); !ok || websocket.IsUnexpectedCloseError(err,
websocket.CloseNormalClosure,
websocket.CloseGoingAway,
websocket.CloseNoStatusReceived) {
if sessionId := c.GetSessionId(); sessionId != "" {
log.Printf("Error reading from client %s: %v", sessionId, err)
if session := c.GetSession(); session != nil {
log.Printf("Error reading from client %s: %v", session.PublicId(), err)
} else {
log.Printf("Error reading from %s: %v", addr, err)
}
@ -381,8 +307,8 @@ func (c *Client) ReadPump() {
}
if messageType != websocket.TextMessage {
if sessionId := c.GetSessionId(); sessionId != "" {
log.Printf("Unsupported message type %v from client %s", messageType, sessionId)
if session := c.GetSession(); session != nil {
log.Printf("Unsupported message type %v from client %s", messageType, session.PublicId())
} else {
log.Printf("Unsupported message type %v from %s", messageType, addr)
}
@ -394,8 +320,8 @@ func (c *Client) ReadPump() {
decodeBuffer.Reset()
if _, err := decodeBuffer.ReadFrom(reader); err != nil {
bufferPool.Put(decodeBuffer)
if sessionId := c.GetSessionId(); sessionId != "" {
log.Printf("Error reading message from client %s: %v", sessionId, err)
if session := c.GetSession(); session != nil {
log.Printf("Error reading message from client %s: %v", session.PublicId(), err)
} else {
log.Printf("Error reading message from %s: %v", addr, err)
}
@ -403,7 +329,7 @@ func (c *Client) ReadPump() {
}
// Stop processing if the client was closed.
if !c.IsConnected() {
if atomic.LoadUint32(&c.closed) != 0 {
bufferPool.Put(decodeBuffer)
break
}
@ -419,7 +345,7 @@ func (c *Client) processMessages() {
break
}
c.getHandler().OnMessageReceived(c, buffer.Bytes())
c.handler.OnMessageReceived(c, buffer.Bytes())
bufferPool.Put(buffer)
}
@ -448,8 +374,8 @@ func (c *Client) writeInternal(message json.Marshaler) bool {
return false
}
if sessionId := c.GetSessionId(); sessionId != "" {
log.Printf("Could not send message %+v to client %s: %v", message, sessionId, err)
if session := c.GetSession(); session != nil {
log.Printf("Could not send message %+v to client %s: %v", message, session.PublicId(), err)
} else {
log.Printf("Could not send message %+v to %s: %v", message, c.RemoteAddr(), err)
}
@ -461,8 +387,8 @@ func (c *Client) writeInternal(message json.Marshaler) bool {
close:
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
if err := c.conn.WriteMessage(websocket.CloseMessage, closeData); err != nil {
if sessionId := c.GetSessionId(); sessionId != "" {
log.Printf("Could not send close message to client %s: %v", sessionId, err)
if session := c.GetSession(); session != nil {
log.Printf("Could not send close message to client %s: %v", session.PublicId(), err)
} else {
log.Printf("Could not send close message to %s: %v", c.RemoteAddr(), err)
}
@ -488,8 +414,8 @@ func (c *Client) writeError(e error) bool { // nolint
closeData := websocket.FormatCloseMessage(websocket.CloseInternalServerErr, e.Error())
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
if err := c.conn.WriteMessage(websocket.CloseMessage, closeData); err != nil {
if sessionId := c.GetSessionId(); sessionId != "" {
log.Printf("Could not send close message to client %s: %v", sessionId, err)
if session := c.GetSession(); session != nil {
log.Printf("Could not send close message to client %s: %v", session.PublicId(), err)
} else {
log.Printf("Could not send close message to %s: %v", c.RemoteAddr(), err)
}
@ -520,6 +446,7 @@ func (c *Client) writeMessageLocked(message WritableClientMessage) bool {
go session.Close()
}
go c.Close()
return false
}
return true
@ -536,8 +463,8 @@ func (c *Client) sendPing() bool {
msg := strconv.FormatInt(now, 10)
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
if err := c.conn.WriteMessage(websocket.PingMessage, []byte(msg)); err != nil {
if sessionId := c.GetSessionId(); sessionId != "" {
log.Printf("Could not send ping to client %s: %v", sessionId, err)
if session := c.GetSession(); session != nil {
log.Printf("Could not send ping to client %s: %v", session.PublicId(), err)
} else {
log.Printf("Could not send ping to %s: %v", c.RemoteAddr(), err)
}

View file

@ -81,8 +81,8 @@ const (
)
type Stats struct {
numRecvMessages atomic.Uint64
numSentMessages atomic.Uint64
numRecvMessages uint64
numSentMessages uint64
resetRecvMessages uint64
resetSentMessages uint64
@ -90,8 +90,8 @@ type Stats struct {
}
func (s *Stats) reset(start time.Time) {
s.resetRecvMessages = s.numRecvMessages.Load()
s.resetSentMessages = s.numSentMessages.Load()
s.resetRecvMessages = atomic.AddUint64(&s.numRecvMessages, 0)
s.resetSentMessages = atomic.AddUint64(&s.numSentMessages, 0)
s.start = start
}
@ -103,9 +103,9 @@ func (s *Stats) Log() {
return
}
totalSentMessages := s.numSentMessages.Load()
totalSentMessages := atomic.AddUint64(&s.numSentMessages, 0)
sentMessages := totalSentMessages - s.resetSentMessages
totalRecvMessages := s.numRecvMessages.Load()
totalRecvMessages := atomic.AddUint64(&s.numRecvMessages, 0)
recvMessages := totalRecvMessages - s.resetRecvMessages
log.Printf("Stats: sent=%d (%d/sec), recv=%d (%d/sec), delta=%d",
totalSentMessages, sentMessages/perSec,
@ -125,9 +125,9 @@ type SignalingClient struct {
conn *websocket.Conn
stats *Stats
closed atomic.Bool
closed uint32
stopChan chan struct{}
stopChan chan bool
lock sync.Mutex
privateSessionId string
@ -149,7 +149,7 @@ func NewSignalingClient(cookie *securecookie.SecureCookie, url string, stats *St
stats: stats,
stopChan: make(chan struct{}),
stopChan: make(chan bool),
}
doneWg.Add(2)
go func() {
@ -164,12 +164,15 @@ func NewSignalingClient(cookie *securecookie.SecureCookie, url string, stats *St
}
func (c *SignalingClient) Close() {
if !c.closed.CompareAndSwap(false, true) {
if !atomic.CompareAndSwapUint32(&c.closed, 0, 1) {
return
}
// Signal writepump to terminate
close(c.stopChan)
select {
case c.stopChan <- true:
default:
}
c.lock.Lock()
c.publicSessionId = ""
@ -197,7 +200,7 @@ func (c *SignalingClient) Send(message *signaling.ClientMessage) {
}
func (c *SignalingClient) processMessage(message *signaling.ServerMessage) {
c.stats.numRecvMessages.Add(1)
atomic.AddUint64(&c.stats.numRecvMessages, 1)
switch message.Type {
case "hello":
c.processHelloMessage(message)
@ -248,7 +251,7 @@ func (c *SignalingClient) PublicSessionId() string {
func (c *SignalingClient) processMessageMessage(message *signaling.ServerMessage) {
var msg MessagePayload
if err := json.Unmarshal(message.Message.Data, &msg); err != nil {
if err := json.Unmarshal(*message.Message.Data, &msg); err != nil {
log.Println("Error in unmarshal", err)
return
}
@ -334,7 +337,7 @@ func (c *SignalingClient) writeInternal(message *signaling.ClientMessage) bool {
}
writer.Close()
c.stats.numSentMessages.Add(1)
atomic.AddUint64(&c.stats.numSentMessages, 1)
return true
close:
@ -383,7 +386,7 @@ func (c *SignalingClient) SendMessages(clients []*SignalingClient) {
sessionIds[c] = c.PublicSessionId()
}
for !c.closed.Load() {
for atomic.LoadUint32(&c.closed) == 0 {
now := time.Now()
sender := c
@ -404,7 +407,7 @@ func (c *SignalingClient) SendMessages(clients []*SignalingClient) {
Type: "session",
SessionId: sessionIds[recipient],
},
Data: data,
Data: (*json.RawMessage)(&data),
},
}
sender.Send(msg)
@ -461,7 +464,7 @@ func registerAuthHandler(router *mux.Router) {
StatusCode: http.StatusOK,
Message: http.StatusText(http.StatusOK),
},
Data: rawdata,
Data: &rawdata,
},
}
@ -601,9 +604,9 @@ func main() {
Type: "hello",
Hello: &signaling.HelloClientMessage{
Version: signaling.HelloVersionV1,
Auth: &signaling.HelloClientMessageAuth{
Auth: signaling.HelloClientMessageAuth{
Url: backendUrl + "/auth",
Params: json.RawMessage("{}"),
Params: &json.RawMessage{'{', '}'},
},
},
}

View file

@ -31,35 +31,36 @@ import (
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/pion/sdp/v3"
)
var (
// Sessions expire 30 seconds after the connection closed.
sessionExpireDuration = 30 * time.Second
// Warn if a session has 32 or more pending messages.
warnPendingMessagesCount = 32
PathToOcsSignalingBackend = "ocs/v2.php/apps/spreed/api/v1/signaling/backend"
)
// ResponseHandlerFunc will return "true" has been fully processed.
type ResponseHandlerFunc func(message *ClientMessage) bool
type ClientSession struct {
roomJoinTime int64
inCall uint32
hub *Hub
events AsyncEvents
privateId string
publicId string
data *SessionIdData
ctx context.Context
closeFunc context.CancelFunc
clientType string
features []string
userId string
userData json.RawMessage
userData *json.RawMessage
inCall Flags
supportsPermissions bool
permissions map[Permission]bool
@ -67,18 +68,17 @@ type ClientSession struct {
backendUrl string
parsedBackendUrl *url.URL
expires time.Time
mu sync.Mutex
client HandlerClient
room atomic.Pointer[Room]
roomJoinTime atomic.Int64
roomSessionIdLock sync.RWMutex
roomSessionId string
client *Client
room unsafe.Pointer
roomSessionId string
publisherWaiters ChannelWaiters
publishers map[StreamType]McuPublisher
publishers map[string]McuPublisher
subscribers map[string]McuSubscriber
pendingClientMessages []*ServerMessage
@ -86,24 +86,15 @@ type ClientSession struct {
hasPendingParticipantsUpdate bool
virtualSessions map[*VirtualSession]bool
seenJoinedLock sync.Mutex
seenJoinedEvents map[string]bool
responseHandlersLock sync.Mutex
responseHandlers map[string]ResponseHandlerFunc
}
func NewClientSession(hub *Hub, privateId string, publicId string, data *SessionIdData, backend *Backend, hello *HelloClientMessage, auth *BackendClientAuthResponse) (*ClientSession, error) {
ctx, closeFunc := context.WithCancel(context.Background())
s := &ClientSession{
hub: hub,
events: hub.events,
privateId: privateId,
publicId: publicId,
data: data,
ctx: ctx,
closeFunc: closeFunc,
clientType: hello.Auth.Type,
features: hello.Features,
@ -147,10 +138,6 @@ func NewClientSession(hub *Hub, privateId string, publicId string, data *Session
return s, nil
}
func (s *ClientSession) Context() context.Context {
return s.ctx
}
func (s *ClientSession) PrivateId() string {
return s.privateId
}
@ -160,8 +147,6 @@ func (s *ClientSession) PublicId() string {
}
func (s *ClientSession) RoomSessionId() string {
s.roomSessionIdLock.RLock()
defer s.roomSessionIdLock.RUnlock()
return s.roomSessionId
}
@ -175,7 +160,7 @@ func (s *ClientSession) ClientType() string {
// GetInCall is only used for internal clients.
func (s *ClientSession) GetInCall() int {
return int(s.inCall.Get())
return int(atomic.LoadUint32(&s.inCall))
}
func (s *ClientSession) SetInCall(inCall int) bool {
@ -183,7 +168,16 @@ func (s *ClientSession) SetInCall(inCall int) bool {
inCall = 0
}
return s.inCall.Set(uint32(inCall))
for {
old := atomic.LoadUint32(&s.inCall)
if old == uint32(inCall) {
return false
}
if atomic.CompareAndSwapUint32(&s.inCall, old, uint32(inCall)) {
return true
}
}
}
func (s *ClientSession) GetFeatures() []string {
@ -315,29 +309,40 @@ func (s *ClientSession) UserId() string {
return userId
}
func (s *ClientSession) UserData() json.RawMessage {
func (s *ClientSession) UserData() *json.RawMessage {
return s.userData
}
func (s *ClientSession) SetRoom(room *Room) {
s.room.Store(room)
if room != nil {
s.roomJoinTime.Store(time.Now().UnixNano())
} else {
s.roomJoinTime.Store(0)
}
func (s *ClientSession) StartExpire() {
// The hub mutex must be held when calling this method.
s.expires = time.Now().Add(sessionExpireDuration)
s.hub.expiredSessions[s] = true
}
s.seenJoinedLock.Lock()
defer s.seenJoinedLock.Unlock()
s.seenJoinedEvents = nil
func (s *ClientSession) StopExpire() {
// The hub mutex must be held when calling this method.
delete(s.hub.expiredSessions, s)
}
func (s *ClientSession) IsExpired(now time.Time) bool {
return now.After(s.expires)
}
func (s *ClientSession) SetRoom(room *Room) {
atomic.StorePointer(&s.room, unsafe.Pointer(room))
if room != nil {
atomic.StoreInt64(&s.roomJoinTime, time.Now().UnixNano())
} else {
atomic.StoreInt64(&s.roomJoinTime, 0)
}
}
func (s *ClientSession) GetRoom() *Room {
return s.room.Load()
return (*Room)(atomic.LoadPointer(&s.room))
}
func (s *ClientSession) getRoomJoinTime() time.Time {
t := s.roomJoinTime.Load()
t := atomic.LoadInt64(&s.roomJoinTime)
if t == 0 {
return time.Time{}
}
@ -347,8 +352,8 @@ func (s *ClientSession) getRoomJoinTime() time.Time {
func (s *ClientSession) releaseMcuObjects() {
if len(s.publishers) > 0 {
go func(publishers map[StreamType]McuPublisher) {
ctx := context.Background()
go func(publishers map[string]McuPublisher) {
ctx := context.TODO()
for _, publisher := range publishers {
publisher.Close(ctx)
}
@ -357,7 +362,7 @@ func (s *ClientSession) releaseMcuObjects() {
}
if len(s.subscribers) > 0 {
go func(subscribers map[string]McuSubscriber) {
ctx := context.Background()
ctx := context.TODO()
for _, subscriber := range subscribers {
subscriber.Close(ctx)
}
@ -371,7 +376,6 @@ func (s *ClientSession) Close() {
}
func (s *ClientSession) closeAndWait(wait bool) {
s.closeFunc()
s.hub.removeSession(s)
s.mu.Lock()
@ -404,39 +408,9 @@ func (s *ClientSession) SubscribeEvents() error {
return s.events.RegisterSessionListener(s.publicId, s.backend, s)
}
func (s *ClientSession) UpdateRoomSessionId(roomSessionId string) error {
s.roomSessionIdLock.Lock()
defer s.roomSessionIdLock.Unlock()
if s.roomSessionId == roomSessionId {
return nil
}
if err := s.hub.roomSessions.SetRoomSession(s, roomSessionId); err != nil {
return err
}
if roomSessionId != "" {
if room := s.GetRoom(); room != nil {
log.Printf("Session %s updated room session id to %s in room %s", s.PublicId(), roomSessionId, room.Id())
} else {
log.Printf("Session %s updated room session id to %s in unknown room", s.PublicId(), roomSessionId)
}
} else {
if room := s.GetRoom(); room != nil {
log.Printf("Session %s cleared room session id in room %s", s.PublicId(), room.Id())
} else {
log.Printf("Session %s cleared room session id in unknown room", s.PublicId())
}
}
s.roomSessionId = roomSessionId
return nil
}
func (s *ClientSession) SubscribeRoomEvents(roomid string, roomSessionId string) error {
s.roomSessionIdLock.Lock()
defer s.roomSessionIdLock.Unlock()
s.mu.Lock()
defer s.mu.Unlock()
if err := s.events.RegisterRoomListener(roomid, s.backend, s); err != nil {
return err
@ -495,9 +469,6 @@ func (s *ClientSession) doUnsubscribeRoomEvents(notify bool) {
s.events.UnregisterRoomListener(room.Id(), s.Backend(), s)
}
s.hub.roomSessions.DeleteRoomSession(s)
s.roomSessionIdLock.Lock()
defer s.roomSessionIdLock.Unlock()
if notify && room != nil && s.roomSessionId != "" {
// Notify
go func(sid string) {
@ -515,14 +486,14 @@ func (s *ClientSession) doUnsubscribeRoomEvents(notify bool) {
s.roomSessionId = ""
}
func (s *ClientSession) ClearClient(client HandlerClient) {
func (s *ClientSession) ClearClient(client *Client) {
s.mu.Lock()
defer s.mu.Unlock()
s.clearClientLocked(client)
}
func (s *ClientSession) clearClientLocked(client HandlerClient) {
func (s *ClientSession) clearClientLocked(client *Client) {
if s.client == nil {
return
} else if client != nil && s.client != client {
@ -535,18 +506,18 @@ func (s *ClientSession) clearClientLocked(client HandlerClient) {
prevClient.SetSession(nil)
}
func (s *ClientSession) GetClient() HandlerClient {
func (s *ClientSession) GetClient() *Client {
s.mu.Lock()
defer s.mu.Unlock()
return s.getClientUnlocked()
}
func (s *ClientSession) getClientUnlocked() HandlerClient {
func (s *ClientSession) getClientUnlocked() *Client {
return s.client
}
func (s *ClientSession) SetClient(client HandlerClient) HandlerClient {
func (s *ClientSession) SetClient(client *Client) *Client {
if client == nil {
panic("Use ClearClient to set the client to nil")
}
@ -568,12 +539,12 @@ func (s *ClientSession) SetClient(client HandlerClient) HandlerClient {
return prev
}
func (s *ClientSession) sendOffer(client McuClient, sender string, streamType StreamType, offer map[string]interface{}) {
func (s *ClientSession) sendOffer(client McuClient, sender string, streamType string, offer map[string]interface{}) {
offer_message := &AnswerOfferMessage{
To: s.PublicId(),
From: sender,
Type: "offer",
RoomType: string(streamType),
RoomType: streamType,
Payload: offer,
Sid: client.Sid(),
}
@ -589,19 +560,19 @@ func (s *ClientSession) sendOffer(client McuClient, sender string, streamType St
Type: "session",
SessionId: sender,
},
Data: offer_data,
Data: (*json.RawMessage)(&offer_data),
},
}
s.sendMessageUnlocked(response_message)
}
func (s *ClientSession) sendCandidate(client McuClient, sender string, streamType StreamType, candidate interface{}) {
func (s *ClientSession) sendCandidate(client McuClient, sender string, streamType string, candidate interface{}) {
candidate_message := &AnswerOfferMessage{
To: s.PublicId(),
From: sender,
Type: "candidate",
RoomType: string(streamType),
RoomType: streamType,
Payload: map[string]interface{}{
"candidate": candidate,
},
@ -619,7 +590,7 @@ func (s *ClientSession) sendCandidate(client McuClient, sender string, streamTyp
Type: "session",
SessionId: sender,
},
Data: candidate_data,
Data: (*json.RawMessage)(&candidate_data),
},
}
@ -735,6 +706,23 @@ func (s *ClientSession) SubscriberClosed(subscriber McuSubscriber) {
}
}
type SdpError struct {
message string
}
func (e *SdpError) Error() string {
return e.message
}
type WrappedSdpError struct {
SdpError
err error
}
func (e *WrappedSdpError) Unwrap() error {
return e.err
}
type PermissionError struct {
permission Permission
}
@ -747,10 +735,23 @@ func (e *PermissionError) Error() string {
return fmt.Sprintf("permission \"%s\" not found", e.permission)
}
func (s *ClientSession) isSdpAllowedToSendLocked(sdp *sdp.SessionDescription) (MediaType, error) {
if sdp == nil {
// Should have already been checked when data was validated.
return 0, ErrNoSdp
func (s *ClientSession) isSdpAllowedToSendLocked(payload map[string]interface{}) (MediaType, error) {
sdpValue, found := payload["sdp"]
if !found {
return 0, &SdpError{"payload does not contain a sdp"}
}
sdpText, ok := sdpValue.(string)
if !ok {
return 0, &SdpError{"payload does not contain a valid sdp"}
}
var sdp sdp.SessionDescription
if err := sdp.Unmarshal([]byte(sdpText)); err != nil {
return 0, &WrappedSdpError{
SdpError: SdpError{
message: fmt.Sprintf("could not parse sdp: %s", err),
},
err: err,
}
}
var mediaTypes MediaType
@ -788,8 +789,8 @@ func (s *ClientSession) IsAllowedToSend(data *MessageClientMessageData) error {
// Client is allowed to publish any media (audio / video).
return nil
} else if data != nil && data.Type == "offer" {
// Check what user is trying to publish and check permissions accordingly.
if _, err := s.isSdpAllowedToSendLocked(data.offerSdp); err != nil {
// Parse SDP to check what user is trying to publish and check permissions accordingly.
if _, err := s.isSdpAllowedToSendLocked(data.Payload); err != nil {
return err
}
@ -804,22 +805,22 @@ func (s *ClientSession) IsAllowedToSend(data *MessageClientMessageData) error {
}
}
func (s *ClientSession) CheckOfferType(streamType StreamType, data *MessageClientMessageData) (MediaType, error) {
func (s *ClientSession) CheckOfferType(streamType string, data *MessageClientMessageData) (MediaType, error) {
s.mu.Lock()
defer s.mu.Unlock()
return s.checkOfferTypeLocked(streamType, data)
}
func (s *ClientSession) checkOfferTypeLocked(streamType StreamType, data *MessageClientMessageData) (MediaType, error) {
if streamType == StreamTypeScreen {
func (s *ClientSession) checkOfferTypeLocked(streamType string, data *MessageClientMessageData) (MediaType, error) {
if streamType == streamTypeScreen {
if !s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_SCREEN) {
return 0, &PermissionError{PERMISSION_MAY_PUBLISH_SCREEN}
}
return MediaTypeScreen, nil
} else if data != nil && data.Type == "offer" {
mediaTypes, err := s.isSdpAllowedToSendLocked(data.offerSdp)
mediaTypes, err := s.isSdpAllowedToSendLocked(data.Payload)
if err != nil {
return 0, err
}
@ -830,7 +831,7 @@ func (s *ClientSession) checkOfferTypeLocked(streamType StreamType, data *Messag
return 0, nil
}
func (s *ClientSession) GetOrCreatePublisher(ctx context.Context, mcu Mcu, streamType StreamType, data *MessageClientMessageData) (McuPublisher, error) {
func (s *ClientSession) GetOrCreatePublisher(ctx context.Context, mcu Mcu, streamType string, data *MessageClientMessageData) (McuPublisher, error) {
s.mu.Lock()
defer s.mu.Unlock()
@ -848,7 +849,7 @@ func (s *ClientSession) GetOrCreatePublisher(ctx context.Context, mcu Mcu, strea
bitrate := data.Bitrate
if backend := s.Backend(); backend != nil {
var maxBitrate int
if streamType == StreamTypeScreen {
if streamType == streamTypeScreen {
maxBitrate = backend.maxScreenBitrate
} else {
maxBitrate = backend.maxStreamBitrate
@ -865,12 +866,12 @@ func (s *ClientSession) GetOrCreatePublisher(ctx context.Context, mcu Mcu, strea
return nil, err
}
if s.publishers == nil {
s.publishers = make(map[StreamType]McuPublisher)
s.publishers = make(map[string]McuPublisher)
}
if prev, found := s.publishers[streamType]; found {
// Another thread created the publisher while we were waiting.
go func(pub McuPublisher) {
closeCtx := context.Background()
closeCtx := context.TODO()
pub.Close(closeCtx)
}(publisher)
publisher = prev
@ -886,18 +887,18 @@ func (s *ClientSession) GetOrCreatePublisher(ctx context.Context, mcu Mcu, strea
return publisher, nil
}
func (s *ClientSession) getPublisherLocked(streamType StreamType) McuPublisher {
func (s *ClientSession) getPublisherLocked(streamType string) McuPublisher {
return s.publishers[streamType]
}
func (s *ClientSession) GetPublisher(streamType StreamType) McuPublisher {
func (s *ClientSession) GetPublisher(streamType string) McuPublisher {
s.mu.Lock()
defer s.mu.Unlock()
return s.getPublisherLocked(streamType)
}
func (s *ClientSession) GetOrWaitForPublisher(ctx context.Context, streamType StreamType) McuPublisher {
func (s *ClientSession) GetOrWaitForPublisher(ctx context.Context, streamType string) McuPublisher {
s.mu.Lock()
defer s.mu.Unlock()
@ -926,18 +927,17 @@ func (s *ClientSession) GetOrWaitForPublisher(ctx context.Context, streamType St
}
}
func (s *ClientSession) GetOrCreateSubscriber(ctx context.Context, mcu Mcu, id string, streamType StreamType) (McuSubscriber, error) {
func (s *ClientSession) GetOrCreateSubscriber(ctx context.Context, mcu Mcu, id string, streamType string) (McuSubscriber, error) {
s.mu.Lock()
defer s.mu.Unlock()
// TODO(jojo): Add method to remove subscribers.
subscriber, found := s.subscribers[getStreamId(id, streamType)]
subscriber, found := s.subscribers[id+"|"+streamType]
if !found {
client := s.getClientUnlocked()
s.mu.Unlock()
var err error
subscriber, err = mcu.NewSubscriber(ctx, s, id, streamType, client)
subscriber, err = mcu.NewSubscriber(ctx, s, id, streamType)
s.mu.Lock()
if err != nil {
return nil, err
@ -945,15 +945,15 @@ func (s *ClientSession) GetOrCreateSubscriber(ctx context.Context, mcu Mcu, id s
if s.subscribers == nil {
s.subscribers = make(map[string]McuSubscriber)
}
if prev, found := s.subscribers[getStreamId(id, streamType)]; found {
if prev, found := s.subscribers[id+"|"+streamType]; found {
// Another thread created the subscriber while we were waiting.
go func(sub McuSubscriber) {
closeCtx := context.Background()
closeCtx := context.TODO()
sub.Close(closeCtx)
}(subscriber)
subscriber = prev
} else {
s.subscribers[getStreamId(id, streamType)] = subscriber
s.subscribers[id+"|"+streamType] = subscriber
}
log.Printf("Subscribing %s from %s as %s in session %s", streamType, id, subscriber.Id(), s.PublicId())
}
@ -961,11 +961,11 @@ func (s *ClientSession) GetOrCreateSubscriber(ctx context.Context, mcu Mcu, id s
return subscriber, nil
}
func (s *ClientSession) GetSubscriber(id string, streamType StreamType) McuSubscriber {
func (s *ClientSession) GetSubscriber(id string, streamType string) McuSubscriber {
s.mu.Lock()
defer s.mu.Unlock()
return s.subscribers[getStreamId(id, streamType)]
return s.subscribers[id+"|"+streamType]
}
func (s *ClientSession) ProcessAsyncRoomMessage(message *AsyncMessage) {
@ -989,10 +989,10 @@ func (s *ClientSession) processAsyncMessage(message *AsyncMessage) {
defer s.mu.Unlock()
if !s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_MEDIA) {
if publisher, found := s.publishers[StreamTypeVideo]; found {
if publisher, found := s.publishers[streamTypeVideo]; found {
if (publisher.HasMedia(MediaTypeAudio) && !s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_AUDIO)) ||
(publisher.HasMedia(MediaTypeVideo) && !s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_VIDEO)) {
delete(s.publishers, StreamTypeVideo)
delete(s.publishers, streamTypeVideo)
log.Printf("Session %s is no longer allowed to publish media, closing publisher %s", s.PublicId(), publisher.Id())
go func() {
publisher.Close(context.Background())
@ -1002,8 +1002,8 @@ func (s *ClientSession) processAsyncMessage(message *AsyncMessage) {
}
}
if !s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_SCREEN) {
if publisher, found := s.publishers[StreamTypeScreen]; found {
delete(s.publishers, StreamTypeScreen)
if publisher, found := s.publishers[streamTypeScreen]; found {
delete(s.publishers, streamTypeScreen)
log.Printf("Session %s is no longer allowed to publish screen, closing publisher %s", s.PublicId(), publisher.Id())
go func() {
publisher.Close(context.Background())
@ -1015,17 +1015,20 @@ func (s *ClientSession) processAsyncMessage(message *AsyncMessage) {
return
case "message":
if message.Message.Type == "bye" && message.Message.Bye.Reason == "room_session_reconnected" {
log.Printf("Closing session %s because same room session %s connected", s.PublicId(), s.RoomSessionId())
s.mu.Lock()
roomSessionId := s.RoomSessionId()
s.mu.Unlock()
log.Printf("Closing session %s because same room session %s connected", s.PublicId(), roomSessionId)
s.LeaveRoom(false)
defer s.closeAndWait(false)
}
case "sendoffer":
// Process asynchronously to not block other messages received.
go func() {
ctx, cancel := context.WithTimeout(s.Context(), s.hub.mcuTimeout)
ctx, cancel := context.WithTimeout(context.Background(), s.hub.mcuTimeout)
defer cancel()
mc, err := s.GetOrCreateSubscriber(ctx, s.hub.mcu, message.SendOffer.SessionId, StreamType(message.SendOffer.Data.RoomType))
mc, err := s.GetOrCreateSubscriber(ctx, s.hub.mcu, message.SendOffer.SessionId, message.SendOffer.Data.RoomType)
if err != nil {
log.Printf("Could not create MCU subscriber for session %s to process sendoffer in %s: %s", message.SendOffer.SessionId, s.PublicId(), err)
if err := s.events.PublishSessionMessage(message.SendOffer.SessionId, s.backend, &AsyncMessage{
@ -1054,7 +1057,7 @@ func (s *ClientSession) processAsyncMessage(message *AsyncMessage) {
return
}
mc.SendMessage(s.Context(), nil, message.SendOffer.Data, func(err error, response map[string]interface{}) {
mc.SendMessage(context.TODO(), nil, message.SendOffer.Data, func(err error, response map[string]interface{}) {
if err != nil {
log.Printf("Could not send MCU message %+v for session %s to %s: %s", message.SendOffer.Data, message.SendOffer.SessionId, s.PublicId(), err)
if err := s.events.PublishSessionMessage(message.SendOffer.SessionId, s.backend, &AsyncMessage{
@ -1112,13 +1115,13 @@ func (s *ClientSession) storePendingMessage(message *ServerMessage) {
func filterDisplayNames(events []*EventServerMessageSessionEntry) []*EventServerMessageSessionEntry {
result := make([]*EventServerMessageSessionEntry, 0, len(events))
for _, event := range events {
if len(event.User) == 0 {
if event.User == nil {
result = append(result, event)
continue
}
var userdata map[string]interface{}
if err := json.Unmarshal(event.User, &userdata); err != nil {
if err := json.Unmarshal(*event.User, &userdata); err != nil {
result = append(result, event)
continue
}
@ -1144,30 +1147,7 @@ func filterDisplayNames(events []*EventServerMessageSessionEntry) []*EventServer
}
e := event.Clone()
e.User = data
result = append(result, e)
}
return result
}
func (s *ClientSession) filterDuplicateJoin(entries []*EventServerMessageSessionEntry) []*EventServerMessageSessionEntry {
s.seenJoinedLock.Lock()
defer s.seenJoinedLock.Unlock()
// Due to the asynchronous events, a session might received a "Joined" event
// for the same (other) session twice, so filter these out on a per-session
// level.
result := make([]*EventServerMessageSessionEntry, 0, len(entries))
for _, e := range entries {
if s.seenJoinedEvents[e.SessionId] {
log.Printf("Session %s got duplicate joined event for %s, ignoring", s.publicId, e.SessionId)
continue
}
if s.seenJoinedEvents == nil {
s.seenJoinedEvents = make(map[string]bool)
}
s.seenJoinedEvents[e.SessionId] = true
e.User = (*json.RawMessage)(&data)
result = append(result, e)
}
return result
@ -1197,54 +1177,25 @@ func (s *ClientSession) filterMessage(message *ServerMessage) *ServerMessage {
case "room":
switch message.Event.Type {
case "join":
join := s.filterDuplicateJoin(message.Event.Join)
if len(join) == 0 {
return nil
}
copied := false
if len(join) != len(message.Event.Join) {
if s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) {
// Create unique copy of message for only this client.
copied = true
message = &ServerMessage{
Id: message.Id,
Type: message.Type,
Event: &EventServerMessage{
Type: message.Event.Type,
Target: message.Event.Target,
Join: join,
Join: filterDisplayNames(message.Event.Join),
},
}
}
if s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) {
if copied {
message.Event.Join = filterDisplayNames(message.Event.Join)
} else {
message = &ServerMessage{
Id: message.Id,
Type: message.Type,
Event: &EventServerMessage{
Type: message.Event.Type,
Target: message.Event.Target,
Join: filterDisplayNames(message.Event.Join),
},
}
}
}
case "leave":
s.seenJoinedLock.Lock()
defer s.seenJoinedLock.Unlock()
for _, e := range message.Event.Leave {
delete(s.seenJoinedEvents, e)
}
case "message":
if message.Event.Message == nil || len(message.Event.Message.Data) == 0 || !s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) {
if message.Event.Message == nil || message.Event.Message.Data == nil || len(*message.Event.Message.Data) == 0 || !s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) {
return message
}
var data RoomEventMessageData
if err := json.Unmarshal(message.Event.Message.Data, &data); err != nil {
if err := json.Unmarshal(*message.Event.Message.Data, &data); err != nil {
return message
}
@ -1261,7 +1212,7 @@ func (s *ClientSession) filterMessage(message *ServerMessage) *ServerMessage {
Target: message.Event.Target,
Message: &RoomEventMessage{
RoomId: message.Event.Message.RoomId,
Data: encoded,
Data: (*json.RawMessage)(&encoded),
},
},
}
@ -1271,9 +1222,9 @@ func (s *ClientSession) filterMessage(message *ServerMessage) *ServerMessage {
}
}
case "message":
if message.Message != nil && len(message.Message.Data) > 0 && s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) {
if message.Message != nil && message.Message.Data != nil && len(*message.Message.Data) > 0 && s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) {
var data MessageServerMessageData
if err := json.Unmarshal(message.Message.Data, &data); err != nil {
if err := json.Unmarshal(*message.Message.Data, &data); err != nil {
return message
}
@ -1314,7 +1265,7 @@ func (s *ClientSession) filterAsyncMessage(msg *AsyncMessage) *ServerMessage {
// Can happen mostly during tests where an older room async message
// could be received by a subscriber that joined after it was sent.
if joined := s.getRoomJoinTime(); joined.IsZero() || msg.SendTime.Before(joined) {
log.Printf("Message %+v was sent on %s before room was joined on %s, ignoring", msg.Message, msg.SendTime, joined)
log.Printf("Message %+v was sent before room was joined, ignoring", msg.Message)
return nil
}
}
@ -1327,7 +1278,7 @@ func (s *ClientSession) filterAsyncMessage(msg *AsyncMessage) *ServerMessage {
}
}
func (s *ClientSession) NotifySessionResumed(client HandlerClient) {
func (s *ClientSession) NotifySessionResumed(client *Client) {
s.mu.Lock()
if len(s.pendingClientMessages) == 0 {
s.mu.Unlock()
@ -1381,38 +1332,3 @@ func (s *ClientSession) GetVirtualSessions() []*VirtualSession {
}
return result
}
func (s *ClientSession) HandleResponse(id string, handler ResponseHandlerFunc) {
s.responseHandlersLock.Lock()
defer s.responseHandlersLock.Unlock()
if s.responseHandlers == nil {
s.responseHandlers = make(map[string]ResponseHandlerFunc)
}
s.responseHandlers[id] = handler
}
func (s *ClientSession) ClearResponseHandler(id string) {
s.responseHandlersLock.Lock()
defer s.responseHandlersLock.Unlock()
delete(s.responseHandlers, id)
}
func (s *ClientSession) ProcessResponse(message *ClientMessage) bool {
id := message.Id
if id == "" {
return false
}
s.responseHandlersLock.Lock()
cb, found := s.responseHandlers[id]
defer s.responseHandlersLock.Unlock()
if !found {
return false
}
return cb(message)
}

View file

@ -117,7 +117,6 @@ func Test_permissionsEqual(t *testing.T) {
for idx, test := range tests {
test := test
t.Run(strconv.Itoa(idx), func(t *testing.T) {
t.Parallel()
equal := permissionsEqual(test.a, test.b)
if equal != test.equal {
t.Errorf("Expected %+v to be %s to %+v but was %s", test.a, equalStrings[test.equal], test.b, equalStrings[equal])
@ -127,17 +126,12 @@ func Test_permissionsEqual(t *testing.T) {
}
func TestBandwidth_Client(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
hub, _, _, server := CreateHubForTest(t)
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
mcu, err := NewTestMCU()
if err != nil {
t.Fatal(err)
} else if err := mcu.Start(ctx); err != nil {
} else if err := mcu.Start(); err != nil {
t.Fatal(err)
}
defer mcu.Stop()
@ -151,6 +145,9 @@ func TestBandwidth_Client(t *testing.T) {
t.Fatal(err)
}
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
hello, err := client.RunUntilHello(ctx)
if err != nil {
t.Fatal(err)
@ -201,8 +198,6 @@ func TestBandwidth_Client(t *testing.T) {
}
func TestBandwidth_Backend(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
hub, _, _, server := CreateHubWithMultipleBackendsForTest(t)
u, err := url.Parse(server.URL + "/one")
@ -217,26 +212,26 @@ func TestBandwidth_Backend(t *testing.T) {
backend.maxScreenBitrate = 1000
backend.maxStreamBitrate = 2000
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
mcu, err := NewTestMCU()
if err != nil {
t.Fatal(err)
} else if err := mcu.Start(ctx); err != nil {
} else if err := mcu.Start(); err != nil {
t.Fatal(err)
}
defer mcu.Stop()
hub.SetMcu(mcu)
streamTypes := []StreamType{
StreamTypeVideo,
StreamTypeScreen,
streamTypes := []string{
streamTypeVideo,
streamTypeScreen,
}
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
for _, streamType := range streamTypes {
t.Run(string(streamType), func(t *testing.T) {
t.Run(streamType, func(t *testing.T) {
client := NewTestClient(t, server, hub)
defer client.CloseWithBye()
@ -273,7 +268,7 @@ func TestBandwidth_Backend(t *testing.T) {
}, MessageClientMessageData{
Type: "offer",
Sid: "54321",
RoomType: string(streamType),
RoomType: streamType,
Bitrate: bitrate,
Payload: map[string]interface{}{
"sdp": MockSdpOfferAudioAndVideo,
@ -292,7 +287,7 @@ func TestBandwidth_Backend(t *testing.T) {
}
var expectBitrate int
if streamType == StreamTypeVideo {
if streamType == streamTypeVideo {
expectBitrate = backend.maxStreamBitrate
} else {
expectBitrate = backend.maxScreenBitrate

View file

@ -26,7 +26,7 @@ import (
)
type Closer struct {
closed atomic.Bool
closed uint32
C chan struct{}
}
@ -37,11 +37,11 @@ func NewCloser() *Closer {
}
func (c *Closer) IsClosed() bool {
return c.closed.Load()
return atomic.LoadUint32(&c.closed) != 0
}
func (c *Closer) Close() {
if c.closed.CompareAndSwap(false, true) {
if atomic.CompareAndSwapUint32(&c.closed, 0, 1) {
close(c.C)
}
}

View file

@ -1,87 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2023 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"errors"
"os"
"regexp"
"github.com/dlintw/goconf"
)
var (
searchVarsRegexp = regexp.MustCompile(`\$\([A-Za-z][A-Za-z0-9_]*\)`)
)
func replaceEnvVars(s string) string {
return searchVarsRegexp.ReplaceAllStringFunc(s, func(name string) string {
name = name[2 : len(name)-1]
value, found := os.LookupEnv(name)
if !found {
return name
}
return value
})
}
// GetStringOptionWithEnv will get the string option and resolve any environment
// variable references in the form "$(VAR)".
func GetStringOptionWithEnv(config *goconf.ConfigFile, section string, option string) (string, error) {
value, err := config.GetString(section, option)
if err != nil {
return "", err
}
value = replaceEnvVars(value)
return value, nil
}
func GetStringOptions(config *goconf.ConfigFile, section string, ignoreErrors bool) (map[string]string, error) {
options, _ := config.GetOptions(section)
if len(options) == 0 {
return nil, nil
}
result := make(map[string]string)
for _, option := range options {
value, err := GetStringOptionWithEnv(config, section, option)
if err != nil {
if ignoreErrors {
continue
}
var ge goconf.GetError
if errors.As(err, &ge) && ge.Reason == goconf.OptionNotFound {
// Skip options from "default" section.
continue
}
return nil, err
}
result[option] = value
}
return result, nil
}

View file

@ -1,92 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2023 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"reflect"
"testing"
"github.com/dlintw/goconf"
)
func TestStringOptions(t *testing.T) {
t.Setenv("FOO", "foo")
expected := map[string]string{
"one": "1",
"two": "2",
"foo": "http://foo/1",
}
config := goconf.NewConfigFile()
for k, v := range expected {
if k == "foo" {
config.AddOption("foo", k, "http://$(FOO)/1")
} else {
config.AddOption("foo", k, v)
}
}
config.AddOption("default", "three", "3")
options, err := GetStringOptions(config, "foo", false)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(expected, options) {
t.Errorf("expected %+v, got %+v", expected, options)
}
}
func TestStringOptionWithEnv(t *testing.T) {
t.Setenv("FOO", "foo")
t.Setenv("BAR", "")
t.Setenv("BA_R", "bar")
config := goconf.NewConfigFile()
config.AddOption("test", "foo", "http://$(FOO)/1")
config.AddOption("test", "bar", "http://$(BAR)/2")
config.AddOption("test", "bar2", "http://$(BA_R)/3")
config.AddOption("test", "baz", "http://$(BAZ)/4")
config.AddOption("test", "inv1", "http://$(FOO")
config.AddOption("test", "inv2", "http://$FOO)")
config.AddOption("test", "inv3", "http://$((FOO)")
config.AddOption("test", "inv4", "http://$(F.OO)")
expected := map[string]string{
"foo": "http://foo/1",
"bar": "http:///2",
"bar2": "http://bar/3",
"baz": "http://BAZ/4",
"inv1": "http://$(FOO",
"inv2": "http://$FOO)",
"inv3": "http://$((FOO)",
"inv4": "http://$(F.OO)",
}
for k, v := range expected {
value, err := GetStringOptionWithEnv(config, "test", k)
if err != nil {
t.Errorf("expected value for %s, got %s", k, err)
} else if value != v {
t.Errorf("expected value %s for %s, got %s", v, k, value)
}
}
}

View file

@ -1,7 +1,7 @@
package signaling
// This file has been automatically generated, do not modify.
// Source: https://github.com/datasets/country-codes/raw/master/data/country-codes.csv
// Source: https://datahub.io/core/country-codes/r/country-codes.json
var (
ContinentMap = map[string][]string{

View file

@ -35,7 +35,6 @@ func TestDeferredExecutor_MultiClose(t *testing.T) {
}
func TestDeferredExecutor_QueueSize(t *testing.T) {
t.Parallel()
e := NewDeferredExecutor(0)
defer e.waitForStop()
defer e.Close()
@ -70,13 +69,13 @@ func TestDeferredExecutor_Order(t *testing.T) {
}
}
done := make(chan struct{})
done := make(chan bool)
for x := 0; x < 10; x++ {
e.Execute(getFunc(x))
}
e.Execute(func() {
close(done)
done <- true
})
<-done
@ -91,17 +90,16 @@ func TestDeferredExecutor_CloseFromFunc(t *testing.T) {
e := NewDeferredExecutor(64)
defer e.waitForStop()
done := make(chan struct{})
done := make(chan bool)
e.Execute(func() {
defer close(done)
e.Close()
done <- true
})
<-done
}
func TestDeferredExecutor_DeferAfterClose(t *testing.T) {
CatchLogForTest(t)
e := NewDeferredExecutor(64)
defer e.waitForStop()

View file

@ -1,343 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2023 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"context"
"log"
"net"
"net/url"
"strings"
"sync"
"sync/atomic"
"time"
)
var (
lookupDnsMonitorIP = net.LookupIP
)
const (
defaultDnsMonitorInterval = time.Second
)
type DnsMonitorCallback = func(entry *DnsMonitorEntry, all []net.IP, add []net.IP, keep []net.IP, remove []net.IP)
type DnsMonitorEntry struct {
entry atomic.Pointer[dnsMonitorEntry]
url string
callback DnsMonitorCallback
}
func (e *DnsMonitorEntry) URL() string {
return e.url
}
type dnsMonitorEntry struct {
hostname string
hostIP net.IP
mu sync.Mutex
ips []net.IP
entries map[*DnsMonitorEntry]bool
}
func (e *dnsMonitorEntry) setIPs(ips []net.IP, fromIP bool) {
e.mu.Lock()
defer e.mu.Unlock()
empty := len(e.ips) == 0
if empty {
// Simple case: initial lookup.
if len(ips) > 0 {
e.ips = ips
e.runCallbacks(ips, ips, nil, nil)
}
return
} else if fromIP {
// No more updates possible for IP addresses.
return
} else if len(ips) == 0 {
// Simple case: no records received from lookup.
if !empty {
removed := e.ips
e.ips = nil
e.runCallbacks(nil, nil, nil, removed)
}
return
}
var newIPs []net.IP
var addedIPs []net.IP
var removedIPs []net.IP
var keepIPs []net.IP
for _, oldIP := range e.ips {
found := false
for idx, newIP := range ips {
if oldIP.Equal(newIP) {
ips = append(ips[:idx], ips[idx+1:]...)
found = true
keepIPs = append(keepIPs, oldIP)
newIPs = append(newIPs, oldIP)
break
}
}
if !found {
removedIPs = append(removedIPs, oldIP)
}
}
if len(ips) > 0 {
addedIPs = append(addedIPs, ips...)
newIPs = append(newIPs, ips...)
}
e.ips = newIPs
if len(addedIPs) > 0 || len(removedIPs) > 0 {
e.runCallbacks(newIPs, addedIPs, keepIPs, removedIPs)
}
}
func (e *dnsMonitorEntry) addEntry(entry *DnsMonitorEntry) {
e.mu.Lock()
defer e.mu.Unlock()
e.entries[entry] = true
}
func (e *dnsMonitorEntry) removeEntry(entry *DnsMonitorEntry) bool {
e.mu.Lock()
defer e.mu.Unlock()
delete(e.entries, entry)
return len(e.entries) == 0
}
func (e *dnsMonitorEntry) runCallbacks(all []net.IP, add []net.IP, keep []net.IP, remove []net.IP) {
for entry := range e.entries {
entry.callback(entry, all, add, keep, remove)
}
}
type DnsMonitor struct {
interval time.Duration
stopCtx context.Context
stopFunc func()
stopped chan struct{}
mu sync.RWMutex
cond *sync.Cond
hostnames map[string]*dnsMonitorEntry
hasRemoved atomic.Bool
// Can be overwritten from tests.
checkHostnames func()
}
func NewDnsMonitor(interval time.Duration) (*DnsMonitor, error) {
if interval < 0 {
interval = defaultDnsMonitorInterval
}
stopCtx, stopFunc := context.WithCancel(context.Background())
monitor := &DnsMonitor{
interval: interval,
stopCtx: stopCtx,
stopFunc: stopFunc,
stopped: make(chan struct{}),
hostnames: make(map[string]*dnsMonitorEntry),
}
monitor.cond = sync.NewCond(&monitor.mu)
monitor.checkHostnames = monitor.doCheckHostnames
return monitor, nil
}
func (m *DnsMonitor) Start() error {
go m.run()
return nil
}
func (m *DnsMonitor) Stop() {
m.stopFunc()
m.cond.Signal()
<-m.stopped
}
func (m *DnsMonitor) Add(target string, callback DnsMonitorCallback) (*DnsMonitorEntry, error) {
var hostname string
if strings.Contains(target, "://") {
// Full URL passed.
parsed, err := url.Parse(target)
if err != nil {
return nil, err
}
hostname = parsed.Host
} else {
// Hostname only passed.
hostname = target
}
if h, _, err := net.SplitHostPort(hostname); err == nil {
hostname = h
}
m.mu.Lock()
defer m.mu.Unlock()
e := &DnsMonitorEntry{
url: target,
callback: callback,
}
entry, found := m.hostnames[hostname]
if !found {
entry = &dnsMonitorEntry{
hostname: hostname,
hostIP: net.ParseIP(hostname),
entries: make(map[*DnsMonitorEntry]bool),
}
m.hostnames[hostname] = entry
}
e.entry.Store(entry)
entry.addEntry(e)
m.cond.Signal()
return e, nil
}
func (m *DnsMonitor) Remove(entry *DnsMonitorEntry) {
oldEntry := entry.entry.Swap(nil)
if oldEntry == nil {
// Already removed.
return
}
locked := m.mu.TryLock()
// Spin-lock for simple cases that resolve immediately to avoid deferred removal.
for i := 0; !locked && i < 1000; i++ {
time.Sleep(time.Nanosecond)
locked = m.mu.TryLock()
}
if !locked {
// Currently processing callbacks for this entry, need to defer removal.
m.hasRemoved.Store(true)
return
}
defer m.mu.Unlock()
e, found := m.hostnames[oldEntry.hostname]
if !found {
return
}
if e.removeEntry(entry) {
delete(m.hostnames, e.hostname)
}
}
func (m *DnsMonitor) clearRemoved() {
if !m.hasRemoved.CompareAndSwap(true, false) {
return
}
m.mu.Lock()
defer m.mu.Unlock()
for hostname, entry := range m.hostnames {
deleted := false
for e := range entry.entries {
if e.entry.Load() == nil {
delete(entry.entries, e)
deleted = true
}
}
if deleted && len(entry.entries) == 0 {
delete(m.hostnames, hostname)
}
}
}
func (m *DnsMonitor) waitForEntries() (waited bool) {
m.mu.Lock()
defer m.mu.Unlock()
for len(m.hostnames) == 0 && m.stopCtx.Err() == nil {
m.cond.Wait()
waited = true
}
return
}
func (m *DnsMonitor) run() {
ticker := time.NewTicker(m.interval)
defer ticker.Stop()
defer close(m.stopped)
for {
if m.waitForEntries() {
ticker.Reset(m.interval)
if m.stopCtx.Err() == nil {
// Initial check when a new entry was added. More checks will be
// triggered by the Ticker.
m.checkHostnames()
continue
}
}
select {
case <-m.stopCtx.Done():
return
case <-ticker.C:
m.checkHostnames()
}
}
}
func (m *DnsMonitor) doCheckHostnames() {
m.clearRemoved()
m.mu.RLock()
defer m.mu.RUnlock()
for _, entry := range m.hostnames {
m.checkHostname(entry)
}
}
func (m *DnsMonitor) checkHostname(entry *dnsMonitorEntry) {
if len(entry.hostIP) > 0 {
entry.setIPs([]net.IP{entry.hostIP}, true)
return
}
ips, err := lookupDnsMonitorIP(entry.hostname)
if err != nil {
log.Printf("Could not lookup %s: %s", entry.hostname, err)
return
}
entry.setIPs(ips, false)
}

View file

@ -1,428 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2023 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"context"
"fmt"
"net"
"reflect"
"sync"
"sync/atomic"
"testing"
"time"
)
type mockDnsLookup struct {
sync.RWMutex
ips map[string][]net.IP
}
func newMockDnsLookupForTest(t *testing.T) *mockDnsLookup {
mock := &mockDnsLookup{
ips: make(map[string][]net.IP),
}
prev := lookupDnsMonitorIP
t.Cleanup(func() {
lookupDnsMonitorIP = prev
})
lookupDnsMonitorIP = mock.lookup
return mock
}
func (m *mockDnsLookup) Set(host string, ips []net.IP) {
m.Lock()
defer m.Unlock()
m.ips[host] = ips
}
func (m *mockDnsLookup) Get(host string) []net.IP {
m.Lock()
defer m.Unlock()
return m.ips[host]
}
func (m *mockDnsLookup) lookup(host string) ([]net.IP, error) {
m.RLock()
defer m.RUnlock()
ips, found := m.ips[host]
if !found {
return nil, &net.DNSError{
Err: fmt.Sprintf("could not resolve %s", host),
Name: host,
IsNotFound: true,
}
}
return append([]net.IP{}, ips...), nil
}
func newDnsMonitorForTest(t *testing.T, interval time.Duration) *DnsMonitor {
t.Helper()
monitor, err := NewDnsMonitor(interval)
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
monitor.Stop()
})
if err := monitor.Start(); err != nil {
t.Fatal(err)
}
return monitor
}
type dnsMonitorReceiverRecord struct {
all []net.IP
add []net.IP
keep []net.IP
remove []net.IP
}
func (r *dnsMonitorReceiverRecord) Equal(other *dnsMonitorReceiverRecord) bool {
return r == other || (reflect.DeepEqual(r.add, other.add) &&
reflect.DeepEqual(r.keep, other.keep) &&
reflect.DeepEqual(r.remove, other.remove))
}
func (r *dnsMonitorReceiverRecord) String() string {
return fmt.Sprintf("all=%v, add=%v, keep=%v, remove=%v", r.all, r.add, r.keep, r.remove)
}
var (
expectNone = &dnsMonitorReceiverRecord{}
)
type dnsMonitorReceiver struct {
sync.Mutex
t *testing.T
expected *dnsMonitorReceiverRecord
received *dnsMonitorReceiverRecord
}
func newDnsMonitorReceiverForTest(t *testing.T) *dnsMonitorReceiver {
return &dnsMonitorReceiver{
t: t,
}
}
func (r *dnsMonitorReceiver) OnLookup(entry *DnsMonitorEntry, all, add, keep, remove []net.IP) {
r.Lock()
defer r.Unlock()
received := &dnsMonitorReceiverRecord{
all: all,
add: add,
keep: keep,
remove: remove,
}
expected := r.expected
r.expected = nil
if expected == expectNone {
r.t.Errorf("expected no event, got %v", received)
return
}
if expected == nil {
if r.received != nil && !r.received.Equal(received) {
r.t.Errorf("already received %v, got %v", r.received, received)
}
return
}
if !expected.Equal(received) {
r.t.Errorf("expected %v, got %v", expected, received)
}
r.received = nil
r.expected = nil
}
func (r *dnsMonitorReceiver) WaitForExpected(ctx context.Context) {
r.t.Helper()
r.Lock()
defer r.Unlock()
ticker := time.NewTicker(time.Microsecond)
abort := false
for r.expected != nil && !abort {
r.Unlock()
select {
case <-ticker.C:
case <-ctx.Done():
r.t.Error(ctx.Err())
abort = true
}
r.Lock()
}
}
func (r *dnsMonitorReceiver) Expect(all, add, keep, remove []net.IP) {
r.t.Helper()
r.Lock()
defer r.Unlock()
if r.expected != nil && r.expected != expectNone {
r.t.Errorf("didn't get previously expected %v", r.expected)
}
expected := &dnsMonitorReceiverRecord{
all: all,
add: add,
keep: keep,
remove: remove,
}
if r.received != nil && r.received.Equal(expected) {
r.received = nil
return
}
r.expected = expected
}
func (r *dnsMonitorReceiver) ExpectNone() {
r.t.Helper()
r.Lock()
defer r.Unlock()
if r.expected != nil && r.expected != expectNone {
r.t.Errorf("didn't get previously expected %v", r.expected)
}
r.expected = expectNone
}
func TestDnsMonitor(t *testing.T) {
lookup := newMockDnsLookupForTest(t)
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
interval := time.Millisecond
monitor := newDnsMonitorForTest(t, interval)
ip1 := net.ParseIP("192.168.0.1")
ip2 := net.ParseIP("192.168.1.1")
ip3 := net.ParseIP("10.1.2.3")
ips1 := []net.IP{
ip1,
ip2,
}
lookup.Set("foo", ips1)
rec1 := newDnsMonitorReceiverForTest(t)
rec1.Expect(ips1, ips1, nil, nil)
entry1, err := monitor.Add("https://foo:12345", rec1.OnLookup)
if err != nil {
t.Fatal(err)
}
defer monitor.Remove(entry1)
rec1.WaitForExpected(ctx)
ips2 := []net.IP{
ip1,
ip2,
ip3,
}
add2 := []net.IP{ip3}
keep2 := []net.IP{ip1, ip2}
rec1.Expect(ips2, add2, keep2, nil)
lookup.Set("foo", ips2)
rec1.WaitForExpected(ctx)
ips3 := []net.IP{
ip2,
ip3,
}
keep3 := []net.IP{ip2, ip3}
remove3 := []net.IP{ip1}
rec1.Expect(ips3, nil, keep3, remove3)
lookup.Set("foo", ips3)
rec1.WaitForExpected(ctx)
rec1.ExpectNone()
time.Sleep(5 * interval)
remove4 := []net.IP{ip2, ip3}
rec1.Expect(nil, nil, nil, remove4)
lookup.Set("foo", nil)
rec1.WaitForExpected(ctx)
rec1.ExpectNone()
time.Sleep(5 * interval)
// Removing multiple times is supported.
monitor.Remove(entry1)
monitor.Remove(entry1)
// No more events after removing.
lookup.Set("foo", ips1)
rec1.ExpectNone()
time.Sleep(5 * interval)
}
func TestDnsMonitorIP(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
interval := time.Millisecond
monitor := newDnsMonitorForTest(t, interval)
ip := "192.168.0.1"
ips := []net.IP{
net.ParseIP(ip),
}
rec1 := newDnsMonitorReceiverForTest(t)
rec1.Expect(ips, ips, nil, nil)
entry, err := monitor.Add(ip+":12345", rec1.OnLookup)
if err != nil {
t.Fatal(err)
}
defer monitor.Remove(entry)
rec1.WaitForExpected(ctx)
rec1.ExpectNone()
time.Sleep(5 * interval)
}
func TestDnsMonitorNoLookupIfEmpty(t *testing.T) {
interval := time.Millisecond
monitor := newDnsMonitorForTest(t, interval)
var checked atomic.Bool
monitor.checkHostnames = func() {
checked.Store(true)
monitor.doCheckHostnames()
}
time.Sleep(10 * interval)
if checked.Load() {
t.Error("should not have checked hostnames")
}
}
type deadlockMonitorReceiver struct {
t *testing.T
monitor *DnsMonitor
mu sync.RWMutex
wg sync.WaitGroup
entry *DnsMonitorEntry
started chan struct{}
triggered bool
closed atomic.Bool
}
func newDeadlockMonitorReceiver(t *testing.T, monitor *DnsMonitor) *deadlockMonitorReceiver {
return &deadlockMonitorReceiver{
t: t,
monitor: monitor,
started: make(chan struct{}),
}
}
func (r *deadlockMonitorReceiver) OnLookup(entry *DnsMonitorEntry, all []net.IP, add []net.IP, keep []net.IP, remove []net.IP) {
if r.closed.Load() {
r.t.Error("received lookup after closed")
return
}
r.mu.Lock()
defer r.mu.Unlock()
if r.triggered {
return
}
r.triggered = true
r.wg.Add(1)
go func() {
defer r.wg.Done()
r.mu.RLock()
defer r.mu.RUnlock()
close(r.started)
time.Sleep(50 * time.Millisecond)
}()
}
func (r *deadlockMonitorReceiver) Start() {
r.mu.Lock()
defer r.mu.Unlock()
entry, err := r.monitor.Add("foo", r.OnLookup)
if err != nil {
r.t.Errorf("error adding listener: %s", err)
return
}
r.entry = entry
}
func (r *deadlockMonitorReceiver) Close() {
r.mu.Lock()
defer r.mu.Unlock()
if r.entry != nil {
r.monitor.Remove(r.entry)
r.closed.Store(true)
}
r.wg.Wait()
}
func TestDnsMonitorDeadlock(t *testing.T) {
lookup := newMockDnsLookupForTest(t)
ip1 := net.ParseIP("192.168.0.1")
ip2 := net.ParseIP("192.168.0.2")
lookup.Set("foo", []net.IP{ip1})
interval := time.Millisecond
monitor := newDnsMonitorForTest(t, interval)
r := newDeadlockMonitorReceiver(t, monitor)
r.Start()
<-r.started
lookup.Set("foo", []net.IP{ip2})
r.Close()
lookup.Set("foo", []net.IP{ip1})
time.Sleep(10 * interval)
monitor.mu.Lock()
defer monitor.mu.Unlock()
if len(monitor.hostnames) > 0 {
t.Errorf("should have cleared hostnames, got %+v", monitor.hostnames)
}
}

View file

@ -21,8 +21,6 @@ The running container can be configured through different environment variables:
- `HASH_KEY`: Secret value used to generate checksums of sessions (32 or 64 bytes).
- `BLOCK_KEY`: Key for encrypting data in the sessions (16, 24 or 32 bytes).
- `INTERNAL_SHARED_SECRET_KEY`: Shared secret for connections from internal clients.
- `BACKENDS_ALLOWALL`: Allow all backends. Extremly insecure - use only for development!
- `BACKENDS_ALLOWALL_SECRET`: Secret when `BACKENDS_ALLOWALL` is enabled.
- `BACKENDS`: Space-separated list of backend ids.
- `BACKEND_<ID>_URL`: Url of backend `ID` (where `ID` is the uppercase backend id).
- `BACKEND_<ID>_SHARED_SECRET`: Shared secret for backend `ID` (where `ID` is the uppercase backend id).
@ -52,10 +50,9 @@ The running container can be configured through different environment variables:
- `TURN_SERVERS`: A comma-separated list of TURN servers to use.
- `GEOIP_LICENSE`: License key to use when downloading the MaxMind GeoIP database.
- `GEOIP_URL`: Optional URL to download a MaxMind GeoIP database from.
- `GEOIP_OVERRIDES`: Optional space-separated list of overrides for GeoIP lookups.
- `CONTINENT_OVERRIDES`: Optional space-separated list of overrides for continent mappings.
- `GEOIP_OVERRIDES`: Optional spae-separated list of overrides for GeoIP lookups.
- `CONTINENT_OVERRIDES`: Optional spae-separated list of overrides for continent mappings.
- `STATS_IPS`: Comma-separated list of IP addresses that are allowed to access the stats endpoint.
- `TRUSTED_PROXIES`: Comma-separated list of IPs / networks that are trusted proxies.
- `GRPC_LISTEN`: IP and port to listen on for GRPC requests.
- `GRPC_SERVER_CERTIFICATE`: Certificate to use for the GRPC server.
- `GRPC_SERVER_KEY`: Private key to use for the GRPC server.
@ -100,16 +97,9 @@ The running container can be configured through different environment variables:
- `CONFIG`: Optional name of configuration file to use.
- `HTTP_LISTEN`: Address of HTTP listener.
- `COUNTRY`: Optional ISO 3166 country this proxy is located at.
- `EXTERNAL_HOSTNAME`: The external hostname for remote streams. Will try to autodetect if omitted.
- `TOKEN_ID`: Id of the token to use when connecting remote streams.
- `TOKEN_KEY`: Private key for the configured token id.
- `BANDWIDTH_INCOMING`: Optional incoming target bandwidth (in megabits per second).
- `BANDWIDTH_OUTGOING`: Optional outgoing target bandwidth (in megabits per second).
- `JANUS_URL`: Url to Janus server.
- `MAX_STREAM_BITRATE`: Optional maximum bitrate for audio/video streams.
- `MAX_SCREEN_BITRATE`: Optional maximum bitrate for screensharing streams.
- `STATS_IPS`: Comma-separated list of IP addresses that are allowed to access the stats endpoint.
- `TRUSTED_PROXIES`: Comma-separated list of IPs / networks that are trusted proxies.
- `ETCD_ENDPOINTS`: Static list of etcd endpoints (if etcd should be used).
- `ETCD_DISCOVERY_SRV`: Alternative domain to use for DNS SRV configuration of etcd endpoints (if etcd should be used).
- `ETCD_DISCOVERY_SERVICE`: Optional service name for DNS SRV configuration of etcd..

View file

@ -1,5 +1,5 @@
# Modified from https://gitlab.com/powerpaul17/nc_talk_backend/-/blob/dcbb918d8716dad1eb72a889d1e6aa1e3a543641/docker/janus/Dockerfile
FROM alpine:3.20
FROM alpine:3.14
RUN apk add --no-cache curl autoconf automake libtool pkgconf build-base \
glib-dev libconfig-dev libnice-dev jansson-dev openssl-dev zlib libsrtp-dev \
@ -15,30 +15,30 @@ RUN cd /tmp && \
git checkout $USRSCTP_VERSION && \
./bootstrap && \
./configure --prefix=/usr && \
make -j$(nproc) && make install
make && make install
# libsrtp
ARG LIBSRTP_VERSION=2.6.0
ARG LIBSRTP_VERSION=2.4.2
RUN cd /tmp && \
wget https://github.com/cisco/libsrtp/archive/v$LIBSRTP_VERSION.tar.gz && \
tar xfv v$LIBSRTP_VERSION.tar.gz && \
cd libsrtp-$LIBSRTP_VERSION && \
./configure --prefix=/usr --enable-openssl && \
make shared_library -j$(nproc) && \
make shared_library && \
make install && \
rm -fr /libsrtp-$LIBSRTP_VERSION && \
rm -f /v$LIBSRTP_VERSION.tar.gz
# JANUS
ARG JANUS_VERSION=1.2.2
ARG JANUS_VERSION=0.11.8
RUN mkdir -p /usr/src/janus && \
cd /usr/src/janus && \
curl -L https://github.com/meetecho/janus-gateway/archive/v$JANUS_VERSION.tar.gz | tar -xz && \
cd /usr/src/janus/janus-gateway-$JANUS_VERSION && \
./autogen.sh && \
./configure --disable-rabbitmq --disable-mqtt --disable-boringssl && \
make -j$(nproc) && \
make && \
make install && \
make configs

View file

@ -1,13 +1,13 @@
FROM --platform=${BUILDPLATFORM} golang:1.22-alpine AS builder
FROM --platform=${BUILDPLATFORM} golang:1.20-alpine AS builder
ARG TARGETARCH
ARG TARGETOS
WORKDIR /workdir
COPY . .
RUN touch /.dockerenv && \
apk add --no-cache bash git build-base protobuf && \
GOOS=${TARGETOS} GOARCH=${TARGETARCH} make proxy
RUN apk add --no-cache bash git build-base protobuf && \
if [ -d "vendor" ]; then GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOPROXY=off make proxy -j$(nproc); else \
GOOS=${TARGETOS} GOARCH=${TARGETARCH} make proxy -j$(nproc); fi
FROM alpine:3
@ -18,12 +18,10 @@ RUN adduser -D spreedbackend && \
COPY --from=builder /workdir/bin/proxy /usr/bin/nextcloud-spreed-signaling-proxy
COPY ./proxy.conf.in /config/proxy.conf.in
COPY ./docker/proxy/entrypoint.sh /
COPY ./docker/proxy/stop.sh /
COPY ./docker/proxy/wait.sh /
RUN chown spreedbackend /config
RUN /usr/bin/nextcloud-spreed-signaling-proxy -version
USER spreedbackend
STOPSIGNAL SIGUSR1
ENTRYPOINT [ "/entrypoint.sh" ]
CMD ["/bin/sh", "-c", "/usr/bin/nextcloud-spreed-signaling-proxy -config $CONFIG"]

View file

@ -22,11 +22,6 @@
#
set -e
if [ -n "$1" ]; then
# Run custom command.
exec "$@"
fi
if [ -z "$CONFIG" ]; then
echo "No configuration filename given in CONFIG environment variable"
exit 1
@ -36,68 +31,52 @@ if [ ! -f "$CONFIG" ]; then
echo "Preparing signaling proxy configuration in $CONFIG ..."
cp /config/proxy.conf.in "$CONFIG"
if [ -n "$HTTP_LISTEN" ]; then
if [ ! -z "$HTTP_LISTEN" ]; then
sed -i "s|#listen = 127.0.0.1:9090|listen = $HTTP_LISTEN|" "$CONFIG"
fi
if [ -n "$COUNTRY" ]; then
if [ ! -z "$COUNTRY" ]; then
sed -i "s|#country =.*|country = $COUNTRY|" "$CONFIG"
fi
if [ -n "$EXTERNAL_HOSTNAME" ]; then
sed -i "s|#hostname =.*|hostname = $EXTERNAL_HOSTNAME|" "$CONFIG"
fi
if [ -n "$TOKEN_ID" ]; then
sed -i "s|#token_id =.*|token_id = $TOKEN_ID|" "$CONFIG"
fi
if [ -n "$TOKEN_KEY" ]; then
sed -i "s|#token_key =.*|token_key = $TOKEN_KEY|" "$CONFIG"
fi
if [ -n "$BANDWIDTH_INCOMING" ]; then
sed -i "s|#incoming =.*|incoming = $BANDWIDTH_INCOMING|" "$CONFIG"
fi
if [ -n "$BANDWIDTH_OUTGOING" ]; then
sed -i "s|#outgoing =.*|outgoing = $BANDWIDTH_OUTGOING|" "$CONFIG"
fi
HAS_ETCD=
if [ -n "$ETCD_ENDPOINTS" ]; then
if [ ! -z "$ETCD_ENDPOINTS" ]; then
sed -i "s|#endpoints =.*|endpoints = $ETCD_ENDPOINTS|" "$CONFIG"
HAS_ETCD=1
else
if [ -n "$ETCD_DISCOVERY_SRV" ]; then
if [ ! -z "$ETCD_DISCOVERY_SRV" ]; then
sed -i "s|#discoverysrv =.*|discoverysrv = $ETCD_DISCOVERY_SRV|" "$CONFIG"
HAS_ETCD=1
fi
if [ -n "$ETCD_DISCOVERY_SERVICE" ]; then
if [ ! -z "$ETCD_DISCOVERY_SERVICE" ]; then
sed -i "s|#discoveryservice =.*|discoveryservice = $ETCD_DISCOVERY_SERVICE|" "$CONFIG"
fi
fi
if [ -n "$HAS_ETCD" ]; then
if [ -n "$ETCD_CLIENT_KEY" ]; then
if [ ! -z "$HAS_ETCD" ]; then
if [ ! -z "$ETCD_CLIENT_KEY" ]; then
sed -i "s|#clientkey = /path/to/etcd-client.key|clientkey = $ETCD_CLIENT_KEY|" "$CONFIG"
fi
if [ -n "$ETCD_CLIENT_CERTIFICATE" ]; then
if [ ! -z "$ETCD_CLIENT_CERTIFICATE" ]; then
sed -i "s|#clientcert = /path/to/etcd-client.crt|clientcert = $ETCD_CLIENT_CERTIFICATE|" "$CONFIG"
fi
if [ -n "$ETCD_CLIENT_CA" ]; then
if [ ! -z "$ETCD_CLIENT_CA" ]; then
sed -i "s|#cacert = /path/to/etcd-ca.crt|cacert = $ETCD_CLIENT_CA|" "$CONFIG"
fi
fi
if [ -n "$JANUS_URL" ]; then
if [ ! -z "$JANUS_URL" ]; then
sed -i "s|url =.*|url = $JANUS_URL|" "$CONFIG"
else
sed -i "s|url =.*|#url =|" "$CONFIG"
fi
if [ -n "$MAX_STREAM_BITRATE" ]; then
if [ ! -z "$MAX_STREAM_BITRATE" ]; then
sed -i "s|#maxstreambitrate =.*|maxstreambitrate = $MAX_STREAM_BITRATE|" "$CONFIG"
fi
if [ -n "$MAX_SCREEN_BITRATE" ]; then
if [ ! -z "$MAX_SCREEN_BITRATE" ]; then
sed -i "s|#maxscreenbitrate =.*|maxscreenbitrate = $MAX_SCREEN_BITRATE|" "$CONFIG"
fi
if [ -n "$TOKENS_ETCD" ]; then
if [ ! -z "$TOKENS_ETCD" ]; then
if [ -z "$HAS_ETCD" ]; then
echo "No etcd endpoint configured, can't use etcd for proxy tokens"
exit 1
@ -105,7 +84,7 @@ if [ ! -f "$CONFIG" ]; then
sed -i "s|tokentype =.*|tokentype = etcd|" "$CONFIG"
if [ -n "$TOKEN_KEY_FORMAT" ]; then
if [ ! -z "$TOKEN_KEY_FORMAT" ]; then
sed -i "s|#keyformat =.*|keyformat = $TOKEN_KEY_FORMAT|" "$CONFIG"
fi
else
@ -114,22 +93,18 @@ if [ ! -f "$CONFIG" ]; then
echo "[tokens]" >> "$CONFIG"
for token in $TOKENS; do
declare var="TOKEN_${token^^}_KEY"
var=${var//./_}
if [ -n "${!var}" ]; then
var=$(echo $var | sed "s|\.|_|")
if [ ! -z "${!var}" ]; then
echo "$token = ${!var}" >> "$CONFIG"
fi
done
echo >> "$CONFIG"
fi
if [ -n "$STATS_IPS" ]; then
if [ ! -z "$STATS_IPS" ]; then
sed -i "s|#allowed_ips =.*|allowed_ips = $STATS_IPS|" "$CONFIG"
fi
if [ -n "$TRUSTED_PROXIES" ]; then
sed -i "s|#trustedproxies =.*|trustedproxies = $TRUSTED_PROXIES|" "$CONFIG"
fi
fi
echo "Starting signaling proxy with $CONFIG ..."
exec /usr/bin/nextcloud-spreed-signaling-proxy -config "$CONFIG"
exec "$@"

View file

@ -1,26 +0,0 @@
#!/bin/bash
#
# Standalone signaling server for the Nextcloud Spreed app.
# Copyright (C) 2024 struktur AG
#
# @author Joachim Bauch <bauch@struktur.de>
#
# @license GNU AGPL version 3 or any later version
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
set -e
echo "Schedule signaling proxy to shutdown ..."
exec killall -USR1 nextcloud-spreed-signaling-proxy

View file

@ -1,33 +0,0 @@
#!/bin/bash
#
# Standalone signaling server for the Nextcloud Spreed app.
# Copyright (C) 2024 struktur AG
#
# @author Joachim Bauch <bauch@struktur.de>
#
# @license GNU AGPL version 3 or any later version
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
set -e
echo "Waiting for signaling proxy to shutdown ..."
while true
do
if ! pgrep nextcloud-spreed-signaling-proxy > /dev/null ; then
echo "Signaling proxy has stopped"
exit 0
fi
sleep 1
done

View file

@ -1,13 +1,13 @@
FROM --platform=${BUILDPLATFORM} golang:1.22-alpine AS builder
FROM --platform=${BUILDPLATFORM} golang:1.20-alpine AS builder
ARG TARGETARCH
ARG TARGETOS
WORKDIR /workdir
COPY . .
RUN touch /.dockerenv && \
apk add --no-cache bash git build-base protobuf && \
GOOS=${TARGETOS} GOARCH=${TARGETARCH} make server
RUN apk add --no-cache bash git build-base protobuf && \
if [ -d "vendor" ]; then GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOPROXY=off make server -j$(nproc); else \
GOOS=${TARGETOS} GOARCH=${TARGETARCH} make server -j$(nproc); fi
FROM alpine:3
@ -18,12 +18,10 @@ RUN adduser -D spreedbackend && \
COPY --from=builder /workdir/bin/signaling /usr/bin/nextcloud-spreed-signaling
COPY ./server.conf.in /config/server.conf.in
COPY ./docker/server/entrypoint.sh /
COPY ./docker/server/stop.sh /
COPY ./docker/server/wait.sh /
RUN chown spreedbackend /config
RUN /usr/bin/nextcloud-spreed-signaling -version
USER spreedbackend
STOPSIGNAL SIGUSR1
ENTRYPOINT [ "/entrypoint.sh" ]
CMD ["/bin/sh", "-c", "/usr/bin/nextcloud-spreed-signaling -config $CONFIG"]

View file

@ -22,11 +22,6 @@
#
set -e
if [ -n "$1" ]; then
# Run custom command.
exec "$@"
fi
if [ -z "$CONFIG" ]; then
echo "No configuration filename given in CONFIG environment variable"
exit 1
@ -36,75 +31,69 @@ if [ ! -f "$CONFIG" ]; then
echo "Preparing signaling server configuration in $CONFIG ..."
cp /config/server.conf.in "$CONFIG"
if [ -n "$HTTP_LISTEN" ]; then
if [ ! -z "$HTTP_LISTEN" ]; then
sed -i "s|#listen = 127.0.0.1:8080|listen = $HTTP_LISTEN|" "$CONFIG"
fi
if [ -n "$HTTPS_LISTEN" ]; then
if [ ! -z "$HTTPS_LISTEN" ]; then
sed -i "s|#listen = 127.0.0.1:8443|listen = $HTTPS_LISTEN|" "$CONFIG"
if [ -n "$HTTPS_CERTIFICATE" ]; then
if [ ! -z "$HTTPS_CERTIFICATE" ]; then
sed -i "s|certificate = /etc/nginx/ssl/server.crt|certificate = $HTTPS_CERTIFICATE|" "$CONFIG"
fi
if [ -n "$HTTPS_KEY" ]; then
if [ ! -z "$HTTPS_KEY" ]; then
sed -i "s|key = /etc/nginx/ssl/server.key|key = $HTTPS_KEY|" "$CONFIG"
fi
fi
if [ -n "$HASH_KEY" ]; then
if [ ! -z "$HASH_KEY" ]; then
sed -i "s|the-secret-for-session-checksums|$HASH_KEY|" "$CONFIG"
fi
if [ -n "$BLOCK_KEY" ]; then
if [ ! -z "$BLOCK_KEY" ]; then
sed -i "s|-encryption-key-|$BLOCK_KEY|" "$CONFIG"
fi
if [ -n "$INTERNAL_SHARED_SECRET_KEY" ]; then
if [ ! -z "$INTERNAL_SHARED_SECRET_KEY" ]; then
sed -i "s|the-shared-secret-for-internal-clients|$INTERNAL_SHARED_SECRET_KEY|" "$CONFIG"
fi
if [ -n "$NATS_URL" ]; then
if [ ! -z "$NATS_URL" ]; then
sed -i "s|#url = nats://localhost:4222|url = $NATS_URL|" "$CONFIG"
else
sed -i "s|#url = nats://localhost:4222|url = nats://loopback|" "$CONFIG"
fi
HAS_ETCD=
if [ -n "$ETCD_ENDPOINTS" ]; then
if [ ! -z "$ETCD_ENDPOINTS" ]; then
sed -i "s|#endpoints =.*|endpoints = $ETCD_ENDPOINTS|" "$CONFIG"
HAS_ETCD=1
else
if [ -n "$ETCD_DISCOVERY_SRV" ]; then
if [ ! -z "$ETCD_DISCOVERY_SRV" ]; then
sed -i "s|#discoverysrv =.*|discoverysrv = $ETCD_DISCOVERY_SRV|" "$CONFIG"
HAS_ETCD=1
fi
if [ -n "$ETCD_DISCOVERY_SERVICE" ]; then
if [ ! -z "$ETCD_DISCOVERY_SERVICE" ]; then
sed -i "s|#discoveryservice =.*|discoveryservice = $ETCD_DISCOVERY_SERVICE|" "$CONFIG"
fi
fi
if [ -n "$HAS_ETCD" ]; then
if [ -n "$ETCD_CLIENT_KEY" ]; then
if [ ! -z "$HAS_ETCD" ]; then
if [ ! -z "$ETCD_CLIENT_KEY" ]; then
sed -i "s|#clientkey = /path/to/etcd-client.key|clientkey = $ETCD_CLIENT_KEY|" "$CONFIG"
fi
if [ -n "$ETCD_CLIENT_CERTIFICATE" ]; then
if [ ! -z "$ETCD_CLIENT_CERTIFICATE" ]; then
sed -i "s|#clientcert = /path/to/etcd-client.crt|clientcert = $ETCD_CLIENT_CERTIFICATE|" "$CONFIG"
fi
if [ -n "$ETCD_CLIENT_CA" ]; then
if [ ! -z "$ETCD_CLIENT_CA" ]; then
sed -i "s|#cacert = /path/to/etcd-ca.crt|cacert = $ETCD_CLIENT_CA|" "$CONFIG"
fi
fi
if [ -n "$USE_JANUS" ]; then
if [ ! -z "$USE_JANUS" ]; then
sed -i "s|#type =$|type = janus|" "$CONFIG"
if [ -n "$JANUS_URL" ]; then
if [ ! -z "$JANUS_URL" ]; then
sed -i "/proxy URLs to connect to/{n;s|#url =$|url = $JANUS_URL|}" "$CONFIG"
fi
elif [ -n "$USE_PROXY" ]; then
elif [ ! -z "$USE_PROXY" ]; then
sed -i "s|#type =$|type = proxy|" "$CONFIG"
if [ -n "$PROXY_TOKEN_ID" ]; then
sed -i "s|#token_id =.*|token_id = $PROXY_TOKEN_ID|" "$CONFIG"
fi
if [ -n "$PROXY_TOKEN_KEY" ]; then
sed -i "s|#token_key =.*|token_key = $PROXY_TOKEN_KEY|" "$CONFIG"
fi
if [ -n "$PROXY_ETCD" ]; then
if [ ! -z "$PROXY_ETCD" ]; then
if [ -z "$HAS_ETCD" ]; then
echo "No etcd endpoint configured, can't use etcd for proxy connections"
exit 1
@ -112,77 +101,79 @@ if [ ! -f "$CONFIG" ]; then
sed -i "s|#urltype = static|urltype = etcd|" "$CONFIG"
if [ -n "$PROXY_KEY_PREFIX" ]; then
if [ ! -z "$PROXY_TOKEN_ID" ]; then
sed -i "s|#token_id =.*|token_id = $PROXY_TOKEN_ID|" "$CONFIG"
fi
if [ ! -z "$PROXY_TOKEN_KEY" ]; then
sed -i "s|#token_key =.*|token_key = $PROXY_TOKEN_KEY|" "$CONFIG"
fi
if [ ! -z "$PROXY_KEY_PREFIX" ]; then
sed -i "s|#keyprefix =.*|keyprefix = $PROXY_KEY_PREFIX|" "$CONFIG"
fi
else
if [ -n "$PROXY_URLS" ]; then
if [ ! -z "$PROXY_URLS" ]; then
sed -i "/proxy URLs to connect to/{n;s|#url =$|url = $PROXY_URLS|}" "$CONFIG"
fi
if [ -n "$PROXY_DNS_DISCOVERY" ]; then
if [ ! -z "$PROXY_DNS_DISCOVERY" ]; then
sed -i "/or deleted as necessary/{n;s|#dnsdiscovery =.*|dnsdiscovery = true|}" "$CONFIG"
fi
fi
fi
if [ -n "$MAX_STREAM_BITRATE" ]; then
if [ ! -z "$MAX_STREAM_BITRATE" ]; then
sed -i "s|#maxstreambitrate =.*|maxstreambitrate = $MAX_STREAM_BITRATE|" "$CONFIG"
fi
if [ -n "$MAX_SCREEN_BITRATE" ]; then
if [ ! -z "$MAX_SCREEN_BITRATE" ]; then
sed -i "s|#maxscreenbitrate =.*|maxscreenbitrate = $MAX_SCREEN_BITRATE|" "$CONFIG"
fi
if [ -n "$SKIP_VERIFY" ]; then
if [ ! -z "$SKIP_VERIFY" ]; then
sed -i "s|#skipverify =.*|skipverify = $SKIP_VERIFY|" "$CONFIG"
fi
if [ -n "$TURN_API_KEY" ]; then
if [ ! -z "$TURN_API_KEY" ]; then
sed -i "s|#\?apikey =.*|apikey = $TURN_API_KEY|" "$CONFIG"
fi
if [ -n "$TURN_SECRET" ]; then
if [ ! -z "$TURN_SECRET" ]; then
sed -i "/same as on the TURN server/{n;s|#\?secret =.*|secret = $TURN_SECRET|}" "$CONFIG"
fi
if [ -n "$TURN_SERVERS" ]; then
if [ ! -z "$TURN_SERVERS" ]; then
sed -i "s|#servers =.*|servers = $TURN_SERVERS|" "$CONFIG"
fi
if [ -n "$GEOIP_LICENSE" ]; then
if [ ! -z "$GEOIP_LICENSE" ]; then
sed -i "s|#license =.*|license = $GEOIP_LICENSE|" "$CONFIG"
fi
if [ -n "$GEOIP_URL" ]; then
if [ ! -z "$GEOIP_URL" ]; then
sed -i "/looking up IP addresses/{n;s|#url =$|url = $GEOIP_URL|}" "$CONFIG"
fi
if [ -n "$STATS_IPS" ]; then
if [ ! -z "$STATS_IPS" ]; then
sed -i "s|#allowed_ips =.*|allowed_ips = $STATS_IPS|" "$CONFIG"
fi
if [ -n "$TRUSTED_PROXIES" ]; then
sed -i "s|#trustedproxies =.*|trustedproxies = $TRUSTED_PROXIES|" "$CONFIG"
fi
if [ -n "$GRPC_LISTEN" ]; then
if [ ! -z "$GRPC_LISTEN" ]; then
sed -i "s|#listen = 0.0.0.0:9090|listen = $GRPC_LISTEN|" "$CONFIG"
if [ -n "$GRPC_SERVER_CERTIFICATE" ]; then
if [ ! -z "$GRPC_SERVER_CERTIFICATE" ]; then
sed -i "s|#servercertificate =.*|servercertificate = $GRPC_SERVER_CERTIFICATE|" "$CONFIG"
fi
if [ -n "$GRPC_SERVER_KEY" ]; then
if [ ! -z "$GRPC_SERVER_KEY" ]; then
sed -i "s|#serverkey =.*|serverkey = $GRPC_SERVER_KEY|" "$CONFIG"
fi
if [ -n "$GRPC_SERVER_CA" ]; then
if [ ! -z "$GRPC_SERVER_CA" ]; then
sed -i "s|#serverca =.*|serverca = $GRPC_SERVER_CA|" "$CONFIG"
fi
if [ -n "$GRPC_CLIENT_CERTIFICATE" ]; then
if [ ! -z "$GRPC_CLIENT_CERTIFICATE" ]; then
sed -i "s|#clientcertificate =.*|clientcertificate = $GRPC_CLIENT_CERTIFICATE|" "$CONFIG"
fi
if [ -n "$GRPC_CLIENT_KEY" ]; then
if [ ! -z "$GRPC_CLIENT_KEY" ]; then
sed -i "s|#clientkey = /path/to/grpc-client.key|clientkey = $GRPC_CLIENT_KEY|" "$CONFIG"
fi
if [ -n "$GRPC_CLIENT_CA" ]; then
if [ ! -z "$GRPC_CLIENT_CA" ]; then
sed -i "s|#clientca =.*|clientca = $GRPC_CLIENT_CA|" "$CONFIG"
fi
if [ -n "$GRPC_ETCD" ]; then
if [ ! -z "$GRPC_ETCD" ]; then
if [ -z "$HAS_ETCD" ]; then
echo "No etcd endpoint configured, can't use etcd for GRPC"
exit 1
@ -190,78 +181,69 @@ if [ ! -f "$CONFIG" ]; then
sed -i "s|#targettype =$|targettype = etcd|" "$CONFIG"
if [ -n "$GRPC_TARGET_PREFIX" ]; then
if [ ! -z "$GRPC_TARGET_PREFIX" ]; then
sed -i "s|#targetprefix =.*|targetprefix = $GRPC_TARGET_PREFIX|" "$CONFIG"
fi
else
if [ -n "$GRPC_TARGETS" ]; then
if [ ! -z "$GRPC_TARGETS" ]; then
sed -i "s|#targets =.*|targets = $GRPC_TARGETS|" "$CONFIG"
if [ -n "$GRPC_DNS_DISCOVERY" ]; then
if [ ! -z "$GRPC_DNS_DISCOVERY" ]; then
sed -i "/# deleted as necessary/{n;s|#dnsdiscovery =.*|dnsdiscovery = true|}" "$CONFIG"
fi
fi
fi
fi
if [ -n "$GEOIP_OVERRIDES" ]; then
if [ ! -z "$GEOIP_OVERRIDES" ]; then
sed -i "s|\[geoip-overrides\]|#[geoip-overrides]|" "$CONFIG"
echo >> "$CONFIG"
echo "[geoip-overrides]" >> "$CONFIG"
for override in $GEOIP_OVERRIDES; do
echo "$override" >> "$CONFIG"
echo $override >> "$CONFIG"
done
echo >> "$CONFIG"
fi
if [ -n "$CONTINENT_OVERRIDES" ]; then
if [ ! -z "$CONTINENT_OVERRIDES" ]; then
sed -i "s|\[continent-overrides\]|#[continent-overrides]|" "$CONFIG"
echo >> "$CONFIG"
echo "[continent-overrides]" >> "$CONFIG"
for override in $CONTINENT_OVERRIDES; do
echo "$override" >> "$CONFIG"
echo $override >> "$CONFIG"
done
echo >> "$CONFIG"
fi
if [ -n "$BACKENDS_ALLOWALL" ]; then
sed -i "s|allowall = false|allowall = $BACKENDS_ALLOWALL|" "$CONFIG"
fi
if [ -n "$BACKENDS_ALLOWALL_SECRET" ]; then
sed -i "s|#secret = the-shared-secret-for-allowall|secret = $BACKENDS_ALLOWALL_SECRET|" "$CONFIG"
fi
if [ -n "$BACKENDS" ]; then
BACKENDS_CONFIG=${BACKENDS// /,}
sed -i "s|#backends = .*|backends = $BACKENDS_CONFIG|" "$CONFIG"
if [ ! -z "$BACKENDS" ]; then
sed -i "s|#backends = .*|backends = $BACKENDS|" "$CONFIG"
echo >> "$CONFIG"
for backend in $BACKENDS; do
echo "[$backend]" >> "$CONFIG"
declare var="BACKEND_${backend^^}_URL"
if [ -n "${!var}" ]; then
if [ ! -z "${!var}" ]; then
echo "url = ${!var}" >> "$CONFIG"
fi
declare var="BACKEND_${backend^^}_SHARED_SECRET"
if [ -n "${!var}" ]; then
if [ ! -z "${!var}" ]; then
echo "secret = ${!var}" >> "$CONFIG"
fi
declare var="BACKEND_${backend^^}_SESSION_LIMIT"
if [ -n "${!var}" ]; then
if [ ! -z "${!var}" ]; then
echo "sessionlimit = ${!var}" >> "$CONFIG"
fi
declare var="BACKEND_${backend^^}_MAX_STREAM_BITRATE"
if [ -n "${!var}" ]; then
if [ ! -z "${!var}" ]; then
echo "maxstreambitrate = ${!var}" >> "$CONFIG"
fi
declare var="BACKEND_${backend^^}_MAX_SCREEN_BITRATE"
if [ -n "${!var}" ]; then
if [ ! -z "${!var}" ]; then
echo "maxscreenbitrate = ${!var}" >> "$CONFIG"
fi
echo >> "$CONFIG"
@ -270,4 +252,4 @@ if [ ! -f "$CONFIG" ]; then
fi
echo "Starting signaling server with $CONFIG ..."
exec /usr/bin/nextcloud-spreed-signaling -config "$CONFIG"
exec "$@"

View file

@ -1,26 +0,0 @@
#!/bin/bash
#
# Standalone signaling server for the Nextcloud Spreed app.
# Copyright (C) 2024 struktur AG
#
# @author Joachim Bauch <bauch@struktur.de>
#
# @license GNU AGPL version 3 or any later version
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
set -e
echo "Schedule signaling server to shutdown ..."
exec killall -USR1 nextcloud-spreed-signaling

View file

@ -1,33 +0,0 @@
#!/bin/bash
#
# Standalone signaling server for the Nextcloud Spreed app.
# Copyright (C) 2024 struktur AG
#
# @author Joachim Bauch <bauch@struktur.de>
#
# @license GNU AGPL version 3 or any later version
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
set -e
echo "Waiting for signaling server to shutdown ..."
while true
do
if ! pgrep nextcloud-spreed-signaling > /dev/null ; then
echo "Signaling server has stopped"
exit 0
fi
sleep 1
done

View file

@ -48,6 +48,3 @@ The following metrics are available:
| `signaling_grpc_clients` | Gauge | 1.0.0 | The current number of GRPC clients | |
| `signaling_grpc_client_calls_total` | Counter | 1.0.0 | The total number of GRPC client calls | `method` |
| `signaling_grpc_server_calls_total` | Counter | 1.0.0 | The total number of GRPC server calls | `method` |
| `signaling_http_client_pool_connections` | Gauge | 1.2.4 | The current number of HTTP client connections per host | `host` |
| `signaling_throttle_delayed_total` | Counter | 1.2.5 | The total number of delayed requests | `action`, `delay` |
| `signaling_throttle_bruteforce_total` | Counter | 1.2.5 | The total number of rejected bruteforce requests | `action` |

View file

@ -1,6 +1,6 @@
jinja2==3.1.4
markdown==3.6
mkdocs==1.6.0
readthedocs-sphinx-search==0.3.2
sphinx==7.3.7
sphinx_rtd_theme==2.0.0
jinja2==3.1.2
markdown==3.3.7
mkdocs==1.4.2
readthedocs-sphinx-search==0.2.0
sphinx==6.1.3
sphinx_rtd_theme==1.2.0

View file

@ -460,26 +460,6 @@ Message format (Server -> Client):
the current room or the properties of a room change.
Message format (Server -> Client if already joined before):
{
"id": "unique-request-id-from-request",
"type": "error",
"error": {
"code": "already_joined",
"message": "Human readable error message",
"details": {
"roomid": "the-room-id",
"properties": {
...additional room properties...
}
}
}
}
- Sent if a client tried to join a room it is already in.
### Backend validation
Rooms are managed by the Nextcloud backend, so the signaling server has to
@ -793,74 +773,6 @@ Message format (Server -> Client, receive message)
- The `userid` is omitted if a message was sent by an anonymous user.
## Control messages
Similar to regular messages between clients which can be sent by any session,
messages with type `control` can only be sent if the permission flag `control`
is available.
These messages can be used to perform actions on clients that should only be
possible by some users (e.g. moderators).
Message format (Client -> Server, mute phone):
{
"id": "unique-request-id",
"type": "control",
"control": {
"recipient": {
"type": "session",
"sessionid": "the-session-id-to-send-to"
},
"data": {
"type": "mute",
"audio": "audio-flags"
}
}
}
The bit-field `audio-flags` supports the following bits:
- `1`: mute speaking (i.e. phone can no longer talk)
- `2`: mute listening (i.e. phone is on hold and can no longer hear)
To unmute, a value of `0` must be sent.
Message format (Client -> Server, hangup phone):
{
"id": "unique-request-id",
"type": "control",
"control": {
"recipient": {
"type": "session",
"sessionid": "the-session-id-to-send-to"
},
"data": {
"type": "hangup"
}
}
}
Message format (Client -> Server, send DTMF):
{
"id": "unique-request-id",
"type": "control",
"control": {
"recipient": {
"type": "session",
"sessionid": "the-session-id-to-send-to"
},
"data": {
"type": "dtmf",
"digit": "the-digit"
}
}
}
Supported digits are `0`-`9`, `*` and `#`.
## Transient data
Transient data can be used to share data in a room that is valid while sessions
@ -885,17 +797,14 @@ Message format (Client -> Server):
"transient": {
"type": "set",
"key": "sample-key",
"value": "any-json-object",
"ttl": "optional-ttl"
"value": "any-json-object"
}
}
- The `key` must be a string.
- The `value` can be of any type (i.e. string, number, array, object, etc.).
- The `ttl` is the time to live in nanoseconds. The value will be removed after
that time (if it is still present).
- Requests to set a value that is already present for the key are silently
ignored. Any TTL value will be updated / removed.
ignored.
Message format (Server -> Client):
@ -1004,13 +913,6 @@ Message format (Client -> Server):
}
Phone sessions will have `type` set to `phone` in the additional user data
(which will be included in the `joined` [room event](#room-events)),
`callid` will be the id of the phone call and `number` the target of the call.
The call id will match the one returned for accepted outgoing calls and the
associated session id can be used to hangup a call or send DTMF tones to it.
### Update virtual session
Message format (Client -> Server):
@ -1282,49 +1184,3 @@ Message format (Server -> Client):
Clients are expected to follow the `switchto` message. If clients don't switch
to the target room after some time, they might get disconnected.
### Start dialout from a room
Use this to start a phone dialout to a new user in a given room.
Message format (Backend -> Server)
{
"type": "dialout"
"dialout" {
"number": "e164-target-number",
"options": {
...arbitrary options that will be sent back to validate...
}
}
}
Please note that this requires a connected internal client that supports
dialout (e.g. the SIP bridge).
Message format (Server -> Backend, request was accepted)
{
"type": "dialout"
"dialout" {
"callid": "the-unique-call-id"
}
}
Message format (Server -> Backend, request could not be processed)
{
"type": "dialout"
"dialout" {
"error": {
"code": "the-internal-message-id",
"message": "human-readable-error-message",
"details": {
...optional additional details...
}
}
}
}
A HTTP error status code will be set in this case.

View file

@ -23,7 +23,6 @@ package signaling
import (
"context"
"errors"
"fmt"
"log"
"strings"
@ -35,8 +34,6 @@ import (
"go.etcd.io/etcd/client/pkg/v3/srv"
"go.etcd.io/etcd/client/pkg/v3/transport"
clientv3 "go.etcd.io/etcd/client/v3"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
type EtcdClientListener interface {
@ -45,8 +42,8 @@ type EtcdClientListener interface {
type EtcdClientWatcher interface {
EtcdWatchCreated(client *EtcdClient, key string)
EtcdKeyUpdated(client *EtcdClient, key string, value []byte, prevValue []byte)
EtcdKeyDeleted(client *EtcdClient, key string, prevValue []byte)
EtcdKeyUpdated(client *EtcdClient, key string, value []byte)
EtcdKeyDeleted(client *EtcdClient, key string)
}
type EtcdClient struct {
@ -115,17 +112,6 @@ func (c *EtcdClient) load(config *goconf.ConfigFile, ignoreErrors bool) error {
DialTimeout: time.Second,
}
if logLevel, _ := config.GetString("etcd", "loglevel"); logLevel != "" {
var l zapcore.Level
if err := l.Set(logLevel); err != nil {
return fmt.Errorf("Unsupported etcd log level %s: %w", logLevel, err)
}
logConfig := zap.NewProductionConfig()
logConfig.Level = zap.NewAtomicLevelAt(l)
cfg.LogConfig = &logConfig
}
clientKey := c.getConfigStringWithFallback(config, "clientkey")
clientCert := c.getConfigStringWithFallback(config, "clientcert")
caCert := c.getConfigStringWithFallback(config, "cacert")
@ -190,8 +176,8 @@ func (c *EtcdClient) getEtcdClient() *clientv3.Client {
return client.(*clientv3.Client)
}
func (c *EtcdClient) syncClient(ctx context.Context) error {
ctx, cancel := context.WithTimeout(ctx, time.Second)
func (c *EtcdClient) syncClient() error {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
return c.getEtcdClient().Sync(ctx)
@ -226,32 +212,26 @@ func (c *EtcdClient) RemoveListener(listener EtcdClientListener) {
delete(c.listeners, listener)
}
func (c *EtcdClient) WaitForConnection(ctx context.Context) error {
backoff, err := NewExponentialBackoff(initialWaitDelay, maxWaitDelay)
if err != nil {
return err
}
func (c *EtcdClient) WaitForConnection() {
waitDelay := initialWaitDelay
for {
if err := ctx.Err(); err != nil {
return err
}
if err := c.syncClient(ctx); err != nil {
if errors.Is(err, context.Canceled) {
return err
} else if errors.Is(err, context.DeadlineExceeded) {
log.Printf("Timeout waiting for etcd client to connect to the cluster, retry in %s", backoff.NextWait())
if err := c.syncClient(); err != nil {
if err == context.DeadlineExceeded {
log.Printf("Timeout waiting for etcd client to connect to the cluster, retry in %s", waitDelay)
} else {
log.Printf("Could not sync etcd client with the cluster, retry in %s: %s", backoff.NextWait(), err)
log.Printf("Could not sync etcd client with the cluster, retry in %s: %s", waitDelay, err)
}
backoff.Wait(ctx)
time.Sleep(waitDelay)
waitDelay = waitDelay * 2
if waitDelay > maxWaitDelay {
waitDelay = maxWaitDelay
}
continue
}
log.Printf("Client synced, using endpoints %+v", c.getEtcdClient().Endpoints())
return nil
return
}
}
@ -259,37 +239,27 @@ func (c *EtcdClient) Get(ctx context.Context, key string, opts ...clientv3.OpOpt
return c.getEtcdClient().Get(ctx, key, opts...)
}
func (c *EtcdClient) Watch(ctx context.Context, key string, nextRevision int64, watcher EtcdClientWatcher, opts ...clientv3.OpOption) (int64, error) {
log.Printf("Wait for leader and start watching on %s (rev=%d)", key, nextRevision)
opts = append(opts, clientv3.WithRev(nextRevision), clientv3.WithPrevKV())
func (c *EtcdClient) Watch(ctx context.Context, key string, watcher EtcdClientWatcher, opts ...clientv3.OpOption) error {
log.Printf("Wait for leader and start watching on %s", key)
ch := c.getEtcdClient().Watch(clientv3.WithRequireLeader(ctx), key, opts...)
log.Printf("Watch created for %s", key)
watcher.EtcdWatchCreated(c, key)
for response := range ch {
if err := response.Err(); err != nil {
return nextRevision, err
return err
}
nextRevision = response.Header.Revision + 1
for _, ev := range response.Events {
switch ev.Type {
case clientv3.EventTypePut:
var prevValue []byte
if ev.PrevKv != nil {
prevValue = ev.PrevKv.Value
}
watcher.EtcdKeyUpdated(c, string(ev.Kv.Key), ev.Kv.Value, prevValue)
watcher.EtcdKeyUpdated(c, string(ev.Kv.Key), ev.Kv.Value)
case clientv3.EventTypeDelete:
var prevValue []byte
if ev.PrevKv != nil {
prevValue = ev.PrevKv.Value
}
watcher.EtcdKeyDeleted(c, string(ev.Kv.Key), prevValue)
watcher.EtcdKeyDeleted(c, string(ev.Kv.Key))
default:
log.Printf("Unsupported watch event %s %q -> %q", ev.Type, ev.Kv.Key, ev.Kv.Value)
}
}
}
return nextRevision, nil
return nil
}

View file

@ -29,6 +29,7 @@ import (
"os"
"runtime"
"strconv"
"sync"
"syscall"
"testing"
"time"
@ -38,8 +39,6 @@ import (
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/embed"
"go.etcd.io/etcd/server/v3/lease"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
var (
@ -80,17 +79,13 @@ func NewEtcdForTest(t *testing.T) *embed.Etcd {
var etcd *embed.Etcd
for port := 50000; port < 50100; port++ {
u.Host = net.JoinHostPort("localhost", strconv.Itoa(port))
cfg.ListenClientUrls = []url.URL{*u}
cfg.AdvertiseClientUrls = []url.URL{*u}
httpListener := u
httpListener.Host = net.JoinHostPort("localhost", strconv.Itoa(port+1))
cfg.ListenClientHttpUrls = []url.URL{*httpListener}
cfg.LCUrls = []url.URL{*u}
cfg.ACUrls = []url.URL{*u}
peerListener := u
peerListener.Host = net.JoinHostPort("localhost", strconv.Itoa(port+2))
cfg.ListenPeerUrls = []url.URL{*peerListener}
cfg.AdvertisePeerUrls = []url.URL{*peerListener}
peerListener.Host = net.JoinHostPort("localhost", strconv.Itoa(port+1))
cfg.LPUrls = []url.URL{*peerListener}
cfg.APUrls = []url.URL{*peerListener}
cfg.InitialCluster = "default=" + peerListener.String()
cfg.ZapLoggerBuilder = embed.NewZapLoggerBuilder(zaptest.NewLogger(t, zaptest.Level(zap.WarnLevel)))
etcd, err = embed.StartEtcd(cfg)
if isErrorAddressAlreadyInUse(err) {
continue
@ -105,7 +100,6 @@ func NewEtcdForTest(t *testing.T) *embed.Etcd {
t.Cleanup(func() {
etcd.Close()
<-etcd.Server.StopNotify()
})
// Wait for server to be ready.
<-etcd.Server.ReadyNotify()
@ -117,8 +111,7 @@ func NewEtcdClientForTest(t *testing.T) (*embed.Etcd, *EtcdClient) {
etcd := NewEtcdForTest(t)
config := goconf.NewConfigFile()
config.AddOption("etcd", "endpoints", etcd.Config().ListenClientUrls[0].String())
config.AddOption("etcd", "loglevel", "error")
config.AddOption("etcd", "endpoints", etcd.Config().LCUrls[0].String())
client, err := NewEtcdClient(config, "")
if err != nil {
@ -147,8 +140,6 @@ func DeleteEtcdValue(etcd *embed.Etcd, key string) {
}
func Test_EtcdClient_Get(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
etcd, client := NewEtcdClientForTest(t)
if response, err := client.Get(context.Background(), "foo"); err != nil {
@ -171,8 +162,6 @@ func Test_EtcdClient_Get(t *testing.T) {
}
func Test_EtcdClient_GetPrefix(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
etcd, client := NewEtcdClientForTest(t)
if response, err := client.Get(context.Background(), "foo"); err != nil {
@ -204,8 +193,6 @@ type etcdEvent struct {
t mvccpb.Event_EventType
key string
value string
prevValue string
}
type EtcdClientTestListener struct {
@ -214,8 +201,9 @@ type EtcdClientTestListener struct {
ctx context.Context
cancel context.CancelFunc
initial chan struct{}
events chan etcdEvent
initial chan bool
initialWg sync.WaitGroup
events chan etcdEvent
}
func NewEtcdClientTestListener(ctx context.Context, t *testing.T) *EtcdClientTestListener {
@ -226,7 +214,7 @@ func NewEtcdClientTestListener(ctx context.Context, t *testing.T) *EtcdClientTes
ctx: ctx,
cancel: cancel,
initial: make(chan struct{}),
initial: make(chan bool),
events: make(chan etcdEvent),
}
}
@ -236,17 +224,20 @@ func (l *EtcdClientTestListener) Close() {
}
func (l *EtcdClientTestListener) EtcdClientCreated(client *EtcdClient) {
l.initialWg.Add(1)
go func() {
if err := client.WaitForConnection(l.ctx); err != nil {
l.t.Errorf("error waiting for connection: %s", err)
return
if err := client.Watch(clientv3.WithRequireLeader(l.ctx), "foo", l, clientv3.WithPrefix()); err != nil {
l.t.Error(err)
}
}()
go func() {
client.WaitForConnection()
ctx, cancel := context.WithTimeout(l.ctx, time.Second)
defer cancel()
response, err := client.Get(ctx, "foo", clientv3.WithPrefix())
if err != nil {
if response, err := client.Get(ctx, "foo", clientv3.WithPrefix()); err != nil {
l.t.Error(err)
} else if response.Count != 1 {
l.t.Errorf("expected 1 responses, got %d", response.Count)
@ -255,47 +246,31 @@ func (l *EtcdClientTestListener) EtcdClientCreated(client *EtcdClient) {
} else if string(response.Kvs[0].Value) != "1" {
l.t.Errorf("expected value \"1\", got \"%s\"", string(response.Kvs[0].Value))
}
close(l.initial)
nextRevision := response.Header.Revision + 1
for l.ctx.Err() == nil {
var err error
if nextRevision, err = client.Watch(clientv3.WithRequireLeader(l.ctx), "foo", nextRevision, l, clientv3.WithPrefix()); err != nil {
l.t.Error(err)
}
}
l.initialWg.Wait()
l.initial <- true
}()
}
func (l *EtcdClientTestListener) EtcdWatchCreated(client *EtcdClient, key string) {
l.initialWg.Done()
}
func (l *EtcdClientTestListener) EtcdKeyUpdated(client *EtcdClient, key string, value []byte, prevValue []byte) {
evt := etcdEvent{
func (l *EtcdClientTestListener) EtcdKeyUpdated(client *EtcdClient, key string, value []byte) {
l.events <- etcdEvent{
t: clientv3.EventTypePut,
key: string(key),
value: string(value),
}
if len(prevValue) > 0 {
evt.prevValue = string(prevValue)
}
l.events <- evt
}
func (l *EtcdClientTestListener) EtcdKeyDeleted(client *EtcdClient, key string, prevValue []byte) {
evt := etcdEvent{
func (l *EtcdClientTestListener) EtcdKeyDeleted(client *EtcdClient, key string) {
l.events <- etcdEvent{
t: clientv3.EventTypeDelete,
key: string(key),
}
if len(prevValue) > 0 {
evt.prevValue = string(prevValue)
}
l.events <- evt
}
func Test_EtcdClient_Watch(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
etcd, client := NewEtcdClientForTest(t)
SetEtcdValue(etcd, "foo/a", []byte("1"))
@ -318,23 +293,11 @@ func Test_EtcdClient_Watch(t *testing.T) {
t.Errorf("expected value %s, got %s", "2", event.value)
}
SetEtcdValue(etcd, "foo/a", []byte("3"))
event = <-listener.events
if event.t != clientv3.EventTypePut {
t.Errorf("expected type %d, got %d", clientv3.EventTypePut, event.t)
} else if event.key != "foo/a" {
t.Errorf("expected key %s, got %s", "foo/a", event.key)
} else if event.value != "3" {
t.Errorf("expected value %s, got %s", "3", event.value)
}
DeleteEtcdValue(etcd, "foo/a")
event = <-listener.events
if event.t != clientv3.EventTypeDelete {
t.Errorf("expected type %d, got %d", clientv3.EventTypeDelete, event.t)
} else if event.key != "foo/a" {
t.Errorf("expected key %s, got %s", "foo/a", event.key)
} else if event.prevValue != "3" {
t.Errorf("expected previous value %s, got %s", "3", event.prevValue)
}
}

View file

@ -1,168 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2024 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"context"
"errors"
"log"
"os"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/fsnotify/fsnotify"
)
const (
	// Default interval during which multiple fsnotify events for the same
	// path are collapsed into a single callback invocation.
	defaultDeduplicateWatchEvents = 100 * time.Millisecond
)

var (
	// Active deduplication interval, stored as int64 nanoseconds
	// (a time.Duration). Kept atomic so it can be changed while watchers
	// are running — presumably by tests; TODO confirm against callers.
	deduplicateWatchEvents atomic.Int64
)

func init() {
	// Seed the configurable interval with the compile-time default.
	deduplicateWatchEvents.Store(int64(defaultDeduplicateWatchEvents))
}
// FileWatcherCallback is invoked with the watched filename whenever the
// file was modified or replaced.
type FileWatcherCallback func(filename string)

// FileWatcher monitors a single file (resolving symlinks) for
// modifications and invokes a callback when it changes.
type FileWatcher struct {
	filename string // path as passed by the caller
	target   string // resolved symlink target of "filename"

	callback FileWatcherCallback

	watcher *fsnotify.Watcher

	closeCtx  context.Context // canceled by Close() to stop run()
	closeFunc context.CancelFunc
}
// NewFileWatcher starts watching "filename" (resolving symlinks) and
// invokes "callback" whenever the file is modified or replaced. The
// returned watcher must be released with Close.
func NewFileWatcher(filename string, callback FileWatcherCallback) (*FileWatcher, error) {
	target, err := filepath.EvalSymlinks(filename)
	if err != nil {
		return nil, err
	}

	notifier, err := fsnotify.NewWatcher()
	if err != nil {
		return nil, err
	}

	// Watch both the resolved file and the directory of the original
	// name; directory events are needed to notice renames and symlink
	// replacements.
	for _, p := range []string{target, path.Dir(filename)} {
		if err := notifier.Add(p); err != nil {
			notifier.Close() // nolint
			return nil, err
		}
	}

	ctx, cancel := context.WithCancel(context.Background())
	w := &FileWatcher{
		filename:  filename,
		target:    target,
		callback:  callback,
		watcher:   notifier,
		closeCtx:  ctx,
		closeFunc: cancel,
	}
	go w.run()
	return w, nil
}
// Close stops the event loop started by NewFileWatcher and releases the
// underlying fsnotify watcher, returning any error from that release.
func (f *FileWatcher) Close() error {
	f.closeFunc()
	return f.watcher.Close()
}
// run is the background event loop of the watcher. It translates raw
// fsnotify events into callback invocations, deduplicating bursts of
// events for the same path and following symlink target changes. It
// exits when the fsnotify error channel is closed (receive yields nil)
// or when Close() cancels closeCtx.
func (f *FileWatcher) run() {
	// Guards "timers": pending deduplication timers, keyed by event path.
	var mu sync.Mutex
	timers := make(map[string]*time.Timer)

	triggerEvent := func(event fsnotify.Event) {
		deduplicate := time.Duration(deduplicateWatchEvents.Load())
		if deduplicate <= 0 {
			// Deduplication disabled: fire the callback immediately.
			f.callback(f.filename)
			return
		}

		// Use timer to deduplicate multiple events for the same file.
		mu.Lock()
		t, found := timers[event.Name]
		mu.Unlock()
		if !found {
			// First event for this path: schedule the callback.
			t = time.AfterFunc(deduplicate, func() {
				f.callback(f.filename)

				mu.Lock()
				delete(timers, event.Name)
				mu.Unlock()
			})
			mu.Lock()
			timers[event.Name] = t
			mu.Unlock()
		} else {
			// Follow-up event within the window: push the timer back.
			t.Reset(deduplicate)
		}
	}

	for {
		select {
		case event := <-f.watcher.Events:
			// Only writes, creations and renames are relevant.
			if !event.Has(fsnotify.Write) && !event.Has(fsnotify.Create) && !event.Has(fsnotify.Rename) {
				continue
			}

			if stat, err := os.Lstat(event.Name); err != nil {
				if !errors.Is(err, os.ErrNotExist) {
					log.Printf("Could not lstat %s: %s", event.Name, err)
				}
			} else if stat.Mode()&os.ModeSymlink != 0 {
				// Event is for a symlink: only trigger if it is the
				// watched name and now resolves to a different target.
				target, err := filepath.EvalSymlinks(event.Name)
				if err == nil && target != f.target && strings.HasSuffix(event.Name, f.filename) {
					f.target = target
					triggerEvent(event)
				}
				continue
			}

			if strings.HasSuffix(event.Name, f.filename) || strings.HasSuffix(event.Name, f.target) {
				triggerEvent(event)
			}
		case err := <-f.watcher.Errors:
			if err == nil {
				// Error channel was closed, the watcher is shutting down.
				return
			}
			log.Printf("Error watching %s: %s", f.filename, err)
		case <-f.closeCtx.Done():
			return
		}
	}
}

View file

@ -1,310 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2024 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"context"
"errors"
"os"
"path"
"testing"
)
var (
	// How long tests wait to make sure no (further) watch event arrives;
	// twice the deduplication window so a pending timer would have fired.
	testWatcherNoEventTimeout = 2 * defaultDeduplicateWatchEvents
)
// TestFileWatcher_NotExist verifies that watching a file that does not
// exist fails with os.ErrNotExist.
func TestFileWatcher_NotExist(t *testing.T) {
	missing := path.Join(t.TempDir(), "test.txt")
	w, err := NewFileWatcher(missing, func(filename string) {})
	switch {
	case err == nil:
		t.Error("should not be able to watch non-existing files")
		if closeErr := w.Close(); closeErr != nil {
			t.Error(closeErr)
		}
	case !errors.Is(err, os.ErrNotExist):
		t.Error(err)
	}
}
// TestFileWatcher_File verifies that each write to a watched regular
// file produces exactly one callback invocation (deduplicated), and
// that the watcher does not leak goroutines.
func TestFileWatcher_File(t *testing.T) {
	// ensureNoGoroutinesLeak is a package-level helper (defined
	// elsewhere in this file) — presumably it compares goroutine counts
	// before/after; TODO confirm.
	ensureNoGoroutinesLeak(t, func(t *testing.T) {
		tmpdir := t.TempDir()
		filename := path.Join(tmpdir, "test.txt")
		if err := os.WriteFile(filename, []byte("Hello world!"), 0644); err != nil {
			t.Fatal(err)
		}

		modified := make(chan struct{})
		w, err := NewFileWatcher(filename, func(filename string) {
			modified <- struct{}{}
		})
		if err != nil {
			t.Fatal(err)
		}
		defer w.Close()

		// First write: exactly one event.
		if err := os.WriteFile(filename, []byte("Updated"), 0644); err != nil {
			t.Fatal(err)
		}
		<-modified

		// No duplicate event within the deduplication window.
		ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
		defer cancel()
		select {
		case <-modified:
			t.Error("should not have received another event")
		case <-ctxTimeout.Done():
		}

		// Second write after the window: again exactly one event.
		if err := os.WriteFile(filename, []byte("Updated"), 0644); err != nil {
			t.Fatal(err)
		}
		<-modified

		ctxTimeout, cancel = context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
		defer cancel()
		select {
		case <-modified:
			t.Error("should not have received another event")
		case <-ctxTimeout.Done():
		}
	})
}
// TestFileWatcher_Rename checks that renaming a temporary file onto the
// watched path triggers exactly one event, while creating the temporary
// file itself triggers none.
func TestFileWatcher_Rename(t *testing.T) {
	tmpdir := t.TempDir()
	watched := path.Join(tmpdir, "test.txt")
	if err := os.WriteFile(watched, []byte("Hello world!"), 0644); err != nil {
		t.Fatal(err)
	}

	modified := make(chan struct{})
	w, err := NewFileWatcher(watched, func(filename string) {
		modified <- struct{}{}
	})
	if err != nil {
		t.Fatal(err)
	}
	defer w.Close()

	// Asserts that no event arrives within the deduplication window.
	expectNoEvent := func() {
		ctx, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
		defer cancel()
		select {
		case <-modified:
			t.Error("should not have received another event")
		case <-ctx.Done():
		}
	}

	// Writing an unrelated file must not notify the watcher.
	tmpname := path.Join(tmpdir, "test.txt.tmp")
	if err := os.WriteFile(tmpname, []byte("Updated"), 0644); err != nil {
		t.Fatal(err)
	}
	expectNoEvent()

	// Renaming onto the watched path triggers a single event.
	if err := os.Rename(tmpname, watched); err != nil {
		t.Fatal(err)
	}
	<-modified
	expectNoEvent()
}
// TestFileWatcher_Symlink checks that modifying the target of a watched
// symlink triggers exactly one event.
func TestFileWatcher_Symlink(t *testing.T) {
	tmpdir := t.TempDir()
	target := path.Join(tmpdir, "test1.txt")
	if err := os.WriteFile(target, []byte("Hello world!"), 0644); err != nil {
		t.Fatal(err)
	}

	link := path.Join(tmpdir, "symlink.txt")
	if err := os.Symlink(target, link); err != nil {
		t.Fatal(err)
	}

	modified := make(chan struct{})
	w, err := NewFileWatcher(link, func(filename string) {
		modified <- struct{}{}
	})
	if err != nil {
		t.Fatal(err)
	}
	defer w.Close()

	// Updating the symlink target must notify the watcher once.
	if err := os.WriteFile(target, []byte("Updated"), 0644); err != nil {
		t.Fatal(err)
	}
	<-modified

	// No further event may arrive after that.
	ctx, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
	defer cancel()
	select {
	case <-modified:
		t.Error("should not have received another event")
	case <-ctx.Done():
	}
}
// TestFileWatcher_ChangeSymlinkTarget verifies that atomically
// re-pointing the watched symlink to a different target file (create a
// new link, rename over the old one) triggers exactly one event.
func TestFileWatcher_ChangeSymlinkTarget(t *testing.T) {
	tmpdir := t.TempDir()
	sourceFilename1 := path.Join(tmpdir, "test1.txt")
	if err := os.WriteFile(sourceFilename1, []byte("Hello world!"), 0644); err != nil {
		t.Fatal(err)
	}

	sourceFilename2 := path.Join(tmpdir, "test2.txt")
	if err := os.WriteFile(sourceFilename2, []byte("Updated"), 0644); err != nil {
		t.Fatal(err)
	}

	filename := path.Join(tmpdir, "symlink.txt")
	if err := os.Symlink(sourceFilename1, filename); err != nil {
		t.Fatal(err)
	}

	modified := make(chan struct{})
	w, err := NewFileWatcher(filename, func(filename string) {
		modified <- struct{}{}
	})
	if err != nil {
		t.Fatal(err)
	}
	defer w.Close()

	// Replace symlink by creating new one and rename it to the original target.
	if err := os.Symlink(sourceFilename2, filename+".tmp"); err != nil {
		t.Fatal(err)
	}
	if err := os.Rename(filename+".tmp", filename); err != nil {
		t.Fatal(err)
	}

	// Exactly one event for the target change ...
	<-modified

	// ... and none afterwards.
	ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
	defer cancel()
	select {
	case <-modified:
		t.Error("should not have received another event")
	case <-ctxTimeout.Done():
	}
}
// TestFileWatcher_OtherSymlink checks that creating an unrelated
// symlink in the watched directory does not notify the watcher.
func TestFileWatcher_OtherSymlink(t *testing.T) {
	tmpdir := t.TempDir()
	target1 := path.Join(tmpdir, "test1.txt")
	if err := os.WriteFile(target1, []byte("Hello world!"), 0644); err != nil {
		t.Fatal(err)
	}

	target2 := path.Join(tmpdir, "test2.txt")
	if err := os.WriteFile(target2, []byte("Updated"), 0644); err != nil {
		t.Fatal(err)
	}

	link := path.Join(tmpdir, "symlink.txt")
	if err := os.Symlink(target1, link); err != nil {
		t.Fatal(err)
	}

	modified := make(chan struct{})
	w, err := NewFileWatcher(link, func(filename string) {
		modified <- struct{}{}
	})
	if err != nil {
		t.Fatal(err)
	}
	defer w.Close()

	// A second, unrelated symlink in the watched directory is ignored.
	if err := os.Symlink(target2, link+".tmp"); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
	defer cancel()
	select {
	case <-modified:
		t.Error("should not have received event for other symlink")
	case <-ctx.Done():
	}
}
// TestFileWatcher_RenameSymlinkTarget verifies that replacing the
// *target file* of a watched symlink (write a temp file, rename it over
// the target) triggers exactly one event, while creating the temp file
// alone triggers none.
func TestFileWatcher_RenameSymlinkTarget(t *testing.T) {
	tmpdir := t.TempDir()
	sourceFilename1 := path.Join(tmpdir, "test1.txt")
	if err := os.WriteFile(sourceFilename1, []byte("Hello world!"), 0644); err != nil {
		t.Fatal(err)
	}

	filename := path.Join(tmpdir, "test.txt")
	if err := os.Symlink(sourceFilename1, filename); err != nil {
		t.Fatal(err)
	}

	modified := make(chan struct{})
	w, err := NewFileWatcher(filename, func(filename string) {
		modified <- struct{}{}
	})
	if err != nil {
		t.Fatal(err)
	}
	defer w.Close()

	// Writing the temporary replacement must not notify the watcher.
	sourceFilename2 := path.Join(tmpdir, "test1.txt.tmp")
	if err := os.WriteFile(sourceFilename2, []byte("Updated"), 0644); err != nil {
		t.Fatal(err)
	}

	ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
	defer cancel()
	select {
	case <-modified:
		t.Error("should not have received another event")
	case <-ctxTimeout.Done():
	}

	// Renaming it over the symlink target triggers a single event.
	if err := os.Rename(sourceFilename2, sourceFilename1); err != nil {
		t.Fatal(err)
	}

	<-modified

	ctxTimeout, cancel = context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
	defer cancel()
	select {
	case <-modified:
		t.Error("should not have received another event")
	case <-ctxTimeout.Done():
	}
}

View file

@ -1,77 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2023 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"sync/atomic"
)
// Flags is a set of bit flags that can be manipulated atomically from
// multiple goroutines without additional locking.
type Flags struct {
	flags atomic.Uint32
}

// Add atomically sets all bits in "flags". It returns true if at least
// one of the bits was not set before, false if all were already set.
func (f *Flags) Add(flags uint32) bool {
	for {
		current := f.flags.Load()
		if current&flags == flags {
			// All requested bits are already present.
			return false
		}

		if f.flags.CompareAndSwap(current, current|flags) {
			return true
		}
		// Lost the race against a concurrent update, try again.
	}
}

// Remove atomically clears all bits in "flags". It returns true if at
// least one of the bits was set before, false otherwise.
func (f *Flags) Remove(flags uint32) bool {
	for {
		current := f.flags.Load()
		if current&flags == 0 {
			// None of the requested bits are present.
			return false
		}

		if f.flags.CompareAndSwap(current, current&^flags) {
			return true
		}
		// Lost the race against a concurrent update, try again.
	}
}

// Set atomically replaces the stored value with "flags". It returns
// true if the value changed, false if it already was "flags".
func (f *Flags) Set(flags uint32) bool {
	for {
		current := f.flags.Load()
		if current == flags {
			return false
		}

		if f.flags.CompareAndSwap(current, flags) {
			return true
		}
	}
}

// Get returns the current value of all flags.
func (f *Flags) Get() uint32 {
	return f.flags.Load()
}

View file

@ -1,143 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2023 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"sync"
"sync/atomic"
"testing"
)
// TestFlags exercises the Add/Remove/Set/Get state machine of Flags
// sequentially: each mutating call must report whether it changed the
// value, and Get must reflect the accumulated bits afterwards.
func TestFlags(t *testing.T) {
	var f Flags
	// A zero-value Flags starts with no bits set.
	if f.Get() != 0 {
		t.Fatalf("Expected flags 0, got %d", f.Get())
	}
	// First Add of a bit reports a change ...
	if !f.Add(1) {
		t.Error("expected true")
	}
	if f.Get() != 1 {
		t.Fatalf("Expected flags 1, got %d", f.Get())
	}
	// ... but adding the same bit again is a no-op.
	if f.Add(1) {
		t.Error("expected false")
	}
	if f.Get() != 1 {
		t.Fatalf("Expected flags 1, got %d", f.Get())
	}
	if !f.Add(2) {
		t.Error("expected true")
	}
	if f.Get() != 3 {
		t.Fatalf("Expected flags 3, got %d", f.Get())
	}
	// Removing a set bit reports a change ...
	if !f.Remove(1) {
		t.Error("expected true")
	}
	if f.Get() != 2 {
		t.Fatalf("Expected flags 2, got %d", f.Get())
	}
	// ... removing it again is a no-op.
	if f.Remove(1) {
		t.Error("expected false")
	}
	if f.Get() != 2 {
		t.Fatalf("Expected flags 2, got %d", f.Get())
	}
	// Adding a mask where only some bits are new still reports a change.
	if !f.Add(3) {
		t.Error("expected true")
	}
	if f.Get() != 3 {
		t.Fatalf("Expected flags 3, got %d", f.Get())
	}
	if !f.Remove(1) {
		t.Error("expected true")
	}
	if f.Get() != 2 {
		t.Fatalf("Expected flags 2, got %d", f.Get())
	}
}
func runConcurrentFlags(t *testing.T, count int, f func()) {
var start sync.WaitGroup
start.Add(1)
var ready sync.WaitGroup
var done sync.WaitGroup
for i := 0; i < count; i++ {
done.Add(1)
ready.Add(1)
go func() {
defer done.Done()
ready.Done()
start.Wait()
f()
}()
}
ready.Wait()
start.Done()
done.Wait()
}
// TestFlagsConcurrentAdd verifies that when many goroutines add the
// same flag concurrently, exactly one of them observes the change.
// Fix: corrected the misspelled "successfull" in the failure message.
func TestFlagsConcurrentAdd(t *testing.T) {
	t.Parallel()

	var flags Flags
	var added atomic.Int32
	runConcurrentFlags(t, 100, func() {
		if flags.Add(1) {
			added.Add(1)
		}
	})
	if added.Load() != 1 {
		t.Errorf("expected only one successful attempt, got %d", added.Load())
	}
}
// TestFlagsConcurrentRemove verifies that when many goroutines remove
// the same flag concurrently, exactly one of them observes the change.
// Fix: corrected the misspelled "successfull" in the failure message.
func TestFlagsConcurrentRemove(t *testing.T) {
	t.Parallel()

	var flags Flags
	flags.Set(1)
	var removed atomic.Int32
	runConcurrentFlags(t, 100, func() {
		if flags.Remove(1) {
			removed.Add(1)
		}
	})
	if removed.Load() != 1 {
		t.Errorf("expected only one successful attempt, got %d", removed.Load())
	}
}
// TestFlagsConcurrentSet verifies that when many goroutines set the
// same value concurrently, exactly one of them observes the change.
// Fix: corrected the misspelled "successfull" in the failure message.
func TestFlagsConcurrentSet(t *testing.T) {
	t.Parallel()

	var flags Flags
	var set atomic.Int32
	runConcurrentFlags(t, 100, func() {
		if flags.Set(1) {
			set.Add(1)
		}
	})
	if set.Load() != 1 {
		t.Errorf("expected only one successful attempt, got %d", set.Load())
	}
}

100
geoip.go
View file

@ -35,7 +35,6 @@ import (
"sync"
"time"
"github.com/dlintw/goconf"
"github.com/oschwald/maxminddb-golang"
)
@ -157,45 +156,36 @@ func (g *GeoLookup) updateUrl() error {
}
body := response.Body
url := g.url
if strings.HasSuffix(url, ".gz") {
if strings.HasSuffix(g.url, ".gz") {
body, err = gzip.NewReader(body)
if err != nil {
return err
}
url = strings.TrimSuffix(url, ".gz")
}
tarfile := tar.NewReader(body)
var geoipdata []byte
if strings.HasSuffix(url, ".tar") || strings.HasSuffix(url, "=tar") {
tarfile := tar.NewReader(body)
for {
header, err := tarfile.Next()
if err == io.EOF {
break
} else if err != nil {
return err
}
if !strings.HasSuffix(header.Name, ".mmdb") {
continue
}
geoipdata, err = io.ReadAll(tarfile)
if err != nil {
return err
}
for {
header, err := tarfile.Next()
if err == io.EOF {
break
} else if err != nil {
return err
}
} else {
geoipdata, err = io.ReadAll(body)
if !strings.HasSuffix(header.Name, ".mmdb") {
continue
}
geoipdata, err = io.ReadAll(tarfile)
if err != nil {
return err
}
break
}
if len(geoipdata) == 0 {
return fmt.Errorf("did not find GeoIP database in download from %s", g.url)
return fmt.Errorf("did not find MaxMind database in tarball from %s", g.url)
}
reader, err := maxminddb.FromBytes(geoipdata)
@ -277,63 +267,3 @@ func IsValidContinent(continent string) bool {
return false
}
}
func LoadGeoIPOverrides(config *goconf.ConfigFile, ignoreErrors bool) (map[*net.IPNet]string, error) {
options, _ := GetStringOptions(config, "geoip-overrides", true)
if len(options) == 0 {
return nil, nil
}
var err error
geoipOverrides := make(map[*net.IPNet]string, len(options))
for option, value := range options {
var ip net.IP
var ipNet *net.IPNet
if strings.Contains(option, "/") {
_, ipNet, err = net.ParseCIDR(option)
if err != nil {
if ignoreErrors {
log.Printf("could not parse CIDR %s (%s), skipping", option, err)
continue
}
return nil, fmt.Errorf("could not parse CIDR %s: %s", option, err)
}
} else {
ip = net.ParseIP(option)
if ip == nil {
if ignoreErrors {
log.Printf("could not parse IP %s, skipping", option)
continue
}
return nil, fmt.Errorf("could not parse IP %s", option)
}
var mask net.IPMask
if ipv4 := ip.To4(); ipv4 != nil {
mask = net.CIDRMask(32, 32)
} else {
mask = net.CIDRMask(128, 128)
}
ipNet = &net.IPNet{
IP: ip,
Mask: mask,
}
}
value = strings.ToUpper(strings.TrimSpace(value))
if value == "" {
log.Printf("IP %s doesn't have a country assigned, skipping", option)
continue
} else if !IsValidCountry(value) {
log.Printf("Country %s for IP %s is invalid, skipping", value, option)
continue
}
log.Printf("Using country %s for %s", value, ipNet)
geoipOverrides[ipNet] = value
}
return geoipOverrides, nil
}

View file

@ -24,14 +24,12 @@ package signaling
import (
"archive/tar"
"compress/gzip"
"fmt"
"io"
"net"
"net/http"
"os"
"strings"
"testing"
"time"
)
func testGeoLookupReader(t *testing.T, reader *GeoLookup) {
@ -59,27 +57,13 @@ func testGeoLookupReader(t *testing.T, reader *GeoLookup) {
}
}
func GetGeoIpUrlForTest(t *testing.T) string {
t.Helper()
var geoIpUrl string
if os.Getenv("USE_DB_IP_GEOIP_DATABASE") != "" {
now := time.Now().UTC()
geoIpUrl = fmt.Sprintf("https://download.db-ip.com/free/dbip-country-lite-%d-%.2d.mmdb.gz", now.Year(), now.Month())
}
if geoIpUrl == "" {
license := os.Getenv("MAXMIND_GEOLITE2_LICENSE")
if license == "" {
t.Skip("No MaxMind GeoLite2 license was set in MAXMIND_GEOLITE2_LICENSE environment variable.")
}
geoIpUrl = GetGeoIpDownloadUrl(license)
}
return geoIpUrl
}
func TestGeoLookup(t *testing.T) {
CatchLogForTest(t)
reader, err := NewGeoLookupFromUrl(GetGeoIpUrlForTest(t))
license := os.Getenv("MAXMIND_GEOLITE2_LICENSE")
if license == "" {
t.Skip("No MaxMind GeoLite2 license was set in MAXMIND_GEOLITE2_LICENSE environment variable.")
}
reader, err := NewGeoLookupFromUrl(GetGeoIpDownloadUrl(license))
if err != nil {
t.Fatal(err)
}
@ -93,8 +77,12 @@ func TestGeoLookup(t *testing.T) {
}
func TestGeoLookupCaching(t *testing.T) {
CatchLogForTest(t)
reader, err := NewGeoLookupFromUrl(GetGeoIpUrlForTest(t))
license := os.Getenv("MAXMIND_GEOLITE2_LICENSE")
if license == "" {
t.Skip("No MaxMind GeoLite2 license was set in MAXMIND_GEOLITE2_LICENSE environment variable.")
}
reader, err := NewGeoLookupFromUrl(GetGeoIpDownloadUrl(license))
if err != nil {
t.Fatal(err)
}
@ -140,7 +128,6 @@ func TestGeoLookupContinent(t *testing.T) {
}
func TestGeoLookupCloseEmpty(t *testing.T) {
CatchLogForTest(t)
reader, err := NewGeoLookupFromUrl("ignore-url")
if err != nil {
t.Fatal(err)
@ -149,23 +136,24 @@ func TestGeoLookupCloseEmpty(t *testing.T) {
}
func TestGeoLookupFromFile(t *testing.T) {
CatchLogForTest(t)
geoIpUrl := GetGeoIpUrlForTest(t)
license := os.Getenv("MAXMIND_GEOLITE2_LICENSE")
if license == "" {
t.Skip("No MaxMind GeoLite2 license was set in MAXMIND_GEOLITE2_LICENSE environment variable.")
}
resp, err := http.Get(geoIpUrl)
url := GetGeoIpDownloadUrl(license)
resp, err := http.Get(url)
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
body := resp.Body
url := geoIpUrl
if strings.HasSuffix(geoIpUrl, ".gz") {
if strings.HasSuffix(url, ".gz") {
body, err = gzip.NewReader(body)
if err != nil {
t.Fatal(err)
}
url = strings.TrimSuffix(url, ".gz")
}
tmpfile, err := os.CreateTemp("", "geoipdb")
@ -176,33 +164,21 @@ func TestGeoLookupFromFile(t *testing.T) {
os.Remove(tmpfile.Name())
})
tarfile := tar.NewReader(body)
foundDatabase := false
if strings.HasSuffix(url, ".tar") || strings.HasSuffix(url, "=tar") {
tarfile := tar.NewReader(body)
for {
header, err := tarfile.Next()
if err == io.EOF {
break
} else if err != nil {
t.Fatal(err)
}
if !strings.HasSuffix(header.Name, ".mmdb") {
continue
}
if _, err := io.Copy(tmpfile, tarfile); err != nil {
tmpfile.Close()
t.Fatal(err)
}
if err := tmpfile.Close(); err != nil {
t.Fatal(err)
}
foundDatabase = true
for {
header, err := tarfile.Next()
if err == io.EOF {
break
} else if err != nil {
t.Fatal(err)
}
} else {
if _, err := io.Copy(tmpfile, body); err != nil {
if !strings.HasSuffix(header.Name, ".mmdb") {
continue
}
if _, err := io.Copy(tmpfile, tarfile); err != nil {
tmpfile.Close()
t.Fatal(err)
}
@ -210,10 +186,11 @@ func TestGeoLookupFromFile(t *testing.T) {
t.Fatal(err)
}
foundDatabase = true
break
}
if !foundDatabase {
t.Fatalf("Did not find GeoIP database in download from %s", geoIpUrl)
t.Fatal("Did not find MaxMind database in tarball")
}
reader, err := NewGeoLookupFromFile(tmpfile.Name())

98
go.mod
View file

@ -1,89 +1,83 @@
module github.com/strukturag/nextcloud-spreed-signaling
go 1.21
go 1.18
require (
github.com/dlintw/goconf v0.0.0-20120228082610-dcc070983490
github.com/fsnotify/fsnotify v1.7.0
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/google/uuid v1.6.0
github.com/gorilla/mux v1.8.1
github.com/gorilla/securecookie v1.1.2
github.com/gorilla/websocket v1.5.3
github.com/google/uuid v1.3.0
github.com/gorilla/mux v1.8.0
github.com/gorilla/securecookie v1.1.1
github.com/gorilla/websocket v1.5.0
github.com/mailru/easyjson v0.7.7
github.com/nats-io/nats-server/v2 v2.10.16
github.com/nats-io/nats.go v1.36.0
github.com/nats-io/nats-server/v2 v2.9.15
github.com/nats-io/nats.go v1.24.0
github.com/notedit/janus-go v0.0.0-20200517101215-10eb8b95d1a0
github.com/oschwald/maxminddb-golang v1.13.0
github.com/pion/sdp/v3 v3.0.9
github.com/prometheus/client_golang v1.19.1
go.etcd.io/etcd/api/v3 v3.5.14
go.etcd.io/etcd/client/pkg/v3 v3.5.14
go.etcd.io/etcd/client/v3 v3.5.14
go.etcd.io/etcd/server/v3 v3.5.14
go.uber.org/zap v1.27.0
google.golang.org/grpc v1.64.0
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.4.0
google.golang.org/protobuf v1.34.2
github.com/oschwald/maxminddb-golang v1.10.0
github.com/pion/sdp/v3 v3.0.6
github.com/prometheus/client_golang v1.14.0
go.etcd.io/etcd/api/v3 v3.5.7
go.etcd.io/etcd/client/pkg/v3 v3.5.7
go.etcd.io/etcd/client/v3 v3.5.7
go.etcd.io/etcd/server/v3 v3.5.7
google.golang.org/grpc v1.53.0
google.golang.org/protobuf v1.29.0
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cenkalti/backoff/v4 v4.1.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
github.com/jonboulle/clockwork v0.2.2 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.8 // indirect
github.com/klauspost/compress v1.16.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/minio/highwayhash v1.0.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/nats-io/jwt/v2 v2.5.7 // indirect
github.com/nats-io/nkeys v0.4.7 // indirect
github.com/nats-io/jwt/v2 v2.3.0 // indirect
github.com/nats-io/nkeys v0.3.0 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/pion/randutil v0.1.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/sirupsen/logrus v1.7.0 // indirect
github.com/soheilhy/cmux v0.1.5 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
go.etcd.io/bbolt v1.3.10 // indirect
go.etcd.io/etcd/client/v2 v2.305.14 // indirect
go.etcd.io/etcd/pkg/v3 v3.5.14 // indirect
go.etcd.io/etcd/raft/v3 v3.5.14 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect
go.opentelemetry.io/otel v1.20.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 // indirect
go.opentelemetry.io/otel/metric v1.20.0 // indirect
go.opentelemetry.io/otel/sdk v1.20.0 // indirect
go.opentelemetry.io/otel/trace v1.20.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.uber.org/multierr v1.10.0 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/text v0.15.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
go.etcd.io/bbolt v1.3.6 // indirect
go.etcd.io/etcd/client/v2 v2.305.7 // indirect
go.etcd.io/etcd/pkg/v3 v3.5.7 // indirect
go.etcd.io/etcd/raft/v3 v3.5.7 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0 // indirect
go.opentelemetry.io/otel v1.0.1 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1 // indirect
go.opentelemetry.io/otel/sdk v1.0.1 // indirect
go.opentelemetry.io/otel/trace v1.0.1 // indirect
go.opentelemetry.io/proto/otlp v0.9.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.17.0 // indirect
golang.org/x/crypto v0.6.0 // indirect
golang.org/x/net v0.7.0 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect

632
go.sum
View file

@ -1,26 +1,73 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU=
cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E=
github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
@ -35,291 +82,590 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA=
github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/nats-io/jwt/v2 v2.5.7 h1:j5lH1fUXCnJnY8SsQeB/a/z9Azgu2bYIDvtPVNdxe2c=
github.com/nats-io/jwt/v2 v2.5.7/go.mod h1:ZdWS1nZa6WMZfFwwgpEaqBV8EPGVgOTDHN/wTbz0Y5A=
github.com/nats-io/nats-server/v2 v2.10.16 h1:2jXaiydp5oB/nAx/Ytf9fdCi9QN6ItIc9eehX8kwVV0=
github.com/nats-io/nats-server/v2 v2.10.16/go.mod h1:Pksi38H2+6xLe1vQx0/EA4bzetM0NqyIHcIbmgXSkIU=
github.com/nats-io/nats.go v1.36.0 h1:suEUPuWzTSse/XhESwqLxXGuj8vGRuPRoG7MoRN/qyU=
github.com/nats-io/nats.go v1.36.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nats-io/jwt/v2 v2.3.0 h1:z2mA1a7tIf5ShggOFlR1oBPgd6hGqcDYsISxZByUzdI=
github.com/nats-io/jwt/v2 v2.3.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k=
github.com/nats-io/nats-server/v2 v2.9.15 h1:MuwEJheIwpvFgqvbs20W8Ish2azcygjf4Z0liVu2I4c=
github.com/nats-io/nats-server/v2 v2.9.15/go.mod h1:QlCTy115fqpx4KSOPFIxSV7DdI6OxtZsGOL1JLdeRlE=
github.com/nats-io/nats.go v1.24.0 h1:CRiD8L5GOQu/DcfkmgBcTTIQORMwizF+rPk6T0RaHVQ=
github.com/nats-io/nats.go v1.24.0/go.mod h1:dVQF+BK3SzUZpwyzHedXsvH3EO38aVKuOPkkHlv5hXA=
github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/notedit/janus-go v0.0.0-20200517101215-10eb8b95d1a0 h1:EFU9iv8BMPyBo8iFMHvQleYlF5M3PY6zpAbxsngImjE=
github.com/notedit/janus-go v0.0.0-20200517101215-10eb8b95d1a0/go.mod h1:BN/Txse3qz8tZOmCm2OfajB2wHVujWmX3o9nVdsI6gE=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/oschwald/maxminddb-golang v1.13.0 h1:R8xBorY71s84yO06NgTmQvqvTvlS/bnYZrrWX1MElnU=
github.com/oschwald/maxminddb-golang v1.13.0/go.mod h1:BU0z8BfFVhi1LQaonTwwGQlsHUEu9pWNdMfmq4ztm0o=
github.com/oschwald/maxminddb-golang v1.10.0 h1:Xp1u0ZhqkSuopaKmk1WwHtjF0H9Hd9181uj2MQ5Vndg=
github.com/oschwald/maxminddb-golang v1.10.0/go.mod h1:Y2ELenReaLAZ0b400URyGwvYxHV1dLIxBuyOsyYjHK0=
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY=
github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M=
github.com/pion/sdp/v3 v3.0.6 h1:WuDLhtuFUUVpTfus9ILC4HRyHsW6TdugjEX/QY9OiUw=
github.com/pion/sdp/v3 v3.0.6/go.mod h1:iiFWFpQO8Fy3S5ldclBkpXqmWy02ns78NOKoLLL0YQw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.7.3 h1:dAm0YRdRQlWojc3CrCRgPBzG5f941d0zvAKu7qY4e+I=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
go.etcd.io/etcd/api/v3 v3.5.14 h1:vHObSCxyB9zlF60w7qzAdTcGaglbJOpSj1Xj9+WGxq0=
go.etcd.io/etcd/api/v3 v3.5.14/go.mod h1:BmtWcRlQvwa1h3G2jvKYwIQy4PkHlDej5t7uLMUdJUU=
go.etcd.io/etcd/client/pkg/v3 v3.5.14 h1:SaNH6Y+rVEdxfpA2Jr5wkEvN6Zykme5+YnbCkxvuWxQ=
go.etcd.io/etcd/client/pkg/v3 v3.5.14/go.mod h1:8uMgAokyG1czCtIdsq+AGyYQMvpIKnSvPjFMunkgeZI=
go.etcd.io/etcd/client/v2 v2.305.14 h1:v5ASLyFuMlVd/gKU6uf6Cod+vSWKa4Rsv9+eghl0Nwk=
go.etcd.io/etcd/client/v2 v2.305.14/go.mod h1:AWYT0lLEkBuqVaGw0UVMtA4rxCb3/oGE8PxZ8cUS4tI=
go.etcd.io/etcd/client/v3 v3.5.14 h1:CWfRs4FDaDoSz81giL7zPpZH2Z35tbOrAJkkjMqOupg=
go.etcd.io/etcd/client/v3 v3.5.14/go.mod h1:k3XfdV/VIHy/97rqWjoUzrj9tk7GgJGH9J8L4dNXmAk=
go.etcd.io/etcd/pkg/v3 v3.5.14 h1:keuxhJiDCPjTKpW77GxJnnVVD5n4IsfvkDaqiqUMNEQ=
go.etcd.io/etcd/pkg/v3 v3.5.14/go.mod h1:7o+DL6a7DYz9KSjWByX+NGmQPYinoH3D36VAu/B3JqA=
go.etcd.io/etcd/raft/v3 v3.5.14 h1:mHnpbljpBBftmK+YUfp+49ivaCc126aBPLAnwDw0DnE=
go.etcd.io/etcd/raft/v3 v3.5.14/go.mod h1:WnIK5blyJGRKsHA3efovdNoLv9QELTZHzpDOVIAuL2s=
go.etcd.io/etcd/server/v3 v3.5.14 h1:l/3gdiSSoGU6MyKAYiL+8WSOMq9ySG+NqQ04euLtZfY=
go.etcd.io/etcd/server/v3 v3.5.14/go.mod h1:SPh0rUtGNDgOZd/aTbkAUYZV+5FFHw5sdbGnO2/byw0=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M=
go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc=
go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 h1:DeFD0VgTZ+Cj6hxravYYZE2W4GlneVH81iAOPjZkzk8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0/go.mod h1:GijYcYmNpX1KazD5JmWGsi4P7dDTTTnfv1UbGn84MnU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 h1:gvmNvqrPYovvyRmCSygkUDyL8lC5Tl845MLEwqpxhEU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0=
go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA=
go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM=
go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM=
go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0=
go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ=
go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY=
go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA=
go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg=
go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY=
go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU=
go.etcd.io/etcd/client/v2 v2.305.7/go.mod h1:GQGT5Z3TBuAQGvgPfhR7VPySu/SudxmEkRq9BgzFU6s=
go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4=
go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw=
go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0=
go.etcd.io/etcd/pkg/v3 v3.5.7/go.mod h1:kcOfWt3Ov9zgYdOiJ/o1Y9zFfLhQjylTgL4Lru8opRo=
go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc=
go.etcd.io/etcd/raft/v3 v3.5.7/go.mod h1:TflkAb/8Uy6JFBxcRaH2Fr6Slm9mCPVdI2efzxY96yU=
go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk=
go.etcd.io/etcd/server/v3 v3.5.7/go.mod h1:gxBgT84issUVBRpZ3XkW1T55NjOb4vZZRI4wVvNhf4A=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0 h1:Wx7nFnvCaissIUZxPkBqDz2963Z+Cl+PkYbDKzTxDqQ=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E=
go.opentelemetry.io/otel v1.0.1 h1:4XKyXmfqJLOQ7feyV5DB6gsBFZ0ltB8vLtp6pj4JIcc=
go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1 h1:ofMbch7i29qIUf7VtF+r0HRF6ac0SBaPSziSsKp7wkk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1 h1:CFMFNoz+CGprjFAFy+RJFrfEe4GBia3RRm2a4fREvCA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk=
go.opentelemetry.io/otel/sdk v1.0.1 h1:wXxFEWGo7XfXupPwVJvTBOaPBC9FEg0wB8hMNrKk+cA=
go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI=
go.opentelemetry.io/otel/trace v1.0.1 h1:StTeIH6Q3G4r0Fiw34LTokUFESZgIDUr0qIJ7mKmAfw=
go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4=
go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4=
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w=
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.4.0 h1:9SxA29VM43MF5Z9dQu694wmY5t8E/Gxr7s+RSxiIDmc=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.4.0/go.mod h1:yZOK5zhQMiALmuweVdIVoQPa6eIJyXn2B9g5dJDhqX4=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.29.0 h1:44S3JjaKmLEE4YIkjzexaP+NzZsudE3Zin5Njn/pYX0=
google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=

View file

@ -24,9 +24,7 @@ package signaling
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net"
"net/url"
@ -39,8 +37,6 @@ import (
clientv3 "go.etcd.io/etcd/client/v3"
"google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
status "google.golang.org/grpc/status"
)
@ -53,9 +49,9 @@ const (
)
var (
ErrNoSuchResumeId = fmt.Errorf("unknown resume id")
lookupGrpcIp = net.LookupIP // can be overwritten from tests
customResolverPrefix atomic.Uint64
customResolverPrefix uint64
)
func init() {
@ -79,12 +75,12 @@ func newGrpcClientImpl(conn grpc.ClientConnInterface) *grpcClientImpl {
}
type GrpcClient struct {
isSelf uint32
ip net.IP
target string
conn *grpc.ClientConn
impl *grpcClientImpl
isSelf atomic.Bool
}
type customIpResolver struct {
@ -129,7 +125,7 @@ func NewGrpcClient(target string, ip net.IP, opts ...grpc.DialOption) (*GrpcClie
var conn *grpc.ClientConn
var err error
if ip != nil {
prefix := customResolverPrefix.Add(1)
prefix := atomic.AddUint64(&customResolverPrefix, 1)
addr := ip.String()
hostname := target
if host, port, err := net.SplitHostPort(target); err == nil {
@ -142,9 +138,9 @@ func NewGrpcClient(target string, ip net.IP, opts ...grpc.DialOption) (*GrpcClie
hostname: hostname,
}
opts = append(opts, grpc.WithResolvers(resolver))
conn, err = grpc.NewClient(fmt.Sprintf("%s://%s", resolver.Scheme(), target), opts...)
conn, err = grpc.Dial(fmt.Sprintf("%s://%s", resolver.Scheme(), target), opts...)
} else {
conn, err = grpc.NewClient(target, opts...)
conn, err = grpc.Dial(target, opts...)
}
if err != nil {
return nil, err
@ -172,11 +168,15 @@ func (c *GrpcClient) Close() error {
}
func (c *GrpcClient) IsSelf() bool {
return c.isSelf.Load()
return atomic.LoadUint32(&c.isSelf) != 0
}
func (c *GrpcClient) SetSelf(self bool) {
c.isSelf.Store(self)
if self {
atomic.StoreUint32(&c.isSelf, 1)
} else {
atomic.StoreUint32(&c.isSelf, 0)
}
}
func (c *GrpcClient) GetServerId(ctx context.Context) (string, error) {
@ -189,26 +189,6 @@ func (c *GrpcClient) GetServerId(ctx context.Context) (string, error) {
return response.GetServerId(), nil
}
func (c *GrpcClient) LookupResumeId(ctx context.Context, resumeId string) (*LookupResumeIdReply, error) {
statsGrpcClientCalls.WithLabelValues("LookupResumeId").Inc()
// TODO: Remove debug logging
log.Printf("Lookup resume id %s on %s", resumeId, c.Target())
response, err := c.impl.LookupResumeId(ctx, &LookupResumeIdRequest{
ResumeId: resumeId,
}, grpc.WaitForReady(true))
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
return nil, ErrNoSuchResumeId
} else if err != nil {
return nil, err
}
if sessionId := response.GetSessionId(); sessionId == "" {
return nil, ErrNoSuchResumeId
}
return response, nil
}
func (c *GrpcClient) LookupSessionId(ctx context.Context, roomSessionId string, disconnectReason string) (string, error) {
statsGrpcClientCalls.WithLabelValues("LookupSessionId").Inc()
// TODO: Remove debug logging
@ -249,13 +229,13 @@ func (c *GrpcClient) IsSessionInCall(ctx context.Context, sessionId string, room
return response.GetInCall(), nil
}
func (c *GrpcClient) GetPublisherId(ctx context.Context, sessionId string, streamType StreamType) (string, string, net.IP, error) {
func (c *GrpcClient) GetPublisherId(ctx context.Context, sessionId string, streamType string) (string, string, net.IP, error) {
statsGrpcClientCalls.WithLabelValues("GetPublisherId").Inc()
// TODO: Remove debug logging
log.Printf("Get %s publisher id %s on %s", streamType, sessionId, c.Target())
response, err := c.impl.GetPublisherId(ctx, &GetPublisherIdRequest{
SessionId: sessionId,
StreamType: string(streamType),
StreamType: streamType,
}, grpc.WaitForReady(true))
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
return "", "", nil, nil
@ -282,125 +262,37 @@ func (c *GrpcClient) GetSessionCount(ctx context.Context, u *url.URL) (uint32, e
return response.GetCount(), nil
}
type ProxySessionReceiver interface {
RemoteAddr() string
Country() string
UserAgent() string
OnProxyMessage(message *ServerSessionMessage) error
OnProxyClose(err error)
}
type SessionProxy struct {
sessionId string
receiver ProxySessionReceiver
sendMu sync.Mutex
client RpcSessions_ProxySessionClient
}
func (p *SessionProxy) recvPump() {
var closeError error
defer func() {
p.receiver.OnProxyClose(closeError)
if err := p.Close(); err != nil {
log.Printf("Error closing proxy for session %s: %s", p.sessionId, err)
}
}()
for {
msg, err := p.client.Recv()
if err != nil {
if errors.Is(err, io.EOF) {
break
}
log.Printf("Error receiving message from proxy for session %s: %s", p.sessionId, err)
closeError = err
break
}
if err := p.receiver.OnProxyMessage(msg); err != nil {
log.Printf("Error processing message %+v from proxy for session %s: %s", msg, p.sessionId, err)
}
}
}
func (p *SessionProxy) Send(message *ClientSessionMessage) error {
p.sendMu.Lock()
defer p.sendMu.Unlock()
return p.client.Send(message)
}
func (p *SessionProxy) Close() error {
p.sendMu.Lock()
defer p.sendMu.Unlock()
return p.client.CloseSend()
}
func (c *GrpcClient) ProxySession(ctx context.Context, sessionId string, receiver ProxySessionReceiver) (*SessionProxy, error) {
statsGrpcClientCalls.WithLabelValues("ProxySession").Inc()
md := metadata.Pairs(
"sessionId", sessionId,
"remoteAddr", receiver.RemoteAddr(),
"country", receiver.Country(),
"userAgent", receiver.UserAgent(),
)
client, err := c.impl.ProxySession(metadata.NewOutgoingContext(ctx, md), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
proxy := &SessionProxy{
sessionId: sessionId,
receiver: receiver,
client: client,
}
go proxy.recvPump()
return proxy, nil
}
type grpcClientsList struct {
clients []*GrpcClient
entry *DnsMonitorEntry
}
type GrpcClients struct {
mu sync.RWMutex
clientsMap map[string]*grpcClientsList
clientsMap map[string][]*GrpcClient
clients []*GrpcClient
dnsMonitor *DnsMonitor
dnsDiscovery bool
stopping chan bool
stopped chan bool
etcdClient *EtcdClient
targetPrefix string
targetInformation map[string]*GrpcTargetInformationEtcd
dialOptions atomic.Value // []grpc.DialOption
creds credentials.TransportCredentials
initializedCtx context.Context
initializedFunc context.CancelFunc
wakeupChanForTesting chan struct{}
initializedWg sync.WaitGroup
wakeupChanForTesting chan bool
selfCheckWaitGroup sync.WaitGroup
closeCtx context.Context
closeFunc context.CancelFunc
}
func NewGrpcClients(config *goconf.ConfigFile, etcdClient *EtcdClient, dnsMonitor *DnsMonitor) (*GrpcClients, error) {
func NewGrpcClients(config *goconf.ConfigFile, etcdClient *EtcdClient) (*GrpcClients, error) {
initializedCtx, initializedFunc := context.WithCancel(context.Background())
closeCtx, closeFunc := context.WithCancel(context.Background())
result := &GrpcClients{
dnsMonitor: dnsMonitor,
etcdClient: etcdClient,
initializedCtx: initializedCtx,
initializedFunc: initializedFunc,
closeCtx: closeCtx,
closeFunc: closeFunc,
stopping: make(chan bool, 1),
stopped: make(chan bool, 1),
}
if err := result.load(config, false); err != nil {
return nil, err
@ -414,13 +306,6 @@ func (c *GrpcClients) load(config *goconf.ConfigFile, fromReload bool) error {
return err
}
if c.creds != nil {
if cr, ok := c.creds.(*reloadableCredentials); ok {
cr.Close()
}
}
c.creds = creds
opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)}
c.dialOptions.Store(opts)
@ -432,6 +317,9 @@ func (c *GrpcClients) load(config *goconf.ConfigFile, fromReload bool) error {
switch targetType {
case GrpcTargetTypeStatic:
err = c.loadTargetsStatic(config, fromReload, opts...)
if err == nil && c.dnsDiscovery {
go c.monitorGrpcIPs()
}
case GrpcTargetTypeEtcd:
err = c.loadTargetsEtcd(config, fromReload, opts...)
default:
@ -460,7 +348,7 @@ func (c *GrpcClients) isClientAvailable(target string, client *GrpcClient) bool
return false
}
for _, entry := range entries.clients {
for _, entry := range entries {
if entry == client {
return true
}
@ -494,10 +382,6 @@ loop:
id, err := c.getServerIdWithTimeout(ctx, client)
if err != nil {
if errors.Is(err, context.Canceled) {
return
}
if status.Code(err) != codes.Canceled {
log.Printf("Error checking GRPC server id of %s, retrying in %s: %s", client.Target(), backoff.NextWait(), err)
}
@ -521,20 +405,7 @@ func (c *GrpcClients) loadTargetsStatic(config *goconf.ConfigFile, fromReload bo
c.mu.Lock()
defer c.mu.Unlock()
dnsDiscovery, _ := config.GetBool("grpc", "dnsdiscovery")
if dnsDiscovery != c.dnsDiscovery {
if !dnsDiscovery {
for _, entry := range c.clientsMap {
if entry.entry != nil {
c.dnsMonitor.Remove(entry.entry)
entry.entry = nil
}
}
}
c.dnsDiscovery = dnsDiscovery
}
clientsMap := make(map[string]*grpcClientsList)
clientsMap := make(map[string][]*GrpcClient)
var clients []*GrpcClient
removeTargets := make(map[string]bool, len(c.clientsMap))
for target, entries := range c.clientsMap {
@ -550,15 +421,7 @@ func (c *GrpcClients) loadTargetsStatic(config *goconf.ConfigFile, fromReload bo
}
if entries, found := clientsMap[target]; found {
clients = append(clients, entries.clients...)
if dnsDiscovery && entries.entry == nil {
entry, err := c.dnsMonitor.Add(target, c.onLookup)
if err != nil {
return err
}
entries.entry = entry
}
clients = append(clients, entries...)
delete(removeTargets, target)
continue
}
@ -568,62 +431,64 @@ func (c *GrpcClients) loadTargetsStatic(config *goconf.ConfigFile, fromReload bo
host = h
}
if dnsDiscovery && net.ParseIP(host) == nil {
var ips []net.IP
if net.ParseIP(host) == nil {
// Use dedicated client for each IP address.
entry, err := c.dnsMonitor.Add(target, c.onLookup)
var err error
ips, err = lookupGrpcIp(host)
if err != nil {
log.Printf("Could not lookup %s: %s", host, err)
// Make sure updating continues even if initial lookup failed.
clientsMap[target] = nil
continue
}
} else {
// Connect directly to IP address.
ips = []net.IP{nil}
}
for _, ip := range ips {
client, err := NewGrpcClient(target, ip, opts...)
if err != nil {
for _, clients := range clientsMap {
for _, client := range clients {
c.closeClient(client)
}
}
return err
}
clientsMap[target] = &grpcClientsList{
entry: entry,
}
continue
c.selfCheckWaitGroup.Add(1)
go c.checkIsSelf(context.Background(), target, client)
log.Printf("Adding %s as GRPC target", client.Target())
clientsMap[target] = append(clientsMap[target], client)
clients = append(clients, client)
}
client, err := NewGrpcClient(target, nil, opts...)
if err != nil {
for _, entry := range clientsMap {
for _, client := range entry.clients {
c.closeClient(client)
}
if entry.entry != nil {
c.dnsMonitor.Remove(entry.entry)
entry.entry = nil
}
}
return err
}
c.selfCheckWaitGroup.Add(1)
go c.checkIsSelf(c.closeCtx, target, client)
log.Printf("Adding %s as GRPC target", client.Target())
entry, found := clientsMap[target]
if !found {
entry = &grpcClientsList{}
clientsMap[target] = entry
}
entry.clients = append(entry.clients, client)
clients = append(clients, client)
}
for target := range removeTargets {
if entry, found := clientsMap[target]; found {
for _, client := range entry.clients {
if clients, found := clientsMap[target]; found {
for _, client := range clients {
log.Printf("Deleting GRPC target %s", client.Target())
c.closeClient(client)
}
if entry.entry != nil {
c.dnsMonitor.Remove(entry.entry)
entry.entry = nil
}
delete(clientsMap, target)
}
}
dnsDiscovery, _ := config.GetBool("grpc", "dnsdiscovery")
if dnsDiscovery != c.dnsDiscovery {
if !dnsDiscovery && fromReload {
c.stopping <- true
<-c.stopped
}
c.dnsDiscovery = dnsDiscovery
if dnsDiscovery && fromReload {
go c.monitorGrpcIPs()
}
}
c.clients = clients
c.clientsMap = clientsMap
c.initializedFunc()
@ -631,61 +496,91 @@ func (c *GrpcClients) loadTargetsStatic(config *goconf.ConfigFile, fromReload bo
return nil
}
func (c *GrpcClients) onLookup(entry *DnsMonitorEntry, all []net.IP, added []net.IP, keep []net.IP, removed []net.IP) {
func (c *GrpcClients) monitorGrpcIPs() {
log.Printf("Start monitoring GRPC client IPs")
ticker := time.NewTicker(updateDnsInterval)
for {
select {
case <-ticker.C:
c.updateGrpcIPs()
case <-c.stopping:
c.stopped <- true
return
}
}
}
func (c *GrpcClients) updateGrpcIPs() {
c.mu.Lock()
defer c.mu.Unlock()
target := entry.URL()
e, found := c.clientsMap[target]
if !found {
return
}
opts := c.dialOptions.Load().([]grpc.DialOption)
mapModified := false
var newClients []*GrpcClient
for _, ip := range removed {
for _, client := range e.clients {
if ip.Equal(client.ip) {
mapModified = true
for target, clients := range c.clientsMap {
host := target
if h, _, err := net.SplitHostPort(target); err == nil {
host = h
}
if net.ParseIP(host) != nil {
// No need to lookup endpoints that connect to IP addresses.
continue
}
ips, err := lookupGrpcIp(host)
if err != nil {
log.Printf("Could not lookup %s: %s", host, err)
continue
}
var newClients []*GrpcClient
changed := false
for _, client := range clients {
found := false
for idx, ip := range ips {
if ip.Equal(client.ip) {
ips = append(ips[:idx], ips[idx+1:]...)
found = true
newClients = append(newClients, client)
break
}
}
if !found {
changed = true
log.Printf("Removing connection to %s", client.Target())
c.closeClient(client)
c.wakeupForTesting()
}
}
}
for _, ip := range keep {
for _, client := range e.clients {
if ip.Equal(client.ip) {
newClients = append(newClients, client)
for _, ip := range ips {
client, err := NewGrpcClient(target, ip, opts...)
if err != nil {
log.Printf("Error creating client to %s with IP %s: %s", target, ip.String(), err)
continue
}
}
}
for _, ip := range added {
client, err := NewGrpcClient(target, ip, opts...)
if err != nil {
log.Printf("Error creating client to %s with IP %s: %s", target, ip.String(), err)
continue
c.selfCheckWaitGroup.Add(1)
go c.checkIsSelf(context.Background(), target, client)
log.Printf("Adding %s as GRPC target", client.Target())
newClients = append(newClients, client)
changed = true
c.wakeupForTesting()
}
c.selfCheckWaitGroup.Add(1)
go c.checkIsSelf(c.closeCtx, target, client)
log.Printf("Adding %s as GRPC target", client.Target())
newClients = append(newClients, client)
mapModified = true
c.wakeupForTesting()
if changed {
c.clientsMap[target] = newClients
mapModified = true
}
}
if mapModified {
c.clientsMap[target].clients = newClients
c.clients = make([]*GrpcClient, 0, len(c.clientsMap))
for _, entry := range c.clientsMap {
c.clients = append(c.clients, entry.clients...)
for _, clients := range c.clientsMap {
c.clients = append(c.clients, clients...)
}
statsGrpcClients.Set(float64(len(c.clients)))
}
@ -710,72 +605,52 @@ func (c *GrpcClients) loadTargetsEtcd(config *goconf.ConfigFile, fromReload bool
}
func (c *GrpcClients) EtcdClientCreated(client *EtcdClient) {
c.initializedWg.Add(1)
go func() {
if err := client.WaitForConnection(c.closeCtx); err != nil {
if errors.Is(err, context.Canceled) {
return
}
panic(err)
if err := client.Watch(context.Background(), c.targetPrefix, c, clientv3.WithPrefix()); err != nil {
log.Printf("Error processing watch for %s: %s", c.targetPrefix, err)
}
}()
go func() {
client.WaitForConnection()
backoff, _ := NewExponentialBackoff(initialWaitDelay, maxWaitDelay)
var nextRevision int64
for c.closeCtx.Err() == nil {
response, err := c.getGrpcTargets(c.closeCtx, client, c.targetPrefix)
for {
response, err := c.getGrpcTargets(client, c.targetPrefix)
if err != nil {
if errors.Is(err, context.Canceled) {
return
} else if errors.Is(err, context.DeadlineExceeded) {
if err == context.DeadlineExceeded {
log.Printf("Timeout getting initial list of GRPC targets, retry in %s", backoff.NextWait())
} else {
log.Printf("Could not get initial list of GRPC targets, retry in %s: %s", backoff.NextWait(), err)
}
backoff.Wait(c.closeCtx)
backoff.Wait(context.Background())
continue
}
for _, ev := range response.Kvs {
c.EtcdKeyUpdated(client, string(ev.Key), ev.Value, nil)
c.EtcdKeyUpdated(client, string(ev.Key), ev.Value)
}
c.initializedWg.Wait()
c.initializedFunc()
nextRevision = response.Header.Revision + 1
break
}
prevRevision := nextRevision
backoff.Reset()
for c.closeCtx.Err() == nil {
var err error
if nextRevision, err = client.Watch(c.closeCtx, c.targetPrefix, nextRevision, c, clientv3.WithPrefix()); err != nil {
log.Printf("Error processing watch for %s (%s), retry in %s", c.targetPrefix, err, backoff.NextWait())
backoff.Wait(c.closeCtx)
continue
}
if nextRevision != prevRevision {
backoff.Reset()
prevRevision = nextRevision
} else {
log.Printf("Processing watch for %s interrupted, retry in %s", c.targetPrefix, backoff.NextWait())
backoff.Wait(c.closeCtx)
}
return
}
}()
}
func (c *GrpcClients) EtcdWatchCreated(client *EtcdClient, key string) {
c.initializedWg.Done()
}
func (c *GrpcClients) getGrpcTargets(ctx context.Context, client *EtcdClient, targetPrefix string) (*clientv3.GetResponse, error) {
ctx, cancel := context.WithTimeout(ctx, time.Second)
func (c *GrpcClients) getGrpcTargets(client *EtcdClient, targetPrefix string) (*clientv3.GetResponse, error) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
return client.Get(ctx, targetPrefix, clientv3.WithPrefix())
}
func (c *GrpcClients) EtcdKeyUpdated(client *EtcdClient, key string, data []byte, prevValue []byte) {
func (c *GrpcClients) EtcdKeyUpdated(client *EtcdClient, key string, data []byte) {
var info GrpcTargetInformationEtcd
if err := json.Unmarshal(data, &info); err != nil {
log.Printf("Could not decode GRPC target %s=%s: %s", key, string(data), err)
@ -808,23 +683,21 @@ func (c *GrpcClients) EtcdKeyUpdated(client *EtcdClient, key string, data []byte
}
c.selfCheckWaitGroup.Add(1)
go c.checkIsSelf(c.closeCtx, info.Address, cl)
go c.checkIsSelf(context.Background(), info.Address, cl)
log.Printf("Adding %s as GRPC target", cl.Target())
if c.clientsMap == nil {
c.clientsMap = make(map[string]*grpcClientsList)
}
c.clientsMap[info.Address] = &grpcClientsList{
clients: []*GrpcClient{cl},
c.clientsMap = make(map[string][]*GrpcClient)
}
c.clientsMap[info.Address] = []*GrpcClient{cl}
c.clients = append(c.clients, cl)
c.targetInformation[key] = &info
statsGrpcClients.Inc()
c.wakeupForTesting()
}
func (c *GrpcClients) EtcdKeyDeleted(client *EtcdClient, key string, prevValue []byte) {
func (c *GrpcClients) EtcdKeyDeleted(client *EtcdClient, key string) {
c.mu.Lock()
defer c.mu.Unlock()
@ -840,19 +713,19 @@ func (c *GrpcClients) removeEtcdClientLocked(key string) {
}
delete(c.targetInformation, key)
entry, found := c.clientsMap[info.Address]
clients, found := c.clientsMap[info.Address]
if !found {
return
}
for _, client := range entry.clients {
for _, client := range clients {
log.Printf("Removing connection to %s (from %s)", client.Target(), key)
c.closeClient(client)
}
delete(c.clientsMap, info.Address)
c.clients = make([]*GrpcClient, 0, len(c.clientsMap))
for _, entry := range c.clientsMap {
c.clients = append(c.clients, entry.clients...)
for _, clients := range c.clientsMap {
c.clients = append(c.clients, clients...)
}
statsGrpcClients.Dec()
c.wakeupForTesting()
@ -873,7 +746,7 @@ func (c *GrpcClients) wakeupForTesting() {
}
select {
case c.wakeupChanForTesting <- struct{}{}:
case c.wakeupChanForTesting <- true:
default:
}
}
@ -888,32 +761,25 @@ func (c *GrpcClients) Close() {
c.mu.Lock()
defer c.mu.Unlock()
for _, entry := range c.clientsMap {
for _, client := range entry.clients {
for _, clients := range c.clientsMap {
for _, client := range clients {
if err := client.Close(); err != nil {
log.Printf("Error closing client to %s: %s", client.Target(), err)
}
}
if entry.entry != nil {
c.dnsMonitor.Remove(entry.entry)
entry.entry = nil
}
}
c.clients = nil
c.clientsMap = nil
c.dnsDiscovery = false
if c.dnsDiscovery {
c.stopping <- true
<-c.stopped
c.dnsDiscovery = false
}
if c.etcdClient != nil {
c.etcdClient.RemoveListener(c)
}
if c.creds != nil {
if cr, ok := c.creds.(*reloadableCredentials); ok {
cr.Close()
}
}
c.closeFunc()
}
func (c *GrpcClients) GetClients() []*GrpcClient {

View file

@ -36,22 +36,8 @@ import (
"go.etcd.io/etcd/server/v3/embed"
)
func (c *GrpcClients) getWakeupChannelForTesting() <-chan struct{} {
c.mu.Lock()
defer c.mu.Unlock()
if c.wakeupChanForTesting != nil {
return c.wakeupChanForTesting
}
ch := make(chan struct{}, 1)
c.wakeupChanForTesting = ch
return ch
}
func NewGrpcClientsForTestWithConfig(t *testing.T, config *goconf.ConfigFile, etcdClient *EtcdClient) (*GrpcClients, *DnsMonitor) {
dnsMonitor := newDnsMonitorForTest(t, time.Hour) // will be updated manually
client, err := NewGrpcClients(config, etcdClient, dnsMonitor)
func NewGrpcClientsForTestWithConfig(t *testing.T, config *goconf.ConfigFile, etcdClient *EtcdClient) *GrpcClients {
client, err := NewGrpcClients(config, etcdClient)
if err != nil {
t.Fatal(err)
}
@ -59,10 +45,10 @@ func NewGrpcClientsForTestWithConfig(t *testing.T, config *goconf.ConfigFile, et
client.Close()
})
return client, dnsMonitor
return client
}
func NewGrpcClientsForTest(t *testing.T, addr string) (*GrpcClients, *DnsMonitor) {
func NewGrpcClientsForTest(t *testing.T, addr string) *GrpcClients {
config := goconf.NewConfigFile()
config.AddOption("grpc", "targets", addr)
config.AddOption("grpc", "dnsdiscovery", "true")
@ -70,9 +56,9 @@ func NewGrpcClientsForTest(t *testing.T, addr string) (*GrpcClients, *DnsMonitor
return NewGrpcClientsForTestWithConfig(t, config, nil)
}
func NewGrpcClientsWithEtcdForTest(t *testing.T, etcd *embed.Etcd) (*GrpcClients, *DnsMonitor) {
func NewGrpcClientsWithEtcdForTest(t *testing.T, etcd *embed.Etcd) *GrpcClients {
config := goconf.NewConfigFile()
config.AddOption("etcd", "endpoints", etcd.Config().ListenClientUrls[0].String())
config.AddOption("etcd", "endpoints", etcd.Config().LCUrls[0].String())
config.AddOption("grpc", "targettype", "etcd")
config.AddOption("grpc", "targetprefix", "/grpctargets")
@ -90,7 +76,7 @@ func NewGrpcClientsWithEtcdForTest(t *testing.T, etcd *embed.Etcd) (*GrpcClients
return NewGrpcClientsForTestWithConfig(t, config, etcdClient)
}
func drainWakeupChannel(ch <-chan struct{}) {
func drainWakeupChannel(ch chan bool) {
for {
select {
case <-ch:
@ -100,7 +86,7 @@ func drainWakeupChannel(ch <-chan struct{}) {
}
}
func waitForEvent(ctx context.Context, t *testing.T, ch <-chan struct{}) {
func waitForEvent(ctx context.Context, t *testing.T, ch chan bool) {
t.Helper()
select {
@ -112,35 +98,31 @@ func waitForEvent(ctx context.Context, t *testing.T, ch <-chan struct{}) {
}
func Test_GrpcClients_EtcdInitial(t *testing.T) {
CatchLogForTest(t)
ensureNoGoroutinesLeak(t, func(t *testing.T) {
_, addr1 := NewGrpcServerForTest(t)
_, addr2 := NewGrpcServerForTest(t)
_, addr1 := NewGrpcServerForTest(t)
_, addr2 := NewGrpcServerForTest(t)
etcd := NewEtcdForTest(t)
etcd := NewEtcdForTest(t)
SetEtcdValue(etcd, "/grpctargets/one", []byte("{\"address\":\""+addr1+"\"}"))
SetEtcdValue(etcd, "/grpctargets/two", []byte("{\"address\":\""+addr2+"\"}"))
SetEtcdValue(etcd, "/grpctargets/one", []byte("{\"address\":\""+addr1+"\"}"))
SetEtcdValue(etcd, "/grpctargets/two", []byte("{\"address\":\""+addr2+"\"}"))
client, _ := NewGrpcClientsWithEtcdForTest(t, etcd)
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
if err := client.WaitForInitialized(ctx); err != nil {
t.Fatal(err)
}
client := NewGrpcClientsWithEtcdForTest(t, etcd)
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
if err := client.WaitForInitialized(ctx); err != nil {
t.Fatal(err)
}
if clients := client.GetClients(); len(clients) != 2 {
t.Errorf("Expected two clients, got %+v", clients)
}
})
if clients := client.GetClients(); len(clients) != 2 {
t.Errorf("Expected two clients, got %+v", clients)
}
}
func Test_GrpcClients_EtcdUpdate(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
etcd := NewEtcdForTest(t)
client, _ := NewGrpcClientsWithEtcdForTest(t, etcd)
ch := client.getWakeupChannelForTesting()
client := NewGrpcClientsWithEtcdForTest(t, etcd)
ch := make(chan bool, 1)
client.wakeupChanForTesting = ch
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
@ -192,11 +174,10 @@ func Test_GrpcClients_EtcdUpdate(t *testing.T) {
}
func Test_GrpcClients_EtcdIgnoreSelf(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
etcd := NewEtcdForTest(t)
client, _ := NewGrpcClientsWithEtcdForTest(t, etcd)
ch := client.getWakeupChannelForTesting()
client := NewGrpcClientsWithEtcdForTest(t, etcd)
ch := make(chan bool, 1)
client.wakeupChanForTesting = ch
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
@ -238,71 +219,85 @@ func Test_GrpcClients_EtcdIgnoreSelf(t *testing.T) {
}
func Test_GrpcClients_DnsDiscovery(t *testing.T) {
CatchLogForTest(t)
ensureNoGoroutinesLeak(t, func(t *testing.T) {
lookup := newMockDnsLookupForTest(t)
target := "testgrpc:12345"
ip1 := net.ParseIP("192.168.0.1")
ip2 := net.ParseIP("192.168.0.2")
targetWithIp1 := fmt.Sprintf("%s (%s)", target, ip1)
targetWithIp2 := fmt.Sprintf("%s (%s)", target, ip2)
lookup.Set("testgrpc", []net.IP{ip1})
client, dnsMonitor := NewGrpcClientsForTest(t, target)
ch := client.getWakeupChannelForTesting()
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
dnsMonitor.checkHostnames()
if clients := client.GetClients(); len(clients) != 1 {
t.Errorf("Expected one client, got %+v", clients)
} else if clients[0].Target() != targetWithIp1 {
t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target())
} else if !clients[0].ip.Equal(ip1) {
t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip)
var ipsResult []net.IP
lookupGrpcIp = func(host string) ([]net.IP, error) {
if host == "testgrpc" {
return ipsResult, nil
}
lookup.Set("testgrpc", []net.IP{ip1, ip2})
drainWakeupChannel(ch)
dnsMonitor.checkHostnames()
waitForEvent(ctx, t, ch)
return nil, fmt.Errorf("unknown host")
}
target := "testgrpc:12345"
ip1 := net.ParseIP("192.168.0.1")
ip2 := net.ParseIP("192.168.0.2")
targetWithIp1 := fmt.Sprintf("%s (%s)", target, ip1)
targetWithIp2 := fmt.Sprintf("%s (%s)", target, ip2)
ipsResult = []net.IP{ip1}
client := NewGrpcClientsForTest(t, target)
ch := make(chan bool, 1)
client.wakeupChanForTesting = ch
if clients := client.GetClients(); len(clients) != 2 {
t.Errorf("Expected two client, got %+v", clients)
} else if clients[0].Target() != targetWithIp1 {
t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target())
} else if !clients[0].ip.Equal(ip1) {
t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip)
} else if clients[1].Target() != targetWithIp2 {
t.Errorf("Expected target %s, got %s", targetWithIp2, clients[1].Target())
} else if !clients[1].ip.Equal(ip2) {
t.Errorf("Expected IP %s, got %s", ip2, clients[1].ip)
}
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
lookup.Set("testgrpc", []net.IP{ip2})
drainWakeupChannel(ch)
dnsMonitor.checkHostnames()
waitForEvent(ctx, t, ch)
if clients := client.GetClients(); len(clients) != 1 {
t.Errorf("Expected one client, got %+v", clients)
} else if clients[0].Target() != targetWithIp1 {
t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target())
} else if !clients[0].ip.Equal(ip1) {
t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip)
}
if clients := client.GetClients(); len(clients) != 1 {
t.Errorf("Expected one client, got %+v", clients)
} else if clients[0].Target() != targetWithIp2 {
t.Errorf("Expected target %s, got %s", targetWithIp2, clients[0].Target())
} else if !clients[0].ip.Equal(ip2) {
t.Errorf("Expected IP %s, got %s", ip2, clients[0].ip)
}
})
ipsResult = []net.IP{ip1, ip2}
drainWakeupChannel(ch)
client.updateGrpcIPs()
waitForEvent(ctx, t, ch)
if clients := client.GetClients(); len(clients) != 2 {
t.Errorf("Expected two client, got %+v", clients)
} else if clients[0].Target() != targetWithIp1 {
t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target())
} else if !clients[0].ip.Equal(ip1) {
t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip)
} else if clients[1].Target() != targetWithIp2 {
t.Errorf("Expected target %s, got %s", targetWithIp2, clients[1].Target())
} else if !clients[1].ip.Equal(ip2) {
t.Errorf("Expected IP %s, got %s", ip2, clients[1].ip)
}
ipsResult = []net.IP{ip2}
drainWakeupChannel(ch)
client.updateGrpcIPs()
waitForEvent(ctx, t, ch)
if clients := client.GetClients(); len(clients) != 1 {
t.Errorf("Expected one client, got %+v", clients)
} else if clients[0].Target() != targetWithIp2 {
t.Errorf("Expected target %s, got %s", targetWithIp2, clients[0].Target())
} else if !clients[0].ip.Equal(ip2) {
t.Errorf("Expected IP %s, got %s", ip2, clients[0].ip)
}
}
func Test_GrpcClients_DnsDiscoveryInitialFailed(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
lookup := newMockDnsLookupForTest(t)
var ipsResult []net.IP
lookupGrpcIp = func(host string) ([]net.IP, error) {
if host == "testgrpc" && len(ipsResult) > 0 {
return ipsResult, nil
}
return nil, &net.DNSError{
Err: "no such host",
Name: host,
IsNotFound: true,
}
}
target := "testgrpc:12345"
ip1 := net.ParseIP("192.168.0.1")
targetWithIp1 := fmt.Sprintf("%s (%s)", target, ip1)
client, dnsMonitor := NewGrpcClientsForTest(t, target)
ch := client.getWakeupChannelForTesting()
client := NewGrpcClientsForTest(t, target)
ch := make(chan bool, 1)
client.wakeupChanForTesting = ch
testCtx, testCtxCancel := context.WithTimeout(context.Background(), testTimeout)
defer testCtxCancel()
@ -317,9 +312,9 @@ func Test_GrpcClients_DnsDiscoveryInitialFailed(t *testing.T) {
t.Errorf("Expected no client, got %+v", clients)
}
lookup.Set("testgrpc", []net.IP{ip1})
ipsResult = []net.IP{ip1}
drainWakeupChannel(ch)
dnsMonitor.checkHostnames()
client.updateGrpcIPs()
waitForEvent(testCtx, t, ch)
if clients := client.GetClients(); len(clients) != 1 {
@ -332,58 +327,55 @@ func Test_GrpcClients_DnsDiscoveryInitialFailed(t *testing.T) {
}
func Test_GrpcClients_Encryption(t *testing.T) {
CatchLogForTest(t)
ensureNoGoroutinesLeak(t, func(t *testing.T) {
serverKey, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
serverKey, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
t.Fatal(err)
}
clientKey, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
t.Fatal(err)
}
serverCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Server cert", serverKey)
clientCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Testing client", clientKey)
dir := t.TempDir()
serverPrivkeyFile := path.Join(dir, "server-privkey.pem")
serverPubkeyFile := path.Join(dir, "server-pubkey.pem")
serverCertFile := path.Join(dir, "server-cert.pem")
WritePrivateKey(serverKey, serverPrivkeyFile) // nolint
WritePublicKey(&serverKey.PublicKey, serverPubkeyFile) // nolint
os.WriteFile(serverCertFile, serverCert, 0755) // nolint
clientPrivkeyFile := path.Join(dir, "client-privkey.pem")
clientPubkeyFile := path.Join(dir, "client-pubkey.pem")
clientCertFile := path.Join(dir, "client-cert.pem")
WritePrivateKey(clientKey, clientPrivkeyFile) // nolint
WritePublicKey(&clientKey.PublicKey, clientPubkeyFile) // nolint
os.WriteFile(clientCertFile, clientCert, 0755) // nolint
serverConfig := goconf.NewConfigFile()
serverConfig.AddOption("grpc", "servercertificate", serverCertFile)
serverConfig.AddOption("grpc", "serverkey", serverPrivkeyFile)
serverConfig.AddOption("grpc", "clientca", clientCertFile)
_, addr := NewGrpcServerForTestWithConfig(t, serverConfig)
clientConfig := goconf.NewConfigFile()
clientConfig.AddOption("grpc", "targets", addr)
clientConfig.AddOption("grpc", "clientcertificate", clientCertFile)
clientConfig.AddOption("grpc", "clientkey", clientPrivkeyFile)
clientConfig.AddOption("grpc", "serverca", serverCertFile)
clients := NewGrpcClientsForTestWithConfig(t, clientConfig, nil)
ctx, cancel1 := context.WithTimeout(context.Background(), time.Second)
defer cancel1()
if err := clients.WaitForInitialized(ctx); err != nil {
t.Fatal(err)
}
for _, client := range clients.GetClients() {
if _, err := client.GetServerId(ctx); err != nil {
t.Fatal(err)
}
clientKey, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
t.Fatal(err)
}
serverCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Server cert", serverKey)
clientCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Testing client", clientKey)
dir := t.TempDir()
serverPrivkeyFile := path.Join(dir, "server-privkey.pem")
serverPubkeyFile := path.Join(dir, "server-pubkey.pem")
serverCertFile := path.Join(dir, "server-cert.pem")
WritePrivateKey(serverKey, serverPrivkeyFile) // nolint
WritePublicKey(&serverKey.PublicKey, serverPubkeyFile) // nolint
os.WriteFile(serverCertFile, serverCert, 0755) // nolint
clientPrivkeyFile := path.Join(dir, "client-privkey.pem")
clientPubkeyFile := path.Join(dir, "client-pubkey.pem")
clientCertFile := path.Join(dir, "client-cert.pem")
WritePrivateKey(clientKey, clientPrivkeyFile) // nolint
WritePublicKey(&clientKey.PublicKey, clientPubkeyFile) // nolint
os.WriteFile(clientCertFile, clientCert, 0755) // nolint
serverConfig := goconf.NewConfigFile()
serverConfig.AddOption("grpc", "servercertificate", serverCertFile)
serverConfig.AddOption("grpc", "serverkey", serverPrivkeyFile)
serverConfig.AddOption("grpc", "clientca", clientCertFile)
_, addr := NewGrpcServerForTestWithConfig(t, serverConfig)
clientConfig := goconf.NewConfigFile()
clientConfig.AddOption("grpc", "targets", addr)
clientConfig.AddOption("grpc", "clientcertificate", clientCertFile)
clientConfig.AddOption("grpc", "clientkey", clientPrivkeyFile)
clientConfig.AddOption("grpc", "serverca", serverCertFile)
clients, _ := NewGrpcClientsForTestWithConfig(t, clientConfig, nil)
ctx, cancel1 := context.WithTimeout(context.Background(), time.Second)
defer cancel1()
if err := clients.WaitForInitialized(ctx); err != nil {
t.Fatal(err)
}
for _, client := range clients.GetClients() {
if _, err := client.GetServerId(ctx); err != nil {
t.Fatal(err)
}
}
})
}
}

View file

@ -125,15 +125,6 @@ func (c *reloadableCredentials) OverrideServerName(serverName string) error {
return nil
}
func (c *reloadableCredentials) Close() {
if c.loader != nil {
c.loader.Close()
}
if c.pool != nil {
c.pool.Close()
}
}
func NewReloadableCredentials(config *goconf.ConfigFile, server bool) (credentials.TransportCredentials, error) {
var prefix string
var caPrefix string

View file

@ -22,13 +22,11 @@
package signaling
import (
"context"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"io/fs"
"math/big"
"net"
@ -37,22 +35,6 @@ import (
"time"
)
func (c *reloadableCredentials) WaitForCertificateReload(ctx context.Context) error {
if c.loader == nil {
return errors.New("no certificate loaded")
}
return c.loader.WaitForReload(ctx)
}
func (c *reloadableCredentials) WaitForCertPoolReload(ctx context.Context) error {
if c.pool == nil {
return errors.New("no certificate pool loaded")
}
return c.pool.WaitForReload(ctx)
}
func GenerateSelfSignedCertificateForTesting(t *testing.T, bits int, organization string, key *rsa.PrivateKey) []byte {
template := x509.Certificate{
SerialNumber: big.NewInt(1),

View file

@ -1,229 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2024 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"sync/atomic"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
const (
grpcRemoteClientMessageQueue = 16
)
func getMD(md metadata.MD, key string) string {
if values := md.Get(key); len(values) > 0 {
return values[0]
}
return ""
}
// remoteGrpcClient is a remote client connecting from a GRPC proxy to a Hub.
type remoteGrpcClient struct {
hub *Hub
client RpcSessions_ProxySessionServer
sessionId string
remoteAddr string
country string
userAgent string
closeCtx context.Context
closeFunc context.CancelCauseFunc
session atomic.Pointer[Session]
messages chan WritableClientMessage
}
func newRemoteGrpcClient(hub *Hub, request RpcSessions_ProxySessionServer) (*remoteGrpcClient, error) {
md, found := metadata.FromIncomingContext(request.Context())
if !found {
return nil, errors.New("no metadata provided")
}
closeCtx, closeFunc := context.WithCancelCause(context.Background())
result := &remoteGrpcClient{
hub: hub,
client: request,
sessionId: getMD(md, "sessionId"),
remoteAddr: getMD(md, "remoteAddr"),
country: getMD(md, "country"),
userAgent: getMD(md, "userAgent"),
closeCtx: closeCtx,
closeFunc: closeFunc,
messages: make(chan WritableClientMessage, grpcRemoteClientMessageQueue),
}
return result, nil
}
func (c *remoteGrpcClient) readPump() {
var closeError error
defer func() {
c.closeFunc(closeError)
c.hub.OnClosed(c)
}()
for {
msg, err := c.client.Recv()
if err != nil {
if errors.Is(err, io.EOF) {
// Connection was closed locally.
break
}
if status.Code(err) != codes.Canceled {
log.Printf("Error reading from remote client for session %s: %s", c.sessionId, err)
closeError = err
}
break
}
c.hub.OnMessageReceived(c, msg.Message)
}
}
func (c *remoteGrpcClient) Context() context.Context {
return c.client.Context()
}
func (c *remoteGrpcClient) RemoteAddr() string {
return c.remoteAddr
}
func (c *remoteGrpcClient) UserAgent() string {
return c.userAgent
}
func (c *remoteGrpcClient) Country() string {
return c.country
}
func (c *remoteGrpcClient) IsConnected() bool {
return true
}
func (c *remoteGrpcClient) IsAuthenticated() bool {
return c.GetSession() != nil
}
func (c *remoteGrpcClient) GetSession() Session {
session := c.session.Load()
if session == nil {
return nil
}
return *session
}
func (c *remoteGrpcClient) SetSession(session Session) {
if session == nil {
c.session.Store(nil)
} else {
c.session.Store(&session)
}
}
func (c *remoteGrpcClient) SendError(e *Error) bool {
message := &ServerMessage{
Type: "error",
Error: e,
}
return c.SendMessage(message)
}
func (c *remoteGrpcClient) SendByeResponse(message *ClientMessage) bool {
return c.SendByeResponseWithReason(message, "")
}
func (c *remoteGrpcClient) SendByeResponseWithReason(message *ClientMessage, reason string) bool {
response := &ServerMessage{
Type: "bye",
}
if message != nil {
response.Id = message.Id
}
if reason != "" {
if response.Bye == nil {
response.Bye = &ByeServerMessage{}
}
response.Bye.Reason = reason
}
return c.SendMessage(response)
}
func (c *remoteGrpcClient) SendMessage(message WritableClientMessage) bool {
if c.closeCtx.Err() != nil {
return false
}
select {
case c.messages <- message:
return true
default:
log.Printf("Message queue for remote client of session %s is full, not sending %+v", c.sessionId, message)
return false
}
}
func (c *remoteGrpcClient) Close() {
c.closeFunc(nil)
}
func (c *remoteGrpcClient) run() error {
go c.readPump()
for {
select {
case <-c.closeCtx.Done():
if err := context.Cause(c.closeCtx); err != context.Canceled {
return err
}
return nil
case msg := <-c.messages:
data, err := json.Marshal(msg)
if err != nil {
log.Printf("Error marshalling %+v for remote client for session %s: %s", msg, c.sessionId, err)
continue
}
if err := c.client.Send(&ServerSessionMessage{
Message: data,
}); err != nil {
return fmt.Errorf("error sending %+v to remote client for session %s: %w", msg, c.sessionId, err)
}
}
}
}

View file

@ -35,7 +35,6 @@ import (
"github.com/dlintw/goconf"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
status "google.golang.org/grpc/status"
)
@ -55,31 +54,22 @@ func init() {
GrpcServerId = hex.EncodeToString(md.Sum(nil))
}
type GrpcServerHub interface {
GetSessionByResumeId(resumeId string) Session
GetSessionByPublicId(sessionId string) Session
GetSessionIdByRoomSessionId(roomSessionId string) (string, error)
GetBackend(u *url.URL) *Backend
}
type GrpcServer struct {
UnimplementedRpcBackendServer
UnimplementedRpcInternalServer
UnimplementedRpcMcuServer
UnimplementedRpcSessionsServer
creds credentials.TransportCredentials
conn *grpc.Server
listener net.Listener
serverId string // can be overwritten from tests
hub GrpcServerHub
hub *Hub
}
func NewGrpcServer(config *goconf.ConfigFile) (*GrpcServer, error) {
var listener net.Listener
if addr, _ := GetStringOptionWithEnv(config, "grpc", "listen"); addr != "" {
if addr, _ := config.GetString("grpc", "listen"); addr != "" {
var err error
listener, err = net.Listen("tcp", addr)
if err != nil {
@ -94,7 +84,6 @@ func NewGrpcServer(config *goconf.ConfigFile) (*GrpcServer, error) {
conn := grpc.NewServer(grpc.Creds(creds))
result := &GrpcServer{
creds: creds,
conn: conn,
listener: listener,
serverId: GrpcServerId,
@ -116,30 +105,13 @@ func (s *GrpcServer) Run() error {
func (s *GrpcServer) Close() {
s.conn.GracefulStop()
if cr, ok := s.creds.(*reloadableCredentials); ok {
cr.Close()
}
}
func (s *GrpcServer) LookupResumeId(ctx context.Context, request *LookupResumeIdRequest) (*LookupResumeIdReply, error) {
statsGrpcServerCalls.WithLabelValues("LookupResumeId").Inc()
// TODO: Remove debug logging
log.Printf("Lookup session for resume id %s", request.ResumeId)
session := s.hub.GetSessionByResumeId(request.ResumeId)
if session == nil {
return nil, status.Error(codes.NotFound, "no such room session id")
}
return &LookupResumeIdReply{
SessionId: session.PublicId(),
}, nil
}
func (s *GrpcServer) LookupSessionId(ctx context.Context, request *LookupSessionIdRequest) (*LookupSessionIdReply, error) {
statsGrpcServerCalls.WithLabelValues("LookupSessionId").Inc()
// TODO: Remove debug logging
log.Printf("Lookup session id for room session id %s", request.RoomSessionId)
sid, err := s.hub.GetSessionIdByRoomSessionId(request.RoomSessionId)
sid, err := s.hub.roomSessions.GetSessionId(request.RoomSessionId)
if errors.Is(err, ErrNoSuchRoomSession) {
return nil, status.Error(codes.NotFound, "no such room session id")
} else if err != nil {
@ -199,7 +171,7 @@ func (s *GrpcServer) GetPublisherId(ctx context.Context, request *GetPublisherId
return nil, status.Error(codes.NotFound, "no such session")
}
publisher := clientSession.GetOrWaitForPublisher(ctx, StreamType(request.StreamType))
publisher := clientSession.GetOrWaitForPublisher(ctx, request.StreamType)
if publisher, ok := publisher.(*mcuProxyPublisher); ok {
reply := &GetPublisherIdReply{
PublisherId: publisher.Id(),
@ -229,7 +201,7 @@ func (s *GrpcServer) GetSessionCount(ctx context.Context, request *GetSessionCou
return nil, status.Error(codes.InvalidArgument, "invalid url")
}
backend := s.hub.GetBackend(u)
backend := s.hub.backend.GetBackend(u)
if backend == nil {
return nil, status.Error(codes.NotFound, "no such backend")
}
@ -238,21 +210,3 @@ func (s *GrpcServer) GetSessionCount(ctx context.Context, request *GetSessionCou
Count: uint32(backend.Len()),
}, nil
}
func (s *GrpcServer) ProxySession(request RpcSessions_ProxySessionServer) error {
statsGrpcServerCalls.WithLabelValues("ProxySession").Inc()
hub, ok := s.hub.(*Hub)
if !ok {
return status.Error(codes.Internal, "invalid hub type")
}
client, err := newRemoteGrpcClient(hub, request)
if err != nil {
return err
}
sid := hub.registerClient(client)
defer hub.unregisterClient(sid)
return client.run()
}

View file

@ -28,7 +28,6 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"errors"
"net"
"os"
"path"
@ -41,24 +40,6 @@ import (
"google.golang.org/grpc/credentials"
)
func (s *GrpcServer) WaitForCertificateReload(ctx context.Context) error {
c, ok := s.creds.(*reloadableCredentials)
if !ok {
return errors.New("no reloadable credentials found")
}
return c.WaitForCertificateReload(ctx)
}
func (s *GrpcServer) WaitForCertPoolReload(ctx context.Context) error {
c, ok := s.creds.(*reloadableCredentials)
if !ok {
return errors.New("no reloadable credentials found")
}
return c.WaitForCertPoolReload(ctx)
}
func NewGrpcServerForTestWithConfig(t *testing.T, config *goconf.ConfigFile) (server *GrpcServer, addr string) {
for port := 50000; port < 50100; port++ {
addr = net.JoinHostPort("127.0.0.1", strconv.Itoa(port))
@ -98,7 +79,6 @@ func NewGrpcServerForTest(t *testing.T) (server *GrpcServer, addr string) {
}
func Test_GrpcServer_ReloadCerts(t *testing.T) {
CatchLogForTest(t)
key, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
t.Fatal(err)
@ -119,8 +99,8 @@ func Test_GrpcServer_ReloadCerts(t *testing.T) {
config.AddOption("grpc", "servercertificate", certFile)
config.AddOption("grpc", "serverkey", privkeyFile)
UpdateCertificateCheckIntervalForTest(t, 0)
server, addr := NewGrpcServerForTestWithConfig(t, config)
UpdateCertificateCheckIntervalForTest(t, time.Millisecond)
_, addr := NewGrpcServerForTestWithConfig(t, config)
cp1 := x509.NewCertPool()
if !cp1.AppendCertsFromPEM(cert1) {
@ -148,13 +128,6 @@ func Test_GrpcServer_ReloadCerts(t *testing.T) {
cert2 := GenerateSelfSignedCertificateForTesting(t, 1024, org2, key)
replaceFile(t, certFile, cert2, 0755)
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
if err := server.WaitForCertificateReload(ctx); err != nil {
t.Fatal(err)
}
cp2 := x509.NewCertPool()
if !cp2.AppendCertsFromPEM(cert2) {
t.Fatalf("could not add certificate")
@ -179,7 +152,6 @@ func Test_GrpcServer_ReloadCerts(t *testing.T) {
}
func Test_GrpcServer_ReloadCA(t *testing.T) {
CatchLogForTest(t)
serverKey, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
t.Fatal(err)
@ -208,8 +180,8 @@ func Test_GrpcServer_ReloadCA(t *testing.T) {
config.AddOption("grpc", "serverkey", privkeyFile)
config.AddOption("grpc", "clientca", caFile)
UpdateCertificateCheckIntervalForTest(t, 0)
server, addr := NewGrpcServerForTestWithConfig(t, config)
UpdateCertificateCheckIntervalForTest(t, time.Millisecond)
_, addr := NewGrpcServerForTestWithConfig(t, config)
pool := x509.NewCertPool()
if !pool.AppendCertsFromPEM(serverCert) {
@ -245,10 +217,6 @@ func Test_GrpcServer_ReloadCA(t *testing.T) {
clientCert2 := GenerateSelfSignedCertificateForTesting(t, 1024, org2, clientKey)
replaceFile(t, caFile, clientCert2, 0755)
if err := server.WaitForCertPoolReload(ctx1); err != nil {
t.Fatal(err)
}
pair2, err := tls.X509KeyPair(clientCert2, pem.EncodeToMemory(&pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: x509.MarshalPKCS1PrivateKey(clientKey),

View file

@ -26,18 +26,8 @@ option go_package = "github.com/strukturag/nextcloud-spreed-signaling;signaling"
package signaling;
service RpcSessions {
rpc LookupResumeId(LookupResumeIdRequest) returns (LookupResumeIdReply) {}
rpc LookupSessionId(LookupSessionIdRequest) returns (LookupSessionIdReply) {}
rpc IsSessionInCall(IsSessionInCallRequest) returns (IsSessionInCallReply) {}
rpc ProxySession(stream ClientSessionMessage) returns (stream ServerSessionMessage) {}
}
message LookupResumeIdRequest {
string resumeId = 1;
}
message LookupResumeIdReply {
string sessionId = 1;
}
message LookupSessionIdRequest {
@ -59,11 +49,3 @@ message IsSessionInCallRequest {
message IsSessionInCallReply {
bool inCall = 1;
}
message ClientSessionMessage {
bytes message = 1;
}
message ServerSessionMessage {
bytes message = 1;
}

View file

@ -29,18 +29,10 @@ import (
"net/http"
"net/url"
"sync"
"github.com/prometheus/client_golang/prometheus"
)
func init() {
RegisterHttpClientPoolStats()
}
type Pool struct {
pool chan *http.Client
currentConnections prometheus.Gauge
}
func (p *Pool) get(ctx context.Context) (client *http.Client, err error) {
@ -48,24 +40,21 @@ func (p *Pool) get(ctx context.Context) (client *http.Client, err error) {
case <-ctx.Done():
return nil, ctx.Err()
case client := <-p.pool:
p.currentConnections.Inc()
return client, nil
}
}
func (p *Pool) Put(c *http.Client) {
p.currentConnections.Dec()
p.pool <- c
}
func newPool(host string, constructor func() *http.Client, size int) (*Pool, error) {
func newPool(constructor func() *http.Client, size int) (*Pool, error) {
if size <= 0 {
return nil, fmt.Errorf("can't create empty pool")
}
p := &Pool{
pool: make(chan *http.Client, size),
currentConnections: connectionsPerHostCurrent.WithLabelValues(host),
pool: make(chan *http.Client, size),
}
for size > 0 {
c := constructor()
@ -114,7 +103,7 @@ func (p *HttpClientPool) getPool(url *url.URL) (*Pool, error) {
return pool, nil
}
pool, err := newPool(url.Host, func() *http.Client {
pool, err := newPool(func() *http.Client {
return &http.Client{
Transport: p.transport,
// Only send body in redirect if going to same scheme / host.

View file

@ -1,43 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2024 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"github.com/prometheus/client_golang/prometheus"
)
var (
connectionsPerHostCurrent = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: "signaling",
Subsystem: "http_client_pool",
Name: "connections",
Help: "The current number of HTTP client connections per host",
}, []string{"host"})
httpClientPoolStats = []prometheus.Collector{
connectionsPerHostCurrent,
}
)
func RegisterHttpClientPoolStats() {
registerAll(httpClientPoolStats...)
}

View file

@ -29,7 +29,6 @@ import (
)
func TestHttpClientPool(t *testing.T) {
t.Parallel()
if _, err := NewHttpClientPool(0, false); err == nil {
t.Error("should not be possible to create empty pool")
}

864
hub.go

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -221,6 +221,8 @@ func (l *dummyGatewayListener) ConnectionInterrupted() {
// Gateway represents a connection to an instance of the Janus Gateway.
type JanusGateway struct {
nextTransaction uint64
listener GatewayListener
// Sessions is a map of the currently active sessions to the gateway.
@ -230,9 +232,8 @@ type JanusGateway struct {
// and Gateway.Unlock() methods provided by the embedded sync.Mutex.
sync.Mutex
conn *websocket.Conn
nextTransaction atomic.Uint64
transactions map[uint64]*transaction
conn *websocket.Conn
transactions map[uint64]*transaction
closer *Closer
@ -258,8 +259,8 @@ type JanusGateway struct {
// return gateway, nil
// }
func NewJanusGateway(ctx context.Context, wsURL string, listener GatewayListener) (*JanusGateway, error) {
conn, _, err := janusDialer.DialContext(ctx, wsURL, nil)
func NewJanusGateway(wsURL string, listener GatewayListener) (*JanusGateway, error) {
conn, _, err := janusDialer.Dial(wsURL, nil)
if err != nil {
return nil, err
}
@ -310,7 +311,7 @@ func (gateway *JanusGateway) cancelTransactions() {
t.quit()
}(t)
}
clear(gateway.transactions)
gateway.transactions = make(map[uint64]*transaction)
gateway.Unlock()
}
@ -327,7 +328,7 @@ func (gateway *JanusGateway) removeTransaction(id uint64) {
}
func (gateway *JanusGateway) send(msg map[string]interface{}, t *transaction) (uint64, error) {
id := gateway.nextTransaction.Add(1)
id := atomic.AddUint64(&gateway.nextTransaction, 1)
msg["transaction"] = strconv.FormatUint(id, 10)
data, err := json.Marshal(msg)
if err != nil {

View file

@ -66,7 +66,7 @@ type McuInitiator interface {
}
type Mcu interface {
Start(ctx context.Context) error
Start() error
Stop()
Reload(config *goconf.ConfigFile)
@ -75,77 +75,14 @@ type Mcu interface {
GetStats() interface{}
NewPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType StreamType, bitrate int, mediaTypes MediaType, initiator McuInitiator) (McuPublisher, error)
NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType, initiator McuInitiator) (McuSubscriber, error)
}
// PublisherStream contains the available properties when creating a
// remote publisher in Janus.
type PublisherStream struct {
Mid string `json:"mid"`
Mindex int `json:"mindex"`
Type string `json:"type"`
Description string `json:"description,omitempty"`
Disabled bool `json:"disabled,omitempty"`
// For types "audio" and "video"
Codec string `json:"codec,omitempty"`
// For type "audio"
Stereo bool `json:"stereo,omitempty"`
Fec bool `json:"fec,omitempty"`
Dtx bool `json:"dtx,omitempty"`
// For type "video"
Simulcast bool `json:"simulcast,omitempty"`
Svc bool `json:"svc,omitempty"`
ProfileH264 string `json:"h264_profile,omitempty"`
ProfileVP9 string `json:"vp9_profile,omitempty"`
ExtIdVideoOrientation int `json:"videoorient_ext_id,omitempty"`
ExtIdPlayoutDelay int `json:"playoutdelay_ext_id,omitempty"`
}
type RemotePublisherController interface {
PublisherId() string
StartPublishing(ctx context.Context, publisher McuRemotePublisherProperties) error
GetStreams(ctx context.Context) ([]PublisherStream, error)
}
type RemoteMcu interface {
NewRemotePublisher(ctx context.Context, listener McuListener, controller RemotePublisherController, streamType StreamType) (McuRemotePublisher, error)
NewRemoteSubscriber(ctx context.Context, listener McuListener, publisher McuRemotePublisher) (McuRemoteSubscriber, error)
}
type StreamType string
const (
StreamTypeAudio StreamType = "audio"
StreamTypeVideo StreamType = "video"
StreamTypeScreen StreamType = "screen"
)
func IsValidStreamType(s string) bool {
switch s {
case string(StreamTypeAudio):
fallthrough
case string(StreamTypeVideo):
fallthrough
case string(StreamTypeScreen):
return true
default:
return false
}
NewPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType string, bitrate int, mediaTypes MediaType, initiator McuInitiator) (McuPublisher, error)
NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType string) (McuSubscriber, error)
}
type McuClient interface {
Id() string
Sid() string
StreamType() StreamType
MaxBitrate() int
StreamType() string
Close(ctx context.Context)
@ -157,10 +94,6 @@ type McuPublisher interface {
HasMedia(MediaType) bool
SetMedia(MediaType)
GetStreams(ctx context.Context) ([]PublisherStream, error)
PublishRemote(ctx context.Context, remoteId string, hostname string, port int, rtcpPort int) error
UnpublishRemote(ctx context.Context, remoteId string) error
}
type McuSubscriber interface {
@ -168,18 +101,3 @@ type McuSubscriber interface {
Publisher() string
}
type McuRemotePublisherProperties interface {
Port() int
RtcpPort() int
}
type McuRemotePublisher interface {
McuClient
McuRemotePublisherProperties
}
type McuRemoteSubscriber interface {
McuSubscriber
}

View file

@ -28,43 +28,3 @@ import (
func TestCommonMcuStats(t *testing.T) {
collectAndLint(t, commonMcuStats...)
}
type MockMcuListener struct {
publicId string
}
func (m *MockMcuListener) PublicId() string {
return m.publicId
}
func (m *MockMcuListener) OnUpdateOffer(client McuClient, offer map[string]interface{}) {
}
func (m *MockMcuListener) OnIceCandidate(client McuClient, candidate interface{}) {
}
func (m *MockMcuListener) OnIceCompleted(client McuClient) {
}
func (m *MockMcuListener) SubscriberSidUpdated(subscriber McuSubscriber) {
}
func (m *MockMcuListener) PublisherClosed(publisher McuPublisher) {
}
func (m *MockMcuListener) SubscriberClosed(subscriber McuSubscriber) {
}
type MockMcuInitiator struct {
country string
}
func (m *MockMcuInitiator) Country() string {
return m.country
}

File diff suppressed because it is too large Load diff

View file

@ -1,216 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2017 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"context"
"log"
"reflect"
"strconv"
"sync"
"github.com/notedit/janus-go"
)
// mcuJanusClient is the common base for Janus-backed MCU clients
// (publishers and subscribers). It owns the plugin handle and the
// event-dispatch callbacks driven by run().
type mcuJanusClient struct {
	mcu      *mcuJanus   // owning MCU instance
	listener McuListener // receives ICE / offer / close notifications
	mu       sync.Mutex // nolint
	id       uint64 // numeric client id, formatted by Id()
	session  uint64 // Janus session id
	roomId   uint64 // Janus videoroom id; 0 when not attached to a room
	sid        string     // session id string, reported via Sid()
	streamType StreamType // audio / video / screen
	maxBitrate int        // maximum bitrate, reported via MaxBitrate()
	handle   *JanusHandle // attached plugin handle; set to nil by closeClient()
	handleId uint64
	closeChan chan struct{} // closed in closeClient() to stop run()
	deferred  chan func()   // deferred work executed on the run() goroutine
	// Event callbacks, assigned by the concrete publisher/subscriber type
	// and dispatched from run().
	handleEvent     func(event *janus.EventMsg)
	handleHangup    func(event *janus.HangupMsg)
	handleDetached  func(event *janus.DetachedMsg)
	handleConnected func(event *janus.WebRTCUpMsg)
	handleSlowLink  func(event *janus.SlowLinkMsg)
	handleMedia     func(event *janus.MediaMsg)
}
// Id returns the client's numeric id formatted as a decimal string.
func (c *mcuJanusClient) Id() string {
	return strconv.FormatUint(c.id, 10)
}

// Sid returns the session id string of this client.
func (c *mcuJanusClient) Sid() string {
	return c.sid
}

// StreamType returns the type of stream handled by this client
// (audio / video / screen).
func (c *mcuJanusClient) StreamType() StreamType {
	return c.streamType
}

// MaxBitrate returns the maximum bitrate configured for this client.
func (c *mcuJanusClient) MaxBitrate() int {
	return c.maxBitrate
}
// Close is a no-op base implementation; concrete types (e.g.
// mcuJanusPublisher.Close) perform their own teardown and then call it.
func (c *mcuJanusClient) Close(ctx context.Context) {
}

// SendMessage is a no-op base implementation; concrete types implement
// their own offer/candidate handling.
func (c *mcuJanusClient) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) {
}
// closeClient detaches the plugin handle (if still attached), stops the
// event loop by closing closeChan, and returns true if a handle was
// actually closed. "Handle not found" errors from Janus are ignored
// since the handle is already gone on the server side.
func (c *mcuJanusClient) closeClient(ctx context.Context) bool {
	if handle := c.handle; handle != nil {
		// Clear the handle first so concurrent readers see the client as closed.
		c.handle = nil
		close(c.closeChan)
		if _, err := handle.Detach(ctx); err != nil {
			if e, ok := err.(*janus.ErrorMsg); !ok || e.Err.Code != JANUS_ERROR_HANDLE_NOT_FOUND {
				log.Println("Could not detach client", handle.Id, err)
			}
		}
		return true
	}
	return false
}
// run is the per-client event loop: it dispatches incoming Janus events
// to the type-specific handler callbacks, executes functions queued on
// the "deferred" channel, and terminates when closeChan is closed.
func (c *mcuJanusClient) run(handle *JanusHandle, closeChan <-chan struct{}) {
loop:
	for {
		select {
		case msg := <-handle.Events:
			switch t := msg.(type) {
			case *janus.EventMsg:
				c.handleEvent(t)
			case *janus.HangupMsg:
				c.handleHangup(t)
			case *janus.DetachedMsg:
				c.handleDetached(t)
			case *janus.MediaMsg:
				c.handleMedia(t)
			case *janus.WebRTCUpMsg:
				c.handleConnected(t)
			case *janus.SlowLinkMsg:
				c.handleSlowLink(t)
			case *TrickleMsg:
				c.handleTrickle(t)
			default:
				log.Println("Received unsupported event type", msg, reflect.TypeOf(msg))
			}
		case f := <-c.deferred:
			// Run deferred work on this goroutine to serialize it with
			// event handling.
			f()
		case <-closeChan:
			break loop
		}
	}
}
// sendOffer sends a "configure" request with the given JSEP offer to the
// plugin handle and invokes callback with the answer JSEP on success, or
// with the error otherwise. Audio, video and data are always enabled in
// the request.
func (c *mcuJanusClient) sendOffer(ctx context.Context, offer map[string]interface{}, callback func(error, map[string]interface{})) {
	handle := c.handle
	if handle == nil {
		callback(ErrNotConnected, nil)
		return
	}
	configure_msg := map[string]interface{}{
		"request": "configure",
		"audio": true,
		"video": true,
		"data": true,
	}
	answer_msg, err := handle.Message(ctx, configure_msg, offer)
	if err != nil {
		callback(err, nil)
		return
	}
	callback(nil, answer_msg.Jsep)
}
// sendAnswer sends a "start" request for the client's room together with
// the given JSEP answer. The callback receives (nil, nil) on success or
// the error otherwise; no JSEP is returned for this request.
func (c *mcuJanusClient) sendAnswer(ctx context.Context, answer map[string]interface{}, callback func(error, map[string]interface{})) {
	handle := c.handle
	if handle == nil {
		callback(ErrNotConnected, nil)
		return
	}
	start_msg := map[string]interface{}{
		"request": "start",
		"room": c.roomId,
	}
	start_response, err := handle.Message(ctx, start_msg, answer)
	if err != nil {
		callback(err, nil)
		return
	}
	log.Println("Started listener", start_response)
	callback(nil, nil)
}
// sendCandidate trickles a single ICE candidate to the plugin handle.
// The callback receives (nil, nil) on success or the error otherwise.
func (c *mcuJanusClient) sendCandidate(ctx context.Context, candidate interface{}, callback func(error, map[string]interface{})) {
	handle := c.handle
	if handle == nil {
		callback(ErrNotConnected, nil)
		return
	}
	if _, err := handle.Trickle(ctx, candidate); err != nil {
		callback(err, nil)
		return
	}
	callback(nil, nil)
}
// handleTrickle forwards a trickle ICE event from Janus to the listener:
// either a single new candidate, or the end-of-candidates notification
// when the event is flagged as completed.
func (c *mcuJanusClient) handleTrickle(event *TrickleMsg) {
	if !event.Candidate.Completed {
		c.listener.OnIceCandidate(c, event.Candidate)
		return
	}
	c.listener.OnIceCompleted(c)
}
// selectStream applies the given stream selection (e.g. simulcast
// substream / temporal layer) by sending a "configure" request to the
// plugin handle. The callback is invoked with (nil, nil) on success or
// when there is nothing to select, and with the error otherwise.
func (c *mcuJanusClient) selectStream(ctx context.Context, stream *streamSelection, callback func(error, map[string]interface{})) {
	handle := c.handle
	if handle == nil {
		callback(ErrNotConnected, nil)
		return
	}
	if stream == nil || !stream.HasValues() {
		// Nothing to change.
		callback(nil, nil)
		return
	}
	configure_msg := map[string]interface{}{
		"request": "configure",
	}
	// "stream" is guaranteed non-nil here by the early return above
	// (the previous redundant "stream != nil" check was removed).
	stream.AddToMessage(configure_msg)
	if _, err := handle.Message(ctx, configure_msg, nil); err != nil {
		callback(err, nil)
		return
	}
	callback(nil, nil)
}

View file

@ -1,457 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2017 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"context"
"errors"
"fmt"
"log"
"strconv"
"strings"
"sync/atomic"
"github.com/notedit/janus-go"
"github.com/pion/sdp/v3"
)
const (
ExtensionUrlPlayoutDelay = "http://www.webrtc.org/experiments/rtp-hdrext/playout-delay"
ExtensionUrlVideoOrientation = "urn:3gpp:video-orientation"
)
const (
sdpHasOffer = 1
sdpHasAnswer = 2
)
type mcuJanusPublisher struct {
mcuJanusClient
id string
bitrate int
mediaTypes MediaType
stats publisherStatsCounter
sdpFlags Flags
sdpReady *Closer
offerSdp atomic.Pointer[sdp.SessionDescription]
answerSdp atomic.Pointer[sdp.SessionDescription]
}
func (p *mcuJanusPublisher) handleEvent(event *janus.EventMsg) {
if videoroom := getPluginStringValue(event.Plugindata, pluginVideoRoom, "videoroom"); videoroom != "" {
ctx := context.TODO()
switch videoroom {
case "destroyed":
log.Printf("Publisher %d: associated room has been destroyed, closing", p.handleId)
go p.Close(ctx)
case "slow_link":
// Ignore, processed through "handleSlowLink" in the general events.
default:
log.Printf("Unsupported videoroom publisher event in %d: %+v", p.handleId, event)
}
} else {
log.Printf("Unsupported publisher event in %d: %+v", p.handleId, event)
}
}
func (p *mcuJanusPublisher) handleHangup(event *janus.HangupMsg) {
log.Printf("Publisher %d received hangup (%s), closing", p.handleId, event.Reason)
go p.Close(context.Background())
}
func (p *mcuJanusPublisher) handleDetached(event *janus.DetachedMsg) {
log.Printf("Publisher %d received detached, closing", p.handleId)
go p.Close(context.Background())
}
func (p *mcuJanusPublisher) handleConnected(event *janus.WebRTCUpMsg) {
log.Printf("Publisher %d received connected", p.handleId)
p.mcu.publisherConnected.Notify(getStreamId(p.id, p.streamType))
}
// handleSlowLink logs "slow link" notifications from Janus for this
// publisher. "Uplink" here is the Janus -> client direction.
func (p *mcuJanusPublisher) handleSlowLink(event *janus.SlowLinkMsg) {
	if event.Uplink {
		log.Printf("Publisher %s (%d) is reporting %d lost packets on the uplink (Janus -> client)", p.listener.PublicId(), p.handleId, event.Lost)
	} else {
		log.Printf("Publisher %s (%d) is reporting %d lost packets on the downlink (client -> Janus)", p.listener.PublicId(), p.handleId, event.Lost)
	}
}
// handleMedia updates the publisher's stream statistics when Janus
// reports that media of a given type started or stopped being received.
// For screen-sharing publishers, "video" events are accounted under
// "screen" so that audio, video and screensharing stay distinguishable.
func (p *mcuJanusPublisher) handleMedia(event *janus.MediaMsg) {
	mediaType := StreamType(event.Type)
	if p.streamType == StreamTypeScreen && mediaType == StreamTypeVideo {
		// We want to differentiate between audio, video and screensharing
		mediaType = StreamTypeScreen
	}
	p.stats.EnableStream(mediaType, event.Receiving)
}
func (p *mcuJanusPublisher) HasMedia(mt MediaType) bool {
return (p.mediaTypes & mt) == mt
}
func (p *mcuJanusPublisher) SetMedia(mt MediaType) {
p.mediaTypes = mt
}
func (p *mcuJanusPublisher) NotifyReconnected() {
ctx := context.TODO()
handle, session, roomId, _, err := p.mcu.getOrCreatePublisherHandle(ctx, p.id, p.streamType, p.bitrate)
if err != nil {
log.Printf("Could not reconnect publisher %s: %s", p.id, err)
// TODO(jojo): Retry
return
}
p.handle = handle
p.handleId = handle.Id
p.session = session
p.roomId = roomId
log.Printf("Publisher %s reconnected on handle %d", p.id, p.handleId)
}
func (p *mcuJanusPublisher) Close(ctx context.Context) {
notify := false
p.mu.Lock()
if handle := p.handle; handle != nil && p.roomId != 0 {
destroy_msg := map[string]interface{}{
"request": "destroy",
"room": p.roomId,
}
if _, err := handle.Request(ctx, destroy_msg); err != nil {
log.Printf("Error destroying room %d: %s", p.roomId, err)
} else {
log.Printf("Room %d destroyed", p.roomId)
}
p.mcu.mu.Lock()
delete(p.mcu.publishers, getStreamId(p.id, p.streamType))
p.mcu.mu.Unlock()
p.roomId = 0
notify = true
}
p.closeClient(ctx)
p.mu.Unlock()
p.stats.Reset()
if notify {
statsPublishersCurrent.WithLabelValues(string(p.streamType)).Dec()
p.mcu.unregisterClient(p)
p.listener.PublisherClosed(p)
}
p.mcuJanusClient.Close(ctx)
}
func (p *mcuJanusPublisher) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) {
statsMcuMessagesTotal.WithLabelValues(data.Type).Inc()
jsep_msg := data.Payload
switch data.Type {
case "offer":
p.deferred <- func() {
if data.offerSdp == nil {
// Should have been checked before.
go callback(errors.New("No sdp found in offer"), nil)
return
}
p.offerSdp.Store(data.offerSdp)
p.sdpFlags.Add(sdpHasOffer)
if p.sdpFlags.Get() == sdpHasAnswer|sdpHasOffer {
p.sdpReady.Close()
}
// TODO Tear down previous publisher and get a new one if sid does
// not match?
msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
defer cancel()
p.sendOffer(msgctx, jsep_msg, func(err error, jsep map[string]interface{}) {
if err != nil {
callback(err, jsep)
return
}
sdpData, found := jsep["sdp"]
if !found {
log.Printf("No sdp found in answer %+v", jsep)
} else {
sdpString, ok := sdpData.(string)
if !ok {
log.Printf("Invalid sdp found in answer %+v", jsep)
} else {
var answerSdp sdp.SessionDescription
if err := answerSdp.UnmarshalString(sdpString); err != nil {
log.Printf("Error parsing answer sdp %+v: %s", sdpString, err)
p.answerSdp.Store(nil)
p.sdpFlags.Remove(sdpHasAnswer)
} else {
p.answerSdp.Store(&answerSdp)
p.sdpFlags.Add(sdpHasAnswer)
if p.sdpFlags.Get() == sdpHasAnswer|sdpHasOffer {
p.sdpReady.Close()
}
}
}
}
callback(nil, jsep)
})
}
case "candidate":
p.deferred <- func() {
msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
defer cancel()
if data.Sid == "" || data.Sid == p.Sid() {
p.sendCandidate(msgctx, jsep_msg["candidate"], callback)
} else {
go callback(fmt.Errorf("Candidate message sid (%s) does not match publisher sid (%s)", data.Sid, p.Sid()), nil)
}
}
case "endOfCandidates":
// Ignore
default:
go callback(fmt.Errorf("Unsupported message type: %s", data.Type), nil)
}
}
// getFmtpValue extracts the value for "key" from an SDP "fmtp" attribute
// string of ";"-separated "key=value" entries. The key comparison is
// case-insensitive and ignores surrounding whitespace. Returns the
// trimmed value and true when found, or "" and false otherwise.
func getFmtpValue(fmtp string, key string) (string, bool) {
	for _, entry := range strings.Split(fmtp, ";") {
		eq := strings.Index(entry, "=")
		if eq < 0 {
			// Not a "key=value" entry, skip it.
			continue
		}
		if !strings.EqualFold(strings.TrimSpace(entry[:eq]), key) {
			continue
		}
		return strings.TrimSpace(entry[eq+1:]), true
	}
	return "", false
}
// GetStreams describes the negotiated streams of this publisher based on
// the stored SDP offer/answer pair. It blocks until both offer and
// answer have been processed (or ctx is done). One PublisherStream is
// returned per "m="-section carrying a mid; for audio/video sections the
// codec, fmtp-derived flags, RTP header-extension ids and simulcast
// detection are filled in, and datachannel sections are reported with
// type "data".
func (p *mcuJanusPublisher) GetStreams(ctx context.Context) ([]PublisherStream, error) {
	offerSdp := p.offerSdp.Load()
	answerSdp := p.answerSdp.Load()
	if offerSdp == nil || answerSdp == nil {
		// Wait until both SDPs have been processed (sdpReady is closed
		// once offer and answer flags are both set).
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-p.sdpReady.C:
			offerSdp = p.offerSdp.Load()
			answerSdp = p.answerSdp.Load()
			if offerSdp == nil || answerSdp == nil {
				// Only can happen on invalid SDPs.
				return nil, errors.New("no offer and/or answer processed yet")
			}
		}
	}
	var streams []PublisherStream
	for idx, m := range answerSdp.MediaDescriptions {
		mid, found := m.Attribute(sdp.AttrKeyMID)
		if !found {
			// Sections without a mid cannot be referenced, skip them.
			continue
		}
		s := PublisherStream{
			Mid: mid,
			Mindex: idx,
			Type: m.MediaName.Media,
		}
		if len(m.MediaName.Formats) == 0 {
			continue
		}
		if strings.EqualFold(s.Type, "application") && strings.EqualFold(m.MediaName.Formats[0], "webrtc-datachannel") {
			// Datachannel sections carry no codec information.
			s.Type = "data"
			streams = append(streams, s)
			continue
		}
		// Resolve the first payload type of the section to its codec.
		pt, err := strconv.ParseInt(m.MediaName.Formats[0], 10, 8)
		if err != nil {
			continue
		}
		answerCodec, err := answerSdp.GetCodecForPayloadType(uint8(pt))
		if err != nil {
			continue
		}
		if strings.EqualFold(s.Type, "audio") {
			s.Codec = answerCodec.Name
			// Opus-style fmtp flags: inband FEC, DTX and stereo.
			if value, found := getFmtpValue(answerCodec.Fmtp, "useinbandfec"); found && value == "1" {
				s.Fec = true
			}
			if value, found := getFmtpValue(answerCodec.Fmtp, "usedtx"); found && value == "1" {
				s.Dtx = true
			}
			if value, found := getFmtpValue(answerCodec.Fmtp, "stereo"); found && value == "1" {
				s.Stereo = true
			}
		} else if strings.EqualFold(s.Type, "video") {
			s.Codec = answerCodec.Name
			// TODO: Determine if SVC is used.
			s.Svc = false
			if strings.EqualFold(answerCodec.Name, "vp9") {
				// Parse VP9 profile from "profile-id=XXX"
				// Example: "a=fmtp:98 profile-id=0"
				if profile, found := getFmtpValue(answerCodec.Fmtp, "profile-id"); found {
					s.ProfileVP9 = profile
				}
			} else if strings.EqualFold(answerCodec.Name, "h264") {
				// Parse H.264 profile from "profile-level-id=XXX"
				// Example: "a=fmtp:104 level-asymmetry-allowed=1;packetization-mode=0;profile-level-id=42001f"
				if profile, found := getFmtpValue(answerCodec.Fmtp, "profile-level-id"); found {
					s.ProfileH264 = profile
				}
			}
			// Collect RTP header-extension ids and simulcast markers from
			// the answer's attributes.
			var extmap sdp.ExtMap
			for _, a := range m.Attributes {
				switch a.Key {
				case sdp.AttrKeyExtMap:
					if err := extmap.Unmarshal(extmap.Name() + ":" + a.Value); err != nil {
						log.Printf("Error parsing extmap %s: %s", a.Value, err)
						continue
					}
					switch extmap.URI.String() {
					case ExtensionUrlPlayoutDelay:
						s.ExtIdPlayoutDelay = extmap.Value
					case ExtensionUrlVideoOrientation:
						s.ExtIdVideoOrientation = extmap.Value
					}
				case "simulcast":
					s.Simulcast = true
				case sdp.AttrKeySSRCGroup:
					if strings.HasPrefix(a.Value, "SIM ") {
						s.Simulcast = true
					}
				}
			}
			// Simulcast may also only be announced in the offer.
			for _, a := range offerSdp.MediaDescriptions[idx].Attributes {
				switch a.Key {
				case "simulcast":
					s.Simulcast = true
				case sdp.AttrKeySSRCGroup:
					if strings.HasPrefix(a.Value, "SIM ") {
						s.Simulcast = true
					}
				}
			}
		} else if strings.EqualFold(s.Type, "data") { // nolint
			// Already handled above.
		} else {
			log.Printf("Skip type %s", s.Type)
			continue
		}
		streams = append(streams, s)
	}
	return streams, nil
}
// getPublisherRemoteId builds the Janus-side id of a remotely published
// stream as "<publisher-id>@<remote-id>".
func getPublisherRemoteId(id string, remoteId string) string {
	return id + "@" + remoteId
}
// PublishRemote asks Janus ("publish_remotely") to forward the media of
// this publisher to a remote Janus instance at hostname:port (RTCP on
// rtcpPort), using an id derived from remoteId. Plugin-level errors are
// converted into a *janus.ErrorMsg; when Janus omits the code or the
// message, they default to 500 / "unknown error".
func (p *mcuJanusPublisher) PublishRemote(ctx context.Context, remoteId string, hostname string, port int, rtcpPort int) error {
	msg := map[string]interface{}{
		"request": "publish_remotely",
		"room": p.roomId,
		"publisher_id": streamTypeUserIds[p.streamType],
		"remote_id": getPublisherRemoteId(p.id, remoteId),
		"host": hostname,
		"port": port,
		"rtcp_port": rtcpPort,
	}
	// NOTE(review): p.handle is used without a nil check; Close() sets it
	// to nil under p.mu — confirm callers cannot race with closing.
	response, err := p.handle.Request(ctx, msg)
	if err != nil {
		return err
	}
	errorMessage := getPluginStringValue(response.PluginData, pluginVideoRoom, "error")
	errorCode := getPluginIntValue(response.PluginData, pluginVideoRoom, "error_code")
	if errorMessage != "" || errorCode != 0 {
		// Normalize partially-filled plugin errors.
		if errorCode == 0 {
			errorCode = 500
		}
		if errorMessage == "" {
			errorMessage = "unknown error"
		}
		return &janus.ErrorMsg{
			Err: janus.ErrorData{
				Code: int(errorCode),
				Reason: errorMessage,
			},
		}
	}
	log.Printf("Publishing %s to %s (port=%d, rtcpPort=%d) for %s", p.id, hostname, port, rtcpPort, remoteId)
	return nil
}
// UnpublishRemote stops forwarding ("unpublish_remotely") this
// publisher's media to the remote instance identified by remoteId.
// Plugin-level errors are converted into a *janus.ErrorMsg with the same
// 500 / "unknown error" defaults as PublishRemote.
func (p *mcuJanusPublisher) UnpublishRemote(ctx context.Context, remoteId string) error {
	msg := map[string]interface{}{
		"request": "unpublish_remotely",
		"room": p.roomId,
		"publisher_id": streamTypeUserIds[p.streamType],
		"remote_id": getPublisherRemoteId(p.id, remoteId),
	}
	// NOTE(review): p.handle is used without a nil check; Close() sets it
	// to nil under p.mu — confirm callers cannot race with closing.
	response, err := p.handle.Request(ctx, msg)
	if err != nil {
		return err
	}
	errorMessage := getPluginStringValue(response.PluginData, pluginVideoRoom, "error")
	errorCode := getPluginIntValue(response.PluginData, pluginVideoRoom, "error_code")
	if errorMessage != "" || errorCode != 0 {
		// Normalize partially-filled plugin errors.
		if errorCode == 0 {
			errorCode = 500
		}
		if errorMessage == "" {
			errorMessage = "unknown error"
		}
		return &janus.ErrorMsg{
			Err: janus.ErrorData{
				Code: int(errorCode),
				Reason: errorMessage,
			},
		}
	}
	log.Printf("Unpublished remote %s for %s", p.id, remoteId)
	return nil
}

View file

@ -1,92 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2024 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"testing"
)
// TestGetFmtpValueH264 verifies extraction of the H.264
// "profile-level-id" value from fmtp strings, including whitespace
// around "=", missing keys and the empty string.
func TestGetFmtpValueH264(t *testing.T) {
	cases := []struct {
		fmtp     string
		expected string
	}{
		{
			"",
			"",
		},
		{
			"level-asymmetry-allowed=1;packetization-mode=0;profile-level-id=42001f",
			"42001f",
		},
		{
			"level-asymmetry-allowed=1;packetization-mode=0",
			"",
		},
		{
			"level-asymmetry-allowed=1; packetization-mode=0; profile-level-id = 42001f",
			"42001f",
		},
	}
	for _, c := range cases {
		value, found := getFmtpValue(c.fmtp, "profile-level-id")
		switch {
		case !found && c.expected != "":
			t.Errorf("did not find profile \"%s\" in \"%s\"", c.expected, c.fmtp)
		case found && c.expected == "":
			t.Errorf("did not expect profile in \"%s\" but got \"%s\"", c.fmtp, value)
		case found && c.expected != value:
			t.Errorf("expected profile \"%s\" in \"%s\" but got \"%s\"", c.expected, c.fmtp, value)
		}
	}
}
// TestGetFmtpValueVP9 verifies extraction of the VP9 "profile-id" value
// from fmtp strings, with and without whitespace around "=".
func TestGetFmtpValueVP9(t *testing.T) {
	cases := []struct {
		fmtp     string
		expected string
	}{
		{
			"",
			"",
		},
		{
			"profile-id=0",
			"0",
		},
		{
			"profile-id = 0",
			"0",
		},
	}
	for _, c := range cases {
		value, found := getFmtpValue(c.fmtp, "profile-id")
		switch {
		case !found && c.expected != "":
			t.Errorf("did not find profile \"%s\" in \"%s\"", c.expected, c.fmtp)
		case found && c.expected == "":
			t.Errorf("did not expect profile in \"%s\" but got \"%s\"", c.fmtp, value)
		case found && c.expected != value:
			t.Errorf("expected profile \"%s\" in \"%s\" but got \"%s\"", c.expected, c.fmtp, value)
		}
	}
}

View file

@ -1,150 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2024 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"context"
"log"
"sync/atomic"
"github.com/notedit/janus-go"
)
type mcuJanusRemotePublisher struct {
mcuJanusPublisher
ref atomic.Int64
port int
rtcpPort int
}
// addRef increments the reference count and returns the new value.
func (p *mcuJanusRemotePublisher) addRef() int64 {
	return p.ref.Add(1)
}

// release decrements the reference count and returns true when it
// reached zero, i.e. the last user is gone and Close() may tear the
// publisher down.
func (p *mcuJanusRemotePublisher) release() bool {
	return p.ref.Add(-1) == 0
}

// Port returns the RTP port of this remote publisher.
func (p *mcuJanusRemotePublisher) Port() int {
	return p.port
}

// RtcpPort returns the RTCP port of this remote publisher.
func (p *mcuJanusRemotePublisher) RtcpPort() int {
	return p.rtcpPort
}
func (p *mcuJanusRemotePublisher) handleEvent(event *janus.EventMsg) {
if videoroom := getPluginStringValue(event.Plugindata, pluginVideoRoom, "videoroom"); videoroom != "" {
ctx := context.TODO()
switch videoroom {
case "destroyed":
log.Printf("Remote publisher %d: associated room has been destroyed, closing", p.handleId)
go p.Close(ctx)
case "slow_link":
// Ignore, processed through "handleSlowLink" in the general events.
default:
log.Printf("Unsupported videoroom remote publisher event in %d: %+v", p.handleId, event)
}
} else {
log.Printf("Unsupported remote publisher event in %d: %+v", p.handleId, event)
}
}
func (p *mcuJanusRemotePublisher) handleHangup(event *janus.HangupMsg) {
log.Printf("Remote publisher %d received hangup (%s), closing", p.handleId, event.Reason)
go p.Close(context.Background())
}
func (p *mcuJanusRemotePublisher) handleDetached(event *janus.DetachedMsg) {
log.Printf("Remote publisher %d received detached, closing", p.handleId)
go p.Close(context.Background())
}
func (p *mcuJanusRemotePublisher) handleConnected(event *janus.WebRTCUpMsg) {
log.Printf("Remote publisher %d received connected", p.handleId)
p.mcu.publisherConnected.Notify(getStreamId(p.id, p.streamType))
}
func (p *mcuJanusRemotePublisher) handleSlowLink(event *janus.SlowLinkMsg) {
if event.Uplink {
log.Printf("Remote publisher %s (%d) is reporting %d lost packets on the uplink (Janus -> client)", p.listener.PublicId(), p.handleId, event.Lost)
} else {
log.Printf("Remote publisher %s (%d) is reporting %d lost packets on the downlink (client -> Janus)", p.listener.PublicId(), p.handleId, event.Lost)
}
}
func (p *mcuJanusRemotePublisher) NotifyReconnected() {
ctx := context.TODO()
handle, session, roomId, _, err := p.mcu.getOrCreatePublisherHandle(ctx, p.id, p.streamType, p.bitrate)
if err != nil {
log.Printf("Could not reconnect remote publisher %s: %s", p.id, err)
// TODO(jojo): Retry
return
}
p.handle = handle
p.handleId = handle.Id
p.session = session
p.roomId = roomId
log.Printf("Remote publisher %s reconnected on handle %d", p.id, p.handleId)
}
// Close releases one reference to the remote publisher. The last release
// removes the remote publisher from the Janus room, destroys the room,
// unregisters it from the MCU and detaches the plugin handle; earlier
// calls are no-ops.
func (p *mcuJanusRemotePublisher) Close(ctx context.Context) {
	if !p.release() {
		// Still referenced by other users.
		return
	}
	p.mu.Lock()
	if handle := p.handle; handle != nil {
		response, err := p.handle.Request(ctx, map[string]interface{}{
			"request": "remove_remote_publisher",
			"room": p.roomId,
			"id": streamTypeUserIds[p.streamType],
		})
		if err != nil {
			log.Printf("Error removing remote publisher %s in room %d: %s", p.id, p.roomId, err)
		} else {
			log.Printf("Removed remote publisher: %+v", response)
		}
		if p.roomId != 0 {
			// Tear down the room that was created for this publisher.
			destroy_msg := map[string]interface{}{
				"request": "destroy",
				"room": p.roomId,
			}
			if _, err := handle.Request(ctx, destroy_msg); err != nil {
				log.Printf("Error destroying room %d: %s", p.roomId, err)
			} else {
				log.Printf("Room %d destroyed", p.roomId)
			}
			p.mcu.mu.Lock()
			delete(p.mcu.remotePublishers, getStreamId(p.id, p.streamType))
			p.mcu.mu.Unlock()
			p.roomId = 0
		}
	}
	p.closeClient(ctx)
	p.mu.Unlock()
}

View file

@ -1,115 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2024 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"context"
"log"
"strconv"
"sync/atomic"
"github.com/notedit/janus-go"
)
type mcuJanusRemoteSubscriber struct {
mcuJanusSubscriber
remote atomic.Pointer[mcuJanusRemotePublisher]
}
func (p *mcuJanusRemoteSubscriber) handleEvent(event *janus.EventMsg) {
if videoroom := getPluginStringValue(event.Plugindata, pluginVideoRoom, "videoroom"); videoroom != "" {
ctx := context.TODO()
switch videoroom {
case "destroyed":
log.Printf("Remote subscriber %d: associated room has been destroyed, closing", p.handleId)
go p.Close(ctx)
case "event":
// Handle renegotiations, but ignore other events like selected
// substream / temporal layer.
if getPluginStringValue(event.Plugindata, pluginVideoRoom, "configured") == "ok" &&
event.Jsep != nil && event.Jsep["type"] == "offer" && event.Jsep["sdp"] != nil {
p.listener.OnUpdateOffer(p, event.Jsep)
}
case "slow_link":
// Ignore, processed through "handleSlowLink" in the general events.
default:
log.Printf("Unsupported videoroom event %s for remote subscriber %d: %+v", videoroom, p.handleId, event)
}
} else {
log.Printf("Unsupported event for remote subscriber %d: %+v", p.handleId, event)
}
}
func (p *mcuJanusRemoteSubscriber) handleHangup(event *janus.HangupMsg) {
log.Printf("Remote subscriber %d received hangup (%s), closing", p.handleId, event.Reason)
go p.Close(context.Background())
}
func (p *mcuJanusRemoteSubscriber) handleDetached(event *janus.DetachedMsg) {
log.Printf("Remote subscriber %d received detached, closing", p.handleId)
go p.Close(context.Background())
}
// handleConnected updates the MCU bookkeeping once the remote subscriber's
// PeerConnection is established.
func (p *mcuJanusRemoteSubscriber) handleConnected(event *janus.WebRTCUpMsg) {
	log.Printf("Remote subscriber %d received connected", p.handleId)
	id := p.Id()
	p.mcu.SubscriberConnected(id, p.publisher, p.streamType)
}
// handleSlowLink logs packet-loss reports from Janus for this remote
// subscriber, distinguishing the uplink and downlink direction.
func (p *mcuJanusRemoteSubscriber) handleSlowLink(event *janus.SlowLinkMsg) {
	if !event.Uplink {
		log.Printf("Remote subscriber %s (%d) is reporting %d lost packets on the downlink (client -> Janus)", p.listener.PublicId(), p.handleId, event.Lost)
		return
	}

	log.Printf("Remote subscriber %s (%d) is reporting %d lost packets on the uplink (Janus -> client)", p.listener.PublicId(), p.handleId, event.Lost)
}
// handleMedia is part of the event-handler interface; media events are only
// triggered for publishers, so there is nothing to do for a subscriber.
func (p *mcuJanusRemoteSubscriber) handleMedia(event *janus.MediaMsg) {
	// Only triggered for publishers
}
// NotifyReconnected re-establishes the remote subscriber after the connection
// to Janus was lost: a fresh subscriber handle is created for the publisher
// and the local handle / room / sid state is updated so later requests use
// the new session. On failure the subscriber is closed.
func (p *mcuJanusRemoteSubscriber) NotifyReconnected() {
	ctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
	defer cancel()

	handle, pub, err := p.mcu.getOrCreateSubscriberHandle(ctx, p.publisher, p.streamType)
	if err != nil {
		// TODO(jojo): Retry?
		log.Printf("Could not reconnect remote subscriber for publisher %s: %s", p.publisher, err)
		p.Close(context.Background())
		return
	}

	p.handle = handle
	p.handleId = handle.Id
	p.roomId = pub.roomId
	// The sid is derived from the (new) handle id; notify the listener so
	// clients learn about the changed sid.
	p.sid = strconv.FormatUint(handle.Id, 10)
	p.listener.SubscriberSidUpdated(p)
	// Fixed: log as "Remote subscriber" for consistency with all other log
	// messages of this type (was "Subscriber").
	log.Printf("Remote subscriber %d for publisher %s reconnected on handle %d", p.id, p.publisher, p.handleId)
}
// Close tears down the subscriber and releases the remote publisher that was
// attached for it. The atomic swap guarantees the publisher is closed at most
// once even if Close is called repeatedly.
func (p *mcuJanusRemoteSubscriber) Close(ctx context.Context) {
	p.mcuJanusSubscriber.Close(ctx)

	remote := p.remote.Swap(nil)
	if remote != nil {
		remote.Close(context.Background())
	}
}

View file

@ -1,110 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2017 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"database/sql"
"fmt"
)
// streamSelection describes the optional per-subscriber stream parameters a
// client may request: simulcast substream / temporal layer and whether audio
// and video should be received. A field is only applied when its "Valid"
// flag is set, i.e. when the client actually sent that entry.
type streamSelection struct {
	substream sql.NullInt16
	temporal  sql.NullInt16
	audio     sql.NullBool
	video     sql.NullBool
}

// HasValues reports whether at least one selection field was provided.
func (s *streamSelection) HasValues() bool {
	return s.substream.Valid || s.temporal.Valid || s.audio.Valid || s.video.Valid
}

// AddToMessage copies all provided selection fields into the given request
// message; unset fields are left out.
func (s *streamSelection) AddToMessage(message map[string]interface{}) {
	if s.substream.Valid {
		message["substream"] = s.substream.Int16
	}
	if s.temporal.Valid {
		message["temporal"] = s.temporal.Int16
	}
	if s.audio.Valid {
		message["audio"] = s.audio.Bool
	}
	if s.video.Valid {
		message["video"] = s.video.Bool
	}
}

// parseStreamSelectionInt16 parses a numeric payload entry ("substream" /
// "temporal") into result. JSON decoding normally yields float64, but int and
// float32 are accepted as well. The name is only used for error messages.
func parseStreamSelectionInt16(name string, value interface{}, result *sql.NullInt16) error {
	switch value := value.(type) {
	case int:
		result.Valid = true
		result.Int16 = int16(value)
	case float32:
		result.Valid = true
		result.Int16 = int16(value)
	case float64:
		result.Valid = true
		result.Int16 = int16(value)
	default:
		return fmt.Errorf("Unsupported %s value: %v", name, value)
	}
	return nil
}

// parseStreamSelectionBool parses a boolean payload entry ("audio" / "video")
// into result. The name is only used for error messages.
func parseStreamSelectionBool(name string, value interface{}, result *sql.NullBool) error {
	switch value := value.(type) {
	case bool:
		result.Valid = true
		result.Bool = value
	default:
		return fmt.Errorf("Unsupported %s value: %v", name, value)
	}
	return nil
}

// parseStreamSelection extracts the optional stream-selection fields from a
// client message payload. Missing entries leave the corresponding field
// unset; entries with an unsupported type return an error.
func parseStreamSelection(payload map[string]interface{}) (*streamSelection, error) {
	var stream streamSelection
	if value, found := payload["substream"]; found {
		if err := parseStreamSelectionInt16("substream", value, &stream.substream); err != nil {
			return nil, err
		}
	}

	if value, found := payload["temporal"]; found {
		if err := parseStreamSelectionInt16("temporal", value, &stream.temporal); err != nil {
			return nil, err
		}
	}

	if value, found := payload["audio"]; found {
		if err := parseStreamSelectionBool("audio", value, &stream.audio); err != nil {
			return nil, err
		}
	}

	if value, found := payload["video"]; found {
		if err := parseStreamSelectionBool("video", value, &stream.video); err != nil {
			return nil, err
		}
	}

	return &stream, nil
}

View file

@ -1,321 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2017 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"context"
"fmt"
"log"
"strconv"
"github.com/notedit/janus-go"
)
// mcuJanusSubscriber is a Janus-backed subscriber that receives one stream
// type of a single publisher.
type mcuJanusSubscriber struct {
	mcuJanusClient

	// publisher is the id of the publisher this subscriber receives from.
	publisher string
}
// Publisher returns the id of the publisher this subscriber receives from.
func (p *mcuJanusSubscriber) Publisher() string {
	return p.publisher
}
// handleEvent processes plugin events received on this subscriber's handle.
// Only "videoroom" plugin events are understood; anything else is logged and
// ignored.
func (p *mcuJanusSubscriber) handleEvent(event *janus.EventMsg) {
	videoroom := getPluginStringValue(event.Plugindata, pluginVideoRoom, "videoroom")
	if videoroom == "" {
		log.Printf("Unsupported event for subscriber %d: %+v", p.handleId, event)
		return
	}

	switch videoroom {
	case "destroyed":
		// The room is gone; close the subscriber asynchronously.
		log.Printf("Subscriber %d: associated room has been destroyed, closing", p.handleId)
		go p.Close(context.TODO())
	case "event":
		// Handle renegotiations, but ignore other events like selected
		// substream / temporal layer.
		configured := getPluginStringValue(event.Plugindata, pluginVideoRoom, "configured")
		if configured == "ok" && event.Jsep != nil && event.Jsep["type"] == "offer" && event.Jsep["sdp"] != nil {
			p.listener.OnUpdateOffer(p, event.Jsep)
		}
	case "slow_link":
		// Ignore, processed through "handleSlowLink" in the general events.
	default:
		log.Printf("Unsupported videoroom event %s for subscriber %d: %+v", videoroom, p.handleId, event)
	}
}
// handleHangup closes the subscriber asynchronously once Janus reports that
// its PeerConnection was hung up.
func (p *mcuJanusSubscriber) handleHangup(event *janus.HangupMsg) {
	reason := event.Reason
	log.Printf("Subscriber %d received hangup (%s), closing", p.handleId, reason)
	go p.Close(context.Background())
}
// handleDetached closes the subscriber asynchronously once its plugin handle
// was detached on the Janus side.
func (p *mcuJanusSubscriber) handleDetached(event *janus.DetachedMsg) {
	log.Printf("Subscriber %d received detached, closing", p.handleId)
	go p.Close(context.Background())
}
// handleConnected updates the MCU bookkeeping once the subscriber's
// PeerConnection is established.
func (p *mcuJanusSubscriber) handleConnected(event *janus.WebRTCUpMsg) {
	log.Printf("Subscriber %d received connected", p.handleId)
	p.mcu.SubscriberConnected(p.Id(), p.publisher, p.streamType)
}
// handleSlowLink logs packet-loss reports from Janus for this subscriber,
// distinguishing the uplink and downlink direction.
func (p *mcuJanusSubscriber) handleSlowLink(event *janus.SlowLinkMsg) {
	if !event.Uplink {
		log.Printf("Subscriber %s (%d) is reporting %d lost packets on the downlink (client -> Janus)", p.listener.PublicId(), p.handleId, event.Lost)
		return
	}

	log.Printf("Subscriber %s (%d) is reporting %d lost packets on the uplink (Janus -> client)", p.listener.PublicId(), p.handleId, event.Lost)
}
// handleMedia is part of the event-handler interface; media events are only
// triggered for publishers, so there is nothing to do for a subscriber.
func (p *mcuJanusSubscriber) handleMedia(event *janus.MediaMsg) {
	// Only triggered for publishers
}
// NotifyReconnected re-establishes the subscriber after the connection to
// Janus was lost: a fresh subscriber handle is created for the publisher and
// the local handle / room / sid state is updated so later requests use the
// new session. On failure the subscriber is closed.
func (p *mcuJanusSubscriber) NotifyReconnected() {
	ctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
	defer cancel()
	handle, pub, err := p.mcu.getOrCreateSubscriberHandle(ctx, p.publisher, p.streamType)
	if err != nil {
		// TODO(jojo): Retry?
		log.Printf("Could not reconnect subscriber for publisher %s: %s", p.publisher, err)
		p.Close(context.Background())
		return
	}

	p.handle = handle
	p.handleId = handle.Id
	p.roomId = pub.roomId
	// The sid is derived from the (new) handle id; notify the listener so
	// clients learn about the changed sid.
	p.sid = strconv.FormatUint(handle.Id, 10)
	p.listener.SubscriberSidUpdated(p)
	log.Printf("Subscriber %d for publisher %s reconnected on handle %d", p.id, p.publisher, p.handleId)
}
// Close tears down the subscriber: the Janus client is closed while holding
// the mutex, MCU bookkeeping and statistics are updated, and the listener is
// informed that the subscriber is gone.
func (p *mcuJanusSubscriber) Close(ctx context.Context) {
	p.mu.Lock()
	closed := p.closeClient(ctx)
	p.mu.Unlock()

	if closed {
		// Only update counters if the client was actually still open, so
		// repeated Close calls do not double-decrement.
		p.mcu.SubscriberDisconnected(p.Id(), p.publisher, p.streamType)
		statsSubscribersCurrent.WithLabelValues(string(p.streamType)).Dec()
	}
	p.mcu.unregisterClient(p)
	p.listener.SubscriberClosed(p)
	p.mcuJanusClient.Close(ctx)
}
// joinRoom joins the publisher's Janus room as a subscriber in order to
// receive an offer SDP. Two error cases are retried: "already joined" (the
// handle is torn down and re-created, then the join is repeated) and "no such
// room / feed" (the publisher is not publishing yet; wait for it to connect
// and repeat). The callback receives either an error or the offer Jsep.
func (p *mcuJanusSubscriber) joinRoom(ctx context.Context, stream *streamSelection, callback func(error, map[string]interface{})) {
	handle := p.handle
	if handle == nil {
		callback(ErrNotConnected, nil)
		return
	}

	// Get notified when the publisher (re-)connects so joining can be
	// retried while it is not publishing yet.
	waiter := p.mcu.publisherConnected.NewWaiter(getStreamId(p.publisher, p.streamType))
	defer p.mcu.publisherConnected.Release(waiter)

	loggedNotPublishingYet := false
retry:
	join_msg := map[string]interface{}{
		"request": "join",
		"ptype":   "subscriber",
		"room":    p.roomId,
	}
	if p.mcu.isMultistream() {
		join_msg["streams"] = []map[string]interface{}{
			{
				"feed": streamTypeUserIds[p.streamType],
			},
		}
	} else {
		join_msg["feed"] = streamTypeUserIds[p.streamType]
	}
	if stream != nil {
		stream.AddToMessage(join_msg)
	}
	join_response, err := handle.Message(ctx, join_msg, nil)
	if err != nil {
		callback(err, nil)
		return
	}

	if error_code := getPluginIntValue(join_response.Plugindata, pluginVideoRoom, "error_code"); error_code > 0 {
		switch error_code {
		case JANUS_VIDEOROOM_ERROR_ALREADY_JOINED:
			// The subscriber is already connected to the room. This can happen
			// if a client leaves a call but keeps the subscriber objects active.
			// On joining the call again, the subscriber tries to join on the
			// MCU which will fail because he is still connected.
			// To get a new Offer SDP, we have to tear down the session on the
			// MCU and join again.
			p.mu.Lock()
			p.closeClient(ctx)
			p.mu.Unlock()

			var pub *mcuJanusPublisher
			handle, pub, err = p.mcu.getOrCreateSubscriberHandle(ctx, p.publisher, p.streamType)
			if err != nil {
				// Reconnection didn't work, need to unregister/remove subscriber
				// so a new object will be created if the request is retried.
				p.mcu.unregisterClient(p)
				p.listener.SubscriberClosed(p)
				callback(fmt.Errorf("Already connected as subscriber for %s, error during re-joining: %s", p.streamType, err), nil)
				return
			}

			// Adopt the new handle's state and restart the event loop for it.
			p.handle = handle
			p.handleId = handle.Id
			p.roomId = pub.roomId
			p.sid = strconv.FormatUint(handle.Id, 10)
			p.listener.SubscriberSidUpdated(p)
			p.closeChan = make(chan struct{}, 1)
			go p.run(p.handle, p.closeChan)
			log.Printf("Already connected subscriber %d for %s, leaving and re-joining on handle %d", p.id, p.streamType, p.handleId)
			goto retry
		case JANUS_VIDEOROOM_ERROR_NO_SUCH_ROOM:
			fallthrough
		case JANUS_VIDEOROOM_ERROR_NO_SUCH_FEED:
			switch error_code {
			case JANUS_VIDEOROOM_ERROR_NO_SUCH_ROOM:
				log.Printf("Publisher %s not created yet for %s, wait and retry to join room %d as subscriber", p.publisher, p.streamType, p.roomId)
			case JANUS_VIDEOROOM_ERROR_NO_SUCH_FEED:
				log.Printf("Publisher %s not sending yet for %s, wait and retry to join room %d as subscriber", p.publisher, p.streamType, p.roomId)
			}

			// Count each waiting subscriber only once in the statistics,
			// regardless of how often the join is retried.
			if !loggedNotPublishingYet {
				loggedNotPublishingYet = true
				statsWaitingForPublisherTotal.WithLabelValues(string(p.streamType)).Inc()
			}

			// Block until the publisher connects (or the context expires),
			// then try joining again.
			if err := waiter.Wait(ctx); err != nil {
				callback(err, nil)
				return
			}
			log.Printf("Retry subscribing %s from %s", p.streamType, p.publisher)
			goto retry
		default:
			// TODO(jojo): Should we handle other errors, too?
			callback(fmt.Errorf("Error joining room as subscriber: %+v", join_response), nil)
			return
		}
	}
	//log.Println("Joined as listener", join_response)
	p.session = join_response.Session
	callback(nil, join_response.Jsep)
}
// update renegotiates the existing subscription by sending a "configure"
// request with "update" set, optionally carrying a changed stream selection.
// The callback receives either an error or the resulting Jsep.
func (p *mcuJanusSubscriber) update(ctx context.Context, stream *streamSelection, callback func(error, map[string]interface{})) {
	handle := p.handle
	if handle == nil {
		callback(ErrNotConnected, nil)
		return
	}

	request := map[string]interface{}{
		"request": "configure",
		"update":  true,
	}
	if stream != nil {
		stream.AddToMessage(request)
	}

	response, err := handle.Message(ctx, request, nil)
	if err != nil {
		callback(err, nil)
		return
	}

	callback(nil, response.Jsep)
}
// SendMessage dispatches a client signaling message for this subscriber.
// Supported types: "requestoffer" / "sendoffer" (join the room or update the
// existing subscription), "answer", "candidate", "endOfCandidates" (ignored)
// and "selectStream". The actual work is queued on the subscriber's deferred
// channel and runs asynchronously; the callback is invoked with the result.
func (p *mcuJanusSubscriber) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) {
	statsMcuMessagesTotal.WithLabelValues(data.Type).Inc()
	jsep_msg := data.Payload
	switch data.Type {
	case "requestoffer":
		fallthrough
	case "sendoffer":
		p.deferred <- func() {
			msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
			defer cancel()

			stream, err := parseStreamSelection(jsep_msg)
			if err != nil {
				go callback(err, nil)
				return
			}

			// An empty or changed sid means a new subscription is wanted;
			// otherwise only renegotiate the existing one.
			if data.Sid == "" || data.Sid != p.Sid() {
				p.joinRoom(msgctx, stream, callback)
			} else {
				p.update(msgctx, stream, callback)
			}
		}
	case "answer":
		p.deferred <- func() {
			msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
			defer cancel()

			if data.Sid == "" || data.Sid == p.Sid() {
				p.sendAnswer(msgctx, jsep_msg, callback)
			} else {
				go callback(fmt.Errorf("Answer message sid (%s) does not match subscriber sid (%s)", data.Sid, p.Sid()), nil)
			}
		}
	case "candidate":
		p.deferred <- func() {
			msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
			defer cancel()

			if data.Sid == "" || data.Sid == p.Sid() {
				p.sendCandidate(msgctx, jsep_msg["candidate"], callback)
			} else {
				go callback(fmt.Errorf("Candidate message sid (%s) does not match subscriber sid (%s)", data.Sid, p.Sid()), nil)
			}
		}
	case "endOfCandidates":
		// Ignore
	case "selectStream":
		stream, err := parseStreamSelection(jsep_msg)
		if err != nil {
			go callback(err, nil)
			return
		}
		if stream == nil || !stream.HasValues() {
			// Nothing to do
			go callback(nil, nil)
			return
		}
		p.deferred <- func() {
			msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
			defer cancel()

			p.selectStream(msgctx, stream, callback)
		}
	default:
		// Return error asynchronously
		go callback(fmt.Errorf("Unsupported message type: %s", data.Type), nil)
	}
}

File diff suppressed because it is too large Load diff

Some files were not shown because too many files have changed in this diff Show more