mirror of
https://github.com/strukturag/nextcloud-spreed-signaling
synced 2024-05-26 02:22:14 +02:00
Compare commits
341 commits
Author | SHA1 | Date | |
---|---|---|---|
8123be9551 | |||
cad442c486 | |||
e8ebfed711 | |||
8d8ec677f1 | |||
80d96916b9 | |||
8a0ce7c9b6 | |||
1952bfc2be | |||
b3d2f7b02c | |||
7583fb6486 | |||
040e663b37 | |||
15b1214413 | |||
05810e10ce | |||
7e7a04ad6c | |||
d25169d0ff | |||
79b76b1ca4 | |||
f8e37a1bca | |||
b5cbb917c5 | |||
e2ac08ae67 | |||
00d17bae97 | |||
ff69a294a9 | |||
5790e7a369 | |||
4c807c86e8 | |||
e862392872 | |||
39f4b2eb11 | |||
7f8e44b3b5 | |||
31b8c74d1c | |||
5f18913646 | |||
716a93538b | |||
2cd3418f09 | |||
c6cbe88d0e | |||
f73ad7b508 | |||
efb722a55e | |||
d63b1cf14a | |||
75060b25aa | |||
7e7a6d5c09 | |||
a4b8a81734 | |||
3ce963ee91 | |||
24c1a09662 | |||
56f5a72f61 | |||
a66c1d82bf | |||
d9deddfda7 | |||
9c99129242 | |||
63c42dd84c | |||
92cbc28065 | |||
132cf0d474 | |||
4fd929c15a | |||
879469df19 | |||
fe0a002adf | |||
7b555e91ec | |||
b2afa88bcc | |||
1bbc49351a | |||
dff78d0101 | |||
2ad2327090 | |||
4b76a49355 | |||
f6125dac3f | |||
c2e93cd92a | |||
4f8349d4c1 | |||
aac4874e72 | |||
936f83feb9 | |||
c1e9e02087 | |||
beee423a7c | |||
5a85fecb10 | |||
88575abea2 | |||
fdc43d12cd | |||
d03ea86991 | |||
18300ce89e | |||
d8f2f265ab | |||
ddbf1065f6 | |||
bad52af35a | |||
c58564c0e8 | |||
0b259a8171 | |||
3fc5f5253d | |||
3e92664edc | |||
0ee976d377 | |||
552474f6f0 | |||
09e010ee14 | |||
70a5318973 | |||
94a8f0f02b | |||
4603b2b290 | |||
a50d637107 | |||
307ffdc29a | |||
ec3ac62474 | |||
e3a163fbe5 | |||
cf36530b30 | |||
adc72aa578 | |||
ea0d31b0dc | |||
5b305f6f99 | |||
3c923a9ef9 | |||
1a692bc4bb | |||
6a495bfc5c | |||
9a91e885cf | |||
b4830b1fd3 | |||
16da87106a | |||
e763f4519c | |||
bfb185f382 | |||
46e8ea9148 | |||
4eb1b6609d | |||
815088f269 | |||
527061bbe2 | |||
a2f0bec564 | |||
70f0519ca2 | |||
9e2a896326 | |||
2d48018b58 | |||
cf19b3b1b4 | |||
ebb215c592 | |||
0eb234b24d | |||
cad397e59e | |||
f8899ef189 | |||
54c4f1847a | |||
d368a060fa | |||
602452fa25 | |||
0c2cefa63a | |||
2468443572 | |||
3721fb131f | |||
6960912681 | |||
b77525603c | |||
9adb762ccf | |||
bf68a15943 | |||
bc7aea68f3 | |||
69beea84cb | |||
952b8ae460 | |||
2e6cf7f86b | |||
dcec32be7e | |||
b0d052c6ec | |||
318ed3700f | |||
ee16a8d8be | |||
91033bf8c2 | |||
b541ebc4c6 | |||
0aed690463 | |||
71a4248568 | |||
df210a6a85 | |||
5bc9ada233 | |||
d0d68f0d21 | |||
9a892a194e | |||
26102e7acb | |||
88a575c36c | |||
fdab3db819 | |||
c8aa4c71e0 | |||
ec9e44f5d6 | |||
543a85f8aa | |||
9f104cb281 | |||
4e623a8e08 | |||
9ba5b4330a | |||
4b6a4dbfe1 | |||
e1f40a024e | |||
47fc6694ca | |||
d0c711b500 | |||
7dc450350b | |||
b2c6bd320b | |||
4f26d6e2a5 | |||
ddfd976627 | |||
879e1ca5b0 | |||
0b698556d6 | |||
ec96256f29 | |||
8d60f81969 | |||
280c2681be | |||
9c7b38d4ff | |||
283da1436a | |||
fdfeeefa39 | |||
f2bcc000ae | |||
2ef9b39959 | |||
1358285c4a | |||
68528d4674 | |||
cc7625c544 | |||
c325fbeae6 | |||
c859064a45 | |||
2f31532ee2 | |||
d97b071ccf | |||
95e2bc10d4 | |||
66dc55a3a5 | |||
74944ee547 | |||
61b8a91749 | |||
886ad912da | |||
3b4699c11e | |||
7844a9c21a | |||
f8eae0b71f | |||
0b7c17e083 | |||
c2eb3a8a27 | |||
010914eed9 | |||
1a93c42c38 | |||
3ba1853e5a | |||
bbdd991f05 | |||
b0f2e6ea33 | |||
f65bdf04ff | |||
1fa731f20e | |||
5dbee53a1b | |||
e6b3c8d24f | |||
687f4101c0 | |||
4fb7142a4e | |||
ec8cb8e1b8 | |||
2ee3fa509c | |||
204fec1583 | |||
42005d97c4 | |||
e2266a6765 | |||
9603ed3d6e | |||
9d313608cf | |||
84374590a4 | |||
a082874377 | |||
b67264e600 | |||
bd445bd99b | |||
df477a7856 | |||
1a8444ca71 | |||
bde0b08eb1 | |||
a68454ceec | |||
f6fe960534 | |||
fe53c32714 | |||
c3403b1e9a | |||
ba73d1a7df | |||
36e704e320 | |||
6394539876 | |||
2012a7a6df | |||
1bcf07afd3 | |||
3442cad9c3 | |||
edd042b00e | |||
8f4fc2db6d | |||
7d09c71ab9 | |||
26a65cedd1 | |||
9010e91ff4 | |||
da00080303 | |||
62b54a85ed | |||
3ea60cfe31 | |||
1f8b536c8a | |||
8385211fa2 | |||
f5007df0ad | |||
8b49cf8581 | |||
0f980f2894 | |||
6ac065f603 | |||
ae37a56e34 | |||
45be0ad2fd | |||
29b0b06f6d | |||
7e613f831b | |||
2e8b0dfe25 | |||
2348297f36 | |||
e0fe89f0f2 | |||
e0b3797ea9 | |||
35f9d313c7 | |||
68d4e87d31 | |||
27ebf9e037 | |||
f071a64797 | |||
6488ba1cf5 | |||
b710d1704e | |||
4a762a3264 | |||
55aee6e5dc | |||
2b62c9e3c1 | |||
1a0e51499f | |||
2430421006 | |||
5f71a9a0ab | |||
cf5ee8e4a1 | |||
c85b31bd24 | |||
5ec7fcb594 | |||
978024e799 | |||
d71ca35e97 | |||
48424bf290 | |||
a3ba73d764 | |||
9da78a1a8b | |||
fa0cb51c8e | |||
d0a3ce0616 | |||
2595420db1 | |||
32ccc2e50e | |||
0cd4099f7b | |||
c4fce20678 | |||
2c4cdedcae | |||
b1c78f6e9d | |||
8db4068989 | |||
528a09e5da | |||
8417f37cba | |||
7a6cffdc10 | |||
f5bef51917 | |||
390f288c1a | |||
11a89e0ca9 | |||
da4cf896c5 | |||
c89b7bbe8f | |||
20a34526c5 | |||
682134fe56 | |||
beaad80eba | |||
59cf86d786 | |||
a362682143 | |||
d3f41eb572 | |||
88e67cf95e | |||
cc25760dd6 | |||
530700e5af | |||
dbba13865d | |||
0a10339d17 | |||
02184ace70 | |||
07e2e25a07 | |||
2334f4815e | |||
47ad7619e0 | |||
6a384619b8 | |||
1d12b40867 | |||
a8c2a35221 | |||
dc3bcf2ce7 | |||
8c7882e4a6 | |||
67f20bd9b2 | |||
fea65b31dc | |||
d0085811f8 | |||
719bb9615d | |||
e3e302e453 | |||
3b509a5f43 | |||
0b61b8bb9f | |||
9b09ff083b | |||
682d3aa52a | |||
110ece7626 | |||
5fd0efa4bc | |||
bd9e2aa29d | |||
8f2933071e | |||
fb62c53976 | |||
2e0561b90b | |||
eeab0a226b | |||
32bbbeee32 | |||
7a8879051d | |||
075e50560e | |||
6c377ee173 | |||
eae19fd61a | |||
a751ede2b2 | |||
7d11ff4a41 | |||
833f39e608 | |||
4f5bdb2a3f | |||
116e74fab4 | |||
9c0e0ba85d | |||
23e6b11383 | |||
7e33d2cf3a | |||
7fe5995e1d | |||
2eb84a3301 | |||
2a16bf0650 | |||
d63856e263 | |||
734eaea85c | |||
e61845b086 | |||
0f83392e2d | |||
362098531b | |||
0d15971506 | |||
fd8d11806b | |||
a8180194ef | |||
dddf194b48 | |||
2f421e3bdf | |||
716be91feb | |||
b3dc84b7b8 | |||
55d143d6bc | |||
838e601183 | |||
4b019a991f | |||
a2faf3dc95 | |||
c20ff558f3 |
4
.github/dependabot.yml
vendored
4
.github/dependabot.yml
vendored
|
@ -28,6 +28,10 @@ updates:
|
|||
directory: "/"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
groups:
|
||||
artifacts:
|
||||
patterns:
|
||||
- "actions/*-artifact"
|
||||
|
||||
- package-ecosystem: "pip"
|
||||
directory: "/docs"
|
||||
|
|
12
.github/workflows/check-continentmap.yml
vendored
12
.github/workflows/check-continentmap.yml
vendored
|
@ -1,6 +1,18 @@
|
|||
name: check-continentmap
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
paths:
|
||||
- '.github/workflows/check-continentmap.yml'
|
||||
- 'scripts/get_continent_map.py'
|
||||
- 'Makefile'
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
paths:
|
||||
- '.github/workflows/check-continentmap.yml'
|
||||
- 'scripts/get_continent_map.py'
|
||||
- 'Makefile'
|
||||
schedule:
|
||||
- cron: "0 2 * * SUN"
|
||||
|
||||
|
|
6
.github/workflows/codeql-analysis.yml
vendored
6
.github/workflows/codeql-analysis.yml
vendored
|
@ -39,12 +39,12 @@ jobs:
|
|||
uses: actions/checkout@v4
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v2
|
||||
uses: github/codeql-action/init@v3
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@v2
|
||||
uses: github/codeql-action/autobuild@v3
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v2
|
||||
uses: github/codeql-action/analyze@v3
|
||||
|
|
4
.github/workflows/command-rebase.yml
vendored
4
.github/workflows/command-rebase.yml
vendored
|
@ -23,7 +23,7 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Add reaction on start
|
||||
uses: peter-evans/create-or-update-comment@v3
|
||||
uses: peter-evans/create-or-update-comment@v4
|
||||
with:
|
||||
token: ${{ secrets.COMMAND_BOT_PAT }}
|
||||
repository: ${{ github.event.repository.full_name }}
|
||||
|
@ -42,7 +42,7 @@ jobs:
|
|||
GITHUB_TOKEN: ${{ secrets.COMMAND_BOT_PAT }}
|
||||
|
||||
- name: Add reaction on failure
|
||||
uses: peter-evans/create-or-update-comment@v3
|
||||
uses: peter-evans/create-or-update-comment@v4
|
||||
if: failure()
|
||||
with:
|
||||
token: ${{ secrets.COMMAND_BOT_PAT }}
|
||||
|
|
13
.github/workflows/deploydocker.yml
vendored
13
.github/workflows/deploydocker.yml
vendored
|
@ -2,6 +2,15 @@ name: Deploy to Docker Hub / GHCR
|
|||
|
||||
on:
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
paths:
|
||||
- '.github/workflows/deploydocker.yml'
|
||||
- '**.go'
|
||||
- 'go.*'
|
||||
- 'Makefile'
|
||||
- '*.conf.in'
|
||||
- 'docker/server/*'
|
||||
- 'docker/proxy/*'
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
|
@ -37,7 +46,7 @@ jobs:
|
|||
type=semver,pattern={{major}}
|
||||
type=sha,prefix=
|
||||
- name: Cache Docker layers
|
||||
uses: actions/cache@v3
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: /tmp/.buildx-cache
|
||||
key: ${{ runner.os }}-buildx-${{ github.sha }}
|
||||
|
@ -107,7 +116,7 @@ jobs:
|
|||
type=semver,pattern={{major}}
|
||||
type=sha,prefix=
|
||||
- name: Cache Docker layers
|
||||
uses: actions/cache@v3
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: /tmp/.buildx-cache
|
||||
key: ${{ runner.os }}-buildx-${{ github.sha }}
|
||||
|
|
4
.github/workflows/docker-janus.yml
vendored
4
.github/workflows/docker-janus.yml
vendored
|
@ -34,7 +34,3 @@ jobs:
|
|||
context: docker/janus
|
||||
load: true
|
||||
tags: ${{ env.TEST_TAG }}
|
||||
|
||||
- name: Test Docker image
|
||||
run: |
|
||||
docker run --rm ${{ env.TEST_TAG }} /usr/local/bin/janus --version
|
||||
|
|
40
.github/workflows/docker.yml
vendored
40
.github/workflows/docker.yml
vendored
|
@ -3,8 +3,24 @@ name: Docker image
|
|||
on:
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
paths:
|
||||
- '.github/workflows/docker.yml'
|
||||
- '**.go'
|
||||
- 'go.*'
|
||||
- 'Makefile'
|
||||
- '*.conf.in'
|
||||
- 'docker/server/*'
|
||||
- 'docker/proxy/*'
|
||||
push:
|
||||
branches: [ master ]
|
||||
paths:
|
||||
- '.github/workflows/docker.yml'
|
||||
- '**.go'
|
||||
- 'go.*'
|
||||
- 'Makefile'
|
||||
- '*.conf.in'
|
||||
- 'docker/server/*'
|
||||
- 'docker/proxy/*'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
@ -25,18 +41,6 @@ jobs:
|
|||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Build Docker image for testing
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
file: docker/server/Dockerfile
|
||||
load: true
|
||||
tags: ${{ env.TEST_TAG }}
|
||||
|
||||
- name: Test Docker image
|
||||
run: |
|
||||
docker run --rm ${{ env.TEST_TAG }} /usr/bin/nextcloud-spreed-signaling --version
|
||||
|
||||
- name: Build Docker image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
|
@ -56,18 +60,6 @@ jobs:
|
|||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Build Docker image for testing
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
file: docker/proxy/Dockerfile
|
||||
load: true
|
||||
tags: ${{ env.TEST_TAG }}
|
||||
|
||||
- name: Test Docker image
|
||||
run: |
|
||||
docker run --rm ${{ env.TEST_TAG }} /usr/bin/nextcloud-spreed-signaling-proxy --version
|
||||
|
||||
- name: Build Docker image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
|
|
46
.github/workflows/govuln.yml
vendored
Normal file
46
.github/workflows/govuln.yml
vendored
Normal file
|
@ -0,0 +1,46 @@
|
|||
name: Go Vulnerability Checker
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
paths:
|
||||
- '.github/workflows/govuln.yml'
|
||||
- '**.go'
|
||||
- 'go.*'
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
paths:
|
||||
- '.github/workflows/govuln.yml'
|
||||
- '**.go'
|
||||
- 'go.*'
|
||||
schedule:
|
||||
- cron: "0 2 * * SUN"
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
run:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
go-version:
|
||||
- "1.21"
|
||||
- "1.22"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
|
||||
- run: date
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt -y update && sudo apt -y install protobuf-compiler
|
||||
make common
|
||||
|
||||
- name: Install and run govulncheck
|
||||
run: |
|
||||
set -euo pipefail
|
||||
go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||
govulncheck ./...
|
46
.github/workflows/licensecheck.yml
vendored
Normal file
46
.github/workflows/licensecheck.yml
vendored
Normal file
|
@ -0,0 +1,46 @@
|
|||
name: licensecheck
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
paths:
|
||||
- '.github/workflows/licensecheck.yml'
|
||||
- '**.go'
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
paths:
|
||||
- '.github/workflows/licensecheck.yml'
|
||||
- '**.go'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
golang:
|
||||
name: golang
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install licensecheck
|
||||
run: |
|
||||
sudo apt-get -y update
|
||||
sudo apt-get -y install licensecheck
|
||||
|
||||
- id: licensecheck
|
||||
name: Check licenses
|
||||
run: |
|
||||
{
|
||||
echo 'CHECK_RESULT<<EOF'
|
||||
licensecheck *.go */*.go
|
||||
echo EOF
|
||||
} >> "$GITHUB_ENV"
|
||||
|
||||
- name: Check for missing licenses
|
||||
run: |
|
||||
MISSING=$(echo "$CHECK_RESULT" | grep UNKNOWN || true)
|
||||
if [ -n "$MISSING" ]; then \
|
||||
echo "$MISSING"; \
|
||||
exit 1; \
|
||||
fi
|
44
.github/workflows/lint.yml
vendored
44
.github/workflows/lint.yml
vendored
|
@ -26,27 +26,9 @@ jobs:
|
|||
continue-on-error: true
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-go@v4
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: "1.20"
|
||||
|
||||
- id: go-cache-paths
|
||||
run: |
|
||||
echo "go-build=$(go env GOCACHE)" >> $GITHUB_OUTPUT
|
||||
echo "go-mod=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
|
||||
echo "go-version=$(go version | cut -d ' ' -f 3)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Go build cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ steps.go-cache-paths.outputs.go-build }}
|
||||
key: ${{ runner.os }}-${{ steps.go-cache-paths.outputs.go-version }}-build-${{ hashFiles('**/go.mod', '**/go.sum') }}
|
||||
|
||||
- name: Go mod cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ steps.go-cache-paths.outputs.go-mod }}
|
||||
key: ${{ runner.os }}-${{ steps.go-cache-paths.outputs.go-version }}-mod-${{ hashFiles('**/go.mod', '**/go.sum') }}
|
||||
go-version: "1.21"
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
|
@ -54,7 +36,27 @@ jobs:
|
|||
make common
|
||||
|
||||
- name: lint
|
||||
uses: golangci/golangci-lint-action@v3.7.0
|
||||
uses: golangci/golangci-lint-action@v6.0.1
|
||||
with:
|
||||
version: latest
|
||||
args: --timeout=2m0s
|
||||
skip-cache: true
|
||||
|
||||
dependencies:
|
||||
name: dependencies
|
||||
runs-on: ubuntu-latest
|
||||
continue-on-error: true
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: "stable"
|
||||
|
||||
- name: Check minimum supported version of Go
|
||||
run: |
|
||||
go mod tidy -go=1.21 -compat=1.21
|
||||
|
||||
- name: Check go.mod / go.sum
|
||||
run: |
|
||||
git add go.*
|
||||
git diff --cached --exit-code go.*
|
||||
|
|
27
.github/workflows/shellcheck.yml
vendored
Normal file
27
.github/workflows/shellcheck.yml
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
name: shellcheck
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
paths:
|
||||
- '.github/workflows/shellcheck.yml'
|
||||
- '**.sh'
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
paths:
|
||||
- '.github/workflows/shellcheck.yml'
|
||||
- '**.sh'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
name: shellcheck
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: shellcheck
|
||||
run: |
|
||||
find -name "*.sh" | xargs shellcheck
|
31
.github/workflows/tarball.yml
vendored
31
.github/workflows/tarball.yml
vendored
|
@ -24,33 +24,15 @@ jobs:
|
|||
strategy:
|
||||
matrix:
|
||||
go-version:
|
||||
- "1.20"
|
||||
- "1.21"
|
||||
- "1.22"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-go@v4
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
|
||||
- id: go-cache-paths
|
||||
run: |
|
||||
echo "go-build=$(go env GOCACHE)" >> $GITHUB_OUTPUT
|
||||
echo "go-mod=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
|
||||
echo "go-version=$(go version | cut -d ' ' -f 3)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Go build cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ steps.go-cache-paths.outputs.go-build }}
|
||||
key: ${{ runner.os }}-${{ steps.go-cache-paths.outputs.go-version }}-build-${{ hashFiles('**/go.mod', '**/go.sum') }}
|
||||
|
||||
- name: Go mod cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ steps.go-cache-paths.outputs.go-mod }}
|
||||
key: ${{ runner.os }}-${{ steps.go-cache-paths.outputs.go-version }}-mod-${{ hashFiles('**/go.mod', '**/go.sum') }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt -y update && sudo apt -y install protobuf-compiler
|
||||
|
@ -61,7 +43,7 @@ jobs:
|
|||
make tarball
|
||||
|
||||
- name: Upload tarball
|
||||
uses: actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: tarball-${{ matrix.go-version }}
|
||||
path: nextcloud-spreed-signaling*.tar.gz
|
||||
|
@ -70,12 +52,12 @@ jobs:
|
|||
strategy:
|
||||
matrix:
|
||||
go-version:
|
||||
- "1.20"
|
||||
- "1.21"
|
||||
- "1.22"
|
||||
runs-on: ubuntu-latest
|
||||
needs: [create]
|
||||
steps:
|
||||
- uses: actions/setup-go@v4
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
|
||||
|
@ -84,7 +66,7 @@ jobs:
|
|||
sudo apt -y update && sudo apt -y install protobuf-compiler
|
||||
|
||||
- name: Download tarball
|
||||
uses: actions/download-artifact@v3
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: tarball-${{ matrix.go-version }}
|
||||
|
||||
|
@ -104,5 +86,6 @@ jobs:
|
|||
- name: Run tests
|
||||
env:
|
||||
GOPROXY: off
|
||||
USE_DB_IP_GEOIP_DATABASE: "1"
|
||||
run: |
|
||||
make -C tmp test TIMEOUT=120s
|
||||
|
|
27
.github/workflows/test.yml
vendored
27
.github/workflows/test.yml
vendored
|
@ -23,36 +23,19 @@ jobs:
|
|||
go:
|
||||
env:
|
||||
MAXMIND_GEOLITE2_LICENSE: ${{ secrets.MAXMIND_GEOLITE2_LICENSE }}
|
||||
USE_DB_IP_GEOIP_DATABASE: "1"
|
||||
strategy:
|
||||
matrix:
|
||||
go-version:
|
||||
- "1.20"
|
||||
- "1.21"
|
||||
- "1.22"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-go@v4
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
|
||||
- id: go-cache-paths
|
||||
run: |
|
||||
echo "go-build=$(go env GOCACHE)" >> $GITHUB_OUTPUT
|
||||
echo "go-mod=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
|
||||
echo "go-version=$(go version | cut -d ' ' -f 3)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Go build cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ steps.go-cache-paths.outputs.go-build }}
|
||||
key: ${{ runner.os }}-${{ steps.go-cache-paths.outputs.go-version }}-build-${{ hashFiles('**/go.mod', '**/go.sum') }}
|
||||
|
||||
- name: Go mod cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ${{ steps.go-cache-paths.outputs.go-mod }}
|
||||
key: ${{ runner.os }}-${{ steps.go-cache-paths.outputs.go-version }}-mod-${{ hashFiles('**/go.mod', '**/go.sum') }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt -y update && sudo apt -y install protobuf-compiler
|
||||
|
@ -80,7 +63,7 @@ jobs:
|
|||
outfile: cover.lcov
|
||||
|
||||
- name: Coveralls Parallel
|
||||
uses: coverallsapp/github-action@v2.2.3
|
||||
uses: coverallsapp/github-action@v2.3.0
|
||||
env:
|
||||
COVERALLS_FLAG_NAME: run-${{ matrix.go-version }}
|
||||
with:
|
||||
|
@ -95,7 +78,7 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Coveralls Finished
|
||||
uses: coverallsapp/github-action@v2.2.3
|
||||
uses: coverallsapp/github-action@v2.3.0
|
||||
with:
|
||||
github-token: ${{ secrets.github_token }}
|
||||
parallel-finished: true
|
||||
|
|
293
CHANGELOG.md
293
CHANGELOG.md
|
@ -2,6 +2,299 @@
|
|||
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
## 1.3.1 - 2024-05-23
|
||||
|
||||
### Changed
|
||||
- Bump alpine from 3.19 to 3.20 in /docker/janus
|
||||
[#746](https://github.com/strukturag/nextcloud-spreed-signaling/pull/746)
|
||||
- CI: Remove deprecated options from lint workflow.
|
||||
[#748](https://github.com/strukturag/nextcloud-spreed-signaling/pull/748)
|
||||
- docker: Update Janus in example image to 1.2.2
|
||||
[#749](https://github.com/strukturag/nextcloud-spreed-signaling/pull/749)
|
||||
- Improve detection of actual client IP.
|
||||
[#747](https://github.com/strukturag/nextcloud-spreed-signaling/pull/747)
|
||||
|
||||
### Fixed
|
||||
- docker: Fix proxy entrypoint.
|
||||
[#745](https://github.com/strukturag/nextcloud-spreed-signaling/pull/745)
|
||||
|
||||
|
||||
## 1.3.0 - 2024-05-22
|
||||
|
||||
### Added
|
||||
- Support resuming remote sessions
|
||||
[#715](https://github.com/strukturag/nextcloud-spreed-signaling/pull/715)
|
||||
- Gracefully shut down signaling server on SIGUSR1.
|
||||
[#706](https://github.com/strukturag/nextcloud-spreed-signaling/pull/706)
|
||||
- docker: Add helper scripts to gracefully stop / wait for server.
|
||||
[#722](https://github.com/strukturag/nextcloud-spreed-signaling/pull/722)
|
||||
- Support environment variables in some configuration.
|
||||
[#721](https://github.com/strukturag/nextcloud-spreed-signaling/pull/721)
|
||||
- Add Context to clients / sessions.
|
||||
[#732](https://github.com/strukturag/nextcloud-spreed-signaling/pull/732)
|
||||
- Drop support for Golang 1.20
|
||||
[#737](https://github.com/strukturag/nextcloud-spreed-signaling/pull/737)
|
||||
- CI: Run "govulncheck".
|
||||
[#694](https://github.com/strukturag/nextcloud-spreed-signaling/pull/694)
|
||||
- Make trusted proxies configurable and default to loopback / private IPs.
|
||||
[#738](https://github.com/strukturag/nextcloud-spreed-signaling/pull/738)
|
||||
- Add support for remote streams (preview)
|
||||
[#708](https://github.com/strukturag/nextcloud-spreed-signaling/pull/708)
|
||||
- Add throttler for backend requests
|
||||
[#744](https://github.com/strukturag/nextcloud-spreed-signaling/pull/744)
|
||||
|
||||
### Changed
|
||||
- build(deps): Bump github.com/nats-io/nats.go from 1.34.0 to 1.34.1
|
||||
[#697](https://github.com/strukturag/nextcloud-spreed-signaling/pull/697)
|
||||
- build(deps): Bump google.golang.org/grpc from 1.62.1 to 1.63.0
|
||||
[#699](https://github.com/strukturag/nextcloud-spreed-signaling/pull/699)
|
||||
- build(deps): Bump google.golang.org/grpc from 1.63.0 to 1.63.2
|
||||
[#700](https://github.com/strukturag/nextcloud-spreed-signaling/pull/700)
|
||||
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.12 to 2.10.14
|
||||
[#702](https://github.com/strukturag/nextcloud-spreed-signaling/pull/702)
|
||||
- Include previous value with etcd watch events.
|
||||
[#704](https://github.com/strukturag/nextcloud-spreed-signaling/pull/704)
|
||||
- build(deps): Bump go.uber.org/zap from 1.17.0 to 1.27.0
|
||||
[#705](https://github.com/strukturag/nextcloud-spreed-signaling/pull/705)
|
||||
- Improve support for Janus 1.x
|
||||
[#669](https://github.com/strukturag/nextcloud-spreed-signaling/pull/669)
|
||||
- build(deps): Bump sphinx from 7.2.6 to 7.3.5 in /docs
|
||||
[#709](https://github.com/strukturag/nextcloud-spreed-signaling/pull/709)
|
||||
- build(deps): Bump sphinx from 7.3.5 to 7.3.7 in /docs
|
||||
[#712](https://github.com/strukturag/nextcloud-spreed-signaling/pull/712)
|
||||
- build(deps): Bump golang.org/x/net from 0.21.0 to 0.23.0
|
||||
[#711](https://github.com/strukturag/nextcloud-spreed-signaling/pull/711)
|
||||
- Don't keep expiration timestamp in each session.
|
||||
[#713](https://github.com/strukturag/nextcloud-spreed-signaling/pull/713)
|
||||
- build(deps): Bump mkdocs from 1.5.3 to 1.6.0 in /docs
|
||||
[#714](https://github.com/strukturag/nextcloud-spreed-signaling/pull/714)
|
||||
- Speedup tests by running in parallel
|
||||
[#718](https://github.com/strukturag/nextcloud-spreed-signaling/pull/718)
|
||||
- build(deps): Bump golangci/golangci-lint-action from 4.0.0 to 5.0.0
|
||||
[#719](https://github.com/strukturag/nextcloud-spreed-signaling/pull/719)
|
||||
- build(deps): Bump golangci/golangci-lint-action from 5.0.0 to 5.1.0
|
||||
[#720](https://github.com/strukturag/nextcloud-spreed-signaling/pull/720)
|
||||
- build(deps): Bump coverallsapp/github-action from 2.2.3 to 2.3.0
|
||||
[#728](https://github.com/strukturag/nextcloud-spreed-signaling/pull/728)
|
||||
- build(deps): Bump jinja2 from 3.1.3 to 3.1.4 in /docs
|
||||
[#726](https://github.com/strukturag/nextcloud-spreed-signaling/pull/726)
|
||||
- build(deps): Bump google.golang.org/protobuf from 1.33.0 to 1.34.1
|
||||
[#725](https://github.com/strukturag/nextcloud-spreed-signaling/pull/725)
|
||||
- build(deps): Bump github.com/prometheus/client_golang from 1.19.0 to 1.19.1
|
||||
[#730](https://github.com/strukturag/nextcloud-spreed-signaling/pull/730)
|
||||
- build(deps): Bump golangci/golangci-lint-action from 5.1.0 to 6.0.1
|
||||
[#729](https://github.com/strukturag/nextcloud-spreed-signaling/pull/729)
|
||||
- build(deps): Bump google.golang.org/grpc from 1.63.2 to 1.64.0
|
||||
[#734](https://github.com/strukturag/nextcloud-spreed-signaling/pull/734)
|
||||
- Validate received SDP earlier.
|
||||
[#707](https://github.com/strukturag/nextcloud-spreed-signaling/pull/707)
|
||||
- Log something if mcu publisher / subscriber was closed.
|
||||
[#736](https://github.com/strukturag/nextcloud-spreed-signaling/pull/736)
|
||||
- build(deps): Bump the etcd group with 4 updates
|
||||
[#693](https://github.com/strukturag/nextcloud-spreed-signaling/pull/693)
|
||||
- build(deps): Bump github.com/nats-io/nats.go from 1.34.1 to 1.35.0
|
||||
[#740](https://github.com/strukturag/nextcloud-spreed-signaling/pull/740)
|
||||
- Don't use unnecessary pointer to "json.RawMessage".
|
||||
[#739](https://github.com/strukturag/nextcloud-spreed-signaling/pull/739)
|
||||
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.14 to 2.10.15
|
||||
[#741](https://github.com/strukturag/nextcloud-spreed-signaling/pull/741)
|
||||
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.15 to 2.10.16
|
||||
[#743](https://github.com/strukturag/nextcloud-spreed-signaling/pull/743)
|
||||
|
||||
### Fixed
|
||||
- Improve detecting renames in file watcher.
|
||||
[#698](https://github.com/strukturag/nextcloud-spreed-signaling/pull/698)
|
||||
- Update etcd watch handling.
|
||||
[#701](https://github.com/strukturag/nextcloud-spreed-signaling/pull/701)
|
||||
- Prevent goroutine leaks in GRPC tests.
|
||||
[#716](https://github.com/strukturag/nextcloud-spreed-signaling/pull/716)
|
||||
- Fix potential race in capabilities test.
|
||||
[#731](https://github.com/strukturag/nextcloud-spreed-signaling/pull/731)
|
||||
- Don't log read error after we closed the connection.
|
||||
[#735](https://github.com/strukturag/nextcloud-spreed-signaling/pull/735)
|
||||
- Fix lock order inversion when leaving room / publishing room sessions.
|
||||
[#742](https://github.com/strukturag/nextcloud-spreed-signaling/pull/742)
|
||||
- Relax "MessageClientMessageData" validation.
|
||||
[#733](https://github.com/strukturag/nextcloud-spreed-signaling/pull/733)
|
||||
|
||||
|
||||
## 1.2.4 - 2024-04-03
|
||||
|
||||
### Added
|
||||
- Add metrics for current number of HTTP client connections.
|
||||
[#668](https://github.com/strukturag/nextcloud-spreed-signaling/pull/668)
|
||||
- Support getting GeoIP DB from db-ip.com for tests.
|
||||
[#689](https://github.com/strukturag/nextcloud-spreed-signaling/pull/689)
|
||||
- Use fsnotify to detect file changes
|
||||
[#680](https://github.com/strukturag/nextcloud-spreed-signaling/pull/680)
|
||||
- CI: Check dependencies for minimum supported version.
|
||||
[#692](https://github.com/strukturag/nextcloud-spreed-signaling/pull/692)
|
||||
|
||||
### Changed
|
||||
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.9 to 2.10.10
|
||||
[#650](https://github.com/strukturag/nextcloud-spreed-signaling/pull/650)
|
||||
- CI: Also test with Golang 1.22
|
||||
[#651](https://github.com/strukturag/nextcloud-spreed-signaling/pull/651)
|
||||
- build(deps): Bump the etcd group with 4 updates
|
||||
[#649](https://github.com/strukturag/nextcloud-spreed-signaling/pull/649)
|
||||
- Improve Makefile
|
||||
[#653](https://github.com/strukturag/nextcloud-spreed-signaling/pull/653)
|
||||
- build(deps): Bump google.golang.org/grpc from 1.61.0 to 1.61.1
|
||||
[#659](https://github.com/strukturag/nextcloud-spreed-signaling/pull/659)
|
||||
- build(deps): Bump golangci/golangci-lint-action from 3.7.0 to 4.0.0
|
||||
[#658](https://github.com/strukturag/nextcloud-spreed-signaling/pull/658)
|
||||
- Minor improvements to DNS monitor
|
||||
[#663](https://github.com/strukturag/nextcloud-spreed-signaling/pull/663)
|
||||
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.10 to 2.10.11
|
||||
[#662](https://github.com/strukturag/nextcloud-spreed-signaling/pull/662)
|
||||
- build(deps): Bump google.golang.org/grpc from 1.61.1 to 1.62.0
|
||||
[#664](https://github.com/strukturag/nextcloud-spreed-signaling/pull/664)
|
||||
- Support ports in full URLs for DNS monitor.
|
||||
[#667](https://github.com/strukturag/nextcloud-spreed-signaling/pull/667)
|
||||
- Calculate proxy load based on maximum bandwidth.
|
||||
[#670](https://github.com/strukturag/nextcloud-spreed-signaling/pull/670)
|
||||
- build(deps): Bump github.com/nats-io/nats.go from 1.32.0 to 1.33.1
|
||||
[#661](https://github.com/strukturag/nextcloud-spreed-signaling/pull/661)
|
||||
- build(deps): Bump golang from 1.21-alpine to 1.22-alpine in /docker/server
|
||||
[#655](https://github.com/strukturag/nextcloud-spreed-signaling/pull/655)
|
||||
- build(deps): Bump golang from 1.21-alpine to 1.22-alpine in /docker/proxy
|
||||
[#656](https://github.com/strukturag/nextcloud-spreed-signaling/pull/656)
|
||||
- docker: Update Janus from 0.11.8 to 0.14.1.
|
||||
[#672](https://github.com/strukturag/nextcloud-spreed-signaling/pull/672)
|
||||
- build(deps): Bump alpine from 3.18 to 3.19 in /docker/janus
|
||||
[#613](https://github.com/strukturag/nextcloud-spreed-signaling/pull/613)
|
||||
- Reuse backoff waiting code where possible
|
||||
[#673](https://github.com/strukturag/nextcloud-spreed-signaling/pull/673)
|
||||
- build(deps): Bump github.com/prometheus/client_golang from 1.18.0 to 1.19.0
|
||||
[#674](https://github.com/strukturag/nextcloud-spreed-signaling/pull/674)
|
||||
- Docker improvements
|
||||
[#675](https://github.com/strukturag/nextcloud-spreed-signaling/pull/675)
|
||||
- make: Don't update dependencies but use pinned versions.
|
||||
[#679](https://github.com/strukturag/nextcloud-spreed-signaling/pull/679)
|
||||
- build(deps): Bump github.com/pion/sdp/v3 from 3.0.6 to 3.0.7
|
||||
[#678](https://github.com/strukturag/nextcloud-spreed-signaling/pull/678)
|
||||
- build(deps): Bump google.golang.org/grpc from 1.62.0 to 1.62.1
|
||||
[#677](https://github.com/strukturag/nextcloud-spreed-signaling/pull/677)
|
||||
- build(deps): Bump google.golang.org/protobuf from 1.32.0 to 1.33.0
|
||||
[#676](https://github.com/strukturag/nextcloud-spreed-signaling/pull/676)
|
||||
- build(deps): Bump github.com/pion/sdp/v3 from 3.0.7 to 3.0.8
|
||||
[#681](https://github.com/strukturag/nextcloud-spreed-signaling/pull/681)
|
||||
- Update source of continentmap to original CSV file.
|
||||
[#682](https://github.com/strukturag/nextcloud-spreed-signaling/pull/682)
|
||||
- build(deps): Bump markdown from 3.5.2 to 3.6 in /docs
|
||||
[#684](https://github.com/strukturag/nextcloud-spreed-signaling/pull/684)
|
||||
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.11 to 2.10.12
|
||||
[#683](https://github.com/strukturag/nextcloud-spreed-signaling/pull/683)
|
||||
- build(deps): Bump github.com/pion/sdp/v3 from 3.0.8 to 3.0.9
|
||||
[#687](https://github.com/strukturag/nextcloud-spreed-signaling/pull/687)
|
||||
- build(deps): Bump the etcd group with 4 updates
|
||||
[#686](https://github.com/strukturag/nextcloud-spreed-signaling/pull/686)
|
||||
- build(deps): Bump github.com/nats-io/nats.go from 1.33.1 to 1.34.0
|
||||
[#685](https://github.com/strukturag/nextcloud-spreed-signaling/pull/685)
|
||||
- Revert "build(deps): Bump the etcd group with 4 updates"
|
||||
[#691](https://github.com/strukturag/nextcloud-spreed-signaling/pull/691)
|
||||
- CI: Limit when to run Docker build jobs.
|
||||
[#695](https://github.com/strukturag/nextcloud-spreed-signaling/pull/695)
|
||||
- Remove deprecated section on multiple signaling servers from README.
|
||||
[#696](https://github.com/strukturag/nextcloud-spreed-signaling/pull/696)
|
||||
|
||||
### Fixed
|
||||
- Fix race condition when accessing "expected" in proxy_config tests.
|
||||
[#652](https://github.com/strukturag/nextcloud-spreed-signaling/pull/652)
|
||||
- Fix deadlock when entry is removed while receiver holds lock in lookup.
|
||||
[#654](https://github.com/strukturag/nextcloud-spreed-signaling/pull/654)
|
||||
- Fix flaky "TestProxyConfigStaticDNS".
|
||||
[#671](https://github.com/strukturag/nextcloud-spreed-signaling/pull/671)
|
||||
- Fix flaky DnsMonitor test.
|
||||
[#690](https://github.com/strukturag/nextcloud-spreed-signaling/pull/690)
|
||||
|
||||
|
||||
## 1.2.3 - 2024-01-31
|
||||
|
||||
### Added
|
||||
- CI: Check license headers.
|
||||
[#627](https://github.com/strukturag/nextcloud-spreed-signaling/pull/627)
|
||||
- Add "welcome" endpoint to proxy.
|
||||
[#644](https://github.com/strukturag/nextcloud-spreed-signaling/pull/644)
|
||||
|
||||
### Changed
|
||||
- build(deps): Bump github/codeql-action from 2 to 3
|
||||
[#619](https://github.com/strukturag/nextcloud-spreed-signaling/pull/619)
|
||||
- build(deps): Bump github.com/google/uuid from 1.4.0 to 1.5.0
|
||||
[#618](https://github.com/strukturag/nextcloud-spreed-signaling/pull/618)
|
||||
- build(deps): Bump google.golang.org/grpc from 1.59.0 to 1.60.0
|
||||
[#617](https://github.com/strukturag/nextcloud-spreed-signaling/pull/617)
|
||||
- build(deps): Bump the artifacts group with 2 updates
|
||||
[#622](https://github.com/strukturag/nextcloud-spreed-signaling/pull/622)
|
||||
- build(deps): Bump golang.org/x/crypto from 0.16.0 to 0.17.0
|
||||
[#623](https://github.com/strukturag/nextcloud-spreed-signaling/pull/623)
|
||||
- build(deps): Bump google.golang.org/grpc from 1.60.0 to 1.60.1
|
||||
[#624](https://github.com/strukturag/nextcloud-spreed-signaling/pull/624)
|
||||
- Refactor proxy config
|
||||
[#606](https://github.com/strukturag/nextcloud-spreed-signaling/pull/606)
|
||||
- build(deps): Bump google.golang.org/protobuf from 1.31.0 to 1.32.0
|
||||
[#629](https://github.com/strukturag/nextcloud-spreed-signaling/pull/629)
|
||||
- build(deps): Bump github.com/prometheus/client_golang from 1.17.0 to 1.18.0
|
||||
[#630](https://github.com/strukturag/nextcloud-spreed-signaling/pull/630)
|
||||
- build(deps): Bump jinja2 from 3.1.2 to 3.1.3 in /docs
|
||||
[#632](https://github.com/strukturag/nextcloud-spreed-signaling/pull/632)
|
||||
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.7 to 2.10.9
|
||||
[#633](https://github.com/strukturag/nextcloud-spreed-signaling/pull/633)
|
||||
- build(deps): Bump markdown from 3.5.1 to 3.5.2 in /docs
|
||||
[#631](https://github.com/strukturag/nextcloud-spreed-signaling/pull/631)
|
||||
- build(deps): Bump github.com/nats-io/nats.go from 1.31.0 to 1.32.0
|
||||
[#634](https://github.com/strukturag/nextcloud-spreed-signaling/pull/634)
|
||||
- build(deps): Bump readthedocs-sphinx-search from 0.3.1 to 0.3.2 in /docs
|
||||
[#635](https://github.com/strukturag/nextcloud-spreed-signaling/pull/635)
|
||||
- build(deps): Bump actions/cache from 3 to 4
|
||||
[#638](https://github.com/strukturag/nextcloud-spreed-signaling/pull/638)
|
||||
- build(deps): Bump github.com/google/uuid from 1.5.0 to 1.6.0
|
||||
[#643](https://github.com/strukturag/nextcloud-spreed-signaling/pull/643)
|
||||
- build(deps): Bump google.golang.org/grpc from 1.60.1 to 1.61.0
|
||||
[#645](https://github.com/strukturag/nextcloud-spreed-signaling/pull/645)
|
||||
- build(deps): Bump peter-evans/create-or-update-comment from 3 to 4
|
||||
[#646](https://github.com/strukturag/nextcloud-spreed-signaling/pull/646)
|
||||
- CI: No longer need to manually cache Go modules.
|
||||
[#648](https://github.com/strukturag/nextcloud-spreed-signaling/pull/648)
|
||||
- CI: Disable cache for linter to bring back annotations.
|
||||
[#647](https://github.com/strukturag/nextcloud-spreed-signaling/pull/647)
|
||||
- Refactor DNS monitoring
|
||||
[#648](https://github.com/strukturag/nextcloud-spreed-signaling/pull/648)
|
||||
|
||||
### Fixed
|
||||
- Fix link to NATS install docs
|
||||
[#637](https://github.com/strukturag/nextcloud-spreed-signaling/pull/637)
|
||||
- docker: Always need to set proxy token id / key for server.
|
||||
[#641](https://github.com/strukturag/nextcloud-spreed-signaling/pull/641)
|
||||
|
||||
|
||||
## 1.2.2 - 2023-12-11
|
||||
|
||||
### Added
|
||||
- Include "~docker" in version if built on Docker.
|
||||
[#602](https://github.com/strukturag/nextcloud-spreed-signaling/pull/602)
|
||||
|
||||
### Changed
|
||||
- CI: No need to build docker images for testing, done internally.
|
||||
[#603](https://github.com/strukturag/nextcloud-spreed-signaling/pull/603)
|
||||
- build(deps): Bump sphinx-rtd-theme from 1.3.0 to 2.0.0 in /docs
|
||||
[#604](https://github.com/strukturag/nextcloud-spreed-signaling/pull/604)
|
||||
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.5 to 2.10.6
|
||||
[#605](https://github.com/strukturag/nextcloud-spreed-signaling/pull/605)
|
||||
- build(deps): Bump actions/setup-go from 4 to 5
|
||||
[#608](https://github.com/strukturag/nextcloud-spreed-signaling/pull/608)
|
||||
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.6 to 2.10.7
|
||||
[#612](https://github.com/strukturag/nextcloud-spreed-signaling/pull/612)
|
||||
- build(deps): Bump the etcd group with 4 updates
|
||||
[#611](https://github.com/strukturag/nextcloud-spreed-signaling/pull/611)
|
||||
|
||||
### Fixed
|
||||
- Skip options from default section when parsing "geoip-overrides".
|
||||
[#609](https://github.com/strukturag/nextcloud-spreed-signaling/pull/609)
|
||||
- Hangup virtual session if it gets disinvited.
|
||||
[#610](https://github.com/strukturag/nextcloud-spreed-signaling/pull/610)
|
||||
|
||||
|
||||
## 1.2.1 - 2023-11-15
|
||||
|
||||
### Added
|
||||
|
|
81
Makefile
81
Makefile
|
@ -7,12 +7,20 @@ GOFMT := "$(GODIR)/gofmt"
|
|||
GOOS ?= linux
|
||||
GOARCH ?= amd64
|
||||
GOVERSION := $(shell "$(GO)" env GOVERSION | sed "s|go||" )
|
||||
BINDIR := "$(CURDIR)/bin"
|
||||
BINDIR := $(CURDIR)/bin
|
||||
VENDORDIR := "$(CURDIR)/vendor"
|
||||
VERSION := $(shell "$(CURDIR)/scripts/get-version.sh")
|
||||
TARVERSION := $(shell "$(CURDIR)/scripts/get-version.sh" --tar)
|
||||
PACKAGENAME := github.com/strukturag/nextcloud-spreed-signaling
|
||||
ALL_PACKAGES := $(PACKAGENAME) $(PACKAGENAME)/client $(PACKAGENAME)/proxy $(PACKAGENAME)/server
|
||||
PROTO_FILES := $(basename $(wildcard *.proto))
|
||||
PROTO_GO_FILES := $(addsuffix .pb.go,$(PROTO_FILES)) $(addsuffix _grpc.pb.go,$(PROTO_FILES))
|
||||
EASYJSON_GO_FILES := \
|
||||
api_async_easyjson.go \
|
||||
api_backend_easyjson.go \
|
||||
api_grpc_easyjson.go \
|
||||
api_proxy_easyjson.go \
|
||||
api_signaling_easyjson.go
|
||||
|
||||
ifneq ($(VERSION),)
|
||||
INTERNALLDFLAGS := -X main.version=$(VERSION)
|
||||
|
@ -37,13 +45,21 @@ TIMEOUT := 60s
|
|||
endif
|
||||
|
||||
ifneq ($(TEST),)
|
||||
TESTARGS := $(TESTARGS) -run $(TEST)
|
||||
TESTARGS := $(TESTARGS) -run "$(TEST)"
|
||||
endif
|
||||
|
||||
ifneq ($(COUNT),)
|
||||
TESTARGS := $(TESTARGS) -count $(COUNT)
|
||||
endif
|
||||
|
||||
ifneq ($(PARALLEL),)
|
||||
TESTARGS := $(TESTARGS) -parallel $(PARALLEL)
|
||||
endif
|
||||
|
||||
ifneq ($(VERBOSE),)
|
||||
TESTARGS := $(TESTARGS) -v
|
||||
endif
|
||||
|
||||
ifeq ($(GOARCH), amd64)
|
||||
GOPATHBIN := $(GOPATH)/bin
|
||||
else
|
||||
|
@ -53,15 +69,15 @@ endif
|
|||
hook:
|
||||
[ ! -d "$(CURDIR)/.git/hooks" ] || ln -sf "$(CURDIR)/scripts/pre-commit.hook" "$(CURDIR)/.git/hooks/pre-commit"
|
||||
|
||||
$(GOPATHBIN)/easyjson:
|
||||
[ "$(GOPROXY)" = "off" ] || $(GO) get -u -d github.com/mailru/easyjson/...
|
||||
$(GOPATHBIN)/easyjson: go.mod go.sum
|
||||
[ "$(GOPROXY)" = "off" ] || $(GO) get -d github.com/mailru/easyjson/...
|
||||
$(GO) install github.com/mailru/easyjson/...
|
||||
|
||||
$(GOPATHBIN)/protoc-gen-go:
|
||||
$(GOPATHBIN)/protoc-gen-go: go.mod go.sum
|
||||
$(GO) install google.golang.org/protobuf/cmd/protoc-gen-go
|
||||
|
||||
$(GOPATHBIN)/protoc-gen-go-grpc:
|
||||
[ "$(GOPROXY)" = "off" ] || $(GO) get -u -d google.golang.org/grpc/cmd/protoc-gen-go-grpc
|
||||
$(GOPATHBIN)/protoc-gen-go-grpc: go.mod go.sum
|
||||
[ "$(GOPROXY)" = "off" ] || $(GO) get -d google.golang.org/grpc/cmd/protoc-gen-go-grpc
|
||||
$(GO) install google.golang.org/grpc/cmd/protoc-gen-go-grpc
|
||||
|
||||
continentmap.go:
|
||||
|
@ -78,68 +94,61 @@ check-continentmap:
|
|||
get:
|
||||
$(GO) get $(PACKAGE)
|
||||
|
||||
fmt: hook | common_proto
|
||||
fmt: hook | $(PROTO_GO_FILES)
|
||||
$(GOFMT) -s -w *.go client proxy server
|
||||
|
||||
vet: common
|
||||
$(GO) vet $(ALL_PACKAGES)
|
||||
|
||||
test: vet common
|
||||
$(GO) test -v -timeout $(TIMEOUT) $(TESTARGS) $(ALL_PACKAGES)
|
||||
$(GO) test -timeout $(TIMEOUT) $(TESTARGS) $(ALL_PACKAGES)
|
||||
|
||||
cover: vet common
|
||||
rm -f cover.out && \
|
||||
$(GO) test -v -timeout $(TIMEOUT) -coverprofile cover.out $(ALL_PACKAGES) && \
|
||||
$(GO) test -timeout $(TIMEOUT) -coverprofile cover.out $(ALL_PACKAGES) && \
|
||||
sed -i "/_easyjson/d" cover.out && \
|
||||
sed -i "/\.pb\.go/d" cover.out && \
|
||||
$(GO) tool cover -func=cover.out
|
||||
|
||||
coverhtml: vet common
|
||||
rm -f cover.out && \
|
||||
$(GO) test -v -timeout $(TIMEOUT) -coverprofile cover.out $(ALL_PACKAGES) && \
|
||||
$(GO) test -timeout $(TIMEOUT) -coverprofile cover.out $(ALL_PACKAGES) && \
|
||||
sed -i "/_easyjson/d" cover.out && \
|
||||
sed -i "/\.pb\.go/d" cover.out && \
|
||||
$(GO) tool cover -html=cover.out -o coverage.html
|
||||
|
||||
%_easyjson.go: %.go $(GOPATHBIN)/easyjson | common_proto
|
||||
%_easyjson.go: %.go $(GOPATHBIN)/easyjson | $(PROTO_GO_FILES)
|
||||
rm -f easyjson-bootstrap*.go
|
||||
PATH="$(GODIR)":$(PATH) "$(GOPATHBIN)/easyjson" -all $*.go
|
||||
|
||||
%.pb.go: %.proto $(GOPATHBIN)/protoc-gen-go $(GOPATHBIN)/protoc-gen-go-grpc
|
||||
PATH="$(GODIR)":"$(GOPATHBIN)":$(PATH) protoc --go_out=. --go_opt=paths=source_relative \
|
||||
PATH="$(GODIR)":"$(GOPATHBIN)":$(PATH) protoc \
|
||||
--go_out=. --go_opt=paths=source_relative \
|
||||
$*.proto
|
||||
|
||||
%_grpc.pb.go: %.proto $(GOPATHBIN)/protoc-gen-go $(GOPATHBIN)/protoc-gen-go-grpc
|
||||
PATH="$(GODIR)":"$(GOPATHBIN)":$(PATH) protoc \
|
||||
--go-grpc_out=. --go-grpc_opt=paths=source_relative \
|
||||
$*.proto
|
||||
|
||||
common: common_easyjson common_proto
|
||||
|
||||
common_easyjson: \
|
||||
api_async_easyjson.go \
|
||||
api_backend_easyjson.go \
|
||||
api_grpc_easyjson.go \
|
||||
api_proxy_easyjson.go \
|
||||
api_signaling_easyjson.go
|
||||
|
||||
common_proto: \
|
||||
grpc_backend.pb.go \
|
||||
grpc_internal.pb.go \
|
||||
grpc_mcu.pb.go \
|
||||
grpc_sessions.pb.go
|
||||
common: $(EASYJSON_GO_FILES) $(PROTO_GO_FILES)
|
||||
|
||||
$(BINDIR):
|
||||
mkdir -p $(BINDIR)
|
||||
mkdir -p "$(BINDIR)"
|
||||
|
||||
client: common $(BINDIR)
|
||||
$(GO) build $(BUILDARGS) -ldflags '$(INTERNALLDFLAGS)' -o $(BINDIR)/client ./client/...
|
||||
$(GO) build $(BUILDARGS) -ldflags '$(INTERNALLDFLAGS)' -o "$(BINDIR)/client" ./client/...
|
||||
|
||||
server: common $(BINDIR)
|
||||
$(GO) build $(BUILDARGS) -ldflags '$(INTERNALLDFLAGS)' -o $(BINDIR)/signaling ./server/...
|
||||
$(GO) build $(BUILDARGS) -ldflags '$(INTERNALLDFLAGS)' -o "$(BINDIR)/signaling" ./server/...
|
||||
|
||||
proxy: common $(BINDIR)
|
||||
$(GO) build $(BUILDARGS) -ldflags '$(INTERNALLDFLAGS)' -o $(BINDIR)/proxy ./proxy/...
|
||||
$(GO) build $(BUILDARGS) -ldflags '$(INTERNALLDFLAGS)' -o "$(BINDIR)/proxy" ./proxy/...
|
||||
|
||||
clean:
|
||||
rm -f *_easyjson.go
|
||||
rm -f $(EASYJSON_GO_FILES)
|
||||
rm -f easyjson-bootstrap*.go
|
||||
rm -f *.pb.go
|
||||
rm -f $(PROTO_GO_FILES)
|
||||
|
||||
build: server proxy
|
||||
|
||||
|
@ -163,5 +172,7 @@ tarball: vendor
|
|||
|
||||
dist: tarball
|
||||
|
||||
.NOTPARALLEL: %_easyjson.go
|
||||
.PHONY: continentmap.go vendor
|
||||
.NOTPARALLEL: $(EASYJSON_GO_FILES)
|
||||
.PHONY: continentmap.go common vendor
|
||||
.SECONDARY: $(EASYJSON_GO_FILES) $(PROTO_GO_FILES)
|
||||
.DELETE_ON_ERROR:
|
||||
|
|
39
README.md
39
README.md
|
@ -17,7 +17,7 @@ information on the API of the signaling server.
|
|||
The following tools are required for building the signaling server.
|
||||
|
||||
- git
|
||||
- go >= 1.20
|
||||
- go >= 1.21
|
||||
- make
|
||||
- protobuf-compiler >= 3
|
||||
|
||||
|
@ -118,7 +118,7 @@ https://docs.docker.com/compose/install/
|
|||
## Setup of NATS server
|
||||
|
||||
There is a detailed description on how to install and run the NATS server
|
||||
available at http://nats.io/documentation/tutorials/gnatsd-install/
|
||||
available at https://docs.nats.io/running-a-nats-service/introduction
|
||||
|
||||
You can use the `gnatsd.conf` file as base for the configuration of the NATS
|
||||
server.
|
||||
|
@ -171,7 +171,17 @@ proxy process gracefully after all clients have been disconnected. No new
|
|||
publishers will be accepted in this case.
|
||||
|
||||
|
||||
### Clustering
|
||||
### Remote streams (preview)
|
||||
|
||||
With Janus 1.1.0 or newer, remote streams are supported, i.e. a subscriber can
|
||||
receive a published stream from any server. For this, you need to configure
|
||||
`hostname`, `token_id` and `token_key` in the proxy configuration. Each proxy
|
||||
server also supports configuring maximum `incoming` and `outgoing` bandwidth
|
||||
settings, which will also be used to select remote streams.
|
||||
See `proxy.conf.in` in section `app` for details.
|
||||
|
||||
|
||||
## Clustering
|
||||
|
||||
The signaling server supports a clustering mode where multiple running servers
|
||||
can be interconnected to form a single "virtual" server. This can be used to
|
||||
|
@ -299,6 +309,8 @@ interface on port `8080` below):
|
|||
# Enable proxying Websocket requests to the standalone signaling server.
|
||||
ProxyPass "/standalone-signaling/" "ws://127.0.0.1:8080/"
|
||||
|
||||
RequestHeader set X-Real-IP %{REMOTE_ADDR}s
|
||||
|
||||
RewriteEngine On
|
||||
# Websocket connections from the clients.
|
||||
RewriteRule ^/standalone-signaling/spreed/$ - [L]
|
||||
|
@ -334,6 +346,7 @@ myserver.domain.invalid {
|
|||
route /standalone-signaling/* {
|
||||
uri strip_prefix /standalone-signaling
|
||||
reverse_proxy http://127.0.0.1:8080
|
||||
header_up X-Real-IP {remote_host}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
@ -380,23 +393,3 @@ Usage:
|
|||
config file to use (default "server.conf")
|
||||
-maxClients int
|
||||
number of client connections (default 100)
|
||||
|
||||
|
||||
## Running multiple signaling servers
|
||||
|
||||
IMPORTANT: This is considered experimental and might not work with all
|
||||
functionality of the signaling server, especially when using the Janus
|
||||
integration.
|
||||
|
||||
The signaling server uses the NATS server to send messages to peers that are
|
||||
not connected locally. Therefore multiple signaling servers running on different
|
||||
hosts can use the same NATS server to build a simple cluster, allowing more
|
||||
simultaneous connections and distribute the load.
|
||||
|
||||
To set this up, make sure all signaling servers are using the same settings for
|
||||
their `session` keys and the `secret` in the `backend` section. Also the URL to
|
||||
the NATS server (option `url` in section `nats`) must point to the same NATS
|
||||
server.
|
||||
|
||||
If all this is setup correctly, clients can connect to either of the signaling
|
||||
servers and exchange messages between them.
|
||||
|
|
|
@ -1,6 +1,28 @@
|
|||
/**
|
||||
* Standalone signaling server for the Nextcloud Spreed app.
|
||||
* Copyright (C) 2023 struktur AG
|
||||
*
|
||||
* @author Joachim Bauch <bauch@struktur.de>
|
||||
*
|
||||
* @license GNU AGPL version 3 or any later version
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
package signaling
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
|
@ -10,6 +32,19 @@ type AllowedIps struct {
|
|||
allowed []*net.IPNet
|
||||
}
|
||||
|
||||
func (a *AllowedIps) String() string {
|
||||
var b bytes.Buffer
|
||||
b.WriteString("[")
|
||||
for idx, n := range a.allowed {
|
||||
if idx > 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
b.WriteString(n.String())
|
||||
}
|
||||
b.WriteString("]")
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (a *AllowedIps) Empty() bool {
|
||||
return len(a.allowed) == 0
|
||||
}
|
||||
|
@ -78,3 +113,22 @@ func DefaultAllowedIps() *AllowedIps {
|
|||
}
|
||||
return result
|
||||
}
|
||||
|
||||
var (
|
||||
privateIpNets = []string{
|
||||
// Loopback addresses.
|
||||
"127.0.0.0/8",
|
||||
// Private addresses.
|
||||
"10.0.0.0/8",
|
||||
"172.16.0.0/12",
|
||||
"192.168.0.0/16",
|
||||
}
|
||||
)
|
||||
|
||||
func DefaultPrivateIps() *AllowedIps {
|
||||
allowed, err := ParseAllowedIps(strings.Join(privateIpNets, ","))
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("could not parse private ips %+v: %w", privateIpNets, err))
|
||||
}
|
||||
return allowed
|
||||
}
|
||||
|
|
|
@ -1,3 +1,24 @@
|
|||
/**
|
||||
* Standalone signaling server for the Nextcloud Spreed app.
|
||||
* Copyright (C) 2023 struktur AG
|
||||
*
|
||||
* @author Joachim Bauch <bauch@struktur.de>
|
||||
*
|
||||
* @license GNU AGPL version 3 or any later version
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
package signaling
|
||||
|
||||
import (
|
||||
|
@ -13,6 +34,9 @@ func TestAllowedIps(t *testing.T) {
|
|||
if a.Empty() {
|
||||
t.Fatal("should not be empty")
|
||||
}
|
||||
if expected := `[127.0.0.1/32, 192.168.0.1/32, 192.168.1.0/24]`; a.String() != expected {
|
||||
t.Errorf("expected %s, got %s", expected, a.String())
|
||||
}
|
||||
|
||||
allowed := []string{
|
||||
"127.0.0.1",
|
||||
|
|
|
@ -118,8 +118,8 @@ type BackendRoomInviteRequest struct {
|
|||
UserIds []string `json:"userids,omitempty"`
|
||||
// TODO(jojo): We should get rid of "AllUserIds" and find a better way to
|
||||
// notify existing users the room has changed and they need to update it.
|
||||
AllUserIds []string `json:"alluserids,omitempty"`
|
||||
Properties *json.RawMessage `json:"properties,omitempty"`
|
||||
AllUserIds []string `json:"alluserids,omitempty"`
|
||||
Properties json.RawMessage `json:"properties,omitempty"`
|
||||
}
|
||||
|
||||
type BackendRoomDisinviteRequest struct {
|
||||
|
@ -127,13 +127,13 @@ type BackendRoomDisinviteRequest struct {
|
|||
SessionIds []string `json:"sessionids,omitempty"`
|
||||
// TODO(jojo): We should get rid of "AllUserIds" and find a better way to
|
||||
// notify existing users the room has changed and they need to update it.
|
||||
AllUserIds []string `json:"alluserids,omitempty"`
|
||||
Properties *json.RawMessage `json:"properties,omitempty"`
|
||||
AllUserIds []string `json:"alluserids,omitempty"`
|
||||
Properties json.RawMessage `json:"properties,omitempty"`
|
||||
}
|
||||
|
||||
type BackendRoomUpdateRequest struct {
|
||||
UserIds []string `json:"userids,omitempty"`
|
||||
Properties *json.RawMessage `json:"properties,omitempty"`
|
||||
UserIds []string `json:"userids,omitempty"`
|
||||
Properties json.RawMessage `json:"properties,omitempty"`
|
||||
}
|
||||
|
||||
type BackendRoomDeleteRequest struct {
|
||||
|
@ -154,7 +154,7 @@ type BackendRoomParticipantsRequest struct {
|
|||
}
|
||||
|
||||
type BackendRoomMessageRequest struct {
|
||||
Data *json.RawMessage `json:"data,omitempty"`
|
||||
Data json.RawMessage `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
type BackendRoomSwitchToSessionsList []string
|
||||
|
@ -169,7 +169,7 @@ type BackendRoomSwitchToMessageRequest struct {
|
|||
// In the map, the key is the session id, the value additional details
|
||||
// (or null) for the session. The details will be included in the request
|
||||
// to the connected client.
|
||||
Sessions *json.RawMessage `json:"sessions,omitempty"`
|
||||
Sessions json.RawMessage `json:"sessions,omitempty"`
|
||||
|
||||
// Internal properties
|
||||
SessionsList BackendRoomSwitchToSessionsList `json:"sessionslist,omitempty"`
|
||||
|
@ -237,8 +237,8 @@ type BackendRoomDialoutResponse struct {
|
|||
// Requests from the signaling server to the Nextcloud backend.
|
||||
|
||||
type BackendClientAuthRequest struct {
|
||||
Version string `json:"version"`
|
||||
Params *json.RawMessage `json:"params"`
|
||||
Version string `json:"version"`
|
||||
Params json.RawMessage `json:"params"`
|
||||
}
|
||||
|
||||
type BackendClientRequest struct {
|
||||
|
@ -256,7 +256,7 @@ type BackendClientRequest struct {
|
|||
Session *BackendClientSessionRequest `json:"session,omitempty"`
|
||||
}
|
||||
|
||||
func NewBackendClientAuthRequest(params *json.RawMessage) *BackendClientRequest {
|
||||
func NewBackendClientAuthRequest(params json.RawMessage) *BackendClientRequest {
|
||||
return &BackendClientRequest{
|
||||
Type: "auth",
|
||||
Auth: &BackendClientAuthRequest{
|
||||
|
@ -284,9 +284,9 @@ type BackendClientResponse struct {
|
|||
}
|
||||
|
||||
type BackendClientAuthResponse struct {
|
||||
Version string `json:"version"`
|
||||
UserId string `json:"userid"`
|
||||
User *json.RawMessage `json:"user"`
|
||||
Version string `json:"version"`
|
||||
UserId string `json:"userid"`
|
||||
User json.RawMessage `json:"user"`
|
||||
}
|
||||
|
||||
type BackendClientRoomRequest struct {
|
||||
|
@ -315,14 +315,14 @@ func NewBackendClientRoomRequest(roomid string, userid string, sessionid string)
|
|||
}
|
||||
|
||||
type BackendClientRoomResponse struct {
|
||||
Version string `json:"version"`
|
||||
RoomId string `json:"roomid"`
|
||||
Properties *json.RawMessage `json:"properties"`
|
||||
Version string `json:"version"`
|
||||
RoomId string `json:"roomid"`
|
||||
Properties json.RawMessage `json:"properties"`
|
||||
|
||||
// Optional information about the Nextcloud Talk session. Can be used for
|
||||
// example to define a "userid" for otherwise anonymous users.
|
||||
// See "RoomSessionData" for a possible content.
|
||||
Session *json.RawMessage `json:"session,omitempty"`
|
||||
Session json.RawMessage `json:"session,omitempty"`
|
||||
|
||||
Permissions *[]Permission `json:"permissions,omitempty"`
|
||||
}
|
||||
|
@ -359,12 +359,12 @@ type BackendClientRingResponse struct {
|
|||
}
|
||||
|
||||
type BackendClientSessionRequest struct {
|
||||
Version string `json:"version"`
|
||||
RoomId string `json:"roomid"`
|
||||
Action string `json:"action"`
|
||||
SessionId string `json:"sessionid"`
|
||||
UserId string `json:"userid,omitempty"`
|
||||
User *json.RawMessage `json:"user,omitempty"`
|
||||
Version string `json:"version"`
|
||||
RoomId string `json:"roomid"`
|
||||
Action string `json:"action"`
|
||||
SessionId string `json:"sessionid"`
|
||||
UserId string `json:"userid,omitempty"`
|
||||
User json.RawMessage `json:"user,omitempty"`
|
||||
}
|
||||
|
||||
type BackendClientSessionResponse struct {
|
||||
|
@ -396,8 +396,8 @@ type OcsMeta struct {
|
|||
}
|
||||
|
||||
type OcsBody struct {
|
||||
Meta OcsMeta `json:"meta"`
|
||||
Data *json.RawMessage `json:"data"`
|
||||
Meta OcsMeta `json:"meta"`
|
||||
Data json.RawMessage `json:"data"`
|
||||
}
|
||||
|
||||
type OcsResponse struct {
|
||||
|
|
|
@ -27,6 +27,7 @@ import (
|
|||
)
|
||||
|
||||
func TestBackendChecksum(t *testing.T) {
|
||||
t.Parallel()
|
||||
rnd := newRandomString(32)
|
||||
body := []byte{1, 2, 3, 4, 5}
|
||||
secret := []byte("shared-secret")
|
||||
|
@ -58,6 +59,7 @@ func TestBackendChecksum(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestValidNumbers(t *testing.T) {
|
||||
t.Parallel()
|
||||
valid := []string{
|
||||
"+12",
|
||||
"+12345",
|
||||
|
|
81
api_proxy.go
81
api_proxy.go
|
@ -24,6 +24,7 @@ package signaling
|
|||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/golang-jwt/jwt/v4"
|
||||
)
|
||||
|
@ -48,6 +49,14 @@ type ProxyClientMessage struct {
|
|||
Payload *PayloadProxyClientMessage `json:"payload,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ProxyClientMessage) String() string {
|
||||
data, err := json.Marshal(m)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("Could not serialize %#v: %s", m, err)
|
||||
}
|
||||
return string(data)
|
||||
}
|
||||
|
||||
func (m *ProxyClientMessage) CheckValid() error {
|
||||
switch m.Type {
|
||||
case "":
|
||||
|
@ -115,6 +124,14 @@ type ProxyServerMessage struct {
|
|||
Event *EventProxyServerMessage `json:"event,omitempty"`
|
||||
}
|
||||
|
||||
func (r *ProxyServerMessage) String() string {
|
||||
data, err := json.Marshal(r)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("Could not serialize %#v: %s", r, err)
|
||||
}
|
||||
return string(data)
|
||||
}
|
||||
|
||||
func (r *ProxyServerMessage) CloseAfterSend(session Session) bool {
|
||||
switch r.Type {
|
||||
case "bye":
|
||||
|
@ -179,12 +196,20 @@ type ByeProxyServerMessage struct {
|
|||
type CommandProxyClientMessage struct {
|
||||
Type string `json:"type"`
|
||||
|
||||
Sid string `json:"sid,omitempty"`
|
||||
StreamType string `json:"streamType,omitempty"`
|
||||
PublisherId string `json:"publisherId,omitempty"`
|
||||
ClientId string `json:"clientId,omitempty"`
|
||||
Bitrate int `json:"bitrate,omitempty"`
|
||||
MediaTypes MediaType `json:"mediatypes,omitempty"`
|
||||
Sid string `json:"sid,omitempty"`
|
||||
StreamType StreamType `json:"streamType,omitempty"`
|
||||
PublisherId string `json:"publisherId,omitempty"`
|
||||
ClientId string `json:"clientId,omitempty"`
|
||||
Bitrate int `json:"bitrate,omitempty"`
|
||||
MediaTypes MediaType `json:"mediatypes,omitempty"`
|
||||
|
||||
RemoteUrl string `json:"remoteUrl,omitempty"`
|
||||
remoteUrl *url.URL
|
||||
RemoteToken string `json:"remoteToken,omitempty"`
|
||||
|
||||
Hostname string `json:"hostname,omitempty"`
|
||||
Port int `json:"port,omitempty"`
|
||||
RtcpPort int `json:"rtcpPort,omitempty"`
|
||||
}
|
||||
|
||||
func (m *CommandProxyClientMessage) CheckValid() error {
|
||||
|
@ -202,6 +227,17 @@ func (m *CommandProxyClientMessage) CheckValid() error {
|
|||
if m.StreamType == "" {
|
||||
return fmt.Errorf("stream type missing")
|
||||
}
|
||||
if m.RemoteUrl != "" {
|
||||
if m.RemoteToken == "" {
|
||||
return fmt.Errorf("remote token missing")
|
||||
}
|
||||
|
||||
remoteUrl, err := url.Parse(m.RemoteUrl)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid remote url: %w", err)
|
||||
}
|
||||
m.remoteUrl = remoteUrl
|
||||
}
|
||||
case "delete-publisher":
|
||||
fallthrough
|
||||
case "delete-subscriber":
|
||||
|
@ -215,6 +251,10 @@ func (m *CommandProxyClientMessage) CheckValid() error {
|
|||
type CommandProxyServerMessage struct {
|
||||
Id string `json:"id,omitempty"`
|
||||
Sid string `json:"sid,omitempty"`
|
||||
|
||||
Bitrate int `json:"bitrate,omitempty"`
|
||||
|
||||
Streams []PublisherStream `json:"streams,omitempty"`
|
||||
}
|
||||
|
||||
// Type "payload"
|
||||
|
@ -259,12 +299,41 @@ type PayloadProxyServerMessage struct {
|
|||
|
||||
// Type "event"
|
||||
|
||||
type EventProxyServerBandwidth struct {
|
||||
// Incoming is the bandwidth utilization for publishers in percent.
|
||||
Incoming *float64 `json:"incoming,omitempty"`
|
||||
// Outgoing is the bandwidth utilization for subscribers in percent.
|
||||
Outgoing *float64 `json:"outgoing,omitempty"`
|
||||
}
|
||||
|
||||
func (b *EventProxyServerBandwidth) String() string {
|
||||
if b.Incoming != nil && b.Outgoing != nil {
|
||||
return fmt.Sprintf("bandwidth: incoming=%.3f%%, outgoing=%.3f%%", *b.Incoming, *b.Outgoing)
|
||||
} else if b.Incoming != nil {
|
||||
return fmt.Sprintf("bandwidth: incoming=%.3f%%, outgoing=unlimited", *b.Incoming)
|
||||
} else if b.Outgoing != nil {
|
||||
return fmt.Sprintf("bandwidth: incoming=unlimited, outgoing=%.3f%%", *b.Outgoing)
|
||||
} else {
|
||||
return "bandwidth: incoming=unlimited, outgoing=unlimited"
|
||||
}
|
||||
}
|
||||
|
||||
func (b EventProxyServerBandwidth) AllowIncoming() bool {
|
||||
return b.Incoming == nil || *b.Incoming < 100
|
||||
}
|
||||
|
||||
func (b EventProxyServerBandwidth) AllowOutgoing() bool {
|
||||
return b.Outgoing == nil || *b.Outgoing < 100
|
||||
}
|
||||
|
||||
type EventProxyServerMessage struct {
|
||||
Type string `json:"type"`
|
||||
|
||||
ClientId string `json:"clientId,omitempty"`
|
||||
Load int64 `json:"load,omitempty"`
|
||||
Sid string `json:"sid,omitempty"`
|
||||
|
||||
Bandwidth *EventProxyServerBandwidth `json:"bandwidth,omitempty"`
|
||||
}
|
||||
|
||||
// Information on a proxy in the etcd cluster.
|
||||
|
|
100
api_signaling.go
100
api_signaling.go
|
@ -32,6 +32,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/golang-jwt/jwt/v4"
|
||||
"github.com/pion/sdp/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -42,6 +43,11 @@ const (
|
|||
HelloVersionV2 = "2.0"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNoSdp = NewError("no_sdp", "Payload does not contain a SDP.")
|
||||
ErrInvalidSdp = NewError("invalid_sdp", "Payload does not contain a valid SDP.")
|
||||
)
|
||||
|
||||
// ClientMessage is a message that is sent from a client to the server.
|
||||
type ClientMessage struct {
|
||||
json.Marshaler
|
||||
|
@ -192,12 +198,12 @@ func (r *ServerMessage) CloseAfterSend(session Session) bool {
|
|||
}
|
||||
|
||||
func (r *ServerMessage) IsChatRefresh() bool {
|
||||
if r.Type != "message" || r.Message == nil || r.Message.Data == nil || len(*r.Message.Data) == 0 {
|
||||
if r.Type != "message" || r.Message == nil || len(r.Message.Data) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
var data MessageServerMessageData
|
||||
if err := json.Unmarshal(*r.Message.Data, &data); err != nil {
|
||||
if err := json.Unmarshal(r.Message.Data, &data); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -360,7 +366,7 @@ func (p *HelloV2AuthParams) CheckValid() error {
|
|||
type HelloV2TokenClaims struct {
|
||||
jwt.RegisteredClaims
|
||||
|
||||
UserData *json.RawMessage `json:"userdata,omitempty"`
|
||||
UserData json.RawMessage `json:"userdata,omitempty"`
|
||||
}
|
||||
|
||||
type HelloClientMessageAuth struct {
|
||||
|
@ -368,7 +374,7 @@ type HelloClientMessageAuth struct {
|
|||
// "HelloClientTypeClient"
|
||||
Type string `json:"type,omitempty"`
|
||||
|
||||
Params *json.RawMessage `json:"params"`
|
||||
Params json.RawMessage `json:"params"`
|
||||
|
||||
Url string `json:"url"`
|
||||
parsedUrl *url.URL
|
||||
|
@ -387,7 +393,7 @@ type HelloClientMessage struct {
|
|||
Features []string `json:"features,omitempty"`
|
||||
|
||||
// The authentication credentials.
|
||||
Auth HelloClientMessageAuth `json:"auth"`
|
||||
Auth *HelloClientMessageAuth `json:"auth,omitempty"`
|
||||
}
|
||||
|
||||
func (m *HelloClientMessage) CheckValid() error {
|
||||
|
@ -395,7 +401,7 @@ func (m *HelloClientMessage) CheckValid() error {
|
|||
return InvalidHelloVersion
|
||||
}
|
||||
if m.ResumeId == "" {
|
||||
if m.Auth.Params == nil || len(*m.Auth.Params) == 0 {
|
||||
if m.Auth == nil || len(m.Auth.Params) == 0 {
|
||||
return fmt.Errorf("params missing")
|
||||
}
|
||||
if m.Auth.Type == "" {
|
||||
|
@ -419,14 +425,14 @@ func (m *HelloClientMessage) CheckValid() error {
|
|||
case HelloVersionV1:
|
||||
// No additional validation necessary.
|
||||
case HelloVersionV2:
|
||||
if err := json.Unmarshal(*m.Auth.Params, &m.Auth.helloV2Params); err != nil {
|
||||
if err := json.Unmarshal(m.Auth.Params, &m.Auth.helloV2Params); err != nil {
|
||||
return err
|
||||
} else if err := m.Auth.helloV2Params.CheckValid(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case HelloClientTypeInternal:
|
||||
if err := json.Unmarshal(*m.Auth.Params, &m.Auth.internalParams); err != nil {
|
||||
if err := json.Unmarshal(m.Auth.Params, &m.Auth.internalParams); err != nil {
|
||||
return err
|
||||
} else if err := m.Auth.internalParams.CheckValid(); err != nil {
|
||||
return err
|
||||
|
@ -528,8 +534,8 @@ func (m *RoomClientMessage) CheckValid() error {
|
|||
}
|
||||
|
||||
type RoomServerMessage struct {
|
||||
RoomId string `json:"roomid"`
|
||||
Properties *json.RawMessage `json:"properties,omitempty"`
|
||||
RoomId string `json:"roomid"`
|
||||
Properties json.RawMessage `json:"properties,omitempty"`
|
||||
}
|
||||
|
||||
type RoomErrorDetails struct {
|
||||
|
@ -554,7 +560,7 @@ type MessageClientMessageRecipient struct {
|
|||
type MessageClientMessage struct {
|
||||
Recipient MessageClientMessageRecipient `json:"recipient"`
|
||||
|
||||
Data *json.RawMessage `json:"data"`
|
||||
Data json.RawMessage `json:"data"`
|
||||
}
|
||||
|
||||
type MessageClientMessageData struct {
|
||||
|
@ -563,10 +569,44 @@ type MessageClientMessageData struct {
|
|||
RoomType string `json:"roomType"`
|
||||
Bitrate int `json:"bitrate,omitempty"`
|
||||
Payload map[string]interface{} `json:"payload"`
|
||||
|
||||
offerSdp *sdp.SessionDescription // Only set if Type == "offer"
|
||||
answerSdp *sdp.SessionDescription // Only set if Type == "answer"
|
||||
}
|
||||
|
||||
func (m *MessageClientMessageData) CheckValid() error {
|
||||
if m.RoomType != "" && !IsValidStreamType(m.RoomType) {
|
||||
return fmt.Errorf("invalid room type: %s", m.RoomType)
|
||||
}
|
||||
if m.Type == "offer" || m.Type == "answer" {
|
||||
sdpValue, found := m.Payload["sdp"]
|
||||
if !found {
|
||||
return ErrNoSdp
|
||||
}
|
||||
sdpText, ok := sdpValue.(string)
|
||||
if !ok {
|
||||
return ErrInvalidSdp
|
||||
}
|
||||
|
||||
var sdp sdp.SessionDescription
|
||||
if err := sdp.Unmarshal([]byte(sdpText)); err != nil {
|
||||
return NewErrorDetail("invalid_sdp", "Error parsing SDP from payload.", map[string]interface{}{
|
||||
"error": err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
switch m.Type {
|
||||
case "offer":
|
||||
m.offerSdp = &sdp
|
||||
case "answer":
|
||||
m.answerSdp = &sdp
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MessageClientMessage) CheckValid() error {
|
||||
if m.Data == nil || len(*m.Data) == 0 {
|
||||
if len(m.Data) == 0 {
|
||||
return fmt.Errorf("message empty")
|
||||
}
|
||||
switch m.Recipient.Type {
|
||||
|
@ -607,7 +647,7 @@ type MessageServerMessage struct {
|
|||
Sender *MessageServerMessageSender `json:"sender"`
|
||||
Recipient *MessageClientMessageRecipient `json:"recipient,omitempty"`
|
||||
|
||||
Data *json.RawMessage `json:"data"`
|
||||
Data json.RawMessage `json:"data"`
|
||||
}
|
||||
|
||||
// Type "control"
|
||||
|
@ -624,7 +664,7 @@ type ControlServerMessage struct {
|
|||
Sender *MessageServerMessageSender `json:"sender"`
|
||||
Recipient *MessageClientMessageRecipient `json:"recipient,omitempty"`
|
||||
|
||||
Data *json.RawMessage `json:"data"`
|
||||
Data json.RawMessage `json:"data"`
|
||||
}
|
||||
|
||||
// Type "internal"
|
||||
|
@ -653,10 +693,10 @@ type AddSessionOptions struct {
|
|||
type AddSessionInternalClientMessage struct {
|
||||
CommonSessionInternalClientMessage
|
||||
|
||||
UserId string `json:"userid,omitempty"`
|
||||
User *json.RawMessage `json:"user,omitempty"`
|
||||
Flags uint32 `json:"flags,omitempty"`
|
||||
InCall *int `json:"incall,omitempty"`
|
||||
UserId string `json:"userid,omitempty"`
|
||||
User json.RawMessage `json:"user,omitempty"`
|
||||
Flags uint32 `json:"flags,omitempty"`
|
||||
InCall *int `json:"incall,omitempty"`
|
||||
|
||||
Options *AddSessionOptions `json:"options,omitempty"`
|
||||
}
|
||||
|
@ -808,10 +848,10 @@ type InternalServerMessage struct {
|
|||
// Type "event"
|
||||
|
||||
type RoomEventServerMessage struct {
|
||||
RoomId string `json:"roomid"`
|
||||
Properties *json.RawMessage `json:"properties,omitempty"`
|
||||
RoomId string `json:"roomid"`
|
||||
Properties json.RawMessage `json:"properties,omitempty"`
|
||||
// TODO(jojo): Change "InCall" to "int" when #914 has landed in NC Talk.
|
||||
InCall *json.RawMessage `json:"incall,omitempty"`
|
||||
InCall json.RawMessage `json:"incall,omitempty"`
|
||||
Changed []map[string]interface{} `json:"changed,omitempty"`
|
||||
Users []map[string]interface{} `json:"users,omitempty"`
|
||||
|
||||
|
@ -838,8 +878,8 @@ type RoomDisinviteEventServerMessage struct {
|
|||
}
|
||||
|
||||
type RoomEventMessage struct {
|
||||
RoomId string `json:"roomid"`
|
||||
Data *json.RawMessage `json:"data,omitempty"`
|
||||
RoomId string `json:"roomid"`
|
||||
Data json.RawMessage `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
type RoomFlagsServerMessage struct {
|
||||
|
@ -889,10 +929,10 @@ func (m *EventServerMessage) String() string {
|
|||
}
|
||||
|
||||
type EventServerMessageSessionEntry struct {
|
||||
SessionId string `json:"sessionid"`
|
||||
UserId string `json:"userid"`
|
||||
User *json.RawMessage `json:"user,omitempty"`
|
||||
RoomSessionId string `json:"roomsessionid,omitempty"`
|
||||
SessionId string `json:"sessionid"`
|
||||
UserId string `json:"userid"`
|
||||
User json.RawMessage `json:"user,omitempty"`
|
||||
RoomSessionId string `json:"roomsessionid,omitempty"`
|
||||
}
|
||||
|
||||
func (e *EventServerMessageSessionEntry) Clone() *EventServerMessageSessionEntry {
|
||||
|
@ -925,9 +965,9 @@ type AnswerOfferMessage struct {
|
|||
type TransientDataClientMessage struct {
|
||||
Type string `json:"type"`
|
||||
|
||||
Key string `json:"key,omitempty"`
|
||||
Value *json.RawMessage `json:"value,omitempty"`
|
||||
TTL time.Duration `json:"ttl,omitempty"`
|
||||
Key string `json:"key,omitempty"`
|
||||
Value json.RawMessage `json:"value,omitempty"`
|
||||
TTL time.Duration `json:"ttl,omitempty"`
|
||||
}
|
||||
|
||||
func (m *TransientDataClientMessage) CheckValid() error {
|
||||
|
|
|
@ -81,6 +81,7 @@ func testMessages(t *testing.T, messageType string, valid_messages []testCheckVa
|
|||
}
|
||||
|
||||
func TestClientMessage(t *testing.T) {
|
||||
t.Parallel()
|
||||
// The message needs a type.
|
||||
msg := ClientMessage{}
|
||||
if err := msg.CheckValid(); err == nil {
|
||||
|
@ -89,30 +90,31 @@ func TestClientMessage(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHelloClientMessage(t *testing.T) {
|
||||
t.Parallel()
|
||||
internalAuthParams := []byte("{\"backend\":\"https://domain.invalid\"}")
|
||||
tokenAuthParams := []byte("{\"token\":\"invalid-token\"}")
|
||||
valid_messages := []testCheckValid{
|
||||
// Hello version 1
|
||||
&HelloClientMessage{
|
||||
Version: HelloVersionV1,
|
||||
Auth: HelloClientMessageAuth{
|
||||
Params: &json.RawMessage{'{', '}'},
|
||||
Auth: &HelloClientMessageAuth{
|
||||
Params: json.RawMessage("{}"),
|
||||
Url: "https://domain.invalid",
|
||||
},
|
||||
},
|
||||
&HelloClientMessage{
|
||||
Version: HelloVersionV1,
|
||||
Auth: HelloClientMessageAuth{
|
||||
Auth: &HelloClientMessageAuth{
|
||||
Type: "client",
|
||||
Params: &json.RawMessage{'{', '}'},
|
||||
Params: json.RawMessage("{}"),
|
||||
Url: "https://domain.invalid",
|
||||
},
|
||||
},
|
||||
&HelloClientMessage{
|
||||
Version: HelloVersionV1,
|
||||
Auth: HelloClientMessageAuth{
|
||||
Auth: &HelloClientMessageAuth{
|
||||
Type: "internal",
|
||||
Params: (*json.RawMessage)(&internalAuthParams),
|
||||
Params: internalAuthParams,
|
||||
},
|
||||
},
|
||||
&HelloClientMessage{
|
||||
|
@ -122,16 +124,16 @@ func TestHelloClientMessage(t *testing.T) {
|
|||
// Hello version 2
|
||||
&HelloClientMessage{
|
||||
Version: HelloVersionV2,
|
||||
Auth: HelloClientMessageAuth{
|
||||
Params: (*json.RawMessage)(&tokenAuthParams),
|
||||
Auth: &HelloClientMessageAuth{
|
||||
Params: tokenAuthParams,
|
||||
Url: "https://domain.invalid",
|
||||
},
|
||||
},
|
||||
&HelloClientMessage{
|
||||
Version: HelloVersionV2,
|
||||
Auth: HelloClientMessageAuth{
|
||||
Auth: &HelloClientMessageAuth{
|
||||
Type: "client",
|
||||
Params: (*json.RawMessage)(&tokenAuthParams),
|
||||
Params: tokenAuthParams,
|
||||
Url: "https://domain.invalid",
|
||||
},
|
||||
},
|
||||
|
@ -147,75 +149,75 @@ func TestHelloClientMessage(t *testing.T) {
|
|||
&HelloClientMessage{Version: HelloVersionV1},
|
||||
&HelloClientMessage{
|
||||
Version: HelloVersionV1,
|
||||
Auth: HelloClientMessageAuth{
|
||||
Params: &json.RawMessage{'{', '}'},
|
||||
Auth: &HelloClientMessageAuth{
|
||||
Params: json.RawMessage("{}"),
|
||||
Type: "invalid-type",
|
||||
},
|
||||
},
|
||||
&HelloClientMessage{
|
||||
Version: HelloVersionV1,
|
||||
Auth: HelloClientMessageAuth{
|
||||
Auth: &HelloClientMessageAuth{
|
||||
Url: "https://domain.invalid",
|
||||
},
|
||||
},
|
||||
&HelloClientMessage{
|
||||
Version: HelloVersionV1,
|
||||
Auth: HelloClientMessageAuth{
|
||||
Params: &json.RawMessage{'{', '}'},
|
||||
Auth: &HelloClientMessageAuth{
|
||||
Params: json.RawMessage("{}"),
|
||||
},
|
||||
},
|
||||
&HelloClientMessage{
|
||||
Version: HelloVersionV1,
|
||||
Auth: HelloClientMessageAuth{
|
||||
Params: &json.RawMessage{'{', '}'},
|
||||
Auth: &HelloClientMessageAuth{
|
||||
Params: json.RawMessage("{}"),
|
||||
Url: "invalid-url",
|
||||
},
|
||||
},
|
||||
&HelloClientMessage{
|
||||
Version: HelloVersionV1,
|
||||
Auth: HelloClientMessageAuth{
|
||||
Auth: &HelloClientMessageAuth{
|
||||
Type: "internal",
|
||||
Params: &json.RawMessage{'{', '}'},
|
||||
Params: json.RawMessage("{}"),
|
||||
},
|
||||
},
|
||||
&HelloClientMessage{
|
||||
Version: HelloVersionV1,
|
||||
Auth: HelloClientMessageAuth{
|
||||
Auth: &HelloClientMessageAuth{
|
||||
Type: "internal",
|
||||
Params: &json.RawMessage{'x', 'y', 'z'}, // Invalid JSON.
|
||||
Params: json.RawMessage("xyz"), // Invalid JSON.
|
||||
},
|
||||
},
|
||||
// Hello version 2
|
||||
&HelloClientMessage{
|
||||
Version: HelloVersionV2,
|
||||
Auth: HelloClientMessageAuth{
|
||||
Auth: &HelloClientMessageAuth{
|
||||
Url: "https://domain.invalid",
|
||||
},
|
||||
},
|
||||
&HelloClientMessage{
|
||||
Version: HelloVersionV2,
|
||||
Auth: HelloClientMessageAuth{
|
||||
Params: (*json.RawMessage)(&tokenAuthParams),
|
||||
Auth: &HelloClientMessageAuth{
|
||||
Params: tokenAuthParams,
|
||||
},
|
||||
},
|
||||
&HelloClientMessage{
|
||||
Version: HelloVersionV2,
|
||||
Auth: HelloClientMessageAuth{
|
||||
Params: (*json.RawMessage)(&tokenAuthParams),
|
||||
Auth: &HelloClientMessageAuth{
|
||||
Params: tokenAuthParams,
|
||||
Url: "invalid-url",
|
||||
},
|
||||
},
|
||||
&HelloClientMessage{
|
||||
Version: HelloVersionV2,
|
||||
Auth: HelloClientMessageAuth{
|
||||
Params: (*json.RawMessage)(&internalAuthParams),
|
||||
Auth: &HelloClientMessageAuth{
|
||||
Params: internalAuthParams,
|
||||
Url: "https://domain.invalid",
|
||||
},
|
||||
},
|
||||
&HelloClientMessage{
|
||||
Version: HelloVersionV2,
|
||||
Auth: HelloClientMessageAuth{
|
||||
Params: &json.RawMessage{'x', 'y', 'z'}, // Invalid JSON.
|
||||
Auth: &HelloClientMessageAuth{
|
||||
Params: json.RawMessage("xyz"), // Invalid JSON.
|
||||
Url: "https://domain.invalid",
|
||||
},
|
||||
},
|
||||
|
@ -233,26 +235,27 @@ func TestHelloClientMessage(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestMessageClientMessage(t *testing.T) {
|
||||
t.Parallel()
|
||||
valid_messages := []testCheckValid{
|
||||
&MessageClientMessage{
|
||||
Recipient: MessageClientMessageRecipient{
|
||||
Type: "session",
|
||||
SessionId: "the-session-id",
|
||||
},
|
||||
Data: &json.RawMessage{'{', '}'},
|
||||
Data: json.RawMessage("{}"),
|
||||
},
|
||||
&MessageClientMessage{
|
||||
Recipient: MessageClientMessageRecipient{
|
||||
Type: "user",
|
||||
UserId: "the-user-id",
|
||||
},
|
||||
Data: &json.RawMessage{'{', '}'},
|
||||
Data: json.RawMessage("{}"),
|
||||
},
|
||||
&MessageClientMessage{
|
||||
Recipient: MessageClientMessageRecipient{
|
||||
Type: "room",
|
||||
},
|
||||
Data: &json.RawMessage{'{', '}'},
|
||||
Data: json.RawMessage("{}"),
|
||||
},
|
||||
}
|
||||
invalid_messages := []testCheckValid{
|
||||
|
@ -267,20 +270,20 @@ func TestMessageClientMessage(t *testing.T) {
|
|||
Recipient: MessageClientMessageRecipient{
|
||||
Type: "session",
|
||||
},
|
||||
Data: &json.RawMessage{'{', '}'},
|
||||
Data: json.RawMessage("{}"),
|
||||
},
|
||||
&MessageClientMessage{
|
||||
Recipient: MessageClientMessageRecipient{
|
||||
Type: "session",
|
||||
UserId: "the-user-id",
|
||||
},
|
||||
Data: &json.RawMessage{'{', '}'},
|
||||
Data: json.RawMessage("{}"),
|
||||
},
|
||||
&MessageClientMessage{
|
||||
Recipient: MessageClientMessageRecipient{
|
||||
Type: "user",
|
||||
},
|
||||
Data: &json.RawMessage{'{', '}'},
|
||||
Data: json.RawMessage("{}"),
|
||||
},
|
||||
&MessageClientMessage{
|
||||
Recipient: MessageClientMessageRecipient{
|
||||
|
@ -293,13 +296,13 @@ func TestMessageClientMessage(t *testing.T) {
|
|||
Type: "user",
|
||||
SessionId: "the-user-id",
|
||||
},
|
||||
Data: &json.RawMessage{'{', '}'},
|
||||
Data: json.RawMessage("{}"),
|
||||
},
|
||||
&MessageClientMessage{
|
||||
Recipient: MessageClientMessageRecipient{
|
||||
Type: "unknown-type",
|
||||
},
|
||||
Data: &json.RawMessage{'{', '}'},
|
||||
Data: json.RawMessage("{}"),
|
||||
},
|
||||
}
|
||||
testMessages(t, "message", valid_messages, invalid_messages)
|
||||
|
@ -314,6 +317,7 @@ func TestMessageClientMessage(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestByeClientMessage(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Any "bye" message is valid.
|
||||
valid_messages := []testCheckValid{
|
||||
&ByeClientMessage{},
|
||||
|
@ -332,6 +336,7 @@ func TestByeClientMessage(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRoomClientMessage(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Any "room" message is valid.
|
||||
valid_messages := []testCheckValid{
|
||||
&RoomClientMessage{},
|
||||
|
@ -350,6 +355,7 @@ func TestRoomClientMessage(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestErrorMessages(t *testing.T) {
|
||||
t.Parallel()
|
||||
id := "request-id"
|
||||
msg := ClientMessage{
|
||||
Id: id,
|
||||
|
@ -382,12 +388,13 @@ func TestErrorMessages(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIsChatRefresh(t *testing.T) {
|
||||
t.Parallel()
|
||||
var msg ServerMessage
|
||||
data_true := []byte("{\"type\":\"chat\",\"chat\":{\"refresh\":true}}")
|
||||
msg = ServerMessage{
|
||||
Type: "message",
|
||||
Message: &MessageServerMessage{
|
||||
Data: (*json.RawMessage)(&data_true),
|
||||
Data: data_true,
|
||||
},
|
||||
}
|
||||
if !msg.IsChatRefresh() {
|
||||
|
@ -398,7 +405,7 @@ func TestIsChatRefresh(t *testing.T) {
|
|||
msg = ServerMessage{
|
||||
Type: "message",
|
||||
Message: &MessageServerMessage{
|
||||
Data: (*json.RawMessage)(&data_false),
|
||||
Data: data_false,
|
||||
},
|
||||
}
|
||||
if msg.IsChatRefresh() {
|
||||
|
@ -426,6 +433,7 @@ func assertEqualStrings(t *testing.T, expected, result []string) {
|
|||
}
|
||||
|
||||
func Test_Welcome_AddRemoveFeature(t *testing.T) {
|
||||
t.Parallel()
|
||||
var msg WelcomeServerMessage
|
||||
assertEqualStrings(t, []string{}, msg.Features)
|
||||
|
||||
|
|
|
@ -280,6 +280,8 @@ func (e *asyncEventsNats) Close() {
|
|||
sub.close()
|
||||
}
|
||||
}(e.sessionSubscriptions)
|
||||
// Can't use clear(...) here as the maps are processed asynchronously by the
|
||||
// goroutines above.
|
||||
e.backendRoomSubscriptions = make(map[string]*asyncBackendRoomSubscriberNats)
|
||||
e.roomSubscriptions = make(map[string]*asyncRoomSubscriberNats)
|
||||
e.userSubscriptions = make(map[string]*asyncUserSubscriberNats)
|
||||
|
|
|
@ -194,7 +194,7 @@ func (b *BackendClient) PerformJSONRequest(ctx context.Context, u *url.URL, requ
|
|||
if err := json.Unmarshal(body, &ocs); err != nil {
|
||||
log.Printf("Could not decode OCS response %s from %s: %s", string(body), req.URL, err)
|
||||
return err
|
||||
} else if ocs.Ocs == nil || ocs.Ocs.Data == nil {
|
||||
} else if ocs.Ocs == nil || len(ocs.Ocs.Data) == 0 {
|
||||
log.Printf("Incomplete OCS response %s from %s", string(body), req.URL)
|
||||
return ErrIncompleteResponse
|
||||
}
|
||||
|
@ -205,8 +205,8 @@ func (b *BackendClient) PerformJSONRequest(ctx context.Context, u *url.URL, requ
|
|||
return ErrThrottledResponse
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(*ocs.Ocs.Data, response); err != nil {
|
||||
log.Printf("Could not decode OCS response body %s from %s: %s", string(*ocs.Ocs.Data), req.URL, err)
|
||||
if err := json.Unmarshal(ocs.Ocs.Data, response); err != nil {
|
||||
log.Printf("Could not decode OCS response body %s from %s: %s", string(ocs.Ocs.Data), req.URL, err)
|
||||
return err
|
||||
}
|
||||
} else if err := json.Unmarshal(body, response); err != nil {
|
||||
|
|
|
@ -45,7 +45,7 @@ func returnOCS(t *testing.T, w http.ResponseWriter, body []byte) {
|
|||
StatusCode: http.StatusOK,
|
||||
Message: "OK",
|
||||
},
|
||||
Data: (*json.RawMessage)(&body),
|
||||
Data: body,
|
||||
},
|
||||
}
|
||||
if strings.Contains(t.Name(), "Throttled") {
|
||||
|
@ -70,6 +70,8 @@ func returnOCS(t *testing.T, w http.ResponseWriter, body []byte) {
|
|||
}
|
||||
|
||||
func TestPostOnRedirect(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
|
||||
http.Redirect(w, r, "/ocs/v2.php/two", http.StatusTemporaryRedirect)
|
||||
|
@ -125,6 +127,8 @@ func TestPostOnRedirect(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestPostOnRedirectDifferentHost(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
|
||||
http.Redirect(w, r, "http://domain.invalid/ocs/v2.php/two", http.StatusTemporaryRedirect)
|
||||
|
@ -165,6 +169,8 @@ func TestPostOnRedirectDifferentHost(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestPostOnRedirectStatusFound(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
|
||||
http.Redirect(w, r, "/ocs/v2.php/two", http.StatusFound)
|
||||
|
@ -217,6 +223,8 @@ func TestPostOnRedirectStatusFound(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHandleThrottled(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
|
||||
returnOCS(t, w, []byte("[]"))
|
||||
|
|
|
@ -92,6 +92,7 @@ func testBackends(t *testing.T, config *BackendConfiguration, valid_urls [][]str
|
|||
}
|
||||
|
||||
func TestIsUrlAllowed_Compat(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
// Old-style configuration
|
||||
valid_urls := []string{
|
||||
"http://domain.invalid",
|
||||
|
@ -114,6 +115,7 @@ func TestIsUrlAllowed_Compat(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIsUrlAllowed_CompatForceHttps(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
// Old-style configuration, force HTTPS
|
||||
valid_urls := []string{
|
||||
"https://domain.invalid",
|
||||
|
@ -135,6 +137,7 @@ func TestIsUrlAllowed_CompatForceHttps(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIsUrlAllowed(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
valid_urls := [][]string{
|
||||
{"https://domain.invalid/foo", string(testBackendSecret) + "-foo"},
|
||||
{"https://domain.invalid/foo/", string(testBackendSecret) + "-foo"},
|
||||
|
@ -180,6 +183,7 @@ func TestIsUrlAllowed(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIsUrlAllowed_EmptyAllowlist(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
valid_urls := []string{}
|
||||
invalid_urls := []string{
|
||||
"http://domain.invalid",
|
||||
|
@ -197,6 +201,7 @@ func TestIsUrlAllowed_EmptyAllowlist(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestIsUrlAllowed_AllowAll(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
valid_urls := []string{
|
||||
"http://domain.invalid",
|
||||
"https://domain.invalid",
|
||||
|
@ -222,6 +227,7 @@ type ParseBackendIdsTestcase struct {
|
|||
}
|
||||
|
||||
func TestParseBackendIds(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
testcases := []ParseBackendIdsTestcase{
|
||||
{"", nil},
|
||||
{"backend1", []string{"backend1"}},
|
||||
|
@ -241,6 +247,7 @@ func TestParseBackendIds(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendReloadNoChange(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
current := testutil.ToFloat64(statsBackendsCurrent)
|
||||
original_config := goconf.NewConfigFile()
|
||||
original_config.AddOption("backend", "backends", "backend1, backend2")
|
||||
|
@ -276,6 +283,7 @@ func TestBackendReloadNoChange(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendReloadChangeExistingURL(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
current := testutil.ToFloat64(statsBackendsCurrent)
|
||||
original_config := goconf.NewConfigFile()
|
||||
original_config.AddOption("backend", "backends", "backend1, backend2")
|
||||
|
@ -316,6 +324,7 @@ func TestBackendReloadChangeExistingURL(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendReloadChangeSecret(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
current := testutil.ToFloat64(statsBackendsCurrent)
|
||||
original_config := goconf.NewConfigFile()
|
||||
original_config.AddOption("backend", "backends", "backend1, backend2")
|
||||
|
@ -354,6 +363,7 @@ func TestBackendReloadChangeSecret(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendReloadAddBackend(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
current := testutil.ToFloat64(statsBackendsCurrent)
|
||||
original_config := goconf.NewConfigFile()
|
||||
original_config.AddOption("backend", "backends", "backend1")
|
||||
|
@ -394,6 +404,7 @@ func TestBackendReloadAddBackend(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendReloadRemoveHost(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
current := testutil.ToFloat64(statsBackendsCurrent)
|
||||
original_config := goconf.NewConfigFile()
|
||||
original_config.AddOption("backend", "backends", "backend1, backend2")
|
||||
|
@ -431,6 +442,7 @@ func TestBackendReloadRemoveHost(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendReloadRemoveBackendFromSharedHost(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
current := testutil.ToFloat64(statsBackendsCurrent)
|
||||
original_config := goconf.NewConfigFile()
|
||||
original_config.AddOption("backend", "backends", "backend1, backend2")
|
||||
|
@ -486,6 +498,8 @@ func mustParse(s string) *url.URL {
|
|||
}
|
||||
|
||||
func TestBackendConfiguration_Etcd(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
etcd, client := NewEtcdClientForTest(t)
|
||||
|
||||
url1 := "https://domain1.invalid/foo"
|
||||
|
@ -619,6 +633,8 @@ func TestBackendConfiguration_Etcd(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendCommonSecret(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
u1, err := url.Parse("http://domain1.invalid")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
|
|
@ -277,7 +277,7 @@ func (b *BackendServer) parseRequestBody(f func(http.ResponseWriter, *http.Reque
|
|||
}
|
||||
}
|
||||
|
||||
func (b *BackendServer) sendRoomInvite(roomid string, backend *Backend, userids []string, properties *json.RawMessage) {
|
||||
func (b *BackendServer) sendRoomInvite(roomid string, backend *Backend, userids []string, properties json.RawMessage) {
|
||||
msg := &AsyncMessage{
|
||||
Type: "message",
|
||||
Message: &ServerMessage{
|
||||
|
@ -347,7 +347,7 @@ func (b *BackendServer) sendRoomDisinvite(roomid string, backend *Backend, reaso
|
|||
wg.Wait()
|
||||
}
|
||||
|
||||
func (b *BackendServer) sendRoomUpdate(roomid string, backend *Backend, notified_userids []string, all_userids []string, properties *json.RawMessage) {
|
||||
func (b *BackendServer) sendRoomUpdate(roomid string, backend *Backend, notified_userids []string, all_userids []string, properties json.RawMessage) {
|
||||
msg := &AsyncMessage{
|
||||
Type: "message",
|
||||
Message: &ServerMessage{
|
||||
|
@ -553,11 +553,11 @@ func (b *BackendServer) sendRoomSwitchTo(roomid string, backend *Backend, reques
|
|||
|
||||
var wg sync.WaitGroup
|
||||
var mu sync.Mutex
|
||||
if request.SwitchTo.Sessions != nil {
|
||||
if len(request.SwitchTo.Sessions) > 0 {
|
||||
// We support both a list of sessions or a map with additional details per session.
|
||||
if (*request.SwitchTo.Sessions)[0] == '[' {
|
||||
if request.SwitchTo.Sessions[0] == '[' {
|
||||
var sessionsList BackendRoomSwitchToSessionsList
|
||||
if err := json.Unmarshal(*request.SwitchTo.Sessions, &sessionsList); err != nil {
|
||||
if err := json.Unmarshal(request.SwitchTo.Sessions, &sessionsList); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -595,7 +595,7 @@ func (b *BackendServer) sendRoomSwitchTo(roomid string, backend *Backend, reques
|
|||
request.SwitchTo.SessionsMap = nil
|
||||
} else {
|
||||
var sessionsMap BackendRoomSwitchToSessionsMap
|
||||
if err := json.Unmarshal(*request.SwitchTo.Sessions, &sessionsMap); err != nil {
|
||||
if err := json.Unmarshal(request.SwitchTo.Sessions, &sessionsMap); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -761,6 +761,16 @@ func (b *BackendServer) startDialout(roomid string, backend *Backend, backendUrl
|
|||
}
|
||||
|
||||
func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body []byte) {
|
||||
throttle, err := b.hub.throttler.CheckBruteforce(r.Context(), b.hub.getRealUserIP(r), "BackendRoomAuth")
|
||||
if err == ErrBruteforceDetected {
|
||||
http.Error(w, "Too many requests", http.StatusTooManyRequests)
|
||||
return
|
||||
} else if err != nil {
|
||||
log.Printf("Error checking for bruteforce: %s", err)
|
||||
http.Error(w, "Could not check for bruteforce", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
v := mux.Vars(r)
|
||||
roomid := v["roomid"]
|
||||
|
||||
|
@ -773,6 +783,7 @@ func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body
|
|||
|
||||
if backend == nil {
|
||||
// Unknown backend URL passed, return immediately.
|
||||
throttle(r.Context())
|
||||
http.Error(w, "Authentication check failed", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
|
@ -794,12 +805,14 @@ func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body
|
|||
}
|
||||
|
||||
if backend == nil {
|
||||
throttle(r.Context())
|
||||
http.Error(w, "Authentication check failed", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if !ValidateBackendChecksum(r, body, backend.Secret()) {
|
||||
throttle(r.Context())
|
||||
http.Error(w, "Authentication check failed", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
|
@ -814,7 +827,6 @@ func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body
|
|||
request.ReceivedTime = time.Now().UnixNano()
|
||||
|
||||
var response any
|
||||
var err error
|
||||
switch request.Type {
|
||||
case "invite":
|
||||
b.sendRoomInvite(roomid, backend, request.Invite.UserIds, request.Invite.Properties)
|
||||
|
@ -881,15 +893,9 @@ func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body
|
|||
}
|
||||
|
||||
func (b *BackendServer) allowStatsAccess(r *http.Request) bool {
|
||||
addr := getRealUserIP(r)
|
||||
if strings.Contains(addr, ":") {
|
||||
if host, _, err := net.SplitHostPort(addr); err == nil {
|
||||
addr = host
|
||||
}
|
||||
}
|
||||
|
||||
addr := b.hub.getRealUserIP(r)
|
||||
ip := net.ParseIP(addr)
|
||||
if ip == nil {
|
||||
if len(ip) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
|
|
|
@ -30,6 +30,7 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/textproto"
|
||||
|
@ -169,7 +170,7 @@ func CreateBackendServerWithClusteringForTestFromConfig(t *testing.T, config1 *g
|
|||
t.Cleanup(func() {
|
||||
events1.Close()
|
||||
})
|
||||
client1 := NewGrpcClientsForTest(t, addr2)
|
||||
client1, _ := NewGrpcClientsForTest(t, addr2)
|
||||
hub1, err := NewHub(config1, events1, grpcServer1, client1, nil, r1, "no-version")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -198,7 +199,7 @@ func CreateBackendServerWithClusteringForTestFromConfig(t *testing.T, config1 *g
|
|||
t.Cleanup(func() {
|
||||
events2.Close()
|
||||
})
|
||||
client2 := NewGrpcClientsForTest(t, addr1)
|
||||
client2, _ := NewGrpcClientsForTest(t, addr1)
|
||||
hub2, err := NewHub(config2, events2, grpcServer2, client2, nil, r2, "no-version")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -275,6 +276,8 @@ func expectRoomlistEvent(ch chan *AsyncMessage, msgType string) (*EventServerMes
|
|||
}
|
||||
|
||||
func TestBackendServer_NoAuth(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
_, _, _, _, _, server := CreateBackendServerForTest(t)
|
||||
|
||||
roomId := "the-room-id"
|
||||
|
@ -301,6 +304,8 @@ func TestBackendServer_NoAuth(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendServer_InvalidAuth(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
_, _, _, _, _, server := CreateBackendServerForTest(t)
|
||||
|
||||
roomId := "the-room-id"
|
||||
|
@ -329,6 +334,8 @@ func TestBackendServer_InvalidAuth(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendServer_OldCompatAuth(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
_, _, _, _, _, server := CreateBackendServerForTest(t)
|
||||
|
||||
roomId := "the-room-id"
|
||||
|
@ -343,7 +350,7 @@ func TestBackendServer_OldCompatAuth(t *testing.T) {
|
|||
AllUserIds: []string{
|
||||
userid,
|
||||
},
|
||||
Properties: &roomProperties,
|
||||
Properties: roomProperties,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -378,6 +385,8 @@ func TestBackendServer_OldCompatAuth(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendServer_InvalidBody(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
_, _, _, _, _, server := CreateBackendServerForTest(t)
|
||||
|
||||
roomId := "the-room-id"
|
||||
|
@ -397,6 +406,8 @@ func TestBackendServer_InvalidBody(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendServer_UnsupportedRequest(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
_, _, _, _, _, server := CreateBackendServerForTest(t)
|
||||
|
||||
msg := &BackendServerRoomRequest{
|
||||
|
@ -423,8 +434,10 @@ func TestBackendServer_UnsupportedRequest(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendServer_RoomInvite(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
for _, backend := range eventBackendsForTest {
|
||||
t.Run(backend, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
RunTestBackendServer_RoomInvite(t)
|
||||
})
|
||||
}
|
||||
|
@ -468,7 +481,7 @@ func RunTestBackendServer_RoomInvite(t *testing.T) {
|
|||
AllUserIds: []string{
|
||||
userid,
|
||||
},
|
||||
Properties: &roomProperties,
|
||||
Properties: roomProperties,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -497,14 +510,16 @@ func RunTestBackendServer_RoomInvite(t *testing.T) {
|
|||
t.Errorf("Expected invite, got %+v", event)
|
||||
} else if event.Invite.RoomId != roomId {
|
||||
t.Errorf("Expected room %s, got %+v", roomId, event)
|
||||
} else if event.Invite.Properties == nil || !bytes.Equal(*event.Invite.Properties, roomProperties) {
|
||||
t.Errorf("Room properties don't match: expected %s, got %s", string(roomProperties), string(*event.Invite.Properties))
|
||||
} else if !bytes.Equal(event.Invite.Properties, roomProperties) {
|
||||
t.Errorf("Room properties don't match: expected %s, got %s", string(roomProperties), string(event.Invite.Properties))
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackendServer_RoomDisinvite(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
for _, backend := range eventBackendsForTest {
|
||||
t.Run(backend, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
RunTestBackendServer_RoomDisinvite(t)
|
||||
})
|
||||
}
|
||||
|
@ -568,7 +583,7 @@ func RunTestBackendServer_RoomDisinvite(t *testing.T) {
|
|||
roomId + "-" + hello.Hello.SessionId,
|
||||
},
|
||||
AllUserIds: []string{},
|
||||
Properties: &roomProperties,
|
||||
Properties: roomProperties,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -596,8 +611,8 @@ func RunTestBackendServer_RoomDisinvite(t *testing.T) {
|
|||
t.Errorf("Expected disinvite, got %+v", event)
|
||||
} else if event.Disinvite.RoomId != roomId {
|
||||
t.Errorf("Expected room %s, got %+v", roomId, event)
|
||||
} else if event.Disinvite.Properties != nil {
|
||||
t.Errorf("Room properties should be omitted, got %s", string(*event.Disinvite.Properties))
|
||||
} else if len(event.Disinvite.Properties) > 0 {
|
||||
t.Errorf("Room properties should be omitted, got %s", string(event.Disinvite.Properties))
|
||||
} else if event.Disinvite.Reason != "disinvited" {
|
||||
t.Errorf("Reason should be disinvited, got %s", event.Disinvite.Reason)
|
||||
}
|
||||
|
@ -616,6 +631,8 @@ func RunTestBackendServer_RoomDisinvite(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendServer_RoomDisinviteDifferentRooms(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
||||
|
||||
client1 := NewTestClient(t, server, hub)
|
||||
|
@ -712,7 +729,7 @@ func TestBackendServer_RoomDisinviteDifferentRooms(t *testing.T) {
|
|||
UserIds: []string{
|
||||
testDefaultUserId,
|
||||
},
|
||||
Properties: (*json.RawMessage)(&testRoomProperties),
|
||||
Properties: testRoomProperties,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -741,8 +758,10 @@ func TestBackendServer_RoomDisinviteDifferentRooms(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendServer_RoomUpdate(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
for _, backend := range eventBackendsForTest {
|
||||
t.Run(backend, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
RunTestBackendServer_RoomUpdate(t)
|
||||
})
|
||||
}
|
||||
|
@ -762,7 +781,7 @@ func RunTestBackendServer_RoomUpdate(t *testing.T) {
|
|||
if backend == nil {
|
||||
t.Fatalf("Did not find backend")
|
||||
}
|
||||
room, err := hub.createRoom(roomId, &emptyProperties, backend)
|
||||
room, err := hub.createRoom(roomId, emptyProperties, backend)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not create room: %s", err)
|
||||
}
|
||||
|
@ -786,7 +805,7 @@ func RunTestBackendServer_RoomUpdate(t *testing.T) {
|
|||
UserIds: []string{
|
||||
userid,
|
||||
},
|
||||
Properties: &roomProperties,
|
||||
Properties: roomProperties,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -814,8 +833,8 @@ func RunTestBackendServer_RoomUpdate(t *testing.T) {
|
|||
t.Errorf("Expected update, got %+v", event)
|
||||
} else if event.Update.RoomId != roomId {
|
||||
t.Errorf("Expected room %s, got %+v", roomId, event)
|
||||
} else if event.Update.Properties == nil || !bytes.Equal(*event.Update.Properties, roomProperties) {
|
||||
t.Errorf("Room properties don't match: expected %s, got %s", string(roomProperties), string(*event.Update.Properties))
|
||||
} else if !bytes.Equal(event.Update.Properties, roomProperties) {
|
||||
t.Errorf("Room properties don't match: expected %s, got %s", string(roomProperties), string(event.Update.Properties))
|
||||
}
|
||||
|
||||
// TODO: Use event to wait for asynchronous messages.
|
||||
|
@ -825,14 +844,16 @@ func RunTestBackendServer_RoomUpdate(t *testing.T) {
|
|||
if room == nil {
|
||||
t.Fatalf("Room %s does not exist", roomId)
|
||||
}
|
||||
if string(*room.Properties()) != string(roomProperties) {
|
||||
t.Errorf("Expected properties %s for room %s, got %s", string(roomProperties), room.Id(), string(*room.Properties()))
|
||||
if string(room.Properties()) != string(roomProperties) {
|
||||
t.Errorf("Expected properties %s for room %s, got %s", string(roomProperties), room.Id(), string(room.Properties()))
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackendServer_RoomDelete(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
for _, backend := range eventBackendsForTest {
|
||||
t.Run(backend, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
RunTestBackendServer_RoomDelete(t)
|
||||
})
|
||||
}
|
||||
|
@ -852,7 +873,7 @@ func RunTestBackendServer_RoomDelete(t *testing.T) {
|
|||
if backend == nil {
|
||||
t.Fatalf("Did not find backend")
|
||||
}
|
||||
if _, err := hub.createRoom(roomId, &emptyProperties, backend); err != nil {
|
||||
if _, err := hub.createRoom(roomId, emptyProperties, backend); err != nil {
|
||||
t.Fatalf("Could not create room: %s", err)
|
||||
}
|
||||
|
||||
|
@ -900,8 +921,8 @@ func RunTestBackendServer_RoomDelete(t *testing.T) {
|
|||
t.Errorf("Expected disinvite, got %+v", event)
|
||||
} else if event.Disinvite.RoomId != roomId {
|
||||
t.Errorf("Expected room %s, got %+v", roomId, event)
|
||||
} else if event.Disinvite.Properties != nil {
|
||||
t.Errorf("Room properties should be omitted, got %s", string(*event.Disinvite.Properties))
|
||||
} else if len(event.Disinvite.Properties) > 0 {
|
||||
t.Errorf("Room properties should be omitted, got %s", string(event.Disinvite.Properties))
|
||||
} else if event.Disinvite.Reason != "deleted" {
|
||||
t.Errorf("Reason should be deleted, got %s", event.Disinvite.Reason)
|
||||
}
|
||||
|
@ -916,8 +937,10 @@ func RunTestBackendServer_RoomDelete(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendServer_ParticipantsUpdatePermissions(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
for _, subtest := range clusteredTests {
|
||||
t.Run(subtest, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
var hub1 *Hub
|
||||
var hub2 *Hub
|
||||
var server1 *httptest.Server
|
||||
|
@ -1047,6 +1070,8 @@ func TestBackendServer_ParticipantsUpdatePermissions(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendServer_ParticipantsUpdateEmptyPermissions(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
||||
|
||||
client := NewTestClient(t, server, hub)
|
||||
|
@ -1132,6 +1157,8 @@ func TestBackendServer_ParticipantsUpdateEmptyPermissions(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendServer_ParticipantsUpdateTimeout(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
||||
|
||||
client1 := NewTestClient(t, server, hub)
|
||||
|
@ -1345,8 +1372,10 @@ func TestBackendServer_ParticipantsUpdateTimeout(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendServer_InCallAll(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
for _, subtest := range clusteredTests {
|
||||
t.Run(subtest, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
var hub1 *Hub
|
||||
var hub2 *Hub
|
||||
var server1 *httptest.Server
|
||||
|
@ -1471,8 +1500,8 @@ func TestBackendServer_InCallAll(t *testing.T) {
|
|||
t.Error(err)
|
||||
} else if !in_call_1.All {
|
||||
t.Errorf("All flag not set in message %+v", in_call_1)
|
||||
} else if !bytes.Equal(*in_call_1.InCall, []byte("7")) {
|
||||
t.Errorf("Expected inCall flag 7, got %s", string(*in_call_1.InCall))
|
||||
} else if !bytes.Equal(in_call_1.InCall, []byte("7")) {
|
||||
t.Errorf("Expected inCall flag 7, got %s", string(in_call_1.InCall))
|
||||
}
|
||||
|
||||
if msg2_a, err := client2.RunUntilMessage(ctx); err != nil {
|
||||
|
@ -1481,8 +1510,8 @@ func TestBackendServer_InCallAll(t *testing.T) {
|
|||
t.Error(err)
|
||||
} else if !in_call_1.All {
|
||||
t.Errorf("All flag not set in message %+v", in_call_1)
|
||||
} else if !bytes.Equal(*in_call_1.InCall, []byte("7")) {
|
||||
t.Errorf("Expected inCall flag 7, got %s", string(*in_call_1.InCall))
|
||||
} else if !bytes.Equal(in_call_1.InCall, []byte("7")) {
|
||||
t.Errorf("Expected inCall flag 7, got %s", string(in_call_1.InCall))
|
||||
}
|
||||
|
||||
if !room1.IsSessionInCall(session1) {
|
||||
|
@ -1552,8 +1581,8 @@ func TestBackendServer_InCallAll(t *testing.T) {
|
|||
t.Error(err)
|
||||
} else if !in_call_1.All {
|
||||
t.Errorf("All flag not set in message %+v", in_call_1)
|
||||
} else if !bytes.Equal(*in_call_1.InCall, []byte("0")) {
|
||||
t.Errorf("Expected inCall flag 0, got %s", string(*in_call_1.InCall))
|
||||
} else if !bytes.Equal(in_call_1.InCall, []byte("0")) {
|
||||
t.Errorf("Expected inCall flag 0, got %s", string(in_call_1.InCall))
|
||||
}
|
||||
|
||||
if msg2_a, err := client2.RunUntilMessage(ctx); err != nil {
|
||||
|
@ -1562,8 +1591,8 @@ func TestBackendServer_InCallAll(t *testing.T) {
|
|||
t.Error(err)
|
||||
} else if !in_call_1.All {
|
||||
t.Errorf("All flag not set in message %+v", in_call_1)
|
||||
} else if !bytes.Equal(*in_call_1.InCall, []byte("0")) {
|
||||
t.Errorf("Expected inCall flag 0, got %s", string(*in_call_1.InCall))
|
||||
} else if !bytes.Equal(in_call_1.InCall, []byte("0")) {
|
||||
t.Errorf("Expected inCall flag 0, got %s", string(in_call_1.InCall))
|
||||
}
|
||||
|
||||
if room1.IsSessionInCall(session1) {
|
||||
|
@ -1595,6 +1624,8 @@ func TestBackendServer_InCallAll(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendServer_RoomMessage(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
||||
|
||||
client := NewTestClient(t, server, hub)
|
||||
|
@ -1628,7 +1659,7 @@ func TestBackendServer_RoomMessage(t *testing.T) {
|
|||
msg := &BackendServerRoomRequest{
|
||||
Type: "message",
|
||||
Message: &BackendRoomMessageRequest{
|
||||
Data: &messageData,
|
||||
Data: messageData,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -1654,12 +1685,14 @@ func TestBackendServer_RoomMessage(t *testing.T) {
|
|||
t.Error(err)
|
||||
} else if message.RoomId != roomId {
|
||||
t.Errorf("Expected message for room %s, got %s", roomId, message.RoomId)
|
||||
} else if !bytes.Equal(messageData, *message.Data) {
|
||||
t.Errorf("Expected message data %s, got %s", string(messageData), string(*message.Data))
|
||||
} else if !bytes.Equal(messageData, message.Data) {
|
||||
t.Errorf("Expected message data %s, got %s", string(messageData), string(message.Data))
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackendServer_TurnCredentials(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
_, _, _, _, _, server := CreateBackendServerForTestWithTurn(t)
|
||||
|
||||
q := make(url.Values)
|
||||
|
@ -1703,7 +1736,9 @@ func TestBackendServer_TurnCredentials(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendServer_StatsAllowedIps(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
config := goconf.NewConfigFile()
|
||||
config.AddOption("app", "trustedproxies", "1.2.3.4")
|
||||
config.AddOption("stats", "allowed_ips", "127.0.0.1, 192.168.0.1, 192.168.1.1/24")
|
||||
_, backend, _, _, _, _ := CreateBackendServerForTestFromConfig(t, config)
|
||||
|
||||
|
@ -1720,7 +1755,9 @@ func TestBackendServer_StatsAllowedIps(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, addr := range allowed {
|
||||
addr := addr
|
||||
t.Run(addr, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
r1 := &http.Request{
|
||||
RemoteAddr: addr,
|
||||
}
|
||||
|
@ -1728,6 +1765,10 @@ func TestBackendServer_StatsAllowedIps(t *testing.T) {
|
|||
t.Errorf("should allow %s", addr)
|
||||
}
|
||||
|
||||
if host, _, err := net.SplitHostPort(addr); err == nil {
|
||||
addr = host
|
||||
}
|
||||
|
||||
r2 := &http.Request{
|
||||
RemoteAddr: "1.2.3.4:12345",
|
||||
Header: http.Header{
|
||||
|
@ -1761,7 +1802,9 @@ func TestBackendServer_StatsAllowedIps(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, addr := range notAllowed {
|
||||
addr := addr
|
||||
t.Run(addr, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
r := &http.Request{
|
||||
RemoteAddr: addr,
|
||||
}
|
||||
|
@ -1773,6 +1816,7 @@ func TestBackendServer_StatsAllowedIps(t *testing.T) {
|
|||
}
|
||||
|
||||
func Test_IsNumeric(t *testing.T) {
|
||||
t.Parallel()
|
||||
numeric := []string{
|
||||
"0",
|
||||
"1",
|
||||
|
@ -1802,6 +1846,8 @@ func Test_IsNumeric(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendServer_DialoutNoSipBridge(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
||||
|
||||
client := NewTestClient(t, server, hub)
|
||||
|
@ -1860,6 +1906,8 @@ func TestBackendServer_DialoutNoSipBridge(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendServer_DialoutAccepted(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
||||
|
||||
client := NewTestClient(t, server, hub)
|
||||
|
@ -1966,6 +2014,8 @@ func TestBackendServer_DialoutAccepted(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendServer_DialoutAcceptedCompat(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
||||
|
||||
client := NewTestClient(t, server, hub)
|
||||
|
@ -2072,6 +2122,8 @@ func TestBackendServer_DialoutAcceptedCompat(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBackendServer_DialoutRejected(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
||||
|
||||
client := NewTestClient(t, server, hub)
|
||||
|
|
|
@ -24,10 +24,10 @@ package signaling
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/url"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/dlintw/goconf"
|
||||
|
@ -43,8 +43,10 @@ type backendStorageEtcd struct {
|
|||
|
||||
initializedCtx context.Context
|
||||
initializedFunc context.CancelFunc
|
||||
initializedWg sync.WaitGroup
|
||||
wakeupChanForTesting chan struct{}
|
||||
|
||||
closeCtx context.Context
|
||||
closeFunc context.CancelFunc
|
||||
}
|
||||
|
||||
func NewBackendStorageEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient) (BackendStorage, error) {
|
||||
|
@ -58,6 +60,7 @@ func NewBackendStorageEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient) (B
|
|||
}
|
||||
|
||||
initializedCtx, initializedFunc := context.WithCancel(context.Background())
|
||||
closeCtx, closeFunc := context.WithCancel(context.Background())
|
||||
result := &backendStorageEtcd{
|
||||
backendStorageCommon: backendStorageCommon{
|
||||
backends: make(map[string][]*Backend),
|
||||
|
@ -68,6 +71,8 @@ func NewBackendStorageEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient) (B
|
|||
|
||||
initializedCtx: initializedCtx,
|
||||
initializedFunc: initializedFunc,
|
||||
closeCtx: closeCtx,
|
||||
closeFunc: closeFunc,
|
||||
}
|
||||
|
||||
etcdClient.AddListener(result)
|
||||
|
@ -95,56 +100,74 @@ func (s *backendStorageEtcd) wakeupForTesting() {
|
|||
}
|
||||
|
||||
func (s *backendStorageEtcd) EtcdClientCreated(client *EtcdClient) {
|
||||
s.initializedWg.Add(1)
|
||||
go func() {
|
||||
if err := client.Watch(context.Background(), s.keyPrefix, s, clientv3.WithPrefix()); err != nil {
|
||||
log.Printf("Error processing watch for %s: %s", s.keyPrefix, err)
|
||||
if err := client.WaitForConnection(s.closeCtx); err != nil {
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return
|
||||
}
|
||||
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
client.WaitForConnection()
|
||||
|
||||
waitDelay := initialWaitDelay
|
||||
for {
|
||||
response, err := s.getBackends(client, s.keyPrefix)
|
||||
backoff, err := NewExponentialBackoff(initialWaitDelay, maxWaitDelay)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
for s.closeCtx.Err() == nil {
|
||||
response, err := s.getBackends(s.closeCtx, client, s.keyPrefix)
|
||||
if err != nil {
|
||||
if err == context.DeadlineExceeded {
|
||||
log.Printf("Timeout getting initial list of backends, retry in %s", waitDelay)
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return
|
||||
} else if errors.Is(err, context.DeadlineExceeded) {
|
||||
log.Printf("Timeout getting initial list of backends, retry in %s", backoff.NextWait())
|
||||
} else {
|
||||
log.Printf("Could not get initial list of backends, retry in %s: %s", waitDelay, err)
|
||||
log.Printf("Could not get initial list of backends, retry in %s: %s", backoff.NextWait(), err)
|
||||
}
|
||||
|
||||
time.Sleep(waitDelay)
|
||||
waitDelay = waitDelay * 2
|
||||
if waitDelay > maxWaitDelay {
|
||||
waitDelay = maxWaitDelay
|
||||
}
|
||||
backoff.Wait(s.closeCtx)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, ev := range response.Kvs {
|
||||
s.EtcdKeyUpdated(client, string(ev.Key), ev.Value)
|
||||
s.EtcdKeyUpdated(client, string(ev.Key), ev.Value, nil)
|
||||
}
|
||||
s.initializedWg.Wait()
|
||||
s.initializedFunc()
|
||||
|
||||
nextRevision := response.Header.Revision + 1
|
||||
prevRevision := nextRevision
|
||||
backoff.Reset()
|
||||
for s.closeCtx.Err() == nil {
|
||||
var err error
|
||||
if nextRevision, err = client.Watch(s.closeCtx, s.keyPrefix, nextRevision, s, clientv3.WithPrefix()); err != nil {
|
||||
log.Printf("Error processing watch for %s (%s), retry in %s", s.keyPrefix, err, backoff.NextWait())
|
||||
backoff.Wait(s.closeCtx)
|
||||
continue
|
||||
}
|
||||
|
||||
if nextRevision != prevRevision {
|
||||
backoff.Reset()
|
||||
prevRevision = nextRevision
|
||||
} else {
|
||||
log.Printf("Processing watch for %s interrupted, retry in %s", s.keyPrefix, backoff.NextWait())
|
||||
backoff.Wait(s.closeCtx)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (s *backendStorageEtcd) EtcdWatchCreated(client *EtcdClient, key string) {
|
||||
s.initializedWg.Done()
|
||||
}
|
||||
|
||||
func (s *backendStorageEtcd) getBackends(client *EtcdClient, keyPrefix string) (*clientv3.GetResponse, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
func (s *backendStorageEtcd) getBackends(ctx context.Context, client *EtcdClient, keyPrefix string) (*clientv3.GetResponse, error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
||||
defer cancel()
|
||||
|
||||
return client.Get(ctx, keyPrefix, clientv3.WithPrefix())
|
||||
}
|
||||
|
||||
func (s *backendStorageEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data []byte) {
|
||||
func (s *backendStorageEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data []byte, prevValue []byte) {
|
||||
var info BackendInformationEtcd
|
||||
if err := json.Unmarshal(data, &info); err != nil {
|
||||
log.Printf("Could not decode backend information %s: %s", string(data), err)
|
||||
|
@ -204,7 +227,7 @@ func (s *backendStorageEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data
|
|||
s.wakeupForTesting()
|
||||
}
|
||||
|
||||
func (s *backendStorageEtcd) EtcdKeyDeleted(client *EtcdClient, key string) {
|
||||
func (s *backendStorageEtcd) EtcdKeyDeleted(client *EtcdClient, key string, prevValue []byte) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
|
@ -240,6 +263,7 @@ func (s *backendStorageEtcd) EtcdKeyDeleted(client *EtcdClient, key string) {
|
|||
|
||||
func (s *backendStorageEtcd) Close() {
|
||||
s.etcdClient.RemoveListener(s)
|
||||
s.closeFunc()
|
||||
}
|
||||
|
||||
func (s *backendStorageEtcd) Reload(config *goconf.ConfigFile) {
|
||||
|
|
|
@ -21,6 +21,13 @@
|
|||
*/
|
||||
package signaling
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/dlintw/goconf"
|
||||
"go.etcd.io/etcd/server/v3/embed"
|
||||
)
|
||||
|
||||
func (s *backendStorageEtcd) getWakeupChannelForTesting() <-chan struct{} {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
@ -33,3 +40,38 @@ func (s *backendStorageEtcd) getWakeupChannelForTesting() <-chan struct{} {
|
|||
s.wakeupChanForTesting = ch
|
||||
return ch
|
||||
}
|
||||
|
||||
type testListener struct {
|
||||
etcd *embed.Etcd
|
||||
closed chan struct{}
|
||||
}
|
||||
|
||||
func (tl *testListener) EtcdClientCreated(client *EtcdClient) {
|
||||
tl.etcd.Server.Stop()
|
||||
close(tl.closed)
|
||||
}
|
||||
|
||||
func Test_BackendStorageEtcdNoLeak(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
||||
etcd, client := NewEtcdClientForTest(t)
|
||||
tl := &testListener{
|
||||
etcd: etcd,
|
||||
closed: make(chan struct{}),
|
||||
}
|
||||
client.AddListener(tl)
|
||||
defer client.RemoveListener(tl)
|
||||
|
||||
config := goconf.NewConfigFile()
|
||||
config.AddOption("backend", "backendtype", "etcd")
|
||||
config.AddOption("backend", "backendprefix", "/backends")
|
||||
|
||||
cfg, err := NewBackendConfiguration(config, client)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
<-tl.closed
|
||||
cfg.Close()
|
||||
})
|
||||
}
|
||||
|
|
|
@ -28,6 +28,7 @@ import (
|
|||
)
|
||||
|
||||
func TestBackoff_Exponential(t *testing.T) {
|
||||
t.Parallel()
|
||||
backoff, err := NewExponentialBackoff(100*time.Millisecond, 500*time.Millisecond)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
|
|
@ -48,9 +48,6 @@ const (
|
|||
maxInvalidateInterval = time.Minute
|
||||
)
|
||||
|
||||
// Can be overwritten by tests.
|
||||
var getCapabilitiesNow = time.Now
|
||||
|
||||
type capabilitiesEntry struct {
|
||||
nextUpdate time.Time
|
||||
capabilities map[string]interface{}
|
||||
|
@ -59,6 +56,9 @@ type capabilitiesEntry struct {
|
|||
type Capabilities struct {
|
||||
mu sync.RWMutex
|
||||
|
||||
// Can be overwritten by tests.
|
||||
getNow func() time.Time
|
||||
|
||||
version string
|
||||
pool *HttpClientPool
|
||||
entries map[string]*capabilitiesEntry
|
||||
|
@ -67,6 +67,8 @@ type Capabilities struct {
|
|||
|
||||
func NewCapabilities(version string, pool *HttpClientPool) (*Capabilities, error) {
|
||||
result := &Capabilities{
|
||||
getNow: time.Now,
|
||||
|
||||
version: version,
|
||||
pool: pool,
|
||||
entries: make(map[string]*capabilitiesEntry),
|
||||
|
@ -86,15 +88,15 @@ type CapabilitiesVersion struct {
|
|||
}
|
||||
|
||||
type CapabilitiesResponse struct {
|
||||
Version CapabilitiesVersion `json:"version"`
|
||||
Capabilities map[string]*json.RawMessage `json:"capabilities"`
|
||||
Version CapabilitiesVersion `json:"version"`
|
||||
Capabilities map[string]json.RawMessage `json:"capabilities"`
|
||||
}
|
||||
|
||||
func (c *Capabilities) getCapabilities(key string) (map[string]interface{}, bool) {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
|
||||
now := getCapabilitiesNow()
|
||||
now := c.getNow()
|
||||
if entry, found := c.entries[key]; found && entry.nextUpdate.After(now) {
|
||||
return entry.capabilities, true
|
||||
}
|
||||
|
@ -103,14 +105,15 @@ func (c *Capabilities) getCapabilities(key string) (map[string]interface{}, bool
|
|||
}
|
||||
|
||||
func (c *Capabilities) setCapabilities(key string, capabilities map[string]interface{}) {
|
||||
now := getCapabilitiesNow()
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
now := c.getNow()
|
||||
entry := &capabilitiesEntry{
|
||||
nextUpdate: now.Add(CapabilitiesCacheDuration),
|
||||
capabilities: capabilities,
|
||||
}
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.entries[key] = entry
|
||||
}
|
||||
|
||||
|
@ -118,7 +121,7 @@ func (c *Capabilities) invalidateCapabilities(key string) {
|
|||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
now := getCapabilitiesNow()
|
||||
now := c.getNow()
|
||||
if entry, found := c.nextInvalidate[key]; found && entry.After(now) {
|
||||
return
|
||||
}
|
||||
|
@ -188,25 +191,25 @@ func (c *Capabilities) loadCapabilities(ctx context.Context, u *url.URL) (map[st
|
|||
if err := json.Unmarshal(body, &ocs); err != nil {
|
||||
log.Printf("Could not decode OCS response %s from %s: %s", string(body), capUrl.String(), err)
|
||||
return nil, false, err
|
||||
} else if ocs.Ocs == nil || ocs.Ocs.Data == nil {
|
||||
} else if ocs.Ocs == nil || len(ocs.Ocs.Data) == 0 {
|
||||
log.Printf("Incomplete OCS response %s from %s", string(body), u)
|
||||
return nil, false, fmt.Errorf("incomplete OCS response")
|
||||
}
|
||||
|
||||
var response CapabilitiesResponse
|
||||
if err := json.Unmarshal(*ocs.Ocs.Data, &response); err != nil {
|
||||
log.Printf("Could not decode OCS response body %s from %s: %s", string(*ocs.Ocs.Data), capUrl.String(), err)
|
||||
if err := json.Unmarshal(ocs.Ocs.Data, &response); err != nil {
|
||||
log.Printf("Could not decode OCS response body %s from %s: %s", string(ocs.Ocs.Data), capUrl.String(), err)
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
capaObj, found := response.Capabilities[AppNameSpreed]
|
||||
if !found || capaObj == nil {
|
||||
if !found || len(capaObj) == 0 {
|
||||
log.Printf("No capabilities received for app spreed from %s: %+v", capUrl.String(), response)
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
var capa map[string]interface{}
|
||||
if err := json.Unmarshal(*capaObj, &capa); err != nil {
|
||||
if err := json.Unmarshal(capaObj, &capa); err != nil {
|
||||
log.Printf("Unsupported capabilities received for app spreed from %s: %+v", capUrl.String(), response)
|
||||
return nil, false, nil
|
||||
}
|
||||
|
|
|
@ -80,9 +80,9 @@ func NewCapabilitiesForTestWithCallback(t *testing.T, callback func(*Capabilitie
|
|||
Version: CapabilitiesVersion{
|
||||
Major: 20,
|
||||
},
|
||||
Capabilities: map[string]*json.RawMessage{
|
||||
"anotherApp": (*json.RawMessage)(&emptyArray),
|
||||
"spreed": (*json.RawMessage)(&spreedCapa),
|
||||
Capabilities: map[string]json.RawMessage{
|
||||
"anotherApp": emptyArray,
|
||||
"spreed": spreedCapa,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -102,7 +102,7 @@ func NewCapabilitiesForTestWithCallback(t *testing.T, callback func(*Capabilitie
|
|||
StatusCode: http.StatusOK,
|
||||
Message: http.StatusText(http.StatusOK),
|
||||
},
|
||||
Data: (*json.RawMessage)(&data),
|
||||
Data: data,
|
||||
}
|
||||
if data, err = json.Marshal(ocs); err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -120,16 +120,25 @@ func NewCapabilitiesForTest(t *testing.T) (*url.URL, *Capabilities) {
|
|||
return NewCapabilitiesForTestWithCallback(t, nil)
|
||||
}
|
||||
|
||||
func SetCapabilitiesGetNow(t *testing.T, f func() time.Time) {
|
||||
old := getCapabilitiesNow
|
||||
func SetCapabilitiesGetNow(t *testing.T, capabilities *Capabilities, f func() time.Time) {
|
||||
capabilities.mu.Lock()
|
||||
defer capabilities.mu.Unlock()
|
||||
|
||||
old := capabilities.getNow
|
||||
|
||||
t.Cleanup(func() {
|
||||
getCapabilitiesNow = old
|
||||
capabilities.mu.Lock()
|
||||
defer capabilities.mu.Unlock()
|
||||
|
||||
capabilities.getNow = old
|
||||
})
|
||||
|
||||
getCapabilitiesNow = f
|
||||
capabilities.getNow = f
|
||||
}
|
||||
|
||||
func TestCapabilities(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
url, capabilities := NewCapabilitiesForTest(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
||||
|
@ -192,6 +201,8 @@ func TestCapabilities(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestInvalidateCapabilities(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
var called atomic.Uint32
|
||||
url, capabilities := NewCapabilitiesForTestWithCallback(t, func(cr *CapabilitiesResponse) {
|
||||
called.Add(1)
|
||||
|
@ -244,7 +255,7 @@ func TestInvalidateCapabilities(t *testing.T) {
|
|||
}
|
||||
|
||||
// At a later time, invalidating can be done again.
|
||||
SetCapabilitiesGetNow(t, func() time.Time {
|
||||
SetCapabilitiesGetNow(t, capabilities, func() time.Time {
|
||||
return time.Now().Add(2 * time.Minute)
|
||||
})
|
||||
|
||||
|
|
|
@ -27,26 +27,19 @@ import (
|
|||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// CertificateCheckInterval defines the interval in which certificate files
|
||||
// are checked for modifications.
|
||||
CertificateCheckInterval = time.Minute
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
type CertificateReloader struct {
|
||||
mu sync.Mutex
|
||||
certFile string
|
||||
certWatcher *FileWatcher
|
||||
|
||||
certFile string
|
||||
keyFile string
|
||||
keyFile string
|
||||
keyWatcher *FileWatcher
|
||||
|
||||
certificate *tls.Certificate
|
||||
lastModified time.Time
|
||||
certificate atomic.Pointer[tls.Certificate]
|
||||
|
||||
nextCheck time.Time
|
||||
reloadCounter atomic.Uint64
|
||||
}
|
||||
|
||||
func NewCertificateReloader(certFile string, keyFile string) (*CertificateReloader, error) {
|
||||
|
@ -55,52 +48,43 @@ func NewCertificateReloader(certFile string, keyFile string) (*CertificateReload
|
|||
return nil, fmt.Errorf("could not load certificate / key: %w", err)
|
||||
}
|
||||
|
||||
stat, err := os.Stat(certFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not stat %s: %w", certFile, err)
|
||||
}
|
||||
|
||||
return &CertificateReloader{
|
||||
reloader := &CertificateReloader{
|
||||
certFile: certFile,
|
||||
keyFile: keyFile,
|
||||
}
|
||||
reloader.certificate.Store(&pair)
|
||||
reloader.certWatcher, err = NewFileWatcher(certFile, reloader.reload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reloader.keyWatcher, err = NewFileWatcher(keyFile, reloader.reload)
|
||||
if err != nil {
|
||||
reloader.certWatcher.Close() // nolint
|
||||
return nil, err
|
||||
}
|
||||
|
||||
certificate: &pair,
|
||||
lastModified: stat.ModTime(),
|
||||
return reloader, nil
|
||||
}
|
||||
|
||||
nextCheck: time.Now().Add(CertificateCheckInterval),
|
||||
}, nil
|
||||
func (r *CertificateReloader) Close() {
|
||||
r.keyWatcher.Close()
|
||||
r.certWatcher.Close()
|
||||
}
|
||||
|
||||
func (r *CertificateReloader) reload(filename string) {
|
||||
log.Printf("reloading certificate from %s with %s", r.certFile, r.keyFile)
|
||||
pair, err := tls.LoadX509KeyPair(r.certFile, r.keyFile)
|
||||
if err != nil {
|
||||
log.Printf("could not load certificate / key: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
r.certificate.Store(&pair)
|
||||
r.reloadCounter.Add(1)
|
||||
}
|
||||
|
||||
func (r *CertificateReloader) getCertificate() (*tls.Certificate, error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
now := time.Now()
|
||||
if now.Before(r.nextCheck) {
|
||||
return r.certificate, nil
|
||||
}
|
||||
|
||||
r.nextCheck = now.Add(CertificateCheckInterval)
|
||||
|
||||
stat, err := os.Stat(r.certFile)
|
||||
if err != nil {
|
||||
log.Printf("could not stat %s: %s", r.certFile, err)
|
||||
return r.certificate, nil
|
||||
}
|
||||
|
||||
if !stat.ModTime().Equal(r.lastModified) {
|
||||
log.Printf("reloading certificate from %s with %s", r.certFile, r.keyFile)
|
||||
pair, err := tls.LoadX509KeyPair(r.certFile, r.keyFile)
|
||||
if err != nil {
|
||||
log.Printf("could not load certificate / key: %s", err)
|
||||
return r.certificate, nil
|
||||
}
|
||||
|
||||
r.certificate = &pair
|
||||
r.lastModified = stat.ModTime()
|
||||
}
|
||||
|
||||
return r.certificate, nil
|
||||
return r.certificate.Load(), nil
|
||||
}
|
||||
|
||||
func (r *CertificateReloader) GetCertificate(h *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
|
@ -111,15 +95,17 @@ func (r *CertificateReloader) GetClientCertificate(i *tls.CertificateRequestInfo
|
|||
return r.getCertificate()
|
||||
}
|
||||
|
||||
func (r *CertificateReloader) GetReloadCounter() uint64 {
|
||||
return r.reloadCounter.Load()
|
||||
}
|
||||
|
||||
type CertPoolReloader struct {
|
||||
mu sync.Mutex
|
||||
certFile string
|
||||
certWatcher *FileWatcher
|
||||
|
||||
certFile string
|
||||
pool atomic.Pointer[x509.CertPool]
|
||||
|
||||
pool *x509.CertPool
|
||||
lastModified time.Time
|
||||
|
||||
nextCheck time.Time
|
||||
reloadCounter atomic.Uint64
|
||||
}
|
||||
|
||||
func loadCertPool(filename string) (*x509.CertPool, error) {
|
||||
|
@ -142,49 +128,38 @@ func NewCertPoolReloader(certFile string) (*CertPoolReloader, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
stat, err := os.Stat(certFile)
|
||||
reloader := &CertPoolReloader{
|
||||
certFile: certFile,
|
||||
}
|
||||
reloader.pool.Store(pool)
|
||||
reloader.certWatcher, err = NewFileWatcher(certFile, reloader.reload)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not stat %s: %w", certFile, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &CertPoolReloader{
|
||||
certFile: certFile,
|
||||
return reloader, nil
|
||||
}
|
||||
|
||||
pool: pool,
|
||||
lastModified: stat.ModTime(),
|
||||
func (r *CertPoolReloader) Close() {
|
||||
r.certWatcher.Close()
|
||||
}
|
||||
|
||||
nextCheck: time.Now().Add(CertificateCheckInterval),
|
||||
}, nil
|
||||
func (r *CertPoolReloader) reload(filename string) {
|
||||
log.Printf("reloading certificate pool from %s", r.certFile)
|
||||
pool, err := loadCertPool(r.certFile)
|
||||
if err != nil {
|
||||
log.Printf("could not load certificate pool: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
r.pool.Store(pool)
|
||||
r.reloadCounter.Add(1)
|
||||
}
|
||||
|
||||
func (r *CertPoolReloader) GetCertPool() *x509.CertPool {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
now := time.Now()
|
||||
if now.Before(r.nextCheck) {
|
||||
return r.pool
|
||||
}
|
||||
|
||||
r.nextCheck = now.Add(CertificateCheckInterval)
|
||||
|
||||
stat, err := os.Stat(r.certFile)
|
||||
if err != nil {
|
||||
log.Printf("could not stat %s: %s", r.certFile, err)
|
||||
return r.pool
|
||||
}
|
||||
|
||||
if !stat.ModTime().Equal(r.lastModified) {
|
||||
log.Printf("reloading certificate pool from %s", r.certFile)
|
||||
pool, err := loadCertPool(r.certFile)
|
||||
if err != nil {
|
||||
log.Printf("could not load certificate pool: %s", err)
|
||||
return r.pool
|
||||
}
|
||||
|
||||
r.pool = pool
|
||||
r.lastModified = stat.ModTime()
|
||||
}
|
||||
|
||||
return r.pool
|
||||
return r.pool.Load()
|
||||
}
|
||||
|
||||
func (r *CertPoolReloader) GetReloadCounter() uint64 {
|
||||
return r.reloadCounter.Load()
|
||||
}
|
||||
|
|
|
@ -22,15 +22,41 @@
|
|||
package signaling
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func UpdateCertificateCheckIntervalForTest(t *testing.T, interval time.Duration) {
|
||||
old := CertificateCheckInterval
|
||||
t.Helper()
|
||||
// Make sure test is not executed with "t.Parallel()"
|
||||
t.Setenv("PARALLEL_CHECK", "1")
|
||||
old := deduplicateWatchEvents.Load()
|
||||
t.Cleanup(func() {
|
||||
CertificateCheckInterval = old
|
||||
deduplicateWatchEvents.Store(old)
|
||||
})
|
||||
|
||||
CertificateCheckInterval = interval
|
||||
deduplicateWatchEvents.Store(int64(interval))
|
||||
}
|
||||
|
||||
func (r *CertificateReloader) WaitForReload(ctx context.Context) error {
|
||||
counter := r.GetReloadCounter()
|
||||
for counter == r.GetReloadCounter() {
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
time.Sleep(time.Millisecond)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *CertPoolReloader) WaitForReload(ctx context.Context) error {
|
||||
counter := r.GetReloadCounter()
|
||||
for counter == r.GetReloadCounter() {
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
time.Sleep(time.Millisecond)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
144
client.go
144
client.go
|
@ -23,8 +23,11 @@ package signaling
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"log"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
@ -92,26 +95,49 @@ type WritableClientMessage interface {
|
|||
CloseAfterSend(session Session) bool
|
||||
}
|
||||
|
||||
type HandlerClient interface {
|
||||
Context() context.Context
|
||||
RemoteAddr() string
|
||||
Country() string
|
||||
UserAgent() string
|
||||
IsConnected() bool
|
||||
IsAuthenticated() bool
|
||||
|
||||
GetSession() Session
|
||||
SetSession(session Session)
|
||||
|
||||
SendError(e *Error) bool
|
||||
SendByeResponse(message *ClientMessage) bool
|
||||
SendByeResponseWithReason(message *ClientMessage, reason string) bool
|
||||
SendMessage(message WritableClientMessage) bool
|
||||
|
||||
Close()
|
||||
}
|
||||
|
||||
type ClientHandler interface {
|
||||
OnClosed(*Client)
|
||||
OnMessageReceived(*Client, []byte)
|
||||
OnRTTReceived(*Client, time.Duration)
|
||||
OnClosed(HandlerClient)
|
||||
OnMessageReceived(HandlerClient, []byte)
|
||||
OnRTTReceived(HandlerClient, time.Duration)
|
||||
}
|
||||
|
||||
type ClientGeoIpHandler interface {
|
||||
OnLookupCountry(*Client) string
|
||||
OnLookupCountry(HandlerClient) string
|
||||
}
|
||||
|
||||
type Client struct {
|
||||
ctx context.Context
|
||||
conn *websocket.Conn
|
||||
addr string
|
||||
handler ClientHandler
|
||||
agent string
|
||||
closed atomic.Int32
|
||||
country *string
|
||||
logRTT bool
|
||||
|
||||
session atomic.Pointer[ClientSession]
|
||||
handlerMu sync.RWMutex
|
||||
handler ClientHandler
|
||||
|
||||
session atomic.Pointer[Session]
|
||||
sessionId atomic.Pointer[string]
|
||||
|
||||
mu sync.Mutex
|
||||
|
||||
|
@ -121,7 +147,7 @@ type Client struct {
|
|||
messageChan chan *bytes.Buffer
|
||||
}
|
||||
|
||||
func NewClient(conn *websocket.Conn, remoteAddress string, agent string, handler ClientHandler) (*Client, error) {
|
||||
func NewClient(ctx context.Context, conn *websocket.Conn, remoteAddress string, agent string, handler ClientHandler) (*Client, error) {
|
||||
remoteAddress = strings.TrimSpace(remoteAddress)
|
||||
if remoteAddress == "" {
|
||||
remoteAddress = "unknown remote address"
|
||||
|
@ -132,6 +158,7 @@ func NewClient(conn *websocket.Conn, remoteAddress string, agent string, handler
|
|||
}
|
||||
|
||||
client := &Client{
|
||||
ctx: ctx,
|
||||
agent: agent,
|
||||
logRTT: true,
|
||||
}
|
||||
|
@ -142,12 +169,28 @@ func NewClient(conn *websocket.Conn, remoteAddress string, agent string, handler
|
|||
func (c *Client) SetConn(conn *websocket.Conn, remoteAddress string, handler ClientHandler) {
|
||||
c.conn = conn
|
||||
c.addr = remoteAddress
|
||||
c.handler = handler
|
||||
c.SetHandler(handler)
|
||||
c.closer = NewCloser()
|
||||
c.messageChan = make(chan *bytes.Buffer, 16)
|
||||
c.messagesDone = make(chan struct{})
|
||||
}
|
||||
|
||||
func (c *Client) SetHandler(handler ClientHandler) {
|
||||
c.handlerMu.Lock()
|
||||
defer c.handlerMu.Unlock()
|
||||
c.handler = handler
|
||||
}
|
||||
|
||||
func (c *Client) getHandler() ClientHandler {
|
||||
c.handlerMu.RLock()
|
||||
defer c.handlerMu.RUnlock()
|
||||
return c.handler
|
||||
}
|
||||
|
||||
func (c *Client) Context() context.Context {
|
||||
return c.ctx
|
||||
}
|
||||
|
||||
func (c *Client) IsConnected() bool {
|
||||
return c.closed.Load() == 0
|
||||
}
|
||||
|
@ -156,12 +199,39 @@ func (c *Client) IsAuthenticated() bool {
|
|||
return c.GetSession() != nil
|
||||
}
|
||||
|
||||
func (c *Client) GetSession() *ClientSession {
|
||||
return c.session.Load()
|
||||
func (c *Client) GetSession() Session {
|
||||
session := c.session.Load()
|
||||
if session == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return *session
|
||||
}
|
||||
|
||||
func (c *Client) SetSession(session *ClientSession) {
|
||||
c.session.Store(session)
|
||||
func (c *Client) SetSession(session Session) {
|
||||
if session == nil {
|
||||
c.session.Store(nil)
|
||||
} else {
|
||||
c.session.Store(&session)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) SetSessionId(sessionId string) {
|
||||
c.sessionId.Store(&sessionId)
|
||||
}
|
||||
|
||||
func (c *Client) GetSessionId() string {
|
||||
sessionId := c.sessionId.Load()
|
||||
if sessionId == nil {
|
||||
session := c.GetSession()
|
||||
if session == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return session.PublicId()
|
||||
}
|
||||
|
||||
return *sessionId
|
||||
}
|
||||
|
||||
func (c *Client) RemoteAddr() string {
|
||||
|
@ -175,7 +245,7 @@ func (c *Client) UserAgent() string {
|
|||
func (c *Client) Country() string {
|
||||
if c.country == nil {
|
||||
var country string
|
||||
if handler, ok := c.handler.(ClientGeoIpHandler); ok {
|
||||
if handler, ok := c.getHandler().(ClientGeoIpHandler); ok {
|
||||
country = handler.OnLookupCountry(c)
|
||||
} else {
|
||||
country = unknownCountry
|
||||
|
@ -214,7 +284,7 @@ func (c *Client) doClose() {
|
|||
c.closer.Close()
|
||||
<-c.messagesDone
|
||||
|
||||
c.handler.OnClosed(c)
|
||||
c.getHandler().OnClosed(c)
|
||||
c.SetSession(nil)
|
||||
}
|
||||
}
|
||||
|
@ -234,12 +304,14 @@ func (c *Client) SendByeResponse(message *ClientMessage) bool {
|
|||
func (c *Client) SendByeResponseWithReason(message *ClientMessage, reason string) bool {
|
||||
response := &ServerMessage{
|
||||
Type: "bye",
|
||||
Bye: &ByeServerMessage{},
|
||||
}
|
||||
if message != nil {
|
||||
response.Id = message.Id
|
||||
}
|
||||
if reason != "" {
|
||||
if response.Bye == nil {
|
||||
response.Bye = &ByeServerMessage{}
|
||||
}
|
||||
response.Bye.Reason = reason
|
||||
}
|
||||
return c.SendMessage(response)
|
||||
|
@ -277,13 +349,13 @@ func (c *Client) ReadPump() {
|
|||
rtt := now.Sub(time.Unix(0, ts))
|
||||
if c.logRTT {
|
||||
rtt_ms := rtt.Nanoseconds() / time.Millisecond.Nanoseconds()
|
||||
if session := c.GetSession(); session != nil {
|
||||
log.Printf("Client %s has RTT of %d ms (%s)", session.PublicId(), rtt_ms, rtt)
|
||||
if sessionId := c.GetSessionId(); sessionId != "" {
|
||||
log.Printf("Client %s has RTT of %d ms (%s)", sessionId, rtt_ms, rtt)
|
||||
} else {
|
||||
log.Printf("Client from %s has RTT of %d ms (%s)", addr, rtt_ms, rtt)
|
||||
}
|
||||
}
|
||||
c.handler.OnRTTReceived(c, rtt)
|
||||
c.getHandler().OnRTTReceived(c, rtt)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
@ -292,12 +364,15 @@ func (c *Client) ReadPump() {
|
|||
conn.SetReadDeadline(time.Now().Add(pongWait)) // nolint
|
||||
messageType, reader, err := conn.NextReader()
|
||||
if err != nil {
|
||||
if _, ok := err.(*websocket.CloseError); !ok || websocket.IsUnexpectedCloseError(err,
|
||||
// Gorilla websocket hides the original net.Error, so also compare error messages
|
||||
if errors.Is(err, net.ErrClosed) || strings.Contains(err.Error(), net.ErrClosed.Error()) {
|
||||
break
|
||||
} else if _, ok := err.(*websocket.CloseError); !ok || websocket.IsUnexpectedCloseError(err,
|
||||
websocket.CloseNormalClosure,
|
||||
websocket.CloseGoingAway,
|
||||
websocket.CloseNoStatusReceived) {
|
||||
if session := c.GetSession(); session != nil {
|
||||
log.Printf("Error reading from client %s: %v", session.PublicId(), err)
|
||||
if sessionId := c.GetSessionId(); sessionId != "" {
|
||||
log.Printf("Error reading from client %s: %v", sessionId, err)
|
||||
} else {
|
||||
log.Printf("Error reading from %s: %v", addr, err)
|
||||
}
|
||||
|
@ -306,8 +381,8 @@ func (c *Client) ReadPump() {
|
|||
}
|
||||
|
||||
if messageType != websocket.TextMessage {
|
||||
if session := c.GetSession(); session != nil {
|
||||
log.Printf("Unsupported message type %v from client %s", messageType, session.PublicId())
|
||||
if sessionId := c.GetSessionId(); sessionId != "" {
|
||||
log.Printf("Unsupported message type %v from client %s", messageType, sessionId)
|
||||
} else {
|
||||
log.Printf("Unsupported message type %v from %s", messageType, addr)
|
||||
}
|
||||
|
@ -319,8 +394,8 @@ func (c *Client) ReadPump() {
|
|||
decodeBuffer.Reset()
|
||||
if _, err := decodeBuffer.ReadFrom(reader); err != nil {
|
||||
bufferPool.Put(decodeBuffer)
|
||||
if session := c.GetSession(); session != nil {
|
||||
log.Printf("Error reading message from client %s: %v", session.PublicId(), err)
|
||||
if sessionId := c.GetSessionId(); sessionId != "" {
|
||||
log.Printf("Error reading message from client %s: %v", sessionId, err)
|
||||
} else {
|
||||
log.Printf("Error reading message from %s: %v", addr, err)
|
||||
}
|
||||
|
@ -344,7 +419,7 @@ func (c *Client) processMessages() {
|
|||
break
|
||||
}
|
||||
|
||||
c.handler.OnMessageReceived(c, buffer.Bytes())
|
||||
c.getHandler().OnMessageReceived(c, buffer.Bytes())
|
||||
bufferPool.Put(buffer)
|
||||
}
|
||||
|
||||
|
@ -373,8 +448,8 @@ func (c *Client) writeInternal(message json.Marshaler) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
if session := c.GetSession(); session != nil {
|
||||
log.Printf("Could not send message %+v to client %s: %v", message, session.PublicId(), err)
|
||||
if sessionId := c.GetSessionId(); sessionId != "" {
|
||||
log.Printf("Could not send message %+v to client %s: %v", message, sessionId, err)
|
||||
} else {
|
||||
log.Printf("Could not send message %+v to %s: %v", message, c.RemoteAddr(), err)
|
||||
}
|
||||
|
@ -386,8 +461,8 @@ func (c *Client) writeInternal(message json.Marshaler) bool {
|
|||
close:
|
||||
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
|
||||
if err := c.conn.WriteMessage(websocket.CloseMessage, closeData); err != nil {
|
||||
if session := c.GetSession(); session != nil {
|
||||
log.Printf("Could not send close message to client %s: %v", session.PublicId(), err)
|
||||
if sessionId := c.GetSessionId(); sessionId != "" {
|
||||
log.Printf("Could not send close message to client %s: %v", sessionId, err)
|
||||
} else {
|
||||
log.Printf("Could not send close message to %s: %v", c.RemoteAddr(), err)
|
||||
}
|
||||
|
@ -413,8 +488,8 @@ func (c *Client) writeError(e error) bool { // nolint
|
|||
closeData := websocket.FormatCloseMessage(websocket.CloseInternalServerErr, e.Error())
|
||||
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
|
||||
if err := c.conn.WriteMessage(websocket.CloseMessage, closeData); err != nil {
|
||||
if session := c.GetSession(); session != nil {
|
||||
log.Printf("Could not send close message to client %s: %v", session.PublicId(), err)
|
||||
if sessionId := c.GetSessionId(); sessionId != "" {
|
||||
log.Printf("Could not send close message to client %s: %v", sessionId, err)
|
||||
} else {
|
||||
log.Printf("Could not send close message to %s: %v", c.RemoteAddr(), err)
|
||||
}
|
||||
|
@ -445,7 +520,6 @@ func (c *Client) writeMessageLocked(message WritableClientMessage) bool {
|
|||
go session.Close()
|
||||
}
|
||||
go c.Close()
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
|
@ -462,8 +536,8 @@ func (c *Client) sendPing() bool {
|
|||
msg := strconv.FormatInt(now, 10)
|
||||
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
|
||||
if err := c.conn.WriteMessage(websocket.PingMessage, []byte(msg)); err != nil {
|
||||
if session := c.GetSession(); session != nil {
|
||||
log.Printf("Could not send ping to client %s: %v", session.PublicId(), err)
|
||||
if sessionId := c.GetSessionId(); sessionId != "" {
|
||||
log.Printf("Could not send ping to client %s: %v", sessionId, err)
|
||||
} else {
|
||||
log.Printf("Could not send ping to %s: %v", c.RemoteAddr(), err)
|
||||
}
|
||||
|
|
|
@ -248,7 +248,7 @@ func (c *SignalingClient) PublicSessionId() string {
|
|||
|
||||
func (c *SignalingClient) processMessageMessage(message *signaling.ServerMessage) {
|
||||
var msg MessagePayload
|
||||
if err := json.Unmarshal(*message.Message.Data, &msg); err != nil {
|
||||
if err := json.Unmarshal(message.Message.Data, &msg); err != nil {
|
||||
log.Println("Error in unmarshal", err)
|
||||
return
|
||||
}
|
||||
|
@ -404,7 +404,7 @@ func (c *SignalingClient) SendMessages(clients []*SignalingClient) {
|
|||
Type: "session",
|
||||
SessionId: sessionIds[recipient],
|
||||
},
|
||||
Data: (*json.RawMessage)(&data),
|
||||
Data: data,
|
||||
},
|
||||
}
|
||||
sender.Send(msg)
|
||||
|
@ -461,7 +461,7 @@ func registerAuthHandler(router *mux.Router) {
|
|||
StatusCode: http.StatusOK,
|
||||
Message: http.StatusText(http.StatusOK),
|
||||
},
|
||||
Data: &rawdata,
|
||||
Data: rawdata,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -601,9 +601,9 @@ func main() {
|
|||
Type: "hello",
|
||||
Hello: &signaling.HelloClientMessage{
|
||||
Version: signaling.HelloVersionV1,
|
||||
Auth: signaling.HelloClientMessageAuth{
|
||||
Auth: &signaling.HelloClientMessageAuth{
|
||||
Url: backendUrl + "/auth",
|
||||
Params: &json.RawMessage{'{', '}'},
|
||||
Params: json.RawMessage("{}"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
202
clientsession.go
202
clientsession.go
|
@ -36,9 +36,6 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
// Sessions expire 30 seconds after the connection closed.
|
||||
sessionExpireDuration = 30 * time.Second
|
||||
|
||||
// Warn if a session has 32 or more pending messages.
|
||||
warnPendingMessagesCount = 32
|
||||
|
||||
|
@ -54,11 +51,13 @@ type ClientSession struct {
|
|||
privateId string
|
||||
publicId string
|
||||
data *SessionIdData
|
||||
ctx context.Context
|
||||
closeFunc context.CancelFunc
|
||||
|
||||
clientType string
|
||||
features []string
|
||||
userId string
|
||||
userData *json.RawMessage
|
||||
userData json.RawMessage
|
||||
|
||||
inCall Flags
|
||||
supportsPermissions bool
|
||||
|
@ -68,18 +67,18 @@ type ClientSession struct {
|
|||
backendUrl string
|
||||
parsedBackendUrl *url.URL
|
||||
|
||||
expires time.Time
|
||||
|
||||
mu sync.Mutex
|
||||
|
||||
client *Client
|
||||
room atomic.Pointer[Room]
|
||||
roomJoinTime atomic.Int64
|
||||
roomSessionId string
|
||||
client HandlerClient
|
||||
room atomic.Pointer[Room]
|
||||
roomJoinTime atomic.Int64
|
||||
|
||||
roomSessionIdLock sync.RWMutex
|
||||
roomSessionId string
|
||||
|
||||
publisherWaiters ChannelWaiters
|
||||
|
||||
publishers map[string]McuPublisher
|
||||
publishers map[StreamType]McuPublisher
|
||||
subscribers map[string]McuSubscriber
|
||||
|
||||
pendingClientMessages []*ServerMessage
|
||||
|
@ -96,12 +95,15 @@ type ClientSession struct {
|
|||
}
|
||||
|
||||
func NewClientSession(hub *Hub, privateId string, publicId string, data *SessionIdData, backend *Backend, hello *HelloClientMessage, auth *BackendClientAuthResponse) (*ClientSession, error) {
|
||||
ctx, closeFunc := context.WithCancel(context.Background())
|
||||
s := &ClientSession{
|
||||
hub: hub,
|
||||
events: hub.events,
|
||||
privateId: privateId,
|
||||
publicId: publicId,
|
||||
data: data,
|
||||
ctx: ctx,
|
||||
closeFunc: closeFunc,
|
||||
|
||||
clientType: hello.Auth.Type,
|
||||
features: hello.Features,
|
||||
|
@ -145,6 +147,10 @@ func NewClientSession(hub *Hub, privateId string, publicId string, data *Session
|
|||
return s, nil
|
||||
}
|
||||
|
||||
func (s *ClientSession) Context() context.Context {
|
||||
return s.ctx
|
||||
}
|
||||
|
||||
func (s *ClientSession) PrivateId() string {
|
||||
return s.privateId
|
||||
}
|
||||
|
@ -154,8 +160,8 @@ func (s *ClientSession) PublicId() string {
|
|||
}
|
||||
|
||||
func (s *ClientSession) RoomSessionId() string {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.roomSessionIdLock.RLock()
|
||||
defer s.roomSessionIdLock.RUnlock()
|
||||
return s.roomSessionId
|
||||
}
|
||||
|
||||
|
@ -309,25 +315,10 @@ func (s *ClientSession) UserId() string {
|
|||
return userId
|
||||
}
|
||||
|
||||
func (s *ClientSession) UserData() *json.RawMessage {
|
||||
func (s *ClientSession) UserData() json.RawMessage {
|
||||
return s.userData
|
||||
}
|
||||
|
||||
func (s *ClientSession) StartExpire() {
|
||||
// The hub mutex must be held when calling this method.
|
||||
s.expires = time.Now().Add(sessionExpireDuration)
|
||||
s.hub.expiredSessions[s] = true
|
||||
}
|
||||
|
||||
func (s *ClientSession) StopExpire() {
|
||||
// The hub mutex must be held when calling this method.
|
||||
delete(s.hub.expiredSessions, s)
|
||||
}
|
||||
|
||||
func (s *ClientSession) IsExpired(now time.Time) bool {
|
||||
return now.After(s.expires)
|
||||
}
|
||||
|
||||
func (s *ClientSession) SetRoom(room *Room) {
|
||||
s.room.Store(room)
|
||||
if room != nil {
|
||||
|
@ -356,8 +347,8 @@ func (s *ClientSession) getRoomJoinTime() time.Time {
|
|||
|
||||
func (s *ClientSession) releaseMcuObjects() {
|
||||
if len(s.publishers) > 0 {
|
||||
go func(publishers map[string]McuPublisher) {
|
||||
ctx := context.TODO()
|
||||
go func(publishers map[StreamType]McuPublisher) {
|
||||
ctx := context.Background()
|
||||
for _, publisher := range publishers {
|
||||
publisher.Close(ctx)
|
||||
}
|
||||
|
@ -366,7 +357,7 @@ func (s *ClientSession) releaseMcuObjects() {
|
|||
}
|
||||
if len(s.subscribers) > 0 {
|
||||
go func(subscribers map[string]McuSubscriber) {
|
||||
ctx := context.TODO()
|
||||
ctx := context.Background()
|
||||
for _, subscriber := range subscribers {
|
||||
subscriber.Close(ctx)
|
||||
}
|
||||
|
@ -380,6 +371,7 @@ func (s *ClientSession) Close() {
|
|||
}
|
||||
|
||||
func (s *ClientSession) closeAndWait(wait bool) {
|
||||
s.closeFunc()
|
||||
s.hub.removeSession(s)
|
||||
|
||||
s.mu.Lock()
|
||||
|
@ -413,8 +405,8 @@ func (s *ClientSession) SubscribeEvents() error {
|
|||
}
|
||||
|
||||
func (s *ClientSession) UpdateRoomSessionId(roomSessionId string) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.roomSessionIdLock.Lock()
|
||||
defer s.roomSessionIdLock.Unlock()
|
||||
|
||||
if s.roomSessionId == roomSessionId {
|
||||
return nil
|
||||
|
@ -443,8 +435,8 @@ func (s *ClientSession) UpdateRoomSessionId(roomSessionId string) error {
|
|||
}
|
||||
|
||||
func (s *ClientSession) SubscribeRoomEvents(roomid string, roomSessionId string) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.roomSessionIdLock.Lock()
|
||||
defer s.roomSessionIdLock.Unlock()
|
||||
|
||||
if err := s.events.RegisterRoomListener(roomid, s.backend, s); err != nil {
|
||||
return err
|
||||
|
@ -503,6 +495,9 @@ func (s *ClientSession) doUnsubscribeRoomEvents(notify bool) {
|
|||
s.events.UnregisterRoomListener(room.Id(), s.Backend(), s)
|
||||
}
|
||||
s.hub.roomSessions.DeleteRoomSession(s)
|
||||
|
||||
s.roomSessionIdLock.Lock()
|
||||
defer s.roomSessionIdLock.Unlock()
|
||||
if notify && room != nil && s.roomSessionId != "" {
|
||||
// Notify
|
||||
go func(sid string) {
|
||||
|
@ -520,14 +515,14 @@ func (s *ClientSession) doUnsubscribeRoomEvents(notify bool) {
|
|||
s.roomSessionId = ""
|
||||
}
|
||||
|
||||
func (s *ClientSession) ClearClient(client *Client) {
|
||||
func (s *ClientSession) ClearClient(client HandlerClient) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.clearClientLocked(client)
|
||||
}
|
||||
|
||||
func (s *ClientSession) clearClientLocked(client *Client) {
|
||||
func (s *ClientSession) clearClientLocked(client HandlerClient) {
|
||||
if s.client == nil {
|
||||
return
|
||||
} else if client != nil && s.client != client {
|
||||
|
@ -540,18 +535,18 @@ func (s *ClientSession) clearClientLocked(client *Client) {
|
|||
prevClient.SetSession(nil)
|
||||
}
|
||||
|
||||
func (s *ClientSession) GetClient() *Client {
|
||||
func (s *ClientSession) GetClient() HandlerClient {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
return s.getClientUnlocked()
|
||||
}
|
||||
|
||||
func (s *ClientSession) getClientUnlocked() *Client {
|
||||
func (s *ClientSession) getClientUnlocked() HandlerClient {
|
||||
return s.client
|
||||
}
|
||||
|
||||
func (s *ClientSession) SetClient(client *Client) *Client {
|
||||
func (s *ClientSession) SetClient(client HandlerClient) HandlerClient {
|
||||
if client == nil {
|
||||
panic("Use ClearClient to set the client to nil")
|
||||
}
|
||||
|
@ -573,12 +568,12 @@ func (s *ClientSession) SetClient(client *Client) *Client {
|
|||
return prev
|
||||
}
|
||||
|
||||
func (s *ClientSession) sendOffer(client McuClient, sender string, streamType string, offer map[string]interface{}) {
|
||||
func (s *ClientSession) sendOffer(client McuClient, sender string, streamType StreamType, offer map[string]interface{}) {
|
||||
offer_message := &AnswerOfferMessage{
|
||||
To: s.PublicId(),
|
||||
From: sender,
|
||||
Type: "offer",
|
||||
RoomType: streamType,
|
||||
RoomType: string(streamType),
|
||||
Payload: offer,
|
||||
Sid: client.Sid(),
|
||||
}
|
||||
|
@ -594,19 +589,19 @@ func (s *ClientSession) sendOffer(client McuClient, sender string, streamType st
|
|||
Type: "session",
|
||||
SessionId: sender,
|
||||
},
|
||||
Data: (*json.RawMessage)(&offer_data),
|
||||
Data: offer_data,
|
||||
},
|
||||
}
|
||||
|
||||
s.sendMessageUnlocked(response_message)
|
||||
}
|
||||
|
||||
func (s *ClientSession) sendCandidate(client McuClient, sender string, streamType string, candidate interface{}) {
|
||||
func (s *ClientSession) sendCandidate(client McuClient, sender string, streamType StreamType, candidate interface{}) {
|
||||
candidate_message := &AnswerOfferMessage{
|
||||
To: s.PublicId(),
|
||||
From: sender,
|
||||
Type: "candidate",
|
||||
RoomType: streamType,
|
||||
RoomType: string(streamType),
|
||||
Payload: map[string]interface{}{
|
||||
"candidate": candidate,
|
||||
},
|
||||
|
@ -624,7 +619,7 @@ func (s *ClientSession) sendCandidate(client McuClient, sender string, streamTyp
|
|||
Type: "session",
|
||||
SessionId: sender,
|
||||
},
|
||||
Data: (*json.RawMessage)(&candidate_data),
|
||||
Data: candidate_data,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -740,23 +735,6 @@ func (s *ClientSession) SubscriberClosed(subscriber McuSubscriber) {
|
|||
}
|
||||
}
|
||||
|
||||
type SdpError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func (e *SdpError) Error() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
type WrappedSdpError struct {
|
||||
SdpError
|
||||
err error
|
||||
}
|
||||
|
||||
func (e *WrappedSdpError) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
type PermissionError struct {
|
||||
permission Permission
|
||||
}
|
||||
|
@ -769,23 +747,10 @@ func (e *PermissionError) Error() string {
|
|||
return fmt.Sprintf("permission \"%s\" not found", e.permission)
|
||||
}
|
||||
|
||||
func (s *ClientSession) isSdpAllowedToSendLocked(payload map[string]interface{}) (MediaType, error) {
|
||||
sdpValue, found := payload["sdp"]
|
||||
if !found {
|
||||
return 0, &SdpError{"payload does not contain a sdp"}
|
||||
}
|
||||
sdpText, ok := sdpValue.(string)
|
||||
if !ok {
|
||||
return 0, &SdpError{"payload does not contain a valid sdp"}
|
||||
}
|
||||
var sdp sdp.SessionDescription
|
||||
if err := sdp.Unmarshal([]byte(sdpText)); err != nil {
|
||||
return 0, &WrappedSdpError{
|
||||
SdpError: SdpError{
|
||||
message: fmt.Sprintf("could not parse sdp: %s", err),
|
||||
},
|
||||
err: err,
|
||||
}
|
||||
func (s *ClientSession) isSdpAllowedToSendLocked(sdp *sdp.SessionDescription) (MediaType, error) {
|
||||
if sdp == nil {
|
||||
// Should have already been checked when data was validated.
|
||||
return 0, ErrNoSdp
|
||||
}
|
||||
|
||||
var mediaTypes MediaType
|
||||
|
@ -823,8 +788,8 @@ func (s *ClientSession) IsAllowedToSend(data *MessageClientMessageData) error {
|
|||
// Client is allowed to publish any media (audio / video).
|
||||
return nil
|
||||
} else if data != nil && data.Type == "offer" {
|
||||
// Parse SDP to check what user is trying to publish and check permissions accordingly.
|
||||
if _, err := s.isSdpAllowedToSendLocked(data.Payload); err != nil {
|
||||
// Check what user is trying to publish and check permissions accordingly.
|
||||
if _, err := s.isSdpAllowedToSendLocked(data.offerSdp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -839,22 +804,22 @@ func (s *ClientSession) IsAllowedToSend(data *MessageClientMessageData) error {
|
|||
}
|
||||
}
|
||||
|
||||
func (s *ClientSession) CheckOfferType(streamType string, data *MessageClientMessageData) (MediaType, error) {
|
||||
func (s *ClientSession) CheckOfferType(streamType StreamType, data *MessageClientMessageData) (MediaType, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
return s.checkOfferTypeLocked(streamType, data)
|
||||
}
|
||||
|
||||
func (s *ClientSession) checkOfferTypeLocked(streamType string, data *MessageClientMessageData) (MediaType, error) {
|
||||
if streamType == streamTypeScreen {
|
||||
func (s *ClientSession) checkOfferTypeLocked(streamType StreamType, data *MessageClientMessageData) (MediaType, error) {
|
||||
if streamType == StreamTypeScreen {
|
||||
if !s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_SCREEN) {
|
||||
return 0, &PermissionError{PERMISSION_MAY_PUBLISH_SCREEN}
|
||||
}
|
||||
|
||||
return MediaTypeScreen, nil
|
||||
} else if data != nil && data.Type == "offer" {
|
||||
mediaTypes, err := s.isSdpAllowedToSendLocked(data.Payload)
|
||||
mediaTypes, err := s.isSdpAllowedToSendLocked(data.offerSdp)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -865,7 +830,7 @@ func (s *ClientSession) checkOfferTypeLocked(streamType string, data *MessageCli
|
|||
return 0, nil
|
||||
}
|
||||
|
||||
func (s *ClientSession) GetOrCreatePublisher(ctx context.Context, mcu Mcu, streamType string, data *MessageClientMessageData) (McuPublisher, error) {
|
||||
func (s *ClientSession) GetOrCreatePublisher(ctx context.Context, mcu Mcu, streamType StreamType, data *MessageClientMessageData) (McuPublisher, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
|
@ -883,7 +848,7 @@ func (s *ClientSession) GetOrCreatePublisher(ctx context.Context, mcu Mcu, strea
|
|||
bitrate := data.Bitrate
|
||||
if backend := s.Backend(); backend != nil {
|
||||
var maxBitrate int
|
||||
if streamType == streamTypeScreen {
|
||||
if streamType == StreamTypeScreen {
|
||||
maxBitrate = backend.maxScreenBitrate
|
||||
} else {
|
||||
maxBitrate = backend.maxStreamBitrate
|
||||
|
@ -900,12 +865,12 @@ func (s *ClientSession) GetOrCreatePublisher(ctx context.Context, mcu Mcu, strea
|
|||
return nil, err
|
||||
}
|
||||
if s.publishers == nil {
|
||||
s.publishers = make(map[string]McuPublisher)
|
||||
s.publishers = make(map[StreamType]McuPublisher)
|
||||
}
|
||||
if prev, found := s.publishers[streamType]; found {
|
||||
// Another thread created the publisher while we were waiting.
|
||||
go func(pub McuPublisher) {
|
||||
closeCtx := context.TODO()
|
||||
closeCtx := context.Background()
|
||||
pub.Close(closeCtx)
|
||||
}(publisher)
|
||||
publisher = prev
|
||||
|
@ -921,18 +886,18 @@ func (s *ClientSession) GetOrCreatePublisher(ctx context.Context, mcu Mcu, strea
|
|||
return publisher, nil
|
||||
}
|
||||
|
||||
func (s *ClientSession) getPublisherLocked(streamType string) McuPublisher {
|
||||
func (s *ClientSession) getPublisherLocked(streamType StreamType) McuPublisher {
|
||||
return s.publishers[streamType]
|
||||
}
|
||||
|
||||
func (s *ClientSession) GetPublisher(streamType string) McuPublisher {
|
||||
func (s *ClientSession) GetPublisher(streamType StreamType) McuPublisher {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
return s.getPublisherLocked(streamType)
|
||||
}
|
||||
|
||||
func (s *ClientSession) GetOrWaitForPublisher(ctx context.Context, streamType string) McuPublisher {
|
||||
func (s *ClientSession) GetOrWaitForPublisher(ctx context.Context, streamType StreamType) McuPublisher {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
|
@ -961,17 +926,18 @@ func (s *ClientSession) GetOrWaitForPublisher(ctx context.Context, streamType st
|
|||
}
|
||||
}
|
||||
|
||||
func (s *ClientSession) GetOrCreateSubscriber(ctx context.Context, mcu Mcu, id string, streamType string) (McuSubscriber, error) {
|
||||
func (s *ClientSession) GetOrCreateSubscriber(ctx context.Context, mcu Mcu, id string, streamType StreamType) (McuSubscriber, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
// TODO(jojo): Add method to remove subscribers.
|
||||
|
||||
subscriber, found := s.subscribers[id+"|"+streamType]
|
||||
subscriber, found := s.subscribers[getStreamId(id, streamType)]
|
||||
if !found {
|
||||
client := s.getClientUnlocked()
|
||||
s.mu.Unlock()
|
||||
var err error
|
||||
subscriber, err = mcu.NewSubscriber(ctx, s, id, streamType)
|
||||
subscriber, err = mcu.NewSubscriber(ctx, s, id, streamType, client)
|
||||
s.mu.Lock()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -979,15 +945,15 @@ func (s *ClientSession) GetOrCreateSubscriber(ctx context.Context, mcu Mcu, id s
|
|||
if s.subscribers == nil {
|
||||
s.subscribers = make(map[string]McuSubscriber)
|
||||
}
|
||||
if prev, found := s.subscribers[id+"|"+streamType]; found {
|
||||
if prev, found := s.subscribers[getStreamId(id, streamType)]; found {
|
||||
// Another thread created the subscriber while we were waiting.
|
||||
go func(sub McuSubscriber) {
|
||||
closeCtx := context.TODO()
|
||||
closeCtx := context.Background()
|
||||
sub.Close(closeCtx)
|
||||
}(subscriber)
|
||||
subscriber = prev
|
||||
} else {
|
||||
s.subscribers[id+"|"+streamType] = subscriber
|
||||
s.subscribers[getStreamId(id, streamType)] = subscriber
|
||||
}
|
||||
log.Printf("Subscribing %s from %s as %s in session %s", streamType, id, subscriber.Id(), s.PublicId())
|
||||
}
|
||||
|
@ -995,11 +961,11 @@ func (s *ClientSession) GetOrCreateSubscriber(ctx context.Context, mcu Mcu, id s
|
|||
return subscriber, nil
|
||||
}
|
||||
|
||||
func (s *ClientSession) GetSubscriber(id string, streamType string) McuSubscriber {
|
||||
func (s *ClientSession) GetSubscriber(id string, streamType StreamType) McuSubscriber {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
return s.subscribers[id+"|"+streamType]
|
||||
return s.subscribers[getStreamId(id, streamType)]
|
||||
}
|
||||
|
||||
func (s *ClientSession) ProcessAsyncRoomMessage(message *AsyncMessage) {
|
||||
|
@ -1023,10 +989,10 @@ func (s *ClientSession) processAsyncMessage(message *AsyncMessage) {
|
|||
defer s.mu.Unlock()
|
||||
|
||||
if !s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_MEDIA) {
|
||||
if publisher, found := s.publishers[streamTypeVideo]; found {
|
||||
if publisher, found := s.publishers[StreamTypeVideo]; found {
|
||||
if (publisher.HasMedia(MediaTypeAudio) && !s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_AUDIO)) ||
|
||||
(publisher.HasMedia(MediaTypeVideo) && !s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_VIDEO)) {
|
||||
delete(s.publishers, streamTypeVideo)
|
||||
delete(s.publishers, StreamTypeVideo)
|
||||
log.Printf("Session %s is no longer allowed to publish media, closing publisher %s", s.PublicId(), publisher.Id())
|
||||
go func() {
|
||||
publisher.Close(context.Background())
|
||||
|
@ -1036,8 +1002,8 @@ func (s *ClientSession) processAsyncMessage(message *AsyncMessage) {
|
|||
}
|
||||
}
|
||||
if !s.hasPermissionLocked(PERMISSION_MAY_PUBLISH_SCREEN) {
|
||||
if publisher, found := s.publishers[streamTypeScreen]; found {
|
||||
delete(s.publishers, streamTypeScreen)
|
||||
if publisher, found := s.publishers[StreamTypeScreen]; found {
|
||||
delete(s.publishers, StreamTypeScreen)
|
||||
log.Printf("Session %s is no longer allowed to publish screen, closing publisher %s", s.PublicId(), publisher.Id())
|
||||
go func() {
|
||||
publisher.Close(context.Background())
|
||||
|
@ -1056,10 +1022,10 @@ func (s *ClientSession) processAsyncMessage(message *AsyncMessage) {
|
|||
case "sendoffer":
|
||||
// Process asynchronously to not block other messages received.
|
||||
go func() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), s.hub.mcuTimeout)
|
||||
ctx, cancel := context.WithTimeout(s.Context(), s.hub.mcuTimeout)
|
||||
defer cancel()
|
||||
|
||||
mc, err := s.GetOrCreateSubscriber(ctx, s.hub.mcu, message.SendOffer.SessionId, message.SendOffer.Data.RoomType)
|
||||
mc, err := s.GetOrCreateSubscriber(ctx, s.hub.mcu, message.SendOffer.SessionId, StreamType(message.SendOffer.Data.RoomType))
|
||||
if err != nil {
|
||||
log.Printf("Could not create MCU subscriber for session %s to process sendoffer in %s: %s", message.SendOffer.SessionId, s.PublicId(), err)
|
||||
if err := s.events.PublishSessionMessage(message.SendOffer.SessionId, s.backend, &AsyncMessage{
|
||||
|
@ -1088,7 +1054,7 @@ func (s *ClientSession) processAsyncMessage(message *AsyncMessage) {
|
|||
return
|
||||
}
|
||||
|
||||
mc.SendMessage(context.TODO(), nil, message.SendOffer.Data, func(err error, response map[string]interface{}) {
|
||||
mc.SendMessage(s.Context(), nil, message.SendOffer.Data, func(err error, response map[string]interface{}) {
|
||||
if err != nil {
|
||||
log.Printf("Could not send MCU message %+v for session %s to %s: %s", message.SendOffer.Data, message.SendOffer.SessionId, s.PublicId(), err)
|
||||
if err := s.events.PublishSessionMessage(message.SendOffer.SessionId, s.backend, &AsyncMessage{
|
||||
|
@ -1146,13 +1112,13 @@ func (s *ClientSession) storePendingMessage(message *ServerMessage) {
|
|||
func filterDisplayNames(events []*EventServerMessageSessionEntry) []*EventServerMessageSessionEntry {
|
||||
result := make([]*EventServerMessageSessionEntry, 0, len(events))
|
||||
for _, event := range events {
|
||||
if event.User == nil {
|
||||
if len(event.User) == 0 {
|
||||
result = append(result, event)
|
||||
continue
|
||||
}
|
||||
|
||||
var userdata map[string]interface{}
|
||||
if err := json.Unmarshal(*event.User, &userdata); err != nil {
|
||||
if err := json.Unmarshal(event.User, &userdata); err != nil {
|
||||
result = append(result, event)
|
||||
continue
|
||||
}
|
||||
|
@ -1178,7 +1144,7 @@ func filterDisplayNames(events []*EventServerMessageSessionEntry) []*EventServer
|
|||
}
|
||||
|
||||
e := event.Clone()
|
||||
e.User = (*json.RawMessage)(&data)
|
||||
e.User = data
|
||||
result = append(result, e)
|
||||
}
|
||||
return result
|
||||
|
@ -1273,12 +1239,12 @@ func (s *ClientSession) filterMessage(message *ServerMessage) *ServerMessage {
|
|||
delete(s.seenJoinedEvents, e)
|
||||
}
|
||||
case "message":
|
||||
if message.Event.Message == nil || message.Event.Message.Data == nil || len(*message.Event.Message.Data) == 0 || !s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) {
|
||||
if message.Event.Message == nil || len(message.Event.Message.Data) == 0 || !s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) {
|
||||
return message
|
||||
}
|
||||
|
||||
var data RoomEventMessageData
|
||||
if err := json.Unmarshal(*message.Event.Message.Data, &data); err != nil {
|
||||
if err := json.Unmarshal(message.Event.Message.Data, &data); err != nil {
|
||||
return message
|
||||
}
|
||||
|
||||
|
@ -1295,7 +1261,7 @@ func (s *ClientSession) filterMessage(message *ServerMessage) *ServerMessage {
|
|||
Target: message.Event.Target,
|
||||
Message: &RoomEventMessage{
|
||||
RoomId: message.Event.Message.RoomId,
|
||||
Data: (*json.RawMessage)(&encoded),
|
||||
Data: encoded,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -1305,9 +1271,9 @@ func (s *ClientSession) filterMessage(message *ServerMessage) *ServerMessage {
|
|||
}
|
||||
}
|
||||
case "message":
|
||||
if message.Message != nil && message.Message.Data != nil && len(*message.Message.Data) > 0 && s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) {
|
||||
if message.Message != nil && len(message.Message.Data) > 0 && s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) {
|
||||
var data MessageServerMessageData
|
||||
if err := json.Unmarshal(*message.Message.Data, &data); err != nil {
|
||||
if err := json.Unmarshal(message.Message.Data, &data); err != nil {
|
||||
return message
|
||||
}
|
||||
|
||||
|
@ -1361,7 +1327,7 @@ func (s *ClientSession) filterAsyncMessage(msg *AsyncMessage) *ServerMessage {
|
|||
}
|
||||
}
|
||||
|
||||
func (s *ClientSession) NotifySessionResumed(client *Client) {
|
||||
func (s *ClientSession) NotifySessionResumed(client HandlerClient) {
|
||||
s.mu.Lock()
|
||||
if len(s.pendingClientMessages) == 0 {
|
||||
s.mu.Unlock()
|
||||
|
|
|
@ -117,6 +117,7 @@ func Test_permissionsEqual(t *testing.T) {
|
|||
for idx, test := range tests {
|
||||
test := test
|
||||
t.Run(strconv.Itoa(idx), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
equal := permissionsEqual(test.a, test.b)
|
||||
if equal != test.equal {
|
||||
t.Errorf("Expected %+v to be %s to %+v but was %s", test.a, equalStrings[test.equal], test.b, equalStrings[equal])
|
||||
|
@ -126,12 +127,17 @@ func Test_permissionsEqual(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBandwidth_Client(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
hub, _, _, server := CreateHubForTest(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
||||
defer cancel()
|
||||
|
||||
mcu, err := NewTestMCU()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := mcu.Start(); err != nil {
|
||||
} else if err := mcu.Start(ctx); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer mcu.Stop()
|
||||
|
@ -145,9 +151,6 @@ func TestBandwidth_Client(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
||||
defer cancel()
|
||||
|
||||
hello, err := client.RunUntilHello(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -198,6 +201,8 @@ func TestBandwidth_Client(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBandwidth_Backend(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
hub, _, _, server := CreateHubWithMultipleBackendsForTest(t)
|
||||
|
||||
u, err := url.Parse(server.URL + "/one")
|
||||
|
@ -212,26 +217,26 @@ func TestBandwidth_Backend(t *testing.T) {
|
|||
backend.maxScreenBitrate = 1000
|
||||
backend.maxStreamBitrate = 2000
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
||||
defer cancel()
|
||||
|
||||
mcu, err := NewTestMCU()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if err := mcu.Start(); err != nil {
|
||||
} else if err := mcu.Start(ctx); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer mcu.Stop()
|
||||
|
||||
hub.SetMcu(mcu)
|
||||
|
||||
streamTypes := []string{
|
||||
streamTypeVideo,
|
||||
streamTypeScreen,
|
||||
streamTypes := []StreamType{
|
||||
StreamTypeVideo,
|
||||
StreamTypeScreen,
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
||||
defer cancel()
|
||||
|
||||
for _, streamType := range streamTypes {
|
||||
t.Run(streamType, func(t *testing.T) {
|
||||
t.Run(string(streamType), func(t *testing.T) {
|
||||
client := NewTestClient(t, server, hub)
|
||||
defer client.CloseWithBye()
|
||||
|
||||
|
@ -268,7 +273,7 @@ func TestBandwidth_Backend(t *testing.T) {
|
|||
}, MessageClientMessageData{
|
||||
Type: "offer",
|
||||
Sid: "54321",
|
||||
RoomType: streamType,
|
||||
RoomType: string(streamType),
|
||||
Bitrate: bitrate,
|
||||
Payload: map[string]interface{}{
|
||||
"sdp": MockSdpOfferAudioAndVideo,
|
||||
|
@ -287,7 +292,7 @@ func TestBandwidth_Backend(t *testing.T) {
|
|||
}
|
||||
|
||||
var expectBitrate int
|
||||
if streamType == streamTypeVideo {
|
||||
if streamType == StreamTypeVideo {
|
||||
expectBitrate = backend.maxStreamBitrate
|
||||
} else {
|
||||
expectBitrate = backend.maxScreenBitrate
|
||||
|
|
87
config.go
Normal file
87
config.go
Normal file
|
@ -0,0 +1,87 @@
|
|||
/**
|
||||
* Standalone signaling server for the Nextcloud Spreed app.
|
||||
* Copyright (C) 2023 struktur AG
|
||||
*
|
||||
* @author Joachim Bauch <bauch@struktur.de>
|
||||
*
|
||||
* @license GNU AGPL version 3 or any later version
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
package signaling
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"regexp"
|
||||
|
||||
"github.com/dlintw/goconf"
|
||||
)
|
||||
|
||||
var (
|
||||
searchVarsRegexp = regexp.MustCompile(`\$\([A-Za-z][A-Za-z0-9_]*\)`)
|
||||
)
|
||||
|
||||
func replaceEnvVars(s string) string {
|
||||
return searchVarsRegexp.ReplaceAllStringFunc(s, func(name string) string {
|
||||
name = name[2 : len(name)-1]
|
||||
value, found := os.LookupEnv(name)
|
||||
if !found {
|
||||
return name
|
||||
}
|
||||
|
||||
return value
|
||||
})
|
||||
}
|
||||
|
||||
// GetStringOptionWithEnv will get the string option and resolve any environment
|
||||
// variable references in the form "$(VAR)".
|
||||
func GetStringOptionWithEnv(config *goconf.ConfigFile, section string, option string) (string, error) {
|
||||
value, err := config.GetString(section, option)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
value = replaceEnvVars(value)
|
||||
return value, nil
|
||||
}
|
||||
|
||||
func GetStringOptions(config *goconf.ConfigFile, section string, ignoreErrors bool) (map[string]string, error) {
|
||||
options, _ := config.GetOptions(section)
|
||||
if len(options) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
result := make(map[string]string)
|
||||
for _, option := range options {
|
||||
value, err := GetStringOptionWithEnv(config, section, option)
|
||||
if err != nil {
|
||||
if ignoreErrors {
|
||||
continue
|
||||
}
|
||||
|
||||
var ge goconf.GetError
|
||||
if errors.As(err, &ge) && ge.Reason == goconf.OptionNotFound {
|
||||
// Skip options from "default" section.
|
||||
continue
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result[option] = value
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
92
config_test.go
Normal file
92
config_test.go
Normal file
|
@ -0,0 +1,92 @@
|
|||
/**
|
||||
* Standalone signaling server for the Nextcloud Spreed app.
|
||||
* Copyright (C) 2023 struktur AG
|
||||
*
|
||||
* @author Joachim Bauch <bauch@struktur.de>
|
||||
*
|
||||
* @license GNU AGPL version 3 or any later version
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
package signaling
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/dlintw/goconf"
|
||||
)
|
||||
|
||||
func TestStringOptions(t *testing.T) {
|
||||
t.Setenv("FOO", "foo")
|
||||
expected := map[string]string{
|
||||
"one": "1",
|
||||
"two": "2",
|
||||
"foo": "http://foo/1",
|
||||
}
|
||||
config := goconf.NewConfigFile()
|
||||
for k, v := range expected {
|
||||
if k == "foo" {
|
||||
config.AddOption("foo", k, "http://$(FOO)/1")
|
||||
} else {
|
||||
config.AddOption("foo", k, v)
|
||||
}
|
||||
}
|
||||
config.AddOption("default", "three", "3")
|
||||
|
||||
options, err := GetStringOptions(config, "foo", false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(expected, options) {
|
||||
t.Errorf("expected %+v, got %+v", expected, options)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringOptionWithEnv(t *testing.T) {
|
||||
t.Setenv("FOO", "foo")
|
||||
t.Setenv("BAR", "")
|
||||
t.Setenv("BA_R", "bar")
|
||||
|
||||
config := goconf.NewConfigFile()
|
||||
config.AddOption("test", "foo", "http://$(FOO)/1")
|
||||
config.AddOption("test", "bar", "http://$(BAR)/2")
|
||||
config.AddOption("test", "bar2", "http://$(BA_R)/3")
|
||||
config.AddOption("test", "baz", "http://$(BAZ)/4")
|
||||
config.AddOption("test", "inv1", "http://$(FOO")
|
||||
config.AddOption("test", "inv2", "http://$FOO)")
|
||||
config.AddOption("test", "inv3", "http://$((FOO)")
|
||||
config.AddOption("test", "inv4", "http://$(F.OO)")
|
||||
|
||||
expected := map[string]string{
|
||||
"foo": "http://foo/1",
|
||||
"bar": "http:///2",
|
||||
"bar2": "http://bar/3",
|
||||
"baz": "http://BAZ/4",
|
||||
"inv1": "http://$(FOO",
|
||||
"inv2": "http://$FOO)",
|
||||
"inv3": "http://$((FOO)",
|
||||
"inv4": "http://$(F.OO)",
|
||||
}
|
||||
for k, v := range expected {
|
||||
value, err := GetStringOptionWithEnv(config, "test", k)
|
||||
if err != nil {
|
||||
t.Errorf("expected value for %s, got %s", k, err)
|
||||
} else if value != v {
|
||||
t.Errorf("expected value %s for %s, got %s", v, k, value)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -1,7 +1,7 @@
|
|||
package signaling
|
||||
|
||||
// This file has been automatically generated, do not modify.
|
||||
// Source: https://datahub.io/core/country-codes/r/country-codes.json
|
||||
// Source: https://github.com/datasets/country-codes/raw/master/data/country-codes.csv
|
||||
|
||||
var (
|
||||
ContinentMap = map[string][]string{
|
||||
|
|
|
@ -35,6 +35,7 @@ func TestDeferredExecutor_MultiClose(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDeferredExecutor_QueueSize(t *testing.T) {
|
||||
t.Parallel()
|
||||
e := NewDeferredExecutor(0)
|
||||
defer e.waitForStop()
|
||||
defer e.Close()
|
||||
|
@ -100,6 +101,7 @@ func TestDeferredExecutor_CloseFromFunc(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDeferredExecutor_DeferAfterClose(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
e := NewDeferredExecutor(64)
|
||||
defer e.waitForStop()
|
||||
|
||||
|
|
343
dns_monitor.go
Normal file
343
dns_monitor.go
Normal file
|
@ -0,0 +1,343 @@
|
|||
/**
|
||||
* Standalone signaling server for the Nextcloud Spreed app.
|
||||
* Copyright (C) 2023 struktur AG
|
||||
*
|
||||
* @author Joachim Bauch <bauch@struktur.de>
|
||||
*
|
||||
* @license GNU AGPL version 3 or any later version
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
package signaling
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
lookupDnsMonitorIP = net.LookupIP
|
||||
)
|
||||
|
||||
const (
|
||||
defaultDnsMonitorInterval = time.Second
|
||||
)
|
||||
|
||||
type DnsMonitorCallback = func(entry *DnsMonitorEntry, all []net.IP, add []net.IP, keep []net.IP, remove []net.IP)
|
||||
|
||||
type DnsMonitorEntry struct {
|
||||
entry atomic.Pointer[dnsMonitorEntry]
|
||||
url string
|
||||
callback DnsMonitorCallback
|
||||
}
|
||||
|
||||
func (e *DnsMonitorEntry) URL() string {
|
||||
return e.url
|
||||
}
|
||||
|
||||
type dnsMonitorEntry struct {
|
||||
hostname string
|
||||
hostIP net.IP
|
||||
|
||||
mu sync.Mutex
|
||||
ips []net.IP
|
||||
entries map[*DnsMonitorEntry]bool
|
||||
}
|
||||
|
||||
func (e *dnsMonitorEntry) setIPs(ips []net.IP, fromIP bool) {
|
||||
e.mu.Lock()
|
||||
defer e.mu.Unlock()
|
||||
|
||||
empty := len(e.ips) == 0
|
||||
if empty {
|
||||
// Simple case: initial lookup.
|
||||
if len(ips) > 0 {
|
||||
e.ips = ips
|
||||
e.runCallbacks(ips, ips, nil, nil)
|
||||
}
|
||||
return
|
||||
} else if fromIP {
|
||||
// No more updates possible for IP addresses.
|
||||
return
|
||||
} else if len(ips) == 0 {
|
||||
// Simple case: no records received from lookup.
|
||||
if !empty {
|
||||
removed := e.ips
|
||||
e.ips = nil
|
||||
e.runCallbacks(nil, nil, nil, removed)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var newIPs []net.IP
|
||||
var addedIPs []net.IP
|
||||
var removedIPs []net.IP
|
||||
var keepIPs []net.IP
|
||||
for _, oldIP := range e.ips {
|
||||
found := false
|
||||
for idx, newIP := range ips {
|
||||
if oldIP.Equal(newIP) {
|
||||
ips = append(ips[:idx], ips[idx+1:]...)
|
||||
found = true
|
||||
keepIPs = append(keepIPs, oldIP)
|
||||
newIPs = append(newIPs, oldIP)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
removedIPs = append(removedIPs, oldIP)
|
||||
}
|
||||
}
|
||||
|
||||
if len(ips) > 0 {
|
||||
addedIPs = append(addedIPs, ips...)
|
||||
newIPs = append(newIPs, ips...)
|
||||
}
|
||||
e.ips = newIPs
|
||||
|
||||
if len(addedIPs) > 0 || len(removedIPs) > 0 {
|
||||
e.runCallbacks(newIPs, addedIPs, keepIPs, removedIPs)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *dnsMonitorEntry) addEntry(entry *DnsMonitorEntry) {
|
||||
e.mu.Lock()
|
||||
defer e.mu.Unlock()
|
||||
|
||||
e.entries[entry] = true
|
||||
}
|
||||
|
||||
func (e *dnsMonitorEntry) removeEntry(entry *DnsMonitorEntry) bool {
|
||||
e.mu.Lock()
|
||||
defer e.mu.Unlock()
|
||||
|
||||
delete(e.entries, entry)
|
||||
return len(e.entries) == 0
|
||||
}
|
||||
|
||||
func (e *dnsMonitorEntry) runCallbacks(all []net.IP, add []net.IP, keep []net.IP, remove []net.IP) {
|
||||
for entry := range e.entries {
|
||||
entry.callback(entry, all, add, keep, remove)
|
||||
}
|
||||
}
|
||||
|
||||
type DnsMonitor struct {
|
||||
interval time.Duration
|
||||
|
||||
stopCtx context.Context
|
||||
stopFunc func()
|
||||
stopped chan struct{}
|
||||
|
||||
mu sync.RWMutex
|
||||
cond *sync.Cond
|
||||
hostnames map[string]*dnsMonitorEntry
|
||||
|
||||
hasRemoved atomic.Bool
|
||||
|
||||
// Can be overwritten from tests.
|
||||
checkHostnames func()
|
||||
}
|
||||
|
||||
func NewDnsMonitor(interval time.Duration) (*DnsMonitor, error) {
|
||||
if interval < 0 {
|
||||
interval = defaultDnsMonitorInterval
|
||||
}
|
||||
|
||||
stopCtx, stopFunc := context.WithCancel(context.Background())
|
||||
monitor := &DnsMonitor{
|
||||
interval: interval,
|
||||
|
||||
stopCtx: stopCtx,
|
||||
stopFunc: stopFunc,
|
||||
stopped: make(chan struct{}),
|
||||
|
||||
hostnames: make(map[string]*dnsMonitorEntry),
|
||||
}
|
||||
monitor.cond = sync.NewCond(&monitor.mu)
|
||||
monitor.checkHostnames = monitor.doCheckHostnames
|
||||
return monitor, nil
|
||||
}
|
||||
|
||||
func (m *DnsMonitor) Start() error {
|
||||
go m.run()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *DnsMonitor) Stop() {
|
||||
m.stopFunc()
|
||||
m.cond.Signal()
|
||||
<-m.stopped
|
||||
}
|
||||
|
||||
func (m *DnsMonitor) Add(target string, callback DnsMonitorCallback) (*DnsMonitorEntry, error) {
|
||||
var hostname string
|
||||
if strings.Contains(target, "://") {
|
||||
// Full URL passed.
|
||||
parsed, err := url.Parse(target)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hostname = parsed.Host
|
||||
} else {
|
||||
// Hostname only passed.
|
||||
hostname = target
|
||||
}
|
||||
if h, _, err := net.SplitHostPort(hostname); err == nil {
|
||||
hostname = h
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
e := &DnsMonitorEntry{
|
||||
url: target,
|
||||
callback: callback,
|
||||
}
|
||||
|
||||
entry, found := m.hostnames[hostname]
|
||||
if !found {
|
||||
entry = &dnsMonitorEntry{
|
||||
hostname: hostname,
|
||||
hostIP: net.ParseIP(hostname),
|
||||
entries: make(map[*DnsMonitorEntry]bool),
|
||||
}
|
||||
m.hostnames[hostname] = entry
|
||||
}
|
||||
e.entry.Store(entry)
|
||||
entry.addEntry(e)
|
||||
m.cond.Signal()
|
||||
return e, nil
|
||||
}
|
||||
|
||||
func (m *DnsMonitor) Remove(entry *DnsMonitorEntry) {
|
||||
oldEntry := entry.entry.Swap(nil)
|
||||
if oldEntry == nil {
|
||||
// Already removed.
|
||||
return
|
||||
}
|
||||
|
||||
locked := m.mu.TryLock()
|
||||
// Spin-lock for simple cases that resolve immediately to avoid deferred removal.
|
||||
for i := 0; !locked && i < 1000; i++ {
|
||||
time.Sleep(time.Nanosecond)
|
||||
locked = m.mu.TryLock()
|
||||
}
|
||||
if !locked {
|
||||
// Currently processing callbacks for this entry, need to defer removal.
|
||||
m.hasRemoved.Store(true)
|
||||
return
|
||||
}
|
||||
defer m.mu.Unlock()
|
||||
|
||||
e, found := m.hostnames[oldEntry.hostname]
|
||||
if !found {
|
||||
return
|
||||
}
|
||||
|
||||
if e.removeEntry(entry) {
|
||||
delete(m.hostnames, e.hostname)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *DnsMonitor) clearRemoved() {
|
||||
if !m.hasRemoved.CompareAndSwap(true, false) {
|
||||
return
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
for hostname, entry := range m.hostnames {
|
||||
deleted := false
|
||||
for e := range entry.entries {
|
||||
if e.entry.Load() == nil {
|
||||
delete(entry.entries, e)
|
||||
deleted = true
|
||||
}
|
||||
}
|
||||
|
||||
if deleted && len(entry.entries) == 0 {
|
||||
delete(m.hostnames, hostname)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *DnsMonitor) waitForEntries() (waited bool) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
for len(m.hostnames) == 0 && m.stopCtx.Err() == nil {
|
||||
m.cond.Wait()
|
||||
waited = true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (m *DnsMonitor) run() {
|
||||
ticker := time.NewTicker(m.interval)
|
||||
defer ticker.Stop()
|
||||
defer close(m.stopped)
|
||||
|
||||
for {
|
||||
if m.waitForEntries() {
|
||||
ticker.Reset(m.interval)
|
||||
if m.stopCtx.Err() == nil {
|
||||
// Initial check when a new entry was added. More checks will be
|
||||
// triggered by the Ticker.
|
||||
m.checkHostnames()
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case <-m.stopCtx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
m.checkHostnames()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *DnsMonitor) doCheckHostnames() {
|
||||
m.clearRemoved()
|
||||
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
for _, entry := range m.hostnames {
|
||||
m.checkHostname(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *DnsMonitor) checkHostname(entry *dnsMonitorEntry) {
|
||||
if len(entry.hostIP) > 0 {
|
||||
entry.setIPs([]net.IP{entry.hostIP}, true)
|
||||
return
|
||||
}
|
||||
|
||||
ips, err := lookupDnsMonitorIP(entry.hostname)
|
||||
if err != nil {
|
||||
log.Printf("Could not lookup %s: %s", entry.hostname, err)
|
||||
return
|
||||
}
|
||||
|
||||
entry.setIPs(ips, false)
|
||||
}
|
428
dns_monitor_test.go
Normal file
428
dns_monitor_test.go
Normal file
|
@ -0,0 +1,428 @@
|
|||
/**
|
||||
* Standalone signaling server for the Nextcloud Spreed app.
|
||||
* Copyright (C) 2023 struktur AG
|
||||
*
|
||||
* @author Joachim Bauch <bauch@struktur.de>
|
||||
*
|
||||
* @license GNU AGPL version 3 or any later version
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
package signaling
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
type mockDnsLookup struct {
|
||||
sync.RWMutex
|
||||
|
||||
ips map[string][]net.IP
|
||||
}
|
||||
|
||||
func newMockDnsLookupForTest(t *testing.T) *mockDnsLookup {
|
||||
mock := &mockDnsLookup{
|
||||
ips: make(map[string][]net.IP),
|
||||
}
|
||||
prev := lookupDnsMonitorIP
|
||||
t.Cleanup(func() {
|
||||
lookupDnsMonitorIP = prev
|
||||
})
|
||||
lookupDnsMonitorIP = mock.lookup
|
||||
return mock
|
||||
}
|
||||
|
||||
func (m *mockDnsLookup) Set(host string, ips []net.IP) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
m.ips[host] = ips
|
||||
}
|
||||
|
||||
func (m *mockDnsLookup) Get(host string) []net.IP {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
return m.ips[host]
|
||||
}
|
||||
|
||||
func (m *mockDnsLookup) lookup(host string) ([]net.IP, error) {
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
|
||||
ips, found := m.ips[host]
|
||||
if !found {
|
||||
return nil, &net.DNSError{
|
||||
Err: fmt.Sprintf("could not resolve %s", host),
|
||||
Name: host,
|
||||
IsNotFound: true,
|
||||
}
|
||||
}
|
||||
|
||||
return append([]net.IP{}, ips...), nil
|
||||
}
|
||||
|
||||
func newDnsMonitorForTest(t *testing.T, interval time.Duration) *DnsMonitor {
|
||||
t.Helper()
|
||||
|
||||
monitor, err := NewDnsMonitor(interval)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
monitor.Stop()
|
||||
})
|
||||
|
||||
if err := monitor.Start(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return monitor
|
||||
}
|
||||
|
||||
type dnsMonitorReceiverRecord struct {
|
||||
all []net.IP
|
||||
add []net.IP
|
||||
keep []net.IP
|
||||
remove []net.IP
|
||||
}
|
||||
|
||||
func (r *dnsMonitorReceiverRecord) Equal(other *dnsMonitorReceiverRecord) bool {
|
||||
return r == other || (reflect.DeepEqual(r.add, other.add) &&
|
||||
reflect.DeepEqual(r.keep, other.keep) &&
|
||||
reflect.DeepEqual(r.remove, other.remove))
|
||||
}
|
||||
|
||||
// String formats the record for readable test failure messages.
func (r *dnsMonitorReceiverRecord) String() string {
	return fmt.Sprintf("all=%v, add=%v, keep=%v, remove=%v", r.all, r.add, r.keep, r.remove)
}
|
||||
|
||||
var (
	// expectNone is a sentinel value: while stored in
	// dnsMonitorReceiver.expected, any incoming lookup event is
	// reported as a test failure.
	expectNone = &dnsMonitorReceiverRecord{}
)
|
||||
|
||||
// dnsMonitorReceiver is a test double that records OnLookup callbacks
// and verifies them against previously declared expectations.
type dnsMonitorReceiver struct {
	sync.Mutex

	t        *testing.T
	expected *dnsMonitorReceiverRecord // next expected event, or the expectNone sentinel
	received *dnsMonitorReceiverRecord // event that arrived before being expected, if any
}
|
||||
|
||||
// newDnsMonitorReceiverForTest creates a receiver bound to the given test.
func newDnsMonitorReceiverForTest(t *testing.T) *dnsMonitorReceiver {
	return &dnsMonitorReceiver{
		t: t,
	}
}
|
||||
|
||||
func (r *dnsMonitorReceiver) OnLookup(entry *DnsMonitorEntry, all, add, keep, remove []net.IP) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
|
||||
received := &dnsMonitorReceiverRecord{
|
||||
all: all,
|
||||
add: add,
|
||||
keep: keep,
|
||||
remove: remove,
|
||||
}
|
||||
|
||||
expected := r.expected
|
||||
r.expected = nil
|
||||
if expected == expectNone {
|
||||
r.t.Errorf("expected no event, got %v", received)
|
||||
return
|
||||
}
|
||||
|
||||
if expected == nil {
|
||||
if r.received != nil && !r.received.Equal(received) {
|
||||
r.t.Errorf("already received %v, got %v", r.received, received)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if !expected.Equal(received) {
|
||||
r.t.Errorf("expected %v, got %v", expected, received)
|
||||
}
|
||||
r.received = nil
|
||||
r.expected = nil
|
||||
}
|
||||
|
||||
func (r *dnsMonitorReceiver) WaitForExpected(ctx context.Context) {
|
||||
r.t.Helper()
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
|
||||
ticker := time.NewTicker(time.Microsecond)
|
||||
abort := false
|
||||
for r.expected != nil && !abort {
|
||||
r.Unlock()
|
||||
select {
|
||||
case <-ticker.C:
|
||||
case <-ctx.Done():
|
||||
r.t.Error(ctx.Err())
|
||||
abort = true
|
||||
}
|
||||
r.Lock()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *dnsMonitorReceiver) Expect(all, add, keep, remove []net.IP) {
|
||||
r.t.Helper()
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
|
||||
if r.expected != nil && r.expected != expectNone {
|
||||
r.t.Errorf("didn't get previously expected %v", r.expected)
|
||||
}
|
||||
|
||||
expected := &dnsMonitorReceiverRecord{
|
||||
all: all,
|
||||
add: add,
|
||||
keep: keep,
|
||||
remove: remove,
|
||||
}
|
||||
if r.received != nil && r.received.Equal(expected) {
|
||||
r.received = nil
|
||||
return
|
||||
}
|
||||
|
||||
r.expected = expected
|
||||
}
|
||||
|
||||
func (r *dnsMonitorReceiver) ExpectNone() {
|
||||
r.t.Helper()
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
|
||||
if r.expected != nil && r.expected != expectNone {
|
||||
r.t.Errorf("didn't get previously expected %v", r.expected)
|
||||
}
|
||||
|
||||
r.expected = expectNone
|
||||
}
|
||||
|
||||
// TestDnsMonitor exercises the full lifecycle of a monitored hostname:
// initial resolution, added/kept/removed address deltas, stability
// (no events while nothing changes), and removal of the entry.
func TestDnsMonitor(t *testing.T) {
	lookup := newMockDnsLookupForTest(t)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	interval := time.Millisecond
	monitor := newDnsMonitorForTest(t, interval)

	ip1 := net.ParseIP("192.168.0.1")
	ip2 := net.ParseIP("192.168.1.1")
	ip3 := net.ParseIP("10.1.2.3")
	ips1 := []net.IP{
		ip1,
		ip2,
	}
	lookup.Set("foo", ips1)

	// First lookup: every resolved address is reported as "added".
	rec1 := newDnsMonitorReceiverForTest(t)
	rec1.Expect(ips1, ips1, nil, nil)

	entry1, err := monitor.Add("https://foo:12345", rec1.OnLookup)
	if err != nil {
		t.Fatal(err)
	}
	defer monitor.Remove(entry1)

	rec1.WaitForExpected(ctx)

	// One address added: ip3 is "added", the existing ones are "kept".
	ips2 := []net.IP{
		ip1,
		ip2,
		ip3,
	}
	add2 := []net.IP{ip3}
	keep2 := []net.IP{ip1, ip2}
	rec1.Expect(ips2, add2, keep2, nil)
	lookup.Set("foo", ips2)
	rec1.WaitForExpected(ctx)

	// One address removed: ip1 is "removed", the others are "kept".
	ips3 := []net.IP{
		ip2,
		ip3,
	}
	keep3 := []net.IP{ip2, ip3}
	remove3 := []net.IP{ip1}
	rec1.Expect(ips3, nil, keep3, remove3)
	lookup.Set("foo", ips3)
	rec1.WaitForExpected(ctx)

	// Unchanged resolution must not trigger any event.
	rec1.ExpectNone()
	time.Sleep(5 * interval)

	// Resolution becomes empty: all remaining addresses are "removed".
	remove4 := []net.IP{ip2, ip3}
	rec1.Expect(nil, nil, nil, remove4)
	lookup.Set("foo", nil)
	rec1.WaitForExpected(ctx)

	rec1.ExpectNone()
	time.Sleep(5 * interval)

	// Removing multiple times is supported.
	monitor.Remove(entry1)
	monitor.Remove(entry1)

	// No more events after removing.
	lookup.Set("foo", ips1)
	rec1.ExpectNone()
	time.Sleep(5 * interval)
}
|
||||
|
||||
func TestDnsMonitorIP(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel()
|
||||
|
||||
interval := time.Millisecond
|
||||
monitor := newDnsMonitorForTest(t, interval)
|
||||
|
||||
ip := "192.168.0.1"
|
||||
ips := []net.IP{
|
||||
net.ParseIP(ip),
|
||||
}
|
||||
|
||||
rec1 := newDnsMonitorReceiverForTest(t)
|
||||
rec1.Expect(ips, ips, nil, nil)
|
||||
|
||||
entry, err := monitor.Add(ip+":12345", rec1.OnLookup)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer monitor.Remove(entry)
|
||||
|
||||
rec1.WaitForExpected(ctx)
|
||||
|
||||
rec1.ExpectNone()
|
||||
time.Sleep(5 * interval)
|
||||
}
|
||||
|
||||
// TestDnsMonitorNoLookupIfEmpty verifies that the monitor performs no
// hostname checks while no entries are registered.
func TestDnsMonitorNoLookupIfEmpty(t *testing.T) {
	interval := time.Millisecond
	monitor := newDnsMonitorForTest(t, interval)

	// Wrap the internal check hook to detect whether it gets invoked.
	var checked atomic.Bool
	monitor.checkHostnames = func() {
		checked.Store(true)
		monitor.doCheckHostnames()
	}

	time.Sleep(10 * interval)
	if checked.Load() {
		t.Error("should not have checked hostnames")
	}
}
|
||||
|
||||
// deadlockMonitorReceiver is a callback that takes its own locks and
// spawns work from OnLookup, used to check that the DnsMonitor does not
// hold internal locks while invoking callbacks (deadlock regression).
type deadlockMonitorReceiver struct {
	t       *testing.T
	monitor *DnsMonitor

	mu sync.RWMutex
	wg sync.WaitGroup

	entry     *DnsMonitorEntry
	started   chan struct{} // closed once the callback-spawned goroutine is running
	triggered bool          // ensures the goroutine is spawned only once
	closed    atomic.Bool   // set after the entry was removed
}
|
||||
|
||||
// newDeadlockMonitorReceiver creates a receiver bound to the given test
// and monitor; "started" is signaled from the first OnLookup callback.
func newDeadlockMonitorReceiver(t *testing.T, monitor *DnsMonitor) *deadlockMonitorReceiver {
	return &deadlockMonitorReceiver{
		t:       t,
		monitor: monitor,
		started: make(chan struct{}),
	}
}
|
||||
|
||||
// OnLookup is invoked by the monitor. On the first call it spawns a
// goroutine that holds the receiver's read lock while sleeping, so
// the receiver's locking interleaves with in-flight monitor callbacks.
// Any event delivered after Close() is reported as a failure.
func (r *deadlockMonitorReceiver) OnLookup(entry *DnsMonitorEntry, all []net.IP, add []net.IP, keep []net.IP, remove []net.IP) {
	if r.closed.Load() {
		r.t.Error("received lookup after closed")
		return
	}

	r.mu.Lock()
	defer r.mu.Unlock()

	// Only spawn the helper goroutine for the first event.
	if r.triggered {
		return
	}

	r.triggered = true
	r.wg.Add(1)
	go func() {
		defer r.wg.Done()

		// Hold the read lock while sleeping so lock acquisition in
		// Close() overlaps with this still-running helper.
		r.mu.RLock()
		defer r.mu.RUnlock()

		close(r.started)
		time.Sleep(50 * time.Millisecond)
	}()
}
|
||||
|
||||
func (r *deadlockMonitorReceiver) Start() {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
entry, err := r.monitor.Add("foo", r.OnLookup)
|
||||
if err != nil {
|
||||
r.t.Errorf("error adding listener: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
r.entry = entry
|
||||
}
|
||||
|
||||
func (r *deadlockMonitorReceiver) Close() {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if r.entry != nil {
|
||||
r.monitor.Remove(r.entry)
|
||||
r.closed.Store(true)
|
||||
}
|
||||
r.wg.Wait()
|
||||
}
|
||||
|
||||
// TestDnsMonitorDeadlock triggers a callback that spawns a goroutine
// taking the receiver's own locks, then removes the entry while a
// lookup may be in flight. It verifies the monitor neither deadlocks
// nor keeps state for removed hostnames.
func TestDnsMonitorDeadlock(t *testing.T) {
	lookup := newMockDnsLookupForTest(t)
	ip1 := net.ParseIP("192.168.0.1")
	ip2 := net.ParseIP("192.168.0.2")
	lookup.Set("foo", []net.IP{ip1})

	interval := time.Millisecond
	monitor := newDnsMonitorForTest(t, interval)

	r := newDeadlockMonitorReceiver(t, monitor)
	r.Start()
	// Wait until the first callback has spawned its helper goroutine.
	<-r.started
	// Change the resolution so another callback is pending, then close.
	lookup.Set("foo", []net.IP{ip2})
	r.Close()
	lookup.Set("foo", []net.IP{ip1})
	time.Sleep(10 * interval)
	monitor.mu.Lock()
	defer monitor.mu.Unlock()
	if len(monitor.hostnames) > 0 {
		t.Errorf("should have cleared hostnames, got %+v", monitor.hostnames)
	}
}
|
|
@ -52,9 +52,10 @@ The running container can be configured through different environment variables:
|
|||
- `TURN_SERVERS`: A comma-separated list of TURN servers to use.
|
||||
- `GEOIP_LICENSE`: License key to use when downloading the MaxMind GeoIP database.
|
||||
- `GEOIP_URL`: Optional URL to download a MaxMind GeoIP database from.
|
||||
- `GEOIP_OVERRIDES`: Optional spae-separated list of overrides for GeoIP lookups.
|
||||
- `CONTINENT_OVERRIDES`: Optional spae-separated list of overrides for continent mappings.
|
||||
- `GEOIP_OVERRIDES`: Optional space-separated list of overrides for GeoIP lookups.
|
||||
- `CONTINENT_OVERRIDES`: Optional space-separated list of overrides for continent mappings.
|
||||
- `STATS_IPS`: Comma-separated list of IP addresses that are allowed to access the stats endpoint.
|
||||
- `TRUSTED_PROXIES`: Comma-separated list of IPs / networks that are trusted proxies.
|
||||
- `GRPC_LISTEN`: IP and port to listen on for GRPC requests.
|
||||
- `GRPC_SERVER_CERTIFICATE`: Certificate to use for the GRPC server.
|
||||
- `GRPC_SERVER_KEY`: Private key to use for the GRPC server.
|
||||
|
@ -99,9 +100,16 @@ The running container can be configured through different environment variables:
|
|||
- `CONFIG`: Optional name of configuration file to use.
|
||||
- `HTTP_LISTEN`: Address of HTTP listener.
|
||||
- `COUNTRY`: Optional ISO 3166 country this proxy is located at.
|
||||
- `EXTERNAL_HOSTNAME`: The external hostname for remote streams. Will try to autodetect if omitted.
|
||||
- `TOKEN_ID`: Id of the token to use when connecting remote streams.
|
||||
- `TOKEN_KEY`: Private key for the configured token id.
|
||||
- `BANDWIDTH_INCOMING`: Optional incoming target bandwidth (in megabits per second).
|
||||
- `BANDWIDTH_OUTGOING`: Optional outgoing target bandwidth (in megabits per second).
|
||||
- `JANUS_URL`: Url to Janus server.
|
||||
- `MAX_STREAM_BITRATE`: Optional maximum bitrate for audio/video streams.
|
||||
- `MAX_SCREEN_BITRATE`: Optional maximum bitrate for screensharing streams.
|
||||
- `STATS_IPS`: Comma-separated list of IP addresses that are allowed to access the stats endpoint.
|
||||
- `TRUSTED_PROXIES`: Comma-separated list of IPs / networks that are trusted proxies.
|
||||
- `ETCD_ENDPOINTS`: Static list of etcd endpoints (if etcd should be used).
|
||||
- `ETCD_DISCOVERY_SRV`: Alternative domain to use for DNS SRV configuration of etcd endpoints (if etcd should be used).
|
||||
- `ETCD_DISCOVERY_SERVICE`: Optional service name for DNS SRV configuration of etcd.
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# Modified from https://gitlab.com/powerpaul17/nc_talk_backend/-/blob/dcbb918d8716dad1eb72a889d1e6aa1e3a543641/docker/janus/Dockerfile
|
||||
FROM alpine:3.18
|
||||
FROM alpine:3.20
|
||||
|
||||
RUN apk add --no-cache curl autoconf automake libtool pkgconf build-base \
|
||||
glib-dev libconfig-dev libnice-dev jansson-dev openssl-dev zlib libsrtp-dev \
|
||||
|
@ -15,30 +15,30 @@ RUN cd /tmp && \
|
|||
git checkout $USRSCTP_VERSION && \
|
||||
./bootstrap && \
|
||||
./configure --prefix=/usr && \
|
||||
make && make install
|
||||
make -j$(nproc) && make install
|
||||
|
||||
# libsrtp
|
||||
ARG LIBSRTP_VERSION=2.4.2
|
||||
ARG LIBSRTP_VERSION=2.6.0
|
||||
RUN cd /tmp && \
|
||||
wget https://github.com/cisco/libsrtp/archive/v$LIBSRTP_VERSION.tar.gz && \
|
||||
tar xfv v$LIBSRTP_VERSION.tar.gz && \
|
||||
cd libsrtp-$LIBSRTP_VERSION && \
|
||||
./configure --prefix=/usr --enable-openssl && \
|
||||
make shared_library && \
|
||||
make shared_library -j$(nproc) && \
|
||||
make install && \
|
||||
rm -fr /libsrtp-$LIBSRTP_VERSION && \
|
||||
rm -f /v$LIBSRTP_VERSION.tar.gz
|
||||
|
||||
# JANUS
|
||||
|
||||
ARG JANUS_VERSION=0.11.8
|
||||
ARG JANUS_VERSION=1.2.2
|
||||
RUN mkdir -p /usr/src/janus && \
|
||||
cd /usr/src/janus && \
|
||||
curl -L https://github.com/meetecho/janus-gateway/archive/v$JANUS_VERSION.tar.gz | tar -xz && \
|
||||
cd /usr/src/janus/janus-gateway-$JANUS_VERSION && \
|
||||
./autogen.sh && \
|
||||
./configure --disable-rabbitmq --disable-mqtt --disable-boringssl && \
|
||||
make && \
|
||||
make -j$(nproc) && \
|
||||
make install && \
|
||||
make configs
|
||||
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
FROM --platform=${BUILDPLATFORM} golang:1.21-alpine AS builder
|
||||
FROM --platform=${BUILDPLATFORM} golang:1.22-alpine AS builder
|
||||
ARG TARGETARCH
|
||||
ARG TARGETOS
|
||||
|
||||
WORKDIR /workdir
|
||||
|
||||
COPY . .
|
||||
RUN apk add --no-cache bash git build-base protobuf && \
|
||||
RUN touch /.dockerenv && \
|
||||
apk add --no-cache bash git build-base protobuf && \
|
||||
if [ -d "vendor" ]; then GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOPROXY=off make proxy; else \
|
||||
GOOS=${TARGETOS} GOARCH=${TARGETARCH} make proxy; fi
|
||||
|
||||
|
@ -18,10 +19,12 @@ RUN adduser -D spreedbackend && \
|
|||
COPY --from=builder /workdir/bin/proxy /usr/bin/nextcloud-spreed-signaling-proxy
|
||||
COPY ./proxy.conf.in /config/proxy.conf.in
|
||||
COPY ./docker/proxy/entrypoint.sh /
|
||||
COPY ./docker/proxy/stop.sh /
|
||||
COPY ./docker/proxy/wait.sh /
|
||||
RUN chown spreedbackend /config
|
||||
RUN /usr/bin/nextcloud-spreed-signaling-proxy -version
|
||||
|
||||
USER spreedbackend
|
||||
|
||||
STOPSIGNAL SIGUSR1
|
||||
ENTRYPOINT [ "/entrypoint.sh" ]
|
||||
CMD ["/bin/sh", "-c", "/usr/bin/nextcloud-spreed-signaling-proxy -config $CONFIG"]
|
||||
|
|
|
@ -22,6 +22,11 @@
|
|||
#
|
||||
set -e
|
||||
|
||||
if [ -n "$1" ]; then
|
||||
# Run custom command.
|
||||
exec "$@"
|
||||
fi
|
||||
|
||||
if [ -z "$CONFIG" ]; then
|
||||
echo "No configuration filename given in CONFIG environment variable"
|
||||
exit 1
|
||||
|
@ -31,52 +36,68 @@ if [ ! -f "$CONFIG" ]; then
|
|||
echo "Preparing signaling proxy configuration in $CONFIG ..."
|
||||
cp /config/proxy.conf.in "$CONFIG"
|
||||
|
||||
if [ ! -z "$HTTP_LISTEN" ]; then
|
||||
if [ -n "$HTTP_LISTEN" ]; then
|
||||
sed -i "s|#listen = 127.0.0.1:9090|listen = $HTTP_LISTEN|" "$CONFIG"
|
||||
fi
|
||||
|
||||
if [ ! -z "$COUNTRY" ]; then
|
||||
if [ -n "$COUNTRY" ]; then
|
||||
sed -i "s|#country =.*|country = $COUNTRY|" "$CONFIG"
|
||||
fi
|
||||
|
||||
if [ -n "$EXTERNAL_HOSTNAME" ]; then
|
||||
sed -i "s|#hostname =.*|hostname = $EXTERNAL_HOSTNAME|" "$CONFIG"
|
||||
fi
|
||||
if [ -n "$TOKEN_ID" ]; then
|
||||
sed -i "s|#token_id =.*|token_id = $TOKEN_ID|" "$CONFIG"
|
||||
fi
|
||||
if [ -n "$TOKEN_KEY" ]; then
|
||||
sed -i "s|#token_key =.*|token_key = $TOKEN_KEY|" "$CONFIG"
|
||||
fi
|
||||
if [ -n "$BANDWIDTH_INCOMING" ]; then
|
||||
sed -i "s|#incoming =.*|incoming = $BANDWIDTH_INCOMING|" "$CONFIG"
|
||||
fi
|
||||
if [ -n "$BANDWIDTH_OUTGOING" ]; then
|
||||
sed -i "s|#outgoing =.*|outgoing = $BANDWIDTH_OUTGOING|" "$CONFIG"
|
||||
fi
|
||||
|
||||
HAS_ETCD=
|
||||
if [ ! -z "$ETCD_ENDPOINTS" ]; then
|
||||
if [ -n "$ETCD_ENDPOINTS" ]; then
|
||||
sed -i "s|#endpoints =.*|endpoints = $ETCD_ENDPOINTS|" "$CONFIG"
|
||||
HAS_ETCD=1
|
||||
else
|
||||
if [ ! -z "$ETCD_DISCOVERY_SRV" ]; then
|
||||
if [ -n "$ETCD_DISCOVERY_SRV" ]; then
|
||||
sed -i "s|#discoverysrv =.*|discoverysrv = $ETCD_DISCOVERY_SRV|" "$CONFIG"
|
||||
HAS_ETCD=1
|
||||
fi
|
||||
if [ ! -z "$ETCD_DISCOVERY_SERVICE" ]; then
|
||||
if [ -n "$ETCD_DISCOVERY_SERVICE" ]; then
|
||||
sed -i "s|#discoveryservice =.*|discoveryservice = $ETCD_DISCOVERY_SERVICE|" "$CONFIG"
|
||||
fi
|
||||
fi
|
||||
if [ ! -z "$HAS_ETCD" ]; then
|
||||
if [ ! -z "$ETCD_CLIENT_KEY" ]; then
|
||||
if [ -n "$HAS_ETCD" ]; then
|
||||
if [ -n "$ETCD_CLIENT_KEY" ]; then
|
||||
sed -i "s|#clientkey = /path/to/etcd-client.key|clientkey = $ETCD_CLIENT_KEY|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$ETCD_CLIENT_CERTIFICATE" ]; then
|
||||
if [ -n "$ETCD_CLIENT_CERTIFICATE" ]; then
|
||||
sed -i "s|#clientcert = /path/to/etcd-client.crt|clientcert = $ETCD_CLIENT_CERTIFICATE|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$ETCD_CLIENT_CA" ]; then
|
||||
if [ -n "$ETCD_CLIENT_CA" ]; then
|
||||
sed -i "s|#cacert = /path/to/etcd-ca.crt|cacert = $ETCD_CLIENT_CA|" "$CONFIG"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -z "$JANUS_URL" ]; then
|
||||
if [ -n "$JANUS_URL" ]; then
|
||||
sed -i "s|url =.*|url = $JANUS_URL|" "$CONFIG"
|
||||
else
|
||||
sed -i "s|url =.*|#url =|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$MAX_STREAM_BITRATE" ]; then
|
||||
if [ -n "$MAX_STREAM_BITRATE" ]; then
|
||||
sed -i "s|#maxstreambitrate =.*|maxstreambitrate = $MAX_STREAM_BITRATE|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$MAX_SCREEN_BITRATE" ]; then
|
||||
if [ -n "$MAX_SCREEN_BITRATE" ]; then
|
||||
sed -i "s|#maxscreenbitrate =.*|maxscreenbitrate = $MAX_SCREEN_BITRATE|" "$CONFIG"
|
||||
fi
|
||||
|
||||
if [ ! -z "$TOKENS_ETCD" ]; then
|
||||
if [ -n "$TOKENS_ETCD" ]; then
|
||||
if [ -z "$HAS_ETCD" ]; then
|
||||
echo "No etcd endpoint configured, can't use etcd for proxy tokens"
|
||||
exit 1
|
||||
|
@ -84,7 +105,7 @@ if [ ! -f "$CONFIG" ]; then
|
|||
|
||||
sed -i "s|tokentype =.*|tokentype = etcd|" "$CONFIG"
|
||||
|
||||
if [ ! -z "$TOKEN_KEY_FORMAT" ]; then
|
||||
if [ -n "$TOKEN_KEY_FORMAT" ]; then
|
||||
sed -i "s|#keyformat =.*|keyformat = $TOKEN_KEY_FORMAT|" "$CONFIG"
|
||||
fi
|
||||
else
|
||||
|
@ -93,18 +114,22 @@ if [ ! -f "$CONFIG" ]; then
|
|||
echo "[tokens]" >> "$CONFIG"
|
||||
for token in $TOKENS; do
|
||||
declare var="TOKEN_${token^^}_KEY"
|
||||
var=$(echo $var | sed "s|\.|_|")
|
||||
if [ ! -z "${!var}" ]; then
|
||||
var=${var//./_}
|
||||
if [ -n "${!var}" ]; then
|
||||
echo "$token = ${!var}" >> "$CONFIG"
|
||||
fi
|
||||
done
|
||||
echo >> "$CONFIG"
|
||||
fi
|
||||
|
||||
if [ ! -z "$STATS_IPS" ]; then
|
||||
if [ -n "$STATS_IPS" ]; then
|
||||
sed -i "s|#allowed_ips =.*|allowed_ips = $STATS_IPS|" "$CONFIG"
|
||||
fi
|
||||
|
||||
if [ -n "$TRUSTED_PROXIES" ]; then
|
||||
sed -i "s|#trustedproxies =.*|trustedproxies = $TRUSTED_PROXIES|" "$CONFIG"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "Starting signaling proxy with $CONFIG ..."
|
||||
exec "$@"
|
||||
exec /usr/bin/nextcloud-spreed-signaling-proxy -config "$CONFIG"
|
||||
|
|
26
docker/proxy/stop.sh
Executable file
26
docker/proxy/stop.sh
Executable file
|
@ -0,0 +1,26 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Standalone signaling server for the Nextcloud Spreed app.
|
||||
# Copyright (C) 2024 struktur AG
|
||||
#
|
||||
# @author Joachim Bauch <bauch@struktur.de>
|
||||
#
|
||||
# @license GNU AGPL version 3 or any later version
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
set -e
|
||||
|
||||
echo "Schedule signaling proxy to shutdown ..."
|
||||
exec killall -USR1 nextcloud-spreed-signaling-proxy
|
33
docker/proxy/wait.sh
Executable file
33
docker/proxy/wait.sh
Executable file
|
@ -0,0 +1,33 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Standalone signaling server for the Nextcloud Spreed app.
|
||||
# Copyright (C) 2024 struktur AG
|
||||
#
|
||||
# @author Joachim Bauch <bauch@struktur.de>
|
||||
#
|
||||
# @license GNU AGPL version 3 or any later version
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
set -e
|
||||
|
||||
echo "Waiting for signaling proxy to shutdown ..."
|
||||
while true
|
||||
do
|
||||
if ! pgrep nextcloud-spreed-signaling-proxy > /dev/null ; then
|
||||
echo "Signaling proxy has stopped"
|
||||
exit 0
|
||||
fi
|
||||
sleep 1
|
||||
done
|
|
@ -1,11 +1,12 @@
|
|||
FROM --platform=${BUILDPLATFORM} golang:1.21-alpine AS builder
|
||||
FROM --platform=${BUILDPLATFORM} golang:1.22-alpine AS builder
|
||||
ARG TARGETARCH
|
||||
ARG TARGETOS
|
||||
|
||||
WORKDIR /workdir
|
||||
|
||||
COPY . .
|
||||
RUN apk add --no-cache bash git build-base protobuf && \
|
||||
RUN touch /.dockerenv && \
|
||||
apk add --no-cache bash git build-base protobuf && \
|
||||
if [ -d "vendor" ]; then GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOPROXY=off make server; else \
|
||||
GOOS=${TARGETOS} GOARCH=${TARGETARCH} make server; fi
|
||||
|
||||
|
@ -18,10 +19,12 @@ RUN adduser -D spreedbackend && \
|
|||
COPY --from=builder /workdir/bin/signaling /usr/bin/nextcloud-spreed-signaling
|
||||
COPY ./server.conf.in /config/server.conf.in
|
||||
COPY ./docker/server/entrypoint.sh /
|
||||
COPY ./docker/server/stop.sh /
|
||||
COPY ./docker/server/wait.sh /
|
||||
RUN chown spreedbackend /config
|
||||
RUN /usr/bin/nextcloud-spreed-signaling -version
|
||||
|
||||
USER spreedbackend
|
||||
|
||||
STOPSIGNAL SIGUSR1
|
||||
ENTRYPOINT [ "/entrypoint.sh" ]
|
||||
CMD ["/bin/sh", "-c", "/usr/bin/nextcloud-spreed-signaling -config $CONFIG"]
|
||||
|
|
|
@ -22,6 +22,11 @@
|
|||
#
|
||||
set -e
|
||||
|
||||
if [ -n "$1" ]; then
|
||||
# Run custom command.
|
||||
exec "$@"
|
||||
fi
|
||||
|
||||
if [ -z "$CONFIG" ]; then
|
||||
echo "No configuration filename given in CONFIG environment variable"
|
||||
exit 1
|
||||
|
@ -31,69 +36,75 @@ if [ ! -f "$CONFIG" ]; then
|
|||
echo "Preparing signaling server configuration in $CONFIG ..."
|
||||
cp /config/server.conf.in "$CONFIG"
|
||||
|
||||
if [ ! -z "$HTTP_LISTEN" ]; then
|
||||
if [ -n "$HTTP_LISTEN" ]; then
|
||||
sed -i "s|#listen = 127.0.0.1:8080|listen = $HTTP_LISTEN|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$HTTPS_LISTEN" ]; then
|
||||
if [ -n "$HTTPS_LISTEN" ]; then
|
||||
sed -i "s|#listen = 127.0.0.1:8443|listen = $HTTPS_LISTEN|" "$CONFIG"
|
||||
|
||||
if [ ! -z "$HTTPS_CERTIFICATE" ]; then
|
||||
if [ -n "$HTTPS_CERTIFICATE" ]; then
|
||||
sed -i "s|certificate = /etc/nginx/ssl/server.crt|certificate = $HTTPS_CERTIFICATE|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$HTTPS_KEY" ]; then
|
||||
if [ -n "$HTTPS_KEY" ]; then
|
||||
sed -i "s|key = /etc/nginx/ssl/server.key|key = $HTTPS_KEY|" "$CONFIG"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -z "$HASH_KEY" ]; then
|
||||
if [ -n "$HASH_KEY" ]; then
|
||||
sed -i "s|the-secret-for-session-checksums|$HASH_KEY|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$BLOCK_KEY" ]; then
|
||||
if [ -n "$BLOCK_KEY" ]; then
|
||||
sed -i "s|-encryption-key-|$BLOCK_KEY|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$INTERNAL_SHARED_SECRET_KEY" ]; then
|
||||
if [ -n "$INTERNAL_SHARED_SECRET_KEY" ]; then
|
||||
sed -i "s|the-shared-secret-for-internal-clients|$INTERNAL_SHARED_SECRET_KEY|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$NATS_URL" ]; then
|
||||
if [ -n "$NATS_URL" ]; then
|
||||
sed -i "s|#url = nats://localhost:4222|url = $NATS_URL|" "$CONFIG"
|
||||
else
|
||||
sed -i "s|#url = nats://localhost:4222|url = nats://loopback|" "$CONFIG"
|
||||
fi
|
||||
|
||||
HAS_ETCD=
|
||||
if [ ! -z "$ETCD_ENDPOINTS" ]; then
|
||||
if [ -n "$ETCD_ENDPOINTS" ]; then
|
||||
sed -i "s|#endpoints =.*|endpoints = $ETCD_ENDPOINTS|" "$CONFIG"
|
||||
HAS_ETCD=1
|
||||
else
|
||||
if [ ! -z "$ETCD_DISCOVERY_SRV" ]; then
|
||||
if [ -n "$ETCD_DISCOVERY_SRV" ]; then
|
||||
sed -i "s|#discoverysrv =.*|discoverysrv = $ETCD_DISCOVERY_SRV|" "$CONFIG"
|
||||
HAS_ETCD=1
|
||||
fi
|
||||
if [ ! -z "$ETCD_DISCOVERY_SERVICE" ]; then
|
||||
if [ -n "$ETCD_DISCOVERY_SERVICE" ]; then
|
||||
sed -i "s|#discoveryservice =.*|discoveryservice = $ETCD_DISCOVERY_SERVICE|" "$CONFIG"
|
||||
fi
|
||||
fi
|
||||
if [ ! -z "$HAS_ETCD" ]; then
|
||||
if [ ! -z "$ETCD_CLIENT_KEY" ]; then
|
||||
if [ -n "$HAS_ETCD" ]; then
|
||||
if [ -n "$ETCD_CLIENT_KEY" ]; then
|
||||
sed -i "s|#clientkey = /path/to/etcd-client.key|clientkey = $ETCD_CLIENT_KEY|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$ETCD_CLIENT_CERTIFICATE" ]; then
|
||||
if [ -n "$ETCD_CLIENT_CERTIFICATE" ]; then
|
||||
sed -i "s|#clientcert = /path/to/etcd-client.crt|clientcert = $ETCD_CLIENT_CERTIFICATE|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$ETCD_CLIENT_CA" ]; then
|
||||
if [ -n "$ETCD_CLIENT_CA" ]; then
|
||||
sed -i "s|#cacert = /path/to/etcd-ca.crt|cacert = $ETCD_CLIENT_CA|" "$CONFIG"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -z "$USE_JANUS" ]; then
|
||||
if [ -n "$USE_JANUS" ]; then
|
||||
sed -i "s|#type =$|type = janus|" "$CONFIG"
|
||||
if [ ! -z "$JANUS_URL" ]; then
|
||||
if [ -n "$JANUS_URL" ]; then
|
||||
sed -i "/proxy URLs to connect to/{n;s|#url =$|url = $JANUS_URL|}" "$CONFIG"
|
||||
fi
|
||||
elif [ ! -z "$USE_PROXY" ]; then
|
||||
elif [ -n "$USE_PROXY" ]; then
|
||||
sed -i "s|#type =$|type = proxy|" "$CONFIG"
|
||||
if [ -n "$PROXY_TOKEN_ID" ]; then
|
||||
sed -i "s|#token_id =.*|token_id = $PROXY_TOKEN_ID|" "$CONFIG"
|
||||
fi
|
||||
if [ -n "$PROXY_TOKEN_KEY" ]; then
|
||||
sed -i "s|#token_key =.*|token_key = $PROXY_TOKEN_KEY|" "$CONFIG"
|
||||
fi
|
||||
|
||||
if [ ! -z "$PROXY_ETCD" ]; then
|
||||
if [ -n "$PROXY_ETCD" ]; then
|
||||
if [ -z "$HAS_ETCD" ]; then
|
||||
echo "No etcd endpoint configured, can't use etcd for proxy connections"
|
||||
exit 1
|
||||
|
@ -101,79 +112,77 @@ if [ ! -f "$CONFIG" ]; then
|
|||
|
||||
sed -i "s|#urltype = static|urltype = etcd|" "$CONFIG"
|
||||
|
||||
if [ ! -z "$PROXY_TOKEN_ID" ]; then
|
||||
sed -i "s|#token_id =.*|token_id = $PROXY_TOKEN_ID|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$PROXY_TOKEN_KEY" ]; then
|
||||
sed -i "s|#token_key =.*|token_key = $PROXY_TOKEN_KEY|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$PROXY_KEY_PREFIX" ]; then
|
||||
if [ -n "$PROXY_KEY_PREFIX" ]; then
|
||||
sed -i "s|#keyprefix =.*|keyprefix = $PROXY_KEY_PREFIX|" "$CONFIG"
|
||||
fi
|
||||
else
|
||||
if [ ! -z "$PROXY_URLS" ]; then
|
||||
if [ -n "$PROXY_URLS" ]; then
|
||||
sed -i "/proxy URLs to connect to/{n;s|#url =$|url = $PROXY_URLS|}" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$PROXY_DNS_DISCOVERY" ]; then
|
||||
if [ -n "$PROXY_DNS_DISCOVERY" ]; then
|
||||
sed -i "/or deleted as necessary/{n;s|#dnsdiscovery =.*|dnsdiscovery = true|}" "$CONFIG"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -z "$MAX_STREAM_BITRATE" ]; then
|
||||
if [ -n "$MAX_STREAM_BITRATE" ]; then
|
||||
sed -i "s|#maxstreambitrate =.*|maxstreambitrate = $MAX_STREAM_BITRATE|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$MAX_SCREEN_BITRATE" ]; then
|
||||
if [ -n "$MAX_SCREEN_BITRATE" ]; then
|
||||
sed -i "s|#maxscreenbitrate =.*|maxscreenbitrate = $MAX_SCREEN_BITRATE|" "$CONFIG"
|
||||
fi
|
||||
|
||||
if [ ! -z "$SKIP_VERIFY" ]; then
|
||||
if [ -n "$SKIP_VERIFY" ]; then
|
||||
sed -i "s|#skipverify =.*|skipverify = $SKIP_VERIFY|" "$CONFIG"
|
||||
fi
|
||||
|
||||
if [ ! -z "$TURN_API_KEY" ]; then
|
||||
if [ -n "$TURN_API_KEY" ]; then
|
||||
sed -i "s|#\?apikey =.*|apikey = $TURN_API_KEY|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$TURN_SECRET" ]; then
|
||||
if [ -n "$TURN_SECRET" ]; then
|
||||
sed -i "/same as on the TURN server/{n;s|#\?secret =.*|secret = $TURN_SECRET|}" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$TURN_SERVERS" ]; then
|
||||
if [ -n "$TURN_SERVERS" ]; then
|
||||
sed -i "s|#servers =.*|servers = $TURN_SERVERS|" "$CONFIG"
|
||||
fi
|
||||
|
||||
if [ ! -z "$GEOIP_LICENSE" ]; then
|
||||
if [ -n "$GEOIP_LICENSE" ]; then
|
||||
sed -i "s|#license =.*|license = $GEOIP_LICENSE|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$GEOIP_URL" ]; then
|
||||
if [ -n "$GEOIP_URL" ]; then
|
||||
sed -i "/looking up IP addresses/{n;s|#url =$|url = $GEOIP_URL|}" "$CONFIG"
|
||||
fi
|
||||
|
||||
if [ ! -z "$STATS_IPS" ]; then
|
||||
if [ -n "$STATS_IPS" ]; then
|
||||
sed -i "s|#allowed_ips =.*|allowed_ips = $STATS_IPS|" "$CONFIG"
|
||||
fi
|
||||
|
||||
if [ ! -z "$GRPC_LISTEN" ]; then
|
||||
if [ -n "$TRUSTED_PROXIES" ]; then
|
||||
sed -i "s|#trustedproxies =.*|trustedproxies = $TRUSTED_PROXIES|" "$CONFIG"
|
||||
fi
|
||||
|
||||
if [ -n "$GRPC_LISTEN" ]; then
|
||||
sed -i "s|#listen = 0.0.0.0:9090|listen = $GRPC_LISTEN|" "$CONFIG"
|
||||
|
||||
if [ ! -z "$GRPC_SERVER_CERTIFICATE" ]; then
|
||||
if [ -n "$GRPC_SERVER_CERTIFICATE" ]; then
|
||||
sed -i "s|#servercertificate =.*|servercertificate = $GRPC_SERVER_CERTIFICATE|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$GRPC_SERVER_KEY" ]; then
|
||||
if [ -n "$GRPC_SERVER_KEY" ]; then
|
||||
sed -i "s|#serverkey =.*|serverkey = $GRPC_SERVER_KEY|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$GRPC_SERVER_CA" ]; then
|
||||
if [ -n "$GRPC_SERVER_CA" ]; then
|
||||
sed -i "s|#serverca =.*|serverca = $GRPC_SERVER_CA|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$GRPC_CLIENT_CERTIFICATE" ]; then
|
||||
if [ -n "$GRPC_CLIENT_CERTIFICATE" ]; then
|
||||
sed -i "s|#clientcertificate =.*|clientcertificate = $GRPC_CLIENT_CERTIFICATE|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$GRPC_CLIENT_KEY" ]; then
|
||||
if [ -n "$GRPC_CLIENT_KEY" ]; then
|
||||
sed -i "s|#clientkey = /path/to/grpc-client.key|clientkey = $GRPC_CLIENT_KEY|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$GRPC_CLIENT_CA" ]; then
|
||||
if [ -n "$GRPC_CLIENT_CA" ]; then
|
||||
sed -i "s|#clientca =.*|clientca = $GRPC_CLIENT_CA|" "$CONFIG"
|
||||
fi
|
||||
if [ ! -z "$GRPC_ETCD" ]; then
|
||||
if [ -n "$GRPC_ETCD" ]; then
|
||||
if [ -z "$HAS_ETCD" ]; then
|
||||
echo "No etcd endpoint configured, can't use etcd for GRPC"
|
||||
exit 1
|
||||
|
@ -181,50 +190,50 @@ if [ ! -f "$CONFIG" ]; then
|
|||
|
||||
sed -i "s|#targettype =$|targettype = etcd|" "$CONFIG"
|
||||
|
||||
if [ ! -z "$GRPC_TARGET_PREFIX" ]; then
|
||||
if [ -n "$GRPC_TARGET_PREFIX" ]; then
|
||||
sed -i "s|#targetprefix =.*|targetprefix = $GRPC_TARGET_PREFIX|" "$CONFIG"
|
||||
fi
|
||||
else
|
||||
if [ ! -z "$GRPC_TARGETS" ]; then
|
||||
if [ -n "$GRPC_TARGETS" ]; then
|
||||
sed -i "s|#targets =.*|targets = $GRPC_TARGETS|" "$CONFIG"
|
||||
|
||||
if [ ! -z "$GRPC_DNS_DISCOVERY" ]; then
|
||||
if [ -n "$GRPC_DNS_DISCOVERY" ]; then
|
||||
sed -i "/# deleted as necessary/{n;s|#dnsdiscovery =.*|dnsdiscovery = true|}" "$CONFIG"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -z "$GEOIP_OVERRIDES" ]; then
|
||||
if [ -n "$GEOIP_OVERRIDES" ]; then
|
||||
sed -i "s|\[geoip-overrides\]|#[geoip-overrides]|" "$CONFIG"
|
||||
echo >> "$CONFIG"
|
||||
echo "[geoip-overrides]" >> "$CONFIG"
|
||||
for override in $GEOIP_OVERRIDES; do
|
||||
echo $override >> "$CONFIG"
|
||||
echo "$override" >> "$CONFIG"
|
||||
done
|
||||
echo >> "$CONFIG"
|
||||
fi
|
||||
|
||||
if [ ! -z "$CONTINENT_OVERRIDES" ]; then
|
||||
if [ -n "$CONTINENT_OVERRIDES" ]; then
|
||||
sed -i "s|\[continent-overrides\]|#[continent-overrides]|" "$CONFIG"
|
||||
echo >> "$CONFIG"
|
||||
echo "[continent-overrides]" >> "$CONFIG"
|
||||
for override in $CONTINENT_OVERRIDES; do
|
||||
echo $override >> "$CONFIG"
|
||||
echo "$override" >> "$CONFIG"
|
||||
done
|
||||
echo >> "$CONFIG"
|
||||
fi
|
||||
|
||||
if [ ! -z "$BACKENDS_ALLOWALL" ]; then
|
||||
if [ -n "$BACKENDS_ALLOWALL" ]; then
|
||||
sed -i "s|allowall = false|allowall = $BACKENDS_ALLOWALL|" "$CONFIG"
|
||||
fi
|
||||
|
||||
if [ ! -z "$BACKENDS_ALLOWALL_SECRET" ]; then
|
||||
if [ -n "$BACKENDS_ALLOWALL_SECRET" ]; then
|
||||
sed -i "s|#secret = the-shared-secret-for-allowall|secret = $BACKENDS_ALLOWALL_SECRET|" "$CONFIG"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -z "$BACKENDS" ]; then
|
||||
BACKENDS_CONFIG=$(echo $BACKENDS | sed 's/ /,/g')
|
||||
if [ -n "$BACKENDS" ]; then
|
||||
BACKENDS_CONFIG=${BACKENDS// /,}
|
||||
sed -i "s|#backends = .*|backends = $BACKENDS_CONFIG|" "$CONFIG"
|
||||
|
||||
echo >> "$CONFIG"
|
||||
|
@ -232,27 +241,27 @@ if [ ! -f "$CONFIG" ]; then
|
|||
echo "[$backend]" >> "$CONFIG"
|
||||
|
||||
declare var="BACKEND_${backend^^}_URL"
|
||||
if [ ! -z "${!var}" ]; then
|
||||
if [ -n "${!var}" ]; then
|
||||
echo "url = ${!var}" >> "$CONFIG"
|
||||
fi
|
||||
|
||||
declare var="BACKEND_${backend^^}_SHARED_SECRET"
|
||||
if [ ! -z "${!var}" ]; then
|
||||
if [ -n "${!var}" ]; then
|
||||
echo "secret = ${!var}" >> "$CONFIG"
|
||||
fi
|
||||
|
||||
declare var="BACKEND_${backend^^}_SESSION_LIMIT"
|
||||
if [ ! -z "${!var}" ]; then
|
||||
if [ -n "${!var}" ]; then
|
||||
echo "sessionlimit = ${!var}" >> "$CONFIG"
|
||||
fi
|
||||
|
||||
declare var="BACKEND_${backend^^}_MAX_STREAM_BITRATE"
|
||||
if [ ! -z "${!var}" ]; then
|
||||
if [ -n "${!var}" ]; then
|
||||
echo "maxstreambitrate = ${!var}" >> "$CONFIG"
|
||||
fi
|
||||
|
||||
declare var="BACKEND_${backend^^}_MAX_SCREEN_BITRATE"
|
||||
if [ ! -z "${!var}" ]; then
|
||||
if [ -n "${!var}" ]; then
|
||||
echo "maxscreenbitrate = ${!var}" >> "$CONFIG"
|
||||
fi
|
||||
echo >> "$CONFIG"
|
||||
|
@ -261,4 +270,4 @@ if [ ! -f "$CONFIG" ]; then
|
|||
fi
|
||||
|
||||
echo "Starting signaling server with $CONFIG ..."
|
||||
exec "$@"
|
||||
exec /usr/bin/nextcloud-spreed-signaling -config "$CONFIG"
|
||||
|
|
26
docker/server/stop.sh
Executable file
26
docker/server/stop.sh
Executable file
|
@ -0,0 +1,26 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Standalone signaling server for the Nextcloud Spreed app.
|
||||
# Copyright (C) 2024 struktur AG
|
||||
#
|
||||
# @author Joachim Bauch <bauch@struktur.de>
|
||||
#
|
||||
# @license GNU AGPL version 3 or any later version
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
set -e
|
||||
|
||||
echo "Schedule signaling server to shutdown ..."
|
||||
exec killall -USR1 nextcloud-spreed-signaling
|
33
docker/server/wait.sh
Executable file
33
docker/server/wait.sh
Executable file
|
@ -0,0 +1,33 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Standalone signaling server for the Nextcloud Spreed app.
|
||||
# Copyright (C) 2024 struktur AG
|
||||
#
|
||||
# @author Joachim Bauch <bauch@struktur.de>
|
||||
#
|
||||
# @license GNU AGPL version 3 or any later version
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
set -e
|
||||
|
||||
echo "Waiting for signaling server to shutdown ..."
|
||||
while true
|
||||
do
|
||||
if ! pgrep nextcloud-spreed-signaling > /dev/null ; then
|
||||
echo "Signaling server has stopped"
|
||||
exit 0
|
||||
fi
|
||||
sleep 1
|
||||
done
|
|
@ -48,3 +48,6 @@ The following metrics are available:
|
|||
| `signaling_grpc_clients` | Gauge | 1.0.0 | The current number of GRPC clients | |
|
||||
| `signaling_grpc_client_calls_total` | Counter | 1.0.0 | The total number of GRPC client calls | `method` |
|
||||
| `signaling_grpc_server_calls_total` | Counter | 1.0.0 | The total number of GRPC server calls | `method` |
|
||||
| `signaling_http_client_pool_connections` | Gauge | 1.2.4 | The current number of HTTP client connections per host | `host` |
|
||||
| `signaling_throttle_delayed_total` | Counter | 1.2.5 | The total number of delayed requests | `action`, `delay` |
|
||||
| `signaling_throttle_bruteforce_total` | Counter | 1.2.5 | The total number of rejected bruteforce requests | `action` |
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
jinja2==3.1.2
|
||||
markdown==3.5.1
|
||||
mkdocs==1.5.3
|
||||
readthedocs-sphinx-search==0.3.1
|
||||
sphinx==7.2.6
|
||||
sphinx_rtd_theme==1.3.0
|
||||
jinja2==3.1.4
|
||||
markdown==3.6
|
||||
mkdocs==1.6.0
|
||||
readthedocs-sphinx-search==0.3.2
|
||||
sphinx==7.3.7
|
||||
sphinx_rtd_theme==2.0.0
|
||||
|
|
|
@ -23,6 +23,7 @@ package signaling
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
|
@ -34,6 +35,8 @@ import (
|
|||
"go.etcd.io/etcd/client/pkg/v3/srv"
|
||||
"go.etcd.io/etcd/client/pkg/v3/transport"
|
||||
clientv3 "go.etcd.io/etcd/client/v3"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
type EtcdClientListener interface {
|
||||
|
@ -42,8 +45,8 @@ type EtcdClientListener interface {
|
|||
|
||||
type EtcdClientWatcher interface {
|
||||
EtcdWatchCreated(client *EtcdClient, key string)
|
||||
EtcdKeyUpdated(client *EtcdClient, key string, value []byte)
|
||||
EtcdKeyDeleted(client *EtcdClient, key string)
|
||||
EtcdKeyUpdated(client *EtcdClient, key string, value []byte, prevValue []byte)
|
||||
EtcdKeyDeleted(client *EtcdClient, key string, prevValue []byte)
|
||||
}
|
||||
|
||||
type EtcdClient struct {
|
||||
|
@ -112,6 +115,17 @@ func (c *EtcdClient) load(config *goconf.ConfigFile, ignoreErrors bool) error {
|
|||
DialTimeout: time.Second,
|
||||
}
|
||||
|
||||
if logLevel, _ := config.GetString("etcd", "loglevel"); logLevel != "" {
|
||||
var l zapcore.Level
|
||||
if err := l.Set(logLevel); err != nil {
|
||||
return fmt.Errorf("Unsupported etcd log level %s: %w", logLevel, err)
|
||||
}
|
||||
|
||||
logConfig := zap.NewProductionConfig()
|
||||
logConfig.Level = zap.NewAtomicLevelAt(l)
|
||||
cfg.LogConfig = &logConfig
|
||||
}
|
||||
|
||||
clientKey := c.getConfigStringWithFallback(config, "clientkey")
|
||||
clientCert := c.getConfigStringWithFallback(config, "clientcert")
|
||||
caCert := c.getConfigStringWithFallback(config, "cacert")
|
||||
|
@ -176,8 +190,8 @@ func (c *EtcdClient) getEtcdClient() *clientv3.Client {
|
|||
return client.(*clientv3.Client)
|
||||
}
|
||||
|
||||
func (c *EtcdClient) syncClient() error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
func (c *EtcdClient) syncClient(ctx context.Context) error {
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
||||
defer cancel()
|
||||
|
||||
return c.getEtcdClient().Sync(ctx)
|
||||
|
@ -212,26 +226,32 @@ func (c *EtcdClient) RemoveListener(listener EtcdClientListener) {
|
|||
delete(c.listeners, listener)
|
||||
}
|
||||
|
||||
func (c *EtcdClient) WaitForConnection() {
|
||||
waitDelay := initialWaitDelay
|
||||
func (c *EtcdClient) WaitForConnection(ctx context.Context) error {
|
||||
backoff, err := NewExponentialBackoff(initialWaitDelay, maxWaitDelay)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
if err := c.syncClient(); err != nil {
|
||||
if err == context.DeadlineExceeded {
|
||||
log.Printf("Timeout waiting for etcd client to connect to the cluster, retry in %s", waitDelay)
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.syncClient(ctx); err != nil {
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return err
|
||||
} else if errors.Is(err, context.DeadlineExceeded) {
|
||||
log.Printf("Timeout waiting for etcd client to connect to the cluster, retry in %s", backoff.NextWait())
|
||||
} else {
|
||||
log.Printf("Could not sync etcd client with the cluster, retry in %s: %s", waitDelay, err)
|
||||
log.Printf("Could not sync etcd client with the cluster, retry in %s: %s", backoff.NextWait(), err)
|
||||
}
|
||||
|
||||
time.Sleep(waitDelay)
|
||||
waitDelay = waitDelay * 2
|
||||
if waitDelay > maxWaitDelay {
|
||||
waitDelay = maxWaitDelay
|
||||
}
|
||||
backoff.Wait(ctx)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Printf("Client synced, using endpoints %+v", c.getEtcdClient().Endpoints())
|
||||
return
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -239,27 +259,37 @@ func (c *EtcdClient) Get(ctx context.Context, key string, opts ...clientv3.OpOpt
|
|||
return c.getEtcdClient().Get(ctx, key, opts...)
|
||||
}
|
||||
|
||||
func (c *EtcdClient) Watch(ctx context.Context, key string, watcher EtcdClientWatcher, opts ...clientv3.OpOption) error {
|
||||
log.Printf("Wait for leader and start watching on %s", key)
|
||||
func (c *EtcdClient) Watch(ctx context.Context, key string, nextRevision int64, watcher EtcdClientWatcher, opts ...clientv3.OpOption) (int64, error) {
|
||||
log.Printf("Wait for leader and start watching on %s (rev=%d)", key, nextRevision)
|
||||
opts = append(opts, clientv3.WithRev(nextRevision), clientv3.WithPrevKV())
|
||||
ch := c.getEtcdClient().Watch(clientv3.WithRequireLeader(ctx), key, opts...)
|
||||
log.Printf("Watch created for %s", key)
|
||||
watcher.EtcdWatchCreated(c, key)
|
||||
for response := range ch {
|
||||
if err := response.Err(); err != nil {
|
||||
return err
|
||||
return nextRevision, err
|
||||
}
|
||||
|
||||
nextRevision = response.Header.Revision + 1
|
||||
for _, ev := range response.Events {
|
||||
switch ev.Type {
|
||||
case clientv3.EventTypePut:
|
||||
watcher.EtcdKeyUpdated(c, string(ev.Kv.Key), ev.Kv.Value)
|
||||
var prevValue []byte
|
||||
if ev.PrevKv != nil {
|
||||
prevValue = ev.PrevKv.Value
|
||||
}
|
||||
watcher.EtcdKeyUpdated(c, string(ev.Kv.Key), ev.Kv.Value, prevValue)
|
||||
case clientv3.EventTypeDelete:
|
||||
watcher.EtcdKeyDeleted(c, string(ev.Kv.Key))
|
||||
var prevValue []byte
|
||||
if ev.PrevKv != nil {
|
||||
prevValue = ev.PrevKv.Value
|
||||
}
|
||||
watcher.EtcdKeyDeleted(c, string(ev.Kv.Key), prevValue)
|
||||
default:
|
||||
log.Printf("Unsupported watch event %s %q -> %q", ev.Type, ev.Kv.Key, ev.Kv.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return nextRevision, nil
|
||||
}
|
||||
|
|
|
@ -29,7 +29,6 @@ import (
|
|||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -39,6 +38,8 @@ import (
|
|||
clientv3 "go.etcd.io/etcd/client/v3"
|
||||
"go.etcd.io/etcd/server/v3/embed"
|
||||
"go.etcd.io/etcd/server/v3/lease"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zaptest"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -89,6 +90,7 @@ func NewEtcdForTest(t *testing.T) *embed.Etcd {
|
|||
cfg.ListenPeerUrls = []url.URL{*peerListener}
|
||||
cfg.AdvertisePeerUrls = []url.URL{*peerListener}
|
||||
cfg.InitialCluster = "default=" + peerListener.String()
|
||||
cfg.ZapLoggerBuilder = embed.NewZapLoggerBuilder(zaptest.NewLogger(t, zaptest.Level(zap.WarnLevel)))
|
||||
etcd, err = embed.StartEtcd(cfg)
|
||||
if isErrorAddressAlreadyInUse(err) {
|
||||
continue
|
||||
|
@ -103,6 +105,7 @@ func NewEtcdForTest(t *testing.T) *embed.Etcd {
|
|||
|
||||
t.Cleanup(func() {
|
||||
etcd.Close()
|
||||
<-etcd.Server.StopNotify()
|
||||
})
|
||||
// Wait for server to be ready.
|
||||
<-etcd.Server.ReadyNotify()
|
||||
|
@ -115,6 +118,7 @@ func NewEtcdClientForTest(t *testing.T) (*embed.Etcd, *EtcdClient) {
|
|||
|
||||
config := goconf.NewConfigFile()
|
||||
config.AddOption("etcd", "endpoints", etcd.Config().ListenClientUrls[0].String())
|
||||
config.AddOption("etcd", "loglevel", "error")
|
||||
|
||||
client, err := NewEtcdClient(config, "")
|
||||
if err != nil {
|
||||
|
@ -143,6 +147,8 @@ func DeleteEtcdValue(etcd *embed.Etcd, key string) {
|
|||
}
|
||||
|
||||
func Test_EtcdClient_Get(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
etcd, client := NewEtcdClientForTest(t)
|
||||
|
||||
if response, err := client.Get(context.Background(), "foo"); err != nil {
|
||||
|
@ -165,6 +171,8 @@ func Test_EtcdClient_Get(t *testing.T) {
|
|||
}
|
||||
|
||||
func Test_EtcdClient_GetPrefix(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
etcd, client := NewEtcdClientForTest(t)
|
||||
|
||||
if response, err := client.Get(context.Background(), "foo"); err != nil {
|
||||
|
@ -196,6 +204,8 @@ type etcdEvent struct {
|
|||
t mvccpb.Event_EventType
|
||||
key string
|
||||
value string
|
||||
|
||||
prevValue string
|
||||
}
|
||||
|
||||
type EtcdClientTestListener struct {
|
||||
|
@ -204,9 +214,8 @@ type EtcdClientTestListener struct {
|
|||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
initial chan struct{}
|
||||
initialWg sync.WaitGroup
|
||||
events chan etcdEvent
|
||||
initial chan struct{}
|
||||
events chan etcdEvent
|
||||
}
|
||||
|
||||
func NewEtcdClientTestListener(ctx context.Context, t *testing.T) *EtcdClientTestListener {
|
||||
|
@ -227,21 +236,17 @@ func (l *EtcdClientTestListener) Close() {
|
|||
}
|
||||
|
||||
func (l *EtcdClientTestListener) EtcdClientCreated(client *EtcdClient) {
|
||||
l.initialWg.Add(1)
|
||||
go func() {
|
||||
if err := client.Watch(clientv3.WithRequireLeader(l.ctx), "foo", l, clientv3.WithPrefix()); err != nil {
|
||||
l.t.Error(err)
|
||||
if err := client.WaitForConnection(l.ctx); err != nil {
|
||||
l.t.Errorf("error waiting for connection: %s", err)
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
defer close(l.initial)
|
||||
client.WaitForConnection()
|
||||
|
||||
ctx, cancel := context.WithTimeout(l.ctx, time.Second)
|
||||
defer cancel()
|
||||
|
||||
if response, err := client.Get(ctx, "foo", clientv3.WithPrefix()); err != nil {
|
||||
response, err := client.Get(ctx, "foo", clientv3.WithPrefix())
|
||||
if err != nil {
|
||||
l.t.Error(err)
|
||||
} else if response.Count != 1 {
|
||||
l.t.Errorf("expected 1 responses, got %d", response.Count)
|
||||
|
@ -250,30 +255,47 @@ func (l *EtcdClientTestListener) EtcdClientCreated(client *EtcdClient) {
|
|||
} else if string(response.Kvs[0].Value) != "1" {
|
||||
l.t.Errorf("expected value \"1\", got \"%s\"", string(response.Kvs[0].Value))
|
||||
}
|
||||
l.initialWg.Wait()
|
||||
|
||||
close(l.initial)
|
||||
nextRevision := response.Header.Revision + 1
|
||||
for l.ctx.Err() == nil {
|
||||
var err error
|
||||
if nextRevision, err = client.Watch(clientv3.WithRequireLeader(l.ctx), "foo", nextRevision, l, clientv3.WithPrefix()); err != nil {
|
||||
l.t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (l *EtcdClientTestListener) EtcdWatchCreated(client *EtcdClient, key string) {
|
||||
l.initialWg.Done()
|
||||
}
|
||||
|
||||
func (l *EtcdClientTestListener) EtcdKeyUpdated(client *EtcdClient, key string, value []byte) {
|
||||
l.events <- etcdEvent{
|
||||
func (l *EtcdClientTestListener) EtcdKeyUpdated(client *EtcdClient, key string, value []byte, prevValue []byte) {
|
||||
evt := etcdEvent{
|
||||
t: clientv3.EventTypePut,
|
||||
key: string(key),
|
||||
value: string(value),
|
||||
}
|
||||
if len(prevValue) > 0 {
|
||||
evt.prevValue = string(prevValue)
|
||||
}
|
||||
l.events <- evt
|
||||
}
|
||||
|
||||
func (l *EtcdClientTestListener) EtcdKeyDeleted(client *EtcdClient, key string) {
|
||||
l.events <- etcdEvent{
|
||||
func (l *EtcdClientTestListener) EtcdKeyDeleted(client *EtcdClient, key string, prevValue []byte) {
|
||||
evt := etcdEvent{
|
||||
t: clientv3.EventTypeDelete,
|
||||
key: string(key),
|
||||
}
|
||||
if len(prevValue) > 0 {
|
||||
evt.prevValue = string(prevValue)
|
||||
}
|
||||
l.events <- evt
|
||||
}
|
||||
|
||||
func Test_EtcdClient_Watch(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
etcd, client := NewEtcdClientForTest(t)
|
||||
|
||||
SetEtcdValue(etcd, "foo/a", []byte("1"))
|
||||
|
@ -296,11 +318,23 @@ func Test_EtcdClient_Watch(t *testing.T) {
|
|||
t.Errorf("expected value %s, got %s", "2", event.value)
|
||||
}
|
||||
|
||||
SetEtcdValue(etcd, "foo/a", []byte("3"))
|
||||
event = <-listener.events
|
||||
if event.t != clientv3.EventTypePut {
|
||||
t.Errorf("expected type %d, got %d", clientv3.EventTypePut, event.t)
|
||||
} else if event.key != "foo/a" {
|
||||
t.Errorf("expected key %s, got %s", "foo/a", event.key)
|
||||
} else if event.value != "3" {
|
||||
t.Errorf("expected value %s, got %s", "3", event.value)
|
||||
}
|
||||
|
||||
DeleteEtcdValue(etcd, "foo/a")
|
||||
event = <-listener.events
|
||||
if event.t != clientv3.EventTypeDelete {
|
||||
t.Errorf("expected type %d, got %d", clientv3.EventTypeDelete, event.t)
|
||||
} else if event.key != "foo/a" {
|
||||
t.Errorf("expected key %s, got %s", "foo/a", event.key)
|
||||
} else if event.prevValue != "3" {
|
||||
t.Errorf("expected previous value %s, got %s", "3", event.prevValue)
|
||||
}
|
||||
}
|
||||
|
|
168
file_watcher.go
Normal file
168
file_watcher.go
Normal file
|
@ -0,0 +1,168 @@
|
|||
/**
|
||||
* Standalone signaling server for the Nextcloud Spreed app.
|
||||
* Copyright (C) 2024 struktur AG
|
||||
*
|
||||
* @author Joachim Bauch <bauch@struktur.de>
|
||||
*
|
||||
* @license GNU AGPL version 3 or any later version
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
package signaling
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultDeduplicateWatchEvents = 100 * time.Millisecond
|
||||
)
|
||||
|
||||
var (
|
||||
deduplicateWatchEvents atomic.Int64
|
||||
)
|
||||
|
||||
func init() {
|
||||
deduplicateWatchEvents.Store(int64(defaultDeduplicateWatchEvents))
|
||||
}
|
||||
|
||||
type FileWatcherCallback func(filename string)
|
||||
|
||||
type FileWatcher struct {
|
||||
filename string
|
||||
target string
|
||||
callback FileWatcherCallback
|
||||
|
||||
watcher *fsnotify.Watcher
|
||||
closeCtx context.Context
|
||||
closeFunc context.CancelFunc
|
||||
}
|
||||
|
||||
func NewFileWatcher(filename string, callback FileWatcherCallback) (*FileWatcher, error) {
|
||||
realFilename, err := filepath.EvalSymlinks(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := watcher.Add(realFilename); err != nil {
|
||||
watcher.Close() // nolint
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := watcher.Add(path.Dir(filename)); err != nil {
|
||||
watcher.Close() // nolint
|
||||
return nil, err
|
||||
}
|
||||
|
||||
closeCtx, closeFunc := context.WithCancel(context.Background())
|
||||
|
||||
w := &FileWatcher{
|
||||
filename: filename,
|
||||
target: realFilename,
|
||||
callback: callback,
|
||||
watcher: watcher,
|
||||
|
||||
closeCtx: closeCtx,
|
||||
closeFunc: closeFunc,
|
||||
}
|
||||
go w.run()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (f *FileWatcher) Close() error {
|
||||
f.closeFunc()
|
||||
return f.watcher.Close()
|
||||
}
|
||||
|
||||
func (f *FileWatcher) run() {
|
||||
var mu sync.Mutex
|
||||
timers := make(map[string]*time.Timer)
|
||||
|
||||
triggerEvent := func(event fsnotify.Event) {
|
||||
deduplicate := time.Duration(deduplicateWatchEvents.Load())
|
||||
if deduplicate <= 0 {
|
||||
f.callback(f.filename)
|
||||
return
|
||||
}
|
||||
|
||||
// Use timer to deduplicate multiple events for the same file.
|
||||
mu.Lock()
|
||||
t, found := timers[event.Name]
|
||||
mu.Unlock()
|
||||
if !found {
|
||||
t = time.AfterFunc(deduplicate, func() {
|
||||
f.callback(f.filename)
|
||||
|
||||
mu.Lock()
|
||||
delete(timers, event.Name)
|
||||
mu.Unlock()
|
||||
})
|
||||
mu.Lock()
|
||||
timers[event.Name] = t
|
||||
mu.Unlock()
|
||||
} else {
|
||||
t.Reset(deduplicate)
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case event := <-f.watcher.Events:
|
||||
if !event.Has(fsnotify.Write) && !event.Has(fsnotify.Create) && !event.Has(fsnotify.Rename) {
|
||||
continue
|
||||
}
|
||||
|
||||
if stat, err := os.Lstat(event.Name); err != nil {
|
||||
if !errors.Is(err, os.ErrNotExist) {
|
||||
log.Printf("Could not lstat %s: %s", event.Name, err)
|
||||
}
|
||||
} else if stat.Mode()&os.ModeSymlink != 0 {
|
||||
target, err := filepath.EvalSymlinks(event.Name)
|
||||
if err == nil && target != f.target && strings.HasSuffix(event.Name, f.filename) {
|
||||
f.target = target
|
||||
triggerEvent(event)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasSuffix(event.Name, f.filename) || strings.HasSuffix(event.Name, f.target) {
|
||||
triggerEvent(event)
|
||||
}
|
||||
case err := <-f.watcher.Errors:
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("Error watching %s: %s", f.filename, err)
|
||||
case <-f.closeCtx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
310
file_watcher_test.go
Normal file
310
file_watcher_test.go
Normal file
|
@ -0,0 +1,310 @@
|
|||
/**
|
||||
* Standalone signaling server for the Nextcloud Spreed app.
|
||||
* Copyright (C) 2024 struktur AG
|
||||
*
|
||||
* @author Joachim Bauch <bauch@struktur.de>
|
||||
*
|
||||
* @license GNU AGPL version 3 or any later version
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
package signaling
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
testWatcherNoEventTimeout = 2 * defaultDeduplicateWatchEvents
|
||||
)
|
||||
|
||||
func TestFileWatcher_NotExist(t *testing.T) {
|
||||
tmpdir := t.TempDir()
|
||||
w, err := NewFileWatcher(path.Join(tmpdir, "test.txt"), func(filename string) {})
|
||||
if err == nil {
|
||||
t.Error("should not be able to watch non-existing files")
|
||||
if err := w.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
} else if !errors.Is(err, os.ErrNotExist) {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileWatcher_File(t *testing.T) {
|
||||
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
||||
tmpdir := t.TempDir()
|
||||
filename := path.Join(tmpdir, "test.txt")
|
||||
if err := os.WriteFile(filename, []byte("Hello world!"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
modified := make(chan struct{})
|
||||
w, err := NewFileWatcher(filename, func(filename string) {
|
||||
modified <- struct{}{}
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer w.Close()
|
||||
|
||||
if err := os.WriteFile(filename, []byte("Updated"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
<-modified
|
||||
|
||||
ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
|
||||
defer cancel()
|
||||
|
||||
select {
|
||||
case <-modified:
|
||||
t.Error("should not have received another event")
|
||||
case <-ctxTimeout.Done():
|
||||
}
|
||||
|
||||
if err := os.WriteFile(filename, []byte("Updated"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
<-modified
|
||||
|
||||
ctxTimeout, cancel = context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
|
||||
defer cancel()
|
||||
|
||||
select {
|
||||
case <-modified:
|
||||
t.Error("should not have received another event")
|
||||
case <-ctxTimeout.Done():
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestFileWatcher_Rename(t *testing.T) {
|
||||
tmpdir := t.TempDir()
|
||||
filename := path.Join(tmpdir, "test.txt")
|
||||
if err := os.WriteFile(filename, []byte("Hello world!"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
modified := make(chan struct{})
|
||||
w, err := NewFileWatcher(filename, func(filename string) {
|
||||
modified <- struct{}{}
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer w.Close()
|
||||
|
||||
filename2 := path.Join(tmpdir, "test.txt.tmp")
|
||||
if err := os.WriteFile(filename2, []byte("Updated"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
|
||||
defer cancel()
|
||||
|
||||
select {
|
||||
case <-modified:
|
||||
t.Error("should not have received another event")
|
||||
case <-ctxTimeout.Done():
|
||||
}
|
||||
|
||||
if err := os.Rename(filename2, filename); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
<-modified
|
||||
|
||||
ctxTimeout, cancel = context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
|
||||
defer cancel()
|
||||
|
||||
select {
|
||||
case <-modified:
|
||||
t.Error("should not have received another event")
|
||||
case <-ctxTimeout.Done():
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileWatcher_Symlink(t *testing.T) {
|
||||
tmpdir := t.TempDir()
|
||||
sourceFilename := path.Join(tmpdir, "test1.txt")
|
||||
if err := os.WriteFile(sourceFilename, []byte("Hello world!"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
filename := path.Join(tmpdir, "symlink.txt")
|
||||
if err := os.Symlink(sourceFilename, filename); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
modified := make(chan struct{})
|
||||
w, err := NewFileWatcher(filename, func(filename string) {
|
||||
modified <- struct{}{}
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer w.Close()
|
||||
|
||||
if err := os.WriteFile(sourceFilename, []byte("Updated"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
<-modified
|
||||
|
||||
ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
|
||||
defer cancel()
|
||||
|
||||
select {
|
||||
case <-modified:
|
||||
t.Error("should not have received another event")
|
||||
case <-ctxTimeout.Done():
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileWatcher_ChangeSymlinkTarget(t *testing.T) {
|
||||
tmpdir := t.TempDir()
|
||||
sourceFilename1 := path.Join(tmpdir, "test1.txt")
|
||||
if err := os.WriteFile(sourceFilename1, []byte("Hello world!"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sourceFilename2 := path.Join(tmpdir, "test2.txt")
|
||||
if err := os.WriteFile(sourceFilename2, []byte("Updated"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
filename := path.Join(tmpdir, "symlink.txt")
|
||||
if err := os.Symlink(sourceFilename1, filename); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
modified := make(chan struct{})
|
||||
w, err := NewFileWatcher(filename, func(filename string) {
|
||||
modified <- struct{}{}
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer w.Close()
|
||||
|
||||
// Replace symlink by creating new one and rename it to the original target.
|
||||
if err := os.Symlink(sourceFilename2, filename+".tmp"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.Rename(filename+".tmp", filename); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
<-modified
|
||||
|
||||
ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
|
||||
defer cancel()
|
||||
|
||||
select {
|
||||
case <-modified:
|
||||
t.Error("should not have received another event")
|
||||
case <-ctxTimeout.Done():
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileWatcher_OtherSymlink(t *testing.T) {
|
||||
tmpdir := t.TempDir()
|
||||
sourceFilename1 := path.Join(tmpdir, "test1.txt")
|
||||
if err := os.WriteFile(sourceFilename1, []byte("Hello world!"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sourceFilename2 := path.Join(tmpdir, "test2.txt")
|
||||
if err := os.WriteFile(sourceFilename2, []byte("Updated"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
filename := path.Join(tmpdir, "symlink.txt")
|
||||
if err := os.Symlink(sourceFilename1, filename); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
modified := make(chan struct{})
|
||||
w, err := NewFileWatcher(filename, func(filename string) {
|
||||
modified <- struct{}{}
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer w.Close()
|
||||
|
||||
if err := os.Symlink(sourceFilename2, filename+".tmp"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
|
||||
defer cancel()
|
||||
|
||||
select {
|
||||
case <-modified:
|
||||
t.Error("should not have received event for other symlink")
|
||||
case <-ctxTimeout.Done():
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileWatcher_RenameSymlinkTarget(t *testing.T) {
|
||||
tmpdir := t.TempDir()
|
||||
sourceFilename1 := path.Join(tmpdir, "test1.txt")
|
||||
if err := os.WriteFile(sourceFilename1, []byte("Hello world!"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
filename := path.Join(tmpdir, "test.txt")
|
||||
if err := os.Symlink(sourceFilename1, filename); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
modified := make(chan struct{})
|
||||
w, err := NewFileWatcher(filename, func(filename string) {
|
||||
modified <- struct{}{}
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer w.Close()
|
||||
|
||||
sourceFilename2 := path.Join(tmpdir, "test1.txt.tmp")
|
||||
if err := os.WriteFile(sourceFilename2, []byte("Updated"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
|
||||
defer cancel()
|
||||
|
||||
select {
|
||||
case <-modified:
|
||||
t.Error("should not have received another event")
|
||||
case <-ctxTimeout.Done():
|
||||
}
|
||||
|
||||
if err := os.Rename(sourceFilename2, sourceFilename1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
<-modified
|
||||
|
||||
ctxTimeout, cancel = context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
|
||||
defer cancel()
|
||||
|
||||
select {
|
||||
case <-modified:
|
||||
t.Error("should not have received another event")
|
||||
case <-ctxTimeout.Done():
|
||||
}
|
||||
}
|
|
@ -97,6 +97,7 @@ func runConcurrentFlags(t *testing.T, count int, f func()) {
|
|||
}
|
||||
|
||||
func TestFlagsConcurrentAdd(t *testing.T) {
|
||||
t.Parallel()
|
||||
var flags Flags
|
||||
|
||||
var added atomic.Int32
|
||||
|
@ -111,6 +112,7 @@ func TestFlagsConcurrentAdd(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFlagsConcurrentRemove(t *testing.T) {
|
||||
t.Parallel()
|
||||
var flags Flags
|
||||
flags.Set(1)
|
||||
|
||||
|
@ -126,6 +128,7 @@ func TestFlagsConcurrentRemove(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFlagsConcurrentSet(t *testing.T) {
|
||||
t.Parallel()
|
||||
var flags Flags
|
||||
|
||||
var set atomic.Int32
|
||||
|
|
39
geoip.go
39
geoip.go
|
@ -156,36 +156,45 @@ func (g *GeoLookup) updateUrl() error {
|
|||
}
|
||||
|
||||
body := response.Body
|
||||
if strings.HasSuffix(g.url, ".gz") {
|
||||
url := g.url
|
||||
if strings.HasSuffix(url, ".gz") {
|
||||
body, err = gzip.NewReader(body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
url = strings.TrimSuffix(url, ".gz")
|
||||
}
|
||||
|
||||
tarfile := tar.NewReader(body)
|
||||
var geoipdata []byte
|
||||
for {
|
||||
header, err := tarfile.Next()
|
||||
if err == io.EOF {
|
||||
if strings.HasSuffix(url, ".tar") || strings.HasSuffix(url, "=tar") {
|
||||
tarfile := tar.NewReader(body)
|
||||
for {
|
||||
header, err := tarfile.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(header.Name, ".mmdb") {
|
||||
continue
|
||||
}
|
||||
|
||||
geoipdata, err = io.ReadAll(tarfile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
break
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(header.Name, ".mmdb") {
|
||||
continue
|
||||
}
|
||||
|
||||
geoipdata, err = io.ReadAll(tarfile)
|
||||
} else {
|
||||
geoipdata, err = io.ReadAll(body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
if len(geoipdata) == 0 {
|
||||
return fmt.Errorf("did not find MaxMind database in tarball from %s", g.url)
|
||||
return fmt.Errorf("did not find GeoIP database in download from %s", g.url)
|
||||
}
|
||||
|
||||
reader, err := maxminddb.FromBytes(geoipdata)
|
||||
|
|
|
@ -24,12 +24,14 @@ package signaling
|
|||
import (
|
||||
"archive/tar"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func testGeoLookupReader(t *testing.T, reader *GeoLookup) {
|
||||
|
@ -57,13 +59,27 @@ func testGeoLookupReader(t *testing.T, reader *GeoLookup) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGeoLookup(t *testing.T) {
|
||||
license := os.Getenv("MAXMIND_GEOLITE2_LICENSE")
|
||||
if license == "" {
|
||||
t.Skip("No MaxMind GeoLite2 license was set in MAXMIND_GEOLITE2_LICENSE environment variable.")
|
||||
}
|
||||
func GetGeoIpUrlForTest(t *testing.T) string {
|
||||
t.Helper()
|
||||
|
||||
reader, err := NewGeoLookupFromUrl(GetGeoIpDownloadUrl(license))
|
||||
var geoIpUrl string
|
||||
if os.Getenv("USE_DB_IP_GEOIP_DATABASE") != "" {
|
||||
now := time.Now().UTC()
|
||||
geoIpUrl = fmt.Sprintf("https://download.db-ip.com/free/dbip-country-lite-%d-%.2d.mmdb.gz", now.Year(), now.Month())
|
||||
}
|
||||
if geoIpUrl == "" {
|
||||
license := os.Getenv("MAXMIND_GEOLITE2_LICENSE")
|
||||
if license == "" {
|
||||
t.Skip("No MaxMind GeoLite2 license was set in MAXMIND_GEOLITE2_LICENSE environment variable.")
|
||||
}
|
||||
geoIpUrl = GetGeoIpDownloadUrl(license)
|
||||
}
|
||||
return geoIpUrl
|
||||
}
|
||||
|
||||
func TestGeoLookup(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
reader, err := NewGeoLookupFromUrl(GetGeoIpUrlForTest(t))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -77,12 +93,8 @@ func TestGeoLookup(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGeoLookupCaching(t *testing.T) {
|
||||
license := os.Getenv("MAXMIND_GEOLITE2_LICENSE")
|
||||
if license == "" {
|
||||
t.Skip("No MaxMind GeoLite2 license was set in MAXMIND_GEOLITE2_LICENSE environment variable.")
|
||||
}
|
||||
|
||||
reader, err := NewGeoLookupFromUrl(GetGeoIpDownloadUrl(license))
|
||||
CatchLogForTest(t)
|
||||
reader, err := NewGeoLookupFromUrl(GetGeoIpUrlForTest(t))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -128,6 +140,7 @@ func TestGeoLookupContinent(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGeoLookupCloseEmpty(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
reader, err := NewGeoLookupFromUrl("ignore-url")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -136,24 +149,23 @@ func TestGeoLookupCloseEmpty(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGeoLookupFromFile(t *testing.T) {
|
||||
license := os.Getenv("MAXMIND_GEOLITE2_LICENSE")
|
||||
if license == "" {
|
||||
t.Skip("No MaxMind GeoLite2 license was set in MAXMIND_GEOLITE2_LICENSE environment variable.")
|
||||
}
|
||||
CatchLogForTest(t)
|
||||
geoIpUrl := GetGeoIpUrlForTest(t)
|
||||
|
||||
url := GetGeoIpDownloadUrl(license)
|
||||
resp, err := http.Get(url)
|
||||
resp, err := http.Get(geoIpUrl)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body := resp.Body
|
||||
if strings.HasSuffix(url, ".gz") {
|
||||
url := geoIpUrl
|
||||
if strings.HasSuffix(geoIpUrl, ".gz") {
|
||||
body, err = gzip.NewReader(body)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
url = strings.TrimSuffix(url, ".gz")
|
||||
}
|
||||
|
||||
tmpfile, err := os.CreateTemp("", "geoipdb")
|
||||
|
@ -164,21 +176,33 @@ func TestGeoLookupFromFile(t *testing.T) {
|
|||
os.Remove(tmpfile.Name())
|
||||
})
|
||||
|
||||
tarfile := tar.NewReader(body)
|
||||
foundDatabase := false
|
||||
for {
|
||||
header, err := tarfile.Next()
|
||||
if err == io.EOF {
|
||||
if strings.HasSuffix(url, ".tar") || strings.HasSuffix(url, "=tar") {
|
||||
tarfile := tar.NewReader(body)
|
||||
for {
|
||||
header, err := tarfile.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(header.Name, ".mmdb") {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err := io.Copy(tmpfile, tarfile); err != nil {
|
||||
tmpfile.Close()
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := tmpfile.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
foundDatabase = true
|
||||
break
|
||||
} else if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(header.Name, ".mmdb") {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err := io.Copy(tmpfile, tarfile); err != nil {
|
||||
} else {
|
||||
if _, err := io.Copy(tmpfile, body); err != nil {
|
||||
tmpfile.Close()
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -186,11 +210,10 @@ func TestGeoLookupFromFile(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
foundDatabase = true
|
||||
break
|
||||
}
|
||||
|
||||
if !foundDatabase {
|
||||
t.Fatal("Did not find MaxMind database in tarball")
|
||||
t.Fatalf("Did not find GeoIP database in download from %s", geoIpUrl)
|
||||
}
|
||||
|
||||
reader, err := NewGeoLookupFromFile(tmpfile.Name())
|
||||
|
|
80
go.mod
80
go.mod
|
@ -1,33 +1,35 @@
|
|||
module github.com/strukturag/nextcloud-spreed-signaling
|
||||
|
||||
go 1.20
|
||||
go 1.21
|
||||
|
||||
require (
|
||||
github.com/dlintw/goconf v0.0.0-20120228082610-dcc070983490
|
||||
github.com/fsnotify/fsnotify v1.7.0
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0
|
||||
github.com/google/uuid v1.4.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/gorilla/securecookie v1.1.2
|
||||
github.com/gorilla/websocket v1.5.1
|
||||
github.com/mailru/easyjson v0.7.7
|
||||
github.com/nats-io/nats-server/v2 v2.10.5
|
||||
github.com/nats-io/nats.go v1.31.0
|
||||
github.com/nats-io/nats-server/v2 v2.10.16
|
||||
github.com/nats-io/nats.go v1.35.0
|
||||
github.com/notedit/janus-go v0.0.0-20200517101215-10eb8b95d1a0
|
||||
github.com/oschwald/maxminddb-golang v1.12.0
|
||||
github.com/pion/sdp/v3 v3.0.6
|
||||
github.com/prometheus/client_golang v1.17.0
|
||||
go.etcd.io/etcd/api/v3 v3.5.10
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.10
|
||||
go.etcd.io/etcd/client/v3 v3.5.10
|
||||
go.etcd.io/etcd/server/v3 v3.5.10
|
||||
google.golang.org/grpc v1.59.0
|
||||
github.com/pion/sdp/v3 v3.0.9
|
||||
github.com/prometheus/client_golang v1.19.1
|
||||
go.etcd.io/etcd/api/v3 v3.5.13
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.13
|
||||
go.etcd.io/etcd/client/v3 v3.5.13
|
||||
go.etcd.io/etcd/server/v3 v3.5.13
|
||||
go.uber.org/zap v1.27.0
|
||||
google.golang.org/grpc v1.64.0
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0
|
||||
google.golang.org/protobuf v1.31.0
|
||||
google.golang.org/protobuf v1.34.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.1.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/coreos/go-semver v0.3.0 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
|
||||
|
@ -36,54 +38,52 @@ require (
|
|||
github.com/go-logr/logr v1.3.0 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
|
||||
github.com/jonboulle/clockwork v0.2.2 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.2 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/klauspost/compress v1.17.8 // indirect
|
||||
github.com/minio/highwayhash v1.0.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/nats-io/jwt/v2 v2.5.3 // indirect
|
||||
github.com/nats-io/nkeys v0.4.6 // indirect
|
||||
github.com/nats-io/jwt/v2 v2.5.7 // indirect
|
||||
github.com/nats-io/nkeys v0.4.7 // indirect
|
||||
github.com/nats-io/nuid v1.0.1 // indirect
|
||||
github.com/pion/randutil v0.1.0 // indirect
|
||||
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
|
||||
github.com/prometheus/common v0.44.0 // indirect
|
||||
github.com/prometheus/procfs v0.11.1 // indirect
|
||||
github.com/sirupsen/logrus v1.7.0 // indirect
|
||||
github.com/prometheus/client_model v0.5.0 // indirect
|
||||
github.com/prometheus/common v0.48.0 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/soheilhy/cmux v0.1.5 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
|
||||
go.etcd.io/bbolt v1.3.8 // indirect
|
||||
go.etcd.io/etcd/client/v2 v2.305.10 // indirect
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.10 // indirect
|
||||
go.etcd.io/etcd/raft/v3 v3.5.10 // indirect
|
||||
go.etcd.io/bbolt v1.3.9 // indirect
|
||||
go.etcd.io/etcd/client/v2 v2.305.13 // indirect
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.13 // indirect
|
||||
go.etcd.io/etcd/raft/v3 v3.5.13 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect
|
||||
go.opentelemetry.io/otel v1.20.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.20.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.20.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.20.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v0.9.0 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
go.uber.org/zap v1.17.0 // indirect
|
||||
golang.org/x/crypto v0.15.0 // indirect
|
||||
golang.org/x/net v0.17.0 // indirect
|
||||
golang.org/x/sys v0.14.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.4.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
|
||||
go.uber.org/multierr v1.10.0 // indirect
|
||||
golang.org/x/crypto v0.23.0 // indirect
|
||||
golang.org/x/net v0.23.0 // indirect
|
||||
golang.org/x/sys v0.20.0 // indirect
|
||||
golang.org/x/text v0.15.0 // indirect
|
||||
golang.org/x/time v0.5.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
sigs.k8s.io/yaml v1.2.0 // indirect
|
||||
|
|
243
go.sum
243
go.sum
|
@ -1,25 +1,26 @@
|
|||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o=
|
||||
cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
|
||||
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
|
||||
cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU=
|
||||
cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls=
|
||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ=
|
||||
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
|
||||
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
|
||||
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
|
||||
github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
|
||||
github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
|
||||
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
|
||||
|
@ -34,11 +35,11 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn
|
|||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
|
@ -54,37 +55,26 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
|
|||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
|
||||
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
|
||||
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA=
|
||||
|
@ -97,6 +87,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy
|
|||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
|
||||
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
|
||||
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
|
@ -105,16 +97,16 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
|
|||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
|
||||
github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
|
||||
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
|
||||
github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
|
@ -122,14 +114,14 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
|
|||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/nats-io/jwt/v2 v2.5.3 h1:/9SWvzc6hTfamcgXJ3uYRpgj+QuY2aLNqRiqrKcrpEo=
|
||||
github.com/nats-io/jwt/v2 v2.5.3/go.mod h1:iysuPemFcc7p4IoYots3IuELSI4EDe9Y0bQMe+I3Bf4=
|
||||
github.com/nats-io/nats-server/v2 v2.10.5 h1:hhWt6m9ja/mNnm6ixc85jCthDaiUFPaeJI79K/MD980=
|
||||
github.com/nats-io/nats-server/v2 v2.10.5/go.mod h1:xUMTU4kS//SDkJCSvFwN9SyJ9nUuLhSkzB/Qz0dvjjg=
|
||||
github.com/nats-io/nats.go v1.31.0 h1:/WFBHEc/dOKBF6qf1TZhrdEfTmOZ5JzdJ+Y3m6Y/p7E=
|
||||
github.com/nats-io/nats.go v1.31.0/go.mod h1:di3Bm5MLsoB4Bx61CBTsxuarI36WbhAwOm8QrW39+i8=
|
||||
github.com/nats-io/nkeys v0.4.6 h1:IzVe95ru2CT6ta874rt9saQRkWfe2nFj1NtvYSLqMzY=
|
||||
github.com/nats-io/nkeys v0.4.6/go.mod h1:4DxZNzenSVd1cYQoAa8948QY3QDjrHfcfVADymtkpts=
|
||||
github.com/nats-io/jwt/v2 v2.5.7 h1:j5lH1fUXCnJnY8SsQeB/a/z9Azgu2bYIDvtPVNdxe2c=
|
||||
github.com/nats-io/jwt/v2 v2.5.7/go.mod h1:ZdWS1nZa6WMZfFwwgpEaqBV8EPGVgOTDHN/wTbz0Y5A=
|
||||
github.com/nats-io/nats-server/v2 v2.10.16 h1:2jXaiydp5oB/nAx/Ytf9fdCi9QN6ItIc9eehX8kwVV0=
|
||||
github.com/nats-io/nats-server/v2 v2.10.16/go.mod h1:Pksi38H2+6xLe1vQx0/EA4bzetM0NqyIHcIbmgXSkIU=
|
||||
github.com/nats-io/nats.go v1.35.0 h1:XFNqNM7v5B+MQMKqVGAyHwYhyKb48jrenXNxIU20ULk=
|
||||
github.com/nats-io/nats.go v1.35.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
|
||||
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
|
||||
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
|
||||
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/notedit/janus-go v0.0.0-20200517101215-10eb8b95d1a0 h1:EFU9iv8BMPyBo8iFMHvQleYlF5M3PY6zpAbxsngImjE=
|
||||
|
@ -139,95 +131,96 @@ github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq5
|
|||
github.com/oschwald/maxminddb-golang v1.12.0/go.mod h1:q0Nob5lTCqyQ8WT6FYgS1L7PXKVVbgiymefNwIjPzgY=
|
||||
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
|
||||
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
|
||||
github.com/pion/sdp/v3 v3.0.6 h1:WuDLhtuFUUVpTfus9ILC4HRyHsW6TdugjEX/QY9OiUw=
|
||||
github.com/pion/sdp/v3 v3.0.6/go.mod h1:iiFWFpQO8Fy3S5ldclBkpXqmWy02ns78NOKoLLL0YQw=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY=
|
||||
github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
|
||||
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
|
||||
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
|
||||
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
|
||||
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
|
||||
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
|
||||
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
|
||||
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
|
||||
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
|
||||
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
||||
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
||||
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
|
||||
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
|
||||
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
|
||||
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
|
||||
go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k=
|
||||
go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U=
|
||||
go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4=
|
||||
go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA=
|
||||
go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao=
|
||||
go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc=
|
||||
go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg=
|
||||
go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo=
|
||||
go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
|
||||
go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
|
||||
go.etcd.io/etcd/api/v3 v3.5.13 h1:8WXU2/NBge6AUF1K1gOexB6e07NgsN1hXK0rSTtgSp4=
|
||||
go.etcd.io/etcd/api/v3 v3.5.13/go.mod h1:gBqlqkcMMZMVTMm4NDZloEVJzxQOQIls8splbqBDa0c=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.13 h1:RVZSAnWWWiI5IrYAXjQorajncORbS0zI48LQlE2kQWg=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.13/go.mod h1:XxHT4u1qU12E2+po+UVPrEeL94Um6zL58ppuJWXSAB8=
|
||||
go.etcd.io/etcd/client/v2 v2.305.13 h1:RWfV1SX5jTU0lbCvpVQe3iPQeAHETWdOTb6pxhd77C8=
|
||||
go.etcd.io/etcd/client/v2 v2.305.13/go.mod h1:iQnL7fepbiomdXMb3om1rHq96htNNGv2sJkEcZGDRRg=
|
||||
go.etcd.io/etcd/client/v3 v3.5.13 h1:o0fHTNJLeO0MyVbc7I3fsCf6nrOqn5d+diSarKnB2js=
|
||||
go.etcd.io/etcd/client/v3 v3.5.13/go.mod h1:cqiAeY8b5DEEcpxvgWKsbLIWNM/8Wy2xJSDMtioMcoI=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.13 h1:st9bDWNsKkBNpP4PR1MvM/9NqUPfvYZx/YXegsYEH8M=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.13/go.mod h1:N+4PLrp7agI/Viy+dUYpX7iRtSPvKq+w8Y14d1vX+m0=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.13 h1:7r/NKAOups1YnKcfro2RvGGo2PTuizF/xh26Z2CTAzA=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.13/go.mod h1:uUFibGLn2Ksm2URMxN1fICGhk8Wu96EfDQyuLhAcAmw=
|
||||
go.etcd.io/etcd/server/v3 v3.5.13 h1:V6KG+yMfMSqWt+lGnhFpP5z5dRUj1BDRJ5k1fQ9DFok=
|
||||
go.etcd.io/etcd/server/v3 v3.5.13/go.mod h1:K/8nbsGupHqmr5MkgaZpLlH1QdX1pcNQLAkODy44XcQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M=
|
||||
go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU=
|
||||
go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc=
|
||||
go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1 h1:ofMbch7i29qIUf7VtF+r0HRF6ac0SBaPSziSsKp7wkk=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1 h1:CFMFNoz+CGprjFAFy+RJFrfEe4GBia3RRm2a4fREvCA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 h1:DeFD0VgTZ+Cj6hxravYYZE2W4GlneVH81iAOPjZkzk8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0/go.mod h1:GijYcYmNpX1KazD5JmWGsi4P7dDTTTnfv1UbGn84MnU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 h1:gvmNvqrPYovvyRmCSygkUDyL8lC5Tl845MLEwqpxhEU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0=
|
||||
go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA=
|
||||
go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM=
|
||||
go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI=
|
||||
go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM=
|
||||
go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0=
|
||||
go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk=
|
||||
go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ=
|
||||
go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4=
|
||||
go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
|
||||
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
|
||||
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
|
||||
golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
|
||||
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
|
@ -245,34 +238,36 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
|
|||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
|
||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
|
||||
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
|
||||
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
|
||||
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
|
||||
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.4.0 h1:Z81tqI5ddIoXDPvVQ7/7CC9TnLM7ubaFG2qXYd5BbYY=
|
||||
golang.org/x/time v0.4.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
|
@ -287,47 +282,33 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
|
|||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
|
||||
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
|
||||
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
|
||||
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
|
||||
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
|
||||
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
|
||||
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
|
||||
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
|
||||
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
@ -336,8 +317,8 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
|
|
460
grpc_client.go
460
grpc_client.go
|
@ -24,7 +24,9 @@ package signaling
|
|||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"net/url"
|
||||
|
@ -37,6 +39,8 @@ import (
|
|||
clientv3 "go.etcd.io/etcd/client/v3"
|
||||
"google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/resolver"
|
||||
status "google.golang.org/grpc/status"
|
||||
)
|
||||
|
@ -49,7 +53,7 @@ const (
|
|||
)
|
||||
|
||||
var (
|
||||
lookupGrpcIp = net.LookupIP // can be overwritten from tests
|
||||
ErrNoSuchResumeId = fmt.Errorf("unknown resume id")
|
||||
|
||||
customResolverPrefix atomic.Uint64
|
||||
)
|
||||
|
@ -138,9 +142,9 @@ func NewGrpcClient(target string, ip net.IP, opts ...grpc.DialOption) (*GrpcClie
|
|||
hostname: hostname,
|
||||
}
|
||||
opts = append(opts, grpc.WithResolvers(resolver))
|
||||
conn, err = grpc.Dial(fmt.Sprintf("%s://%s", resolver.Scheme(), target), opts...)
|
||||
conn, err = grpc.NewClient(fmt.Sprintf("%s://%s", resolver.Scheme(), target), opts...)
|
||||
} else {
|
||||
conn, err = grpc.Dial(target, opts...)
|
||||
conn, err = grpc.NewClient(target, opts...)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -185,6 +189,26 @@ func (c *GrpcClient) GetServerId(ctx context.Context) (string, error) {
|
|||
return response.GetServerId(), nil
|
||||
}
|
||||
|
||||
func (c *GrpcClient) LookupResumeId(ctx context.Context, resumeId string) (*LookupResumeIdReply, error) {
|
||||
statsGrpcClientCalls.WithLabelValues("LookupResumeId").Inc()
|
||||
// TODO: Remove debug logging
|
||||
log.Printf("Lookup resume id %s on %s", resumeId, c.Target())
|
||||
response, err := c.impl.LookupResumeId(ctx, &LookupResumeIdRequest{
|
||||
ResumeId: resumeId,
|
||||
}, grpc.WaitForReady(true))
|
||||
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
|
||||
return nil, ErrNoSuchResumeId
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if sessionId := response.GetSessionId(); sessionId == "" {
|
||||
return nil, ErrNoSuchResumeId
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (c *GrpcClient) LookupSessionId(ctx context.Context, roomSessionId string, disconnectReason string) (string, error) {
|
||||
statsGrpcClientCalls.WithLabelValues("LookupSessionId").Inc()
|
||||
// TODO: Remove debug logging
|
||||
|
@ -225,13 +249,13 @@ func (c *GrpcClient) IsSessionInCall(ctx context.Context, sessionId string, room
|
|||
return response.GetInCall(), nil
|
||||
}
|
||||
|
||||
func (c *GrpcClient) GetPublisherId(ctx context.Context, sessionId string, streamType string) (string, string, net.IP, error) {
|
||||
func (c *GrpcClient) GetPublisherId(ctx context.Context, sessionId string, streamType StreamType) (string, string, net.IP, error) {
|
||||
statsGrpcClientCalls.WithLabelValues("GetPublisherId").Inc()
|
||||
// TODO: Remove debug logging
|
||||
log.Printf("Get %s publisher id %s on %s", streamType, sessionId, c.Target())
|
||||
response, err := c.impl.GetPublisherId(ctx, &GetPublisherIdRequest{
|
||||
SessionId: sessionId,
|
||||
StreamType: streamType,
|
||||
StreamType: string(streamType),
|
||||
}, grpc.WaitForReady(true))
|
||||
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
|
||||
return "", "", nil, nil
|
||||
|
@ -258,37 +282,125 @@ func (c *GrpcClient) GetSessionCount(ctx context.Context, u *url.URL) (uint32, e
|
|||
return response.GetCount(), nil
|
||||
}
|
||||
|
||||
type ProxySessionReceiver interface {
|
||||
RemoteAddr() string
|
||||
Country() string
|
||||
UserAgent() string
|
||||
|
||||
OnProxyMessage(message *ServerSessionMessage) error
|
||||
OnProxyClose(err error)
|
||||
}
|
||||
|
||||
type SessionProxy struct {
|
||||
sessionId string
|
||||
receiver ProxySessionReceiver
|
||||
|
||||
sendMu sync.Mutex
|
||||
client RpcSessions_ProxySessionClient
|
||||
}
|
||||
|
||||
func (p *SessionProxy) recvPump() {
|
||||
var closeError error
|
||||
defer func() {
|
||||
p.receiver.OnProxyClose(closeError)
|
||||
if err := p.Close(); err != nil {
|
||||
log.Printf("Error closing proxy for session %s: %s", p.sessionId, err)
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
msg, err := p.client.Recv()
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
|
||||
log.Printf("Error receiving message from proxy for session %s: %s", p.sessionId, err)
|
||||
closeError = err
|
||||
break
|
||||
}
|
||||
|
||||
if err := p.receiver.OnProxyMessage(msg); err != nil {
|
||||
log.Printf("Error processing message %+v from proxy for session %s: %s", msg, p.sessionId, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *SessionProxy) Send(message *ClientSessionMessage) error {
|
||||
p.sendMu.Lock()
|
||||
defer p.sendMu.Unlock()
|
||||
return p.client.Send(message)
|
||||
}
|
||||
|
||||
func (p *SessionProxy) Close() error {
|
||||
p.sendMu.Lock()
|
||||
defer p.sendMu.Unlock()
|
||||
return p.client.CloseSend()
|
||||
}
|
||||
|
||||
func (c *GrpcClient) ProxySession(ctx context.Context, sessionId string, receiver ProxySessionReceiver) (*SessionProxy, error) {
|
||||
statsGrpcClientCalls.WithLabelValues("ProxySession").Inc()
|
||||
md := metadata.Pairs(
|
||||
"sessionId", sessionId,
|
||||
"remoteAddr", receiver.RemoteAddr(),
|
||||
"country", receiver.Country(),
|
||||
"userAgent", receiver.UserAgent(),
|
||||
)
|
||||
client, err := c.impl.ProxySession(metadata.NewOutgoingContext(ctx, md), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
proxy := &SessionProxy{
|
||||
sessionId: sessionId,
|
||||
receiver: receiver,
|
||||
|
||||
client: client,
|
||||
}
|
||||
|
||||
go proxy.recvPump()
|
||||
return proxy, nil
|
||||
}
|
||||
|
||||
type grpcClientsList struct {
|
||||
clients []*GrpcClient
|
||||
entry *DnsMonitorEntry
|
||||
}
|
||||
|
||||
type GrpcClients struct {
|
||||
mu sync.RWMutex
|
||||
|
||||
clientsMap map[string][]*GrpcClient
|
||||
clientsMap map[string]*grpcClientsList
|
||||
clients []*GrpcClient
|
||||
|
||||
dnsMonitor *DnsMonitor
|
||||
dnsDiscovery bool
|
||||
stopping chan struct{}
|
||||
stopped chan struct{}
|
||||
|
||||
etcdClient *EtcdClient
|
||||
targetPrefix string
|
||||
targetInformation map[string]*GrpcTargetInformationEtcd
|
||||
dialOptions atomic.Value // []grpc.DialOption
|
||||
creds credentials.TransportCredentials
|
||||
|
||||
initializedCtx context.Context
|
||||
initializedFunc context.CancelFunc
|
||||
initializedWg sync.WaitGroup
|
||||
wakeupChanForTesting chan struct{}
|
||||
selfCheckWaitGroup sync.WaitGroup
|
||||
|
||||
closeCtx context.Context
|
||||
closeFunc context.CancelFunc
|
||||
}
|
||||
|
||||
func NewGrpcClients(config *goconf.ConfigFile, etcdClient *EtcdClient) (*GrpcClients, error) {
|
||||
func NewGrpcClients(config *goconf.ConfigFile, etcdClient *EtcdClient, dnsMonitor *DnsMonitor) (*GrpcClients, error) {
|
||||
initializedCtx, initializedFunc := context.WithCancel(context.Background())
|
||||
closeCtx, closeFunc := context.WithCancel(context.Background())
|
||||
result := &GrpcClients{
|
||||
dnsMonitor: dnsMonitor,
|
||||
etcdClient: etcdClient,
|
||||
initializedCtx: initializedCtx,
|
||||
initializedFunc: initializedFunc,
|
||||
|
||||
stopping: make(chan struct{}, 1),
|
||||
stopped: make(chan struct{}, 1),
|
||||
closeCtx: closeCtx,
|
||||
closeFunc: closeFunc,
|
||||
}
|
||||
if err := result.load(config, false); err != nil {
|
||||
return nil, err
|
||||
|
@ -302,6 +414,13 @@ func (c *GrpcClients) load(config *goconf.ConfigFile, fromReload bool) error {
|
|||
return err
|
||||
}
|
||||
|
||||
if c.creds != nil {
|
||||
if cr, ok := c.creds.(*reloadableCredentials); ok {
|
||||
cr.Close()
|
||||
}
|
||||
}
|
||||
c.creds = creds
|
||||
|
||||
opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)}
|
||||
c.dialOptions.Store(opts)
|
||||
|
||||
|
@ -313,9 +432,6 @@ func (c *GrpcClients) load(config *goconf.ConfigFile, fromReload bool) error {
|
|||
switch targetType {
|
||||
case GrpcTargetTypeStatic:
|
||||
err = c.loadTargetsStatic(config, fromReload, opts...)
|
||||
if err == nil && c.dnsDiscovery {
|
||||
go c.monitorGrpcIPs()
|
||||
}
|
||||
case GrpcTargetTypeEtcd:
|
||||
err = c.loadTargetsEtcd(config, fromReload, opts...)
|
||||
default:
|
||||
|
@ -344,7 +460,7 @@ func (c *GrpcClients) isClientAvailable(target string, client *GrpcClient) bool
|
|||
return false
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
for _, entry := range entries.clients {
|
||||
if entry == client {
|
||||
return true
|
||||
}
|
||||
|
@ -378,6 +494,10 @@ loop:
|
|||
|
||||
id, err := c.getServerIdWithTimeout(ctx, client)
|
||||
if err != nil {
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return
|
||||
}
|
||||
|
||||
if status.Code(err) != codes.Canceled {
|
||||
log.Printf("Error checking GRPC server id of %s, retrying in %s: %s", client.Target(), backoff.NextWait(), err)
|
||||
}
|
||||
|
@ -401,7 +521,20 @@ func (c *GrpcClients) loadTargetsStatic(config *goconf.ConfigFile, fromReload bo
|
|||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
clientsMap := make(map[string][]*GrpcClient)
|
||||
dnsDiscovery, _ := config.GetBool("grpc", "dnsdiscovery")
|
||||
if dnsDiscovery != c.dnsDiscovery {
|
||||
if !dnsDiscovery {
|
||||
for _, entry := range c.clientsMap {
|
||||
if entry.entry != nil {
|
||||
c.dnsMonitor.Remove(entry.entry)
|
||||
entry.entry = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
c.dnsDiscovery = dnsDiscovery
|
||||
}
|
||||
|
||||
clientsMap := make(map[string]*grpcClientsList)
|
||||
var clients []*GrpcClient
|
||||
removeTargets := make(map[string]bool, len(c.clientsMap))
|
||||
for target, entries := range c.clientsMap {
|
||||
|
@ -417,7 +550,15 @@ func (c *GrpcClients) loadTargetsStatic(config *goconf.ConfigFile, fromReload bo
|
|||
}
|
||||
|
||||
if entries, found := clientsMap[target]; found {
|
||||
clients = append(clients, entries...)
|
||||
clients = append(clients, entries.clients...)
|
||||
if dnsDiscovery && entries.entry == nil {
|
||||
entry, err := c.dnsMonitor.Add(target, c.onLookup)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
entries.entry = entry
|
||||
}
|
||||
delete(removeTargets, target)
|
||||
continue
|
||||
}
|
||||
|
@ -427,61 +568,59 @@ func (c *GrpcClients) loadTargetsStatic(config *goconf.ConfigFile, fromReload bo
|
|||
host = h
|
||||
}
|
||||
|
||||
var ips []net.IP
|
||||
if net.ParseIP(host) == nil {
|
||||
if dnsDiscovery && net.ParseIP(host) == nil {
|
||||
// Use dedicated client for each IP address.
|
||||
var err error
|
||||
ips, err = lookupGrpcIp(host)
|
||||
entry, err := c.dnsMonitor.Add(target, c.onLookup)
|
||||
if err != nil {
|
||||
log.Printf("Could not lookup %s: %s", host, err)
|
||||
// Make sure updating continues even if initial lookup failed.
|
||||
clientsMap[target] = nil
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
// Connect directly to IP address.
|
||||
ips = []net.IP{nil}
|
||||
}
|
||||
|
||||
for _, ip := range ips {
|
||||
client, err := NewGrpcClient(target, ip, opts...)
|
||||
if err != nil {
|
||||
for _, clients := range clientsMap {
|
||||
for _, client := range clients {
|
||||
c.closeClient(client)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
c.selfCheckWaitGroup.Add(1)
|
||||
go c.checkIsSelf(context.Background(), target, client)
|
||||
|
||||
log.Printf("Adding %s as GRPC target", client.Target())
|
||||
clientsMap[target] = append(clientsMap[target], client)
|
||||
clients = append(clients, client)
|
||||
clientsMap[target] = &grpcClientsList{
|
||||
entry: entry,
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
client, err := NewGrpcClient(target, nil, opts...)
|
||||
if err != nil {
|
||||
for _, entry := range clientsMap {
|
||||
for _, client := range entry.clients {
|
||||
c.closeClient(client)
|
||||
}
|
||||
|
||||
if entry.entry != nil {
|
||||
c.dnsMonitor.Remove(entry.entry)
|
||||
entry.entry = nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
c.selfCheckWaitGroup.Add(1)
|
||||
go c.checkIsSelf(c.closeCtx, target, client)
|
||||
|
||||
log.Printf("Adding %s as GRPC target", client.Target())
|
||||
entry, found := clientsMap[target]
|
||||
if !found {
|
||||
entry = &grpcClientsList{}
|
||||
clientsMap[target] = entry
|
||||
}
|
||||
entry.clients = append(entry.clients, client)
|
||||
clients = append(clients, client)
|
||||
}
|
||||
|
||||
for target := range removeTargets {
|
||||
if clients, found := clientsMap[target]; found {
|
||||
for _, client := range clients {
|
||||
if entry, found := clientsMap[target]; found {
|
||||
for _, client := range entry.clients {
|
||||
log.Printf("Deleting GRPC target %s", client.Target())
|
||||
c.closeClient(client)
|
||||
}
|
||||
delete(clientsMap, target)
|
||||
}
|
||||
}
|
||||
|
||||
dnsDiscovery, _ := config.GetBool("grpc", "dnsdiscovery")
|
||||
if dnsDiscovery != c.dnsDiscovery {
|
||||
if !dnsDiscovery && fromReload {
|
||||
c.stopping <- struct{}{}
|
||||
<-c.stopped
|
||||
}
|
||||
c.dnsDiscovery = dnsDiscovery
|
||||
if dnsDiscovery && fromReload {
|
||||
go c.monitorGrpcIPs()
|
||||
if entry.entry != nil {
|
||||
c.dnsMonitor.Remove(entry.entry)
|
||||
entry.entry = nil
|
||||
}
|
||||
delete(clientsMap, target)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -492,91 +631,61 @@ func (c *GrpcClients) loadTargetsStatic(config *goconf.ConfigFile, fromReload bo
|
|||
return nil
|
||||
}
|
||||
|
||||
func (c *GrpcClients) monitorGrpcIPs() {
|
||||
log.Printf("Start monitoring GRPC client IPs")
|
||||
ticker := time.NewTicker(updateDnsInterval)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
c.updateGrpcIPs()
|
||||
case <-c.stopping:
|
||||
c.stopped <- struct{}{}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *GrpcClients) updateGrpcIPs() {
|
||||
func (c *GrpcClients) onLookup(entry *DnsMonitorEntry, all []net.IP, added []net.IP, keep []net.IP, removed []net.IP) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
target := entry.URL()
|
||||
e, found := c.clientsMap[target]
|
||||
if !found {
|
||||
return
|
||||
}
|
||||
|
||||
opts := c.dialOptions.Load().([]grpc.DialOption)
|
||||
|
||||
mapModified := false
|
||||
for target, clients := range c.clientsMap {
|
||||
host := target
|
||||
if h, _, err := net.SplitHostPort(target); err == nil {
|
||||
host = h
|
||||
}
|
||||
|
||||
if net.ParseIP(host) != nil {
|
||||
// No need to lookup endpoints that connect to IP addresses.
|
||||
continue
|
||||
}
|
||||
|
||||
ips, err := lookupGrpcIp(host)
|
||||
if err != nil {
|
||||
log.Printf("Could not lookup %s: %s", host, err)
|
||||
continue
|
||||
}
|
||||
|
||||
var newClients []*GrpcClient
|
||||
changed := false
|
||||
for _, client := range clients {
|
||||
found := false
|
||||
for idx, ip := range ips {
|
||||
if ip.Equal(client.ip) {
|
||||
ips = append(ips[:idx], ips[idx+1:]...)
|
||||
found = true
|
||||
newClients = append(newClients, client)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
changed = true
|
||||
var newClients []*GrpcClient
|
||||
for _, ip := range removed {
|
||||
for _, client := range e.clients {
|
||||
if ip.Equal(client.ip) {
|
||||
mapModified = true
|
||||
log.Printf("Removing connection to %s", client.Target())
|
||||
c.closeClient(client)
|
||||
c.wakeupForTesting()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, ip := range ips {
|
||||
client, err := NewGrpcClient(target, ip, opts...)
|
||||
if err != nil {
|
||||
log.Printf("Error creating client to %s with IP %s: %s", target, ip.String(), err)
|
||||
continue
|
||||
for _, ip := range keep {
|
||||
for _, client := range e.clients {
|
||||
if ip.Equal(client.ip) {
|
||||
newClients = append(newClients, client)
|
||||
}
|
||||
|
||||
c.selfCheckWaitGroup.Add(1)
|
||||
go c.checkIsSelf(context.Background(), target, client)
|
||||
|
||||
log.Printf("Adding %s as GRPC target", client.Target())
|
||||
newClients = append(newClients, client)
|
||||
changed = true
|
||||
c.wakeupForTesting()
|
||||
}
|
||||
|
||||
if changed {
|
||||
c.clientsMap[target] = newClients
|
||||
mapModified = true
|
||||
}
|
||||
}
|
||||
|
||||
for _, ip := range added {
|
||||
client, err := NewGrpcClient(target, ip, opts...)
|
||||
if err != nil {
|
||||
log.Printf("Error creating client to %s with IP %s: %s", target, ip.String(), err)
|
||||
continue
|
||||
}
|
||||
|
||||
c.selfCheckWaitGroup.Add(1)
|
||||
go c.checkIsSelf(c.closeCtx, target, client)
|
||||
|
||||
log.Printf("Adding %s as GRPC target", client.Target())
|
||||
newClients = append(newClients, client)
|
||||
mapModified = true
|
||||
c.wakeupForTesting()
|
||||
}
|
||||
|
||||
if mapModified {
|
||||
c.clientsMap[target].clients = newClients
|
||||
|
||||
c.clients = make([]*GrpcClient, 0, len(c.clientsMap))
|
||||
for _, clients := range c.clientsMap {
|
||||
c.clients = append(c.clients, clients...)
|
||||
for _, entry := range c.clientsMap {
|
||||
c.clients = append(c.clients, entry.clients...)
|
||||
}
|
||||
statsGrpcClients.Set(float64(len(c.clients)))
|
||||
}
|
||||
|
@ -601,52 +710,72 @@ func (c *GrpcClients) loadTargetsEtcd(config *goconf.ConfigFile, fromReload bool
|
|||
}
|
||||
|
||||
func (c *GrpcClients) EtcdClientCreated(client *EtcdClient) {
|
||||
c.initializedWg.Add(1)
|
||||
go func() {
|
||||
if err := client.Watch(context.Background(), c.targetPrefix, c, clientv3.WithPrefix()); err != nil {
|
||||
log.Printf("Error processing watch for %s: %s", c.targetPrefix, err)
|
||||
}
|
||||
}()
|
||||
if err := client.WaitForConnection(c.closeCtx); err != nil {
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
client.WaitForConnection()
|
||||
panic(err)
|
||||
}
|
||||
|
||||
backoff, _ := NewExponentialBackoff(initialWaitDelay, maxWaitDelay)
|
||||
for {
|
||||
response, err := c.getGrpcTargets(client, c.targetPrefix)
|
||||
var nextRevision int64
|
||||
for c.closeCtx.Err() == nil {
|
||||
response, err := c.getGrpcTargets(c.closeCtx, client, c.targetPrefix)
|
||||
if err != nil {
|
||||
if err == context.DeadlineExceeded {
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return
|
||||
} else if errors.Is(err, context.DeadlineExceeded) {
|
||||
log.Printf("Timeout getting initial list of GRPC targets, retry in %s", backoff.NextWait())
|
||||
} else {
|
||||
log.Printf("Could not get initial list of GRPC targets, retry in %s: %s", backoff.NextWait(), err)
|
||||
}
|
||||
|
||||
backoff.Wait(context.Background())
|
||||
backoff.Wait(c.closeCtx)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, ev := range response.Kvs {
|
||||
c.EtcdKeyUpdated(client, string(ev.Key), ev.Value)
|
||||
c.EtcdKeyUpdated(client, string(ev.Key), ev.Value, nil)
|
||||
}
|
||||
c.initializedWg.Wait()
|
||||
c.initializedFunc()
|
||||
return
|
||||
nextRevision = response.Header.Revision + 1
|
||||
break
|
||||
}
|
||||
|
||||
prevRevision := nextRevision
|
||||
backoff.Reset()
|
||||
for c.closeCtx.Err() == nil {
|
||||
var err error
|
||||
if nextRevision, err = client.Watch(c.closeCtx, c.targetPrefix, nextRevision, c, clientv3.WithPrefix()); err != nil {
|
||||
log.Printf("Error processing watch for %s (%s), retry in %s", c.targetPrefix, err, backoff.NextWait())
|
||||
backoff.Wait(c.closeCtx)
|
||||
continue
|
||||
}
|
||||
|
||||
if nextRevision != prevRevision {
|
||||
backoff.Reset()
|
||||
prevRevision = nextRevision
|
||||
} else {
|
||||
log.Printf("Processing watch for %s interrupted, retry in %s", c.targetPrefix, backoff.NextWait())
|
||||
backoff.Wait(c.closeCtx)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (c *GrpcClients) EtcdWatchCreated(client *EtcdClient, key string) {
|
||||
c.initializedWg.Done()
|
||||
}
|
||||
|
||||
func (c *GrpcClients) getGrpcTargets(client *EtcdClient, targetPrefix string) (*clientv3.GetResponse, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
func (c *GrpcClients) getGrpcTargets(ctx context.Context, client *EtcdClient, targetPrefix string) (*clientv3.GetResponse, error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
||||
defer cancel()
|
||||
|
||||
return client.Get(ctx, targetPrefix, clientv3.WithPrefix())
|
||||
}
|
||||
|
||||
func (c *GrpcClients) EtcdKeyUpdated(client *EtcdClient, key string, data []byte) {
|
||||
func (c *GrpcClients) EtcdKeyUpdated(client *EtcdClient, key string, data []byte, prevValue []byte) {
|
||||
var info GrpcTargetInformationEtcd
|
||||
if err := json.Unmarshal(data, &info); err != nil {
|
||||
log.Printf("Could not decode GRPC target %s=%s: %s", key, string(data), err)
|
||||
|
@ -679,21 +808,23 @@ func (c *GrpcClients) EtcdKeyUpdated(client *EtcdClient, key string, data []byte
|
|||
}
|
||||
|
||||
c.selfCheckWaitGroup.Add(1)
|
||||
go c.checkIsSelf(context.Background(), info.Address, cl)
|
||||
go c.checkIsSelf(c.closeCtx, info.Address, cl)
|
||||
|
||||
log.Printf("Adding %s as GRPC target", cl.Target())
|
||||
|
||||
if c.clientsMap == nil {
|
||||
c.clientsMap = make(map[string][]*GrpcClient)
|
||||
c.clientsMap = make(map[string]*grpcClientsList)
|
||||
}
|
||||
c.clientsMap[info.Address] = &grpcClientsList{
|
||||
clients: []*GrpcClient{cl},
|
||||
}
|
||||
c.clientsMap[info.Address] = []*GrpcClient{cl}
|
||||
c.clients = append(c.clients, cl)
|
||||
c.targetInformation[key] = &info
|
||||
statsGrpcClients.Inc()
|
||||
c.wakeupForTesting()
|
||||
}
|
||||
|
||||
func (c *GrpcClients) EtcdKeyDeleted(client *EtcdClient, key string) {
|
||||
func (c *GrpcClients) EtcdKeyDeleted(client *EtcdClient, key string, prevValue []byte) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
|
@ -709,19 +840,19 @@ func (c *GrpcClients) removeEtcdClientLocked(key string) {
|
|||
}
|
||||
|
||||
delete(c.targetInformation, key)
|
||||
clients, found := c.clientsMap[info.Address]
|
||||
entry, found := c.clientsMap[info.Address]
|
||||
if !found {
|
||||
return
|
||||
}
|
||||
|
||||
for _, client := range clients {
|
||||
for _, client := range entry.clients {
|
||||
log.Printf("Removing connection to %s (from %s)", client.Target(), key)
|
||||
c.closeClient(client)
|
||||
}
|
||||
delete(c.clientsMap, info.Address)
|
||||
c.clients = make([]*GrpcClient, 0, len(c.clientsMap))
|
||||
for _, clients := range c.clientsMap {
|
||||
c.clients = append(c.clients, clients...)
|
||||
for _, entry := range c.clientsMap {
|
||||
c.clients = append(c.clients, entry.clients...)
|
||||
}
|
||||
statsGrpcClients.Dec()
|
||||
c.wakeupForTesting()
|
||||
|
@ -757,25 +888,32 @@ func (c *GrpcClients) Close() {
|
|||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
for _, clients := range c.clientsMap {
|
||||
for _, client := range clients {
|
||||
for _, entry := range c.clientsMap {
|
||||
for _, client := range entry.clients {
|
||||
if err := client.Close(); err != nil {
|
||||
log.Printf("Error closing client to %s: %s", client.Target(), err)
|
||||
}
|
||||
}
|
||||
|
||||
if entry.entry != nil {
|
||||
c.dnsMonitor.Remove(entry.entry)
|
||||
entry.entry = nil
|
||||
}
|
||||
}
|
||||
|
||||
c.clients = nil
|
||||
c.clientsMap = nil
|
||||
if c.dnsDiscovery {
|
||||
c.stopping <- struct{}{}
|
||||
<-c.stopped
|
||||
c.dnsDiscovery = false
|
||||
}
|
||||
c.dnsDiscovery = false
|
||||
|
||||
if c.etcdClient != nil {
|
||||
c.etcdClient.RemoveListener(c)
|
||||
}
|
||||
if c.creds != nil {
|
||||
if cr, ok := c.creds.(*reloadableCredentials); ok {
|
||||
cr.Close()
|
||||
}
|
||||
}
|
||||
c.closeFunc()
|
||||
}
|
||||
|
||||
func (c *GrpcClients) GetClients() []*GrpcClient {
|
||||
|
|
|
@ -37,6 +37,9 @@ import (
|
|||
)
|
||||
|
||||
func (c *GrpcClients) getWakeupChannelForTesting() <-chan struct{} {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if c.wakeupChanForTesting != nil {
|
||||
return c.wakeupChanForTesting
|
||||
}
|
||||
|
@ -46,8 +49,9 @@ func (c *GrpcClients) getWakeupChannelForTesting() <-chan struct{} {
|
|||
return ch
|
||||
}
|
||||
|
||||
func NewGrpcClientsForTestWithConfig(t *testing.T, config *goconf.ConfigFile, etcdClient *EtcdClient) *GrpcClients {
|
||||
client, err := NewGrpcClients(config, etcdClient)
|
||||
func NewGrpcClientsForTestWithConfig(t *testing.T, config *goconf.ConfigFile, etcdClient *EtcdClient) (*GrpcClients, *DnsMonitor) {
|
||||
dnsMonitor := newDnsMonitorForTest(t, time.Hour) // will be updated manually
|
||||
client, err := NewGrpcClients(config, etcdClient, dnsMonitor)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -55,10 +59,10 @@ func NewGrpcClientsForTestWithConfig(t *testing.T, config *goconf.ConfigFile, et
|
|||
client.Close()
|
||||
})
|
||||
|
||||
return client
|
||||
return client, dnsMonitor
|
||||
}
|
||||
|
||||
func NewGrpcClientsForTest(t *testing.T, addr string) *GrpcClients {
|
||||
func NewGrpcClientsForTest(t *testing.T, addr string) (*GrpcClients, *DnsMonitor) {
|
||||
config := goconf.NewConfigFile()
|
||||
config.AddOption("grpc", "targets", addr)
|
||||
config.AddOption("grpc", "dnsdiscovery", "true")
|
||||
|
@ -66,7 +70,7 @@ func NewGrpcClientsForTest(t *testing.T, addr string) *GrpcClients {
|
|||
return NewGrpcClientsForTestWithConfig(t, config, nil)
|
||||
}
|
||||
|
||||
func NewGrpcClientsWithEtcdForTest(t *testing.T, etcd *embed.Etcd) *GrpcClients {
|
||||
func NewGrpcClientsWithEtcdForTest(t *testing.T, etcd *embed.Etcd) (*GrpcClients, *DnsMonitor) {
|
||||
config := goconf.NewConfigFile()
|
||||
config.AddOption("etcd", "endpoints", etcd.Config().ListenClientUrls[0].String())
|
||||
|
||||
|
@ -108,29 +112,34 @@ func waitForEvent(ctx context.Context, t *testing.T, ch <-chan struct{}) {
|
|||
}
|
||||
|
||||
func Test_GrpcClients_EtcdInitial(t *testing.T) {
|
||||
_, addr1 := NewGrpcServerForTest(t)
|
||||
_, addr2 := NewGrpcServerForTest(t)
|
||||
CatchLogForTest(t)
|
||||
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
||||
_, addr1 := NewGrpcServerForTest(t)
|
||||
_, addr2 := NewGrpcServerForTest(t)
|
||||
|
||||
etcd := NewEtcdForTest(t)
|
||||
etcd := NewEtcdForTest(t)
|
||||
|
||||
SetEtcdValue(etcd, "/grpctargets/one", []byte("{\"address\":\""+addr1+"\"}"))
|
||||
SetEtcdValue(etcd, "/grpctargets/two", []byte("{\"address\":\""+addr2+"\"}"))
|
||||
SetEtcdValue(etcd, "/grpctargets/one", []byte("{\"address\":\""+addr1+"\"}"))
|
||||
SetEtcdValue(etcd, "/grpctargets/two", []byte("{\"address\":\""+addr2+"\"}"))
|
||||
|
||||
client := NewGrpcClientsWithEtcdForTest(t, etcd)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel()
|
||||
if err := client.WaitForInitialized(ctx); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
client, _ := NewGrpcClientsWithEtcdForTest(t, etcd)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel()
|
||||
if err := client.WaitForInitialized(ctx); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if clients := client.GetClients(); len(clients) != 2 {
|
||||
t.Errorf("Expected two clients, got %+v", clients)
|
||||
}
|
||||
if clients := client.GetClients(); len(clients) != 2 {
|
||||
t.Errorf("Expected two clients, got %+v", clients)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func Test_GrpcClients_EtcdUpdate(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
etcd := NewEtcdForTest(t)
|
||||
client := NewGrpcClientsWithEtcdForTest(t, etcd)
|
||||
client, _ := NewGrpcClientsWithEtcdForTest(t, etcd)
|
||||
ch := client.getWakeupChannelForTesting()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
||||
|
@ -183,8 +192,10 @@ func Test_GrpcClients_EtcdUpdate(t *testing.T) {
|
|||
}
|
||||
|
||||
func Test_GrpcClients_EtcdIgnoreSelf(t *testing.T) {
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
etcd := NewEtcdForTest(t)
|
||||
client := NewGrpcClientsWithEtcdForTest(t, etcd)
|
||||
client, _ := NewGrpcClientsWithEtcdForTest(t, etcd)
|
||||
ch := client.getWakeupChannelForTesting()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
||||
|
@ -227,82 +238,70 @@ func Test_GrpcClients_EtcdIgnoreSelf(t *testing.T) {
|
|||
}
|
||||
|
||||
func Test_GrpcClients_DnsDiscovery(t *testing.T) {
|
||||
var ipsResult []net.IP
|
||||
lookupGrpcIp = func(host string) ([]net.IP, error) {
|
||||
if host == "testgrpc" {
|
||||
return ipsResult, nil
|
||||
CatchLogForTest(t)
|
||||
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
||||
lookup := newMockDnsLookupForTest(t)
|
||||
target := "testgrpc:12345"
|
||||
ip1 := net.ParseIP("192.168.0.1")
|
||||
ip2 := net.ParseIP("192.168.0.2")
|
||||
targetWithIp1 := fmt.Sprintf("%s (%s)", target, ip1)
|
||||
targetWithIp2 := fmt.Sprintf("%s (%s)", target, ip2)
|
||||
lookup.Set("testgrpc", []net.IP{ip1})
|
||||
client, dnsMonitor := NewGrpcClientsForTest(t, target)
|
||||
ch := client.getWakeupChannelForTesting()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
||||
defer cancel()
|
||||
|
||||
dnsMonitor.checkHostnames()
|
||||
if clients := client.GetClients(); len(clients) != 1 {
|
||||
t.Errorf("Expected one client, got %+v", clients)
|
||||
} else if clients[0].Target() != targetWithIp1 {
|
||||
t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target())
|
||||
} else if !clients[0].ip.Equal(ip1) {
|
||||
t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unknown host")
|
||||
}
|
||||
target := "testgrpc:12345"
|
||||
ip1 := net.ParseIP("192.168.0.1")
|
||||
ip2 := net.ParseIP("192.168.0.2")
|
||||
targetWithIp1 := fmt.Sprintf("%s (%s)", target, ip1)
|
||||
targetWithIp2 := fmt.Sprintf("%s (%s)", target, ip2)
|
||||
ipsResult = []net.IP{ip1}
|
||||
client := NewGrpcClientsForTest(t, target)
|
||||
ch := client.getWakeupChannelForTesting()
|
||||
lookup.Set("testgrpc", []net.IP{ip1, ip2})
|
||||
drainWakeupChannel(ch)
|
||||
dnsMonitor.checkHostnames()
|
||||
waitForEvent(ctx, t, ch)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
||||
defer cancel()
|
||||
if clients := client.GetClients(); len(clients) != 2 {
|
||||
t.Errorf("Expected two client, got %+v", clients)
|
||||
} else if clients[0].Target() != targetWithIp1 {
|
||||
t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target())
|
||||
} else if !clients[0].ip.Equal(ip1) {
|
||||
t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip)
|
||||
} else if clients[1].Target() != targetWithIp2 {
|
||||
t.Errorf("Expected target %s, got %s", targetWithIp2, clients[1].Target())
|
||||
} else if !clients[1].ip.Equal(ip2) {
|
||||
t.Errorf("Expected IP %s, got %s", ip2, clients[1].ip)
|
||||
}
|
||||
|
||||
if clients := client.GetClients(); len(clients) != 1 {
|
||||
t.Errorf("Expected one client, got %+v", clients)
|
||||
} else if clients[0].Target() != targetWithIp1 {
|
||||
t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target())
|
||||
} else if !clients[0].ip.Equal(ip1) {
|
||||
t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip)
|
||||
}
|
||||
lookup.Set("testgrpc", []net.IP{ip2})
|
||||
drainWakeupChannel(ch)
|
||||
dnsMonitor.checkHostnames()
|
||||
waitForEvent(ctx, t, ch)
|
||||
|
||||
ipsResult = []net.IP{ip1, ip2}
|
||||
drainWakeupChannel(ch)
|
||||
client.updateGrpcIPs()
|
||||
waitForEvent(ctx, t, ch)
|
||||
|
||||
if clients := client.GetClients(); len(clients) != 2 {
|
||||
t.Errorf("Expected two client, got %+v", clients)
|
||||
} else if clients[0].Target() != targetWithIp1 {
|
||||
t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target())
|
||||
} else if !clients[0].ip.Equal(ip1) {
|
||||
t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip)
|
||||
} else if clients[1].Target() != targetWithIp2 {
|
||||
t.Errorf("Expected target %s, got %s", targetWithIp2, clients[1].Target())
|
||||
} else if !clients[1].ip.Equal(ip2) {
|
||||
t.Errorf("Expected IP %s, got %s", ip2, clients[1].ip)
|
||||
}
|
||||
|
||||
ipsResult = []net.IP{ip2}
|
||||
drainWakeupChannel(ch)
|
||||
client.updateGrpcIPs()
|
||||
waitForEvent(ctx, t, ch)
|
||||
|
||||
if clients := client.GetClients(); len(clients) != 1 {
|
||||
t.Errorf("Expected one client, got %+v", clients)
|
||||
} else if clients[0].Target() != targetWithIp2 {
|
||||
t.Errorf("Expected target %s, got %s", targetWithIp2, clients[0].Target())
|
||||
} else if !clients[0].ip.Equal(ip2) {
|
||||
t.Errorf("Expected IP %s, got %s", ip2, clients[0].ip)
|
||||
}
|
||||
if clients := client.GetClients(); len(clients) != 1 {
|
||||
t.Errorf("Expected one client, got %+v", clients)
|
||||
} else if clients[0].Target() != targetWithIp2 {
|
||||
t.Errorf("Expected target %s, got %s", targetWithIp2, clients[0].Target())
|
||||
} else if !clients[0].ip.Equal(ip2) {
|
||||
t.Errorf("Expected IP %s, got %s", ip2, clients[0].ip)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func Test_GrpcClients_DnsDiscoveryInitialFailed(t *testing.T) {
|
||||
var ipsResult []net.IP
|
||||
lookupGrpcIp = func(host string) ([]net.IP, error) {
|
||||
if host == "testgrpc" && len(ipsResult) > 0 {
|
||||
return ipsResult, nil
|
||||
}
|
||||
|
||||
return nil, &net.DNSError{
|
||||
Err: "no such host",
|
||||
Name: host,
|
||||
IsNotFound: true,
|
||||
}
|
||||
}
|
||||
t.Parallel()
|
||||
CatchLogForTest(t)
|
||||
lookup := newMockDnsLookupForTest(t)
|
||||
target := "testgrpc:12345"
|
||||
ip1 := net.ParseIP("192.168.0.1")
|
||||
targetWithIp1 := fmt.Sprintf("%s (%s)", target, ip1)
|
||||
client := NewGrpcClientsForTest(t, target)
|
||||
client, dnsMonitor := NewGrpcClientsForTest(t, target)
|
||||
ch := client.getWakeupChannelForTesting()
|
||||
|
||||
testCtx, testCtxCancel := context.WithTimeout(context.Background(), testTimeout)
|
||||
|
@ -318,9 +317,9 @@ func Test_GrpcClients_DnsDiscoveryInitialFailed(t *testing.T) {
|
|||
t.Errorf("Expected no client, got %+v", clients)
|
||||
}
|
||||
|
||||
ipsResult = []net.IP{ip1}
|
||||
lookup.Set("testgrpc", []net.IP{ip1})
|
||||
drainWakeupChannel(ch)
|
||||
client.updateGrpcIPs()
|
||||
dnsMonitor.checkHostnames()
|
||||
waitForEvent(testCtx, t, ch)
|
||||
|
||||
if clients := client.GetClients(); len(clients) != 1 {
|
||||
|
@ -333,55 +332,58 @@ func Test_GrpcClients_DnsDiscoveryInitialFailed(t *testing.T) {
|
|||
}
|
||||
|
||||
func Test_GrpcClients_Encryption(t *testing.T) {
|
||||
serverKey, err := rsa.GenerateKey(rand.Reader, 1024)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clientKey, err := rsa.GenerateKey(rand.Reader, 1024)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
serverCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Server cert", serverKey)
|
||||
clientCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Testing client", clientKey)
|
||||
|
||||
dir := t.TempDir()
|
||||
serverPrivkeyFile := path.Join(dir, "server-privkey.pem")
|
||||
serverPubkeyFile := path.Join(dir, "server-pubkey.pem")
|
||||
serverCertFile := path.Join(dir, "server-cert.pem")
|
||||
WritePrivateKey(serverKey, serverPrivkeyFile) // nolint
|
||||
WritePublicKey(&serverKey.PublicKey, serverPubkeyFile) // nolint
|
||||
os.WriteFile(serverCertFile, serverCert, 0755) // nolint
|
||||
clientPrivkeyFile := path.Join(dir, "client-privkey.pem")
|
||||
clientPubkeyFile := path.Join(dir, "client-pubkey.pem")
|
||||
clientCertFile := path.Join(dir, "client-cert.pem")
|
||||
WritePrivateKey(clientKey, clientPrivkeyFile) // nolint
|
||||
WritePublicKey(&clientKey.PublicKey, clientPubkeyFile) // nolint
|
||||
os.WriteFile(clientCertFile, clientCert, 0755) // nolint
|
||||
|
||||
serverConfig := goconf.NewConfigFile()
|
||||
serverConfig.AddOption("grpc", "servercertificate", serverCertFile)
|
||||
serverConfig.AddOption("grpc", "serverkey", serverPrivkeyFile)
|
||||
serverConfig.AddOption("grpc", "clientca", clientCertFile)
|
||||
_, addr := NewGrpcServerForTestWithConfig(t, serverConfig)
|
||||
|
||||
clientConfig := goconf.NewConfigFile()
|
||||
clientConfig.AddOption("grpc", "targets", addr)
|
||||
clientConfig.AddOption("grpc", "clientcertificate", clientCertFile)
|
||||
clientConfig.AddOption("grpc", "clientkey", clientPrivkeyFile)
|
||||
clientConfig.AddOption("grpc", "serverca", serverCertFile)
|
||||
clients := NewGrpcClientsForTestWithConfig(t, clientConfig, nil)
|
||||
|
||||
ctx, cancel1 := context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel1()
|
||||
|
||||
if err := clients.WaitForInitialized(ctx); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, client := range clients.GetClients() {
|
||||
if _, err := client.GetServerId(ctx); err != nil {
|
||||
CatchLogForTest(t)
|
||||
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
||||
serverKey, err := rsa.GenerateKey(rand.Reader, 1024)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
clientKey, err := rsa.GenerateKey(rand.Reader, 1024)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
serverCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Server cert", serverKey)
|
||||
clientCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Testing client", clientKey)
|
||||
|
||||
dir := t.TempDir()
|
||||
serverPrivkeyFile := path.Join(dir, "server-privkey.pem")
|
||||
serverPubkeyFile := path.Join(dir, "server-pubkey.pem")
|
||||
serverCertFile := path.Join(dir, "server-cert.pem")
|
||||
WritePrivateKey(serverKey, serverPrivkeyFile) // nolint
|
||||
WritePublicKey(&serverKey.PublicKey, serverPubkeyFile) // nolint
|
||||
os.WriteFile(serverCertFile, serverCert, 0755) // nolint
|
||||
clientPrivkeyFile := path.Join(dir, "client-privkey.pem")
|
||||
clientPubkeyFile := path.Join(dir, "client-pubkey.pem")
|
||||
clientCertFile := path.Join(dir, "client-cert.pem")
|
||||
WritePrivateKey(clientKey, clientPrivkeyFile) // nolint
|
||||
WritePublicKey(&clientKey.PublicKey, clientPubkeyFile) // nolint
|
||||
os.WriteFile(clientCertFile, clientCert, 0755) // nolint
|
||||
|
||||
serverConfig := goconf.NewConfigFile()
|
||||
serverConfig.AddOption("grpc", "servercertificate", serverCertFile)
|
||||
serverConfig.AddOption("grpc", "serverkey", serverPrivkeyFile)
|
||||
serverConfig.AddOption("grpc", "clientca", clientCertFile)
|
||||
_, addr := NewGrpcServerForTestWithConfig(t, serverConfig)
|
||||
|
||||
clientConfig := goconf.NewConfigFile()
|
||||
clientConfig.AddOption("grpc", "targets", addr)
|
||||
clientConfig.AddOption("grpc", "clientcertificate", clientCertFile)
|
||||
clientConfig.AddOption("grpc", "clientkey", clientPrivkeyFile)
|
||||
clientConfig.AddOption("grpc", "serverca", serverCertFile)
|
||||
clients, _ := NewGrpcClientsForTestWithConfig(t, clientConfig, nil)
|
||||
|
||||
ctx, cancel1 := context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel1()
|
||||
|
||||
if err := clients.WaitForInitialized(ctx); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, client := range clients.GetClients() {
|
||||
if _, err := client.GetServerId(ctx); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
|
@ -125,6 +125,15 @@ func (c *reloadableCredentials) OverrideServerName(serverName string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// Close stops the background reload watchers of these credentials.
// Both the certificate loader and the CA pool are optional, so each is
// only closed if it was configured.
func (c *reloadableCredentials) Close() {
	if c.loader != nil {
		c.loader.Close()
	}
	if c.pool != nil {
		c.pool.Close()
	}
}
|
||||
|
||||
func NewReloadableCredentials(config *goconf.ConfigFile, server bool) (credentials.TransportCredentials, error) {
|
||||
var prefix string
|
||||
var caPrefix string
|
||||
|
|
|
@ -22,11 +22,13 @@
|
|||
package signaling
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"io/fs"
|
||||
"math/big"
|
||||
"net"
|
||||
|
@ -35,6 +37,22 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
// WaitForCertificateReload blocks until the certificate loader has
// reloaded its certificate or the context is done. Returns an error if
// no certificate was configured for these credentials.
func (c *reloadableCredentials) WaitForCertificateReload(ctx context.Context) error {
	if c.loader == nil {
		return errors.New("no certificate loaded")
	}

	return c.loader.WaitForReload(ctx)
}
|
||||
|
||||
// WaitForCertPoolReload blocks until the CA certificate pool has been
// reloaded or the context is done. Returns an error if no CA pool was
// configured for these credentials.
func (c *reloadableCredentials) WaitForCertPoolReload(ctx context.Context) error {
	if c.pool == nil {
		return errors.New("no certificate pool loaded")
	}

	return c.pool.WaitForReload(ctx)
}
|
||||
|
||||
func GenerateSelfSignedCertificateForTesting(t *testing.T, bits int, organization string, key *rsa.PrivateKey) []byte {
|
||||
template := x509.Certificate{
|
||||
SerialNumber: big.NewInt(1),
|
||||
|
|
229
grpc_remote_client.go
Normal file
229
grpc_remote_client.go
Normal file
|
@ -0,0 +1,229 @@
|
|||
/**
|
||||
* Standalone signaling server for the Nextcloud Spreed app.
|
||||
* Copyright (C) 2024 struktur AG
|
||||
*
|
||||
* @author Joachim Bauch <bauch@struktur.de>
|
||||
*
|
||||
* @license GNU AGPL version 3 or any later version
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
package signaling
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"sync/atomic"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const (
|
||||
grpcRemoteClientMessageQueue = 16
|
||||
)
|
||||
|
||||
func getMD(md metadata.MD, key string) string {
|
||||
if values := md.Get(key); len(values) > 0 {
|
||||
return values[0]
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// remoteGrpcClient is a remote client connecting from a GRPC proxy to a Hub.
type remoteGrpcClient struct {
	hub    *Hub
	client RpcSessions_ProxySessionServer

	// Client properties as received in the metadata of the GRPC request.
	sessionId  string
	remoteAddr string
	country    string
	userAgent  string

	// closeCtx is cancelled (with an optional cause) when the client is closed.
	closeCtx  context.Context
	closeFunc context.CancelCauseFunc

	// session holds the session attached to this client (nil if none).
	session atomic.Pointer[Session]
	// messages buffers outgoing messages; capacity is
	// grpcRemoteClientMessageQueue, sends never block (see SendMessage).
	messages chan WritableClientMessage
}
|
||||
|
||||
func newRemoteGrpcClient(hub *Hub, request RpcSessions_ProxySessionServer) (*remoteGrpcClient, error) {
|
||||
md, found := metadata.FromIncomingContext(request.Context())
|
||||
if !found {
|
||||
return nil, errors.New("no metadata provided")
|
||||
}
|
||||
|
||||
closeCtx, closeFunc := context.WithCancelCause(context.Background())
|
||||
|
||||
result := &remoteGrpcClient{
|
||||
hub: hub,
|
||||
client: request,
|
||||
|
||||
sessionId: getMD(md, "sessionId"),
|
||||
remoteAddr: getMD(md, "remoteAddr"),
|
||||
country: getMD(md, "country"),
|
||||
userAgent: getMD(md, "userAgent"),
|
||||
|
||||
closeCtx: closeCtx,
|
||||
closeFunc: closeFunc,
|
||||
|
||||
messages: make(chan WritableClientMessage, grpcRemoteClientMessageQueue),
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// readPump reads messages from the GRPC stream until it is closed and
// forwards them to the hub. When the pump stops, the close context is
// cancelled (with the read error as cause if an unexpected one occurred)
// and the hub is notified that the client was closed.
func (c *remoteGrpcClient) readPump() {
	var closeError error
	defer func() {
		c.closeFunc(closeError)
		c.hub.OnClosed(c)
	}()

	for {
		msg, err := c.client.Recv()
		if err != nil {
			if errors.Is(err, io.EOF) {
				// Connection was closed locally.
				break
			}

			// A cancelled stream is a normal shutdown; anything else is
			// logged and recorded as the cause for closing this client.
			if status.Code(err) != codes.Canceled {
				log.Printf("Error reading from remote client for session %s: %s", c.sessionId, err)
				closeError = err
			}
			break
		}

		c.hub.OnMessageReceived(c, msg.Message)
	}
}
|
||||
|
||||
// Context returns the context of the underlying GRPC stream.
func (c *remoteGrpcClient) Context() context.Context {
	return c.client.Context()
}

// RemoteAddr returns the remote address passed in the request metadata.
func (c *remoteGrpcClient) RemoteAddr() string {
	return c.remoteAddr
}

// UserAgent returns the user agent passed in the request metadata.
func (c *remoteGrpcClient) UserAgent() string {
	return c.userAgent
}

// Country returns the country passed in the request metadata.
func (c *remoteGrpcClient) Country() string {
	return c.country
}

// IsConnected always reports true for remote clients.
func (c *remoteGrpcClient) IsConnected() bool {
	return true
}

// IsAuthenticated reports whether a session is attached to this client.
func (c *remoteGrpcClient) IsAuthenticated() bool {
	return c.GetSession() != nil
}
|
||||
|
||||
func (c *remoteGrpcClient) GetSession() Session {
|
||||
session := c.session.Load()
|
||||
if session == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return *session
|
||||
}
|
||||
|
||||
func (c *remoteGrpcClient) SetSession(session Session) {
|
||||
if session == nil {
|
||||
c.session.Store(nil)
|
||||
} else {
|
||||
c.session.Store(&session)
|
||||
}
|
||||
}
|
||||
|
||||
// SendError sends an error message to the remote client and reports
// whether the message could be queued.
func (c *remoteGrpcClient) SendError(e *Error) bool {
	message := &ServerMessage{
		Type:  "error",
		Error: e,
	}
	return c.SendMessage(message)
}

// SendByeResponse sends a "bye" message without a reason to the remote client.
func (c *remoteGrpcClient) SendByeResponse(message *ClientMessage) bool {
	return c.SendByeResponseWithReason(message, "")
}
|
||||
|
||||
func (c *remoteGrpcClient) SendByeResponseWithReason(message *ClientMessage, reason string) bool {
|
||||
response := &ServerMessage{
|
||||
Type: "bye",
|
||||
}
|
||||
if message != nil {
|
||||
response.Id = message.Id
|
||||
}
|
||||
if reason != "" {
|
||||
if response.Bye == nil {
|
||||
response.Bye = &ByeServerMessage{}
|
||||
}
|
||||
response.Bye.Reason = reason
|
||||
}
|
||||
return c.SendMessage(response)
|
||||
}
|
||||
|
||||
// SendMessage queues a message to be sent to the remote client. Returns
// false if the client has already been closed or the message queue is full.
func (c *remoteGrpcClient) SendMessage(message WritableClientMessage) bool {
	if c.closeCtx.Err() != nil {
		return false
	}

	// Non-blocking send: drop the message rather than blocking the caller
	// when the queue is full.
	select {
	case c.messages <- message:
		return true
	default:
		log.Printf("Message queue for remote client of session %s is full, not sending %+v", c.sessionId, message)
		return false
	}
}
|
||||
|
||||
// Close closes the client without an error cause.
func (c *remoteGrpcClient) Close() {
	c.closeFunc(nil)
}
|
||||
|
||||
func (c *remoteGrpcClient) run() error {
|
||||
go c.readPump()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-c.closeCtx.Done():
|
||||
if err := context.Cause(c.closeCtx); err != context.Canceled {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
case msg := <-c.messages:
|
||||
data, err := json.Marshal(msg)
|
||||
if err != nil {
|
||||
log.Printf("Error marshalling %+v for remote client for session %s: %s", msg, c.sessionId, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := c.client.Send(&ServerSessionMessage{
|
||||
Message: data,
|
||||
}); err != nil {
|
||||
return fmt.Errorf("error sending %+v to remote client for session %s: %w", msg, c.sessionId, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -35,6 +35,7 @@ import (
|
|||
"github.com/dlintw/goconf"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
status "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
|
@ -54,17 +55,26 @@ func init() {
|
|||
GrpcServerId = hex.EncodeToString(md.Sum(nil))
|
||||
}
|
||||
|
||||
type GrpcServerHub interface {
|
||||
GetSessionByResumeId(resumeId string) Session
|
||||
GetSessionByPublicId(sessionId string) Session
|
||||
GetSessionIdByRoomSessionId(roomSessionId string) (string, error)
|
||||
|
||||
GetBackend(u *url.URL) *Backend
|
||||
}
|
||||
|
||||
type GrpcServer struct {
|
||||
UnimplementedRpcBackendServer
|
||||
UnimplementedRpcInternalServer
|
||||
UnimplementedRpcMcuServer
|
||||
UnimplementedRpcSessionsServer
|
||||
|
||||
creds credentials.TransportCredentials
|
||||
conn *grpc.Server
|
||||
listener net.Listener
|
||||
serverId string // can be overwritten from tests
|
||||
|
||||
hub *Hub
|
||||
hub GrpcServerHub
|
||||
}
|
||||
|
||||
func NewGrpcServer(config *goconf.ConfigFile) (*GrpcServer, error) {
|
||||
|
@ -84,6 +94,7 @@ func NewGrpcServer(config *goconf.ConfigFile) (*GrpcServer, error) {
|
|||
|
||||
conn := grpc.NewServer(grpc.Creds(creds))
|
||||
result := &GrpcServer{
|
||||
creds: creds,
|
||||
conn: conn,
|
||||
listener: listener,
|
||||
serverId: GrpcServerId,
|
||||
|
@ -105,13 +116,30 @@ func (s *GrpcServer) Run() error {
|
|||
|
||||
func (s *GrpcServer) Close() {
|
||||
s.conn.GracefulStop()
|
||||
if cr, ok := s.creds.(*reloadableCredentials); ok {
|
||||
cr.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *GrpcServer) LookupResumeId(ctx context.Context, request *LookupResumeIdRequest) (*LookupResumeIdReply, error) {
|
||||
statsGrpcServerCalls.WithLabelValues("LookupResumeId").Inc()
|
||||
// TODO: Remove debug logging
|
||||
log.Printf("Lookup session for resume id %s", request.ResumeId)
|
||||
session := s.hub.GetSessionByResumeId(request.ResumeId)
|
||||
if session == nil {
|
||||
return nil, status.Error(codes.NotFound, "no such room session id")
|
||||
}
|
||||
|
||||
return &LookupResumeIdReply{
|
||||
SessionId: session.PublicId(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *GrpcServer) LookupSessionId(ctx context.Context, request *LookupSessionIdRequest) (*LookupSessionIdReply, error) {
|
||||
statsGrpcServerCalls.WithLabelValues("LookupSessionId").Inc()
|
||||
// TODO: Remove debug logging
|
||||
log.Printf("Lookup session id for room session id %s", request.RoomSessionId)
|
||||
sid, err := s.hub.roomSessions.GetSessionId(request.RoomSessionId)
|
||||
sid, err := s.hub.GetSessionIdByRoomSessionId(request.RoomSessionId)
|
||||
if errors.Is(err, ErrNoSuchRoomSession) {
|
||||
return nil, status.Error(codes.NotFound, "no such room session id")
|
||||
} else if err != nil {
|
||||
|
@ -171,7 +199,7 @@ func (s *GrpcServer) GetPublisherId(ctx context.Context, request *GetPublisherId
|
|||
return nil, status.Error(codes.NotFound, "no such session")
|
||||
}
|
||||
|
||||
publisher := clientSession.GetOrWaitForPublisher(ctx, request.StreamType)
|
||||
publisher := clientSession.GetOrWaitForPublisher(ctx, StreamType(request.StreamType))
|
||||
if publisher, ok := publisher.(*mcuProxyPublisher); ok {
|
||||
reply := &GetPublisherIdReply{
|
||||
PublisherId: publisher.Id(),
|
||||
|
@ -201,7 +229,7 @@ func (s *GrpcServer) GetSessionCount(ctx context.Context, request *GetSessionCou
|
|||
return nil, status.Error(codes.InvalidArgument, "invalid url")
|
||||
}
|
||||
|
||||
backend := s.hub.backend.GetBackend(u)
|
||||
backend := s.hub.GetBackend(u)
|
||||
if backend == nil {
|
||||
return nil, status.Error(codes.NotFound, "no such backend")
|
||||
}
|
||||
|
@ -210,3 +238,21 @@ func (s *GrpcServer) GetSessionCount(ctx context.Context, request *GetSessionCou
|
|||
Count: uint32(backend.Len()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *GrpcServer) ProxySession(request RpcSessions_ProxySessionServer) error {
|
||||
statsGrpcServerCalls.WithLabelValues("ProxySession").Inc()
|
||||
hub, ok := s.hub.(*Hub)
|
||||
if !ok {
|
||||
return status.Error(codes.Internal, "invalid hub type")
|
||||
|
||||
}
|
||||
client, err := newRemoteGrpcClient(hub, request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sid := hub.registerClient(client)
|
||||
defer hub.unregisterClient(sid)
|
||||
|
||||
return client.run()
|
||||
}
|
||||
|
|
|
@ -28,6 +28,7 @@ import (
|
|||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"net"
|
||||
"os"
|
||||
"path"
|
||||
|
@ -40,6 +41,24 @@ import (
|
|||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
// WaitForCertificateReload waits until the server certificate has been
// reloaded or the context is done. Returns an error if the server was
// not created with reloadable credentials.
func (s *GrpcServer) WaitForCertificateReload(ctx context.Context) error {
	c, ok := s.creds.(*reloadableCredentials)
	if !ok {
		return errors.New("no reloadable credentials found")
	}

	return c.WaitForCertificateReload(ctx)
}
|
||||
|
||||
// WaitForCertPoolReload waits until the CA certificate pool has been
// reloaded or the context is done. Returns an error if the server was
// not created with reloadable credentials.
func (s *GrpcServer) WaitForCertPoolReload(ctx context.Context) error {
	c, ok := s.creds.(*reloadableCredentials)
	if !ok {
		return errors.New("no reloadable credentials found")
	}

	return c.WaitForCertPoolReload(ctx)
}
|
||||
|
||||
func NewGrpcServerForTestWithConfig(t *testing.T, config *goconf.ConfigFile) (server *GrpcServer, addr string) {
|
||||
for port := 50000; port < 50100; port++ {
|
||||
addr = net.JoinHostPort("127.0.0.1", strconv.Itoa(port))
|
||||
|
@ -79,6 +98,7 @@ func NewGrpcServerForTest(t *testing.T) (server *GrpcServer, addr string) {
|
|||
}
|
||||
|
||||
func Test_GrpcServer_ReloadCerts(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
key, err := rsa.GenerateKey(rand.Reader, 1024)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -99,8 +119,8 @@ func Test_GrpcServer_ReloadCerts(t *testing.T) {
|
|||
config.AddOption("grpc", "servercertificate", certFile)
|
||||
config.AddOption("grpc", "serverkey", privkeyFile)
|
||||
|
||||
UpdateCertificateCheckIntervalForTest(t, time.Millisecond)
|
||||
_, addr := NewGrpcServerForTestWithConfig(t, config)
|
||||
UpdateCertificateCheckIntervalForTest(t, 0)
|
||||
server, addr := NewGrpcServerForTestWithConfig(t, config)
|
||||
|
||||
cp1 := x509.NewCertPool()
|
||||
if !cp1.AppendCertsFromPEM(cert1) {
|
||||
|
@ -128,6 +148,13 @@ func Test_GrpcServer_ReloadCerts(t *testing.T) {
|
|||
cert2 := GenerateSelfSignedCertificateForTesting(t, 1024, org2, key)
|
||||
replaceFile(t, certFile, cert2, 0755)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel()
|
||||
|
||||
if err := server.WaitForCertificateReload(ctx); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cp2 := x509.NewCertPool()
|
||||
if !cp2.AppendCertsFromPEM(cert2) {
|
||||
t.Fatalf("could not add certificate")
|
||||
|
@ -152,6 +179,7 @@ func Test_GrpcServer_ReloadCerts(t *testing.T) {
|
|||
}
|
||||
|
||||
func Test_GrpcServer_ReloadCA(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
serverKey, err := rsa.GenerateKey(rand.Reader, 1024)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -180,8 +208,8 @@ func Test_GrpcServer_ReloadCA(t *testing.T) {
|
|||
config.AddOption("grpc", "serverkey", privkeyFile)
|
||||
config.AddOption("grpc", "clientca", caFile)
|
||||
|
||||
UpdateCertificateCheckIntervalForTest(t, time.Millisecond)
|
||||
_, addr := NewGrpcServerForTestWithConfig(t, config)
|
||||
UpdateCertificateCheckIntervalForTest(t, 0)
|
||||
server, addr := NewGrpcServerForTestWithConfig(t, config)
|
||||
|
||||
pool := x509.NewCertPool()
|
||||
if !pool.AppendCertsFromPEM(serverCert) {
|
||||
|
@ -217,6 +245,10 @@ func Test_GrpcServer_ReloadCA(t *testing.T) {
|
|||
clientCert2 := GenerateSelfSignedCertificateForTesting(t, 1024, org2, clientKey)
|
||||
replaceFile(t, caFile, clientCert2, 0755)
|
||||
|
||||
if err := server.WaitForCertPoolReload(ctx1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pair2, err := tls.X509KeyPair(clientCert2, pem.EncodeToMemory(&pem.Block{
|
||||
Type: "RSA PRIVATE KEY",
|
||||
Bytes: x509.MarshalPKCS1PrivateKey(clientKey),
|
||||
|
|
|
@ -26,8 +26,18 @@ option go_package = "github.com/strukturag/nextcloud-spreed-signaling;signaling"
|
|||
package signaling;
|
||||
|
||||
service RpcSessions {
|
||||
rpc LookupResumeId(LookupResumeIdRequest) returns (LookupResumeIdReply) {}
|
||||
rpc LookupSessionId(LookupSessionIdRequest) returns (LookupSessionIdReply) {}
|
||||
rpc IsSessionInCall(IsSessionInCallRequest) returns (IsSessionInCallReply) {}
|
||||
rpc ProxySession(stream ClientSessionMessage) returns (stream ServerSessionMessage) {}
|
||||
}
|
||||
|
||||
message LookupResumeIdRequest {
|
||||
string resumeId = 1;
|
||||
}
|
||||
|
||||
message LookupResumeIdReply {
|
||||
string sessionId = 1;
|
||||
}
|
||||
|
||||
message LookupSessionIdRequest {
|
||||
|
@ -49,3 +59,11 @@ message IsSessionInCallRequest {
|
|||
message IsSessionInCallReply {
|
||||
bool inCall = 1;
|
||||
}
|
||||
|
||||
message ClientSessionMessage {
|
||||
bytes message = 1;
|
||||
}
|
||||
|
||||
message ServerSessionMessage {
|
||||
bytes message = 1;
|
||||
}
|
||||
|
|
|
@ -29,10 +29,18 @@ import (
|
|||
"net/http"
|
||||
"net/url"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
RegisterHttpClientPoolStats()
|
||||
}
|
||||
|
||||
type Pool struct {
|
||||
pool chan *http.Client
|
||||
|
||||
currentConnections prometheus.Gauge
|
||||
}
|
||||
|
||||
func (p *Pool) get(ctx context.Context) (client *http.Client, err error) {
|
||||
|
@ -40,21 +48,24 @@ func (p *Pool) get(ctx context.Context) (client *http.Client, err error) {
|
|||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case client := <-p.pool:
|
||||
p.currentConnections.Inc()
|
||||
return client, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Pool) Put(c *http.Client) {
|
||||
p.currentConnections.Dec()
|
||||
p.pool <- c
|
||||
}
|
||||
|
||||
func newPool(constructor func() *http.Client, size int) (*Pool, error) {
|
||||
func newPool(host string, constructor func() *http.Client, size int) (*Pool, error) {
|
||||
if size <= 0 {
|
||||
return nil, fmt.Errorf("can't create empty pool")
|
||||
}
|
||||
|
||||
p := &Pool{
|
||||
pool: make(chan *http.Client, size),
|
||||
pool: make(chan *http.Client, size),
|
||||
currentConnections: connectionsPerHostCurrent.WithLabelValues(host),
|
||||
}
|
||||
for size > 0 {
|
||||
c := constructor()
|
||||
|
@ -103,7 +114,7 @@ func (p *HttpClientPool) getPool(url *url.URL) (*Pool, error) {
|
|||
return pool, nil
|
||||
}
|
||||
|
||||
pool, err := newPool(func() *http.Client {
|
||||
pool, err := newPool(url.Host, func() *http.Client {
|
||||
return &http.Client{
|
||||
Transport: p.transport,
|
||||
// Only send body in redirect if going to same scheme / host.
|
||||
|
|
43
http_client_pool_stats_prometheus.go
Normal file
43
http_client_pool_stats_prometheus.go
Normal file
|
@ -0,0 +1,43 @@
|
|||
/**
|
||||
* Standalone signaling server for the Nextcloud Spreed app.
|
||||
* Copyright (C) 2024 struktur AG
|
||||
*
|
||||
* @author Joachim Bauch <bauch@struktur.de>
|
||||
*
|
||||
* @license GNU AGPL version 3 or any later version
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
package signaling
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
	// connectionsPerHostCurrent tracks the number of HTTP client
	// connections currently checked out of the pool, labelled by host.
	connectionsPerHostCurrent = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "signaling",
		Subsystem: "http_client_pool",
		Name:      "connections",
		Help:      "The current number of HTTP client connections per host",
	}, []string{"host"})

	// httpClientPoolStats lists all collectors of the HTTP client pool.
	httpClientPoolStats = []prometheus.Collector{
		connectionsPerHostCurrent,
	}
)
|
||||
|
||||
// RegisterHttpClientPoolStats registers the HTTP client pool metrics.
func RegisterHttpClientPoolStats() {
	registerAll(httpClientPoolStats...)
}
|
|
@ -29,6 +29,7 @@ import (
|
|||
)
|
||||
|
||||
func TestHttpClientPool(t *testing.T) {
|
||||
t.Parallel()
|
||||
if _, err := NewHttpClientPool(0, false); err == nil {
|
||||
t.Error("should not be possible to create empty pool")
|
||||
}
|
||||
|
|
844
hub_test.go
844
hub_test.go
File diff suppressed because it is too large
Load diff
|
@ -258,8 +258,8 @@ type JanusGateway struct {
|
|||
// return gateway, nil
|
||||
// }
|
||||
|
||||
func NewJanusGateway(wsURL string, listener GatewayListener) (*JanusGateway, error) {
|
||||
conn, _, err := janusDialer.Dial(wsURL, nil)
|
||||
func NewJanusGateway(ctx context.Context, wsURL string, listener GatewayListener) (*JanusGateway, error) {
|
||||
conn, _, err := janusDialer.DialContext(ctx, wsURL, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -310,7 +310,7 @@ func (gateway *JanusGateway) cancelTransactions() {
|
|||
t.quit()
|
||||
}(t)
|
||||
}
|
||||
gateway.transactions = make(map[uint64]*transaction)
|
||||
clear(gateway.transactions)
|
||||
gateway.Unlock()
|
||||
}
|
||||
|
||||
|
|
|
@ -66,7 +66,7 @@ type McuInitiator interface {
|
|||
}
|
||||
|
||||
type Mcu interface {
|
||||
Start() error
|
||||
Start(ctx context.Context) error
|
||||
Stop()
|
||||
Reload(config *goconf.ConfigFile)
|
||||
|
||||
|
@ -75,14 +75,77 @@ type Mcu interface {
|
|||
|
||||
GetStats() interface{}
|
||||
|
||||
NewPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType string, bitrate int, mediaTypes MediaType, initiator McuInitiator) (McuPublisher, error)
|
||||
NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType string) (McuSubscriber, error)
|
||||
NewPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType StreamType, bitrate int, mediaTypes MediaType, initiator McuInitiator) (McuPublisher, error)
|
||||
NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType, initiator McuInitiator) (McuSubscriber, error)
|
||||
}
|
||||
|
||||
// PublisherStream contains the available properties when creating a
|
||||
// remote publisher in Janus.
|
||||
type PublisherStream struct {
|
||||
Mid string `json:"mid"`
|
||||
Mindex int `json:"mindex"`
|
||||
Type string `json:"type"`
|
||||
|
||||
Description string `json:"description,omitempty"`
|
||||
Disabled bool `json:"disabled,omitempty"`
|
||||
|
||||
// For types "audio" and "video"
|
||||
Codec string `json:"codec,omitempty"`
|
||||
|
||||
// For type "audio"
|
||||
Stereo bool `json:"stereo,omitempty"`
|
||||
Fec bool `json:"fec,omitempty"`
|
||||
Dtx bool `json:"dtx,omitempty"`
|
||||
|
||||
// For type "video"
|
||||
Simulcast bool `json:"simulcast,omitempty"`
|
||||
Svc bool `json:"svc,omitempty"`
|
||||
|
||||
ProfileH264 string `json:"h264_profile,omitempty"`
|
||||
ProfileVP9 string `json:"vp9_profile,omitempty"`
|
||||
|
||||
ExtIdVideoOrientation int `json:"videoorient_ext_id,omitempty"`
|
||||
ExtIdPlayoutDelay int `json:"playoutdelay_ext_id,omitempty"`
|
||||
}
|
||||
|
||||
type RemotePublisherController interface {
|
||||
PublisherId() string
|
||||
|
||||
StartPublishing(ctx context.Context, publisher McuRemotePublisherProperties) error
|
||||
GetStreams(ctx context.Context) ([]PublisherStream, error)
|
||||
}
|
||||
|
||||
type RemoteMcu interface {
|
||||
NewRemotePublisher(ctx context.Context, listener McuListener, controller RemotePublisherController, streamType StreamType) (McuRemotePublisher, error)
|
||||
NewRemoteSubscriber(ctx context.Context, listener McuListener, publisher McuRemotePublisher) (McuRemoteSubscriber, error)
|
||||
}
|
||||
|
||||
// StreamType is the type of a media stream.
type StreamType string

const (
	StreamTypeAudio  StreamType = "audio"
	StreamTypeVideo  StreamType = "video"
	StreamTypeScreen StreamType = "screen"
)

// IsValidStreamType reports whether the passed string is one of the
// known stream types ("audio", "video" or "screen").
func IsValidStreamType(s string) bool {
	switch s {
	case string(StreamTypeAudio), string(StreamTypeVideo), string(StreamTypeScreen):
		return true
	default:
		return false
	}
}
|
||||
|
||||
type McuClient interface {
|
||||
Id() string
|
||||
Sid() string
|
||||
StreamType() string
|
||||
StreamType() StreamType
|
||||
MaxBitrate() int
|
||||
|
||||
Close(ctx context.Context)
|
||||
|
||||
|
@ -94,6 +157,10 @@ type McuPublisher interface {
|
|||
|
||||
HasMedia(MediaType) bool
|
||||
SetMedia(MediaType)
|
||||
|
||||
GetStreams(ctx context.Context) ([]PublisherStream, error)
|
||||
PublishRemote(ctx context.Context, remoteId string, hostname string, port int, rtcpPort int) error
|
||||
UnpublishRemote(ctx context.Context, remoteId string) error
|
||||
}
|
||||
|
||||
type McuSubscriber interface {
|
||||
|
@ -101,3 +168,18 @@ type McuSubscriber interface {
|
|||
|
||||
Publisher() string
|
||||
}
|
||||
|
||||
type McuRemotePublisherProperties interface {
|
||||
Port() int
|
||||
RtcpPort() int
|
||||
}
|
||||
|
||||
type McuRemotePublisher interface {
|
||||
McuClient
|
||||
|
||||
McuRemotePublisherProperties
|
||||
}
|
||||
|
||||
type McuRemoteSubscriber interface {
|
||||
McuSubscriber
|
||||
}
|
||||
|
|
|
@ -28,3 +28,43 @@ import (
|
|||
func TestCommonMcuStats(t *testing.T) {
|
||||
collectAndLint(t, commonMcuStats...)
|
||||
}
|
||||
|
||||
// MockMcuListener is a no-op McuListener implementation for tests.
type MockMcuListener struct {
	// publicId is returned as-is by PublicId.
	publicId string
}

// PublicId returns the configured public id of the mock listener.
func (m *MockMcuListener) PublicId() string {
	return m.publicId
}

// OnUpdateOffer does nothing.
func (m *MockMcuListener) OnUpdateOffer(client McuClient, offer map[string]interface{}) {

}

// OnIceCandidate does nothing.
func (m *MockMcuListener) OnIceCandidate(client McuClient, candidate interface{}) {

}

// OnIceCompleted does nothing.
func (m *MockMcuListener) OnIceCompleted(client McuClient) {

}

// SubscriberSidUpdated does nothing.
func (m *MockMcuListener) SubscriberSidUpdated(subscriber McuSubscriber) {

}

// PublisherClosed does nothing.
func (m *MockMcuListener) PublisherClosed(publisher McuPublisher) {

}

// SubscriberClosed does nothing.
func (m *MockMcuListener) SubscriberClosed(subscriber McuSubscriber) {

}
|
||||
|
||||
// MockMcuInitiator is a McuInitiator implementation for tests that
// returns a fixed country.
type MockMcuInitiator struct {
	// country is returned as-is by Country.
	country string
}

// Country returns the configured country of the mock initiator.
func (m *MockMcuInitiator) Country() string {
	return m.country
}
|
||||
|
|
1040
mcu_janus.go
1040
mcu_janus.go
File diff suppressed because it is too large
Load diff
216
mcu_janus_client.go
Normal file
216
mcu_janus_client.go
Normal file
|
@ -0,0 +1,216 @@
|
|||
/**
|
||||
* Standalone signaling server for the Nextcloud Spreed app.
|
||||
* Copyright (C) 2017 struktur AG
|
||||
*
|
||||
* @author Joachim Bauch <bauch@struktur.de>
|
||||
*
|
||||
* @license GNU AGPL version 3 or any later version
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
package signaling
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/notedit/janus-go"
|
||||
)
|
||||
|
||||
// mcuJanusClient contains the state shared between Janus-backed clients
// and dispatches events received from the Janus handle to the configured
// callbacks (see "run").
type mcuJanusClient struct {
	mcu      *mcuJanus
	listener McuListener
	mu       sync.Mutex // nolint

	id         uint64
	session    uint64
	roomId     uint64
	sid        string
	streamType StreamType
	maxBitrate int

	handle    *JanusHandle
	handleId  uint64
	closeChan chan struct{}
	// deferred receives functions that are executed on the event loop.
	deferred chan func()

	// Callbacks for the different event types received from Janus;
	// set by the embedding publisher / subscriber types.
	handleEvent     func(event *janus.EventMsg)
	handleHangup    func(event *janus.HangupMsg)
	handleDetached  func(event *janus.DetachedMsg)
	handleConnected func(event *janus.WebRTCUpMsg)
	handleSlowLink  func(event *janus.SlowLinkMsg)
	handleMedia     func(event *janus.MediaMsg)
}
|
||||
|
||||
// Id returns the id of this client as a decimal string.
func (c *mcuJanusClient) Id() string {
	return strconv.FormatUint(c.id, 10)
}

// Sid returns the sid of this client.
func (c *mcuJanusClient) Sid() string {
	return c.sid
}

// StreamType returns the type of stream this client handles.
func (c *mcuJanusClient) StreamType() StreamType {
	return c.streamType
}

// MaxBitrate returns the maximum bitrate configured for this client.
func (c *mcuJanusClient) MaxBitrate() int {
	return c.maxBitrate
}

// Close does nothing for the base client.
func (c *mcuJanusClient) Close(ctx context.Context) {
}

// SendMessage does nothing for the base client.
func (c *mcuJanusClient) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) {
}
|
||||
|
||||
// closeClient detaches the Janus handle of this client (if any) and
// stops the event loop by closing closeChan. Returns true if the client
// was still attached, false if it had been closed before.
func (c *mcuJanusClient) closeClient(ctx context.Context) bool {
	if handle := c.handle; handle != nil {
		c.handle = nil
		close(c.closeChan)
		if _, err := handle.Detach(ctx); err != nil {
			// A "handle not found" error is not logged; presumably Janus
			// already cleaned up the handle on its side.
			if e, ok := err.(*janus.ErrorMsg); !ok || e.Err.Code != JANUS_ERROR_HANDLE_NOT_FOUND {
				log.Println("Could not detach client", handle.Id, err)
			}
		}
		return true
	}

	return false
}
|
||||
|
||||
// run is the event loop of the client. It dispatches events received on
// the Janus handle to the registered callbacks and executes deferred
// functions until closeChan is closed.
func (c *mcuJanusClient) run(handle *JanusHandle, closeChan <-chan struct{}) {
loop:
	for {
		select {
		case msg := <-handle.Events:
			switch t := msg.(type) {
			case *janus.EventMsg:
				c.handleEvent(t)
			case *janus.HangupMsg:
				c.handleHangup(t)
			case *janus.DetachedMsg:
				c.handleDetached(t)
			case *janus.MediaMsg:
				c.handleMedia(t)
			case *janus.WebRTCUpMsg:
				c.handleConnected(t)
			case *janus.SlowLinkMsg:
				c.handleSlowLink(t)
			case *TrickleMsg:
				c.handleTrickle(t)
			default:
				log.Println("Received unsupported event type", msg, reflect.TypeOf(msg))
			}
		case f := <-c.deferred:
			f()
		case <-closeChan:
			break loop
		}
	}
}
|
||||
|
||||
func (c *mcuJanusClient) sendOffer(ctx context.Context, offer map[string]interface{}, callback func(error, map[string]interface{})) {
|
||||
handle := c.handle
|
||||
if handle == nil {
|
||||
callback(ErrNotConnected, nil)
|
||||
return
|
||||
}
|
||||
|
||||
configure_msg := map[string]interface{}{
|
||||
"request": "configure",
|
||||
"audio": true,
|
||||
"video": true,
|
||||
"data": true,
|
||||
}
|
||||
answer_msg, err := handle.Message(ctx, configure_msg, offer)
|
||||
if err != nil {
|
||||
callback(err, nil)
|
||||
return
|
||||
}
|
||||
|
||||
callback(nil, answer_msg.Jsep)
|
||||
}
|
||||
|
||||
func (c *mcuJanusClient) sendAnswer(ctx context.Context, answer map[string]interface{}, callback func(error, map[string]interface{})) {
|
||||
handle := c.handle
|
||||
if handle == nil {
|
||||
callback(ErrNotConnected, nil)
|
||||
return
|
||||
}
|
||||
|
||||
start_msg := map[string]interface{}{
|
||||
"request": "start",
|
||||
"room": c.roomId,
|
||||
}
|
||||
start_response, err := handle.Message(ctx, start_msg, answer)
|
||||
if err != nil {
|
||||
callback(err, nil)
|
||||
return
|
||||
}
|
||||
log.Println("Started listener", start_response)
|
||||
callback(nil, nil)
|
||||
}
|
||||
|
||||
// sendCandidate forwards a single ICE candidate to Janus through a trickle
// request and invokes callback when done.
func (c *mcuJanusClient) sendCandidate(ctx context.Context, candidate interface{}, callback func(error, map[string]interface{})) {
	handle := c.handle
	if handle == nil {
		callback(ErrNotConnected, nil)
		return
	}

	if _, err := handle.Trickle(ctx, candidate); err != nil {
		callback(err, nil)
		return
	}
	callback(nil, nil)
}
|
||||
|
||||
func (c *mcuJanusClient) handleTrickle(event *TrickleMsg) {
|
||||
if event.Candidate.Completed {
|
||||
c.listener.OnIceCompleted(c)
|
||||
} else {
|
||||
c.listener.OnIceCandidate(c, event.Candidate)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *mcuJanusClient) selectStream(ctx context.Context, stream *streamSelection, callback func(error, map[string]interface{})) {
|
||||
handle := c.handle
|
||||
if handle == nil {
|
||||
callback(ErrNotConnected, nil)
|
||||
return
|
||||
}
|
||||
|
||||
if stream == nil || !stream.HasValues() {
|
||||
callback(nil, nil)
|
||||
return
|
||||
}
|
||||
|
||||
configure_msg := map[string]interface{}{
|
||||
"request": "configure",
|
||||
}
|
||||
if stream != nil {
|
||||
stream.AddToMessage(configure_msg)
|
||||
}
|
||||
_, err := handle.Message(ctx, configure_msg, nil)
|
||||
if err != nil {
|
||||
callback(err, nil)
|
||||
return
|
||||
}
|
||||
|
||||
callback(nil, nil)
|
||||
}
|
457
mcu_janus_publisher.go
Normal file
457
mcu_janus_publisher.go
Normal file
|
@ -0,0 +1,457 @@
|
|||
/**
|
||||
* Standalone signaling server for the Nextcloud Spreed app.
|
||||
* Copyright (C) 2017 struktur AG
|
||||
*
|
||||
* @author Joachim Bauch <bauch@struktur.de>
|
||||
*
|
||||
* @license GNU AGPL version 3 or any later version
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
package signaling
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/notedit/janus-go"
|
||||
"github.com/pion/sdp/v3"
|
||||
)
|
||||
|
||||
const (
	// RTP header extension URLs that are looked up in the negotiated SDP
	// when describing publisher streams (see "GetStreams").
	ExtensionUrlPlayoutDelay     = "http://www.webrtc.org/experiments/rtp-hdrext/playout-delay"
	ExtensionUrlVideoOrientation = "urn:3gpp:video-orientation"
)

const (
	// Bits for the "sdpFlags" member of "mcuJanusPublisher", set once the
	// offer / answer SDP has been processed. "sdpReady" is closed when both
	// bits are present.
	sdpHasOffer  = 1
	sdpHasAnswer = 2
)
|
||||
|
||||
// mcuJanusPublisher is a Janus videoroom publisher, i.e. a client that is
// sending media to the MCU.
type mcuJanusPublisher struct {
	mcuJanusClient

	id         string    // id of the published stream
	bitrate    int       // bitrate passed when (re-)creating the publisher handle
	mediaTypes MediaType // bitmask of media types offered by this publisher
	stats      publisherStatsCounter
	sdpFlags   Flags   // combination of "sdpHasOffer" / "sdpHasAnswer"
	sdpReady   *Closer // closed once both offer and answer SDP have been processed

	// Parsed SDPs of the last client offer and Janus answer; updated from
	// "SendMessage" and consumed by "GetStreams".
	offerSdp  atomic.Pointer[sdp.SessionDescription]
	answerSdp atomic.Pointer[sdp.SessionDescription]
}
|
||||
|
||||
// handleEvent processes videoroom plugin events for this publisher. Only
// the "destroyed" notification triggers an action (closing the publisher
// asynchronously); everything else is ignored or logged.
func (p *mcuJanusPublisher) handleEvent(event *janus.EventMsg) {
	if videoroom := getPluginStringValue(event.Plugindata, pluginVideoRoom, "videoroom"); videoroom != "" {
		ctx := context.TODO()
		switch videoroom {
		case "destroyed":
			log.Printf("Publisher %d: associated room has been destroyed, closing", p.handleId)
			go p.Close(ctx)
		case "slow_link":
			// Ignore, processed through "handleSlowLink" in the general events.
		default:
			log.Printf("Unsupported videoroom publisher event in %d: %+v", p.handleId, event)
		}
	} else {
		log.Printf("Unsupported publisher event in %d: %+v", p.handleId, event)
	}
}
|
||||
|
||||
// handleHangup is called when Janus reports that the PeerConnection was
// hung up; the publisher is closed asynchronously.
func (p *mcuJanusPublisher) handleHangup(event *janus.HangupMsg) {
	log.Printf("Publisher %d received hangup (%s), closing", p.handleId, event.Reason)
	go p.Close(context.Background())
}

// handleDetached is called when the plugin handle was detached on the
// Janus side; the publisher is closed asynchronously.
func (p *mcuJanusPublisher) handleDetached(event *janus.DetachedMsg) {
	log.Printf("Publisher %d received detached, closing", p.handleId)
	go p.Close(context.Background())
}

// handleConnected is called when the publisher PeerConnection is up;
// clients waiting for this publisher are notified.
func (p *mcuJanusPublisher) handleConnected(event *janus.WebRTCUpMsg) {
	log.Printf("Publisher %d received connected", p.handleId)
	p.mcu.publisherConnected.Notify(getStreamId(p.id, p.streamType))
}

// handleSlowLink logs packet loss reported by Janus for this publisher.
func (p *mcuJanusPublisher) handleSlowLink(event *janus.SlowLinkMsg) {
	if event.Uplink {
		log.Printf("Publisher %s (%d) is reporting %d lost packets on the uplink (Janus -> client)", p.listener.PublicId(), p.handleId, event.Lost)
	} else {
		log.Printf("Publisher %s (%d) is reporting %d lost packets on the downlink (client -> Janus)", p.listener.PublicId(), p.handleId, event.Lost)
	}
}

// handleMedia updates the stats counters when Janus starts / stops
// receiving a media type from this publisher.
func (p *mcuJanusPublisher) handleMedia(event *janus.MediaMsg) {
	mediaType := StreamType(event.Type)
	if mediaType == StreamTypeVideo && p.streamType == StreamTypeScreen {
		// We want to differentiate between audio, video and screensharing
		mediaType = p.streamType
	}

	p.stats.EnableStream(mediaType, event.Receiving)
}
|
||||
|
||||
// HasMedia returns true if the publisher offers all media types of "mt".
func (p *mcuJanusPublisher) HasMedia(mt MediaType) bool {
	return (p.mediaTypes & mt) == mt
}

// SetMedia replaces the bitmask of media types offered by this publisher.
func (p *mcuJanusPublisher) SetMedia(mt MediaType) {
	p.mediaTypes = mt
}
|
||||
|
||||
// NotifyReconnected is called after the connection to the Janus server has
// been re-established. It creates a new publisher handle / room and updates
// the internal state accordingly.
func (p *mcuJanusPublisher) NotifyReconnected() {
	ctx := context.TODO()
	handle, session, roomId, _, err := p.mcu.getOrCreatePublisherHandle(ctx, p.id, p.streamType, p.bitrate)
	if err != nil {
		log.Printf("Could not reconnect publisher %s: %s", p.id, err)
		// TODO(jojo): Retry
		return
	}

	p.handle = handle
	p.handleId = handle.Id
	p.session = session
	p.roomId = roomId

	log.Printf("Publisher %s reconnected on handle %d", p.id, p.handleId)
}
|
||||
|
||||
// Close closes the publisher: the associated videoroom room is destroyed
// (best effort), the publisher is removed from the mcu bookkeeping, stats
// are reset and - if a room had been assigned - metrics, mcu and listener
// are notified before the underlying client is closed.
func (p *mcuJanusPublisher) Close(ctx context.Context) {
	notify := false
	p.mu.Lock()
	if handle := p.handle; handle != nil && p.roomId != 0 {
		destroy_msg := map[string]interface{}{
			"request": "destroy",
			"room":    p.roomId,
		}
		if _, err := handle.Request(ctx, destroy_msg); err != nil {
			log.Printf("Error destroying room %d: %s", p.roomId, err)
		} else {
			log.Printf("Room %d destroyed", p.roomId)
		}
		p.mcu.mu.Lock()
		delete(p.mcu.publishers, getStreamId(p.id, p.streamType))
		p.mcu.mu.Unlock()
		p.roomId = 0
		notify = true
	}
	p.closeClient(ctx)
	p.mu.Unlock()

	p.stats.Reset()

	if notify {
		statsPublishersCurrent.WithLabelValues(string(p.streamType)).Dec()
		p.mcu.unregisterClient(p)
		p.listener.PublisherClosed(p)
	}
	p.mcuJanusClient.Close(ctx)
}
|
||||
|
||||
// SendMessage processes a signaling message ("offer", "candidate" or
// "endOfCandidates") for this publisher. Offers and candidates are
// processed asynchronously through the "deferred" channel (serialized with
// event handling in "run"); the callback may therefore be invoked from a
// different goroutine.
func (p *mcuJanusPublisher) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) {
	statsMcuMessagesTotal.WithLabelValues(data.Type).Inc()
	jsep_msg := data.Payload
	switch data.Type {
	case "offer":
		p.deferred <- func() {
			if data.offerSdp == nil {
				// Should have been checked before.
				go callback(errors.New("No sdp found in offer"), nil)
				return
			}

			// Remember the parsed offer and mark it as processed; once both
			// offer and answer are known, "sdpReady" is closed so that
			// "GetStreams" can proceed.
			p.offerSdp.Store(data.offerSdp)
			p.sdpFlags.Add(sdpHasOffer)
			if p.sdpFlags.Get() == sdpHasAnswer|sdpHasOffer {
				p.sdpReady.Close()
			}

			// TODO Tear down previous publisher and get a new one if sid does
			// not match?
			msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
			defer cancel()

			p.sendOffer(msgctx, jsep_msg, func(err error, jsep map[string]interface{}) {
				if err != nil {
					callback(err, jsep)
					return
				}

				// Try to parse the answer SDP for "GetStreams"; failures are
				// logged but do not fail the offer/answer exchange itself.
				sdpData, found := jsep["sdp"]
				if !found {
					log.Printf("No sdp found in answer %+v", jsep)
				} else {
					sdpString, ok := sdpData.(string)
					if !ok {
						log.Printf("Invalid sdp found in answer %+v", jsep)
					} else {
						var answerSdp sdp.SessionDescription
						if err := answerSdp.UnmarshalString(sdpString); err != nil {
							log.Printf("Error parsing answer sdp %+v: %s", sdpString, err)
							p.answerSdp.Store(nil)
							p.sdpFlags.Remove(sdpHasAnswer)
						} else {
							p.answerSdp.Store(&answerSdp)
							p.sdpFlags.Add(sdpHasAnswer)
							if p.sdpFlags.Get() == sdpHasAnswer|sdpHasOffer {
								p.sdpReady.Close()
							}
						}
					}
				}

				callback(nil, jsep)
			})
		}
	case "candidate":
		p.deferred <- func() {
			msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
			defer cancel()

			// Only forward candidates that belong to the current session.
			if data.Sid == "" || data.Sid == p.Sid() {
				p.sendCandidate(msgctx, jsep_msg["candidate"], callback)
			} else {
				go callback(fmt.Errorf("Candidate message sid (%s) does not match publisher sid (%s)", data.Sid, p.Sid()), nil)
			}
		}
	case "endOfCandidates":
		// Ignore
	default:
		go callback(fmt.Errorf("Unsupported message type: %s", data.Type), nil)
	}
}
|
||||
|
||||
// getFmtpValue searches the ";"-separated fmtp attribute string for the
// given key (case-insensitive, surrounding whitespace ignored) and returns
// its value and whether the key was found.
func getFmtpValue(fmtp string, key string) (string, bool) {
	for _, part := range strings.Split(fmtp, ";") {
		name, value, ok := strings.Cut(part, "=")
		if !ok {
			// Entry without a value, e.g. a bare flag.
			continue
		}

		if strings.EqualFold(strings.TrimSpace(name), key) {
			return strings.TrimSpace(value), true
		}
	}
	return "", false
}
|
||||
|
||||
func (p *mcuJanusPublisher) GetStreams(ctx context.Context) ([]PublisherStream, error) {
|
||||
offerSdp := p.offerSdp.Load()
|
||||
answerSdp := p.answerSdp.Load()
|
||||
if offerSdp == nil || answerSdp == nil {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case <-p.sdpReady.C:
|
||||
offerSdp = p.offerSdp.Load()
|
||||
answerSdp = p.answerSdp.Load()
|
||||
if offerSdp == nil || answerSdp == nil {
|
||||
// Only can happen on invalid SDPs.
|
||||
return nil, errors.New("no offer and/or answer processed yet")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var streams []PublisherStream
|
||||
for idx, m := range answerSdp.MediaDescriptions {
|
||||
mid, found := m.Attribute(sdp.AttrKeyMID)
|
||||
if !found {
|
||||
continue
|
||||
}
|
||||
|
||||
s := PublisherStream{
|
||||
Mid: mid,
|
||||
Mindex: idx,
|
||||
Type: m.MediaName.Media,
|
||||
}
|
||||
|
||||
if len(m.MediaName.Formats) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.EqualFold(s.Type, "application") && strings.EqualFold(m.MediaName.Formats[0], "webrtc-datachannel") {
|
||||
s.Type = "data"
|
||||
streams = append(streams, s)
|
||||
continue
|
||||
}
|
||||
|
||||
pt, err := strconv.ParseInt(m.MediaName.Formats[0], 10, 8)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
answerCodec, err := answerSdp.GetCodecForPayloadType(uint8(pt))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.EqualFold(s.Type, "audio") {
|
||||
s.Codec = answerCodec.Name
|
||||
if value, found := getFmtpValue(answerCodec.Fmtp, "useinbandfec"); found && value == "1" {
|
||||
s.Fec = true
|
||||
}
|
||||
if value, found := getFmtpValue(answerCodec.Fmtp, "usedtx"); found && value == "1" {
|
||||
s.Dtx = true
|
||||
}
|
||||
if value, found := getFmtpValue(answerCodec.Fmtp, "stereo"); found && value == "1" {
|
||||
s.Stereo = true
|
||||
}
|
||||
} else if strings.EqualFold(s.Type, "video") {
|
||||
s.Codec = answerCodec.Name
|
||||
// TODO: Determine if SVC is used.
|
||||
s.Svc = false
|
||||
|
||||
if strings.EqualFold(answerCodec.Name, "vp9") {
|
||||
// Parse VP9 profile from "profile-id=XXX"
|
||||
// Exampe: "a=fmtp:98 profile-id=0"
|
||||
if profile, found := getFmtpValue(answerCodec.Fmtp, "profile-id"); found {
|
||||
s.ProfileVP9 = profile
|
||||
}
|
||||
} else if strings.EqualFold(answerCodec.Name, "h264") {
|
||||
// Parse H.264 profile from "profile-level-id=XXX"
|
||||
// Example: "a=fmtp:104 level-asymmetry-allowed=1;packetization-mode=0;profile-level-id=42001f"
|
||||
if profile, found := getFmtpValue(answerCodec.Fmtp, "profile-level-id"); found {
|
||||
s.ProfileH264 = profile
|
||||
}
|
||||
}
|
||||
|
||||
var extmap sdp.ExtMap
|
||||
for _, a := range m.Attributes {
|
||||
switch a.Key {
|
||||
case sdp.AttrKeyExtMap:
|
||||
if err := extmap.Unmarshal(extmap.Name() + ":" + a.Value); err != nil {
|
||||
log.Printf("Error parsing extmap %s: %s", a.Value, err)
|
||||
continue
|
||||
}
|
||||
|
||||
switch extmap.URI.String() {
|
||||
case ExtensionUrlPlayoutDelay:
|
||||
s.ExtIdPlayoutDelay = extmap.Value
|
||||
case ExtensionUrlVideoOrientation:
|
||||
s.ExtIdVideoOrientation = extmap.Value
|
||||
}
|
||||
case "simulcast":
|
||||
s.Simulcast = true
|
||||
case sdp.AttrKeySSRCGroup:
|
||||
if strings.HasPrefix(a.Value, "SIM ") {
|
||||
s.Simulcast = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, a := range offerSdp.MediaDescriptions[idx].Attributes {
|
||||
switch a.Key {
|
||||
case "simulcast":
|
||||
s.Simulcast = true
|
||||
case sdp.AttrKeySSRCGroup:
|
||||
if strings.HasPrefix(a.Value, "SIM ") {
|
||||
s.Simulcast = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} else if strings.EqualFold(s.Type, "data") { // nolint
|
||||
// Already handled above.
|
||||
} else {
|
||||
log.Printf("Skip type %s", s.Type)
|
||||
continue
|
||||
}
|
||||
|
||||
streams = append(streams, s)
|
||||
}
|
||||
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
// getPublisherRemoteId returns the unique id under which a publisher is
// known on a remote Janus server ("<id>@<remoteId>").
func getPublisherRemoteId(id string, remoteId string) string {
	return id + "@" + remoteId
}
|
||||
|
||||
// PublishRemote asks Janus to forward the media of this publisher to
// another Janus server at the given host / ports. Plugin-level errors in an
// otherwise successful response are converted to a *janus.ErrorMsg.
func (p *mcuJanusPublisher) PublishRemote(ctx context.Context, remoteId string, hostname string, port int, rtcpPort int) error {
	msg := map[string]interface{}{
		"request":      "publish_remotely",
		"room":         p.roomId,
		"publisher_id": streamTypeUserIds[p.streamType],
		"remote_id":    getPublisherRemoteId(p.id, remoteId),
		"host":         hostname,
		"port":         port,
		"rtcp_port":    rtcpPort,
	}
	response, err := p.handle.Request(ctx, msg)
	if err != nil {
		return err
	}

	// The request can succeed on the transport level but still fail inside
	// the plugin; check the plugin data for errors.
	errorMessage := getPluginStringValue(response.PluginData, pluginVideoRoom, "error")
	errorCode := getPluginIntValue(response.PluginData, pluginVideoRoom, "error_code")
	if errorMessage != "" || errorCode != 0 {
		// Fill in defaults if only one of code / message was returned.
		if errorCode == 0 {
			errorCode = 500
		}
		if errorMessage == "" {
			errorMessage = "unknown error"
		}

		return &janus.ErrorMsg{
			Err: janus.ErrorData{
				Code:   int(errorCode),
				Reason: errorMessage,
			},
		}
	}

	log.Printf("Publishing %s to %s (port=%d, rtcpPort=%d) for %s", p.id, hostname, port, rtcpPort, remoteId)
	return nil
}
|
||||
|
||||
// UnpublishRemote stops forwarding the media of this publisher to the given
// remote Janus server. Plugin-level errors in an otherwise successful
// response are converted to a *janus.ErrorMsg.
func (p *mcuJanusPublisher) UnpublishRemote(ctx context.Context, remoteId string) error {
	msg := map[string]interface{}{
		"request":      "unpublish_remotely",
		"room":         p.roomId,
		"publisher_id": streamTypeUserIds[p.streamType],
		"remote_id":    getPublisherRemoteId(p.id, remoteId),
	}
	response, err := p.handle.Request(ctx, msg)
	if err != nil {
		return err
	}

	// The request can succeed on the transport level but still fail inside
	// the plugin; check the plugin data for errors.
	errorMessage := getPluginStringValue(response.PluginData, pluginVideoRoom, "error")
	errorCode := getPluginIntValue(response.PluginData, pluginVideoRoom, "error_code")
	if errorMessage != "" || errorCode != 0 {
		// Fill in defaults if only one of code / message was returned.
		if errorCode == 0 {
			errorCode = 500
		}
		if errorMessage == "" {
			errorMessage = "unknown error"
		}

		return &janus.ErrorMsg{
			Err: janus.ErrorData{
				Code:   int(errorCode),
				Reason: errorMessage,
			},
		}
	}

	log.Printf("Unpublished remote %s for %s", p.id, remoteId)
	return nil
}
|
92
mcu_janus_publisher_test.go
Normal file
92
mcu_janus_publisher_test.go
Normal file
|
@ -0,0 +1,92 @@
|
|||
/**
|
||||
* Standalone signaling server for the Nextcloud Spreed app.
|
||||
* Copyright (C) 2024 struktur AG
|
||||
*
|
||||
* @author Joachim Bauch <bauch@struktur.de>
|
||||
*
|
||||
* @license GNU AGPL version 3 or any later version
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
package signaling
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGetFmtpValueH264(t *testing.T) {
|
||||
testcases := []struct {
|
||||
fmtp string
|
||||
profile string
|
||||
}{
|
||||
{
|
||||
"",
|
||||
"",
|
||||
},
|
||||
{
|
||||
"level-asymmetry-allowed=1;packetization-mode=0;profile-level-id=42001f",
|
||||
"42001f",
|
||||
},
|
||||
{
|
||||
"level-asymmetry-allowed=1;packetization-mode=0",
|
||||
"",
|
||||
},
|
||||
{
|
||||
"level-asymmetry-allowed=1; packetization-mode=0; profile-level-id = 42001f",
|
||||
"42001f",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
value, found := getFmtpValue(tc.fmtp, "profile-level-id")
|
||||
if !found && tc.profile != "" {
|
||||
t.Errorf("did not find profile \"%s\" in \"%s\"", tc.profile, tc.fmtp)
|
||||
} else if found && tc.profile == "" {
|
||||
t.Errorf("did not expect profile in \"%s\" but got \"%s\"", tc.fmtp, value)
|
||||
} else if found && tc.profile != value {
|
||||
t.Errorf("expected profile \"%s\" in \"%s\" but got \"%s\"", tc.profile, tc.fmtp, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetFmtpValueVP9(t *testing.T) {
|
||||
testcases := []struct {
|
||||
fmtp string
|
||||
profile string
|
||||
}{
|
||||
{
|
||||
"",
|
||||
"",
|
||||
},
|
||||
{
|
||||
"profile-id=0",
|
||||
"0",
|
||||
},
|
||||
{
|
||||
"profile-id = 0",
|
||||
"0",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
value, found := getFmtpValue(tc.fmtp, "profile-id")
|
||||
if !found && tc.profile != "" {
|
||||
t.Errorf("did not find profile \"%s\" in \"%s\"", tc.profile, tc.fmtp)
|
||||
} else if found && tc.profile == "" {
|
||||
t.Errorf("did not expect profile in \"%s\" but got \"%s\"", tc.fmtp, value)
|
||||
} else if found && tc.profile != value {
|
||||
t.Errorf("expected profile \"%s\" in \"%s\" but got \"%s\"", tc.profile, tc.fmtp, value)
|
||||
}
|
||||
}
|
||||
}
|
150
mcu_janus_remote_publisher.go
Normal file
150
mcu_janus_remote_publisher.go
Normal file
|
@ -0,0 +1,150 @@
|
|||
/**
|
||||
* Standalone signaling server for the Nextcloud Spreed app.
|
||||
* Copyright (C) 2024 struktur AG
|
||||
*
|
||||
* @author Joachim Bauch <bauch@struktur.de>
|
||||
*
|
||||
* @license GNU AGPL version 3 or any later version
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
package signaling
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/notedit/janus-go"
|
||||
)
|
||||
|
||||
// mcuJanusRemotePublisher is a reference-counted handle for a publisher
// whose media is forwarded from another Janus server to the local one
// (see "publish_remotely" in the publisher code).
type mcuJanusRemotePublisher struct {
	mcuJanusPublisher

	// Number of users of this remote publisher; it is torn down when the
	// count drops to zero (see "Close").
	ref atomic.Int64

	port     int // port the remote media is forwarded to (RTP; see PublishRemote "port")
	rtcpPort int // port for RTCP (see PublishRemote "rtcp_port")
}

// addRef increments the reference count and returns the new value.
func (p *mcuJanusRemotePublisher) addRef() int64 {
	return p.ref.Add(1)
}

// release decrements the reference count and returns true if this was the
// last reference.
func (p *mcuJanusRemotePublisher) release() bool {
	return p.ref.Add(-1) == 0
}

// Port returns the media port of the remote publisher.
func (p *mcuJanusRemotePublisher) Port() int {
	return p.port
}

// RtcpPort returns the RTCP port of the remote publisher.
func (p *mcuJanusRemotePublisher) RtcpPort() int {
	return p.rtcpPort
}
|
||||
|
||||
// handleEvent processes videoroom plugin events for this remote publisher.
// Only the "destroyed" notification triggers an action (closing the
// publisher asynchronously); everything else is ignored or logged.
func (p *mcuJanusRemotePublisher) handleEvent(event *janus.EventMsg) {
	if videoroom := getPluginStringValue(event.Plugindata, pluginVideoRoom, "videoroom"); videoroom != "" {
		ctx := context.TODO()
		switch videoroom {
		case "destroyed":
			log.Printf("Remote publisher %d: associated room has been destroyed, closing", p.handleId)
			go p.Close(ctx)
		case "slow_link":
			// Ignore, processed through "handleSlowLink" in the general events.
		default:
			log.Printf("Unsupported videoroom remote publisher event in %d: %+v", p.handleId, event)
		}
	} else {
		log.Printf("Unsupported remote publisher event in %d: %+v", p.handleId, event)
	}
}
|
||||
|
||||
// handleHangup is called when Janus reports that the PeerConnection was
// hung up; the remote publisher is closed asynchronously.
func (p *mcuJanusRemotePublisher) handleHangup(event *janus.HangupMsg) {
	log.Printf("Remote publisher %d received hangup (%s), closing", p.handleId, event.Reason)
	go p.Close(context.Background())
}

// handleDetached is called when the plugin handle was detached on the
// Janus side; the remote publisher is closed asynchronously.
func (p *mcuJanusRemotePublisher) handleDetached(event *janus.DetachedMsg) {
	log.Printf("Remote publisher %d received detached, closing", p.handleId)
	go p.Close(context.Background())
}

// handleConnected is called when the remote publisher PeerConnection is
// up; clients waiting for this publisher are notified.
func (p *mcuJanusRemotePublisher) handleConnected(event *janus.WebRTCUpMsg) {
	log.Printf("Remote publisher %d received connected", p.handleId)
	p.mcu.publisherConnected.Notify(getStreamId(p.id, p.streamType))
}

// handleSlowLink logs packet loss reported by Janus for this remote
// publisher.
func (p *mcuJanusRemotePublisher) handleSlowLink(event *janus.SlowLinkMsg) {
	if event.Uplink {
		log.Printf("Remote publisher %s (%d) is reporting %d lost packets on the uplink (Janus -> client)", p.listener.PublicId(), p.handleId, event.Lost)
	} else {
		log.Printf("Remote publisher %s (%d) is reporting %d lost packets on the downlink (client -> Janus)", p.listener.PublicId(), p.handleId, event.Lost)
	}
}
|
||||
|
||||
// NotifyReconnected is called after the connection to the Janus server has
// been re-established. It creates a new publisher handle / room and updates
// the internal state accordingly.
func (p *mcuJanusRemotePublisher) NotifyReconnected() {
	ctx := context.TODO()
	handle, session, roomId, _, err := p.mcu.getOrCreatePublisherHandle(ctx, p.id, p.streamType, p.bitrate)
	if err != nil {
		log.Printf("Could not reconnect remote publisher %s: %s", p.id, err)
		// TODO(jojo): Retry
		return
	}

	p.handle = handle
	p.handleId = handle.Id
	p.session = session
	p.roomId = roomId

	log.Printf("Remote publisher %s reconnected on handle %d", p.id, p.handleId)
}
|
||||
|
||||
func (p *mcuJanusRemotePublisher) Close(ctx context.Context) {
|
||||
if !p.release() {
|
||||
return
|
||||
}
|
||||
|
||||
p.mu.Lock()
|
||||
if handle := p.handle; handle != nil {
|
||||
response, err := p.handle.Request(ctx, map[string]interface{}{
|
||||
"request": "remove_remote_publisher",
|
||||
"room": p.roomId,
|
||||
"id": streamTypeUserIds[p.streamType],
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("Error removing remote publisher %s in room %d: %s", p.id, p.roomId, err)
|
||||
} else {
|
||||
log.Printf("Removed remote publisher: %+v", response)
|
||||
}
|
||||
if p.roomId != 0 {
|
||||
destroy_msg := map[string]interface{}{
|
||||
"request": "destroy",
|
||||
"room": p.roomId,
|
||||
}
|
||||
if _, err := handle.Request(ctx, destroy_msg); err != nil {
|
||||
log.Printf("Error destroying room %d: %s", p.roomId, err)
|
||||
} else {
|
||||
log.Printf("Room %d destroyed", p.roomId)
|
||||
}
|
||||
p.mcu.mu.Lock()
|
||||
delete(p.mcu.remotePublishers, getStreamId(p.id, p.streamType))
|
||||
p.mcu.mu.Unlock()
|
||||
p.roomId = 0
|
||||
}
|
||||
}
|
||||
|
||||
p.closeClient(ctx)
|
||||
p.mu.Unlock()
|
||||
}
|
115
mcu_janus_remote_subscriber.go
Normal file
115
mcu_janus_remote_subscriber.go
Normal file
|
@ -0,0 +1,115 @@
|
|||
/**
|
||||
* Standalone signaling server for the Nextcloud Spreed app.
|
||||
* Copyright (C) 2024 struktur AG
|
||||
*
|
||||
* @author Joachim Bauch <bauch@struktur.de>
|
||||
*
|
||||
* @license GNU AGPL version 3 or any later version
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
package signaling
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/notedit/janus-go"
|
||||
)
|
||||
|
||||
// mcuJanusRemoteSubscriber is a subscriber that receives its media from a
// publisher on a remote Janus server (see "mcuJanusRemotePublisher").
type mcuJanusRemoteSubscriber struct {
	mcuJanusSubscriber

	// The remote publisher this subscriber is attached to; swapped to nil
	// on close so its reference is released exactly once.
	remote atomic.Pointer[mcuJanusRemotePublisher]
}
|
||||
|
||||
// handleEvent processes videoroom plugin events for this remote
// subscriber. Room destruction closes the subscriber; a renegotiation
// offer from Janus is forwarded to the listener. Everything else is
// ignored or logged.
func (p *mcuJanusRemoteSubscriber) handleEvent(event *janus.EventMsg) {
	if videoroom := getPluginStringValue(event.Plugindata, pluginVideoRoom, "videoroom"); videoroom != "" {
		ctx := context.TODO()
		switch videoroom {
		case "destroyed":
			log.Printf("Remote subscriber %d: associated room has been destroyed, closing", p.handleId)
			go p.Close(ctx)
		case "event":
			// Handle renegotiations, but ignore other events like selected
			// substream / temporal layer.
			if getPluginStringValue(event.Plugindata, pluginVideoRoom, "configured") == "ok" &&
				event.Jsep != nil && event.Jsep["type"] == "offer" && event.Jsep["sdp"] != nil {
				p.listener.OnUpdateOffer(p, event.Jsep)
			}
		case "slow_link":
			// Ignore, processed through "handleSlowLink" in the general events.
		default:
			log.Printf("Unsupported videoroom event %s for remote subscriber %d: %+v", videoroom, p.handleId, event)
		}
	} else {
		log.Printf("Unsupported event for remote subscriber %d: %+v", p.handleId, event)
	}
}
|
||||
|
||||
// handleHangup is called when Janus reports that the PeerConnection was
// hung up; the remote subscriber is closed asynchronously.
func (p *mcuJanusRemoteSubscriber) handleHangup(event *janus.HangupMsg) {
	log.Printf("Remote subscriber %d received hangup (%s), closing", p.handleId, event.Reason)
	go p.Close(context.Background())
}

// handleDetached is called when the plugin handle was detached on the
// Janus side; the remote subscriber is closed asynchronously.
func (p *mcuJanusRemoteSubscriber) handleDetached(event *janus.DetachedMsg) {
	log.Printf("Remote subscriber %d received detached, closing", p.handleId)
	go p.Close(context.Background())
}

// handleConnected is called when the subscriber PeerConnection is up; the
// mcu is notified about the established subscription.
func (p *mcuJanusRemoteSubscriber) handleConnected(event *janus.WebRTCUpMsg) {
	log.Printf("Remote subscriber %d received connected", p.handleId)
	p.mcu.SubscriberConnected(p.Id(), p.publisher, p.streamType)
}

// handleSlowLink logs packet loss reported by Janus for this remote
// subscriber.
func (p *mcuJanusRemoteSubscriber) handleSlowLink(event *janus.SlowLinkMsg) {
	if event.Uplink {
		log.Printf("Remote subscriber %s (%d) is reporting %d lost packets on the uplink (Janus -> client)", p.listener.PublicId(), p.handleId, event.Lost)
	} else {
		log.Printf("Remote subscriber %s (%d) is reporting %d lost packets on the downlink (client -> Janus)", p.listener.PublicId(), p.handleId, event.Lost)
	}
}

// handleMedia is a no-op for subscribers.
func (p *mcuJanusRemoteSubscriber) handleMedia(event *janus.MediaMsg) {
	// Only triggered for publishers
}
|
||||
|
||||
// NotifyReconnected re-attaches the subscriber to its publisher after the
// connection to the Janus server has been re-established and informs the
// listener about the new sid. If the handle cannot be re-created, the
// subscriber is closed.
func (p *mcuJanusRemoteSubscriber) NotifyReconnected() {
	ctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
	defer cancel()
	handle, pub, err := p.mcu.getOrCreateSubscriberHandle(ctx, p.publisher, p.streamType)
	if err != nil {
		// TODO(jojo): Retry?
		log.Printf("Could not reconnect remote subscriber for publisher %s: %s", p.publisher, err)
		p.Close(context.Background())
		return
	}

	p.handle = handle
	p.handleId = handle.Id
	p.roomId = pub.roomId
	p.sid = strconv.FormatUint(handle.Id, 10)
	p.listener.SubscriberSidUpdated(p)
	log.Printf("Subscriber %d for publisher %s reconnected on handle %d", p.id, p.publisher, p.handleId)
}
|
||||
|
||||
// Close closes the subscriber and releases the reference it holds on its
// remote publisher (at most once, due to the atomic swap).
func (p *mcuJanusRemoteSubscriber) Close(ctx context.Context) {
	p.mcuJanusSubscriber.Close(ctx)

	if remote := p.remote.Swap(nil); remote != nil {
		remote.Close(context.Background())
	}
}
|
110
mcu_janus_stream_selection.go
Normal file
110
mcu_janus_stream_selection.go
Normal file
|
@ -0,0 +1,110 @@
|
|||
/**
|
||||
* Standalone signaling server for the Nextcloud Spreed app.
|
||||
* Copyright (C) 2017 struktur AG
|
||||
*
|
||||
* @author Joachim Bauch <bauch@struktur.de>
|
||||
*
|
||||
* @license GNU AGPL version 3 or any later version
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
package signaling
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// streamSelection describes optional per-subscriber stream settings
// (simulcast substream / temporal layer and audio / video state). A field
// is only applied if its "Valid" flag is set.
type streamSelection struct {
	substream sql.NullInt16
	temporal  sql.NullInt16
	audio     sql.NullBool
	video     sql.NullBool
}

// HasValues returns true if at least one selection value is set.
func (s *streamSelection) HasValues() bool {
	return s.substream.Valid || s.temporal.Valid || s.audio.Valid || s.video.Valid
}

// AddToMessage adds all set selection values to the given Janus request
// message.
func (s *streamSelection) AddToMessage(message map[string]interface{}) {
	if s.substream.Valid {
		message["substream"] = s.substream.Int16
	}
	if s.temporal.Valid {
		message["temporal"] = s.temporal.Int16
	}
	if s.audio.Valid {
		message["audio"] = s.audio.Bool
	}
	if s.video.Valid {
		message["video"] = s.video.Bool
	}
}

// parseSelectionInt16 converts a numeric payload value to a valid
// sql.NullInt16, returning an error for unsupported types. "name" is only
// used in the error message.
func parseSelectionInt16(name string, value interface{}) (sql.NullInt16, error) {
	switch value := value.(type) {
	case int:
		return sql.NullInt16{Valid: true, Int16: int16(value)}, nil
	case float32:
		return sql.NullInt16{Valid: true, Int16: int16(value)}, nil
	case float64:
		// Numbers decoded from JSON arrive as float64.
		return sql.NullInt16{Valid: true, Int16: int16(value)}, nil
	default:
		return sql.NullInt16{}, fmt.Errorf("Unsupported %s value: %v", name, value)
	}
}

// parseSelectionBool converts a boolean payload value to a valid
// sql.NullBool, returning an error for unsupported types. "name" is only
// used in the error message.
func parseSelectionBool(name string, value interface{}) (sql.NullBool, error) {
	switch value := value.(type) {
	case bool:
		return sql.NullBool{Valid: true, Bool: value}, nil
	default:
		return sql.NullBool{}, fmt.Errorf("Unsupported %s value: %v", name, value)
	}
}

// parseStreamSelection extracts the stream selection from a client message
// payload. Missing entries leave the corresponding field unset; entries
// with unsupported types result in an error.
func parseStreamSelection(payload map[string]interface{}) (*streamSelection, error) {
	var stream streamSelection
	var err error
	if value, found := payload["substream"]; found {
		if stream.substream, err = parseSelectionInt16("substream", value); err != nil {
			return nil, err
		}
	}

	if value, found := payload["temporal"]; found {
		if stream.temporal, err = parseSelectionInt16("temporal", value); err != nil {
			return nil, err
		}
	}

	if value, found := payload["audio"]; found {
		if stream.audio, err = parseSelectionBool("audio", value); err != nil {
			return nil, err
		}
	}

	if value, found := payload["video"]; found {
		if stream.video, err = parseSelectionBool("video", value); err != nil {
			return nil, err
		}
	}

	return &stream, nil
}
|
321
mcu_janus_subscriber.go
Normal file
321
mcu_janus_subscriber.go
Normal file
|
@ -0,0 +1,321 @@
|
|||
/**
|
||||
* Standalone signaling server for the Nextcloud Spreed app.
|
||||
* Copyright (C) 2017 struktur AG
|
||||
*
|
||||
* @author Joachim Bauch <bauch@struktur.de>
|
||||
*
|
||||
* @license GNU AGPL version 3 or any later version
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
package signaling
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
|
||||
"github.com/notedit/janus-go"
|
||||
)
|
||||
|
||||
type mcuJanusSubscriber struct {
|
||||
mcuJanusClient
|
||||
|
||||
publisher string
|
||||
}
|
||||
|
||||
func (p *mcuJanusSubscriber) Publisher() string {
|
||||
return p.publisher
|
||||
}
|
||||
|
||||
func (p *mcuJanusSubscriber) handleEvent(event *janus.EventMsg) {
|
||||
if videoroom := getPluginStringValue(event.Plugindata, pluginVideoRoom, "videoroom"); videoroom != "" {
|
||||
ctx := context.TODO()
|
||||
switch videoroom {
|
||||
case "destroyed":
|
||||
log.Printf("Subscriber %d: associated room has been destroyed, closing", p.handleId)
|
||||
go p.Close(ctx)
|
||||
case "event":
|
||||
// Handle renegotiations, but ignore other events like selected
|
||||
// substream / temporal layer.
|
||||
if getPluginStringValue(event.Plugindata, pluginVideoRoom, "configured") == "ok" &&
|
||||
event.Jsep != nil && event.Jsep["type"] == "offer" && event.Jsep["sdp"] != nil {
|
||||
p.listener.OnUpdateOffer(p, event.Jsep)
|
||||
}
|
||||
case "slow_link":
|
||||
// Ignore, processed through "handleSlowLink" in the general events.
|
||||
default:
|
||||
log.Printf("Unsupported videoroom event %s for subscriber %d: %+v", videoroom, p.handleId, event)
|
||||
}
|
||||
} else {
|
||||
log.Printf("Unsupported event for subscriber %d: %+v", p.handleId, event)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *mcuJanusSubscriber) handleHangup(event *janus.HangupMsg) {
|
||||
log.Printf("Subscriber %d received hangup (%s), closing", p.handleId, event.Reason)
|
||||
go p.Close(context.Background())
|
||||
}
|
||||
|
||||
func (p *mcuJanusSubscriber) handleDetached(event *janus.DetachedMsg) {
|
||||
log.Printf("Subscriber %d received detached, closing", p.handleId)
|
||||
go p.Close(context.Background())
|
||||
}
|
||||
|
||||
func (p *mcuJanusSubscriber) handleConnected(event *janus.WebRTCUpMsg) {
|
||||
log.Printf("Subscriber %d received connected", p.handleId)
|
||||
p.mcu.SubscriberConnected(p.Id(), p.publisher, p.streamType)
|
||||
}
|
||||
|
||||
func (p *mcuJanusSubscriber) handleSlowLink(event *janus.SlowLinkMsg) {
|
||||
if event.Uplink {
|
||||
log.Printf("Subscriber %s (%d) is reporting %d lost packets on the uplink (Janus -> client)", p.listener.PublicId(), p.handleId, event.Lost)
|
||||
} else {
|
||||
log.Printf("Subscriber %s (%d) is reporting %d lost packets on the downlink (client -> Janus)", p.listener.PublicId(), p.handleId, event.Lost)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *mcuJanusSubscriber) handleMedia(event *janus.MediaMsg) {
|
||||
// Only triggered for publishers
|
||||
}
|
||||
|
||||
func (p *mcuJanusSubscriber) NotifyReconnected() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
|
||||
defer cancel()
|
||||
handle, pub, err := p.mcu.getOrCreateSubscriberHandle(ctx, p.publisher, p.streamType)
|
||||
if err != nil {
|
||||
// TODO(jojo): Retry?
|
||||
log.Printf("Could not reconnect subscriber for publisher %s: %s", p.publisher, err)
|
||||
p.Close(context.Background())
|
||||
return
|
||||
}
|
||||
|
||||
p.handle = handle
|
||||
p.handleId = handle.Id
|
||||
p.roomId = pub.roomId
|
||||
p.sid = strconv.FormatUint(handle.Id, 10)
|
||||
p.listener.SubscriberSidUpdated(p)
|
||||
log.Printf("Subscriber %d for publisher %s reconnected on handle %d", p.id, p.publisher, p.handleId)
|
||||
}
|
||||
|
||||
func (p *mcuJanusSubscriber) Close(ctx context.Context) {
|
||||
p.mu.Lock()
|
||||
closed := p.closeClient(ctx)
|
||||
p.mu.Unlock()
|
||||
|
||||
if closed {
|
||||
p.mcu.SubscriberDisconnected(p.Id(), p.publisher, p.streamType)
|
||||
statsSubscribersCurrent.WithLabelValues(string(p.streamType)).Dec()
|
||||
}
|
||||
p.mcu.unregisterClient(p)
|
||||
p.listener.SubscriberClosed(p)
|
||||
p.mcuJanusClient.Close(ctx)
|
||||
}
|
||||
|
||||
func (p *mcuJanusSubscriber) joinRoom(ctx context.Context, stream *streamSelection, callback func(error, map[string]interface{})) {
|
||||
handle := p.handle
|
||||
if handle == nil {
|
||||
callback(ErrNotConnected, nil)
|
||||
return
|
||||
}
|
||||
|
||||
waiter := p.mcu.publisherConnected.NewWaiter(getStreamId(p.publisher, p.streamType))
|
||||
defer p.mcu.publisherConnected.Release(waiter)
|
||||
|
||||
loggedNotPublishingYet := false
|
||||
retry:
|
||||
join_msg := map[string]interface{}{
|
||||
"request": "join",
|
||||
"ptype": "subscriber",
|
||||
"room": p.roomId,
|
||||
}
|
||||
if p.mcu.isMultistream() {
|
||||
join_msg["streams"] = []map[string]interface{}{
|
||||
{
|
||||
"feed": streamTypeUserIds[p.streamType],
|
||||
},
|
||||
}
|
||||
} else {
|
||||
join_msg["feed"] = streamTypeUserIds[p.streamType]
|
||||
}
|
||||
if stream != nil {
|
||||
stream.AddToMessage(join_msg)
|
||||
}
|
||||
join_response, err := handle.Message(ctx, join_msg, nil)
|
||||
if err != nil {
|
||||
callback(err, nil)
|
||||
return
|
||||
}
|
||||
|
||||
if error_code := getPluginIntValue(join_response.Plugindata, pluginVideoRoom, "error_code"); error_code > 0 {
|
||||
switch error_code {
|
||||
case JANUS_VIDEOROOM_ERROR_ALREADY_JOINED:
|
||||
// The subscriber is already connected to the room. This can happen
|
||||
// if a client leaves a call but keeps the subscriber objects active.
|
||||
// On joining the call again, the subscriber tries to join on the
|
||||
// MCU which will fail because he is still connected.
|
||||
// To get a new Offer SDP, we have to tear down the session on the
|
||||
// MCU and join again.
|
||||
p.mu.Lock()
|
||||
p.closeClient(ctx)
|
||||
p.mu.Unlock()
|
||||
|
||||
var pub *mcuJanusPublisher
|
||||
handle, pub, err = p.mcu.getOrCreateSubscriberHandle(ctx, p.publisher, p.streamType)
|
||||
if err != nil {
|
||||
// Reconnection didn't work, need to unregister/remove subscriber
|
||||
// so a new object will be created if the request is retried.
|
||||
p.mcu.unregisterClient(p)
|
||||
p.listener.SubscriberClosed(p)
|
||||
callback(fmt.Errorf("Already connected as subscriber for %s, error during re-joining: %s", p.streamType, err), nil)
|
||||
return
|
||||
}
|
||||
|
||||
p.handle = handle
|
||||
p.handleId = handle.Id
|
||||
p.roomId = pub.roomId
|
||||
p.sid = strconv.FormatUint(handle.Id, 10)
|
||||
p.listener.SubscriberSidUpdated(p)
|
||||
p.closeChan = make(chan struct{}, 1)
|
||||
go p.run(p.handle, p.closeChan)
|
||||
log.Printf("Already connected subscriber %d for %s, leaving and re-joining on handle %d", p.id, p.streamType, p.handleId)
|
||||
goto retry
|
||||
case JANUS_VIDEOROOM_ERROR_NO_SUCH_ROOM:
|
||||
fallthrough
|
||||
case JANUS_VIDEOROOM_ERROR_NO_SUCH_FEED:
|
||||
switch error_code {
|
||||
case JANUS_VIDEOROOM_ERROR_NO_SUCH_ROOM:
|
||||
log.Printf("Publisher %s not created yet for %s, wait and retry to join room %d as subscriber", p.publisher, p.streamType, p.roomId)
|
||||
case JANUS_VIDEOROOM_ERROR_NO_SUCH_FEED:
|
||||
log.Printf("Publisher %s not sending yet for %s, wait and retry to join room %d as subscriber", p.publisher, p.streamType, p.roomId)
|
||||
}
|
||||
|
||||
if !loggedNotPublishingYet {
|
||||
loggedNotPublishingYet = true
|
||||
statsWaitingForPublisherTotal.WithLabelValues(string(p.streamType)).Inc()
|
||||
}
|
||||
|
||||
if err := waiter.Wait(ctx); err != nil {
|
||||
callback(err, nil)
|
||||
return
|
||||
}
|
||||
log.Printf("Retry subscribing %s from %s", p.streamType, p.publisher)
|
||||
goto retry
|
||||
default:
|
||||
// TODO(jojo): Should we handle other errors, too?
|
||||
callback(fmt.Errorf("Error joining room as subscriber: %+v", join_response), nil)
|
||||
return
|
||||
}
|
||||
}
|
||||
//log.Println("Joined as listener", join_response)
|
||||
|
||||
p.session = join_response.Session
|
||||
callback(nil, join_response.Jsep)
|
||||
}
|
||||
|
||||
func (p *mcuJanusSubscriber) update(ctx context.Context, stream *streamSelection, callback func(error, map[string]interface{})) {
|
||||
handle := p.handle
|
||||
if handle == nil {
|
||||
callback(ErrNotConnected, nil)
|
||||
return
|
||||
}
|
||||
|
||||
configure_msg := map[string]interface{}{
|
||||
"request": "configure",
|
||||
"update": true,
|
||||
}
|
||||
if stream != nil {
|
||||
stream.AddToMessage(configure_msg)
|
||||
}
|
||||
configure_response, err := handle.Message(ctx, configure_msg, nil)
|
||||
if err != nil {
|
||||
callback(err, nil)
|
||||
return
|
||||
}
|
||||
|
||||
callback(nil, configure_response.Jsep)
|
||||
}
|
||||
|
||||
func (p *mcuJanusSubscriber) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) {
|
||||
statsMcuMessagesTotal.WithLabelValues(data.Type).Inc()
|
||||
jsep_msg := data.Payload
|
||||
switch data.Type {
|
||||
case "requestoffer":
|
||||
fallthrough
|
||||
case "sendoffer":
|
||||
p.deferred <- func() {
|
||||
msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
|
||||
defer cancel()
|
||||
|
||||
stream, err := parseStreamSelection(jsep_msg)
|
||||
if err != nil {
|
||||
go callback(err, nil)
|
||||
return
|
||||
}
|
||||
|
||||
if data.Sid == "" || data.Sid != p.Sid() {
|
||||
p.joinRoom(msgctx, stream, callback)
|
||||
} else {
|
||||
p.update(msgctx, stream, callback)
|
||||
}
|
||||
}
|
||||
case "answer":
|
||||
p.deferred <- func() {
|
||||
msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
|
||||
defer cancel()
|
||||
|
||||
if data.Sid == "" || data.Sid == p.Sid() {
|
||||
p.sendAnswer(msgctx, jsep_msg, callback)
|
||||
} else {
|
||||
go callback(fmt.Errorf("Answer message sid (%s) does not match subscriber sid (%s)", data.Sid, p.Sid()), nil)
|
||||
}
|
||||
}
|
||||
case "candidate":
|
||||
p.deferred <- func() {
|
||||
msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
|
||||
defer cancel()
|
||||
|
||||
if data.Sid == "" || data.Sid == p.Sid() {
|
||||
p.sendCandidate(msgctx, jsep_msg["candidate"], callback)
|
||||
} else {
|
||||
go callback(fmt.Errorf("Candidate message sid (%s) does not match subscriber sid (%s)", data.Sid, p.Sid()), nil)
|
||||
}
|
||||
}
|
||||
case "endOfCandidates":
|
||||
// Ignore
|
||||
case "selectStream":
|
||||
stream, err := parseStreamSelection(jsep_msg)
|
||||
if err != nil {
|
||||
go callback(err, nil)
|
||||
return
|
||||
}
|
||||
|
||||
if stream == nil || !stream.HasValues() {
|
||||
// Nothing to do
|
||||
go callback(nil, nil)
|
||||
return
|
||||
}
|
||||
|
||||
p.deferred <- func() {
|
||||
msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
|
||||
defer cancel()
|
||||
|
||||
p.selectStream(msgctx, stream, callback)
|
||||
}
|
||||
default:
|
||||
// Return error asynchronously
|
||||
go callback(fmt.Errorf("Unsupported message type: %s", data.Type), nil)
|
||||
}
|
||||
}
|
1167
mcu_proxy.go
1167
mcu_proxy.go
File diff suppressed because it is too large
Load diff
1558
mcu_proxy_test.go
1558
mcu_proxy_test.go
File diff suppressed because it is too large
Load diff
31
mcu_test.go
31
mcu_test.go
|
@ -23,6 +23,7 @@ package signaling
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"sync"
|
||||
|
@ -49,7 +50,7 @@ func NewTestMCU() (*TestMCU, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
func (m *TestMCU) Start() error {
|
||||
func (m *TestMCU) Start(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -69,9 +70,9 @@ func (m *TestMCU) GetStats() interface{} {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *TestMCU) NewPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType string, bitrate int, mediaTypes MediaType, initiator McuInitiator) (McuPublisher, error) {
|
||||
func (m *TestMCU) NewPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType StreamType, bitrate int, mediaTypes MediaType, initiator McuInitiator) (McuPublisher, error) {
|
||||
var maxBitrate int
|
||||
if streamType == streamTypeScreen {
|
||||
if streamType == StreamTypeScreen {
|
||||
maxBitrate = TestMaxBitrateScreen
|
||||
} else {
|
||||
maxBitrate = TestMaxBitrateVideo
|
||||
|
@ -117,7 +118,7 @@ func (m *TestMCU) GetPublisher(id string) *TestMCUPublisher {
|
|||
return m.publishers[id]
|
||||
}
|
||||
|
||||
func (m *TestMCU) NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType string) (McuSubscriber, error) {
|
||||
func (m *TestMCU) NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType, initiator McuInitiator) (McuSubscriber, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
|
@ -143,7 +144,7 @@ type TestMCUClient struct {
|
|||
|
||||
id string
|
||||
sid string
|
||||
streamType string
|
||||
streamType StreamType
|
||||
}
|
||||
|
||||
func (c *TestMCUClient) Id() string {
|
||||
|
@ -154,10 +155,14 @@ func (c *TestMCUClient) Sid() string {
|
|||
return c.sid
|
||||
}
|
||||
|
||||
func (c *TestMCUClient) StreamType() string {
|
||||
func (c *TestMCUClient) StreamType() StreamType {
|
||||
return c.streamType
|
||||
}
|
||||
|
||||
func (c *TestMCUClient) MaxBitrate() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (c *TestMCUClient) Close(ctx context.Context) {
|
||||
if c.closed.CompareAndSwap(false, true) {
|
||||
log.Printf("Close MCU client %s", c.id)
|
||||
|
@ -218,6 +223,18 @@ func (p *TestMCUPublisher) SendMessage(ctx context.Context, message *MessageClie
|
|||
}()
|
||||
}
|
||||
|
||||
func (p *TestMCUPublisher) GetStreams(ctx context.Context) ([]PublisherStream, error) {
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (p *TestMCUPublisher) PublishRemote(ctx context.Context, remoteId string, hostname string, port int, rtcpPort int) error {
|
||||
return errors.New("remote publishing not supported")
|
||||
}
|
||||
|
||||
func (p *TestMCUPublisher) UnpublishRemote(ctx context.Context, remoteId string) error {
|
||||
return errors.New("remote publishing not supported")
|
||||
}
|
||||
|
||||
type TestMCUSubscriber struct {
|
||||
TestMCUClient
|
||||
|
||||
|
@ -249,6 +266,8 @@ func (s *TestMCUSubscriber) SendMessage(ctx context.Context, message *MessageCli
|
|||
"type": "offer",
|
||||
"sdp": sdp,
|
||||
})
|
||||
case "answer":
|
||||
callback(nil, nil)
|
||||
default:
|
||||
callback(fmt.Errorf("Message type %s is not implemented", data.Type), nil)
|
||||
}
|
||||
|
|
|
@ -1,3 +1,24 @@
|
|||
/**
|
||||
* Standalone signaling server for the Nextcloud Spreed app.
|
||||
* Copyright (C) 2021 struktur AG
|
||||
*
|
||||
* @author Joachim Bauch <bauch@struktur.de>
|
||||
*
|
||||
* @license GNU AGPL version 3 or any later version
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
package signaling
|
||||
|
||||
const (
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
package signaling
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"log"
|
||||
|
@ -74,33 +75,27 @@ func NewNatsClient(url string) (NatsClient, error) {
|
|||
return NewLoopbackNatsClient()
|
||||
}
|
||||
|
||||
backoff, err := NewExponentialBackoff(initialConnectInterval, maxConnectInterval)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := &natsClient{}
|
||||
|
||||
var err error
|
||||
client.nc, err = nats.Connect(url,
|
||||
nats.ClosedHandler(client.onClosed),
|
||||
nats.DisconnectHandler(client.onDisconnected),
|
||||
nats.ReconnectHandler(client.onReconnected))
|
||||
|
||||
interrupt := make(chan os.Signal, 1)
|
||||
signal.Notify(interrupt, os.Interrupt)
|
||||
defer signal.Stop(interrupt)
|
||||
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
|
||||
defer stop()
|
||||
|
||||
delay := initialConnectInterval
|
||||
timer := time.NewTimer(delay)
|
||||
// The initial connect must succeed, so we retry in the case of an error.
|
||||
for err != nil {
|
||||
log.Printf("Could not create connection (%s), will retry in %s", err, delay)
|
||||
timer.Reset(delay)
|
||||
select {
|
||||
case <-interrupt:
|
||||
log.Printf("Could not create connection (%s), will retry in %s", err, backoff.NextWait())
|
||||
backoff.Wait(ctx)
|
||||
if ctx.Err() != nil {
|
||||
return nil, fmt.Errorf("interrupted")
|
||||
case <-timer.C:
|
||||
// Retry connection
|
||||
delay = delay * 2
|
||||
if delay > maxConnectInterval {
|
||||
delay = maxConnectInterval
|
||||
}
|
||||
}
|
||||
|
||||
client.nc, err = nats.Connect(url)
|
||||
|
|
|
@ -104,6 +104,7 @@ func testNatsClient_Subscribe(t *testing.T, client NatsClient) {
|
|||
}
|
||||
|
||||
func TestNatsClient_Subscribe(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
||||
client := CreateLocalNatsClientForTest(t)
|
||||
|
||||
|
@ -120,6 +121,7 @@ func testNatsClient_PublishAfterClose(t *testing.T, client NatsClient) {
|
|||
}
|
||||
|
||||
func TestNatsClient_PublishAfterClose(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
||||
client := CreateLocalNatsClientForTest(t)
|
||||
|
||||
|
@ -137,6 +139,7 @@ func testNatsClient_SubscribeAfterClose(t *testing.T, client NatsClient) {
|
|||
}
|
||||
|
||||
func TestNatsClient_SubscribeAfterClose(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
||||
client := CreateLocalNatsClientForTest(t)
|
||||
|
||||
|
@ -159,6 +162,7 @@ func testNatsClient_BadSubjects(t *testing.T, client NatsClient) {
|
|||
}
|
||||
|
||||
func TestNatsClient_BadSubjects(t *testing.T) {
|
||||
CatchLogForTest(t)
|
||||
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
||||
client := CreateLocalNatsClientForTest(t)
|
||||
|
||||
|
|
|
@ -118,6 +118,7 @@ func TestNotifierResetWillNotify(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNotifierDuplicate(t *testing.T) {
|
||||
t.Parallel()
|
||||
var notifier Notifier
|
||||
var wgStart sync.WaitGroup
|
||||
var wgEnd sync.WaitGroup
|
||||
|
|
|
@ -8,6 +8,12 @@
|
|||
# See "https://golang.org/pkg/net/http/pprof/" for further information.
|
||||
#debug = false
|
||||
|
||||
# Comma separated list of trusted proxies (IPs or CIDR networks) that may set
|
||||
# the "X-Real-Ip" or "X-Forwarded-For" headers. If both are provided, the
|
||||
# "X-Real-Ip" header will take precedence (if valid).
|
||||
# Leave empty to allow loopback and local addresses.
|
||||
#trustedproxies =
|
||||
|
||||
# ISO 3166 country this proxy is located at. This will be used by the signaling
|
||||
# servers to determine the closest proxy for publishers.
|
||||
#country = DE
|
||||
|
@ -20,6 +26,36 @@
|
|||
# - etcd: Token information are retrieved from an etcd cluster (see below).
|
||||
tokentype = static
|
||||
|
||||
# The external hostname for remote streams. Leaving this empty will autodetect
|
||||
# and use the first public IP found on the available network interfaces.
|
||||
#hostname =
|
||||
|
||||
# The token id to use when connecting remote stream.
|
||||
#token_id = server1
|
||||
|
||||
# The private key for the configured token id to use when connecting remote
|
||||
# streams.
|
||||
#token_key = privkey.pem
|
||||
|
||||
# If set to "true", certificate validation of remote stream requests will be
|
||||
# skipped. This should only be enabled during development, e.g. to work with
|
||||
# self-signed certificates.
|
||||
#skipverify = false
|
||||
|
||||
[bandwidth]
|
||||
# Target bandwidth limit for incoming streams (in megabits per second).
|
||||
# Set to 0 to disable the limit. If the limit is reached, the proxy notifies
|
||||
# the signaling servers that another proxy should be used for publishing if
|
||||
# possible.
|
||||
#incoming = 1024
|
||||
|
||||
# Target bandwidth limit for outgoing streams (in megabits per second).
|
||||
# Set to 0 to disable the limit. If the limit is reached, the proxy notifies
|
||||
# the signaling servers that another proxy should be used for subscribing if
|
||||
# possible. Note that this might require additional outgoing bandwidth for the
|
||||
# remote streams.
|
||||
#outgoing = 1024
|
||||
|
||||
[tokens]
|
||||
# For token type "static": Mapping of <tokenid> = <publickey> of signaling
|
||||
# servers allowed to connect.
|
||||
|
|
|
@ -36,6 +36,8 @@ import (
|
|||
|
||||
"github.com/dlintw/goconf"
|
||||
"github.com/gorilla/mux"
|
||||
|
||||
signaling "github.com/strukturag/nextcloud-spreed-signaling"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -90,7 +92,7 @@ func main() {
|
|||
}
|
||||
defer proxy.Stop()
|
||||
|
||||
if addr, _ := config.GetString("http", "listen"); addr != "" {
|
||||
if addr, _ := signaling.GetStringOptionWithEnv(config, "http", "listen"); addr != "" {
|
||||
readTimeout, _ := config.GetInt("http", "readtimeout")
|
||||
if readTimeout <= 0 {
|
||||
readTimeout = defaultReadTimeout
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue