mirror of
https://github.com/strukturag/nextcloud-spreed-signaling
synced 2024-06-17 05:06:14 +02:00
Compare commits
168 commits
Author | SHA1 | Date | |
---|---|---|---|
ef01f2c4d9 | |||
6e1a2f6d7e | |||
c46f7b7f9d | |||
82919ce97b | |||
26e3a39f12 | |||
224daa5efd | |||
2f9bdbd919 | |||
5b04cb41e6 | |||
fd77de0e02 | |||
c17882307f | |||
c7cccc9287 | |||
15edeca814 | |||
2a1fd2e018 | |||
be66d9425b | |||
b033e07e06 | |||
b47a112e7e | |||
6c62f9caae | |||
8b39217551 | |||
de3507690c | |||
8123be9551 | |||
cad442c486 | |||
e8ebfed711 | |||
8d8ec677f1 | |||
80d96916b9 | |||
8a0ce7c9b6 | |||
1952bfc2be | |||
b3d2f7b02c | |||
7583fb6486 | |||
040e663b37 | |||
15b1214413 | |||
05810e10ce | |||
7e7a04ad6c | |||
d25169d0ff | |||
79b76b1ca4 | |||
f8e37a1bca | |||
b5cbb917c5 | |||
e2ac08ae67 | |||
00d17bae97 | |||
ff69a294a9 | |||
5790e7a369 | |||
4c807c86e8 | |||
e862392872 | |||
39f4b2eb11 | |||
7f8e44b3b5 | |||
31b8c74d1c | |||
5f18913646 | |||
716a93538b | |||
2cd3418f09 | |||
c6cbe88d0e | |||
f73ad7b508 | |||
efb722a55e | |||
d63b1cf14a | |||
75060b25aa | |||
7e7a6d5c09 | |||
a4b8a81734 | |||
3ce963ee91 | |||
24c1a09662 | |||
56f5a72f61 | |||
a66c1d82bf | |||
d9deddfda7 | |||
9c99129242 | |||
63c42dd84c | |||
92cbc28065 | |||
132cf0d474 | |||
4fd929c15a | |||
879469df19 | |||
fe0a002adf | |||
7b555e91ec | |||
b2afa88bcc | |||
1bbc49351a | |||
dff78d0101 | |||
2ad2327090 | |||
4b76a49355 | |||
f6125dac3f | |||
c2e93cd92a | |||
4f8349d4c1 | |||
aac4874e72 | |||
936f83feb9 | |||
c1e9e02087 | |||
beee423a7c | |||
5a85fecb10 | |||
88575abea2 | |||
fdc43d12cd | |||
d03ea86991 | |||
18300ce89e | |||
d8f2f265ab | |||
ddbf1065f6 | |||
bad52af35a | |||
c58564c0e8 | |||
0b259a8171 | |||
3fc5f5253d | |||
3e92664edc | |||
0ee976d377 | |||
552474f6f0 | |||
09e010ee14 | |||
70a5318973 | |||
94a8f0f02b | |||
4603b2b290 | |||
a50d637107 | |||
307ffdc29a | |||
ec3ac62474 | |||
e3a163fbe5 | |||
cf36530b30 | |||
adc72aa578 | |||
ea0d31b0dc | |||
5b305f6f99 | |||
3c923a9ef9 | |||
1a692bc4bb | |||
6a495bfc5c | |||
9a91e885cf | |||
b4830b1fd3 | |||
16da87106a | |||
e763f4519c | |||
bfb185f382 | |||
46e8ea9148 | |||
4eb1b6609d | |||
815088f269 | |||
527061bbe2 | |||
a2f0bec564 | |||
70f0519ca2 | |||
9e2a896326 | |||
2d48018b58 | |||
cf19b3b1b4 | |||
ebb215c592 | |||
0eb234b24d | |||
cad397e59e | |||
f8899ef189 | |||
54c4f1847a | |||
d368a060fa | |||
602452fa25 | |||
0c2cefa63a | |||
2468443572 | |||
3721fb131f | |||
6960912681 | |||
b77525603c | |||
9adb762ccf | |||
bf68a15943 | |||
bc7aea68f3 | |||
69beea84cb | |||
952b8ae460 | |||
2e6cf7f86b | |||
dcec32be7e | |||
b0d052c6ec | |||
318ed3700f | |||
ee16a8d8be | |||
91033bf8c2 | |||
b541ebc4c6 | |||
0aed690463 | |||
71a4248568 | |||
df210a6a85 | |||
5bc9ada233 | |||
d0d68f0d21 | |||
9a892a194e | |||
26102e7acb | |||
88a575c36c | |||
fdab3db819 | |||
c8aa4c71e0 | |||
ec9e44f5d6 | |||
543a85f8aa | |||
9f104cb281 | |||
4e623a8e08 | |||
9ba5b4330a | |||
4b6a4dbfe1 | |||
e1f40a024e | |||
47fc6694ca | |||
d0c711b500 | |||
7dc450350b | |||
bd445bd99b |
4
.github/workflows/docker-janus.yml
vendored
4
.github/workflows/docker-janus.yml
vendored
|
@ -34,7 +34,3 @@ jobs:
|
||||||
context: docker/janus
|
context: docker/janus
|
||||||
load: true
|
load: true
|
||||||
tags: ${{ env.TEST_TAG }}
|
tags: ${{ env.TEST_TAG }}
|
||||||
|
|
||||||
- name: Test Docker image
|
|
||||||
run: |
|
|
||||||
docker run --rm ${{ env.TEST_TAG }} /usr/local/bin/janus --version
|
|
||||||
|
|
46
.github/workflows/govuln.yml
vendored
Normal file
46
.github/workflows/govuln.yml
vendored
Normal file
|
@ -0,0 +1,46 @@
|
||||||
|
name: Go Vulnerability Checker
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ master ]
|
||||||
|
paths:
|
||||||
|
- '.github/workflows/govuln.yml'
|
||||||
|
- '**.go'
|
||||||
|
- 'go.*'
|
||||||
|
pull_request:
|
||||||
|
branches: [ master ]
|
||||||
|
paths:
|
||||||
|
- '.github/workflows/govuln.yml'
|
||||||
|
- '**.go'
|
||||||
|
- 'go.*'
|
||||||
|
schedule:
|
||||||
|
- cron: "0 2 * * SUN"
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
run:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
go-version:
|
||||||
|
- "1.21"
|
||||||
|
- "1.22"
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version: ${{ matrix.go-version }}
|
||||||
|
|
||||||
|
- run: date
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
sudo apt -y update && sudo apt -y install protobuf-compiler
|
||||||
|
make common
|
||||||
|
|
||||||
|
- name: Install and run govulncheck
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||||
|
govulncheck ./...
|
8
.github/workflows/lint.yml
vendored
8
.github/workflows/lint.yml
vendored
|
@ -28,7 +28,7 @@ jobs:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/setup-go@v5
|
- uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version: "1.20"
|
go-version: "1.21"
|
||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: |
|
run: |
|
||||||
|
@ -36,13 +36,11 @@ jobs:
|
||||||
make common
|
make common
|
||||||
|
|
||||||
- name: lint
|
- name: lint
|
||||||
uses: golangci/golangci-lint-action@v4.0.0
|
uses: golangci/golangci-lint-action@v6.0.1
|
||||||
with:
|
with:
|
||||||
version: latest
|
version: latest
|
||||||
args: --timeout=2m0s
|
args: --timeout=2m0s
|
||||||
skip-cache: true
|
skip-cache: true
|
||||||
skip-pkg-cache: true
|
|
||||||
skip-build-cache: true
|
|
||||||
|
|
||||||
dependencies:
|
dependencies:
|
||||||
name: dependencies
|
name: dependencies
|
||||||
|
@ -56,7 +54,7 @@ jobs:
|
||||||
|
|
||||||
- name: Check minimum supported version of Go
|
- name: Check minimum supported version of Go
|
||||||
run: |
|
run: |
|
||||||
go mod tidy -go=1.20 -compat=1.20
|
go mod tidy -go=1.21 -compat=1.21
|
||||||
|
|
||||||
- name: Check go.mod / go.sum
|
- name: Check go.mod / go.sum
|
||||||
run: |
|
run: |
|
||||||
|
|
27
.github/workflows/shellcheck.yml
vendored
Normal file
27
.github/workflows/shellcheck.yml
vendored
Normal file
|
@ -0,0 +1,27 @@
|
||||||
|
name: shellcheck
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ master ]
|
||||||
|
paths:
|
||||||
|
- '.github/workflows/shellcheck.yml'
|
||||||
|
- '**.sh'
|
||||||
|
pull_request:
|
||||||
|
branches: [ master ]
|
||||||
|
paths:
|
||||||
|
- '.github/workflows/shellcheck.yml'
|
||||||
|
- '**.sh'
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
lint:
|
||||||
|
name: shellcheck
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: shellcheck
|
||||||
|
run: |
|
||||||
|
find -name "*.sh" | xargs shellcheck
|
5
.github/workflows/tarball.yml
vendored
5
.github/workflows/tarball.yml
vendored
|
@ -24,7 +24,6 @@ jobs:
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
go-version:
|
go-version:
|
||||||
- "1.20"
|
|
||||||
- "1.21"
|
- "1.21"
|
||||||
- "1.22"
|
- "1.22"
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
@ -53,7 +52,6 @@ jobs:
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
go-version:
|
go-version:
|
||||||
- "1.20"
|
|
||||||
- "1.21"
|
- "1.21"
|
||||||
- "1.22"
|
- "1.22"
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
@ -79,15 +77,12 @@ jobs:
|
||||||
[ -d "tmp/vendor" ] || exit 1
|
[ -d "tmp/vendor" ] || exit 1
|
||||||
|
|
||||||
- name: Build
|
- name: Build
|
||||||
env:
|
|
||||||
GOPROXY: off
|
|
||||||
run: |
|
run: |
|
||||||
echo "Building with $(nproc) threads"
|
echo "Building with $(nproc) threads"
|
||||||
make -C tmp build -j$(nproc)
|
make -C tmp build -j$(nproc)
|
||||||
|
|
||||||
- name: Run tests
|
- name: Run tests
|
||||||
env:
|
env:
|
||||||
GOPROXY: off
|
|
||||||
USE_DB_IP_GEOIP_DATABASE: "1"
|
USE_DB_IP_GEOIP_DATABASE: "1"
|
||||||
run: |
|
run: |
|
||||||
make -C tmp test TIMEOUT=120s
|
make -C tmp test TIMEOUT=120s
|
||||||
|
|
5
.github/workflows/test.yml
vendored
5
.github/workflows/test.yml
vendored
|
@ -27,7 +27,6 @@ jobs:
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
go-version:
|
go-version:
|
||||||
- "1.20"
|
|
||||||
- "1.21"
|
- "1.21"
|
||||||
- "1.22"
|
- "1.22"
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
@ -64,7 +63,7 @@ jobs:
|
||||||
outfile: cover.lcov
|
outfile: cover.lcov
|
||||||
|
|
||||||
- name: Coveralls Parallel
|
- name: Coveralls Parallel
|
||||||
uses: coverallsapp/github-action@v2.2.3
|
uses: coverallsapp/github-action@v2.3.0
|
||||||
env:
|
env:
|
||||||
COVERALLS_FLAG_NAME: run-${{ matrix.go-version }}
|
COVERALLS_FLAG_NAME: run-${{ matrix.go-version }}
|
||||||
with:
|
with:
|
||||||
|
@ -79,7 +78,7 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Coveralls Finished
|
- name: Coveralls Finished
|
||||||
uses: coverallsapp/github-action@v2.2.3
|
uses: coverallsapp/github-action@v2.3.0
|
||||||
with:
|
with:
|
||||||
github-token: ${{ secrets.github_token }}
|
github-token: ${{ secrets.github_token }}
|
||||||
parallel-finished: true
|
parallel-finished: true
|
||||||
|
|
116
CHANGELOG.md
116
CHANGELOG.md
|
@ -2,6 +2,122 @@
|
||||||
|
|
||||||
All notable changes to this project will be documented in this file.
|
All notable changes to this project will be documented in this file.
|
||||||
|
|
||||||
|
## 1.3.1 - 2024-05-23
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Bump alpine from 3.19 to 3.20 in /docker/janus
|
||||||
|
[#746](https://github.com/strukturag/nextcloud-spreed-signaling/pull/746)
|
||||||
|
- CI: Remove deprecated options from lint workflow.
|
||||||
|
[#748](https://github.com/strukturag/nextcloud-spreed-signaling/pull/748)
|
||||||
|
- docker: Update Janus in example image to 1.2.2
|
||||||
|
[#749](https://github.com/strukturag/nextcloud-spreed-signaling/pull/749)
|
||||||
|
- Improve detection of actual client IP.
|
||||||
|
[#747](https://github.com/strukturag/nextcloud-spreed-signaling/pull/747)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- docker: Fix proxy entrypoint.
|
||||||
|
[#745](https://github.com/strukturag/nextcloud-spreed-signaling/pull/745)
|
||||||
|
|
||||||
|
|
||||||
|
## 1.3.0 - 2024-05-22
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Support resuming remote sessions
|
||||||
|
[#715](https://github.com/strukturag/nextcloud-spreed-signaling/pull/715)
|
||||||
|
- Gracefully shut down signaling server on SIGUSR1.
|
||||||
|
[#706](https://github.com/strukturag/nextcloud-spreed-signaling/pull/706)
|
||||||
|
- docker: Add helper scripts to gracefully stop / wait for server.
|
||||||
|
[#722](https://github.com/strukturag/nextcloud-spreed-signaling/pull/722)
|
||||||
|
- Support environment variables in some configuration.
|
||||||
|
[#721](https://github.com/strukturag/nextcloud-spreed-signaling/pull/721)
|
||||||
|
- Add Context to clients / sessions.
|
||||||
|
[#732](https://github.com/strukturag/nextcloud-spreed-signaling/pull/732)
|
||||||
|
- Drop support for Golang 1.20
|
||||||
|
[#737](https://github.com/strukturag/nextcloud-spreed-signaling/pull/737)
|
||||||
|
- CI: Run "govulncheck".
|
||||||
|
[#694](https://github.com/strukturag/nextcloud-spreed-signaling/pull/694)
|
||||||
|
- Make trusted proxies configurable and default to loopback / private IPs.
|
||||||
|
[#738](https://github.com/strukturag/nextcloud-spreed-signaling/pull/738)
|
||||||
|
- Add support for remote streams (preview)
|
||||||
|
[#708](https://github.com/strukturag/nextcloud-spreed-signaling/pull/708)
|
||||||
|
- Add throttler for backend requests
|
||||||
|
[#744](https://github.com/strukturag/nextcloud-spreed-signaling/pull/744)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- build(deps): Bump github.com/nats-io/nats.go from 1.34.0 to 1.34.1
|
||||||
|
[#697](https://github.com/strukturag/nextcloud-spreed-signaling/pull/697)
|
||||||
|
- build(deps): Bump google.golang.org/grpc from 1.62.1 to 1.63.0
|
||||||
|
[#699](https://github.com/strukturag/nextcloud-spreed-signaling/pull/699)
|
||||||
|
- build(deps): Bump google.golang.org/grpc from 1.63.0 to 1.63.2
|
||||||
|
[#700](https://github.com/strukturag/nextcloud-spreed-signaling/pull/700)
|
||||||
|
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.12 to 2.10.14
|
||||||
|
[#702](https://github.com/strukturag/nextcloud-spreed-signaling/pull/702)
|
||||||
|
- Include previous value with etcd watch events.
|
||||||
|
[#704](https://github.com/strukturag/nextcloud-spreed-signaling/pull/704)
|
||||||
|
- build(deps): Bump go.uber.org/zap from 1.17.0 to 1.27.0
|
||||||
|
[#705](https://github.com/strukturag/nextcloud-spreed-signaling/pull/705)
|
||||||
|
- Improve support for Janus 1.x
|
||||||
|
[#669](https://github.com/strukturag/nextcloud-spreed-signaling/pull/669)
|
||||||
|
- build(deps): Bump sphinx from 7.2.6 to 7.3.5 in /docs
|
||||||
|
[#709](https://github.com/strukturag/nextcloud-spreed-signaling/pull/709)
|
||||||
|
- build(deps): Bump sphinx from 7.3.5 to 7.3.7 in /docs
|
||||||
|
[#712](https://github.com/strukturag/nextcloud-spreed-signaling/pull/712)
|
||||||
|
- build(deps): Bump golang.org/x/net from 0.21.0 to 0.23.0
|
||||||
|
[#711](https://github.com/strukturag/nextcloud-spreed-signaling/pull/711)
|
||||||
|
- Don't keep expiration timestamp in each session.
|
||||||
|
[#713](https://github.com/strukturag/nextcloud-spreed-signaling/pull/713)
|
||||||
|
- build(deps): Bump mkdocs from 1.5.3 to 1.6.0 in /docs
|
||||||
|
[#714](https://github.com/strukturag/nextcloud-spreed-signaling/pull/714)
|
||||||
|
- Speedup tests by running in parallel
|
||||||
|
[#718](https://github.com/strukturag/nextcloud-spreed-signaling/pull/718)
|
||||||
|
- build(deps): Bump golangci/golangci-lint-action from 4.0.0 to 5.0.0
|
||||||
|
[#719](https://github.com/strukturag/nextcloud-spreed-signaling/pull/719)
|
||||||
|
- build(deps): Bump golangci/golangci-lint-action from 5.0.0 to 5.1.0
|
||||||
|
[#720](https://github.com/strukturag/nextcloud-spreed-signaling/pull/720)
|
||||||
|
- build(deps): Bump coverallsapp/github-action from 2.2.3 to 2.3.0
|
||||||
|
[#728](https://github.com/strukturag/nextcloud-spreed-signaling/pull/728)
|
||||||
|
- build(deps): Bump jinja2 from 3.1.3 to 3.1.4 in /docs
|
||||||
|
[#726](https://github.com/strukturag/nextcloud-spreed-signaling/pull/726)
|
||||||
|
- build(deps): Bump google.golang.org/protobuf from 1.33.0 to 1.34.1
|
||||||
|
[#725](https://github.com/strukturag/nextcloud-spreed-signaling/pull/725)
|
||||||
|
- build(deps): Bump github.com/prometheus/client_golang from 1.19.0 to 1.19.1
|
||||||
|
[#730](https://github.com/strukturag/nextcloud-spreed-signaling/pull/730)
|
||||||
|
- build(deps): Bump golangci/golangci-lint-action from 5.1.0 to 6.0.1
|
||||||
|
[#729](https://github.com/strukturag/nextcloud-spreed-signaling/pull/729)
|
||||||
|
- build(deps): Bump google.golang.org/grpc from 1.63.2 to 1.64.0
|
||||||
|
[#734](https://github.com/strukturag/nextcloud-spreed-signaling/pull/734)
|
||||||
|
- Validate received SDP earlier.
|
||||||
|
[#707](https://github.com/strukturag/nextcloud-spreed-signaling/pull/707)
|
||||||
|
- Log something if mcu publisher / subscriber was closed.
|
||||||
|
[#736](https://github.com/strukturag/nextcloud-spreed-signaling/pull/736)
|
||||||
|
- build(deps): Bump the etcd group with 4 updates
|
||||||
|
[#693](https://github.com/strukturag/nextcloud-spreed-signaling/pull/693)
|
||||||
|
- build(deps): Bump github.com/nats-io/nats.go from 1.34.1 to 1.35.0
|
||||||
|
[#740](https://github.com/strukturag/nextcloud-spreed-signaling/pull/740)
|
||||||
|
- Don't use unnecessary pointer to "json.RawMessage".
|
||||||
|
[#739](https://github.com/strukturag/nextcloud-spreed-signaling/pull/739)
|
||||||
|
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.14 to 2.10.15
|
||||||
|
[#741](https://github.com/strukturag/nextcloud-spreed-signaling/pull/741)
|
||||||
|
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.15 to 2.10.16
|
||||||
|
[#743](https://github.com/strukturag/nextcloud-spreed-signaling/pull/743)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- Improve detecting renames in file watcher.
|
||||||
|
[#698](https://github.com/strukturag/nextcloud-spreed-signaling/pull/698)
|
||||||
|
- Update etcd watch handling.
|
||||||
|
[#701](https://github.com/strukturag/nextcloud-spreed-signaling/pull/701)
|
||||||
|
- Prevent goroutine leaks in GRPC tests.
|
||||||
|
[#716](https://github.com/strukturag/nextcloud-spreed-signaling/pull/716)
|
||||||
|
- Fix potential race in capabilities test.
|
||||||
|
[#731](https://github.com/strukturag/nextcloud-spreed-signaling/pull/731)
|
||||||
|
- Don't log read error after we closed the connection.
|
||||||
|
[#735](https://github.com/strukturag/nextcloud-spreed-signaling/pull/735)
|
||||||
|
- Fix lock order inversion when leaving room / publishing room sessions.
|
||||||
|
[#742](https://github.com/strukturag/nextcloud-spreed-signaling/pull/742)
|
||||||
|
- Relax "MessageClientMessageData" validation.
|
||||||
|
[#733](https://github.com/strukturag/nextcloud-spreed-signaling/pull/733)
|
||||||
|
|
||||||
|
|
||||||
## 1.2.4 - 2024-04-03
|
## 1.2.4 - 2024-04-03
|
||||||
|
|
||||||
### Added
|
### Added
|
||||||
|
|
17
Makefile
17
Makefile
|
@ -52,6 +52,14 @@ ifneq ($(COUNT),)
|
||||||
TESTARGS := $(TESTARGS) -count $(COUNT)
|
TESTARGS := $(TESTARGS) -count $(COUNT)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
ifneq ($(PARALLEL),)
|
||||||
|
TESTARGS := $(TESTARGS) -parallel $(PARALLEL)
|
||||||
|
endif
|
||||||
|
|
||||||
|
ifneq ($(VERBOSE),)
|
||||||
|
TESTARGS := $(TESTARGS) -v
|
||||||
|
endif
|
||||||
|
|
||||||
ifeq ($(GOARCH), amd64)
|
ifeq ($(GOARCH), amd64)
|
||||||
GOPATHBIN := $(GOPATH)/bin
|
GOPATHBIN := $(GOPATH)/bin
|
||||||
else
|
else
|
||||||
|
@ -62,14 +70,12 @@ hook:
|
||||||
[ ! -d "$(CURDIR)/.git/hooks" ] || ln -sf "$(CURDIR)/scripts/pre-commit.hook" "$(CURDIR)/.git/hooks/pre-commit"
|
[ ! -d "$(CURDIR)/.git/hooks" ] || ln -sf "$(CURDIR)/scripts/pre-commit.hook" "$(CURDIR)/.git/hooks/pre-commit"
|
||||||
|
|
||||||
$(GOPATHBIN)/easyjson: go.mod go.sum
|
$(GOPATHBIN)/easyjson: go.mod go.sum
|
||||||
[ "$(GOPROXY)" = "off" ] || $(GO) get -d github.com/mailru/easyjson/...
|
|
||||||
$(GO) install github.com/mailru/easyjson/...
|
$(GO) install github.com/mailru/easyjson/...
|
||||||
|
|
||||||
$(GOPATHBIN)/protoc-gen-go: go.mod go.sum
|
$(GOPATHBIN)/protoc-gen-go: go.mod go.sum
|
||||||
$(GO) install google.golang.org/protobuf/cmd/protoc-gen-go
|
$(GO) install google.golang.org/protobuf/cmd/protoc-gen-go
|
||||||
|
|
||||||
$(GOPATHBIN)/protoc-gen-go-grpc: go.mod go.sum
|
$(GOPATHBIN)/protoc-gen-go-grpc: go.mod go.sum
|
||||||
[ "$(GOPROXY)" = "off" ] || $(GO) get -d google.golang.org/grpc/cmd/protoc-gen-go-grpc
|
|
||||||
$(GO) install google.golang.org/grpc/cmd/protoc-gen-go-grpc
|
$(GO) install google.golang.org/grpc/cmd/protoc-gen-go-grpc
|
||||||
|
|
||||||
continentmap.go:
|
continentmap.go:
|
||||||
|
@ -93,18 +99,18 @@ vet: common
|
||||||
$(GO) vet $(ALL_PACKAGES)
|
$(GO) vet $(ALL_PACKAGES)
|
||||||
|
|
||||||
test: vet common
|
test: vet common
|
||||||
$(GO) test -v -timeout $(TIMEOUT) $(TESTARGS) $(ALL_PACKAGES)
|
$(GO) test -timeout $(TIMEOUT) $(TESTARGS) $(ALL_PACKAGES)
|
||||||
|
|
||||||
cover: vet common
|
cover: vet common
|
||||||
rm -f cover.out && \
|
rm -f cover.out && \
|
||||||
$(GO) test -v -timeout $(TIMEOUT) -coverprofile cover.out $(ALL_PACKAGES) && \
|
$(GO) test -timeout $(TIMEOUT) -coverprofile cover.out $(ALL_PACKAGES) && \
|
||||||
sed -i "/_easyjson/d" cover.out && \
|
sed -i "/_easyjson/d" cover.out && \
|
||||||
sed -i "/\.pb\.go/d" cover.out && \
|
sed -i "/\.pb\.go/d" cover.out && \
|
||||||
$(GO) tool cover -func=cover.out
|
$(GO) tool cover -func=cover.out
|
||||||
|
|
||||||
coverhtml: vet common
|
coverhtml: vet common
|
||||||
rm -f cover.out && \
|
rm -f cover.out && \
|
||||||
$(GO) test -v -timeout $(TIMEOUT) -coverprofile cover.out $(ALL_PACKAGES) && \
|
$(GO) test -timeout $(TIMEOUT) -coverprofile cover.out $(ALL_PACKAGES) && \
|
||||||
sed -i "/_easyjson/d" cover.out && \
|
sed -i "/_easyjson/d" cover.out && \
|
||||||
sed -i "/\.pb\.go/d" cover.out && \
|
sed -i "/\.pb\.go/d" cover.out && \
|
||||||
$(GO) tool cover -html=cover.out -o coverage.html
|
$(GO) tool cover -html=cover.out -o coverage.html
|
||||||
|
@ -147,7 +153,6 @@ build: server proxy
|
||||||
vendor: go.mod go.sum common
|
vendor: go.mod go.sum common
|
||||||
set -e ;\
|
set -e ;\
|
||||||
rm -rf $(VENDORDIR)
|
rm -rf $(VENDORDIR)
|
||||||
$(GO) mod tidy; \
|
|
||||||
$(GO) mod vendor
|
$(GO) mod vendor
|
||||||
|
|
||||||
tarball: vendor
|
tarball: vendor
|
||||||
|
|
17
README.md
17
README.md
|
@ -17,7 +17,7 @@ information on the API of the signaling server.
|
||||||
The following tools are required for building the signaling server.
|
The following tools are required for building the signaling server.
|
||||||
|
|
||||||
- git
|
- git
|
||||||
- go >= 1.20
|
- go >= 1.21
|
||||||
- make
|
- make
|
||||||
- protobuf-compiler >= 3
|
- protobuf-compiler >= 3
|
||||||
|
|
||||||
|
@ -171,7 +171,17 @@ proxy process gracefully after all clients have been disconnected. No new
|
||||||
publishers will be accepted in this case.
|
publishers will be accepted in this case.
|
||||||
|
|
||||||
|
|
||||||
### Clustering
|
### Remote streams (preview)
|
||||||
|
|
||||||
|
With Janus 1.1.0 or newer, remote streams are supported, i.e. a subscriber can
|
||||||
|
receive a published stream from any server. For this, you need to configure
|
||||||
|
`hostname`, `token_id` and `token_key` in the proxy configuration. Each proxy
|
||||||
|
server also supports configuring maximum `incoming` and `outgoing` bandwidth
|
||||||
|
settings, which will also be used to select remote streams.
|
||||||
|
See `proxy.conf.in` in section `app` for details.
|
||||||
|
|
||||||
|
|
||||||
|
## Clustering
|
||||||
|
|
||||||
The signaling server supports a clustering mode where multiple running servers
|
The signaling server supports a clustering mode where multiple running servers
|
||||||
can be interconnected to form a single "virtual" server. This can be used to
|
can be interconnected to form a single "virtual" server. This can be used to
|
||||||
|
@ -299,6 +309,8 @@ interface on port `8080` below):
|
||||||
# Enable proxying Websocket requests to the standalone signaling server.
|
# Enable proxying Websocket requests to the standalone signaling server.
|
||||||
ProxyPass "/standalone-signaling/" "ws://127.0.0.1:8080/"
|
ProxyPass "/standalone-signaling/" "ws://127.0.0.1:8080/"
|
||||||
|
|
||||||
|
RequestHeader set X-Real-IP %{REMOTE_ADDR}s
|
||||||
|
|
||||||
RewriteEngine On
|
RewriteEngine On
|
||||||
# Websocket connections from the clients.
|
# Websocket connections from the clients.
|
||||||
RewriteRule ^/standalone-signaling/spreed/$ - [L]
|
RewriteRule ^/standalone-signaling/spreed/$ - [L]
|
||||||
|
@ -334,6 +346,7 @@ myserver.domain.invalid {
|
||||||
route /standalone-signaling/* {
|
route /standalone-signaling/* {
|
||||||
uri strip_prefix /standalone-signaling
|
uri strip_prefix /standalone-signaling
|
||||||
reverse_proxy http://127.0.0.1:8080
|
reverse_proxy http://127.0.0.1:8080
|
||||||
|
header_up X-Real-IP {remote_host}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
|
@ -22,6 +22,7 @@
|
||||||
package signaling
|
package signaling
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -31,6 +32,19 @@ type AllowedIps struct {
|
||||||
allowed []*net.IPNet
|
allowed []*net.IPNet
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (a *AllowedIps) String() string {
|
||||||
|
var b bytes.Buffer
|
||||||
|
b.WriteString("[")
|
||||||
|
for idx, n := range a.allowed {
|
||||||
|
if idx > 0 {
|
||||||
|
b.WriteString(", ")
|
||||||
|
}
|
||||||
|
b.WriteString(n.String())
|
||||||
|
}
|
||||||
|
b.WriteString("]")
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
func (a *AllowedIps) Empty() bool {
|
func (a *AllowedIps) Empty() bool {
|
||||||
return len(a.allowed) == 0
|
return len(a.allowed) == 0
|
||||||
}
|
}
|
||||||
|
@ -99,3 +113,22 @@ func DefaultAllowedIps() *AllowedIps {
|
||||||
}
|
}
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
privateIpNets = []string{
|
||||||
|
// Loopback addresses.
|
||||||
|
"127.0.0.0/8",
|
||||||
|
// Private addresses.
|
||||||
|
"10.0.0.0/8",
|
||||||
|
"172.16.0.0/12",
|
||||||
|
"192.168.0.0/16",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func DefaultPrivateIps() *AllowedIps {
|
||||||
|
allowed, err := ParseAllowedIps(strings.Join(privateIpNets, ","))
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Errorf("could not parse private ips %+v: %w", privateIpNets, err))
|
||||||
|
}
|
||||||
|
return allowed
|
||||||
|
}
|
||||||
|
|
|
@ -34,6 +34,9 @@ func TestAllowedIps(t *testing.T) {
|
||||||
if a.Empty() {
|
if a.Empty() {
|
||||||
t.Fatal("should not be empty")
|
t.Fatal("should not be empty")
|
||||||
}
|
}
|
||||||
|
if expected := `[127.0.0.1/32, 192.168.0.1/32, 192.168.1.0/24]`; a.String() != expected {
|
||||||
|
t.Errorf("expected %s, got %s", expected, a.String())
|
||||||
|
}
|
||||||
|
|
||||||
allowed := []string{
|
allowed := []string{
|
||||||
"127.0.0.1",
|
"127.0.0.1",
|
||||||
|
|
|
@ -118,8 +118,8 @@ type BackendRoomInviteRequest struct {
|
||||||
UserIds []string `json:"userids,omitempty"`
|
UserIds []string `json:"userids,omitempty"`
|
||||||
// TODO(jojo): We should get rid of "AllUserIds" and find a better way to
|
// TODO(jojo): We should get rid of "AllUserIds" and find a better way to
|
||||||
// notify existing users the room has changed and they need to update it.
|
// notify existing users the room has changed and they need to update it.
|
||||||
AllUserIds []string `json:"alluserids,omitempty"`
|
AllUserIds []string `json:"alluserids,omitempty"`
|
||||||
Properties *json.RawMessage `json:"properties,omitempty"`
|
Properties json.RawMessage `json:"properties,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type BackendRoomDisinviteRequest struct {
|
type BackendRoomDisinviteRequest struct {
|
||||||
|
@ -127,13 +127,13 @@ type BackendRoomDisinviteRequest struct {
|
||||||
SessionIds []string `json:"sessionids,omitempty"`
|
SessionIds []string `json:"sessionids,omitempty"`
|
||||||
// TODO(jojo): We should get rid of "AllUserIds" and find a better way to
|
// TODO(jojo): We should get rid of "AllUserIds" and find a better way to
|
||||||
// notify existing users the room has changed and they need to update it.
|
// notify existing users the room has changed and they need to update it.
|
||||||
AllUserIds []string `json:"alluserids,omitempty"`
|
AllUserIds []string `json:"alluserids,omitempty"`
|
||||||
Properties *json.RawMessage `json:"properties,omitempty"`
|
Properties json.RawMessage `json:"properties,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type BackendRoomUpdateRequest struct {
|
type BackendRoomUpdateRequest struct {
|
||||||
UserIds []string `json:"userids,omitempty"`
|
UserIds []string `json:"userids,omitempty"`
|
||||||
Properties *json.RawMessage `json:"properties,omitempty"`
|
Properties json.RawMessage `json:"properties,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type BackendRoomDeleteRequest struct {
|
type BackendRoomDeleteRequest struct {
|
||||||
|
@ -154,7 +154,7 @@ type BackendRoomParticipantsRequest struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
type BackendRoomMessageRequest struct {
|
type BackendRoomMessageRequest struct {
|
||||||
Data *json.RawMessage `json:"data,omitempty"`
|
Data json.RawMessage `json:"data,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type BackendRoomSwitchToSessionsList []string
|
type BackendRoomSwitchToSessionsList []string
|
||||||
|
@ -169,7 +169,7 @@ type BackendRoomSwitchToMessageRequest struct {
|
||||||
// In the map, the key is the session id, the value additional details
|
// In the map, the key is the session id, the value additional details
|
||||||
// (or null) for the session. The details will be included in the request
|
// (or null) for the session. The details will be included in the request
|
||||||
// to the connected client.
|
// to the connected client.
|
||||||
Sessions *json.RawMessage `json:"sessions,omitempty"`
|
Sessions json.RawMessage `json:"sessions,omitempty"`
|
||||||
|
|
||||||
// Internal properties
|
// Internal properties
|
||||||
SessionsList BackendRoomSwitchToSessionsList `json:"sessionslist,omitempty"`
|
SessionsList BackendRoomSwitchToSessionsList `json:"sessionslist,omitempty"`
|
||||||
|
@ -237,8 +237,8 @@ type BackendRoomDialoutResponse struct {
|
||||||
// Requests from the signaling server to the Nextcloud backend.
|
// Requests from the signaling server to the Nextcloud backend.
|
||||||
|
|
||||||
type BackendClientAuthRequest struct {
|
type BackendClientAuthRequest struct {
|
||||||
Version string `json:"version"`
|
Version string `json:"version"`
|
||||||
Params *json.RawMessage `json:"params"`
|
Params json.RawMessage `json:"params"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type BackendClientRequest struct {
|
type BackendClientRequest struct {
|
||||||
|
@ -256,7 +256,7 @@ type BackendClientRequest struct {
|
||||||
Session *BackendClientSessionRequest `json:"session,omitempty"`
|
Session *BackendClientSessionRequest `json:"session,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewBackendClientAuthRequest(params *json.RawMessage) *BackendClientRequest {
|
func NewBackendClientAuthRequest(params json.RawMessage) *BackendClientRequest {
|
||||||
return &BackendClientRequest{
|
return &BackendClientRequest{
|
||||||
Type: "auth",
|
Type: "auth",
|
||||||
Auth: &BackendClientAuthRequest{
|
Auth: &BackendClientAuthRequest{
|
||||||
|
@ -284,9 +284,9 @@ type BackendClientResponse struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
type BackendClientAuthResponse struct {
|
type BackendClientAuthResponse struct {
|
||||||
Version string `json:"version"`
|
Version string `json:"version"`
|
||||||
UserId string `json:"userid"`
|
UserId string `json:"userid"`
|
||||||
User *json.RawMessage `json:"user"`
|
User json.RawMessage `json:"user"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type BackendClientRoomRequest struct {
|
type BackendClientRoomRequest struct {
|
||||||
|
@ -315,14 +315,14 @@ func NewBackendClientRoomRequest(roomid string, userid string, sessionid string)
|
||||||
}
|
}
|
||||||
|
|
||||||
type BackendClientRoomResponse struct {
|
type BackendClientRoomResponse struct {
|
||||||
Version string `json:"version"`
|
Version string `json:"version"`
|
||||||
RoomId string `json:"roomid"`
|
RoomId string `json:"roomid"`
|
||||||
Properties *json.RawMessage `json:"properties"`
|
Properties json.RawMessage `json:"properties"`
|
||||||
|
|
||||||
// Optional information about the Nextcloud Talk session. Can be used for
|
// Optional information about the Nextcloud Talk session. Can be used for
|
||||||
// example to define a "userid" for otherwise anonymous users.
|
// example to define a "userid" for otherwise anonymous users.
|
||||||
// See "RoomSessionData" for a possible content.
|
// See "RoomSessionData" for a possible content.
|
||||||
Session *json.RawMessage `json:"session,omitempty"`
|
Session json.RawMessage `json:"session,omitempty"`
|
||||||
|
|
||||||
Permissions *[]Permission `json:"permissions,omitempty"`
|
Permissions *[]Permission `json:"permissions,omitempty"`
|
||||||
}
|
}
|
||||||
|
@ -359,12 +359,12 @@ type BackendClientRingResponse struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
type BackendClientSessionRequest struct {
|
type BackendClientSessionRequest struct {
|
||||||
Version string `json:"version"`
|
Version string `json:"version"`
|
||||||
RoomId string `json:"roomid"`
|
RoomId string `json:"roomid"`
|
||||||
Action string `json:"action"`
|
Action string `json:"action"`
|
||||||
SessionId string `json:"sessionid"`
|
SessionId string `json:"sessionid"`
|
||||||
UserId string `json:"userid,omitempty"`
|
UserId string `json:"userid,omitempty"`
|
||||||
User *json.RawMessage `json:"user,omitempty"`
|
User json.RawMessage `json:"user,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type BackendClientSessionResponse struct {
|
type BackendClientSessionResponse struct {
|
||||||
|
@ -396,8 +396,8 @@ type OcsMeta struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
type OcsBody struct {
|
type OcsBody struct {
|
||||||
Meta OcsMeta `json:"meta"`
|
Meta OcsMeta `json:"meta"`
|
||||||
Data *json.RawMessage `json:"data"`
|
Data json.RawMessage `json:"data"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type OcsResponse struct {
|
type OcsResponse struct {
|
||||||
|
|
|
@ -27,6 +27,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestBackendChecksum(t *testing.T) {
|
func TestBackendChecksum(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
rnd := newRandomString(32)
|
rnd := newRandomString(32)
|
||||||
body := []byte{1, 2, 3, 4, 5}
|
body := []byte{1, 2, 3, 4, 5}
|
||||||
secret := []byte("shared-secret")
|
secret := []byte("shared-secret")
|
||||||
|
@ -58,6 +59,7 @@ func TestBackendChecksum(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestValidNumbers(t *testing.T) {
|
func TestValidNumbers(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
valid := []string{
|
valid := []string{
|
||||||
"+12",
|
"+12",
|
||||||
"+12345",
|
"+12345",
|
||||||
|
|
67
api_proxy.go
67
api_proxy.go
|
@ -24,6 +24,7 @@ package signaling
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
|
||||||
"github.com/golang-jwt/jwt/v4"
|
"github.com/golang-jwt/jwt/v4"
|
||||||
)
|
)
|
||||||
|
@ -48,6 +49,14 @@ type ProxyClientMessage struct {
|
||||||
Payload *PayloadProxyClientMessage `json:"payload,omitempty"`
|
Payload *PayloadProxyClientMessage `json:"payload,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (m *ProxyClientMessage) String() string {
|
||||||
|
data, err := json.Marshal(m)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Sprintf("Could not serialize %#v: %s", m, err)
|
||||||
|
}
|
||||||
|
return string(data)
|
||||||
|
}
|
||||||
|
|
||||||
func (m *ProxyClientMessage) CheckValid() error {
|
func (m *ProxyClientMessage) CheckValid() error {
|
||||||
switch m.Type {
|
switch m.Type {
|
||||||
case "":
|
case "":
|
||||||
|
@ -115,6 +124,14 @@ type ProxyServerMessage struct {
|
||||||
Event *EventProxyServerMessage `json:"event,omitempty"`
|
Event *EventProxyServerMessage `json:"event,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *ProxyServerMessage) String() string {
|
||||||
|
data, err := json.Marshal(r)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Sprintf("Could not serialize %#v: %s", r, err)
|
||||||
|
}
|
||||||
|
return string(data)
|
||||||
|
}
|
||||||
|
|
||||||
func (r *ProxyServerMessage) CloseAfterSend(session Session) bool {
|
func (r *ProxyServerMessage) CloseAfterSend(session Session) bool {
|
||||||
switch r.Type {
|
switch r.Type {
|
||||||
case "bye":
|
case "bye":
|
||||||
|
@ -185,6 +202,14 @@ type CommandProxyClientMessage struct {
|
||||||
ClientId string `json:"clientId,omitempty"`
|
ClientId string `json:"clientId,omitempty"`
|
||||||
Bitrate int `json:"bitrate,omitempty"`
|
Bitrate int `json:"bitrate,omitempty"`
|
||||||
MediaTypes MediaType `json:"mediatypes,omitempty"`
|
MediaTypes MediaType `json:"mediatypes,omitempty"`
|
||||||
|
|
||||||
|
RemoteUrl string `json:"remoteUrl,omitempty"`
|
||||||
|
remoteUrl *url.URL
|
||||||
|
RemoteToken string `json:"remoteToken,omitempty"`
|
||||||
|
|
||||||
|
Hostname string `json:"hostname,omitempty"`
|
||||||
|
Port int `json:"port,omitempty"`
|
||||||
|
RtcpPort int `json:"rtcpPort,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *CommandProxyClientMessage) CheckValid() error {
|
func (m *CommandProxyClientMessage) CheckValid() error {
|
||||||
|
@ -202,6 +227,17 @@ func (m *CommandProxyClientMessage) CheckValid() error {
|
||||||
if m.StreamType == "" {
|
if m.StreamType == "" {
|
||||||
return fmt.Errorf("stream type missing")
|
return fmt.Errorf("stream type missing")
|
||||||
}
|
}
|
||||||
|
if m.RemoteUrl != "" {
|
||||||
|
if m.RemoteToken == "" {
|
||||||
|
return fmt.Errorf("remote token missing")
|
||||||
|
}
|
||||||
|
|
||||||
|
remoteUrl, err := url.Parse(m.RemoteUrl)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("invalid remote url: %w", err)
|
||||||
|
}
|
||||||
|
m.remoteUrl = remoteUrl
|
||||||
|
}
|
||||||
case "delete-publisher":
|
case "delete-publisher":
|
||||||
fallthrough
|
fallthrough
|
||||||
case "delete-subscriber":
|
case "delete-subscriber":
|
||||||
|
@ -217,6 +253,8 @@ type CommandProxyServerMessage struct {
|
||||||
Sid string `json:"sid,omitempty"`
|
Sid string `json:"sid,omitempty"`
|
||||||
|
|
||||||
Bitrate int `json:"bitrate,omitempty"`
|
Bitrate int `json:"bitrate,omitempty"`
|
||||||
|
|
||||||
|
Streams []PublisherStream `json:"streams,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Type "payload"
|
// Type "payload"
|
||||||
|
@ -261,12 +299,41 @@ type PayloadProxyServerMessage struct {
|
||||||
|
|
||||||
// Type "event"
|
// Type "event"
|
||||||
|
|
||||||
|
type EventProxyServerBandwidth struct {
|
||||||
|
// Incoming is the bandwidth utilization for publishers in percent.
|
||||||
|
Incoming *float64 `json:"incoming,omitempty"`
|
||||||
|
// Outgoing is the bandwidth utilization for subscribers in percent.
|
||||||
|
Outgoing *float64 `json:"outgoing,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *EventProxyServerBandwidth) String() string {
|
||||||
|
if b.Incoming != nil && b.Outgoing != nil {
|
||||||
|
return fmt.Sprintf("bandwidth: incoming=%.3f%%, outgoing=%.3f%%", *b.Incoming, *b.Outgoing)
|
||||||
|
} else if b.Incoming != nil {
|
||||||
|
return fmt.Sprintf("bandwidth: incoming=%.3f%%, outgoing=unlimited", *b.Incoming)
|
||||||
|
} else if b.Outgoing != nil {
|
||||||
|
return fmt.Sprintf("bandwidth: incoming=unlimited, outgoing=%.3f%%", *b.Outgoing)
|
||||||
|
} else {
|
||||||
|
return "bandwidth: incoming=unlimited, outgoing=unlimited"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b EventProxyServerBandwidth) AllowIncoming() bool {
|
||||||
|
return b.Incoming == nil || *b.Incoming < 100
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b EventProxyServerBandwidth) AllowOutgoing() bool {
|
||||||
|
return b.Outgoing == nil || *b.Outgoing < 100
|
||||||
|
}
|
||||||
|
|
||||||
type EventProxyServerMessage struct {
|
type EventProxyServerMessage struct {
|
||||||
Type string `json:"type"`
|
Type string `json:"type"`
|
||||||
|
|
||||||
ClientId string `json:"clientId,omitempty"`
|
ClientId string `json:"clientId,omitempty"`
|
||||||
Load int64 `json:"load,omitempty"`
|
Load int64 `json:"load,omitempty"`
|
||||||
Sid string `json:"sid,omitempty"`
|
Sid string `json:"sid,omitempty"`
|
||||||
|
|
||||||
|
Bandwidth *EventProxyServerBandwidth `json:"bandwidth,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Information on a proxy in the etcd cluster.
|
// Information on a proxy in the etcd cluster.
|
||||||
|
|
|
@ -32,6 +32,7 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/golang-jwt/jwt/v4"
|
"github.com/golang-jwt/jwt/v4"
|
||||||
|
"github.com/pion/sdp/v3"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -42,6 +43,11 @@ const (
|
||||||
HelloVersionV2 = "2.0"
|
HelloVersionV2 = "2.0"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrNoSdp = NewError("no_sdp", "Payload does not contain a SDP.")
|
||||||
|
ErrInvalidSdp = NewError("invalid_sdp", "Payload does not contain a valid SDP.")
|
||||||
|
)
|
||||||
|
|
||||||
// ClientMessage is a message that is sent from a client to the server.
|
// ClientMessage is a message that is sent from a client to the server.
|
||||||
type ClientMessage struct {
|
type ClientMessage struct {
|
||||||
json.Marshaler
|
json.Marshaler
|
||||||
|
@ -192,12 +198,12 @@ func (r *ServerMessage) CloseAfterSend(session Session) bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *ServerMessage) IsChatRefresh() bool {
|
func (r *ServerMessage) IsChatRefresh() bool {
|
||||||
if r.Type != "message" || r.Message == nil || r.Message.Data == nil || len(*r.Message.Data) == 0 {
|
if r.Type != "message" || r.Message == nil || len(r.Message.Data) == 0 {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
var data MessageServerMessageData
|
var data MessageServerMessageData
|
||||||
if err := json.Unmarshal(*r.Message.Data, &data); err != nil {
|
if err := json.Unmarshal(r.Message.Data, &data); err != nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -360,7 +366,7 @@ func (p *HelloV2AuthParams) CheckValid() error {
|
||||||
type HelloV2TokenClaims struct {
|
type HelloV2TokenClaims struct {
|
||||||
jwt.RegisteredClaims
|
jwt.RegisteredClaims
|
||||||
|
|
||||||
UserData *json.RawMessage `json:"userdata,omitempty"`
|
UserData json.RawMessage `json:"userdata,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type HelloClientMessageAuth struct {
|
type HelloClientMessageAuth struct {
|
||||||
|
@ -368,7 +374,7 @@ type HelloClientMessageAuth struct {
|
||||||
// "HelloClientTypeClient"
|
// "HelloClientTypeClient"
|
||||||
Type string `json:"type,omitempty"`
|
Type string `json:"type,omitempty"`
|
||||||
|
|
||||||
Params *json.RawMessage `json:"params"`
|
Params json.RawMessage `json:"params"`
|
||||||
|
|
||||||
Url string `json:"url"`
|
Url string `json:"url"`
|
||||||
parsedUrl *url.URL
|
parsedUrl *url.URL
|
||||||
|
@ -387,7 +393,7 @@ type HelloClientMessage struct {
|
||||||
Features []string `json:"features,omitempty"`
|
Features []string `json:"features,omitempty"`
|
||||||
|
|
||||||
// The authentication credentials.
|
// The authentication credentials.
|
||||||
Auth HelloClientMessageAuth `json:"auth"`
|
Auth *HelloClientMessageAuth `json:"auth,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *HelloClientMessage) CheckValid() error {
|
func (m *HelloClientMessage) CheckValid() error {
|
||||||
|
@ -395,7 +401,7 @@ func (m *HelloClientMessage) CheckValid() error {
|
||||||
return InvalidHelloVersion
|
return InvalidHelloVersion
|
||||||
}
|
}
|
||||||
if m.ResumeId == "" {
|
if m.ResumeId == "" {
|
||||||
if m.Auth.Params == nil || len(*m.Auth.Params) == 0 {
|
if m.Auth == nil || len(m.Auth.Params) == 0 {
|
||||||
return fmt.Errorf("params missing")
|
return fmt.Errorf("params missing")
|
||||||
}
|
}
|
||||||
if m.Auth.Type == "" {
|
if m.Auth.Type == "" {
|
||||||
|
@ -419,14 +425,14 @@ func (m *HelloClientMessage) CheckValid() error {
|
||||||
case HelloVersionV1:
|
case HelloVersionV1:
|
||||||
// No additional validation necessary.
|
// No additional validation necessary.
|
||||||
case HelloVersionV2:
|
case HelloVersionV2:
|
||||||
if err := json.Unmarshal(*m.Auth.Params, &m.Auth.helloV2Params); err != nil {
|
if err := json.Unmarshal(m.Auth.Params, &m.Auth.helloV2Params); err != nil {
|
||||||
return err
|
return err
|
||||||
} else if err := m.Auth.helloV2Params.CheckValid(); err != nil {
|
} else if err := m.Auth.helloV2Params.CheckValid(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case HelloClientTypeInternal:
|
case HelloClientTypeInternal:
|
||||||
if err := json.Unmarshal(*m.Auth.Params, &m.Auth.internalParams); err != nil {
|
if err := json.Unmarshal(m.Auth.Params, &m.Auth.internalParams); err != nil {
|
||||||
return err
|
return err
|
||||||
} else if err := m.Auth.internalParams.CheckValid(); err != nil {
|
} else if err := m.Auth.internalParams.CheckValid(); err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -528,8 +534,8 @@ func (m *RoomClientMessage) CheckValid() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
type RoomServerMessage struct {
|
type RoomServerMessage struct {
|
||||||
RoomId string `json:"roomid"`
|
RoomId string `json:"roomid"`
|
||||||
Properties *json.RawMessage `json:"properties,omitempty"`
|
Properties json.RawMessage `json:"properties,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type RoomErrorDetails struct {
|
type RoomErrorDetails struct {
|
||||||
|
@ -554,7 +560,7 @@ type MessageClientMessageRecipient struct {
|
||||||
type MessageClientMessage struct {
|
type MessageClientMessage struct {
|
||||||
Recipient MessageClientMessageRecipient `json:"recipient"`
|
Recipient MessageClientMessageRecipient `json:"recipient"`
|
||||||
|
|
||||||
Data *json.RawMessage `json:"data"`
|
Data json.RawMessage `json:"data"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type MessageClientMessageData struct {
|
type MessageClientMessageData struct {
|
||||||
|
@ -563,17 +569,44 @@ type MessageClientMessageData struct {
|
||||||
RoomType string `json:"roomType"`
|
RoomType string `json:"roomType"`
|
||||||
Bitrate int `json:"bitrate,omitempty"`
|
Bitrate int `json:"bitrate,omitempty"`
|
||||||
Payload map[string]interface{} `json:"payload"`
|
Payload map[string]interface{} `json:"payload"`
|
||||||
|
|
||||||
|
offerSdp *sdp.SessionDescription // Only set if Type == "offer"
|
||||||
|
answerSdp *sdp.SessionDescription // Only set if Type == "answer"
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MessageClientMessageData) CheckValid() error {
|
func (m *MessageClientMessageData) CheckValid() error {
|
||||||
if !IsValidStreamType(m.RoomType) {
|
if m.RoomType != "" && !IsValidStreamType(m.RoomType) {
|
||||||
return fmt.Errorf("invalid room type: %s", m.RoomType)
|
return fmt.Errorf("invalid room type: %s", m.RoomType)
|
||||||
}
|
}
|
||||||
|
if m.Type == "offer" || m.Type == "answer" {
|
||||||
|
sdpValue, found := m.Payload["sdp"]
|
||||||
|
if !found {
|
||||||
|
return ErrNoSdp
|
||||||
|
}
|
||||||
|
sdpText, ok := sdpValue.(string)
|
||||||
|
if !ok {
|
||||||
|
return ErrInvalidSdp
|
||||||
|
}
|
||||||
|
|
||||||
|
var sdp sdp.SessionDescription
|
||||||
|
if err := sdp.Unmarshal([]byte(sdpText)); err != nil {
|
||||||
|
return NewErrorDetail("invalid_sdp", "Error parsing SDP from payload.", map[string]interface{}{
|
||||||
|
"error": err.Error(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
switch m.Type {
|
||||||
|
case "offer":
|
||||||
|
m.offerSdp = &sdp
|
||||||
|
case "answer":
|
||||||
|
m.answerSdp = &sdp
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MessageClientMessage) CheckValid() error {
|
func (m *MessageClientMessage) CheckValid() error {
|
||||||
if m.Data == nil || len(*m.Data) == 0 {
|
if len(m.Data) == 0 {
|
||||||
return fmt.Errorf("message empty")
|
return fmt.Errorf("message empty")
|
||||||
}
|
}
|
||||||
switch m.Recipient.Type {
|
switch m.Recipient.Type {
|
||||||
|
@ -614,7 +647,7 @@ type MessageServerMessage struct {
|
||||||
Sender *MessageServerMessageSender `json:"sender"`
|
Sender *MessageServerMessageSender `json:"sender"`
|
||||||
Recipient *MessageClientMessageRecipient `json:"recipient,omitempty"`
|
Recipient *MessageClientMessageRecipient `json:"recipient,omitempty"`
|
||||||
|
|
||||||
Data *json.RawMessage `json:"data"`
|
Data json.RawMessage `json:"data"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Type "control"
|
// Type "control"
|
||||||
|
@ -631,7 +664,7 @@ type ControlServerMessage struct {
|
||||||
Sender *MessageServerMessageSender `json:"sender"`
|
Sender *MessageServerMessageSender `json:"sender"`
|
||||||
Recipient *MessageClientMessageRecipient `json:"recipient,omitempty"`
|
Recipient *MessageClientMessageRecipient `json:"recipient,omitempty"`
|
||||||
|
|
||||||
Data *json.RawMessage `json:"data"`
|
Data json.RawMessage `json:"data"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Type "internal"
|
// Type "internal"
|
||||||
|
@ -660,10 +693,10 @@ type AddSessionOptions struct {
|
||||||
type AddSessionInternalClientMessage struct {
|
type AddSessionInternalClientMessage struct {
|
||||||
CommonSessionInternalClientMessage
|
CommonSessionInternalClientMessage
|
||||||
|
|
||||||
UserId string `json:"userid,omitempty"`
|
UserId string `json:"userid,omitempty"`
|
||||||
User *json.RawMessage `json:"user,omitempty"`
|
User json.RawMessage `json:"user,omitempty"`
|
||||||
Flags uint32 `json:"flags,omitempty"`
|
Flags uint32 `json:"flags,omitempty"`
|
||||||
InCall *int `json:"incall,omitempty"`
|
InCall *int `json:"incall,omitempty"`
|
||||||
|
|
||||||
Options *AddSessionOptions `json:"options,omitempty"`
|
Options *AddSessionOptions `json:"options,omitempty"`
|
||||||
}
|
}
|
||||||
|
@ -815,10 +848,10 @@ type InternalServerMessage struct {
|
||||||
// Type "event"
|
// Type "event"
|
||||||
|
|
||||||
type RoomEventServerMessage struct {
|
type RoomEventServerMessage struct {
|
||||||
RoomId string `json:"roomid"`
|
RoomId string `json:"roomid"`
|
||||||
Properties *json.RawMessage `json:"properties,omitempty"`
|
Properties json.RawMessage `json:"properties,omitempty"`
|
||||||
// TODO(jojo): Change "InCall" to "int" when #914 has landed in NC Talk.
|
// TODO(jojo): Change "InCall" to "int" when #914 has landed in NC Talk.
|
||||||
InCall *json.RawMessage `json:"incall,omitempty"`
|
InCall json.RawMessage `json:"incall,omitempty"`
|
||||||
Changed []map[string]interface{} `json:"changed,omitempty"`
|
Changed []map[string]interface{} `json:"changed,omitempty"`
|
||||||
Users []map[string]interface{} `json:"users,omitempty"`
|
Users []map[string]interface{} `json:"users,omitempty"`
|
||||||
|
|
||||||
|
@ -845,8 +878,8 @@ type RoomDisinviteEventServerMessage struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
type RoomEventMessage struct {
|
type RoomEventMessage struct {
|
||||||
RoomId string `json:"roomid"`
|
RoomId string `json:"roomid"`
|
||||||
Data *json.RawMessage `json:"data,omitempty"`
|
Data json.RawMessage `json:"data,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type RoomFlagsServerMessage struct {
|
type RoomFlagsServerMessage struct {
|
||||||
|
@ -896,10 +929,10 @@ func (m *EventServerMessage) String() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
type EventServerMessageSessionEntry struct {
|
type EventServerMessageSessionEntry struct {
|
||||||
SessionId string `json:"sessionid"`
|
SessionId string `json:"sessionid"`
|
||||||
UserId string `json:"userid"`
|
UserId string `json:"userid"`
|
||||||
User *json.RawMessage `json:"user,omitempty"`
|
User json.RawMessage `json:"user,omitempty"`
|
||||||
RoomSessionId string `json:"roomsessionid,omitempty"`
|
RoomSessionId string `json:"roomsessionid,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *EventServerMessageSessionEntry) Clone() *EventServerMessageSessionEntry {
|
func (e *EventServerMessageSessionEntry) Clone() *EventServerMessageSessionEntry {
|
||||||
|
@ -932,9 +965,9 @@ type AnswerOfferMessage struct {
|
||||||
type TransientDataClientMessage struct {
|
type TransientDataClientMessage struct {
|
||||||
Type string `json:"type"`
|
Type string `json:"type"`
|
||||||
|
|
||||||
Key string `json:"key,omitempty"`
|
Key string `json:"key,omitempty"`
|
||||||
Value *json.RawMessage `json:"value,omitempty"`
|
Value json.RawMessage `json:"value,omitempty"`
|
||||||
TTL time.Duration `json:"ttl,omitempty"`
|
TTL time.Duration `json:"ttl,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TransientDataClientMessage) CheckValid() error {
|
func (m *TransientDataClientMessage) CheckValid() error {
|
||||||
|
|
|
@ -81,6 +81,7 @@ func testMessages(t *testing.T, messageType string, valid_messages []testCheckVa
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestClientMessage(t *testing.T) {
|
func TestClientMessage(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
// The message needs a type.
|
// The message needs a type.
|
||||||
msg := ClientMessage{}
|
msg := ClientMessage{}
|
||||||
if err := msg.CheckValid(); err == nil {
|
if err := msg.CheckValid(); err == nil {
|
||||||
|
@ -89,30 +90,31 @@ func TestClientMessage(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestHelloClientMessage(t *testing.T) {
|
func TestHelloClientMessage(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
internalAuthParams := []byte("{\"backend\":\"https://domain.invalid\"}")
|
internalAuthParams := []byte("{\"backend\":\"https://domain.invalid\"}")
|
||||||
tokenAuthParams := []byte("{\"token\":\"invalid-token\"}")
|
tokenAuthParams := []byte("{\"token\":\"invalid-token\"}")
|
||||||
valid_messages := []testCheckValid{
|
valid_messages := []testCheckValid{
|
||||||
// Hello version 1
|
// Hello version 1
|
||||||
&HelloClientMessage{
|
&HelloClientMessage{
|
||||||
Version: HelloVersionV1,
|
Version: HelloVersionV1,
|
||||||
Auth: HelloClientMessageAuth{
|
Auth: &HelloClientMessageAuth{
|
||||||
Params: &json.RawMessage{'{', '}'},
|
Params: json.RawMessage("{}"),
|
||||||
Url: "https://domain.invalid",
|
Url: "https://domain.invalid",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
&HelloClientMessage{
|
&HelloClientMessage{
|
||||||
Version: HelloVersionV1,
|
Version: HelloVersionV1,
|
||||||
Auth: HelloClientMessageAuth{
|
Auth: &HelloClientMessageAuth{
|
||||||
Type: "client",
|
Type: "client",
|
||||||
Params: &json.RawMessage{'{', '}'},
|
Params: json.RawMessage("{}"),
|
||||||
Url: "https://domain.invalid",
|
Url: "https://domain.invalid",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
&HelloClientMessage{
|
&HelloClientMessage{
|
||||||
Version: HelloVersionV1,
|
Version: HelloVersionV1,
|
||||||
Auth: HelloClientMessageAuth{
|
Auth: &HelloClientMessageAuth{
|
||||||
Type: "internal",
|
Type: "internal",
|
||||||
Params: (*json.RawMessage)(&internalAuthParams),
|
Params: internalAuthParams,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
&HelloClientMessage{
|
&HelloClientMessage{
|
||||||
|
@ -122,16 +124,16 @@ func TestHelloClientMessage(t *testing.T) {
|
||||||
// Hello version 2
|
// Hello version 2
|
||||||
&HelloClientMessage{
|
&HelloClientMessage{
|
||||||
Version: HelloVersionV2,
|
Version: HelloVersionV2,
|
||||||
Auth: HelloClientMessageAuth{
|
Auth: &HelloClientMessageAuth{
|
||||||
Params: (*json.RawMessage)(&tokenAuthParams),
|
Params: tokenAuthParams,
|
||||||
Url: "https://domain.invalid",
|
Url: "https://domain.invalid",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
&HelloClientMessage{
|
&HelloClientMessage{
|
||||||
Version: HelloVersionV2,
|
Version: HelloVersionV2,
|
||||||
Auth: HelloClientMessageAuth{
|
Auth: &HelloClientMessageAuth{
|
||||||
Type: "client",
|
Type: "client",
|
||||||
Params: (*json.RawMessage)(&tokenAuthParams),
|
Params: tokenAuthParams,
|
||||||
Url: "https://domain.invalid",
|
Url: "https://domain.invalid",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -147,75 +149,75 @@ func TestHelloClientMessage(t *testing.T) {
|
||||||
&HelloClientMessage{Version: HelloVersionV1},
|
&HelloClientMessage{Version: HelloVersionV1},
|
||||||
&HelloClientMessage{
|
&HelloClientMessage{
|
||||||
Version: HelloVersionV1,
|
Version: HelloVersionV1,
|
||||||
Auth: HelloClientMessageAuth{
|
Auth: &HelloClientMessageAuth{
|
||||||
Params: &json.RawMessage{'{', '}'},
|
Params: json.RawMessage("{}"),
|
||||||
Type: "invalid-type",
|
Type: "invalid-type",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
&HelloClientMessage{
|
&HelloClientMessage{
|
||||||
Version: HelloVersionV1,
|
Version: HelloVersionV1,
|
||||||
Auth: HelloClientMessageAuth{
|
Auth: &HelloClientMessageAuth{
|
||||||
Url: "https://domain.invalid",
|
Url: "https://domain.invalid",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
&HelloClientMessage{
|
&HelloClientMessage{
|
||||||
Version: HelloVersionV1,
|
Version: HelloVersionV1,
|
||||||
Auth: HelloClientMessageAuth{
|
Auth: &HelloClientMessageAuth{
|
||||||
Params: &json.RawMessage{'{', '}'},
|
Params: json.RawMessage("{}"),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
&HelloClientMessage{
|
&HelloClientMessage{
|
||||||
Version: HelloVersionV1,
|
Version: HelloVersionV1,
|
||||||
Auth: HelloClientMessageAuth{
|
Auth: &HelloClientMessageAuth{
|
||||||
Params: &json.RawMessage{'{', '}'},
|
Params: json.RawMessage("{}"),
|
||||||
Url: "invalid-url",
|
Url: "invalid-url",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
&HelloClientMessage{
|
&HelloClientMessage{
|
||||||
Version: HelloVersionV1,
|
Version: HelloVersionV1,
|
||||||
Auth: HelloClientMessageAuth{
|
Auth: &HelloClientMessageAuth{
|
||||||
Type: "internal",
|
Type: "internal",
|
||||||
Params: &json.RawMessage{'{', '}'},
|
Params: json.RawMessage("{}"),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
&HelloClientMessage{
|
&HelloClientMessage{
|
||||||
Version: HelloVersionV1,
|
Version: HelloVersionV1,
|
||||||
Auth: HelloClientMessageAuth{
|
Auth: &HelloClientMessageAuth{
|
||||||
Type: "internal",
|
Type: "internal",
|
||||||
Params: &json.RawMessage{'x', 'y', 'z'}, // Invalid JSON.
|
Params: json.RawMessage("xyz"), // Invalid JSON.
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
// Hello version 2
|
// Hello version 2
|
||||||
&HelloClientMessage{
|
&HelloClientMessage{
|
||||||
Version: HelloVersionV2,
|
Version: HelloVersionV2,
|
||||||
Auth: HelloClientMessageAuth{
|
Auth: &HelloClientMessageAuth{
|
||||||
Url: "https://domain.invalid",
|
Url: "https://domain.invalid",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
&HelloClientMessage{
|
&HelloClientMessage{
|
||||||
Version: HelloVersionV2,
|
Version: HelloVersionV2,
|
||||||
Auth: HelloClientMessageAuth{
|
Auth: &HelloClientMessageAuth{
|
||||||
Params: (*json.RawMessage)(&tokenAuthParams),
|
Params: tokenAuthParams,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
&HelloClientMessage{
|
&HelloClientMessage{
|
||||||
Version: HelloVersionV2,
|
Version: HelloVersionV2,
|
||||||
Auth: HelloClientMessageAuth{
|
Auth: &HelloClientMessageAuth{
|
||||||
Params: (*json.RawMessage)(&tokenAuthParams),
|
Params: tokenAuthParams,
|
||||||
Url: "invalid-url",
|
Url: "invalid-url",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
&HelloClientMessage{
|
&HelloClientMessage{
|
||||||
Version: HelloVersionV2,
|
Version: HelloVersionV2,
|
||||||
Auth: HelloClientMessageAuth{
|
Auth: &HelloClientMessageAuth{
|
||||||
Params: (*json.RawMessage)(&internalAuthParams),
|
Params: internalAuthParams,
|
||||||
Url: "https://domain.invalid",
|
Url: "https://domain.invalid",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
&HelloClientMessage{
|
&HelloClientMessage{
|
||||||
Version: HelloVersionV2,
|
Version: HelloVersionV2,
|
||||||
Auth: HelloClientMessageAuth{
|
Auth: &HelloClientMessageAuth{
|
||||||
Params: &json.RawMessage{'x', 'y', 'z'}, // Invalid JSON.
|
Params: json.RawMessage("xyz"), // Invalid JSON.
|
||||||
Url: "https://domain.invalid",
|
Url: "https://domain.invalid",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -233,26 +235,27 @@ func TestHelloClientMessage(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMessageClientMessage(t *testing.T) {
|
func TestMessageClientMessage(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
valid_messages := []testCheckValid{
|
valid_messages := []testCheckValid{
|
||||||
&MessageClientMessage{
|
&MessageClientMessage{
|
||||||
Recipient: MessageClientMessageRecipient{
|
Recipient: MessageClientMessageRecipient{
|
||||||
Type: "session",
|
Type: "session",
|
||||||
SessionId: "the-session-id",
|
SessionId: "the-session-id",
|
||||||
},
|
},
|
||||||
Data: &json.RawMessage{'{', '}'},
|
Data: json.RawMessage("{}"),
|
||||||
},
|
},
|
||||||
&MessageClientMessage{
|
&MessageClientMessage{
|
||||||
Recipient: MessageClientMessageRecipient{
|
Recipient: MessageClientMessageRecipient{
|
||||||
Type: "user",
|
Type: "user",
|
||||||
UserId: "the-user-id",
|
UserId: "the-user-id",
|
||||||
},
|
},
|
||||||
Data: &json.RawMessage{'{', '}'},
|
Data: json.RawMessage("{}"),
|
||||||
},
|
},
|
||||||
&MessageClientMessage{
|
&MessageClientMessage{
|
||||||
Recipient: MessageClientMessageRecipient{
|
Recipient: MessageClientMessageRecipient{
|
||||||
Type: "room",
|
Type: "room",
|
||||||
},
|
},
|
||||||
Data: &json.RawMessage{'{', '}'},
|
Data: json.RawMessage("{}"),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
invalid_messages := []testCheckValid{
|
invalid_messages := []testCheckValid{
|
||||||
|
@ -267,20 +270,20 @@ func TestMessageClientMessage(t *testing.T) {
|
||||||
Recipient: MessageClientMessageRecipient{
|
Recipient: MessageClientMessageRecipient{
|
||||||
Type: "session",
|
Type: "session",
|
||||||
},
|
},
|
||||||
Data: &json.RawMessage{'{', '}'},
|
Data: json.RawMessage("{}"),
|
||||||
},
|
},
|
||||||
&MessageClientMessage{
|
&MessageClientMessage{
|
||||||
Recipient: MessageClientMessageRecipient{
|
Recipient: MessageClientMessageRecipient{
|
||||||
Type: "session",
|
Type: "session",
|
||||||
UserId: "the-user-id",
|
UserId: "the-user-id",
|
||||||
},
|
},
|
||||||
Data: &json.RawMessage{'{', '}'},
|
Data: json.RawMessage("{}"),
|
||||||
},
|
},
|
||||||
&MessageClientMessage{
|
&MessageClientMessage{
|
||||||
Recipient: MessageClientMessageRecipient{
|
Recipient: MessageClientMessageRecipient{
|
||||||
Type: "user",
|
Type: "user",
|
||||||
},
|
},
|
||||||
Data: &json.RawMessage{'{', '}'},
|
Data: json.RawMessage("{}"),
|
||||||
},
|
},
|
||||||
&MessageClientMessage{
|
&MessageClientMessage{
|
||||||
Recipient: MessageClientMessageRecipient{
|
Recipient: MessageClientMessageRecipient{
|
||||||
|
@ -293,13 +296,13 @@ func TestMessageClientMessage(t *testing.T) {
|
||||||
Type: "user",
|
Type: "user",
|
||||||
SessionId: "the-user-id",
|
SessionId: "the-user-id",
|
||||||
},
|
},
|
||||||
Data: &json.RawMessage{'{', '}'},
|
Data: json.RawMessage("{}"),
|
||||||
},
|
},
|
||||||
&MessageClientMessage{
|
&MessageClientMessage{
|
||||||
Recipient: MessageClientMessageRecipient{
|
Recipient: MessageClientMessageRecipient{
|
||||||
Type: "unknown-type",
|
Type: "unknown-type",
|
||||||
},
|
},
|
||||||
Data: &json.RawMessage{'{', '}'},
|
Data: json.RawMessage("{}"),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
testMessages(t, "message", valid_messages, invalid_messages)
|
testMessages(t, "message", valid_messages, invalid_messages)
|
||||||
|
@ -314,6 +317,7 @@ func TestMessageClientMessage(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestByeClientMessage(t *testing.T) {
|
func TestByeClientMessage(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
// Any "bye" message is valid.
|
// Any "bye" message is valid.
|
||||||
valid_messages := []testCheckValid{
|
valid_messages := []testCheckValid{
|
||||||
&ByeClientMessage{},
|
&ByeClientMessage{},
|
||||||
|
@ -332,6 +336,7 @@ func TestByeClientMessage(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRoomClientMessage(t *testing.T) {
|
func TestRoomClientMessage(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
// Any "room" message is valid.
|
// Any "room" message is valid.
|
||||||
valid_messages := []testCheckValid{
|
valid_messages := []testCheckValid{
|
||||||
&RoomClientMessage{},
|
&RoomClientMessage{},
|
||||||
|
@ -350,6 +355,7 @@ func TestRoomClientMessage(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestErrorMessages(t *testing.T) {
|
func TestErrorMessages(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
id := "request-id"
|
id := "request-id"
|
||||||
msg := ClientMessage{
|
msg := ClientMessage{
|
||||||
Id: id,
|
Id: id,
|
||||||
|
@ -382,12 +388,13 @@ func TestErrorMessages(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIsChatRefresh(t *testing.T) {
|
func TestIsChatRefresh(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var msg ServerMessage
|
var msg ServerMessage
|
||||||
data_true := []byte("{\"type\":\"chat\",\"chat\":{\"refresh\":true}}")
|
data_true := []byte("{\"type\":\"chat\",\"chat\":{\"refresh\":true}}")
|
||||||
msg = ServerMessage{
|
msg = ServerMessage{
|
||||||
Type: "message",
|
Type: "message",
|
||||||
Message: &MessageServerMessage{
|
Message: &MessageServerMessage{
|
||||||
Data: (*json.RawMessage)(&data_true),
|
Data: data_true,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
if !msg.IsChatRefresh() {
|
if !msg.IsChatRefresh() {
|
||||||
|
@ -398,7 +405,7 @@ func TestIsChatRefresh(t *testing.T) {
|
||||||
msg = ServerMessage{
|
msg = ServerMessage{
|
||||||
Type: "message",
|
Type: "message",
|
||||||
Message: &MessageServerMessage{
|
Message: &MessageServerMessage{
|
||||||
Data: (*json.RawMessage)(&data_false),
|
Data: data_false,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
if msg.IsChatRefresh() {
|
if msg.IsChatRefresh() {
|
||||||
|
@ -426,6 +433,7 @@ func assertEqualStrings(t *testing.T, expected, result []string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_Welcome_AddRemoveFeature(t *testing.T) {
|
func Test_Welcome_AddRemoveFeature(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var msg WelcomeServerMessage
|
var msg WelcomeServerMessage
|
||||||
assertEqualStrings(t, []string{}, msg.Features)
|
assertEqualStrings(t, []string{}, msg.Features)
|
||||||
|
|
||||||
|
|
|
@ -280,6 +280,8 @@ func (e *asyncEventsNats) Close() {
|
||||||
sub.close()
|
sub.close()
|
||||||
}
|
}
|
||||||
}(e.sessionSubscriptions)
|
}(e.sessionSubscriptions)
|
||||||
|
// Can't use clear(...) here as the maps are processed asynchronously by the
|
||||||
|
// goroutines above.
|
||||||
e.backendRoomSubscriptions = make(map[string]*asyncBackendRoomSubscriberNats)
|
e.backendRoomSubscriptions = make(map[string]*asyncBackendRoomSubscriberNats)
|
||||||
e.roomSubscriptions = make(map[string]*asyncRoomSubscriberNats)
|
e.roomSubscriptions = make(map[string]*asyncRoomSubscriberNats)
|
||||||
e.userSubscriptions = make(map[string]*asyncUserSubscriberNats)
|
e.userSubscriptions = make(map[string]*asyncUserSubscriberNats)
|
||||||
|
|
|
@ -194,7 +194,7 @@ func (b *BackendClient) PerformJSONRequest(ctx context.Context, u *url.URL, requ
|
||||||
if err := json.Unmarshal(body, &ocs); err != nil {
|
if err := json.Unmarshal(body, &ocs); err != nil {
|
||||||
log.Printf("Could not decode OCS response %s from %s: %s", string(body), req.URL, err)
|
log.Printf("Could not decode OCS response %s from %s: %s", string(body), req.URL, err)
|
||||||
return err
|
return err
|
||||||
} else if ocs.Ocs == nil || ocs.Ocs.Data == nil {
|
} else if ocs.Ocs == nil || len(ocs.Ocs.Data) == 0 {
|
||||||
log.Printf("Incomplete OCS response %s from %s", string(body), req.URL)
|
log.Printf("Incomplete OCS response %s from %s", string(body), req.URL)
|
||||||
return ErrIncompleteResponse
|
return ErrIncompleteResponse
|
||||||
}
|
}
|
||||||
|
@ -205,8 +205,8 @@ func (b *BackendClient) PerformJSONRequest(ctx context.Context, u *url.URL, requ
|
||||||
return ErrThrottledResponse
|
return ErrThrottledResponse
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := json.Unmarshal(*ocs.Ocs.Data, response); err != nil {
|
if err := json.Unmarshal(ocs.Ocs.Data, response); err != nil {
|
||||||
log.Printf("Could not decode OCS response body %s from %s: %s", string(*ocs.Ocs.Data), req.URL, err)
|
log.Printf("Could not decode OCS response body %s from %s: %s", string(ocs.Ocs.Data), req.URL, err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else if err := json.Unmarshal(body, response); err != nil {
|
} else if err := json.Unmarshal(body, response); err != nil {
|
||||||
|
|
|
@ -45,7 +45,7 @@ func returnOCS(t *testing.T, w http.ResponseWriter, body []byte) {
|
||||||
StatusCode: http.StatusOK,
|
StatusCode: http.StatusOK,
|
||||||
Message: "OK",
|
Message: "OK",
|
||||||
},
|
},
|
||||||
Data: (*json.RawMessage)(&body),
|
Data: body,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
if strings.Contains(t.Name(), "Throttled") {
|
if strings.Contains(t.Name(), "Throttled") {
|
||||||
|
@ -70,6 +70,8 @@ func returnOCS(t *testing.T, w http.ResponseWriter, body []byte) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPostOnRedirect(t *testing.T) {
|
func TestPostOnRedirect(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
r := mux.NewRouter()
|
r := mux.NewRouter()
|
||||||
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
|
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
|
||||||
http.Redirect(w, r, "/ocs/v2.php/two", http.StatusTemporaryRedirect)
|
http.Redirect(w, r, "/ocs/v2.php/two", http.StatusTemporaryRedirect)
|
||||||
|
@ -125,6 +127,8 @@ func TestPostOnRedirect(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPostOnRedirectDifferentHost(t *testing.T) {
|
func TestPostOnRedirectDifferentHost(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
r := mux.NewRouter()
|
r := mux.NewRouter()
|
||||||
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
|
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
|
||||||
http.Redirect(w, r, "http://domain.invalid/ocs/v2.php/two", http.StatusTemporaryRedirect)
|
http.Redirect(w, r, "http://domain.invalid/ocs/v2.php/two", http.StatusTemporaryRedirect)
|
||||||
|
@ -165,6 +169,8 @@ func TestPostOnRedirectDifferentHost(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPostOnRedirectStatusFound(t *testing.T) {
|
func TestPostOnRedirectStatusFound(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
r := mux.NewRouter()
|
r := mux.NewRouter()
|
||||||
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
|
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
|
||||||
http.Redirect(w, r, "/ocs/v2.php/two", http.StatusFound)
|
http.Redirect(w, r, "/ocs/v2.php/two", http.StatusFound)
|
||||||
|
@ -217,6 +223,8 @@ func TestPostOnRedirectStatusFound(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestHandleThrottled(t *testing.T) {
|
func TestHandleThrottled(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
r := mux.NewRouter()
|
r := mux.NewRouter()
|
||||||
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
|
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
|
||||||
returnOCS(t, w, []byte("[]"))
|
returnOCS(t, w, []byte("[]"))
|
||||||
|
|
|
@ -92,6 +92,7 @@ func testBackends(t *testing.T, config *BackendConfiguration, valid_urls [][]str
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIsUrlAllowed_Compat(t *testing.T) {
|
func TestIsUrlAllowed_Compat(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
// Old-style configuration
|
// Old-style configuration
|
||||||
valid_urls := []string{
|
valid_urls := []string{
|
||||||
"http://domain.invalid",
|
"http://domain.invalid",
|
||||||
|
@ -114,6 +115,7 @@ func TestIsUrlAllowed_Compat(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIsUrlAllowed_CompatForceHttps(t *testing.T) {
|
func TestIsUrlAllowed_CompatForceHttps(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
// Old-style configuration, force HTTPS
|
// Old-style configuration, force HTTPS
|
||||||
valid_urls := []string{
|
valid_urls := []string{
|
||||||
"https://domain.invalid",
|
"https://domain.invalid",
|
||||||
|
@ -135,6 +137,7 @@ func TestIsUrlAllowed_CompatForceHttps(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIsUrlAllowed(t *testing.T) {
|
func TestIsUrlAllowed(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
valid_urls := [][]string{
|
valid_urls := [][]string{
|
||||||
{"https://domain.invalid/foo", string(testBackendSecret) + "-foo"},
|
{"https://domain.invalid/foo", string(testBackendSecret) + "-foo"},
|
||||||
{"https://domain.invalid/foo/", string(testBackendSecret) + "-foo"},
|
{"https://domain.invalid/foo/", string(testBackendSecret) + "-foo"},
|
||||||
|
@ -180,6 +183,7 @@ func TestIsUrlAllowed(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIsUrlAllowed_EmptyAllowlist(t *testing.T) {
|
func TestIsUrlAllowed_EmptyAllowlist(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
valid_urls := []string{}
|
valid_urls := []string{}
|
||||||
invalid_urls := []string{
|
invalid_urls := []string{
|
||||||
"http://domain.invalid",
|
"http://domain.invalid",
|
||||||
|
@ -197,6 +201,7 @@ func TestIsUrlAllowed_EmptyAllowlist(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIsUrlAllowed_AllowAll(t *testing.T) {
|
func TestIsUrlAllowed_AllowAll(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
valid_urls := []string{
|
valid_urls := []string{
|
||||||
"http://domain.invalid",
|
"http://domain.invalid",
|
||||||
"https://domain.invalid",
|
"https://domain.invalid",
|
||||||
|
@ -222,6 +227,7 @@ type ParseBackendIdsTestcase struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParseBackendIds(t *testing.T) {
|
func TestParseBackendIds(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
testcases := []ParseBackendIdsTestcase{
|
testcases := []ParseBackendIdsTestcase{
|
||||||
{"", nil},
|
{"", nil},
|
||||||
{"backend1", []string{"backend1"}},
|
{"backend1", []string{"backend1"}},
|
||||||
|
@ -241,6 +247,7 @@ func TestParseBackendIds(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendReloadNoChange(t *testing.T) {
|
func TestBackendReloadNoChange(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
current := testutil.ToFloat64(statsBackendsCurrent)
|
current := testutil.ToFloat64(statsBackendsCurrent)
|
||||||
original_config := goconf.NewConfigFile()
|
original_config := goconf.NewConfigFile()
|
||||||
original_config.AddOption("backend", "backends", "backend1, backend2")
|
original_config.AddOption("backend", "backends", "backend1, backend2")
|
||||||
|
@ -276,6 +283,7 @@ func TestBackendReloadNoChange(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendReloadChangeExistingURL(t *testing.T) {
|
func TestBackendReloadChangeExistingURL(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
current := testutil.ToFloat64(statsBackendsCurrent)
|
current := testutil.ToFloat64(statsBackendsCurrent)
|
||||||
original_config := goconf.NewConfigFile()
|
original_config := goconf.NewConfigFile()
|
||||||
original_config.AddOption("backend", "backends", "backend1, backend2")
|
original_config.AddOption("backend", "backends", "backend1, backend2")
|
||||||
|
@ -316,6 +324,7 @@ func TestBackendReloadChangeExistingURL(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendReloadChangeSecret(t *testing.T) {
|
func TestBackendReloadChangeSecret(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
current := testutil.ToFloat64(statsBackendsCurrent)
|
current := testutil.ToFloat64(statsBackendsCurrent)
|
||||||
original_config := goconf.NewConfigFile()
|
original_config := goconf.NewConfigFile()
|
||||||
original_config.AddOption("backend", "backends", "backend1, backend2")
|
original_config.AddOption("backend", "backends", "backend1, backend2")
|
||||||
|
@ -354,6 +363,7 @@ func TestBackendReloadChangeSecret(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendReloadAddBackend(t *testing.T) {
|
func TestBackendReloadAddBackend(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
current := testutil.ToFloat64(statsBackendsCurrent)
|
current := testutil.ToFloat64(statsBackendsCurrent)
|
||||||
original_config := goconf.NewConfigFile()
|
original_config := goconf.NewConfigFile()
|
||||||
original_config.AddOption("backend", "backends", "backend1")
|
original_config.AddOption("backend", "backends", "backend1")
|
||||||
|
@ -394,6 +404,7 @@ func TestBackendReloadAddBackend(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendReloadRemoveHost(t *testing.T) {
|
func TestBackendReloadRemoveHost(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
current := testutil.ToFloat64(statsBackendsCurrent)
|
current := testutil.ToFloat64(statsBackendsCurrent)
|
||||||
original_config := goconf.NewConfigFile()
|
original_config := goconf.NewConfigFile()
|
||||||
original_config.AddOption("backend", "backends", "backend1, backend2")
|
original_config.AddOption("backend", "backends", "backend1, backend2")
|
||||||
|
@ -431,6 +442,7 @@ func TestBackendReloadRemoveHost(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendReloadRemoveBackendFromSharedHost(t *testing.T) {
|
func TestBackendReloadRemoveBackendFromSharedHost(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
current := testutil.ToFloat64(statsBackendsCurrent)
|
current := testutil.ToFloat64(statsBackendsCurrent)
|
||||||
original_config := goconf.NewConfigFile()
|
original_config := goconf.NewConfigFile()
|
||||||
original_config.AddOption("backend", "backends", "backend1, backend2")
|
original_config.AddOption("backend", "backends", "backend1, backend2")
|
||||||
|
@ -486,6 +498,8 @@ func mustParse(s string) *url.URL {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendConfiguration_Etcd(t *testing.T) {
|
func TestBackendConfiguration_Etcd(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
etcd, client := NewEtcdClientForTest(t)
|
etcd, client := NewEtcdClientForTest(t)
|
||||||
|
|
||||||
url1 := "https://domain1.invalid/foo"
|
url1 := "https://domain1.invalid/foo"
|
||||||
|
@ -619,6 +633,8 @@ func TestBackendConfiguration_Etcd(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendCommonSecret(t *testing.T) {
|
func TestBackendCommonSecret(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
u1, err := url.Parse("http://domain1.invalid")
|
u1, err := url.Parse("http://domain1.invalid")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
|
|
@ -68,7 +68,7 @@ type BackendServer struct {
|
||||||
turnvalid time.Duration
|
turnvalid time.Duration
|
||||||
turnservers []string
|
turnservers []string
|
||||||
|
|
||||||
statsAllowedIps *AllowedIps
|
statsAllowedIps atomic.Pointer[AllowedIps]
|
||||||
invalidSecret []byte
|
invalidSecret []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -120,7 +120,7 @@ func NewBackendServer(config *goconf.ConfigFile, hub *Hub, version string) (*Bac
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return &BackendServer{
|
result := &BackendServer{
|
||||||
hub: hub,
|
hub: hub,
|
||||||
events: hub.events,
|
events: hub.events,
|
||||||
roomSessions: hub.roomSessions,
|
roomSessions: hub.roomSessions,
|
||||||
|
@ -131,9 +131,27 @@ func NewBackendServer(config *goconf.ConfigFile, hub *Hub, version string) (*Bac
|
||||||
turnvalid: turnvalid,
|
turnvalid: turnvalid,
|
||||||
turnservers: turnserverslist,
|
turnservers: turnserverslist,
|
||||||
|
|
||||||
statsAllowedIps: statsAllowedIps,
|
invalidSecret: invalidSecret,
|
||||||
invalidSecret: invalidSecret,
|
}
|
||||||
}, nil
|
|
||||||
|
result.statsAllowedIps.Store(statsAllowedIps)
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *BackendServer) Reload(config *goconf.ConfigFile) {
|
||||||
|
statsAllowed, _ := config.GetString("stats", "allowed_ips")
|
||||||
|
if statsAllowedIps, err := ParseAllowedIps(statsAllowed); err == nil {
|
||||||
|
if !statsAllowedIps.Empty() {
|
||||||
|
log.Printf("Only allowing access to the stats endpoint from %s", statsAllowed)
|
||||||
|
} else {
|
||||||
|
log.Printf("No IPs configured for the stats endpoint, only allowing access from 127.0.0.1")
|
||||||
|
statsAllowedIps = DefaultAllowedIps()
|
||||||
|
}
|
||||||
|
b.statsAllowedIps.Store(statsAllowedIps)
|
||||||
|
} else {
|
||||||
|
log.Printf("Error parsing allowed stats ips from \"%s\": %s", statsAllowedIps, err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *BackendServer) Start(r *mux.Router) error {
|
func (b *BackendServer) Start(r *mux.Router) error {
|
||||||
|
@ -277,7 +295,7 @@ func (b *BackendServer) parseRequestBody(f func(http.ResponseWriter, *http.Reque
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *BackendServer) sendRoomInvite(roomid string, backend *Backend, userids []string, properties *json.RawMessage) {
|
func (b *BackendServer) sendRoomInvite(roomid string, backend *Backend, userids []string, properties json.RawMessage) {
|
||||||
msg := &AsyncMessage{
|
msg := &AsyncMessage{
|
||||||
Type: "message",
|
Type: "message",
|
||||||
Message: &ServerMessage{
|
Message: &ServerMessage{
|
||||||
|
@ -347,7 +365,7 @@ func (b *BackendServer) sendRoomDisinvite(roomid string, backend *Backend, reaso
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *BackendServer) sendRoomUpdate(roomid string, backend *Backend, notified_userids []string, all_userids []string, properties *json.RawMessage) {
|
func (b *BackendServer) sendRoomUpdate(roomid string, backend *Backend, notified_userids []string, all_userids []string, properties json.RawMessage) {
|
||||||
msg := &AsyncMessage{
|
msg := &AsyncMessage{
|
||||||
Type: "message",
|
Type: "message",
|
||||||
Message: &ServerMessage{
|
Message: &ServerMessage{
|
||||||
|
@ -553,11 +571,11 @@ func (b *BackendServer) sendRoomSwitchTo(roomid string, backend *Backend, reques
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
var mu sync.Mutex
|
var mu sync.Mutex
|
||||||
if request.SwitchTo.Sessions != nil {
|
if len(request.SwitchTo.Sessions) > 0 {
|
||||||
// We support both a list of sessions or a map with additional details per session.
|
// We support both a list of sessions or a map with additional details per session.
|
||||||
if (*request.SwitchTo.Sessions)[0] == '[' {
|
if request.SwitchTo.Sessions[0] == '[' {
|
||||||
var sessionsList BackendRoomSwitchToSessionsList
|
var sessionsList BackendRoomSwitchToSessionsList
|
||||||
if err := json.Unmarshal(*request.SwitchTo.Sessions, &sessionsList); err != nil {
|
if err := json.Unmarshal(request.SwitchTo.Sessions, &sessionsList); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -595,7 +613,7 @@ func (b *BackendServer) sendRoomSwitchTo(roomid string, backend *Backend, reques
|
||||||
request.SwitchTo.SessionsMap = nil
|
request.SwitchTo.SessionsMap = nil
|
||||||
} else {
|
} else {
|
||||||
var sessionsMap BackendRoomSwitchToSessionsMap
|
var sessionsMap BackendRoomSwitchToSessionsMap
|
||||||
if err := json.Unmarshal(*request.SwitchTo.Sessions, &sessionsMap); err != nil {
|
if err := json.Unmarshal(request.SwitchTo.Sessions, &sessionsMap); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -761,6 +779,16 @@ func (b *BackendServer) startDialout(roomid string, backend *Backend, backendUrl
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body []byte) {
|
func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body []byte) {
|
||||||
|
throttle, err := b.hub.throttler.CheckBruteforce(r.Context(), b.hub.getRealUserIP(r), "BackendRoomAuth")
|
||||||
|
if err == ErrBruteforceDetected {
|
||||||
|
http.Error(w, "Too many requests", http.StatusTooManyRequests)
|
||||||
|
return
|
||||||
|
} else if err != nil {
|
||||||
|
log.Printf("Error checking for bruteforce: %s", err)
|
||||||
|
http.Error(w, "Could not check for bruteforce", http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
v := mux.Vars(r)
|
v := mux.Vars(r)
|
||||||
roomid := v["roomid"]
|
roomid := v["roomid"]
|
||||||
|
|
||||||
|
@ -773,6 +801,7 @@ func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body
|
||||||
|
|
||||||
if backend == nil {
|
if backend == nil {
|
||||||
// Unknown backend URL passed, return immediately.
|
// Unknown backend URL passed, return immediately.
|
||||||
|
throttle(r.Context())
|
||||||
http.Error(w, "Authentication check failed", http.StatusForbidden)
|
http.Error(w, "Authentication check failed", http.StatusForbidden)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -794,12 +823,14 @@ func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body
|
||||||
}
|
}
|
||||||
|
|
||||||
if backend == nil {
|
if backend == nil {
|
||||||
|
throttle(r.Context())
|
||||||
http.Error(w, "Authentication check failed", http.StatusForbidden)
|
http.Error(w, "Authentication check failed", http.StatusForbidden)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if !ValidateBackendChecksum(r, body, backend.Secret()) {
|
if !ValidateBackendChecksum(r, body, backend.Secret()) {
|
||||||
|
throttle(r.Context())
|
||||||
http.Error(w, "Authentication check failed", http.StatusForbidden)
|
http.Error(w, "Authentication check failed", http.StatusForbidden)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -814,7 +845,6 @@ func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body
|
||||||
request.ReceivedTime = time.Now().UnixNano()
|
request.ReceivedTime = time.Now().UnixNano()
|
||||||
|
|
||||||
var response any
|
var response any
|
||||||
var err error
|
|
||||||
switch request.Type {
|
switch request.Type {
|
||||||
case "invite":
|
case "invite":
|
||||||
b.sendRoomInvite(roomid, backend, request.Invite.UserIds, request.Invite.Properties)
|
b.sendRoomInvite(roomid, backend, request.Invite.UserIds, request.Invite.Properties)
|
||||||
|
@ -881,19 +911,14 @@ func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *BackendServer) allowStatsAccess(r *http.Request) bool {
|
func (b *BackendServer) allowStatsAccess(r *http.Request) bool {
|
||||||
addr := getRealUserIP(r)
|
addr := b.hub.getRealUserIP(r)
|
||||||
if strings.Contains(addr, ":") {
|
|
||||||
if host, _, err := net.SplitHostPort(addr); err == nil {
|
|
||||||
addr = host
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ip := net.ParseIP(addr)
|
ip := net.ParseIP(addr)
|
||||||
if ip == nil {
|
if len(ip) == 0 {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
return b.statsAllowedIps.Allowed(ip)
|
allowed := b.statsAllowedIps.Load()
|
||||||
|
return allowed != nil && allowed.Allowed(ip)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *BackendServer) validateStatsRequest(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
|
func (b *BackendServer) validateStatsRequest(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
|
||||||
|
|
|
@ -30,6 +30,7 @@ import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
"net/textproto"
|
"net/textproto"
|
||||||
|
@ -275,6 +276,8 @@ func expectRoomlistEvent(ch chan *AsyncMessage, msgType string) (*EventServerMes
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_NoAuth(t *testing.T) {
|
func TestBackendServer_NoAuth(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
_, _, _, _, _, server := CreateBackendServerForTest(t)
|
_, _, _, _, _, server := CreateBackendServerForTest(t)
|
||||||
|
|
||||||
roomId := "the-room-id"
|
roomId := "the-room-id"
|
||||||
|
@ -301,6 +304,8 @@ func TestBackendServer_NoAuth(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_InvalidAuth(t *testing.T) {
|
func TestBackendServer_InvalidAuth(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
_, _, _, _, _, server := CreateBackendServerForTest(t)
|
_, _, _, _, _, server := CreateBackendServerForTest(t)
|
||||||
|
|
||||||
roomId := "the-room-id"
|
roomId := "the-room-id"
|
||||||
|
@ -329,6 +334,8 @@ func TestBackendServer_InvalidAuth(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_OldCompatAuth(t *testing.T) {
|
func TestBackendServer_OldCompatAuth(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
_, _, _, _, _, server := CreateBackendServerForTest(t)
|
_, _, _, _, _, server := CreateBackendServerForTest(t)
|
||||||
|
|
||||||
roomId := "the-room-id"
|
roomId := "the-room-id"
|
||||||
|
@ -343,7 +350,7 @@ func TestBackendServer_OldCompatAuth(t *testing.T) {
|
||||||
AllUserIds: []string{
|
AllUserIds: []string{
|
||||||
userid,
|
userid,
|
||||||
},
|
},
|
||||||
Properties: &roomProperties,
|
Properties: roomProperties,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -378,6 +385,8 @@ func TestBackendServer_OldCompatAuth(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_InvalidBody(t *testing.T) {
|
func TestBackendServer_InvalidBody(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
_, _, _, _, _, server := CreateBackendServerForTest(t)
|
_, _, _, _, _, server := CreateBackendServerForTest(t)
|
||||||
|
|
||||||
roomId := "the-room-id"
|
roomId := "the-room-id"
|
||||||
|
@ -397,6 +406,8 @@ func TestBackendServer_InvalidBody(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_UnsupportedRequest(t *testing.T) {
|
func TestBackendServer_UnsupportedRequest(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
_, _, _, _, _, server := CreateBackendServerForTest(t)
|
_, _, _, _, _, server := CreateBackendServerForTest(t)
|
||||||
|
|
||||||
msg := &BackendServerRoomRequest{
|
msg := &BackendServerRoomRequest{
|
||||||
|
@ -423,8 +434,10 @@ func TestBackendServer_UnsupportedRequest(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_RoomInvite(t *testing.T) {
|
func TestBackendServer_RoomInvite(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
for _, backend := range eventBackendsForTest {
|
for _, backend := range eventBackendsForTest {
|
||||||
t.Run(backend, func(t *testing.T) {
|
t.Run(backend, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
RunTestBackendServer_RoomInvite(t)
|
RunTestBackendServer_RoomInvite(t)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -468,7 +481,7 @@ func RunTestBackendServer_RoomInvite(t *testing.T) {
|
||||||
AllUserIds: []string{
|
AllUserIds: []string{
|
||||||
userid,
|
userid,
|
||||||
},
|
},
|
||||||
Properties: &roomProperties,
|
Properties: roomProperties,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -497,14 +510,16 @@ func RunTestBackendServer_RoomInvite(t *testing.T) {
|
||||||
t.Errorf("Expected invite, got %+v", event)
|
t.Errorf("Expected invite, got %+v", event)
|
||||||
} else if event.Invite.RoomId != roomId {
|
} else if event.Invite.RoomId != roomId {
|
||||||
t.Errorf("Expected room %s, got %+v", roomId, event)
|
t.Errorf("Expected room %s, got %+v", roomId, event)
|
||||||
} else if event.Invite.Properties == nil || !bytes.Equal(*event.Invite.Properties, roomProperties) {
|
} else if !bytes.Equal(event.Invite.Properties, roomProperties) {
|
||||||
t.Errorf("Room properties don't match: expected %s, got %s", string(roomProperties), string(*event.Invite.Properties))
|
t.Errorf("Room properties don't match: expected %s, got %s", string(roomProperties), string(event.Invite.Properties))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_RoomDisinvite(t *testing.T) {
|
func TestBackendServer_RoomDisinvite(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
for _, backend := range eventBackendsForTest {
|
for _, backend := range eventBackendsForTest {
|
||||||
t.Run(backend, func(t *testing.T) {
|
t.Run(backend, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
RunTestBackendServer_RoomDisinvite(t)
|
RunTestBackendServer_RoomDisinvite(t)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -568,7 +583,7 @@ func RunTestBackendServer_RoomDisinvite(t *testing.T) {
|
||||||
roomId + "-" + hello.Hello.SessionId,
|
roomId + "-" + hello.Hello.SessionId,
|
||||||
},
|
},
|
||||||
AllUserIds: []string{},
|
AllUserIds: []string{},
|
||||||
Properties: &roomProperties,
|
Properties: roomProperties,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -596,8 +611,8 @@ func RunTestBackendServer_RoomDisinvite(t *testing.T) {
|
||||||
t.Errorf("Expected disinvite, got %+v", event)
|
t.Errorf("Expected disinvite, got %+v", event)
|
||||||
} else if event.Disinvite.RoomId != roomId {
|
} else if event.Disinvite.RoomId != roomId {
|
||||||
t.Errorf("Expected room %s, got %+v", roomId, event)
|
t.Errorf("Expected room %s, got %+v", roomId, event)
|
||||||
} else if event.Disinvite.Properties != nil {
|
} else if len(event.Disinvite.Properties) > 0 {
|
||||||
t.Errorf("Room properties should be omitted, got %s", string(*event.Disinvite.Properties))
|
t.Errorf("Room properties should be omitted, got %s", string(event.Disinvite.Properties))
|
||||||
} else if event.Disinvite.Reason != "disinvited" {
|
} else if event.Disinvite.Reason != "disinvited" {
|
||||||
t.Errorf("Reason should be disinvited, got %s", event.Disinvite.Reason)
|
t.Errorf("Reason should be disinvited, got %s", event.Disinvite.Reason)
|
||||||
}
|
}
|
||||||
|
@ -616,6 +631,8 @@ func RunTestBackendServer_RoomDisinvite(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_RoomDisinviteDifferentRooms(t *testing.T) {
|
func TestBackendServer_RoomDisinviteDifferentRooms(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
||||||
|
|
||||||
client1 := NewTestClient(t, server, hub)
|
client1 := NewTestClient(t, server, hub)
|
||||||
|
@ -712,7 +729,7 @@ func TestBackendServer_RoomDisinviteDifferentRooms(t *testing.T) {
|
||||||
UserIds: []string{
|
UserIds: []string{
|
||||||
testDefaultUserId,
|
testDefaultUserId,
|
||||||
},
|
},
|
||||||
Properties: (*json.RawMessage)(&testRoomProperties),
|
Properties: testRoomProperties,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -741,8 +758,10 @@ func TestBackendServer_RoomDisinviteDifferentRooms(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_RoomUpdate(t *testing.T) {
|
func TestBackendServer_RoomUpdate(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
for _, backend := range eventBackendsForTest {
|
for _, backend := range eventBackendsForTest {
|
||||||
t.Run(backend, func(t *testing.T) {
|
t.Run(backend, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
RunTestBackendServer_RoomUpdate(t)
|
RunTestBackendServer_RoomUpdate(t)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -762,7 +781,7 @@ func RunTestBackendServer_RoomUpdate(t *testing.T) {
|
||||||
if backend == nil {
|
if backend == nil {
|
||||||
t.Fatalf("Did not find backend")
|
t.Fatalf("Did not find backend")
|
||||||
}
|
}
|
||||||
room, err := hub.createRoom(roomId, &emptyProperties, backend)
|
room, err := hub.createRoom(roomId, emptyProperties, backend)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Could not create room: %s", err)
|
t.Fatalf("Could not create room: %s", err)
|
||||||
}
|
}
|
||||||
|
@ -786,7 +805,7 @@ func RunTestBackendServer_RoomUpdate(t *testing.T) {
|
||||||
UserIds: []string{
|
UserIds: []string{
|
||||||
userid,
|
userid,
|
||||||
},
|
},
|
||||||
Properties: &roomProperties,
|
Properties: roomProperties,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -814,8 +833,8 @@ func RunTestBackendServer_RoomUpdate(t *testing.T) {
|
||||||
t.Errorf("Expected update, got %+v", event)
|
t.Errorf("Expected update, got %+v", event)
|
||||||
} else if event.Update.RoomId != roomId {
|
} else if event.Update.RoomId != roomId {
|
||||||
t.Errorf("Expected room %s, got %+v", roomId, event)
|
t.Errorf("Expected room %s, got %+v", roomId, event)
|
||||||
} else if event.Update.Properties == nil || !bytes.Equal(*event.Update.Properties, roomProperties) {
|
} else if !bytes.Equal(event.Update.Properties, roomProperties) {
|
||||||
t.Errorf("Room properties don't match: expected %s, got %s", string(roomProperties), string(*event.Update.Properties))
|
t.Errorf("Room properties don't match: expected %s, got %s", string(roomProperties), string(event.Update.Properties))
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: Use event to wait for asynchronous messages.
|
// TODO: Use event to wait for asynchronous messages.
|
||||||
|
@ -825,14 +844,16 @@ func RunTestBackendServer_RoomUpdate(t *testing.T) {
|
||||||
if room == nil {
|
if room == nil {
|
||||||
t.Fatalf("Room %s does not exist", roomId)
|
t.Fatalf("Room %s does not exist", roomId)
|
||||||
}
|
}
|
||||||
if string(*room.Properties()) != string(roomProperties) {
|
if string(room.Properties()) != string(roomProperties) {
|
||||||
t.Errorf("Expected properties %s for room %s, got %s", string(roomProperties), room.Id(), string(*room.Properties()))
|
t.Errorf("Expected properties %s for room %s, got %s", string(roomProperties), room.Id(), string(room.Properties()))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_RoomDelete(t *testing.T) {
|
func TestBackendServer_RoomDelete(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
for _, backend := range eventBackendsForTest {
|
for _, backend := range eventBackendsForTest {
|
||||||
t.Run(backend, func(t *testing.T) {
|
t.Run(backend, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
RunTestBackendServer_RoomDelete(t)
|
RunTestBackendServer_RoomDelete(t)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -852,7 +873,7 @@ func RunTestBackendServer_RoomDelete(t *testing.T) {
|
||||||
if backend == nil {
|
if backend == nil {
|
||||||
t.Fatalf("Did not find backend")
|
t.Fatalf("Did not find backend")
|
||||||
}
|
}
|
||||||
if _, err := hub.createRoom(roomId, &emptyProperties, backend); err != nil {
|
if _, err := hub.createRoom(roomId, emptyProperties, backend); err != nil {
|
||||||
t.Fatalf("Could not create room: %s", err)
|
t.Fatalf("Could not create room: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -900,8 +921,8 @@ func RunTestBackendServer_RoomDelete(t *testing.T) {
|
||||||
t.Errorf("Expected disinvite, got %+v", event)
|
t.Errorf("Expected disinvite, got %+v", event)
|
||||||
} else if event.Disinvite.RoomId != roomId {
|
} else if event.Disinvite.RoomId != roomId {
|
||||||
t.Errorf("Expected room %s, got %+v", roomId, event)
|
t.Errorf("Expected room %s, got %+v", roomId, event)
|
||||||
} else if event.Disinvite.Properties != nil {
|
} else if len(event.Disinvite.Properties) > 0 {
|
||||||
t.Errorf("Room properties should be omitted, got %s", string(*event.Disinvite.Properties))
|
t.Errorf("Room properties should be omitted, got %s", string(event.Disinvite.Properties))
|
||||||
} else if event.Disinvite.Reason != "deleted" {
|
} else if event.Disinvite.Reason != "deleted" {
|
||||||
t.Errorf("Reason should be deleted, got %s", event.Disinvite.Reason)
|
t.Errorf("Reason should be deleted, got %s", event.Disinvite.Reason)
|
||||||
}
|
}
|
||||||
|
@ -916,8 +937,10 @@ func RunTestBackendServer_RoomDelete(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_ParticipantsUpdatePermissions(t *testing.T) {
|
func TestBackendServer_ParticipantsUpdatePermissions(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
for _, subtest := range clusteredTests {
|
for _, subtest := range clusteredTests {
|
||||||
t.Run(subtest, func(t *testing.T) {
|
t.Run(subtest, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var hub1 *Hub
|
var hub1 *Hub
|
||||||
var hub2 *Hub
|
var hub2 *Hub
|
||||||
var server1 *httptest.Server
|
var server1 *httptest.Server
|
||||||
|
@ -1047,6 +1070,8 @@ func TestBackendServer_ParticipantsUpdatePermissions(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_ParticipantsUpdateEmptyPermissions(t *testing.T) {
|
func TestBackendServer_ParticipantsUpdateEmptyPermissions(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
||||||
|
|
||||||
client := NewTestClient(t, server, hub)
|
client := NewTestClient(t, server, hub)
|
||||||
|
@ -1132,6 +1157,8 @@ func TestBackendServer_ParticipantsUpdateEmptyPermissions(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_ParticipantsUpdateTimeout(t *testing.T) {
|
func TestBackendServer_ParticipantsUpdateTimeout(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
||||||
|
|
||||||
client1 := NewTestClient(t, server, hub)
|
client1 := NewTestClient(t, server, hub)
|
||||||
|
@ -1345,8 +1372,10 @@ func TestBackendServer_ParticipantsUpdateTimeout(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_InCallAll(t *testing.T) {
|
func TestBackendServer_InCallAll(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
for _, subtest := range clusteredTests {
|
for _, subtest := range clusteredTests {
|
||||||
t.Run(subtest, func(t *testing.T) {
|
t.Run(subtest, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var hub1 *Hub
|
var hub1 *Hub
|
||||||
var hub2 *Hub
|
var hub2 *Hub
|
||||||
var server1 *httptest.Server
|
var server1 *httptest.Server
|
||||||
|
@ -1471,8 +1500,8 @@ func TestBackendServer_InCallAll(t *testing.T) {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
} else if !in_call_1.All {
|
} else if !in_call_1.All {
|
||||||
t.Errorf("All flag not set in message %+v", in_call_1)
|
t.Errorf("All flag not set in message %+v", in_call_1)
|
||||||
} else if !bytes.Equal(*in_call_1.InCall, []byte("7")) {
|
} else if !bytes.Equal(in_call_1.InCall, []byte("7")) {
|
||||||
t.Errorf("Expected inCall flag 7, got %s", string(*in_call_1.InCall))
|
t.Errorf("Expected inCall flag 7, got %s", string(in_call_1.InCall))
|
||||||
}
|
}
|
||||||
|
|
||||||
if msg2_a, err := client2.RunUntilMessage(ctx); err != nil {
|
if msg2_a, err := client2.RunUntilMessage(ctx); err != nil {
|
||||||
|
@ -1481,8 +1510,8 @@ func TestBackendServer_InCallAll(t *testing.T) {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
} else if !in_call_1.All {
|
} else if !in_call_1.All {
|
||||||
t.Errorf("All flag not set in message %+v", in_call_1)
|
t.Errorf("All flag not set in message %+v", in_call_1)
|
||||||
} else if !bytes.Equal(*in_call_1.InCall, []byte("7")) {
|
} else if !bytes.Equal(in_call_1.InCall, []byte("7")) {
|
||||||
t.Errorf("Expected inCall flag 7, got %s", string(*in_call_1.InCall))
|
t.Errorf("Expected inCall flag 7, got %s", string(in_call_1.InCall))
|
||||||
}
|
}
|
||||||
|
|
||||||
if !room1.IsSessionInCall(session1) {
|
if !room1.IsSessionInCall(session1) {
|
||||||
|
@ -1552,8 +1581,8 @@ func TestBackendServer_InCallAll(t *testing.T) {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
} else if !in_call_1.All {
|
} else if !in_call_1.All {
|
||||||
t.Errorf("All flag not set in message %+v", in_call_1)
|
t.Errorf("All flag not set in message %+v", in_call_1)
|
||||||
} else if !bytes.Equal(*in_call_1.InCall, []byte("0")) {
|
} else if !bytes.Equal(in_call_1.InCall, []byte("0")) {
|
||||||
t.Errorf("Expected inCall flag 0, got %s", string(*in_call_1.InCall))
|
t.Errorf("Expected inCall flag 0, got %s", string(in_call_1.InCall))
|
||||||
}
|
}
|
||||||
|
|
||||||
if msg2_a, err := client2.RunUntilMessage(ctx); err != nil {
|
if msg2_a, err := client2.RunUntilMessage(ctx); err != nil {
|
||||||
|
@ -1562,8 +1591,8 @@ func TestBackendServer_InCallAll(t *testing.T) {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
} else if !in_call_1.All {
|
} else if !in_call_1.All {
|
||||||
t.Errorf("All flag not set in message %+v", in_call_1)
|
t.Errorf("All flag not set in message %+v", in_call_1)
|
||||||
} else if !bytes.Equal(*in_call_1.InCall, []byte("0")) {
|
} else if !bytes.Equal(in_call_1.InCall, []byte("0")) {
|
||||||
t.Errorf("Expected inCall flag 0, got %s", string(*in_call_1.InCall))
|
t.Errorf("Expected inCall flag 0, got %s", string(in_call_1.InCall))
|
||||||
}
|
}
|
||||||
|
|
||||||
if room1.IsSessionInCall(session1) {
|
if room1.IsSessionInCall(session1) {
|
||||||
|
@ -1595,6 +1624,8 @@ func TestBackendServer_InCallAll(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_RoomMessage(t *testing.T) {
|
func TestBackendServer_RoomMessage(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
||||||
|
|
||||||
client := NewTestClient(t, server, hub)
|
client := NewTestClient(t, server, hub)
|
||||||
|
@ -1628,7 +1659,7 @@ func TestBackendServer_RoomMessage(t *testing.T) {
|
||||||
msg := &BackendServerRoomRequest{
|
msg := &BackendServerRoomRequest{
|
||||||
Type: "message",
|
Type: "message",
|
||||||
Message: &BackendRoomMessageRequest{
|
Message: &BackendRoomMessageRequest{
|
||||||
Data: &messageData,
|
Data: messageData,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1654,12 +1685,14 @@ func TestBackendServer_RoomMessage(t *testing.T) {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
} else if message.RoomId != roomId {
|
} else if message.RoomId != roomId {
|
||||||
t.Errorf("Expected message for room %s, got %s", roomId, message.RoomId)
|
t.Errorf("Expected message for room %s, got %s", roomId, message.RoomId)
|
||||||
} else if !bytes.Equal(messageData, *message.Data) {
|
} else if !bytes.Equal(messageData, message.Data) {
|
||||||
t.Errorf("Expected message data %s, got %s", string(messageData), string(*message.Data))
|
t.Errorf("Expected message data %s, got %s", string(messageData), string(message.Data))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_TurnCredentials(t *testing.T) {
|
func TestBackendServer_TurnCredentials(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
_, _, _, _, _, server := CreateBackendServerForTestWithTurn(t)
|
_, _, _, _, _, server := CreateBackendServerForTestWithTurn(t)
|
||||||
|
|
||||||
q := make(url.Values)
|
q := make(url.Values)
|
||||||
|
@ -1703,7 +1736,9 @@ func TestBackendServer_TurnCredentials(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_StatsAllowedIps(t *testing.T) {
|
func TestBackendServer_StatsAllowedIps(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
config := goconf.NewConfigFile()
|
config := goconf.NewConfigFile()
|
||||||
|
config.AddOption("app", "trustedproxies", "1.2.3.4")
|
||||||
config.AddOption("stats", "allowed_ips", "127.0.0.1, 192.168.0.1, 192.168.1.1/24")
|
config.AddOption("stats", "allowed_ips", "127.0.0.1, 192.168.0.1, 192.168.1.1/24")
|
||||||
_, backend, _, _, _, _ := CreateBackendServerForTestFromConfig(t, config)
|
_, backend, _, _, _, _ := CreateBackendServerForTestFromConfig(t, config)
|
||||||
|
|
||||||
|
@ -1720,7 +1755,9 @@ func TestBackendServer_StatsAllowedIps(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, addr := range allowed {
|
for _, addr := range allowed {
|
||||||
|
addr := addr
|
||||||
t.Run(addr, func(t *testing.T) {
|
t.Run(addr, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
r1 := &http.Request{
|
r1 := &http.Request{
|
||||||
RemoteAddr: addr,
|
RemoteAddr: addr,
|
||||||
}
|
}
|
||||||
|
@ -1728,6 +1765,10 @@ func TestBackendServer_StatsAllowedIps(t *testing.T) {
|
||||||
t.Errorf("should allow %s", addr)
|
t.Errorf("should allow %s", addr)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if host, _, err := net.SplitHostPort(addr); err == nil {
|
||||||
|
addr = host
|
||||||
|
}
|
||||||
|
|
||||||
r2 := &http.Request{
|
r2 := &http.Request{
|
||||||
RemoteAddr: "1.2.3.4:12345",
|
RemoteAddr: "1.2.3.4:12345",
|
||||||
Header: http.Header{
|
Header: http.Header{
|
||||||
|
@ -1761,7 +1802,9 @@ func TestBackendServer_StatsAllowedIps(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, addr := range notAllowed {
|
for _, addr := range notAllowed {
|
||||||
|
addr := addr
|
||||||
t.Run(addr, func(t *testing.T) {
|
t.Run(addr, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
r := &http.Request{
|
r := &http.Request{
|
||||||
RemoteAddr: addr,
|
RemoteAddr: addr,
|
||||||
}
|
}
|
||||||
|
@ -1773,6 +1816,7 @@ func TestBackendServer_StatsAllowedIps(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_IsNumeric(t *testing.T) {
|
func Test_IsNumeric(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
numeric := []string{
|
numeric := []string{
|
||||||
"0",
|
"0",
|
||||||
"1",
|
"1",
|
||||||
|
@ -1802,6 +1846,8 @@ func Test_IsNumeric(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_DialoutNoSipBridge(t *testing.T) {
|
func TestBackendServer_DialoutNoSipBridge(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
||||||
|
|
||||||
client := NewTestClient(t, server, hub)
|
client := NewTestClient(t, server, hub)
|
||||||
|
@ -1860,6 +1906,8 @@ func TestBackendServer_DialoutNoSipBridge(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_DialoutAccepted(t *testing.T) {
|
func TestBackendServer_DialoutAccepted(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
||||||
|
|
||||||
client := NewTestClient(t, server, hub)
|
client := NewTestClient(t, server, hub)
|
||||||
|
@ -1966,6 +2014,8 @@ func TestBackendServer_DialoutAccepted(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_DialoutAcceptedCompat(t *testing.T) {
|
func TestBackendServer_DialoutAcceptedCompat(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
||||||
|
|
||||||
client := NewTestClient(t, server, hub)
|
client := NewTestClient(t, server, hub)
|
||||||
|
@ -2072,6 +2122,8 @@ func TestBackendServer_DialoutAcceptedCompat(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackendServer_DialoutRejected(t *testing.T) {
|
func TestBackendServer_DialoutRejected(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
_, _, _, hub, _, server := CreateBackendServerForTest(t)
|
||||||
|
|
||||||
client := NewTestClient(t, server, hub)
|
client := NewTestClient(t, server, hub)
|
||||||
|
|
|
@ -24,10 +24,10 @@ package signaling
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"net/url"
|
"net/url"
|
||||||
"sync"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/dlintw/goconf"
|
"github.com/dlintw/goconf"
|
||||||
|
@ -43,8 +43,10 @@ type backendStorageEtcd struct {
|
||||||
|
|
||||||
initializedCtx context.Context
|
initializedCtx context.Context
|
||||||
initializedFunc context.CancelFunc
|
initializedFunc context.CancelFunc
|
||||||
initializedWg sync.WaitGroup
|
|
||||||
wakeupChanForTesting chan struct{}
|
wakeupChanForTesting chan struct{}
|
||||||
|
|
||||||
|
closeCtx context.Context
|
||||||
|
closeFunc context.CancelFunc
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewBackendStorageEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient) (BackendStorage, error) {
|
func NewBackendStorageEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient) (BackendStorage, error) {
|
||||||
|
@ -58,6 +60,7 @@ func NewBackendStorageEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient) (B
|
||||||
}
|
}
|
||||||
|
|
||||||
initializedCtx, initializedFunc := context.WithCancel(context.Background())
|
initializedCtx, initializedFunc := context.WithCancel(context.Background())
|
||||||
|
closeCtx, closeFunc := context.WithCancel(context.Background())
|
||||||
result := &backendStorageEtcd{
|
result := &backendStorageEtcd{
|
||||||
backendStorageCommon: backendStorageCommon{
|
backendStorageCommon: backendStorageCommon{
|
||||||
backends: make(map[string][]*Backend),
|
backends: make(map[string][]*Backend),
|
||||||
|
@ -68,6 +71,8 @@ func NewBackendStorageEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient) (B
|
||||||
|
|
||||||
initializedCtx: initializedCtx,
|
initializedCtx: initializedCtx,
|
||||||
initializedFunc: initializedFunc,
|
initializedFunc: initializedFunc,
|
||||||
|
closeCtx: closeCtx,
|
||||||
|
closeFunc: closeFunc,
|
||||||
}
|
}
|
||||||
|
|
||||||
etcdClient.AddListener(result)
|
etcdClient.AddListener(result)
|
||||||
|
@ -95,15 +100,12 @@ func (s *backendStorageEtcd) wakeupForTesting() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *backendStorageEtcd) EtcdClientCreated(client *EtcdClient) {
|
func (s *backendStorageEtcd) EtcdClientCreated(client *EtcdClient) {
|
||||||
s.initializedWg.Add(1)
|
|
||||||
go func() {
|
go func() {
|
||||||
if err := client.Watch(context.Background(), s.keyPrefix, s, clientv3.WithPrefix()); err != nil {
|
if err := client.WaitForConnection(s.closeCtx); err != nil {
|
||||||
log.Printf("Error processing watch for %s: %s", s.keyPrefix, err)
|
if errors.Is(err, context.Canceled) {
|
||||||
}
|
return
|
||||||
}()
|
}
|
||||||
|
|
||||||
go func() {
|
|
||||||
if err := client.WaitForConnection(context.Background()); err != nil {
|
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -111,41 +113,61 @@ func (s *backendStorageEtcd) EtcdClientCreated(client *EtcdClient) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
for {
|
for s.closeCtx.Err() == nil {
|
||||||
response, err := s.getBackends(client, s.keyPrefix)
|
response, err := s.getBackends(s.closeCtx, client, s.keyPrefix)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == context.DeadlineExceeded {
|
if errors.Is(err, context.Canceled) {
|
||||||
|
return
|
||||||
|
} else if errors.Is(err, context.DeadlineExceeded) {
|
||||||
log.Printf("Timeout getting initial list of backends, retry in %s", backoff.NextWait())
|
log.Printf("Timeout getting initial list of backends, retry in %s", backoff.NextWait())
|
||||||
} else {
|
} else {
|
||||||
log.Printf("Could not get initial list of backends, retry in %s: %s", backoff.NextWait(), err)
|
log.Printf("Could not get initial list of backends, retry in %s: %s", backoff.NextWait(), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
backoff.Wait(context.Background())
|
backoff.Wait(s.closeCtx)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, ev := range response.Kvs {
|
for _, ev := range response.Kvs {
|
||||||
s.EtcdKeyUpdated(client, string(ev.Key), ev.Value)
|
s.EtcdKeyUpdated(client, string(ev.Key), ev.Value, nil)
|
||||||
}
|
}
|
||||||
s.initializedWg.Wait()
|
|
||||||
s.initializedFunc()
|
s.initializedFunc()
|
||||||
|
|
||||||
|
nextRevision := response.Header.Revision + 1
|
||||||
|
prevRevision := nextRevision
|
||||||
|
backoff.Reset()
|
||||||
|
for s.closeCtx.Err() == nil {
|
||||||
|
var err error
|
||||||
|
if nextRevision, err = client.Watch(s.closeCtx, s.keyPrefix, nextRevision, s, clientv3.WithPrefix()); err != nil {
|
||||||
|
log.Printf("Error processing watch for %s (%s), retry in %s", s.keyPrefix, err, backoff.NextWait())
|
||||||
|
backoff.Wait(s.closeCtx)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if nextRevision != prevRevision {
|
||||||
|
backoff.Reset()
|
||||||
|
prevRevision = nextRevision
|
||||||
|
} else {
|
||||||
|
log.Printf("Processing watch for %s interrupted, retry in %s", s.keyPrefix, backoff.NextWait())
|
||||||
|
backoff.Wait(s.closeCtx)
|
||||||
|
}
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *backendStorageEtcd) EtcdWatchCreated(client *EtcdClient, key string) {
|
func (s *backendStorageEtcd) EtcdWatchCreated(client *EtcdClient, key string) {
|
||||||
s.initializedWg.Done()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *backendStorageEtcd) getBackends(client *EtcdClient, keyPrefix string) (*clientv3.GetResponse, error) {
|
func (s *backendStorageEtcd) getBackends(ctx context.Context, client *EtcdClient, keyPrefix string) (*clientv3.GetResponse, error) {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
return client.Get(ctx, keyPrefix, clientv3.WithPrefix())
|
return client.Get(ctx, keyPrefix, clientv3.WithPrefix())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *backendStorageEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data []byte) {
|
func (s *backendStorageEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data []byte, prevValue []byte) {
|
||||||
var info BackendInformationEtcd
|
var info BackendInformationEtcd
|
||||||
if err := json.Unmarshal(data, &info); err != nil {
|
if err := json.Unmarshal(data, &info); err != nil {
|
||||||
log.Printf("Could not decode backend information %s: %s", string(data), err)
|
log.Printf("Could not decode backend information %s: %s", string(data), err)
|
||||||
|
@ -205,7 +227,7 @@ func (s *backendStorageEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data
|
||||||
s.wakeupForTesting()
|
s.wakeupForTesting()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *backendStorageEtcd) EtcdKeyDeleted(client *EtcdClient, key string) {
|
func (s *backendStorageEtcd) EtcdKeyDeleted(client *EtcdClient, key string, prevValue []byte) {
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
defer s.mu.Unlock()
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
|
@ -241,6 +263,7 @@ func (s *backendStorageEtcd) EtcdKeyDeleted(client *EtcdClient, key string) {
|
||||||
|
|
||||||
func (s *backendStorageEtcd) Close() {
|
func (s *backendStorageEtcd) Close() {
|
||||||
s.etcdClient.RemoveListener(s)
|
s.etcdClient.RemoveListener(s)
|
||||||
|
s.closeFunc()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *backendStorageEtcd) Reload(config *goconf.ConfigFile) {
|
func (s *backendStorageEtcd) Reload(config *goconf.ConfigFile) {
|
||||||
|
|
|
@ -21,6 +21,13 @@
|
||||||
*/
|
*/
|
||||||
package signaling
|
package signaling
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/dlintw/goconf"
|
||||||
|
"go.etcd.io/etcd/server/v3/embed"
|
||||||
|
)
|
||||||
|
|
||||||
func (s *backendStorageEtcd) getWakeupChannelForTesting() <-chan struct{} {
|
func (s *backendStorageEtcd) getWakeupChannelForTesting() <-chan struct{} {
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
defer s.mu.Unlock()
|
defer s.mu.Unlock()
|
||||||
|
@ -33,3 +40,38 @@ func (s *backendStorageEtcd) getWakeupChannelForTesting() <-chan struct{} {
|
||||||
s.wakeupChanForTesting = ch
|
s.wakeupChanForTesting = ch
|
||||||
return ch
|
return ch
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type testListener struct {
|
||||||
|
etcd *embed.Etcd
|
||||||
|
closed chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tl *testListener) EtcdClientCreated(client *EtcdClient) {
|
||||||
|
tl.etcd.Server.Stop()
|
||||||
|
close(tl.closed)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_BackendStorageEtcdNoLeak(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
|
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
||||||
|
etcd, client := NewEtcdClientForTest(t)
|
||||||
|
tl := &testListener{
|
||||||
|
etcd: etcd,
|
||||||
|
closed: make(chan struct{}),
|
||||||
|
}
|
||||||
|
client.AddListener(tl)
|
||||||
|
defer client.RemoveListener(tl)
|
||||||
|
|
||||||
|
config := goconf.NewConfigFile()
|
||||||
|
config.AddOption("backend", "backendtype", "etcd")
|
||||||
|
config.AddOption("backend", "backendprefix", "/backends")
|
||||||
|
|
||||||
|
cfg, err := NewBackendConfiguration(config, client)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
<-tl.closed
|
||||||
|
cfg.Close()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
|
@ -28,6 +28,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestBackoff_Exponential(t *testing.T) {
|
func TestBackoff_Exponential(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
backoff, err := NewExponentialBackoff(100*time.Millisecond, 500*time.Millisecond)
|
backoff, err := NewExponentialBackoff(100*time.Millisecond, 500*time.Millisecond)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
|
|
@ -48,9 +48,6 @@ const (
|
||||||
maxInvalidateInterval = time.Minute
|
maxInvalidateInterval = time.Minute
|
||||||
)
|
)
|
||||||
|
|
||||||
// Can be overwritten by tests.
|
|
||||||
var getCapabilitiesNow = time.Now
|
|
||||||
|
|
||||||
type capabilitiesEntry struct {
|
type capabilitiesEntry struct {
|
||||||
nextUpdate time.Time
|
nextUpdate time.Time
|
||||||
capabilities map[string]interface{}
|
capabilities map[string]interface{}
|
||||||
|
@ -59,6 +56,9 @@ type capabilitiesEntry struct {
|
||||||
type Capabilities struct {
|
type Capabilities struct {
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
|
|
||||||
|
// Can be overwritten by tests.
|
||||||
|
getNow func() time.Time
|
||||||
|
|
||||||
version string
|
version string
|
||||||
pool *HttpClientPool
|
pool *HttpClientPool
|
||||||
entries map[string]*capabilitiesEntry
|
entries map[string]*capabilitiesEntry
|
||||||
|
@ -67,6 +67,8 @@ type Capabilities struct {
|
||||||
|
|
||||||
func NewCapabilities(version string, pool *HttpClientPool) (*Capabilities, error) {
|
func NewCapabilities(version string, pool *HttpClientPool) (*Capabilities, error) {
|
||||||
result := &Capabilities{
|
result := &Capabilities{
|
||||||
|
getNow: time.Now,
|
||||||
|
|
||||||
version: version,
|
version: version,
|
||||||
pool: pool,
|
pool: pool,
|
||||||
entries: make(map[string]*capabilitiesEntry),
|
entries: make(map[string]*capabilitiesEntry),
|
||||||
|
@ -86,15 +88,15 @@ type CapabilitiesVersion struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
type CapabilitiesResponse struct {
|
type CapabilitiesResponse struct {
|
||||||
Version CapabilitiesVersion `json:"version"`
|
Version CapabilitiesVersion `json:"version"`
|
||||||
Capabilities map[string]*json.RawMessage `json:"capabilities"`
|
Capabilities map[string]json.RawMessage `json:"capabilities"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Capabilities) getCapabilities(key string) (map[string]interface{}, bool) {
|
func (c *Capabilities) getCapabilities(key string) (map[string]interface{}, bool) {
|
||||||
c.mu.RLock()
|
c.mu.RLock()
|
||||||
defer c.mu.RUnlock()
|
defer c.mu.RUnlock()
|
||||||
|
|
||||||
now := getCapabilitiesNow()
|
now := c.getNow()
|
||||||
if entry, found := c.entries[key]; found && entry.nextUpdate.After(now) {
|
if entry, found := c.entries[key]; found && entry.nextUpdate.After(now) {
|
||||||
return entry.capabilities, true
|
return entry.capabilities, true
|
||||||
}
|
}
|
||||||
|
@ -103,14 +105,15 @@ func (c *Capabilities) getCapabilities(key string) (map[string]interface{}, bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Capabilities) setCapabilities(key string, capabilities map[string]interface{}) {
|
func (c *Capabilities) setCapabilities(key string, capabilities map[string]interface{}) {
|
||||||
now := getCapabilitiesNow()
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
|
now := c.getNow()
|
||||||
entry := &capabilitiesEntry{
|
entry := &capabilitiesEntry{
|
||||||
nextUpdate: now.Add(CapabilitiesCacheDuration),
|
nextUpdate: now.Add(CapabilitiesCacheDuration),
|
||||||
capabilities: capabilities,
|
capabilities: capabilities,
|
||||||
}
|
}
|
||||||
|
|
||||||
c.mu.Lock()
|
|
||||||
defer c.mu.Unlock()
|
|
||||||
c.entries[key] = entry
|
c.entries[key] = entry
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -118,7 +121,7 @@ func (c *Capabilities) invalidateCapabilities(key string) {
|
||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
defer c.mu.Unlock()
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
now := getCapabilitiesNow()
|
now := c.getNow()
|
||||||
if entry, found := c.nextInvalidate[key]; found && entry.After(now) {
|
if entry, found := c.nextInvalidate[key]; found && entry.After(now) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -188,25 +191,25 @@ func (c *Capabilities) loadCapabilities(ctx context.Context, u *url.URL) (map[st
|
||||||
if err := json.Unmarshal(body, &ocs); err != nil {
|
if err := json.Unmarshal(body, &ocs); err != nil {
|
||||||
log.Printf("Could not decode OCS response %s from %s: %s", string(body), capUrl.String(), err)
|
log.Printf("Could not decode OCS response %s from %s: %s", string(body), capUrl.String(), err)
|
||||||
return nil, false, err
|
return nil, false, err
|
||||||
} else if ocs.Ocs == nil || ocs.Ocs.Data == nil {
|
} else if ocs.Ocs == nil || len(ocs.Ocs.Data) == 0 {
|
||||||
log.Printf("Incomplete OCS response %s from %s", string(body), u)
|
log.Printf("Incomplete OCS response %s from %s", string(body), u)
|
||||||
return nil, false, fmt.Errorf("incomplete OCS response")
|
return nil, false, fmt.Errorf("incomplete OCS response")
|
||||||
}
|
}
|
||||||
|
|
||||||
var response CapabilitiesResponse
|
var response CapabilitiesResponse
|
||||||
if err := json.Unmarshal(*ocs.Ocs.Data, &response); err != nil {
|
if err := json.Unmarshal(ocs.Ocs.Data, &response); err != nil {
|
||||||
log.Printf("Could not decode OCS response body %s from %s: %s", string(*ocs.Ocs.Data), capUrl.String(), err)
|
log.Printf("Could not decode OCS response body %s from %s: %s", string(ocs.Ocs.Data), capUrl.String(), err)
|
||||||
return nil, false, err
|
return nil, false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
capaObj, found := response.Capabilities[AppNameSpreed]
|
capaObj, found := response.Capabilities[AppNameSpreed]
|
||||||
if !found || capaObj == nil {
|
if !found || len(capaObj) == 0 {
|
||||||
log.Printf("No capabilities received for app spreed from %s: %+v", capUrl.String(), response)
|
log.Printf("No capabilities received for app spreed from %s: %+v", capUrl.String(), response)
|
||||||
return nil, false, nil
|
return nil, false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var capa map[string]interface{}
|
var capa map[string]interface{}
|
||||||
if err := json.Unmarshal(*capaObj, &capa); err != nil {
|
if err := json.Unmarshal(capaObj, &capa); err != nil {
|
||||||
log.Printf("Unsupported capabilities received for app spreed from %s: %+v", capUrl.String(), response)
|
log.Printf("Unsupported capabilities received for app spreed from %s: %+v", capUrl.String(), response)
|
||||||
return nil, false, nil
|
return nil, false, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -80,9 +80,9 @@ func NewCapabilitiesForTestWithCallback(t *testing.T, callback func(*Capabilitie
|
||||||
Version: CapabilitiesVersion{
|
Version: CapabilitiesVersion{
|
||||||
Major: 20,
|
Major: 20,
|
||||||
},
|
},
|
||||||
Capabilities: map[string]*json.RawMessage{
|
Capabilities: map[string]json.RawMessage{
|
||||||
"anotherApp": (*json.RawMessage)(&emptyArray),
|
"anotherApp": emptyArray,
|
||||||
"spreed": (*json.RawMessage)(&spreedCapa),
|
"spreed": spreedCapa,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -102,7 +102,7 @@ func NewCapabilitiesForTestWithCallback(t *testing.T, callback func(*Capabilitie
|
||||||
StatusCode: http.StatusOK,
|
StatusCode: http.StatusOK,
|
||||||
Message: http.StatusText(http.StatusOK),
|
Message: http.StatusText(http.StatusOK),
|
||||||
},
|
},
|
||||||
Data: (*json.RawMessage)(&data),
|
Data: data,
|
||||||
}
|
}
|
||||||
if data, err = json.Marshal(ocs); err != nil {
|
if data, err = json.Marshal(ocs); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
@ -120,16 +120,25 @@ func NewCapabilitiesForTest(t *testing.T) (*url.URL, *Capabilities) {
|
||||||
return NewCapabilitiesForTestWithCallback(t, nil)
|
return NewCapabilitiesForTestWithCallback(t, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
func SetCapabilitiesGetNow(t *testing.T, f func() time.Time) {
|
func SetCapabilitiesGetNow(t *testing.T, capabilities *Capabilities, f func() time.Time) {
|
||||||
old := getCapabilitiesNow
|
capabilities.mu.Lock()
|
||||||
|
defer capabilities.mu.Unlock()
|
||||||
|
|
||||||
|
old := capabilities.getNow
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
getCapabilitiesNow = old
|
capabilities.mu.Lock()
|
||||||
|
defer capabilities.mu.Unlock()
|
||||||
|
|
||||||
|
capabilities.getNow = old
|
||||||
})
|
})
|
||||||
|
|
||||||
getCapabilitiesNow = f
|
capabilities.getNow = f
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCapabilities(t *testing.T) {
|
func TestCapabilities(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
url, capabilities := NewCapabilitiesForTest(t)
|
url, capabilities := NewCapabilitiesForTest(t)
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
||||||
|
@ -192,6 +201,8 @@ func TestCapabilities(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestInvalidateCapabilities(t *testing.T) {
|
func TestInvalidateCapabilities(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
var called atomic.Uint32
|
var called atomic.Uint32
|
||||||
url, capabilities := NewCapabilitiesForTestWithCallback(t, func(cr *CapabilitiesResponse) {
|
url, capabilities := NewCapabilitiesForTestWithCallback(t, func(cr *CapabilitiesResponse) {
|
||||||
called.Add(1)
|
called.Add(1)
|
||||||
|
@ -244,7 +255,7 @@ func TestInvalidateCapabilities(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// At a later time, invalidating can be done again.
|
// At a later time, invalidating can be done again.
|
||||||
SetCapabilitiesGetNow(t, func() time.Time {
|
SetCapabilitiesGetNow(t, capabilities, func() time.Time {
|
||||||
return time.Now().Add(2 * time.Minute)
|
return time.Now().Add(2 * time.Minute)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
|
|
@ -66,6 +66,11 @@ func NewCertificateReloader(certFile string, keyFile string) (*CertificateReload
|
||||||
return reloader, nil
|
return reloader, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *CertificateReloader) Close() {
|
||||||
|
r.keyWatcher.Close()
|
||||||
|
r.certWatcher.Close()
|
||||||
|
}
|
||||||
|
|
||||||
func (r *CertificateReloader) reload(filename string) {
|
func (r *CertificateReloader) reload(filename string) {
|
||||||
log.Printf("reloading certificate from %s with %s", r.certFile, r.keyFile)
|
log.Printf("reloading certificate from %s with %s", r.certFile, r.keyFile)
|
||||||
pair, err := tls.LoadX509KeyPair(r.certFile, r.keyFile)
|
pair, err := tls.LoadX509KeyPair(r.certFile, r.keyFile)
|
||||||
|
@ -135,6 +140,10 @@ func NewCertPoolReloader(certFile string) (*CertPoolReloader, error) {
|
||||||
return reloader, nil
|
return reloader, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *CertPoolReloader) Close() {
|
||||||
|
r.certWatcher.Close()
|
||||||
|
}
|
||||||
|
|
||||||
func (r *CertPoolReloader) reload(filename string) {
|
func (r *CertPoolReloader) reload(filename string) {
|
||||||
log.Printf("reloading certificate pool from %s", r.certFile)
|
log.Printf("reloading certificate pool from %s", r.certFile)
|
||||||
pool, err := loadCertPool(r.certFile)
|
pool, err := loadCertPool(r.certFile)
|
||||||
|
|
|
@ -28,6 +28,9 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
func UpdateCertificateCheckIntervalForTest(t *testing.T, interval time.Duration) {
|
func UpdateCertificateCheckIntervalForTest(t *testing.T, interval time.Duration) {
|
||||||
|
t.Helper()
|
||||||
|
// Make sure test is not executed with "t.Parallel()"
|
||||||
|
t.Setenv("PARALLEL_CHECK", "1")
|
||||||
old := deduplicateWatchEvents.Load()
|
old := deduplicateWatchEvents.Load()
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
deduplicateWatchEvents.Store(old)
|
deduplicateWatchEvents.Store(old)
|
||||||
|
|
144
client.go
144
client.go
|
@ -23,8 +23,11 @@ package signaling
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
"log"
|
"log"
|
||||||
|
"net"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
@ -92,26 +95,49 @@ type WritableClientMessage interface {
|
||||||
CloseAfterSend(session Session) bool
|
CloseAfterSend(session Session) bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type HandlerClient interface {
|
||||||
|
Context() context.Context
|
||||||
|
RemoteAddr() string
|
||||||
|
Country() string
|
||||||
|
UserAgent() string
|
||||||
|
IsConnected() bool
|
||||||
|
IsAuthenticated() bool
|
||||||
|
|
||||||
|
GetSession() Session
|
||||||
|
SetSession(session Session)
|
||||||
|
|
||||||
|
SendError(e *Error) bool
|
||||||
|
SendByeResponse(message *ClientMessage) bool
|
||||||
|
SendByeResponseWithReason(message *ClientMessage, reason string) bool
|
||||||
|
SendMessage(message WritableClientMessage) bool
|
||||||
|
|
||||||
|
Close()
|
||||||
|
}
|
||||||
|
|
||||||
type ClientHandler interface {
|
type ClientHandler interface {
|
||||||
OnClosed(*Client)
|
OnClosed(HandlerClient)
|
||||||
OnMessageReceived(*Client, []byte)
|
OnMessageReceived(HandlerClient, []byte)
|
||||||
OnRTTReceived(*Client, time.Duration)
|
OnRTTReceived(HandlerClient, time.Duration)
|
||||||
}
|
}
|
||||||
|
|
||||||
type ClientGeoIpHandler interface {
|
type ClientGeoIpHandler interface {
|
||||||
OnLookupCountry(*Client) string
|
OnLookupCountry(HandlerClient) string
|
||||||
}
|
}
|
||||||
|
|
||||||
type Client struct {
|
type Client struct {
|
||||||
|
ctx context.Context
|
||||||
conn *websocket.Conn
|
conn *websocket.Conn
|
||||||
addr string
|
addr string
|
||||||
handler ClientHandler
|
|
||||||
agent string
|
agent string
|
||||||
closed atomic.Int32
|
closed atomic.Int32
|
||||||
country *string
|
country *string
|
||||||
logRTT bool
|
logRTT bool
|
||||||
|
|
||||||
session atomic.Pointer[ClientSession]
|
handlerMu sync.RWMutex
|
||||||
|
handler ClientHandler
|
||||||
|
|
||||||
|
session atomic.Pointer[Session]
|
||||||
|
sessionId atomic.Pointer[string]
|
||||||
|
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
|
|
||||||
|
@ -121,7 +147,7 @@ type Client struct {
|
||||||
messageChan chan *bytes.Buffer
|
messageChan chan *bytes.Buffer
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewClient(conn *websocket.Conn, remoteAddress string, agent string, handler ClientHandler) (*Client, error) {
|
func NewClient(ctx context.Context, conn *websocket.Conn, remoteAddress string, agent string, handler ClientHandler) (*Client, error) {
|
||||||
remoteAddress = strings.TrimSpace(remoteAddress)
|
remoteAddress = strings.TrimSpace(remoteAddress)
|
||||||
if remoteAddress == "" {
|
if remoteAddress == "" {
|
||||||
remoteAddress = "unknown remote address"
|
remoteAddress = "unknown remote address"
|
||||||
|
@ -132,6 +158,7 @@ func NewClient(conn *websocket.Conn, remoteAddress string, agent string, handler
|
||||||
}
|
}
|
||||||
|
|
||||||
client := &Client{
|
client := &Client{
|
||||||
|
ctx: ctx,
|
||||||
agent: agent,
|
agent: agent,
|
||||||
logRTT: true,
|
logRTT: true,
|
||||||
}
|
}
|
||||||
|
@ -142,12 +169,28 @@ func NewClient(conn *websocket.Conn, remoteAddress string, agent string, handler
|
||||||
func (c *Client) SetConn(conn *websocket.Conn, remoteAddress string, handler ClientHandler) {
|
func (c *Client) SetConn(conn *websocket.Conn, remoteAddress string, handler ClientHandler) {
|
||||||
c.conn = conn
|
c.conn = conn
|
||||||
c.addr = remoteAddress
|
c.addr = remoteAddress
|
||||||
c.handler = handler
|
c.SetHandler(handler)
|
||||||
c.closer = NewCloser()
|
c.closer = NewCloser()
|
||||||
c.messageChan = make(chan *bytes.Buffer, 16)
|
c.messageChan = make(chan *bytes.Buffer, 16)
|
||||||
c.messagesDone = make(chan struct{})
|
c.messagesDone = make(chan struct{})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Client) SetHandler(handler ClientHandler) {
|
||||||
|
c.handlerMu.Lock()
|
||||||
|
defer c.handlerMu.Unlock()
|
||||||
|
c.handler = handler
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) getHandler() ClientHandler {
|
||||||
|
c.handlerMu.RLock()
|
||||||
|
defer c.handlerMu.RUnlock()
|
||||||
|
return c.handler
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) Context() context.Context {
|
||||||
|
return c.ctx
|
||||||
|
}
|
||||||
|
|
||||||
func (c *Client) IsConnected() bool {
|
func (c *Client) IsConnected() bool {
|
||||||
return c.closed.Load() == 0
|
return c.closed.Load() == 0
|
||||||
}
|
}
|
||||||
|
@ -156,12 +199,39 @@ func (c *Client) IsAuthenticated() bool {
|
||||||
return c.GetSession() != nil
|
return c.GetSession() != nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Client) GetSession() *ClientSession {
|
func (c *Client) GetSession() Session {
|
||||||
return c.session.Load()
|
session := c.session.Load()
|
||||||
|
if session == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return *session
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Client) SetSession(session *ClientSession) {
|
func (c *Client) SetSession(session Session) {
|
||||||
c.session.Store(session)
|
if session == nil {
|
||||||
|
c.session.Store(nil)
|
||||||
|
} else {
|
||||||
|
c.session.Store(&session)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) SetSessionId(sessionId string) {
|
||||||
|
c.sessionId.Store(&sessionId)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) GetSessionId() string {
|
||||||
|
sessionId := c.sessionId.Load()
|
||||||
|
if sessionId == nil {
|
||||||
|
session := c.GetSession()
|
||||||
|
if session == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return session.PublicId()
|
||||||
|
}
|
||||||
|
|
||||||
|
return *sessionId
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Client) RemoteAddr() string {
|
func (c *Client) RemoteAddr() string {
|
||||||
|
@ -175,7 +245,7 @@ func (c *Client) UserAgent() string {
|
||||||
func (c *Client) Country() string {
|
func (c *Client) Country() string {
|
||||||
if c.country == nil {
|
if c.country == nil {
|
||||||
var country string
|
var country string
|
||||||
if handler, ok := c.handler.(ClientGeoIpHandler); ok {
|
if handler, ok := c.getHandler().(ClientGeoIpHandler); ok {
|
||||||
country = handler.OnLookupCountry(c)
|
country = handler.OnLookupCountry(c)
|
||||||
} else {
|
} else {
|
||||||
country = unknownCountry
|
country = unknownCountry
|
||||||
|
@ -214,7 +284,7 @@ func (c *Client) doClose() {
|
||||||
c.closer.Close()
|
c.closer.Close()
|
||||||
<-c.messagesDone
|
<-c.messagesDone
|
||||||
|
|
||||||
c.handler.OnClosed(c)
|
c.getHandler().OnClosed(c)
|
||||||
c.SetSession(nil)
|
c.SetSession(nil)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -234,12 +304,14 @@ func (c *Client) SendByeResponse(message *ClientMessage) bool {
|
||||||
func (c *Client) SendByeResponseWithReason(message *ClientMessage, reason string) bool {
|
func (c *Client) SendByeResponseWithReason(message *ClientMessage, reason string) bool {
|
||||||
response := &ServerMessage{
|
response := &ServerMessage{
|
||||||
Type: "bye",
|
Type: "bye",
|
||||||
Bye: &ByeServerMessage{},
|
|
||||||
}
|
}
|
||||||
if message != nil {
|
if message != nil {
|
||||||
response.Id = message.Id
|
response.Id = message.Id
|
||||||
}
|
}
|
||||||
if reason != "" {
|
if reason != "" {
|
||||||
|
if response.Bye == nil {
|
||||||
|
response.Bye = &ByeServerMessage{}
|
||||||
|
}
|
||||||
response.Bye.Reason = reason
|
response.Bye.Reason = reason
|
||||||
}
|
}
|
||||||
return c.SendMessage(response)
|
return c.SendMessage(response)
|
||||||
|
@ -277,13 +349,13 @@ func (c *Client) ReadPump() {
|
||||||
rtt := now.Sub(time.Unix(0, ts))
|
rtt := now.Sub(time.Unix(0, ts))
|
||||||
if c.logRTT {
|
if c.logRTT {
|
||||||
rtt_ms := rtt.Nanoseconds() / time.Millisecond.Nanoseconds()
|
rtt_ms := rtt.Nanoseconds() / time.Millisecond.Nanoseconds()
|
||||||
if session := c.GetSession(); session != nil {
|
if sessionId := c.GetSessionId(); sessionId != "" {
|
||||||
log.Printf("Client %s has RTT of %d ms (%s)", session.PublicId(), rtt_ms, rtt)
|
log.Printf("Client %s has RTT of %d ms (%s)", sessionId, rtt_ms, rtt)
|
||||||
} else {
|
} else {
|
||||||
log.Printf("Client from %s has RTT of %d ms (%s)", addr, rtt_ms, rtt)
|
log.Printf("Client from %s has RTT of %d ms (%s)", addr, rtt_ms, rtt)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
c.handler.OnRTTReceived(c, rtt)
|
c.getHandler().OnRTTReceived(c, rtt)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
|
@ -292,12 +364,15 @@ func (c *Client) ReadPump() {
|
||||||
conn.SetReadDeadline(time.Now().Add(pongWait)) // nolint
|
conn.SetReadDeadline(time.Now().Add(pongWait)) // nolint
|
||||||
messageType, reader, err := conn.NextReader()
|
messageType, reader, err := conn.NextReader()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if _, ok := err.(*websocket.CloseError); !ok || websocket.IsUnexpectedCloseError(err,
|
// Gorilla websocket hides the original net.Error, so also compare error messages
|
||||||
|
if errors.Is(err, net.ErrClosed) || strings.Contains(err.Error(), net.ErrClosed.Error()) {
|
||||||
|
break
|
||||||
|
} else if _, ok := err.(*websocket.CloseError); !ok || websocket.IsUnexpectedCloseError(err,
|
||||||
websocket.CloseNormalClosure,
|
websocket.CloseNormalClosure,
|
||||||
websocket.CloseGoingAway,
|
websocket.CloseGoingAway,
|
||||||
websocket.CloseNoStatusReceived) {
|
websocket.CloseNoStatusReceived) {
|
||||||
if session := c.GetSession(); session != nil {
|
if sessionId := c.GetSessionId(); sessionId != "" {
|
||||||
log.Printf("Error reading from client %s: %v", session.PublicId(), err)
|
log.Printf("Error reading from client %s: %v", sessionId, err)
|
||||||
} else {
|
} else {
|
||||||
log.Printf("Error reading from %s: %v", addr, err)
|
log.Printf("Error reading from %s: %v", addr, err)
|
||||||
}
|
}
|
||||||
|
@ -306,8 +381,8 @@ func (c *Client) ReadPump() {
|
||||||
}
|
}
|
||||||
|
|
||||||
if messageType != websocket.TextMessage {
|
if messageType != websocket.TextMessage {
|
||||||
if session := c.GetSession(); session != nil {
|
if sessionId := c.GetSessionId(); sessionId != "" {
|
||||||
log.Printf("Unsupported message type %v from client %s", messageType, session.PublicId())
|
log.Printf("Unsupported message type %v from client %s", messageType, sessionId)
|
||||||
} else {
|
} else {
|
||||||
log.Printf("Unsupported message type %v from %s", messageType, addr)
|
log.Printf("Unsupported message type %v from %s", messageType, addr)
|
||||||
}
|
}
|
||||||
|
@ -319,8 +394,8 @@ func (c *Client) ReadPump() {
|
||||||
decodeBuffer.Reset()
|
decodeBuffer.Reset()
|
||||||
if _, err := decodeBuffer.ReadFrom(reader); err != nil {
|
if _, err := decodeBuffer.ReadFrom(reader); err != nil {
|
||||||
bufferPool.Put(decodeBuffer)
|
bufferPool.Put(decodeBuffer)
|
||||||
if session := c.GetSession(); session != nil {
|
if sessionId := c.GetSessionId(); sessionId != "" {
|
||||||
log.Printf("Error reading message from client %s: %v", session.PublicId(), err)
|
log.Printf("Error reading message from client %s: %v", sessionId, err)
|
||||||
} else {
|
} else {
|
||||||
log.Printf("Error reading message from %s: %v", addr, err)
|
log.Printf("Error reading message from %s: %v", addr, err)
|
||||||
}
|
}
|
||||||
|
@ -344,7 +419,7 @@ func (c *Client) processMessages() {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
c.handler.OnMessageReceived(c, buffer.Bytes())
|
c.getHandler().OnMessageReceived(c, buffer.Bytes())
|
||||||
bufferPool.Put(buffer)
|
bufferPool.Put(buffer)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -373,8 +448,8 @@ func (c *Client) writeInternal(message json.Marshaler) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if session := c.GetSession(); session != nil {
|
if sessionId := c.GetSessionId(); sessionId != "" {
|
||||||
log.Printf("Could not send message %+v to client %s: %v", message, session.PublicId(), err)
|
log.Printf("Could not send message %+v to client %s: %v", message, sessionId, err)
|
||||||
} else {
|
} else {
|
||||||
log.Printf("Could not send message %+v to %s: %v", message, c.RemoteAddr(), err)
|
log.Printf("Could not send message %+v to %s: %v", message, c.RemoteAddr(), err)
|
||||||
}
|
}
|
||||||
|
@ -386,8 +461,8 @@ func (c *Client) writeInternal(message json.Marshaler) bool {
|
||||||
close:
|
close:
|
||||||
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
|
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
|
||||||
if err := c.conn.WriteMessage(websocket.CloseMessage, closeData); err != nil {
|
if err := c.conn.WriteMessage(websocket.CloseMessage, closeData); err != nil {
|
||||||
if session := c.GetSession(); session != nil {
|
if sessionId := c.GetSessionId(); sessionId != "" {
|
||||||
log.Printf("Could not send close message to client %s: %v", session.PublicId(), err)
|
log.Printf("Could not send close message to client %s: %v", sessionId, err)
|
||||||
} else {
|
} else {
|
||||||
log.Printf("Could not send close message to %s: %v", c.RemoteAddr(), err)
|
log.Printf("Could not send close message to %s: %v", c.RemoteAddr(), err)
|
||||||
}
|
}
|
||||||
|
@ -413,8 +488,8 @@ func (c *Client) writeError(e error) bool { // nolint
|
||||||
closeData := websocket.FormatCloseMessage(websocket.CloseInternalServerErr, e.Error())
|
closeData := websocket.FormatCloseMessage(websocket.CloseInternalServerErr, e.Error())
|
||||||
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
|
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
|
||||||
if err := c.conn.WriteMessage(websocket.CloseMessage, closeData); err != nil {
|
if err := c.conn.WriteMessage(websocket.CloseMessage, closeData); err != nil {
|
||||||
if session := c.GetSession(); session != nil {
|
if sessionId := c.GetSessionId(); sessionId != "" {
|
||||||
log.Printf("Could not send close message to client %s: %v", session.PublicId(), err)
|
log.Printf("Could not send close message to client %s: %v", sessionId, err)
|
||||||
} else {
|
} else {
|
||||||
log.Printf("Could not send close message to %s: %v", c.RemoteAddr(), err)
|
log.Printf("Could not send close message to %s: %v", c.RemoteAddr(), err)
|
||||||
}
|
}
|
||||||
|
@ -445,7 +520,6 @@ func (c *Client) writeMessageLocked(message WritableClientMessage) bool {
|
||||||
go session.Close()
|
go session.Close()
|
||||||
}
|
}
|
||||||
go c.Close()
|
go c.Close()
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return true
|
return true
|
||||||
|
@ -462,8 +536,8 @@ func (c *Client) sendPing() bool {
|
||||||
msg := strconv.FormatInt(now, 10)
|
msg := strconv.FormatInt(now, 10)
|
||||||
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
|
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
|
||||||
if err := c.conn.WriteMessage(websocket.PingMessage, []byte(msg)); err != nil {
|
if err := c.conn.WriteMessage(websocket.PingMessage, []byte(msg)); err != nil {
|
||||||
if session := c.GetSession(); session != nil {
|
if sessionId := c.GetSessionId(); sessionId != "" {
|
||||||
log.Printf("Could not send ping to client %s: %v", session.PublicId(), err)
|
log.Printf("Could not send ping to client %s: %v", sessionId, err)
|
||||||
} else {
|
} else {
|
||||||
log.Printf("Could not send ping to %s: %v", c.RemoteAddr(), err)
|
log.Printf("Could not send ping to %s: %v", c.RemoteAddr(), err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -248,7 +248,7 @@ func (c *SignalingClient) PublicSessionId() string {
|
||||||
|
|
||||||
func (c *SignalingClient) processMessageMessage(message *signaling.ServerMessage) {
|
func (c *SignalingClient) processMessageMessage(message *signaling.ServerMessage) {
|
||||||
var msg MessagePayload
|
var msg MessagePayload
|
||||||
if err := json.Unmarshal(*message.Message.Data, &msg); err != nil {
|
if err := json.Unmarshal(message.Message.Data, &msg); err != nil {
|
||||||
log.Println("Error in unmarshal", err)
|
log.Println("Error in unmarshal", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -404,7 +404,7 @@ func (c *SignalingClient) SendMessages(clients []*SignalingClient) {
|
||||||
Type: "session",
|
Type: "session",
|
||||||
SessionId: sessionIds[recipient],
|
SessionId: sessionIds[recipient],
|
||||||
},
|
},
|
||||||
Data: (*json.RawMessage)(&data),
|
Data: data,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
sender.Send(msg)
|
sender.Send(msg)
|
||||||
|
@ -461,7 +461,7 @@ func registerAuthHandler(router *mux.Router) {
|
||||||
StatusCode: http.StatusOK,
|
StatusCode: http.StatusOK,
|
||||||
Message: http.StatusText(http.StatusOK),
|
Message: http.StatusText(http.StatusOK),
|
||||||
},
|
},
|
||||||
Data: &rawdata,
|
Data: rawdata,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -601,9 +601,9 @@ func main() {
|
||||||
Type: "hello",
|
Type: "hello",
|
||||||
Hello: &signaling.HelloClientMessage{
|
Hello: &signaling.HelloClientMessage{
|
||||||
Version: signaling.HelloVersionV1,
|
Version: signaling.HelloVersionV1,
|
||||||
Auth: signaling.HelloClientMessageAuth{
|
Auth: &signaling.HelloClientMessageAuth{
|
||||||
Url: backendUrl + "/auth",
|
Url: backendUrl + "/auth",
|
||||||
Params: &json.RawMessage{'{', '}'},
|
Params: json.RawMessage("{}"),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
150
clientsession.go
150
clientsession.go
|
@ -36,9 +36,6 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// Sessions expire 30 seconds after the connection closed.
|
|
||||||
sessionExpireDuration = 30 * time.Second
|
|
||||||
|
|
||||||
// Warn if a session has 32 or more pending messages.
|
// Warn if a session has 32 or more pending messages.
|
||||||
warnPendingMessagesCount = 32
|
warnPendingMessagesCount = 32
|
||||||
|
|
||||||
|
@ -54,11 +51,13 @@ type ClientSession struct {
|
||||||
privateId string
|
privateId string
|
||||||
publicId string
|
publicId string
|
||||||
data *SessionIdData
|
data *SessionIdData
|
||||||
|
ctx context.Context
|
||||||
|
closeFunc context.CancelFunc
|
||||||
|
|
||||||
clientType string
|
clientType string
|
||||||
features []string
|
features []string
|
||||||
userId string
|
userId string
|
||||||
userData *json.RawMessage
|
userData json.RawMessage
|
||||||
|
|
||||||
inCall Flags
|
inCall Flags
|
||||||
supportsPermissions bool
|
supportsPermissions bool
|
||||||
|
@ -68,14 +67,14 @@ type ClientSession struct {
|
||||||
backendUrl string
|
backendUrl string
|
||||||
parsedBackendUrl *url.URL
|
parsedBackendUrl *url.URL
|
||||||
|
|
||||||
expires time.Time
|
|
||||||
|
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
|
|
||||||
client *Client
|
client HandlerClient
|
||||||
room atomic.Pointer[Room]
|
room atomic.Pointer[Room]
|
||||||
roomJoinTime atomic.Int64
|
roomJoinTime atomic.Int64
|
||||||
roomSessionId string
|
|
||||||
|
roomSessionIdLock sync.RWMutex
|
||||||
|
roomSessionId string
|
||||||
|
|
||||||
publisherWaiters ChannelWaiters
|
publisherWaiters ChannelWaiters
|
||||||
|
|
||||||
|
@ -96,12 +95,15 @@ type ClientSession struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewClientSession(hub *Hub, privateId string, publicId string, data *SessionIdData, backend *Backend, hello *HelloClientMessage, auth *BackendClientAuthResponse) (*ClientSession, error) {
|
func NewClientSession(hub *Hub, privateId string, publicId string, data *SessionIdData, backend *Backend, hello *HelloClientMessage, auth *BackendClientAuthResponse) (*ClientSession, error) {
|
||||||
|
ctx, closeFunc := context.WithCancel(context.Background())
|
||||||
s := &ClientSession{
|
s := &ClientSession{
|
||||||
hub: hub,
|
hub: hub,
|
||||||
events: hub.events,
|
events: hub.events,
|
||||||
privateId: privateId,
|
privateId: privateId,
|
||||||
publicId: publicId,
|
publicId: publicId,
|
||||||
data: data,
|
data: data,
|
||||||
|
ctx: ctx,
|
||||||
|
closeFunc: closeFunc,
|
||||||
|
|
||||||
clientType: hello.Auth.Type,
|
clientType: hello.Auth.Type,
|
||||||
features: hello.Features,
|
features: hello.Features,
|
||||||
|
@ -145,6 +147,10 @@ func NewClientSession(hub *Hub, privateId string, publicId string, data *Session
|
||||||
return s, nil
|
return s, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *ClientSession) Context() context.Context {
|
||||||
|
return s.ctx
|
||||||
|
}
|
||||||
|
|
||||||
func (s *ClientSession) PrivateId() string {
|
func (s *ClientSession) PrivateId() string {
|
||||||
return s.privateId
|
return s.privateId
|
||||||
}
|
}
|
||||||
|
@ -154,8 +160,8 @@ func (s *ClientSession) PublicId() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ClientSession) RoomSessionId() string {
|
func (s *ClientSession) RoomSessionId() string {
|
||||||
s.mu.Lock()
|
s.roomSessionIdLock.RLock()
|
||||||
defer s.mu.Unlock()
|
defer s.roomSessionIdLock.RUnlock()
|
||||||
return s.roomSessionId
|
return s.roomSessionId
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -309,25 +315,10 @@ func (s *ClientSession) UserId() string {
|
||||||
return userId
|
return userId
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ClientSession) UserData() *json.RawMessage {
|
func (s *ClientSession) UserData() json.RawMessage {
|
||||||
return s.userData
|
return s.userData
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ClientSession) StartExpire() {
|
|
||||||
// The hub mutex must be held when calling this method.
|
|
||||||
s.expires = time.Now().Add(sessionExpireDuration)
|
|
||||||
s.hub.expiredSessions[s] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ClientSession) StopExpire() {
|
|
||||||
// The hub mutex must be held when calling this method.
|
|
||||||
delete(s.hub.expiredSessions, s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ClientSession) IsExpired(now time.Time) bool {
|
|
||||||
return now.After(s.expires)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ClientSession) SetRoom(room *Room) {
|
func (s *ClientSession) SetRoom(room *Room) {
|
||||||
s.room.Store(room)
|
s.room.Store(room)
|
||||||
if room != nil {
|
if room != nil {
|
||||||
|
@ -357,7 +348,7 @@ func (s *ClientSession) getRoomJoinTime() time.Time {
|
||||||
func (s *ClientSession) releaseMcuObjects() {
|
func (s *ClientSession) releaseMcuObjects() {
|
||||||
if len(s.publishers) > 0 {
|
if len(s.publishers) > 0 {
|
||||||
go func(publishers map[StreamType]McuPublisher) {
|
go func(publishers map[StreamType]McuPublisher) {
|
||||||
ctx := context.TODO()
|
ctx := context.Background()
|
||||||
for _, publisher := range publishers {
|
for _, publisher := range publishers {
|
||||||
publisher.Close(ctx)
|
publisher.Close(ctx)
|
||||||
}
|
}
|
||||||
|
@ -366,7 +357,7 @@ func (s *ClientSession) releaseMcuObjects() {
|
||||||
}
|
}
|
||||||
if len(s.subscribers) > 0 {
|
if len(s.subscribers) > 0 {
|
||||||
go func(subscribers map[string]McuSubscriber) {
|
go func(subscribers map[string]McuSubscriber) {
|
||||||
ctx := context.TODO()
|
ctx := context.Background()
|
||||||
for _, subscriber := range subscribers {
|
for _, subscriber := range subscribers {
|
||||||
subscriber.Close(ctx)
|
subscriber.Close(ctx)
|
||||||
}
|
}
|
||||||
|
@ -380,6 +371,7 @@ func (s *ClientSession) Close() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ClientSession) closeAndWait(wait bool) {
|
func (s *ClientSession) closeAndWait(wait bool) {
|
||||||
|
s.closeFunc()
|
||||||
s.hub.removeSession(s)
|
s.hub.removeSession(s)
|
||||||
|
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
|
@ -413,8 +405,8 @@ func (s *ClientSession) SubscribeEvents() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ClientSession) UpdateRoomSessionId(roomSessionId string) error {
|
func (s *ClientSession) UpdateRoomSessionId(roomSessionId string) error {
|
||||||
s.mu.Lock()
|
s.roomSessionIdLock.Lock()
|
||||||
defer s.mu.Unlock()
|
defer s.roomSessionIdLock.Unlock()
|
||||||
|
|
||||||
if s.roomSessionId == roomSessionId {
|
if s.roomSessionId == roomSessionId {
|
||||||
return nil
|
return nil
|
||||||
|
@ -443,8 +435,8 @@ func (s *ClientSession) UpdateRoomSessionId(roomSessionId string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ClientSession) SubscribeRoomEvents(roomid string, roomSessionId string) error {
|
func (s *ClientSession) SubscribeRoomEvents(roomid string, roomSessionId string) error {
|
||||||
s.mu.Lock()
|
s.roomSessionIdLock.Lock()
|
||||||
defer s.mu.Unlock()
|
defer s.roomSessionIdLock.Unlock()
|
||||||
|
|
||||||
if err := s.events.RegisterRoomListener(roomid, s.backend, s); err != nil {
|
if err := s.events.RegisterRoomListener(roomid, s.backend, s); err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -503,6 +495,9 @@ func (s *ClientSession) doUnsubscribeRoomEvents(notify bool) {
|
||||||
s.events.UnregisterRoomListener(room.Id(), s.Backend(), s)
|
s.events.UnregisterRoomListener(room.Id(), s.Backend(), s)
|
||||||
}
|
}
|
||||||
s.hub.roomSessions.DeleteRoomSession(s)
|
s.hub.roomSessions.DeleteRoomSession(s)
|
||||||
|
|
||||||
|
s.roomSessionIdLock.Lock()
|
||||||
|
defer s.roomSessionIdLock.Unlock()
|
||||||
if notify && room != nil && s.roomSessionId != "" {
|
if notify && room != nil && s.roomSessionId != "" {
|
||||||
// Notify
|
// Notify
|
||||||
go func(sid string) {
|
go func(sid string) {
|
||||||
|
@ -520,14 +515,14 @@ func (s *ClientSession) doUnsubscribeRoomEvents(notify bool) {
|
||||||
s.roomSessionId = ""
|
s.roomSessionId = ""
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ClientSession) ClearClient(client *Client) {
|
func (s *ClientSession) ClearClient(client HandlerClient) {
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
defer s.mu.Unlock()
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
s.clearClientLocked(client)
|
s.clearClientLocked(client)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ClientSession) clearClientLocked(client *Client) {
|
func (s *ClientSession) clearClientLocked(client HandlerClient) {
|
||||||
if s.client == nil {
|
if s.client == nil {
|
||||||
return
|
return
|
||||||
} else if client != nil && s.client != client {
|
} else if client != nil && s.client != client {
|
||||||
|
@ -540,18 +535,18 @@ func (s *ClientSession) clearClientLocked(client *Client) {
|
||||||
prevClient.SetSession(nil)
|
prevClient.SetSession(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ClientSession) GetClient() *Client {
|
func (s *ClientSession) GetClient() HandlerClient {
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
defer s.mu.Unlock()
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
return s.getClientUnlocked()
|
return s.getClientUnlocked()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ClientSession) getClientUnlocked() *Client {
|
func (s *ClientSession) getClientUnlocked() HandlerClient {
|
||||||
return s.client
|
return s.client
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ClientSession) SetClient(client *Client) *Client {
|
func (s *ClientSession) SetClient(client HandlerClient) HandlerClient {
|
||||||
if client == nil {
|
if client == nil {
|
||||||
panic("Use ClearClient to set the client to nil")
|
panic("Use ClearClient to set the client to nil")
|
||||||
}
|
}
|
||||||
|
@ -594,7 +589,7 @@ func (s *ClientSession) sendOffer(client McuClient, sender string, streamType St
|
||||||
Type: "session",
|
Type: "session",
|
||||||
SessionId: sender,
|
SessionId: sender,
|
||||||
},
|
},
|
||||||
Data: (*json.RawMessage)(&offer_data),
|
Data: offer_data,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -624,7 +619,7 @@ func (s *ClientSession) sendCandidate(client McuClient, sender string, streamTyp
|
||||||
Type: "session",
|
Type: "session",
|
||||||
SessionId: sender,
|
SessionId: sender,
|
||||||
},
|
},
|
||||||
Data: (*json.RawMessage)(&candidate_data),
|
Data: candidate_data,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -740,23 +735,6 @@ func (s *ClientSession) SubscriberClosed(subscriber McuSubscriber) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type SdpError struct {
|
|
||||||
message string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *SdpError) Error() string {
|
|
||||||
return e.message
|
|
||||||
}
|
|
||||||
|
|
||||||
type WrappedSdpError struct {
|
|
||||||
SdpError
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *WrappedSdpError) Unwrap() error {
|
|
||||||
return e.err
|
|
||||||
}
|
|
||||||
|
|
||||||
type PermissionError struct {
|
type PermissionError struct {
|
||||||
permission Permission
|
permission Permission
|
||||||
}
|
}
|
||||||
|
@ -769,23 +747,10 @@ func (e *PermissionError) Error() string {
|
||||||
return fmt.Sprintf("permission \"%s\" not found", e.permission)
|
return fmt.Sprintf("permission \"%s\" not found", e.permission)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ClientSession) isSdpAllowedToSendLocked(payload map[string]interface{}) (MediaType, error) {
|
func (s *ClientSession) isSdpAllowedToSendLocked(sdp *sdp.SessionDescription) (MediaType, error) {
|
||||||
sdpValue, found := payload["sdp"]
|
if sdp == nil {
|
||||||
if !found {
|
// Should have already been checked when data was validated.
|
||||||
return 0, &SdpError{"payload does not contain a sdp"}
|
return 0, ErrNoSdp
|
||||||
}
|
|
||||||
sdpText, ok := sdpValue.(string)
|
|
||||||
if !ok {
|
|
||||||
return 0, &SdpError{"payload does not contain a valid sdp"}
|
|
||||||
}
|
|
||||||
var sdp sdp.SessionDescription
|
|
||||||
if err := sdp.Unmarshal([]byte(sdpText)); err != nil {
|
|
||||||
return 0, &WrappedSdpError{
|
|
||||||
SdpError: SdpError{
|
|
||||||
message: fmt.Sprintf("could not parse sdp: %s", err),
|
|
||||||
},
|
|
||||||
err: err,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var mediaTypes MediaType
|
var mediaTypes MediaType
|
||||||
|
@ -823,8 +788,8 @@ func (s *ClientSession) IsAllowedToSend(data *MessageClientMessageData) error {
|
||||||
// Client is allowed to publish any media (audio / video).
|
// Client is allowed to publish any media (audio / video).
|
||||||
return nil
|
return nil
|
||||||
} else if data != nil && data.Type == "offer" {
|
} else if data != nil && data.Type == "offer" {
|
||||||
// Parse SDP to check what user is trying to publish and check permissions accordingly.
|
// Check what user is trying to publish and check permissions accordingly.
|
||||||
if _, err := s.isSdpAllowedToSendLocked(data.Payload); err != nil {
|
if _, err := s.isSdpAllowedToSendLocked(data.offerSdp); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -854,7 +819,7 @@ func (s *ClientSession) checkOfferTypeLocked(streamType StreamType, data *Messag
|
||||||
|
|
||||||
return MediaTypeScreen, nil
|
return MediaTypeScreen, nil
|
||||||
} else if data != nil && data.Type == "offer" {
|
} else if data != nil && data.Type == "offer" {
|
||||||
mediaTypes, err := s.isSdpAllowedToSendLocked(data.Payload)
|
mediaTypes, err := s.isSdpAllowedToSendLocked(data.offerSdp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
@ -905,7 +870,7 @@ func (s *ClientSession) GetOrCreatePublisher(ctx context.Context, mcu Mcu, strea
|
||||||
if prev, found := s.publishers[streamType]; found {
|
if prev, found := s.publishers[streamType]; found {
|
||||||
// Another thread created the publisher while we were waiting.
|
// Another thread created the publisher while we were waiting.
|
||||||
go func(pub McuPublisher) {
|
go func(pub McuPublisher) {
|
||||||
closeCtx := context.TODO()
|
closeCtx := context.Background()
|
||||||
pub.Close(closeCtx)
|
pub.Close(closeCtx)
|
||||||
}(publisher)
|
}(publisher)
|
||||||
publisher = prev
|
publisher = prev
|
||||||
|
@ -969,9 +934,10 @@ func (s *ClientSession) GetOrCreateSubscriber(ctx context.Context, mcu Mcu, id s
|
||||||
|
|
||||||
subscriber, found := s.subscribers[getStreamId(id, streamType)]
|
subscriber, found := s.subscribers[getStreamId(id, streamType)]
|
||||||
if !found {
|
if !found {
|
||||||
|
client := s.getClientUnlocked()
|
||||||
s.mu.Unlock()
|
s.mu.Unlock()
|
||||||
var err error
|
var err error
|
||||||
subscriber, err = mcu.NewSubscriber(ctx, s, id, streamType)
|
subscriber, err = mcu.NewSubscriber(ctx, s, id, streamType, client)
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -982,7 +948,7 @@ func (s *ClientSession) GetOrCreateSubscriber(ctx context.Context, mcu Mcu, id s
|
||||||
if prev, found := s.subscribers[getStreamId(id, streamType)]; found {
|
if prev, found := s.subscribers[getStreamId(id, streamType)]; found {
|
||||||
// Another thread created the subscriber while we were waiting.
|
// Another thread created the subscriber while we were waiting.
|
||||||
go func(sub McuSubscriber) {
|
go func(sub McuSubscriber) {
|
||||||
closeCtx := context.TODO()
|
closeCtx := context.Background()
|
||||||
sub.Close(closeCtx)
|
sub.Close(closeCtx)
|
||||||
}(subscriber)
|
}(subscriber)
|
||||||
subscriber = prev
|
subscriber = prev
|
||||||
|
@ -1056,7 +1022,7 @@ func (s *ClientSession) processAsyncMessage(message *AsyncMessage) {
|
||||||
case "sendoffer":
|
case "sendoffer":
|
||||||
// Process asynchronously to not block other messages received.
|
// Process asynchronously to not block other messages received.
|
||||||
go func() {
|
go func() {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), s.hub.mcuTimeout)
|
ctx, cancel := context.WithTimeout(s.Context(), s.hub.mcuTimeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
mc, err := s.GetOrCreateSubscriber(ctx, s.hub.mcu, message.SendOffer.SessionId, StreamType(message.SendOffer.Data.RoomType))
|
mc, err := s.GetOrCreateSubscriber(ctx, s.hub.mcu, message.SendOffer.SessionId, StreamType(message.SendOffer.Data.RoomType))
|
||||||
|
@ -1088,7 +1054,7 @@ func (s *ClientSession) processAsyncMessage(message *AsyncMessage) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
mc.SendMessage(context.TODO(), nil, message.SendOffer.Data, func(err error, response map[string]interface{}) {
|
mc.SendMessage(s.Context(), nil, message.SendOffer.Data, func(err error, response map[string]interface{}) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("Could not send MCU message %+v for session %s to %s: %s", message.SendOffer.Data, message.SendOffer.SessionId, s.PublicId(), err)
|
log.Printf("Could not send MCU message %+v for session %s to %s: %s", message.SendOffer.Data, message.SendOffer.SessionId, s.PublicId(), err)
|
||||||
if err := s.events.PublishSessionMessage(message.SendOffer.SessionId, s.backend, &AsyncMessage{
|
if err := s.events.PublishSessionMessage(message.SendOffer.SessionId, s.backend, &AsyncMessage{
|
||||||
|
@ -1146,13 +1112,13 @@ func (s *ClientSession) storePendingMessage(message *ServerMessage) {
|
||||||
func filterDisplayNames(events []*EventServerMessageSessionEntry) []*EventServerMessageSessionEntry {
|
func filterDisplayNames(events []*EventServerMessageSessionEntry) []*EventServerMessageSessionEntry {
|
||||||
result := make([]*EventServerMessageSessionEntry, 0, len(events))
|
result := make([]*EventServerMessageSessionEntry, 0, len(events))
|
||||||
for _, event := range events {
|
for _, event := range events {
|
||||||
if event.User == nil {
|
if len(event.User) == 0 {
|
||||||
result = append(result, event)
|
result = append(result, event)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
var userdata map[string]interface{}
|
var userdata map[string]interface{}
|
||||||
if err := json.Unmarshal(*event.User, &userdata); err != nil {
|
if err := json.Unmarshal(event.User, &userdata); err != nil {
|
||||||
result = append(result, event)
|
result = append(result, event)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
@ -1178,7 +1144,7 @@ func filterDisplayNames(events []*EventServerMessageSessionEntry) []*EventServer
|
||||||
}
|
}
|
||||||
|
|
||||||
e := event.Clone()
|
e := event.Clone()
|
||||||
e.User = (*json.RawMessage)(&data)
|
e.User = data
|
||||||
result = append(result, e)
|
result = append(result, e)
|
||||||
}
|
}
|
||||||
return result
|
return result
|
||||||
|
@ -1273,12 +1239,12 @@ func (s *ClientSession) filterMessage(message *ServerMessage) *ServerMessage {
|
||||||
delete(s.seenJoinedEvents, e)
|
delete(s.seenJoinedEvents, e)
|
||||||
}
|
}
|
||||||
case "message":
|
case "message":
|
||||||
if message.Event.Message == nil || message.Event.Message.Data == nil || len(*message.Event.Message.Data) == 0 || !s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) {
|
if message.Event.Message == nil || len(message.Event.Message.Data) == 0 || !s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) {
|
||||||
return message
|
return message
|
||||||
}
|
}
|
||||||
|
|
||||||
var data RoomEventMessageData
|
var data RoomEventMessageData
|
||||||
if err := json.Unmarshal(*message.Event.Message.Data, &data); err != nil {
|
if err := json.Unmarshal(message.Event.Message.Data, &data); err != nil {
|
||||||
return message
|
return message
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1295,7 +1261,7 @@ func (s *ClientSession) filterMessage(message *ServerMessage) *ServerMessage {
|
||||||
Target: message.Event.Target,
|
Target: message.Event.Target,
|
||||||
Message: &RoomEventMessage{
|
Message: &RoomEventMessage{
|
||||||
RoomId: message.Event.Message.RoomId,
|
RoomId: message.Event.Message.RoomId,
|
||||||
Data: (*json.RawMessage)(&encoded),
|
Data: encoded,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -1305,9 +1271,9 @@ func (s *ClientSession) filterMessage(message *ServerMessage) *ServerMessage {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case "message":
|
case "message":
|
||||||
if message.Message != nil && message.Message.Data != nil && len(*message.Message.Data) > 0 && s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) {
|
if message.Message != nil && len(message.Message.Data) > 0 && s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) {
|
||||||
var data MessageServerMessageData
|
var data MessageServerMessageData
|
||||||
if err := json.Unmarshal(*message.Message.Data, &data); err != nil {
|
if err := json.Unmarshal(message.Message.Data, &data); err != nil {
|
||||||
return message
|
return message
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1361,7 +1327,7 @@ func (s *ClientSession) filterAsyncMessage(msg *AsyncMessage) *ServerMessage {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ClientSession) NotifySessionResumed(client *Client) {
|
func (s *ClientSession) NotifySessionResumed(client HandlerClient) {
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
if len(s.pendingClientMessages) == 0 {
|
if len(s.pendingClientMessages) == 0 {
|
||||||
s.mu.Unlock()
|
s.mu.Unlock()
|
||||||
|
|
|
@ -117,6 +117,7 @@ func Test_permissionsEqual(t *testing.T) {
|
||||||
for idx, test := range tests {
|
for idx, test := range tests {
|
||||||
test := test
|
test := test
|
||||||
t.Run(strconv.Itoa(idx), func(t *testing.T) {
|
t.Run(strconv.Itoa(idx), func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
equal := permissionsEqual(test.a, test.b)
|
equal := permissionsEqual(test.a, test.b)
|
||||||
if equal != test.equal {
|
if equal != test.equal {
|
||||||
t.Errorf("Expected %+v to be %s to %+v but was %s", test.a, equalStrings[test.equal], test.b, equalStrings[equal])
|
t.Errorf("Expected %+v to be %s to %+v but was %s", test.a, equalStrings[test.equal], test.b, equalStrings[equal])
|
||||||
|
@ -126,12 +127,17 @@ func Test_permissionsEqual(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBandwidth_Client(t *testing.T) {
|
func TestBandwidth_Client(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
hub, _, _, server := CreateHubForTest(t)
|
hub, _, _, server := CreateHubForTest(t)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
mcu, err := NewTestMCU()
|
mcu, err := NewTestMCU()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
} else if err := mcu.Start(); err != nil {
|
} else if err := mcu.Start(ctx); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer mcu.Stop()
|
defer mcu.Stop()
|
||||||
|
@ -145,9 +151,6 @@ func TestBandwidth_Client(t *testing.T) {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
hello, err := client.RunUntilHello(ctx)
|
hello, err := client.RunUntilHello(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
@ -198,6 +201,8 @@ func TestBandwidth_Client(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBandwidth_Backend(t *testing.T) {
|
func TestBandwidth_Backend(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
hub, _, _, server := CreateHubWithMultipleBackendsForTest(t)
|
hub, _, _, server := CreateHubWithMultipleBackendsForTest(t)
|
||||||
|
|
||||||
u, err := url.Parse(server.URL + "/one")
|
u, err := url.Parse(server.URL + "/one")
|
||||||
|
@ -212,10 +217,13 @@ func TestBandwidth_Backend(t *testing.T) {
|
||||||
backend.maxScreenBitrate = 1000
|
backend.maxScreenBitrate = 1000
|
||||||
backend.maxStreamBitrate = 2000
|
backend.maxStreamBitrate = 2000
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
mcu, err := NewTestMCU()
|
mcu, err := NewTestMCU()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
} else if err := mcu.Start(); err != nil {
|
} else if err := mcu.Start(ctx); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer mcu.Stop()
|
defer mcu.Stop()
|
||||||
|
@ -227,9 +235,6 @@ func TestBandwidth_Backend(t *testing.T) {
|
||||||
StreamTypeScreen,
|
StreamTypeScreen,
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
for _, streamType := range streamTypes {
|
for _, streamType := range streamTypes {
|
||||||
t.Run(string(streamType), func(t *testing.T) {
|
t.Run(string(streamType), func(t *testing.T) {
|
||||||
client := NewTestClient(t, server, hub)
|
client := NewTestClient(t, server, hub)
|
||||||
|
|
32
config.go
32
config.go
|
@ -23,10 +23,40 @@ package signaling
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
|
||||||
"github.com/dlintw/goconf"
|
"github.com/dlintw/goconf"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
searchVarsRegexp = regexp.MustCompile(`\$\([A-Za-z][A-Za-z0-9_]*\)`)
|
||||||
|
)
|
||||||
|
|
||||||
|
func replaceEnvVars(s string) string {
|
||||||
|
return searchVarsRegexp.ReplaceAllStringFunc(s, func(name string) string {
|
||||||
|
name = name[2 : len(name)-1]
|
||||||
|
value, found := os.LookupEnv(name)
|
||||||
|
if !found {
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
|
||||||
|
return value
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetStringOptionWithEnv will get the string option and resolve any environment
|
||||||
|
// variable references in the form "$(VAR)".
|
||||||
|
func GetStringOptionWithEnv(config *goconf.ConfigFile, section string, option string) (string, error) {
|
||||||
|
value, err := config.GetString(section, option)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
value = replaceEnvVars(value)
|
||||||
|
return value, nil
|
||||||
|
}
|
||||||
|
|
||||||
func GetStringOptions(config *goconf.ConfigFile, section string, ignoreErrors bool) (map[string]string, error) {
|
func GetStringOptions(config *goconf.ConfigFile, section string, ignoreErrors bool) (map[string]string, error) {
|
||||||
options, _ := config.GetOptions(section)
|
options, _ := config.GetOptions(section)
|
||||||
if len(options) == 0 {
|
if len(options) == 0 {
|
||||||
|
@ -35,7 +65,7 @@ func GetStringOptions(config *goconf.ConfigFile, section string, ignoreErrors bo
|
||||||
|
|
||||||
result := make(map[string]string)
|
result := make(map[string]string)
|
||||||
for _, option := range options {
|
for _, option := range options {
|
||||||
value, err := config.GetString(section, option)
|
value, err := GetStringOptionWithEnv(config, section, option)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if ignoreErrors {
|
if ignoreErrors {
|
||||||
continue
|
continue
|
||||||
|
|
|
@ -29,13 +29,19 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestStringOptions(t *testing.T) {
|
func TestStringOptions(t *testing.T) {
|
||||||
|
t.Setenv("FOO", "foo")
|
||||||
expected := map[string]string{
|
expected := map[string]string{
|
||||||
"one": "1",
|
"one": "1",
|
||||||
"two": "2",
|
"two": "2",
|
||||||
|
"foo": "http://foo/1",
|
||||||
}
|
}
|
||||||
config := goconf.NewConfigFile()
|
config := goconf.NewConfigFile()
|
||||||
for k, v := range expected {
|
for k, v := range expected {
|
||||||
config.AddOption("foo", k, v)
|
if k == "foo" {
|
||||||
|
config.AddOption("foo", k, "http://$(FOO)/1")
|
||||||
|
} else {
|
||||||
|
config.AddOption("foo", k, v)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
config.AddOption("default", "three", "3")
|
config.AddOption("default", "three", "3")
|
||||||
|
|
||||||
|
@ -48,3 +54,39 @@ func TestStringOptions(t *testing.T) {
|
||||||
t.Errorf("expected %+v, got %+v", expected, options)
|
t.Errorf("expected %+v, got %+v", expected, options)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestStringOptionWithEnv(t *testing.T) {
|
||||||
|
t.Setenv("FOO", "foo")
|
||||||
|
t.Setenv("BAR", "")
|
||||||
|
t.Setenv("BA_R", "bar")
|
||||||
|
|
||||||
|
config := goconf.NewConfigFile()
|
||||||
|
config.AddOption("test", "foo", "http://$(FOO)/1")
|
||||||
|
config.AddOption("test", "bar", "http://$(BAR)/2")
|
||||||
|
config.AddOption("test", "bar2", "http://$(BA_R)/3")
|
||||||
|
config.AddOption("test", "baz", "http://$(BAZ)/4")
|
||||||
|
config.AddOption("test", "inv1", "http://$(FOO")
|
||||||
|
config.AddOption("test", "inv2", "http://$FOO)")
|
||||||
|
config.AddOption("test", "inv3", "http://$((FOO)")
|
||||||
|
config.AddOption("test", "inv4", "http://$(F.OO)")
|
||||||
|
|
||||||
|
expected := map[string]string{
|
||||||
|
"foo": "http://foo/1",
|
||||||
|
"bar": "http:///2",
|
||||||
|
"bar2": "http://bar/3",
|
||||||
|
"baz": "http://BAZ/4",
|
||||||
|
"inv1": "http://$(FOO",
|
||||||
|
"inv2": "http://$FOO)",
|
||||||
|
"inv3": "http://$((FOO)",
|
||||||
|
"inv4": "http://$(F.OO)",
|
||||||
|
}
|
||||||
|
for k, v := range expected {
|
||||||
|
value, err := GetStringOptionWithEnv(config, "test", k)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("expected value for %s, got %s", k, err)
|
||||||
|
} else if value != v {
|
||||||
|
t.Errorf("expected value %s for %s, got %s", v, k, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
|
@ -35,6 +35,7 @@ func TestDeferredExecutor_MultiClose(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDeferredExecutor_QueueSize(t *testing.T) {
|
func TestDeferredExecutor_QueueSize(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
e := NewDeferredExecutor(0)
|
e := NewDeferredExecutor(0)
|
||||||
defer e.waitForStop()
|
defer e.waitForStop()
|
||||||
defer e.Close()
|
defer e.Close()
|
||||||
|
@ -100,6 +101,7 @@ func TestDeferredExecutor_CloseFromFunc(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDeferredExecutor_DeferAfterClose(t *testing.T) {
|
func TestDeferredExecutor_DeferAfterClose(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
e := NewDeferredExecutor(64)
|
e := NewDeferredExecutor(64)
|
||||||
defer e.waitForStop()
|
defer e.waitForStop()
|
||||||
|
|
||||||
|
|
|
@ -55,6 +55,7 @@ The running container can be configured through different environment variables:
|
||||||
- `GEOIP_OVERRIDES`: Optional space-separated list of overrides for GeoIP lookups.
|
- `GEOIP_OVERRIDES`: Optional space-separated list of overrides for GeoIP lookups.
|
||||||
- `CONTINENT_OVERRIDES`: Optional space-separated list of overrides for continent mappings.
|
- `CONTINENT_OVERRIDES`: Optional space-separated list of overrides for continent mappings.
|
||||||
- `STATS_IPS`: Comma-separated list of IP addresses that are allowed to access the stats endpoint.
|
- `STATS_IPS`: Comma-separated list of IP addresses that are allowed to access the stats endpoint.
|
||||||
|
- `TRUSTED_PROXIES`: Comma-separated list of IPs / networks that are trusted proxies.
|
||||||
- `GRPC_LISTEN`: IP and port to listen on for GRPC requests.
|
- `GRPC_LISTEN`: IP and port to listen on for GRPC requests.
|
||||||
- `GRPC_SERVER_CERTIFICATE`: Certificate to use for the GRPC server.
|
- `GRPC_SERVER_CERTIFICATE`: Certificate to use for the GRPC server.
|
||||||
- `GRPC_SERVER_KEY`: Private key to use for the GRPC server.
|
- `GRPC_SERVER_KEY`: Private key to use for the GRPC server.
|
||||||
|
@ -99,9 +100,16 @@ The running container can be configured through different environment variables:
|
||||||
- `CONFIG`: Optional name of configuration file to use.
|
- `CONFIG`: Optional name of configuration file to use.
|
||||||
- `HTTP_LISTEN`: Address of HTTP listener.
|
- `HTTP_LISTEN`: Address of HTTP listener.
|
||||||
- `COUNTRY`: Optional ISO 3166 country this proxy is located at.
|
- `COUNTRY`: Optional ISO 3166 country this proxy is located at.
|
||||||
|
- `EXTERNAL_HOSTNAME`: The external hostname for remote streams. Will try to autodetect if omitted.
|
||||||
|
- `TOKEN_ID`: Id of the token to use when connecting remote streams.
|
||||||
|
- `TOKEN_KEY`: Private key for the configured token id.
|
||||||
|
- `BANDWIDTH_INCOMING`: Optional incoming target bandwidth (in megabits per second).
|
||||||
|
- `BANDWIDTH_OUTGOING`: Optional outgoing target bandwidth (in megabits per second).
|
||||||
- `JANUS_URL`: Url to Janus server.
|
- `JANUS_URL`: Url to Janus server.
|
||||||
- `MAX_STREAM_BITRATE`: Optional maximum bitrate for audio/video streams.
|
- `MAX_STREAM_BITRATE`: Optional maximum bitrate for audio/video streams.
|
||||||
- `MAX_SCREEN_BITRATE`: Optional maximum bitrate for screensharing streams.
|
- `MAX_SCREEN_BITRATE`: Optional maximum bitrate for screensharing streams.
|
||||||
|
- `STATS_IPS`: Comma-separated list of IP addresses that are allowed to access the stats endpoint.
|
||||||
|
- `TRUSTED_PROXIES`: Comma-separated list of IPs / networks that are trusted proxies.
|
||||||
- `ETCD_ENDPOINTS`: Static list of etcd endpoints (if etcd should be used).
|
- `ETCD_ENDPOINTS`: Static list of etcd endpoints (if etcd should be used).
|
||||||
- `ETCD_DISCOVERY_SRV`: Alternative domain to use for DNS SRV configuration of etcd endpoints (if etcd should be used).
|
- `ETCD_DISCOVERY_SRV`: Alternative domain to use for DNS SRV configuration of etcd endpoints (if etcd should be used).
|
||||||
- `ETCD_DISCOVERY_SERVICE`: Optional service name for DNS SRV configuration of etcd..
|
- `ETCD_DISCOVERY_SERVICE`: Optional service name for DNS SRV configuration of etcd..
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
# Modified from https://gitlab.com/powerpaul17/nc_talk_backend/-/blob/dcbb918d8716dad1eb72a889d1e6aa1e3a543641/docker/janus/Dockerfile
|
# Modified from https://gitlab.com/powerpaul17/nc_talk_backend/-/blob/dcbb918d8716dad1eb72a889d1e6aa1e3a543641/docker/janus/Dockerfile
|
||||||
FROM alpine:3.19
|
FROM alpine:3.20
|
||||||
|
|
||||||
RUN apk add --no-cache curl autoconf automake libtool pkgconf build-base \
|
RUN apk add --no-cache curl autoconf automake libtool pkgconf build-base \
|
||||||
glib-dev libconfig-dev libnice-dev jansson-dev openssl-dev zlib libsrtp-dev \
|
glib-dev libconfig-dev libnice-dev jansson-dev openssl-dev zlib libsrtp-dev \
|
||||||
|
@ -15,30 +15,30 @@ RUN cd /tmp && \
|
||||||
git checkout $USRSCTP_VERSION && \
|
git checkout $USRSCTP_VERSION && \
|
||||||
./bootstrap && \
|
./bootstrap && \
|
||||||
./configure --prefix=/usr && \
|
./configure --prefix=/usr && \
|
||||||
make && make install
|
make -j$(nproc) && make install
|
||||||
|
|
||||||
# libsrtp
|
# libsrtp
|
||||||
ARG LIBSRTP_VERSION=2.4.2
|
ARG LIBSRTP_VERSION=2.6.0
|
||||||
RUN cd /tmp && \
|
RUN cd /tmp && \
|
||||||
wget https://github.com/cisco/libsrtp/archive/v$LIBSRTP_VERSION.tar.gz && \
|
wget https://github.com/cisco/libsrtp/archive/v$LIBSRTP_VERSION.tar.gz && \
|
||||||
tar xfv v$LIBSRTP_VERSION.tar.gz && \
|
tar xfv v$LIBSRTP_VERSION.tar.gz && \
|
||||||
cd libsrtp-$LIBSRTP_VERSION && \
|
cd libsrtp-$LIBSRTP_VERSION && \
|
||||||
./configure --prefix=/usr --enable-openssl && \
|
./configure --prefix=/usr --enable-openssl && \
|
||||||
make shared_library && \
|
make shared_library -j$(nproc) && \
|
||||||
make install && \
|
make install && \
|
||||||
rm -fr /libsrtp-$LIBSRTP_VERSION && \
|
rm -fr /libsrtp-$LIBSRTP_VERSION && \
|
||||||
rm -f /v$LIBSRTP_VERSION.tar.gz
|
rm -f /v$LIBSRTP_VERSION.tar.gz
|
||||||
|
|
||||||
# JANUS
|
# JANUS
|
||||||
|
|
||||||
ARG JANUS_VERSION=0.14.1
|
ARG JANUS_VERSION=1.2.2
|
||||||
RUN mkdir -p /usr/src/janus && \
|
RUN mkdir -p /usr/src/janus && \
|
||||||
cd /usr/src/janus && \
|
cd /usr/src/janus && \
|
||||||
curl -L https://github.com/meetecho/janus-gateway/archive/v$JANUS_VERSION.tar.gz | tar -xz && \
|
curl -L https://github.com/meetecho/janus-gateway/archive/v$JANUS_VERSION.tar.gz | tar -xz && \
|
||||||
cd /usr/src/janus/janus-gateway-$JANUS_VERSION && \
|
cd /usr/src/janus/janus-gateway-$JANUS_VERSION && \
|
||||||
./autogen.sh && \
|
./autogen.sh && \
|
||||||
./configure --disable-rabbitmq --disable-mqtt --disable-boringssl && \
|
./configure --disable-rabbitmq --disable-mqtt --disable-boringssl && \
|
||||||
make && \
|
make -j$(nproc) && \
|
||||||
make install && \
|
make install && \
|
||||||
make configs
|
make configs
|
||||||
|
|
||||||
|
|
|
@ -7,8 +7,7 @@ WORKDIR /workdir
|
||||||
COPY . .
|
COPY . .
|
||||||
RUN touch /.dockerenv && \
|
RUN touch /.dockerenv && \
|
||||||
apk add --no-cache bash git build-base protobuf && \
|
apk add --no-cache bash git build-base protobuf && \
|
||||||
if [ -d "vendor" ]; then GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOPROXY=off make proxy; else \
|
GOOS=${TARGETOS} GOARCH=${TARGETARCH} make proxy
|
||||||
GOOS=${TARGETOS} GOARCH=${TARGETARCH} make proxy; fi
|
|
||||||
|
|
||||||
FROM alpine:3
|
FROM alpine:3
|
||||||
|
|
||||||
|
|
|
@ -44,6 +44,22 @@ if [ ! -f "$CONFIG" ]; then
|
||||||
sed -i "s|#country =.*|country = $COUNTRY|" "$CONFIG"
|
sed -i "s|#country =.*|country = $COUNTRY|" "$CONFIG"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [ -n "$EXTERNAL_HOSTNAME" ]; then
|
||||||
|
sed -i "s|#hostname =.*|hostname = $EXTERNAL_HOSTNAME|" "$CONFIG"
|
||||||
|
fi
|
||||||
|
if [ -n "$TOKEN_ID" ]; then
|
||||||
|
sed -i "s|#token_id =.*|token_id = $TOKEN_ID|" "$CONFIG"
|
||||||
|
fi
|
||||||
|
if [ -n "$TOKEN_KEY" ]; then
|
||||||
|
sed -i "s|#token_key =.*|token_key = $TOKEN_KEY|" "$CONFIG"
|
||||||
|
fi
|
||||||
|
if [ -n "$BANDWIDTH_INCOMING" ]; then
|
||||||
|
sed -i "s|#incoming =.*|incoming = $BANDWIDTH_INCOMING|" "$CONFIG"
|
||||||
|
fi
|
||||||
|
if [ -n "$BANDWIDTH_OUTGOING" ]; then
|
||||||
|
sed -i "s|#outgoing =.*|outgoing = $BANDWIDTH_OUTGOING|" "$CONFIG"
|
||||||
|
fi
|
||||||
|
|
||||||
HAS_ETCD=
|
HAS_ETCD=
|
||||||
if [ -n "$ETCD_ENDPOINTS" ]; then
|
if [ -n "$ETCD_ENDPOINTS" ]; then
|
||||||
sed -i "s|#endpoints =.*|endpoints = $ETCD_ENDPOINTS|" "$CONFIG"
|
sed -i "s|#endpoints =.*|endpoints = $ETCD_ENDPOINTS|" "$CONFIG"
|
||||||
|
@ -109,6 +125,10 @@ if [ ! -f "$CONFIG" ]; then
|
||||||
if [ -n "$STATS_IPS" ]; then
|
if [ -n "$STATS_IPS" ]; then
|
||||||
sed -i "s|#allowed_ips =.*|allowed_ips = $STATS_IPS|" "$CONFIG"
|
sed -i "s|#allowed_ips =.*|allowed_ips = $STATS_IPS|" "$CONFIG"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [ -n "$TRUSTED_PROXIES" ]; then
|
||||||
|
sed -i "s|#trustedproxies =.*|trustedproxies = $TRUSTED_PROXIES|" "$CONFIG"
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo "Starting signaling proxy with $CONFIG ..."
|
echo "Starting signaling proxy with $CONFIG ..."
|
||||||
|
|
|
@ -7,8 +7,7 @@ WORKDIR /workdir
|
||||||
COPY . .
|
COPY . .
|
||||||
RUN touch /.dockerenv && \
|
RUN touch /.dockerenv && \
|
||||||
apk add --no-cache bash git build-base protobuf && \
|
apk add --no-cache bash git build-base protobuf && \
|
||||||
if [ -d "vendor" ]; then GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOPROXY=off make server; else \
|
GOOS=${TARGETOS} GOARCH=${TARGETARCH} make server
|
||||||
GOOS=${TARGETOS} GOARCH=${TARGETARCH} make server; fi
|
|
||||||
|
|
||||||
FROM alpine:3
|
FROM alpine:3
|
||||||
|
|
||||||
|
@ -19,9 +18,12 @@ RUN adduser -D spreedbackend && \
|
||||||
COPY --from=builder /workdir/bin/signaling /usr/bin/nextcloud-spreed-signaling
|
COPY --from=builder /workdir/bin/signaling /usr/bin/nextcloud-spreed-signaling
|
||||||
COPY ./server.conf.in /config/server.conf.in
|
COPY ./server.conf.in /config/server.conf.in
|
||||||
COPY ./docker/server/entrypoint.sh /
|
COPY ./docker/server/entrypoint.sh /
|
||||||
|
COPY ./docker/server/stop.sh /
|
||||||
|
COPY ./docker/server/wait.sh /
|
||||||
RUN chown spreedbackend /config
|
RUN chown spreedbackend /config
|
||||||
RUN /usr/bin/nextcloud-spreed-signaling -version
|
RUN /usr/bin/nextcloud-spreed-signaling -version
|
||||||
|
|
||||||
USER spreedbackend
|
USER spreedbackend
|
||||||
|
|
||||||
|
STOPSIGNAL SIGUSR1
|
||||||
ENTRYPOINT [ "/entrypoint.sh" ]
|
ENTRYPOINT [ "/entrypoint.sh" ]
|
||||||
|
|
|
@ -157,6 +157,10 @@ if [ ! -f "$CONFIG" ]; then
|
||||||
sed -i "s|#allowed_ips =.*|allowed_ips = $STATS_IPS|" "$CONFIG"
|
sed -i "s|#allowed_ips =.*|allowed_ips = $STATS_IPS|" "$CONFIG"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [ -n "$TRUSTED_PROXIES" ]; then
|
||||||
|
sed -i "s|#trustedproxies =.*|trustedproxies = $TRUSTED_PROXIES|" "$CONFIG"
|
||||||
|
fi
|
||||||
|
|
||||||
if [ -n "$GRPC_LISTEN" ]; then
|
if [ -n "$GRPC_LISTEN" ]; then
|
||||||
sed -i "s|#listen = 0.0.0.0:9090|listen = $GRPC_LISTEN|" "$CONFIG"
|
sed -i "s|#listen = 0.0.0.0:9090|listen = $GRPC_LISTEN|" "$CONFIG"
|
||||||
|
|
||||||
|
|
26
docker/server/stop.sh
Executable file
26
docker/server/stop.sh
Executable file
|
@ -0,0 +1,26 @@
|
||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Standalone signaling server for the Nextcloud Spreed app.
|
||||||
|
# Copyright (C) 2024 struktur AG
|
||||||
|
#
|
||||||
|
# @author Joachim Bauch <bauch@struktur.de>
|
||||||
|
#
|
||||||
|
# @license GNU AGPL version 3 or any later version
|
||||||
|
#
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Affero General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Affero General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Affero General Public License
|
||||||
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
#
|
||||||
|
set -e
|
||||||
|
|
||||||
|
echo "Schedule signaling server to shutdown ..."
|
||||||
|
exec killall -USR1 nextcloud-spreed-signaling
|
33
docker/server/wait.sh
Executable file
33
docker/server/wait.sh
Executable file
|
@ -0,0 +1,33 @@
|
||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Standalone signaling server for the Nextcloud Spreed app.
|
||||||
|
# Copyright (C) 2024 struktur AG
|
||||||
|
#
|
||||||
|
# @author Joachim Bauch <bauch@struktur.de>
|
||||||
|
#
|
||||||
|
# @license GNU AGPL version 3 or any later version
|
||||||
|
#
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Affero General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Affero General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Affero General Public License
|
||||||
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
#
|
||||||
|
set -e
|
||||||
|
|
||||||
|
echo "Waiting for signaling server to shutdown ..."
|
||||||
|
while true
|
||||||
|
do
|
||||||
|
if ! pgrep nextcloud-spreed-signaling > /dev/null ; then
|
||||||
|
echo "Signaling server has stopped"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
sleep 1
|
||||||
|
done
|
|
@ -49,3 +49,5 @@ The following metrics are available:
|
||||||
| `signaling_grpc_client_calls_total` | Counter | 1.0.0 | The total number of GRPC client calls | `method` |
|
| `signaling_grpc_client_calls_total` | Counter | 1.0.0 | The total number of GRPC client calls | `method` |
|
||||||
| `signaling_grpc_server_calls_total` | Counter | 1.0.0 | The total number of GRPC server calls | `method` |
|
| `signaling_grpc_server_calls_total` | Counter | 1.0.0 | The total number of GRPC server calls | `method` |
|
||||||
| `signaling_http_client_pool_connections` | Gauge | 1.2.4 | The current number of HTTP client connections per host | `host` |
|
| `signaling_http_client_pool_connections` | Gauge | 1.2.4 | The current number of HTTP client connections per host | `host` |
|
||||||
|
| `signaling_throttle_delayed_total` | Counter | 1.2.5 | The total number of delayed requests | `action`, `delay` |
|
||||||
|
| `signaling_throttle_bruteforce_total` | Counter | 1.2.5 | The total number of rejected bruteforce requests | `action` |
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
jinja2==3.1.3
|
jinja2==3.1.4
|
||||||
markdown==3.6
|
markdown==3.6
|
||||||
mkdocs==1.5.3
|
mkdocs==1.6.0
|
||||||
readthedocs-sphinx-search==0.3.2
|
readthedocs-sphinx-search==0.3.2
|
||||||
sphinx==7.2.6
|
sphinx==7.3.7
|
||||||
sphinx_rtd_theme==2.0.0
|
sphinx_rtd_theme==2.0.0
|
||||||
|
|
|
@ -23,6 +23,7 @@ package signaling
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -34,6 +35,8 @@ import (
|
||||||
"go.etcd.io/etcd/client/pkg/v3/srv"
|
"go.etcd.io/etcd/client/pkg/v3/srv"
|
||||||
"go.etcd.io/etcd/client/pkg/v3/transport"
|
"go.etcd.io/etcd/client/pkg/v3/transport"
|
||||||
clientv3 "go.etcd.io/etcd/client/v3"
|
clientv3 "go.etcd.io/etcd/client/v3"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
"go.uber.org/zap/zapcore"
|
||||||
)
|
)
|
||||||
|
|
||||||
type EtcdClientListener interface {
|
type EtcdClientListener interface {
|
||||||
|
@ -42,8 +45,8 @@ type EtcdClientListener interface {
|
||||||
|
|
||||||
type EtcdClientWatcher interface {
|
type EtcdClientWatcher interface {
|
||||||
EtcdWatchCreated(client *EtcdClient, key string)
|
EtcdWatchCreated(client *EtcdClient, key string)
|
||||||
EtcdKeyUpdated(client *EtcdClient, key string, value []byte)
|
EtcdKeyUpdated(client *EtcdClient, key string, value []byte, prevValue []byte)
|
||||||
EtcdKeyDeleted(client *EtcdClient, key string)
|
EtcdKeyDeleted(client *EtcdClient, key string, prevValue []byte)
|
||||||
}
|
}
|
||||||
|
|
||||||
type EtcdClient struct {
|
type EtcdClient struct {
|
||||||
|
@ -112,6 +115,17 @@ func (c *EtcdClient) load(config *goconf.ConfigFile, ignoreErrors bool) error {
|
||||||
DialTimeout: time.Second,
|
DialTimeout: time.Second,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if logLevel, _ := config.GetString("etcd", "loglevel"); logLevel != "" {
|
||||||
|
var l zapcore.Level
|
||||||
|
if err := l.Set(logLevel); err != nil {
|
||||||
|
return fmt.Errorf("Unsupported etcd log level %s: %w", logLevel, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
logConfig := zap.NewProductionConfig()
|
||||||
|
logConfig.Level = zap.NewAtomicLevelAt(l)
|
||||||
|
cfg.LogConfig = &logConfig
|
||||||
|
}
|
||||||
|
|
||||||
clientKey := c.getConfigStringWithFallback(config, "clientkey")
|
clientKey := c.getConfigStringWithFallback(config, "clientkey")
|
||||||
clientCert := c.getConfigStringWithFallback(config, "clientcert")
|
clientCert := c.getConfigStringWithFallback(config, "clientcert")
|
||||||
caCert := c.getConfigStringWithFallback(config, "cacert")
|
caCert := c.getConfigStringWithFallback(config, "cacert")
|
||||||
|
@ -176,8 +190,8 @@ func (c *EtcdClient) getEtcdClient() *clientv3.Client {
|
||||||
return client.(*clientv3.Client)
|
return client.(*clientv3.Client)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *EtcdClient) syncClient() error {
|
func (c *EtcdClient) syncClient(ctx context.Context) error {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
return c.getEtcdClient().Sync(ctx)
|
return c.getEtcdClient().Sync(ctx)
|
||||||
|
@ -223,8 +237,10 @@ func (c *EtcdClient) WaitForConnection(ctx context.Context) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := c.syncClient(); err != nil {
|
if err := c.syncClient(ctx); err != nil {
|
||||||
if err == context.DeadlineExceeded {
|
if errors.Is(err, context.Canceled) {
|
||||||
|
return err
|
||||||
|
} else if errors.Is(err, context.DeadlineExceeded) {
|
||||||
log.Printf("Timeout waiting for etcd client to connect to the cluster, retry in %s", backoff.NextWait())
|
log.Printf("Timeout waiting for etcd client to connect to the cluster, retry in %s", backoff.NextWait())
|
||||||
} else {
|
} else {
|
||||||
log.Printf("Could not sync etcd client with the cluster, retry in %s: %s", backoff.NextWait(), err)
|
log.Printf("Could not sync etcd client with the cluster, retry in %s: %s", backoff.NextWait(), err)
|
||||||
|
@ -243,27 +259,37 @@ func (c *EtcdClient) Get(ctx context.Context, key string, opts ...clientv3.OpOpt
|
||||||
return c.getEtcdClient().Get(ctx, key, opts...)
|
return c.getEtcdClient().Get(ctx, key, opts...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *EtcdClient) Watch(ctx context.Context, key string, watcher EtcdClientWatcher, opts ...clientv3.OpOption) error {
|
func (c *EtcdClient) Watch(ctx context.Context, key string, nextRevision int64, watcher EtcdClientWatcher, opts ...clientv3.OpOption) (int64, error) {
|
||||||
log.Printf("Wait for leader and start watching on %s", key)
|
log.Printf("Wait for leader and start watching on %s (rev=%d)", key, nextRevision)
|
||||||
|
opts = append(opts, clientv3.WithRev(nextRevision), clientv3.WithPrevKV())
|
||||||
ch := c.getEtcdClient().Watch(clientv3.WithRequireLeader(ctx), key, opts...)
|
ch := c.getEtcdClient().Watch(clientv3.WithRequireLeader(ctx), key, opts...)
|
||||||
log.Printf("Watch created for %s", key)
|
log.Printf("Watch created for %s", key)
|
||||||
watcher.EtcdWatchCreated(c, key)
|
watcher.EtcdWatchCreated(c, key)
|
||||||
for response := range ch {
|
for response := range ch {
|
||||||
if err := response.Err(); err != nil {
|
if err := response.Err(); err != nil {
|
||||||
return err
|
return nextRevision, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
nextRevision = response.Header.Revision + 1
|
||||||
for _, ev := range response.Events {
|
for _, ev := range response.Events {
|
||||||
switch ev.Type {
|
switch ev.Type {
|
||||||
case clientv3.EventTypePut:
|
case clientv3.EventTypePut:
|
||||||
watcher.EtcdKeyUpdated(c, string(ev.Kv.Key), ev.Kv.Value)
|
var prevValue []byte
|
||||||
|
if ev.PrevKv != nil {
|
||||||
|
prevValue = ev.PrevKv.Value
|
||||||
|
}
|
||||||
|
watcher.EtcdKeyUpdated(c, string(ev.Kv.Key), ev.Kv.Value, prevValue)
|
||||||
case clientv3.EventTypeDelete:
|
case clientv3.EventTypeDelete:
|
||||||
watcher.EtcdKeyDeleted(c, string(ev.Kv.Key))
|
var prevValue []byte
|
||||||
|
if ev.PrevKv != nil {
|
||||||
|
prevValue = ev.PrevKv.Value
|
||||||
|
}
|
||||||
|
watcher.EtcdKeyDeleted(c, string(ev.Kv.Key), prevValue)
|
||||||
default:
|
default:
|
||||||
log.Printf("Unsupported watch event %s %q -> %q", ev.Type, ev.Kv.Key, ev.Kv.Value)
|
log.Printf("Unsupported watch event %s %q -> %q", ev.Type, ev.Kv.Key, ev.Kv.Value)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nextRevision, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -29,7 +29,6 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"sync"
|
|
||||||
"syscall"
|
"syscall"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
@ -39,6 +38,8 @@ import (
|
||||||
clientv3 "go.etcd.io/etcd/client/v3"
|
clientv3 "go.etcd.io/etcd/client/v3"
|
||||||
"go.etcd.io/etcd/server/v3/embed"
|
"go.etcd.io/etcd/server/v3/embed"
|
||||||
"go.etcd.io/etcd/server/v3/lease"
|
"go.etcd.io/etcd/server/v3/lease"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
"go.uber.org/zap/zaptest"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -89,6 +90,7 @@ func NewEtcdForTest(t *testing.T) *embed.Etcd {
|
||||||
cfg.ListenPeerUrls = []url.URL{*peerListener}
|
cfg.ListenPeerUrls = []url.URL{*peerListener}
|
||||||
cfg.AdvertisePeerUrls = []url.URL{*peerListener}
|
cfg.AdvertisePeerUrls = []url.URL{*peerListener}
|
||||||
cfg.InitialCluster = "default=" + peerListener.String()
|
cfg.InitialCluster = "default=" + peerListener.String()
|
||||||
|
cfg.ZapLoggerBuilder = embed.NewZapLoggerBuilder(zaptest.NewLogger(t, zaptest.Level(zap.WarnLevel)))
|
||||||
etcd, err = embed.StartEtcd(cfg)
|
etcd, err = embed.StartEtcd(cfg)
|
||||||
if isErrorAddressAlreadyInUse(err) {
|
if isErrorAddressAlreadyInUse(err) {
|
||||||
continue
|
continue
|
||||||
|
@ -103,6 +105,7 @@ func NewEtcdForTest(t *testing.T) *embed.Etcd {
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
etcd.Close()
|
etcd.Close()
|
||||||
|
<-etcd.Server.StopNotify()
|
||||||
})
|
})
|
||||||
// Wait for server to be ready.
|
// Wait for server to be ready.
|
||||||
<-etcd.Server.ReadyNotify()
|
<-etcd.Server.ReadyNotify()
|
||||||
|
@ -115,6 +118,7 @@ func NewEtcdClientForTest(t *testing.T) (*embed.Etcd, *EtcdClient) {
|
||||||
|
|
||||||
config := goconf.NewConfigFile()
|
config := goconf.NewConfigFile()
|
||||||
config.AddOption("etcd", "endpoints", etcd.Config().ListenClientUrls[0].String())
|
config.AddOption("etcd", "endpoints", etcd.Config().ListenClientUrls[0].String())
|
||||||
|
config.AddOption("etcd", "loglevel", "error")
|
||||||
|
|
||||||
client, err := NewEtcdClient(config, "")
|
client, err := NewEtcdClient(config, "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -143,6 +147,8 @@ func DeleteEtcdValue(etcd *embed.Etcd, key string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_EtcdClient_Get(t *testing.T) {
|
func Test_EtcdClient_Get(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
etcd, client := NewEtcdClientForTest(t)
|
etcd, client := NewEtcdClientForTest(t)
|
||||||
|
|
||||||
if response, err := client.Get(context.Background(), "foo"); err != nil {
|
if response, err := client.Get(context.Background(), "foo"); err != nil {
|
||||||
|
@ -165,6 +171,8 @@ func Test_EtcdClient_Get(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_EtcdClient_GetPrefix(t *testing.T) {
|
func Test_EtcdClient_GetPrefix(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
etcd, client := NewEtcdClientForTest(t)
|
etcd, client := NewEtcdClientForTest(t)
|
||||||
|
|
||||||
if response, err := client.Get(context.Background(), "foo"); err != nil {
|
if response, err := client.Get(context.Background(), "foo"); err != nil {
|
||||||
|
@ -196,6 +204,8 @@ type etcdEvent struct {
|
||||||
t mvccpb.Event_EventType
|
t mvccpb.Event_EventType
|
||||||
key string
|
key string
|
||||||
value string
|
value string
|
||||||
|
|
||||||
|
prevValue string
|
||||||
}
|
}
|
||||||
|
|
||||||
type EtcdClientTestListener struct {
|
type EtcdClientTestListener struct {
|
||||||
|
@ -204,9 +214,8 @@ type EtcdClientTestListener struct {
|
||||||
ctx context.Context
|
ctx context.Context
|
||||||
cancel context.CancelFunc
|
cancel context.CancelFunc
|
||||||
|
|
||||||
initial chan struct{}
|
initial chan struct{}
|
||||||
initialWg sync.WaitGroup
|
events chan etcdEvent
|
||||||
events chan etcdEvent
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewEtcdClientTestListener(ctx context.Context, t *testing.T) *EtcdClientTestListener {
|
func NewEtcdClientTestListener(ctx context.Context, t *testing.T) *EtcdClientTestListener {
|
||||||
|
@ -227,15 +236,7 @@ func (l *EtcdClientTestListener) Close() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *EtcdClientTestListener) EtcdClientCreated(client *EtcdClient) {
|
func (l *EtcdClientTestListener) EtcdClientCreated(client *EtcdClient) {
|
||||||
l.initialWg.Add(1)
|
|
||||||
go func() {
|
go func() {
|
||||||
if err := client.Watch(clientv3.WithRequireLeader(l.ctx), "foo", l, clientv3.WithPrefix()); err != nil {
|
|
||||||
l.t.Error(err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
defer close(l.initial)
|
|
||||||
if err := client.WaitForConnection(l.ctx); err != nil {
|
if err := client.WaitForConnection(l.ctx); err != nil {
|
||||||
l.t.Errorf("error waiting for connection: %s", err)
|
l.t.Errorf("error waiting for connection: %s", err)
|
||||||
return
|
return
|
||||||
|
@ -244,7 +245,8 @@ func (l *EtcdClientTestListener) EtcdClientCreated(client *EtcdClient) {
|
||||||
ctx, cancel := context.WithTimeout(l.ctx, time.Second)
|
ctx, cancel := context.WithTimeout(l.ctx, time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
if response, err := client.Get(ctx, "foo", clientv3.WithPrefix()); err != nil {
|
response, err := client.Get(ctx, "foo", clientv3.WithPrefix())
|
||||||
|
if err != nil {
|
||||||
l.t.Error(err)
|
l.t.Error(err)
|
||||||
} else if response.Count != 1 {
|
} else if response.Count != 1 {
|
||||||
l.t.Errorf("expected 1 responses, got %d", response.Count)
|
l.t.Errorf("expected 1 responses, got %d", response.Count)
|
||||||
|
@ -253,30 +255,47 @@ func (l *EtcdClientTestListener) EtcdClientCreated(client *EtcdClient) {
|
||||||
} else if string(response.Kvs[0].Value) != "1" {
|
} else if string(response.Kvs[0].Value) != "1" {
|
||||||
l.t.Errorf("expected value \"1\", got \"%s\"", string(response.Kvs[0].Value))
|
l.t.Errorf("expected value \"1\", got \"%s\"", string(response.Kvs[0].Value))
|
||||||
}
|
}
|
||||||
l.initialWg.Wait()
|
|
||||||
|
close(l.initial)
|
||||||
|
nextRevision := response.Header.Revision + 1
|
||||||
|
for l.ctx.Err() == nil {
|
||||||
|
var err error
|
||||||
|
if nextRevision, err = client.Watch(clientv3.WithRequireLeader(l.ctx), "foo", nextRevision, l, clientv3.WithPrefix()); err != nil {
|
||||||
|
l.t.Error(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *EtcdClientTestListener) EtcdWatchCreated(client *EtcdClient, key string) {
|
func (l *EtcdClientTestListener) EtcdWatchCreated(client *EtcdClient, key string) {
|
||||||
l.initialWg.Done()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *EtcdClientTestListener) EtcdKeyUpdated(client *EtcdClient, key string, value []byte) {
|
func (l *EtcdClientTestListener) EtcdKeyUpdated(client *EtcdClient, key string, value []byte, prevValue []byte) {
|
||||||
l.events <- etcdEvent{
|
evt := etcdEvent{
|
||||||
t: clientv3.EventTypePut,
|
t: clientv3.EventTypePut,
|
||||||
key: string(key),
|
key: string(key),
|
||||||
value: string(value),
|
value: string(value),
|
||||||
}
|
}
|
||||||
|
if len(prevValue) > 0 {
|
||||||
|
evt.prevValue = string(prevValue)
|
||||||
|
}
|
||||||
|
l.events <- evt
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *EtcdClientTestListener) EtcdKeyDeleted(client *EtcdClient, key string) {
|
func (l *EtcdClientTestListener) EtcdKeyDeleted(client *EtcdClient, key string, prevValue []byte) {
|
||||||
l.events <- etcdEvent{
|
evt := etcdEvent{
|
||||||
t: clientv3.EventTypeDelete,
|
t: clientv3.EventTypeDelete,
|
||||||
key: string(key),
|
key: string(key),
|
||||||
}
|
}
|
||||||
|
if len(prevValue) > 0 {
|
||||||
|
evt.prevValue = string(prevValue)
|
||||||
|
}
|
||||||
|
l.events <- evt
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_EtcdClient_Watch(t *testing.T) {
|
func Test_EtcdClient_Watch(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
etcd, client := NewEtcdClientForTest(t)
|
etcd, client := NewEtcdClientForTest(t)
|
||||||
|
|
||||||
SetEtcdValue(etcd, "foo/a", []byte("1"))
|
SetEtcdValue(etcd, "foo/a", []byte("1"))
|
||||||
|
@ -299,11 +318,23 @@ func Test_EtcdClient_Watch(t *testing.T) {
|
||||||
t.Errorf("expected value %s, got %s", "2", event.value)
|
t.Errorf("expected value %s, got %s", "2", event.value)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
SetEtcdValue(etcd, "foo/a", []byte("3"))
|
||||||
|
event = <-listener.events
|
||||||
|
if event.t != clientv3.EventTypePut {
|
||||||
|
t.Errorf("expected type %d, got %d", clientv3.EventTypePut, event.t)
|
||||||
|
} else if event.key != "foo/a" {
|
||||||
|
t.Errorf("expected key %s, got %s", "foo/a", event.key)
|
||||||
|
} else if event.value != "3" {
|
||||||
|
t.Errorf("expected value %s, got %s", "3", event.value)
|
||||||
|
}
|
||||||
|
|
||||||
DeleteEtcdValue(etcd, "foo/a")
|
DeleteEtcdValue(etcd, "foo/a")
|
||||||
event = <-listener.events
|
event = <-listener.events
|
||||||
if event.t != clientv3.EventTypeDelete {
|
if event.t != clientv3.EventTypeDelete {
|
||||||
t.Errorf("expected type %d, got %d", clientv3.EventTypeDelete, event.t)
|
t.Errorf("expected type %d, got %d", clientv3.EventTypeDelete, event.t)
|
||||||
} else if event.key != "foo/a" {
|
} else if event.key != "foo/a" {
|
||||||
t.Errorf("expected key %s, got %s", "foo/a", event.key)
|
t.Errorf("expected key %s, got %s", "foo/a", event.key)
|
||||||
|
} else if event.prevValue != "3" {
|
||||||
|
t.Errorf("expected previous value %s, got %s", "3", event.prevValue)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -22,6 +22,7 @@
|
||||||
package signaling
|
package signaling
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
|
@ -54,7 +55,9 @@ type FileWatcher struct {
|
||||||
target string
|
target string
|
||||||
callback FileWatcherCallback
|
callback FileWatcherCallback
|
||||||
|
|
||||||
watcher *fsnotify.Watcher
|
watcher *fsnotify.Watcher
|
||||||
|
closeCtx context.Context
|
||||||
|
closeFunc context.CancelFunc
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFileWatcher(filename string, callback FileWatcherCallback) (*FileWatcher, error) {
|
func NewFileWatcher(filename string, callback FileWatcherCallback) (*FileWatcher, error) {
|
||||||
|
@ -73,24 +76,28 @@ func NewFileWatcher(filename string, callback FileWatcherCallback) (*FileWatcher
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if filename != realFilename {
|
if err := watcher.Add(path.Dir(filename)); err != nil {
|
||||||
if err := watcher.Add(path.Dir(filename)); err != nil {
|
watcher.Close() // nolint
|
||||||
watcher.Close() // nolint
|
return nil, err
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
closeCtx, closeFunc := context.WithCancel(context.Background())
|
||||||
|
|
||||||
w := &FileWatcher{
|
w := &FileWatcher{
|
||||||
filename: filename,
|
filename: filename,
|
||||||
target: realFilename,
|
target: realFilename,
|
||||||
callback: callback,
|
callback: callback,
|
||||||
watcher: watcher,
|
watcher: watcher,
|
||||||
|
|
||||||
|
closeCtx: closeCtx,
|
||||||
|
closeFunc: closeFunc,
|
||||||
}
|
}
|
||||||
go w.run()
|
go w.run()
|
||||||
return w, nil
|
return w, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FileWatcher) Close() error {
|
func (f *FileWatcher) Close() error {
|
||||||
|
f.closeFunc()
|
||||||
return f.watcher.Close()
|
return f.watcher.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -154,6 +161,8 @@ func (f *FileWatcher) run() {
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("Error watching %s: %s", f.filename, err)
|
log.Printf("Error watching %s: %s", f.filename, err)
|
||||||
|
case <-f.closeCtx.Done():
|
||||||
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -47,6 +47,53 @@ func TestFileWatcher_NotExist(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFileWatcher_File(t *testing.T) {
|
func TestFileWatcher_File(t *testing.T) {
|
||||||
|
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
||||||
|
tmpdir := t.TempDir()
|
||||||
|
filename := path.Join(tmpdir, "test.txt")
|
||||||
|
if err := os.WriteFile(filename, []byte("Hello world!"), 0644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
modified := make(chan struct{})
|
||||||
|
w, err := NewFileWatcher(filename, func(filename string) {
|
||||||
|
modified <- struct{}{}
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer w.Close()
|
||||||
|
|
||||||
|
if err := os.WriteFile(filename, []byte("Updated"), 0644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
<-modified
|
||||||
|
|
||||||
|
ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-modified:
|
||||||
|
t.Error("should not have received another event")
|
||||||
|
case <-ctxTimeout.Done():
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.WriteFile(filename, []byte("Updated"), 0644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
<-modified
|
||||||
|
|
||||||
|
ctxTimeout, cancel = context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-modified:
|
||||||
|
t.Error("should not have received another event")
|
||||||
|
case <-ctxTimeout.Done():
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFileWatcher_Rename(t *testing.T) {
|
||||||
tmpdir := t.TempDir()
|
tmpdir := t.TempDir()
|
||||||
filename := path.Join(tmpdir, "test.txt")
|
filename := path.Join(tmpdir, "test.txt")
|
||||||
if err := os.WriteFile(filename, []byte("Hello world!"), 0644); err != nil {
|
if err := os.WriteFile(filename, []byte("Hello world!"), 0644); err != nil {
|
||||||
|
@ -62,10 +109,10 @@ func TestFileWatcher_File(t *testing.T) {
|
||||||
}
|
}
|
||||||
defer w.Close()
|
defer w.Close()
|
||||||
|
|
||||||
if err := os.WriteFile(filename, []byte("Updated"), 0644); err != nil {
|
filename2 := path.Join(tmpdir, "test.txt.tmp")
|
||||||
|
if err := os.WriteFile(filename2, []byte("Updated"), 0644); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
<-modified
|
|
||||||
|
|
||||||
ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
|
ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
@ -76,7 +123,7 @@ func TestFileWatcher_File(t *testing.T) {
|
||||||
case <-ctxTimeout.Done():
|
case <-ctxTimeout.Done():
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := os.WriteFile(filename, []byte("Updated"), 0644); err != nil {
|
if err := os.Rename(filename2, filename); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
<-modified
|
<-modified
|
||||||
|
@ -211,3 +258,53 @@ func TestFileWatcher_OtherSymlink(t *testing.T) {
|
||||||
case <-ctxTimeout.Done():
|
case <-ctxTimeout.Done():
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestFileWatcher_RenameSymlinkTarget(t *testing.T) {
|
||||||
|
tmpdir := t.TempDir()
|
||||||
|
sourceFilename1 := path.Join(tmpdir, "test1.txt")
|
||||||
|
if err := os.WriteFile(sourceFilename1, []byte("Hello world!"), 0644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
filename := path.Join(tmpdir, "test.txt")
|
||||||
|
if err := os.Symlink(sourceFilename1, filename); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
modified := make(chan struct{})
|
||||||
|
w, err := NewFileWatcher(filename, func(filename string) {
|
||||||
|
modified <- struct{}{}
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer w.Close()
|
||||||
|
|
||||||
|
sourceFilename2 := path.Join(tmpdir, "test1.txt.tmp")
|
||||||
|
if err := os.WriteFile(sourceFilename2, []byte("Updated"), 0644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-modified:
|
||||||
|
t.Error("should not have received another event")
|
||||||
|
case <-ctxTimeout.Done():
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.Rename(sourceFilename2, sourceFilename1); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
<-modified
|
||||||
|
|
||||||
|
ctxTimeout, cancel = context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-modified:
|
||||||
|
t.Error("should not have received another event")
|
||||||
|
case <-ctxTimeout.Done():
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -97,6 +97,7 @@ func runConcurrentFlags(t *testing.T, count int, f func()) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFlagsConcurrentAdd(t *testing.T) {
|
func TestFlagsConcurrentAdd(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var flags Flags
|
var flags Flags
|
||||||
|
|
||||||
var added atomic.Int32
|
var added atomic.Int32
|
||||||
|
@ -111,6 +112,7 @@ func TestFlagsConcurrentAdd(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFlagsConcurrentRemove(t *testing.T) {
|
func TestFlagsConcurrentRemove(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var flags Flags
|
var flags Flags
|
||||||
flags.Set(1)
|
flags.Set(1)
|
||||||
|
|
||||||
|
@ -126,6 +128,7 @@ func TestFlagsConcurrentRemove(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFlagsConcurrentSet(t *testing.T) {
|
func TestFlagsConcurrentSet(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var flags Flags
|
var flags Flags
|
||||||
|
|
||||||
var set atomic.Int32
|
var set atomic.Int32
|
||||||
|
|
61
geoip.go
61
geoip.go
|
@ -35,6 +35,7 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/dlintw/goconf"
|
||||||
"github.com/oschwald/maxminddb-golang"
|
"github.com/oschwald/maxminddb-golang"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -276,3 +277,63 @@ func IsValidContinent(continent string) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func LoadGeoIPOverrides(config *goconf.ConfigFile, ignoreErrors bool) (map[*net.IPNet]string, error) {
|
||||||
|
options, _ := GetStringOptions(config, "geoip-overrides", true)
|
||||||
|
if len(options) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
geoipOverrides := make(map[*net.IPNet]string, len(options))
|
||||||
|
for option, value := range options {
|
||||||
|
var ip net.IP
|
||||||
|
var ipNet *net.IPNet
|
||||||
|
if strings.Contains(option, "/") {
|
||||||
|
_, ipNet, err = net.ParseCIDR(option)
|
||||||
|
if err != nil {
|
||||||
|
if ignoreErrors {
|
||||||
|
log.Printf("could not parse CIDR %s (%s), skipping", option, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("could not parse CIDR %s: %s", option, err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ip = net.ParseIP(option)
|
||||||
|
if ip == nil {
|
||||||
|
if ignoreErrors {
|
||||||
|
log.Printf("could not parse IP %s, skipping", option)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("could not parse IP %s", option)
|
||||||
|
}
|
||||||
|
|
||||||
|
var mask net.IPMask
|
||||||
|
if ipv4 := ip.To4(); ipv4 != nil {
|
||||||
|
mask = net.CIDRMask(32, 32)
|
||||||
|
} else {
|
||||||
|
mask = net.CIDRMask(128, 128)
|
||||||
|
}
|
||||||
|
ipNet = &net.IPNet{
|
||||||
|
IP: ip,
|
||||||
|
Mask: mask,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
value = strings.ToUpper(strings.TrimSpace(value))
|
||||||
|
if value == "" {
|
||||||
|
log.Printf("IP %s doesn't have a country assigned, skipping", option)
|
||||||
|
continue
|
||||||
|
} else if !IsValidCountry(value) {
|
||||||
|
log.Printf("Country %s for IP %s is invalid, skipping", value, option)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("Using country %s for %s", value, ipNet)
|
||||||
|
geoipOverrides[ipNet] = value
|
||||||
|
}
|
||||||
|
|
||||||
|
return geoipOverrides, nil
|
||||||
|
}
|
||||||
|
|
|
@ -78,6 +78,7 @@ func GetGeoIpUrlForTest(t *testing.T) string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGeoLookup(t *testing.T) {
|
func TestGeoLookup(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
reader, err := NewGeoLookupFromUrl(GetGeoIpUrlForTest(t))
|
reader, err := NewGeoLookupFromUrl(GetGeoIpUrlForTest(t))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
@ -92,6 +93,7 @@ func TestGeoLookup(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGeoLookupCaching(t *testing.T) {
|
func TestGeoLookupCaching(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
reader, err := NewGeoLookupFromUrl(GetGeoIpUrlForTest(t))
|
reader, err := NewGeoLookupFromUrl(GetGeoIpUrlForTest(t))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
@ -138,6 +140,7 @@ func TestGeoLookupContinent(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGeoLookupCloseEmpty(t *testing.T) {
|
func TestGeoLookupCloseEmpty(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
reader, err := NewGeoLookupFromUrl("ignore-url")
|
reader, err := NewGeoLookupFromUrl("ignore-url")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
@ -146,6 +149,7 @@ func TestGeoLookupCloseEmpty(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGeoLookupFromFile(t *testing.T) {
|
func TestGeoLookupFromFile(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
geoIpUrl := GetGeoIpUrlForTest(t)
|
geoIpUrl := GetGeoIpUrlForTest(t)
|
||||||
|
|
||||||
resp, err := http.Get(geoIpUrl)
|
resp, err := http.Get(geoIpUrl)
|
||||||
|
|
53
go.mod
53
go.mod
|
@ -1,6 +1,6 @@
|
||||||
module github.com/strukturag/nextcloud-spreed-signaling
|
module github.com/strukturag/nextcloud-spreed-signaling
|
||||||
|
|
||||||
go 1.20
|
go 1.21
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/dlintw/goconf v0.0.0-20120228082610-dcc070983490
|
github.com/dlintw/goconf v0.0.0-20120228082610-dcc070983490
|
||||||
|
@ -11,19 +11,20 @@ require (
|
||||||
github.com/gorilla/securecookie v1.1.2
|
github.com/gorilla/securecookie v1.1.2
|
||||||
github.com/gorilla/websocket v1.5.1
|
github.com/gorilla/websocket v1.5.1
|
||||||
github.com/mailru/easyjson v0.7.7
|
github.com/mailru/easyjson v0.7.7
|
||||||
github.com/nats-io/nats-server/v2 v2.10.12
|
github.com/nats-io/nats-server/v2 v2.10.16
|
||||||
github.com/nats-io/nats.go v1.34.0
|
github.com/nats-io/nats.go v1.35.0
|
||||||
github.com/notedit/janus-go v0.0.0-20200517101215-10eb8b95d1a0
|
github.com/notedit/janus-go v0.0.0-20200517101215-10eb8b95d1a0
|
||||||
github.com/oschwald/maxminddb-golang v1.12.0
|
github.com/oschwald/maxminddb-golang v1.12.0
|
||||||
github.com/pion/sdp/v3 v3.0.9
|
github.com/pion/sdp/v3 v3.0.9
|
||||||
github.com/prometheus/client_golang v1.19.0
|
github.com/prometheus/client_golang v1.19.1
|
||||||
go.etcd.io/etcd/api/v3 v3.5.12
|
go.etcd.io/etcd/api/v3 v3.5.14
|
||||||
go.etcd.io/etcd/client/pkg/v3 v3.5.12
|
go.etcd.io/etcd/client/pkg/v3 v3.5.14
|
||||||
go.etcd.io/etcd/client/v3 v3.5.12
|
go.etcd.io/etcd/client/v3 v3.5.14
|
||||||
go.etcd.io/etcd/server/v3 v3.5.12
|
go.etcd.io/etcd/server/v3 v3.5.14
|
||||||
google.golang.org/grpc v1.62.1
|
go.uber.org/zap v1.27.0
|
||||||
|
google.golang.org/grpc v1.64.0
|
||||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0
|
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0
|
||||||
google.golang.org/protobuf v1.33.0
|
google.golang.org/protobuf v1.34.1
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
|
@ -46,26 +47,26 @@ require (
|
||||||
github.com/jonboulle/clockwork v0.2.2 // indirect
|
github.com/jonboulle/clockwork v0.2.2 // indirect
|
||||||
github.com/josharian/intern v1.0.0 // indirect
|
github.com/josharian/intern v1.0.0 // indirect
|
||||||
github.com/json-iterator/go v1.1.12 // indirect
|
github.com/json-iterator/go v1.1.12 // indirect
|
||||||
github.com/klauspost/compress v1.17.7 // indirect
|
github.com/klauspost/compress v1.17.8 // indirect
|
||||||
github.com/minio/highwayhash v1.0.2 // indirect
|
github.com/minio/highwayhash v1.0.2 // indirect
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||||
github.com/nats-io/jwt/v2 v2.5.5 // indirect
|
github.com/nats-io/jwt/v2 v2.5.7 // indirect
|
||||||
github.com/nats-io/nkeys v0.4.7 // indirect
|
github.com/nats-io/nkeys v0.4.7 // indirect
|
||||||
github.com/nats-io/nuid v1.0.1 // indirect
|
github.com/nats-io/nuid v1.0.1 // indirect
|
||||||
github.com/pion/randutil v0.1.0 // indirect
|
github.com/pion/randutil v0.1.0 // indirect
|
||||||
github.com/prometheus/client_model v0.5.0 // indirect
|
github.com/prometheus/client_model v0.5.0 // indirect
|
||||||
github.com/prometheus/common v0.48.0 // indirect
|
github.com/prometheus/common v0.48.0 // indirect
|
||||||
github.com/prometheus/procfs v0.12.0 // indirect
|
github.com/prometheus/procfs v0.12.0 // indirect
|
||||||
github.com/sirupsen/logrus v1.7.0 // indirect
|
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||||
github.com/soheilhy/cmux v0.1.5 // indirect
|
github.com/soheilhy/cmux v0.1.5 // indirect
|
||||||
github.com/spf13/pflag v1.0.5 // indirect
|
github.com/spf13/pflag v1.0.5 // indirect
|
||||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect
|
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect
|
||||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
|
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
|
||||||
go.etcd.io/bbolt v1.3.8 // indirect
|
go.etcd.io/bbolt v1.3.10 // indirect
|
||||||
go.etcd.io/etcd/client/v2 v2.305.12 // indirect
|
go.etcd.io/etcd/client/v2 v2.305.14 // indirect
|
||||||
go.etcd.io/etcd/pkg/v3 v3.5.12 // indirect
|
go.etcd.io/etcd/pkg/v3 v3.5.14 // indirect
|
||||||
go.etcd.io/etcd/raft/v3 v3.5.12 // indirect
|
go.etcd.io/etcd/raft/v3 v3.5.14 // indirect
|
||||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect
|
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect
|
||||||
go.opentelemetry.io/otel v1.20.0 // indirect
|
go.opentelemetry.io/otel v1.20.0 // indirect
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect
|
||||||
|
@ -74,17 +75,15 @@ require (
|
||||||
go.opentelemetry.io/otel/sdk v1.20.0 // indirect
|
go.opentelemetry.io/otel/sdk v1.20.0 // indirect
|
||||||
go.opentelemetry.io/otel/trace v1.20.0 // indirect
|
go.opentelemetry.io/otel/trace v1.20.0 // indirect
|
||||||
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
|
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
|
||||||
go.uber.org/atomic v1.7.0 // indirect
|
go.uber.org/multierr v1.10.0 // indirect
|
||||||
go.uber.org/multierr v1.6.0 // indirect
|
golang.org/x/crypto v0.23.0 // indirect
|
||||||
go.uber.org/zap v1.17.0 // indirect
|
golang.org/x/net v0.23.0 // indirect
|
||||||
golang.org/x/crypto v0.21.0 // indirect
|
golang.org/x/sys v0.20.0 // indirect
|
||||||
golang.org/x/net v0.21.0 // indirect
|
golang.org/x/text v0.15.0 // indirect
|
||||||
golang.org/x/sys v0.18.0 // indirect
|
|
||||||
golang.org/x/text v0.14.0 // indirect
|
|
||||||
golang.org/x/time v0.5.0 // indirect
|
golang.org/x/time v0.5.0 // indirect
|
||||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect
|
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect
|
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
|
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
|
||||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||||
sigs.k8s.io/yaml v1.2.0 // indirect
|
sigs.k8s.io/yaml v1.2.0 // indirect
|
||||||
|
|
129
go.sum
129
go.sum
|
@ -1,8 +1,10 @@
|
||||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
|
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
|
||||||
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
|
cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU=
|
||||||
|
cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls=
|
||||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||||
|
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||||
|
@ -15,8 +17,10 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj
|
||||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
|
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
|
||||||
|
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
|
||||||
github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
|
github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
|
||||||
|
github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
|
||||||
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
|
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
|
||||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||||
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
|
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
|
||||||
|
@ -33,6 +37,7 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
|
||||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||||
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
|
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
|
||||||
|
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
|
||||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||||
|
@ -51,6 +56,7 @@ github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOW
|
||||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
|
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
|
||||||
|
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
|
||||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
@ -62,8 +68,10 @@ github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9
|
||||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||||
|
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||||
|
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
@ -89,12 +97,14 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
|
||||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
|
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
|
||||||
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||||
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
|
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
|
||||||
|
@ -104,12 +114,12 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||||
github.com/nats-io/jwt/v2 v2.5.5 h1:ROfXb50elFq5c9+1ztaUbdlrArNFl2+fQWP6B8HGEq4=
|
github.com/nats-io/jwt/v2 v2.5.7 h1:j5lH1fUXCnJnY8SsQeB/a/z9Azgu2bYIDvtPVNdxe2c=
|
||||||
github.com/nats-io/jwt/v2 v2.5.5/go.mod h1:ZdWS1nZa6WMZfFwwgpEaqBV8EPGVgOTDHN/wTbz0Y5A=
|
github.com/nats-io/jwt/v2 v2.5.7/go.mod h1:ZdWS1nZa6WMZfFwwgpEaqBV8EPGVgOTDHN/wTbz0Y5A=
|
||||||
github.com/nats-io/nats-server/v2 v2.10.12 h1:G6u+RDrHkw4bkwn7I911O5jqys7jJVRY6MwgndyUsnE=
|
github.com/nats-io/nats-server/v2 v2.10.16 h1:2jXaiydp5oB/nAx/Ytf9fdCi9QN6ItIc9eehX8kwVV0=
|
||||||
github.com/nats-io/nats-server/v2 v2.10.12/go.mod h1:H1n6zXtYLFCgXcf/SF8QNTSIFuS8tyZQMN9NguUHdEs=
|
github.com/nats-io/nats-server/v2 v2.10.16/go.mod h1:Pksi38H2+6xLe1vQx0/EA4bzetM0NqyIHcIbmgXSkIU=
|
||||||
github.com/nats-io/nats.go v1.34.0 h1:fnxnPCNiwIG5w08rlMcEKTUw4AV/nKyGCOJE8TdhSPk=
|
github.com/nats-io/nats.go v1.35.0 h1:XFNqNM7v5B+MQMKqVGAyHwYhyKb48jrenXNxIU20ULk=
|
||||||
github.com/nats-io/nats.go v1.34.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
|
github.com/nats-io/nats.go v1.35.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
|
||||||
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
|
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
|
||||||
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
|
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
|
||||||
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
||||||
|
@ -123,12 +133,11 @@ github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
|
||||||
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
|
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
|
||||||
github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY=
|
github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY=
|
||||||
github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M=
|
github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M=
|
||||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
|
||||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
|
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
|
||||||
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
|
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
|
||||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
||||||
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
||||||
|
@ -138,9 +147,10 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k
|
||||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||||
|
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||||
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
|
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
|
||||||
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||||
|
@ -165,22 +175,22 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5
|
||||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
|
go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
|
||||||
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
|
go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
|
||||||
go.etcd.io/etcd/api/v3 v3.5.12 h1:W4sw5ZoU2Juc9gBWuLk5U6fHfNVyY1WC5g9uiXZio/c=
|
go.etcd.io/etcd/api/v3 v3.5.14 h1:vHObSCxyB9zlF60w7qzAdTcGaglbJOpSj1Xj9+WGxq0=
|
||||||
go.etcd.io/etcd/api/v3 v3.5.12/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4=
|
go.etcd.io/etcd/api/v3 v3.5.14/go.mod h1:BmtWcRlQvwa1h3G2jvKYwIQy4PkHlDej5t7uLMUdJUU=
|
||||||
go.etcd.io/etcd/client/pkg/v3 v3.5.12 h1:EYDL6pWwyOsylrQyLp2w+HkQ46ATiOvoEdMarindU2A=
|
go.etcd.io/etcd/client/pkg/v3 v3.5.14 h1:SaNH6Y+rVEdxfpA2Jr5wkEvN6Zykme5+YnbCkxvuWxQ=
|
||||||
go.etcd.io/etcd/client/pkg/v3 v3.5.12/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4=
|
go.etcd.io/etcd/client/pkg/v3 v3.5.14/go.mod h1:8uMgAokyG1czCtIdsq+AGyYQMvpIKnSvPjFMunkgeZI=
|
||||||
go.etcd.io/etcd/client/v2 v2.305.12 h1:0m4ovXYo1CHaA/Mp3X/Fak5sRNIWf01wk/X1/G3sGKI=
|
go.etcd.io/etcd/client/v2 v2.305.14 h1:v5ASLyFuMlVd/gKU6uf6Cod+vSWKa4Rsv9+eghl0Nwk=
|
||||||
go.etcd.io/etcd/client/v2 v2.305.12/go.mod h1:aQ/yhsxMu+Oht1FOupSr60oBvcS9cKXHrzBpDsPTf9E=
|
go.etcd.io/etcd/client/v2 v2.305.14/go.mod h1:AWYT0lLEkBuqVaGw0UVMtA4rxCb3/oGE8PxZ8cUS4tI=
|
||||||
go.etcd.io/etcd/client/v3 v3.5.12 h1:v5lCPXn1pf1Uu3M4laUE2hp/geOTc5uPcYYsNe1lDxg=
|
go.etcd.io/etcd/client/v3 v3.5.14 h1:CWfRs4FDaDoSz81giL7zPpZH2Z35tbOrAJkkjMqOupg=
|
||||||
go.etcd.io/etcd/client/v3 v3.5.12/go.mod h1:tSbBCakoWmmddL+BKVAJHa9km+O/E+bumDe9mSbPiqw=
|
go.etcd.io/etcd/client/v3 v3.5.14/go.mod h1:k3XfdV/VIHy/97rqWjoUzrj9tk7GgJGH9J8L4dNXmAk=
|
||||||
go.etcd.io/etcd/pkg/v3 v3.5.12 h1:OK2fZKI5hX/+BTK76gXSTyZMrbnARyX9S643GenNGb8=
|
go.etcd.io/etcd/pkg/v3 v3.5.14 h1:keuxhJiDCPjTKpW77GxJnnVVD5n4IsfvkDaqiqUMNEQ=
|
||||||
go.etcd.io/etcd/pkg/v3 v3.5.12/go.mod h1:UVwg/QIMoJncyeb/YxvJBJCE/NEwtHWashqc8A1nj/M=
|
go.etcd.io/etcd/pkg/v3 v3.5.14/go.mod h1:7o+DL6a7DYz9KSjWByX+NGmQPYinoH3D36VAu/B3JqA=
|
||||||
go.etcd.io/etcd/raft/v3 v3.5.12 h1:7r22RufdDsq2z3STjoR7Msz6fYH8tmbkdheGfwJNRmU=
|
go.etcd.io/etcd/raft/v3 v3.5.14 h1:mHnpbljpBBftmK+YUfp+49ivaCc126aBPLAnwDw0DnE=
|
||||||
go.etcd.io/etcd/raft/v3 v3.5.12/go.mod h1:ERQuZVe79PI6vcC3DlKBukDCLja/L7YMu29B74Iwj4U=
|
go.etcd.io/etcd/raft/v3 v3.5.14/go.mod h1:WnIK5blyJGRKsHA3efovdNoLv9QELTZHzpDOVIAuL2s=
|
||||||
go.etcd.io/etcd/server/v3 v3.5.12 h1:EtMjsbfyfkwZuA2JlKOiBfuGkFCekv5H178qjXypbG8=
|
go.etcd.io/etcd/server/v3 v3.5.14 h1:l/3gdiSSoGU6MyKAYiL+8WSOMq9ySG+NqQ04euLtZfY=
|
||||||
go.etcd.io/etcd/server/v3 v3.5.12/go.mod h1:axB0oCjMy+cemo5290/CutIjoxlfA6KVYKD1w0uue10=
|
go.etcd.io/etcd/server/v3 v3.5.14/go.mod h1:SPh0rUtGNDgOZd/aTbkAUYZV+5FFHw5sdbGnO2/byw0=
|
||||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M=
|
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M=
|
||||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M=
|
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M=
|
||||||
go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc=
|
go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc=
|
||||||
|
@ -198,20 +208,19 @@ go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCD
|
||||||
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
||||||
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
||||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||||
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
|
||||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
|
||||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||||
|
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||||
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
|
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
|
||||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||||
go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
|
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||||
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
|
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
|
||||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
|
@ -229,31 +238,34 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
|
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
|
||||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
|
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
|
||||||
|
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
|
||||||
|
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
|
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
|
||||||
|
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
|
||||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
@ -271,30 +283,32 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
|
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
|
||||||
|
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
|
||||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
|
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
|
||||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
|
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU=
|
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4=
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA=
|
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||||
google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
|
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
|
||||||
google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
|
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
|
||||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA=
|
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA=
|
||||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y=
|
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y=
|
||||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
|
||||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
@ -303,7 +317,6 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
|
198
grpc_client.go
198
grpc_client.go
|
@ -24,7 +24,9 @@ package signaling
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"log"
|
"log"
|
||||||
"net"
|
"net"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
@ -37,6 +39,8 @@ import (
|
||||||
clientv3 "go.etcd.io/etcd/client/v3"
|
clientv3 "go.etcd.io/etcd/client/v3"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
codes "google.golang.org/grpc/codes"
|
codes "google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
"google.golang.org/grpc/resolver"
|
"google.golang.org/grpc/resolver"
|
||||||
status "google.golang.org/grpc/status"
|
status "google.golang.org/grpc/status"
|
||||||
)
|
)
|
||||||
|
@ -49,6 +53,8 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
ErrNoSuchResumeId = fmt.Errorf("unknown resume id")
|
||||||
|
|
||||||
customResolverPrefix atomic.Uint64
|
customResolverPrefix atomic.Uint64
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -136,9 +142,9 @@ func NewGrpcClient(target string, ip net.IP, opts ...grpc.DialOption) (*GrpcClie
|
||||||
hostname: hostname,
|
hostname: hostname,
|
||||||
}
|
}
|
||||||
opts = append(opts, grpc.WithResolvers(resolver))
|
opts = append(opts, grpc.WithResolvers(resolver))
|
||||||
conn, err = grpc.Dial(fmt.Sprintf("%s://%s", resolver.Scheme(), target), opts...)
|
conn, err = grpc.NewClient(fmt.Sprintf("%s://%s", resolver.Scheme(), target), opts...)
|
||||||
} else {
|
} else {
|
||||||
conn, err = grpc.Dial(target, opts...)
|
conn, err = grpc.NewClient(target, opts...)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -183,6 +189,26 @@ func (c *GrpcClient) GetServerId(ctx context.Context) (string, error) {
|
||||||
return response.GetServerId(), nil
|
return response.GetServerId(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *GrpcClient) LookupResumeId(ctx context.Context, resumeId string) (*LookupResumeIdReply, error) {
|
||||||
|
statsGrpcClientCalls.WithLabelValues("LookupResumeId").Inc()
|
||||||
|
// TODO: Remove debug logging
|
||||||
|
log.Printf("Lookup resume id %s on %s", resumeId, c.Target())
|
||||||
|
response, err := c.impl.LookupResumeId(ctx, &LookupResumeIdRequest{
|
||||||
|
ResumeId: resumeId,
|
||||||
|
}, grpc.WaitForReady(true))
|
||||||
|
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
|
||||||
|
return nil, ErrNoSuchResumeId
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if sessionId := response.GetSessionId(); sessionId == "" {
|
||||||
|
return nil, ErrNoSuchResumeId
|
||||||
|
}
|
||||||
|
|
||||||
|
return response, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (c *GrpcClient) LookupSessionId(ctx context.Context, roomSessionId string, disconnectReason string) (string, error) {
|
func (c *GrpcClient) LookupSessionId(ctx context.Context, roomSessionId string, disconnectReason string) (string, error) {
|
||||||
statsGrpcClientCalls.WithLabelValues("LookupSessionId").Inc()
|
statsGrpcClientCalls.WithLabelValues("LookupSessionId").Inc()
|
||||||
// TODO: Remove debug logging
|
// TODO: Remove debug logging
|
||||||
|
@ -256,6 +282,86 @@ func (c *GrpcClient) GetSessionCount(ctx context.Context, u *url.URL) (uint32, e
|
||||||
return response.GetCount(), nil
|
return response.GetCount(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type ProxySessionReceiver interface {
|
||||||
|
RemoteAddr() string
|
||||||
|
Country() string
|
||||||
|
UserAgent() string
|
||||||
|
|
||||||
|
OnProxyMessage(message *ServerSessionMessage) error
|
||||||
|
OnProxyClose(err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type SessionProxy struct {
|
||||||
|
sessionId string
|
||||||
|
receiver ProxySessionReceiver
|
||||||
|
|
||||||
|
sendMu sync.Mutex
|
||||||
|
client RpcSessions_ProxySessionClient
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SessionProxy) recvPump() {
|
||||||
|
var closeError error
|
||||||
|
defer func() {
|
||||||
|
p.receiver.OnProxyClose(closeError)
|
||||||
|
if err := p.Close(); err != nil {
|
||||||
|
log.Printf("Error closing proxy for session %s: %s", p.sessionId, err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
for {
|
||||||
|
msg, err := p.client.Recv()
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("Error receiving message from proxy for session %s: %s", p.sessionId, err)
|
||||||
|
closeError = err
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := p.receiver.OnProxyMessage(msg); err != nil {
|
||||||
|
log.Printf("Error processing message %+v from proxy for session %s: %s", msg, p.sessionId, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SessionProxy) Send(message *ClientSessionMessage) error {
|
||||||
|
p.sendMu.Lock()
|
||||||
|
defer p.sendMu.Unlock()
|
||||||
|
return p.client.Send(message)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *SessionProxy) Close() error {
|
||||||
|
p.sendMu.Lock()
|
||||||
|
defer p.sendMu.Unlock()
|
||||||
|
return p.client.CloseSend()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *GrpcClient) ProxySession(ctx context.Context, sessionId string, receiver ProxySessionReceiver) (*SessionProxy, error) {
|
||||||
|
statsGrpcClientCalls.WithLabelValues("ProxySession").Inc()
|
||||||
|
md := metadata.Pairs(
|
||||||
|
"sessionId", sessionId,
|
||||||
|
"remoteAddr", receiver.RemoteAddr(),
|
||||||
|
"country", receiver.Country(),
|
||||||
|
"userAgent", receiver.UserAgent(),
|
||||||
|
)
|
||||||
|
client, err := c.impl.ProxySession(metadata.NewOutgoingContext(ctx, md), grpc.WaitForReady(true))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
proxy := &SessionProxy{
|
||||||
|
sessionId: sessionId,
|
||||||
|
receiver: receiver,
|
||||||
|
|
||||||
|
client: client,
|
||||||
|
}
|
||||||
|
|
||||||
|
go proxy.recvPump()
|
||||||
|
return proxy, nil
|
||||||
|
}
|
||||||
|
|
||||||
type grpcClientsList struct {
|
type grpcClientsList struct {
|
||||||
clients []*GrpcClient
|
clients []*GrpcClient
|
||||||
entry *DnsMonitorEntry
|
entry *DnsMonitorEntry
|
||||||
|
@ -274,21 +380,27 @@ type GrpcClients struct {
|
||||||
targetPrefix string
|
targetPrefix string
|
||||||
targetInformation map[string]*GrpcTargetInformationEtcd
|
targetInformation map[string]*GrpcTargetInformationEtcd
|
||||||
dialOptions atomic.Value // []grpc.DialOption
|
dialOptions atomic.Value // []grpc.DialOption
|
||||||
|
creds credentials.TransportCredentials
|
||||||
|
|
||||||
initializedCtx context.Context
|
initializedCtx context.Context
|
||||||
initializedFunc context.CancelFunc
|
initializedFunc context.CancelFunc
|
||||||
initializedWg sync.WaitGroup
|
|
||||||
wakeupChanForTesting chan struct{}
|
wakeupChanForTesting chan struct{}
|
||||||
selfCheckWaitGroup sync.WaitGroup
|
selfCheckWaitGroup sync.WaitGroup
|
||||||
|
|
||||||
|
closeCtx context.Context
|
||||||
|
closeFunc context.CancelFunc
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewGrpcClients(config *goconf.ConfigFile, etcdClient *EtcdClient, dnsMonitor *DnsMonitor) (*GrpcClients, error) {
|
func NewGrpcClients(config *goconf.ConfigFile, etcdClient *EtcdClient, dnsMonitor *DnsMonitor) (*GrpcClients, error) {
|
||||||
initializedCtx, initializedFunc := context.WithCancel(context.Background())
|
initializedCtx, initializedFunc := context.WithCancel(context.Background())
|
||||||
|
closeCtx, closeFunc := context.WithCancel(context.Background())
|
||||||
result := &GrpcClients{
|
result := &GrpcClients{
|
||||||
dnsMonitor: dnsMonitor,
|
dnsMonitor: dnsMonitor,
|
||||||
etcdClient: etcdClient,
|
etcdClient: etcdClient,
|
||||||
initializedCtx: initializedCtx,
|
initializedCtx: initializedCtx,
|
||||||
initializedFunc: initializedFunc,
|
initializedFunc: initializedFunc,
|
||||||
|
closeCtx: closeCtx,
|
||||||
|
closeFunc: closeFunc,
|
||||||
}
|
}
|
||||||
if err := result.load(config, false); err != nil {
|
if err := result.load(config, false); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -302,6 +414,13 @@ func (c *GrpcClients) load(config *goconf.ConfigFile, fromReload bool) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if c.creds != nil {
|
||||||
|
if cr, ok := c.creds.(*reloadableCredentials); ok {
|
||||||
|
cr.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.creds = creds
|
||||||
|
|
||||||
opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)}
|
opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)}
|
||||||
c.dialOptions.Store(opts)
|
c.dialOptions.Store(opts)
|
||||||
|
|
||||||
|
@ -375,6 +494,10 @@ loop:
|
||||||
|
|
||||||
id, err := c.getServerIdWithTimeout(ctx, client)
|
id, err := c.getServerIdWithTimeout(ctx, client)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if errors.Is(err, context.Canceled) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
if status.Code(err) != codes.Canceled {
|
if status.Code(err) != codes.Canceled {
|
||||||
log.Printf("Error checking GRPC server id of %s, retrying in %s: %s", client.Target(), backoff.NextWait(), err)
|
log.Printf("Error checking GRPC server id of %s, retrying in %s: %s", client.Target(), backoff.NextWait(), err)
|
||||||
}
|
}
|
||||||
|
@ -474,12 +597,13 @@ func (c *GrpcClients) loadTargetsStatic(config *goconf.ConfigFile, fromReload bo
|
||||||
}
|
}
|
||||||
|
|
||||||
c.selfCheckWaitGroup.Add(1)
|
c.selfCheckWaitGroup.Add(1)
|
||||||
go c.checkIsSelf(context.Background(), target, client)
|
go c.checkIsSelf(c.closeCtx, target, client)
|
||||||
|
|
||||||
log.Printf("Adding %s as GRPC target", client.Target())
|
log.Printf("Adding %s as GRPC target", client.Target())
|
||||||
entry, found := clientsMap[target]
|
entry, found := clientsMap[target]
|
||||||
if !found {
|
if !found {
|
||||||
entry = &grpcClientsList{}
|
entry = &grpcClientsList{}
|
||||||
|
clientsMap[target] = entry
|
||||||
}
|
}
|
||||||
entry.clients = append(entry.clients, client)
|
entry.clients = append(entry.clients, client)
|
||||||
clients = append(clients, client)
|
clients = append(clients, client)
|
||||||
|
@ -548,7 +672,7 @@ func (c *GrpcClients) onLookup(entry *DnsMonitorEntry, all []net.IP, added []net
|
||||||
}
|
}
|
||||||
|
|
||||||
c.selfCheckWaitGroup.Add(1)
|
c.selfCheckWaitGroup.Add(1)
|
||||||
go c.checkIsSelf(context.Background(), target, client)
|
go c.checkIsSelf(c.closeCtx, target, client)
|
||||||
|
|
||||||
log.Printf("Adding %s as GRPC target", client.Target())
|
log.Printf("Adding %s as GRPC target", client.Target())
|
||||||
newClients = append(newClients, client)
|
newClients = append(newClients, client)
|
||||||
|
@ -586,54 +710,72 @@ func (c *GrpcClients) loadTargetsEtcd(config *goconf.ConfigFile, fromReload bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *GrpcClients) EtcdClientCreated(client *EtcdClient) {
|
func (c *GrpcClients) EtcdClientCreated(client *EtcdClient) {
|
||||||
c.initializedWg.Add(1)
|
|
||||||
go func() {
|
go func() {
|
||||||
if err := client.Watch(context.Background(), c.targetPrefix, c, clientv3.WithPrefix()); err != nil {
|
if err := client.WaitForConnection(c.closeCtx); err != nil {
|
||||||
log.Printf("Error processing watch for %s: %s", c.targetPrefix, err)
|
if errors.Is(err, context.Canceled) {
|
||||||
}
|
return
|
||||||
}()
|
}
|
||||||
|
|
||||||
go func() {
|
|
||||||
if err := client.WaitForConnection(context.Background()); err != nil {
|
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
backoff, _ := NewExponentialBackoff(initialWaitDelay, maxWaitDelay)
|
backoff, _ := NewExponentialBackoff(initialWaitDelay, maxWaitDelay)
|
||||||
for {
|
var nextRevision int64
|
||||||
response, err := c.getGrpcTargets(client, c.targetPrefix)
|
for c.closeCtx.Err() == nil {
|
||||||
|
response, err := c.getGrpcTargets(c.closeCtx, client, c.targetPrefix)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == context.DeadlineExceeded {
|
if errors.Is(err, context.Canceled) {
|
||||||
|
return
|
||||||
|
} else if errors.Is(err, context.DeadlineExceeded) {
|
||||||
log.Printf("Timeout getting initial list of GRPC targets, retry in %s", backoff.NextWait())
|
log.Printf("Timeout getting initial list of GRPC targets, retry in %s", backoff.NextWait())
|
||||||
} else {
|
} else {
|
||||||
log.Printf("Could not get initial list of GRPC targets, retry in %s: %s", backoff.NextWait(), err)
|
log.Printf("Could not get initial list of GRPC targets, retry in %s: %s", backoff.NextWait(), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
backoff.Wait(context.Background())
|
backoff.Wait(c.closeCtx)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, ev := range response.Kvs {
|
for _, ev := range response.Kvs {
|
||||||
c.EtcdKeyUpdated(client, string(ev.Key), ev.Value)
|
c.EtcdKeyUpdated(client, string(ev.Key), ev.Value, nil)
|
||||||
}
|
}
|
||||||
c.initializedWg.Wait()
|
|
||||||
c.initializedFunc()
|
c.initializedFunc()
|
||||||
return
|
nextRevision = response.Header.Revision + 1
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
prevRevision := nextRevision
|
||||||
|
backoff.Reset()
|
||||||
|
for c.closeCtx.Err() == nil {
|
||||||
|
var err error
|
||||||
|
if nextRevision, err = client.Watch(c.closeCtx, c.targetPrefix, nextRevision, c, clientv3.WithPrefix()); err != nil {
|
||||||
|
log.Printf("Error processing watch for %s (%s), retry in %s", c.targetPrefix, err, backoff.NextWait())
|
||||||
|
backoff.Wait(c.closeCtx)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if nextRevision != prevRevision {
|
||||||
|
backoff.Reset()
|
||||||
|
prevRevision = nextRevision
|
||||||
|
} else {
|
||||||
|
log.Printf("Processing watch for %s interrupted, retry in %s", c.targetPrefix, backoff.NextWait())
|
||||||
|
backoff.Wait(c.closeCtx)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *GrpcClients) EtcdWatchCreated(client *EtcdClient, key string) {
|
func (c *GrpcClients) EtcdWatchCreated(client *EtcdClient, key string) {
|
||||||
c.initializedWg.Done()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *GrpcClients) getGrpcTargets(client *EtcdClient, targetPrefix string) (*clientv3.GetResponse, error) {
|
func (c *GrpcClients) getGrpcTargets(ctx context.Context, client *EtcdClient, targetPrefix string) (*clientv3.GetResponse, error) {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
return client.Get(ctx, targetPrefix, clientv3.WithPrefix())
|
return client.Get(ctx, targetPrefix, clientv3.WithPrefix())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *GrpcClients) EtcdKeyUpdated(client *EtcdClient, key string, data []byte) {
|
func (c *GrpcClients) EtcdKeyUpdated(client *EtcdClient, key string, data []byte, prevValue []byte) {
|
||||||
var info GrpcTargetInformationEtcd
|
var info GrpcTargetInformationEtcd
|
||||||
if err := json.Unmarshal(data, &info); err != nil {
|
if err := json.Unmarshal(data, &info); err != nil {
|
||||||
log.Printf("Could not decode GRPC target %s=%s: %s", key, string(data), err)
|
log.Printf("Could not decode GRPC target %s=%s: %s", key, string(data), err)
|
||||||
|
@ -666,7 +808,7 @@ func (c *GrpcClients) EtcdKeyUpdated(client *EtcdClient, key string, data []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
c.selfCheckWaitGroup.Add(1)
|
c.selfCheckWaitGroup.Add(1)
|
||||||
go c.checkIsSelf(context.Background(), info.Address, cl)
|
go c.checkIsSelf(c.closeCtx, info.Address, cl)
|
||||||
|
|
||||||
log.Printf("Adding %s as GRPC target", cl.Target())
|
log.Printf("Adding %s as GRPC target", cl.Target())
|
||||||
|
|
||||||
|
@ -682,7 +824,7 @@ func (c *GrpcClients) EtcdKeyUpdated(client *EtcdClient, key string, data []byte
|
||||||
c.wakeupForTesting()
|
c.wakeupForTesting()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *GrpcClients) EtcdKeyDeleted(client *EtcdClient, key string) {
|
func (c *GrpcClients) EtcdKeyDeleted(client *EtcdClient, key string, prevValue []byte) {
|
||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
defer c.mu.Unlock()
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
|
@ -766,6 +908,12 @@ func (c *GrpcClients) Close() {
|
||||||
if c.etcdClient != nil {
|
if c.etcdClient != nil {
|
||||||
c.etcdClient.RemoveListener(c)
|
c.etcdClient.RemoveListener(c)
|
||||||
}
|
}
|
||||||
|
if c.creds != nil {
|
||||||
|
if cr, ok := c.creds.(*reloadableCredentials); ok {
|
||||||
|
cr.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.closeFunc()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *GrpcClients) GetClients() []*GrpcClient {
|
func (c *GrpcClients) GetClients() []*GrpcClient {
|
||||||
|
|
|
@ -112,27 +112,32 @@ func waitForEvent(ctx context.Context, t *testing.T, ch <-chan struct{}) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_GrpcClients_EtcdInitial(t *testing.T) {
|
func Test_GrpcClients_EtcdInitial(t *testing.T) {
|
||||||
_, addr1 := NewGrpcServerForTest(t)
|
CatchLogForTest(t)
|
||||||
_, addr2 := NewGrpcServerForTest(t)
|
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
||||||
|
_, addr1 := NewGrpcServerForTest(t)
|
||||||
|
_, addr2 := NewGrpcServerForTest(t)
|
||||||
|
|
||||||
etcd := NewEtcdForTest(t)
|
etcd := NewEtcdForTest(t)
|
||||||
|
|
||||||
SetEtcdValue(etcd, "/grpctargets/one", []byte("{\"address\":\""+addr1+"\"}"))
|
SetEtcdValue(etcd, "/grpctargets/one", []byte("{\"address\":\""+addr1+"\"}"))
|
||||||
SetEtcdValue(etcd, "/grpctargets/two", []byte("{\"address\":\""+addr2+"\"}"))
|
SetEtcdValue(etcd, "/grpctargets/two", []byte("{\"address\":\""+addr2+"\"}"))
|
||||||
|
|
||||||
client, _ := NewGrpcClientsWithEtcdForTest(t, etcd)
|
client, _ := NewGrpcClientsWithEtcdForTest(t, etcd)
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
if err := client.WaitForInitialized(ctx); err != nil {
|
if err := client.WaitForInitialized(ctx); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if clients := client.GetClients(); len(clients) != 2 {
|
if clients := client.GetClients(); len(clients) != 2 {
|
||||||
t.Errorf("Expected two clients, got %+v", clients)
|
t.Errorf("Expected two clients, got %+v", clients)
|
||||||
}
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_GrpcClients_EtcdUpdate(t *testing.T) {
|
func Test_GrpcClients_EtcdUpdate(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
etcd := NewEtcdForTest(t)
|
etcd := NewEtcdForTest(t)
|
||||||
client, _ := NewGrpcClientsWithEtcdForTest(t, etcd)
|
client, _ := NewGrpcClientsWithEtcdForTest(t, etcd)
|
||||||
ch := client.getWakeupChannelForTesting()
|
ch := client.getWakeupChannelForTesting()
|
||||||
|
@ -187,6 +192,8 @@ func Test_GrpcClients_EtcdUpdate(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_GrpcClients_EtcdIgnoreSelf(t *testing.T) {
|
func Test_GrpcClients_EtcdIgnoreSelf(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
etcd := NewEtcdForTest(t)
|
etcd := NewEtcdForTest(t)
|
||||||
client, _ := NewGrpcClientsWithEtcdForTest(t, etcd)
|
client, _ := NewGrpcClientsWithEtcdForTest(t, etcd)
|
||||||
ch := client.getWakeupChannelForTesting()
|
ch := client.getWakeupChannelForTesting()
|
||||||
|
@ -231,60 +238,65 @@ func Test_GrpcClients_EtcdIgnoreSelf(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_GrpcClients_DnsDiscovery(t *testing.T) {
|
func Test_GrpcClients_DnsDiscovery(t *testing.T) {
|
||||||
lookup := newMockDnsLookupForTest(t)
|
CatchLogForTest(t)
|
||||||
target := "testgrpc:12345"
|
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
||||||
ip1 := net.ParseIP("192.168.0.1")
|
lookup := newMockDnsLookupForTest(t)
|
||||||
ip2 := net.ParseIP("192.168.0.2")
|
target := "testgrpc:12345"
|
||||||
targetWithIp1 := fmt.Sprintf("%s (%s)", target, ip1)
|
ip1 := net.ParseIP("192.168.0.1")
|
||||||
targetWithIp2 := fmt.Sprintf("%s (%s)", target, ip2)
|
ip2 := net.ParseIP("192.168.0.2")
|
||||||
lookup.Set("testgrpc", []net.IP{ip1})
|
targetWithIp1 := fmt.Sprintf("%s (%s)", target, ip1)
|
||||||
client, dnsMonitor := NewGrpcClientsForTest(t, target)
|
targetWithIp2 := fmt.Sprintf("%s (%s)", target, ip2)
|
||||||
ch := client.getWakeupChannelForTesting()
|
lookup.Set("testgrpc", []net.IP{ip1})
|
||||||
|
client, dnsMonitor := NewGrpcClientsForTest(t, target)
|
||||||
|
ch := client.getWakeupChannelForTesting()
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
dnsMonitor.checkHostnames()
|
dnsMonitor.checkHostnames()
|
||||||
if clients := client.GetClients(); len(clients) != 1 {
|
if clients := client.GetClients(); len(clients) != 1 {
|
||||||
t.Errorf("Expected one client, got %+v", clients)
|
t.Errorf("Expected one client, got %+v", clients)
|
||||||
} else if clients[0].Target() != targetWithIp1 {
|
} else if clients[0].Target() != targetWithIp1 {
|
||||||
t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target())
|
t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target())
|
||||||
} else if !clients[0].ip.Equal(ip1) {
|
} else if !clients[0].ip.Equal(ip1) {
|
||||||
t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip)
|
t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip)
|
||||||
}
|
}
|
||||||
|
|
||||||
lookup.Set("testgrpc", []net.IP{ip1, ip2})
|
lookup.Set("testgrpc", []net.IP{ip1, ip2})
|
||||||
drainWakeupChannel(ch)
|
drainWakeupChannel(ch)
|
||||||
dnsMonitor.checkHostnames()
|
dnsMonitor.checkHostnames()
|
||||||
waitForEvent(ctx, t, ch)
|
waitForEvent(ctx, t, ch)
|
||||||
|
|
||||||
if clients := client.GetClients(); len(clients) != 2 {
|
if clients := client.GetClients(); len(clients) != 2 {
|
||||||
t.Errorf("Expected two client, got %+v", clients)
|
t.Errorf("Expected two client, got %+v", clients)
|
||||||
} else if clients[0].Target() != targetWithIp1 {
|
} else if clients[0].Target() != targetWithIp1 {
|
||||||
t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target())
|
t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target())
|
||||||
} else if !clients[0].ip.Equal(ip1) {
|
} else if !clients[0].ip.Equal(ip1) {
|
||||||
t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip)
|
t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip)
|
||||||
} else if clients[1].Target() != targetWithIp2 {
|
} else if clients[1].Target() != targetWithIp2 {
|
||||||
t.Errorf("Expected target %s, got %s", targetWithIp2, clients[1].Target())
|
t.Errorf("Expected target %s, got %s", targetWithIp2, clients[1].Target())
|
||||||
} else if !clients[1].ip.Equal(ip2) {
|
} else if !clients[1].ip.Equal(ip2) {
|
||||||
t.Errorf("Expected IP %s, got %s", ip2, clients[1].ip)
|
t.Errorf("Expected IP %s, got %s", ip2, clients[1].ip)
|
||||||
}
|
}
|
||||||
|
|
||||||
lookup.Set("testgrpc", []net.IP{ip2})
|
lookup.Set("testgrpc", []net.IP{ip2})
|
||||||
drainWakeupChannel(ch)
|
drainWakeupChannel(ch)
|
||||||
dnsMonitor.checkHostnames()
|
dnsMonitor.checkHostnames()
|
||||||
waitForEvent(ctx, t, ch)
|
waitForEvent(ctx, t, ch)
|
||||||
|
|
||||||
if clients := client.GetClients(); len(clients) != 1 {
|
if clients := client.GetClients(); len(clients) != 1 {
|
||||||
t.Errorf("Expected one client, got %+v", clients)
|
t.Errorf("Expected one client, got %+v", clients)
|
||||||
} else if clients[0].Target() != targetWithIp2 {
|
} else if clients[0].Target() != targetWithIp2 {
|
||||||
t.Errorf("Expected target %s, got %s", targetWithIp2, clients[0].Target())
|
t.Errorf("Expected target %s, got %s", targetWithIp2, clients[0].Target())
|
||||||
} else if !clients[0].ip.Equal(ip2) {
|
} else if !clients[0].ip.Equal(ip2) {
|
||||||
t.Errorf("Expected IP %s, got %s", ip2, clients[0].ip)
|
t.Errorf("Expected IP %s, got %s", ip2, clients[0].ip)
|
||||||
}
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_GrpcClients_DnsDiscoveryInitialFailed(t *testing.T) {
|
func Test_GrpcClients_DnsDiscoveryInitialFailed(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
lookup := newMockDnsLookupForTest(t)
|
lookup := newMockDnsLookupForTest(t)
|
||||||
target := "testgrpc:12345"
|
target := "testgrpc:12345"
|
||||||
ip1 := net.ParseIP("192.168.0.1")
|
ip1 := net.ParseIP("192.168.0.1")
|
||||||
|
@ -320,55 +332,58 @@ func Test_GrpcClients_DnsDiscoveryInitialFailed(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_GrpcClients_Encryption(t *testing.T) {
|
func Test_GrpcClients_Encryption(t *testing.T) {
|
||||||
serverKey, err := rsa.GenerateKey(rand.Reader, 1024)
|
CatchLogForTest(t)
|
||||||
if err != nil {
|
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
||||||
t.Fatal(err)
|
serverKey, err := rsa.GenerateKey(rand.Reader, 1024)
|
||||||
}
|
if err != nil {
|
||||||
clientKey, err := rsa.GenerateKey(rand.Reader, 1024)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
serverCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Server cert", serverKey)
|
|
||||||
clientCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Testing client", clientKey)
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
|
||||||
serverPrivkeyFile := path.Join(dir, "server-privkey.pem")
|
|
||||||
serverPubkeyFile := path.Join(dir, "server-pubkey.pem")
|
|
||||||
serverCertFile := path.Join(dir, "server-cert.pem")
|
|
||||||
WritePrivateKey(serverKey, serverPrivkeyFile) // nolint
|
|
||||||
WritePublicKey(&serverKey.PublicKey, serverPubkeyFile) // nolint
|
|
||||||
os.WriteFile(serverCertFile, serverCert, 0755) // nolint
|
|
||||||
clientPrivkeyFile := path.Join(dir, "client-privkey.pem")
|
|
||||||
clientPubkeyFile := path.Join(dir, "client-pubkey.pem")
|
|
||||||
clientCertFile := path.Join(dir, "client-cert.pem")
|
|
||||||
WritePrivateKey(clientKey, clientPrivkeyFile) // nolint
|
|
||||||
WritePublicKey(&clientKey.PublicKey, clientPubkeyFile) // nolint
|
|
||||||
os.WriteFile(clientCertFile, clientCert, 0755) // nolint
|
|
||||||
|
|
||||||
serverConfig := goconf.NewConfigFile()
|
|
||||||
serverConfig.AddOption("grpc", "servercertificate", serverCertFile)
|
|
||||||
serverConfig.AddOption("grpc", "serverkey", serverPrivkeyFile)
|
|
||||||
serverConfig.AddOption("grpc", "clientca", clientCertFile)
|
|
||||||
_, addr := NewGrpcServerForTestWithConfig(t, serverConfig)
|
|
||||||
|
|
||||||
clientConfig := goconf.NewConfigFile()
|
|
||||||
clientConfig.AddOption("grpc", "targets", addr)
|
|
||||||
clientConfig.AddOption("grpc", "clientcertificate", clientCertFile)
|
|
||||||
clientConfig.AddOption("grpc", "clientkey", clientPrivkeyFile)
|
|
||||||
clientConfig.AddOption("grpc", "serverca", serverCertFile)
|
|
||||||
clients, _ := NewGrpcClientsForTestWithConfig(t, clientConfig, nil)
|
|
||||||
|
|
||||||
ctx, cancel1 := context.WithTimeout(context.Background(), time.Second)
|
|
||||||
defer cancel1()
|
|
||||||
|
|
||||||
if err := clients.WaitForInitialized(ctx); err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, client := range clients.GetClients() {
|
|
||||||
if _, err := client.GetServerId(ctx); err != nil {
|
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
clientKey, err := rsa.GenerateKey(rand.Reader, 1024)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
serverCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Server cert", serverKey)
|
||||||
|
clientCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Testing client", clientKey)
|
||||||
|
|
||||||
|
dir := t.TempDir()
|
||||||
|
serverPrivkeyFile := path.Join(dir, "server-privkey.pem")
|
||||||
|
serverPubkeyFile := path.Join(dir, "server-pubkey.pem")
|
||||||
|
serverCertFile := path.Join(dir, "server-cert.pem")
|
||||||
|
WritePrivateKey(serverKey, serverPrivkeyFile) // nolint
|
||||||
|
WritePublicKey(&serverKey.PublicKey, serverPubkeyFile) // nolint
|
||||||
|
os.WriteFile(serverCertFile, serverCert, 0755) // nolint
|
||||||
|
clientPrivkeyFile := path.Join(dir, "client-privkey.pem")
|
||||||
|
clientPubkeyFile := path.Join(dir, "client-pubkey.pem")
|
||||||
|
clientCertFile := path.Join(dir, "client-cert.pem")
|
||||||
|
WritePrivateKey(clientKey, clientPrivkeyFile) // nolint
|
||||||
|
WritePublicKey(&clientKey.PublicKey, clientPubkeyFile) // nolint
|
||||||
|
os.WriteFile(clientCertFile, clientCert, 0755) // nolint
|
||||||
|
|
||||||
|
serverConfig := goconf.NewConfigFile()
|
||||||
|
serverConfig.AddOption("grpc", "servercertificate", serverCertFile)
|
||||||
|
serverConfig.AddOption("grpc", "serverkey", serverPrivkeyFile)
|
||||||
|
serverConfig.AddOption("grpc", "clientca", clientCertFile)
|
||||||
|
_, addr := NewGrpcServerForTestWithConfig(t, serverConfig)
|
||||||
|
|
||||||
|
clientConfig := goconf.NewConfigFile()
|
||||||
|
clientConfig.AddOption("grpc", "targets", addr)
|
||||||
|
clientConfig.AddOption("grpc", "clientcertificate", clientCertFile)
|
||||||
|
clientConfig.AddOption("grpc", "clientkey", clientPrivkeyFile)
|
||||||
|
clientConfig.AddOption("grpc", "serverca", serverCertFile)
|
||||||
|
clients, _ := NewGrpcClientsForTestWithConfig(t, clientConfig, nil)
|
||||||
|
|
||||||
|
ctx, cancel1 := context.WithTimeout(context.Background(), time.Second)
|
||||||
|
defer cancel1()
|
||||||
|
|
||||||
|
if err := clients.WaitForInitialized(ctx); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, client := range clients.GetClients() {
|
||||||
|
if _, err := client.GetServerId(ctx); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@ -125,6 +125,15 @@ func (c *reloadableCredentials) OverrideServerName(serverName string) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *reloadableCredentials) Close() {
|
||||||
|
if c.loader != nil {
|
||||||
|
c.loader.Close()
|
||||||
|
}
|
||||||
|
if c.pool != nil {
|
||||||
|
c.pool.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func NewReloadableCredentials(config *goconf.ConfigFile, server bool) (credentials.TransportCredentials, error) {
|
func NewReloadableCredentials(config *goconf.ConfigFile, server bool) (credentials.TransportCredentials, error) {
|
||||||
var prefix string
|
var prefix string
|
||||||
var caPrefix string
|
var caPrefix string
|
||||||
|
|
229
grpc_remote_client.go
Normal file
229
grpc_remote_client.go
Normal file
|
@ -0,0 +1,229 @@
|
||||||
|
/**
|
||||||
|
* Standalone signaling server for the Nextcloud Spreed app.
|
||||||
|
* Copyright (C) 2024 struktur AG
|
||||||
|
*
|
||||||
|
* @author Joachim Bauch <bauch@struktur.de>
|
||||||
|
*
|
||||||
|
* @license GNU AGPL version 3 or any later version
|
||||||
|
*
|
||||||
|
* This program is free software: you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License as published by
|
||||||
|
* the Free Software Foundation, either version 3 of the License, or
|
||||||
|
* (at your option) any later version.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful,
|
||||||
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
* GNU Affero General Public License for more details.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
package signaling
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
grpcRemoteClientMessageQueue = 16
|
||||||
|
)
|
||||||
|
|
||||||
|
func getMD(md metadata.MD, key string) string {
|
||||||
|
if values := md.Get(key); len(values) > 0 {
|
||||||
|
return values[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// remoteGrpcClient is a remote client connecting from a GRPC proxy to a Hub.
|
||||||
|
type remoteGrpcClient struct {
|
||||||
|
hub *Hub
|
||||||
|
client RpcSessions_ProxySessionServer
|
||||||
|
|
||||||
|
sessionId string
|
||||||
|
remoteAddr string
|
||||||
|
country string
|
||||||
|
userAgent string
|
||||||
|
|
||||||
|
closeCtx context.Context
|
||||||
|
closeFunc context.CancelCauseFunc
|
||||||
|
|
||||||
|
session atomic.Pointer[Session]
|
||||||
|
messages chan WritableClientMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
func newRemoteGrpcClient(hub *Hub, request RpcSessions_ProxySessionServer) (*remoteGrpcClient, error) {
|
||||||
|
md, found := metadata.FromIncomingContext(request.Context())
|
||||||
|
if !found {
|
||||||
|
return nil, errors.New("no metadata provided")
|
||||||
|
}
|
||||||
|
|
||||||
|
closeCtx, closeFunc := context.WithCancelCause(context.Background())
|
||||||
|
|
||||||
|
result := &remoteGrpcClient{
|
||||||
|
hub: hub,
|
||||||
|
client: request,
|
||||||
|
|
||||||
|
sessionId: getMD(md, "sessionId"),
|
||||||
|
remoteAddr: getMD(md, "remoteAddr"),
|
||||||
|
country: getMD(md, "country"),
|
||||||
|
userAgent: getMD(md, "userAgent"),
|
||||||
|
|
||||||
|
closeCtx: closeCtx,
|
||||||
|
closeFunc: closeFunc,
|
||||||
|
|
||||||
|
messages: make(chan WritableClientMessage, grpcRemoteClientMessageQueue),
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *remoteGrpcClient) readPump() {
|
||||||
|
var closeError error
|
||||||
|
defer func() {
|
||||||
|
c.closeFunc(closeError)
|
||||||
|
c.hub.OnClosed(c)
|
||||||
|
}()
|
||||||
|
|
||||||
|
for {
|
||||||
|
msg, err := c.client.Recv()
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
// Connection was closed locally.
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if status.Code(err) != codes.Canceled {
|
||||||
|
log.Printf("Error reading from remote client for session %s: %s", c.sessionId, err)
|
||||||
|
closeError = err
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
c.hub.OnMessageReceived(c, msg.Message)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *remoteGrpcClient) Context() context.Context {
|
||||||
|
return c.client.Context()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *remoteGrpcClient) RemoteAddr() string {
|
||||||
|
return c.remoteAddr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *remoteGrpcClient) UserAgent() string {
|
||||||
|
return c.userAgent
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *remoteGrpcClient) Country() string {
|
||||||
|
return c.country
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *remoteGrpcClient) IsConnected() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *remoteGrpcClient) IsAuthenticated() bool {
|
||||||
|
return c.GetSession() != nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *remoteGrpcClient) GetSession() Session {
|
||||||
|
session := c.session.Load()
|
||||||
|
if session == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return *session
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *remoteGrpcClient) SetSession(session Session) {
|
||||||
|
if session == nil {
|
||||||
|
c.session.Store(nil)
|
||||||
|
} else {
|
||||||
|
c.session.Store(&session)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *remoteGrpcClient) SendError(e *Error) bool {
|
||||||
|
message := &ServerMessage{
|
||||||
|
Type: "error",
|
||||||
|
Error: e,
|
||||||
|
}
|
||||||
|
return c.SendMessage(message)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *remoteGrpcClient) SendByeResponse(message *ClientMessage) bool {
|
||||||
|
return c.SendByeResponseWithReason(message, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *remoteGrpcClient) SendByeResponseWithReason(message *ClientMessage, reason string) bool {
|
||||||
|
response := &ServerMessage{
|
||||||
|
Type: "bye",
|
||||||
|
}
|
||||||
|
if message != nil {
|
||||||
|
response.Id = message.Id
|
||||||
|
}
|
||||||
|
if reason != "" {
|
||||||
|
if response.Bye == nil {
|
||||||
|
response.Bye = &ByeServerMessage{}
|
||||||
|
}
|
||||||
|
response.Bye.Reason = reason
|
||||||
|
}
|
||||||
|
return c.SendMessage(response)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *remoteGrpcClient) SendMessage(message WritableClientMessage) bool {
|
||||||
|
if c.closeCtx.Err() != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case c.messages <- message:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
log.Printf("Message queue for remote client of session %s is full, not sending %+v", c.sessionId, message)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *remoteGrpcClient) Close() {
|
||||||
|
c.closeFunc(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *remoteGrpcClient) run() error {
|
||||||
|
go c.readPump()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-c.closeCtx.Done():
|
||||||
|
if err := context.Cause(c.closeCtx); err != context.Canceled {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
case msg := <-c.messages:
|
||||||
|
data, err := json.Marshal(msg)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Error marshalling %+v for remote client for session %s: %s", msg, c.sessionId, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := c.client.Send(&ServerSessionMessage{
|
||||||
|
Message: data,
|
||||||
|
}); err != nil {
|
||||||
|
return fmt.Errorf("error sending %+v to remote client for session %s: %w", msg, c.sessionId, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -55,6 +55,14 @@ func init() {
|
||||||
GrpcServerId = hex.EncodeToString(md.Sum(nil))
|
GrpcServerId = hex.EncodeToString(md.Sum(nil))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type GrpcServerHub interface {
|
||||||
|
GetSessionByResumeId(resumeId string) Session
|
||||||
|
GetSessionByPublicId(sessionId string) Session
|
||||||
|
GetSessionIdByRoomSessionId(roomSessionId string) (string, error)
|
||||||
|
|
||||||
|
GetBackend(u *url.URL) *Backend
|
||||||
|
}
|
||||||
|
|
||||||
type GrpcServer struct {
|
type GrpcServer struct {
|
||||||
UnimplementedRpcBackendServer
|
UnimplementedRpcBackendServer
|
||||||
UnimplementedRpcInternalServer
|
UnimplementedRpcInternalServer
|
||||||
|
@ -66,12 +74,12 @@ type GrpcServer struct {
|
||||||
listener net.Listener
|
listener net.Listener
|
||||||
serverId string // can be overwritten from tests
|
serverId string // can be overwritten from tests
|
||||||
|
|
||||||
hub *Hub
|
hub GrpcServerHub
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewGrpcServer(config *goconf.ConfigFile) (*GrpcServer, error) {
|
func NewGrpcServer(config *goconf.ConfigFile) (*GrpcServer, error) {
|
||||||
var listener net.Listener
|
var listener net.Listener
|
||||||
if addr, _ := config.GetString("grpc", "listen"); addr != "" {
|
if addr, _ := GetStringOptionWithEnv(config, "grpc", "listen"); addr != "" {
|
||||||
var err error
|
var err error
|
||||||
listener, err = net.Listen("tcp", addr)
|
listener, err = net.Listen("tcp", addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -108,13 +116,30 @@ func (s *GrpcServer) Run() error {
|
||||||
|
|
||||||
func (s *GrpcServer) Close() {
|
func (s *GrpcServer) Close() {
|
||||||
s.conn.GracefulStop()
|
s.conn.GracefulStop()
|
||||||
|
if cr, ok := s.creds.(*reloadableCredentials); ok {
|
||||||
|
cr.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *GrpcServer) LookupResumeId(ctx context.Context, request *LookupResumeIdRequest) (*LookupResumeIdReply, error) {
|
||||||
|
statsGrpcServerCalls.WithLabelValues("LookupResumeId").Inc()
|
||||||
|
// TODO: Remove debug logging
|
||||||
|
log.Printf("Lookup session for resume id %s", request.ResumeId)
|
||||||
|
session := s.hub.GetSessionByResumeId(request.ResumeId)
|
||||||
|
if session == nil {
|
||||||
|
return nil, status.Error(codes.NotFound, "no such room session id")
|
||||||
|
}
|
||||||
|
|
||||||
|
return &LookupResumeIdReply{
|
||||||
|
SessionId: session.PublicId(),
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *GrpcServer) LookupSessionId(ctx context.Context, request *LookupSessionIdRequest) (*LookupSessionIdReply, error) {
|
func (s *GrpcServer) LookupSessionId(ctx context.Context, request *LookupSessionIdRequest) (*LookupSessionIdReply, error) {
|
||||||
statsGrpcServerCalls.WithLabelValues("LookupSessionId").Inc()
|
statsGrpcServerCalls.WithLabelValues("LookupSessionId").Inc()
|
||||||
// TODO: Remove debug logging
|
// TODO: Remove debug logging
|
||||||
log.Printf("Lookup session id for room session id %s", request.RoomSessionId)
|
log.Printf("Lookup session id for room session id %s", request.RoomSessionId)
|
||||||
sid, err := s.hub.roomSessions.GetSessionId(request.RoomSessionId)
|
sid, err := s.hub.GetSessionIdByRoomSessionId(request.RoomSessionId)
|
||||||
if errors.Is(err, ErrNoSuchRoomSession) {
|
if errors.Is(err, ErrNoSuchRoomSession) {
|
||||||
return nil, status.Error(codes.NotFound, "no such room session id")
|
return nil, status.Error(codes.NotFound, "no such room session id")
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
|
@ -204,7 +229,7 @@ func (s *GrpcServer) GetSessionCount(ctx context.Context, request *GetSessionCou
|
||||||
return nil, status.Error(codes.InvalidArgument, "invalid url")
|
return nil, status.Error(codes.InvalidArgument, "invalid url")
|
||||||
}
|
}
|
||||||
|
|
||||||
backend := s.hub.backend.GetBackend(u)
|
backend := s.hub.GetBackend(u)
|
||||||
if backend == nil {
|
if backend == nil {
|
||||||
return nil, status.Error(codes.NotFound, "no such backend")
|
return nil, status.Error(codes.NotFound, "no such backend")
|
||||||
}
|
}
|
||||||
|
@ -213,3 +238,21 @@ func (s *GrpcServer) GetSessionCount(ctx context.Context, request *GetSessionCou
|
||||||
Count: uint32(backend.Len()),
|
Count: uint32(backend.Len()),
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *GrpcServer) ProxySession(request RpcSessions_ProxySessionServer) error {
|
||||||
|
statsGrpcServerCalls.WithLabelValues("ProxySession").Inc()
|
||||||
|
hub, ok := s.hub.(*Hub)
|
||||||
|
if !ok {
|
||||||
|
return status.Error(codes.Internal, "invalid hub type")
|
||||||
|
|
||||||
|
}
|
||||||
|
client, err := newRemoteGrpcClient(hub, request)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
sid := hub.registerClient(client)
|
||||||
|
defer hub.unregisterClient(sid)
|
||||||
|
|
||||||
|
return client.run()
|
||||||
|
}
|
||||||
|
|
|
@ -98,6 +98,7 @@ func NewGrpcServerForTest(t *testing.T) (server *GrpcServer, addr string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_GrpcServer_ReloadCerts(t *testing.T) {
|
func Test_GrpcServer_ReloadCerts(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
key, err := rsa.GenerateKey(rand.Reader, 1024)
|
key, err := rsa.GenerateKey(rand.Reader, 1024)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
@ -178,6 +179,7 @@ func Test_GrpcServer_ReloadCerts(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_GrpcServer_ReloadCA(t *testing.T) {
|
func Test_GrpcServer_ReloadCA(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
serverKey, err := rsa.GenerateKey(rand.Reader, 1024)
|
serverKey, err := rsa.GenerateKey(rand.Reader, 1024)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
|
|
@ -26,8 +26,18 @@ option go_package = "github.com/strukturag/nextcloud-spreed-signaling;signaling"
|
||||||
package signaling;
|
package signaling;
|
||||||
|
|
||||||
service RpcSessions {
|
service RpcSessions {
|
||||||
|
rpc LookupResumeId(LookupResumeIdRequest) returns (LookupResumeIdReply) {}
|
||||||
rpc LookupSessionId(LookupSessionIdRequest) returns (LookupSessionIdReply) {}
|
rpc LookupSessionId(LookupSessionIdRequest) returns (LookupSessionIdReply) {}
|
||||||
rpc IsSessionInCall(IsSessionInCallRequest) returns (IsSessionInCallReply) {}
|
rpc IsSessionInCall(IsSessionInCallRequest) returns (IsSessionInCallReply) {}
|
||||||
|
rpc ProxySession(stream ClientSessionMessage) returns (stream ServerSessionMessage) {}
|
||||||
|
}
|
||||||
|
|
||||||
|
message LookupResumeIdRequest {
|
||||||
|
string resumeId = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message LookupResumeIdReply {
|
||||||
|
string sessionId = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
message LookupSessionIdRequest {
|
message LookupSessionIdRequest {
|
||||||
|
@ -49,3 +59,11 @@ message IsSessionInCallRequest {
|
||||||
message IsSessionInCallReply {
|
message IsSessionInCallReply {
|
||||||
bool inCall = 1;
|
bool inCall = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
message ClientSessionMessage {
|
||||||
|
bytes message = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ServerSessionMessage {
|
||||||
|
bytes message = 1;
|
||||||
|
}
|
||||||
|
|
|
@ -29,6 +29,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestHttpClientPool(t *testing.T) {
|
func TestHttpClientPool(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
if _, err := NewHttpClientPool(0, false); err == nil {
|
if _, err := NewHttpClientPool(0, false); err == nil {
|
||||||
t.Error("should not be possible to create empty pool")
|
t.Error("should not be possible to create empty pool")
|
||||||
}
|
}
|
||||||
|
|
950
hub_test.go
950
hub_test.go
File diff suppressed because it is too large
Load diff
|
@ -258,8 +258,8 @@ type JanusGateway struct {
|
||||||
// return gateway, nil
|
// return gateway, nil
|
||||||
// }
|
// }
|
||||||
|
|
||||||
func NewJanusGateway(wsURL string, listener GatewayListener) (*JanusGateway, error) {
|
func NewJanusGateway(ctx context.Context, wsURL string, listener GatewayListener) (*JanusGateway, error) {
|
||||||
conn, _, err := janusDialer.Dial(wsURL, nil)
|
conn, _, err := janusDialer.DialContext(ctx, wsURL, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -310,7 +310,7 @@ func (gateway *JanusGateway) cancelTransactions() {
|
||||||
t.quit()
|
t.quit()
|
||||||
}(t)
|
}(t)
|
||||||
}
|
}
|
||||||
gateway.transactions = make(map[uint64]*transaction)
|
clear(gateway.transactions)
|
||||||
gateway.Unlock()
|
gateway.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -66,7 +66,7 @@ type McuInitiator interface {
|
||||||
}
|
}
|
||||||
|
|
||||||
type Mcu interface {
|
type Mcu interface {
|
||||||
Start() error
|
Start(ctx context.Context) error
|
||||||
Stop()
|
Stop()
|
||||||
Reload(config *goconf.ConfigFile)
|
Reload(config *goconf.ConfigFile)
|
||||||
|
|
||||||
|
@ -76,7 +76,48 @@ type Mcu interface {
|
||||||
GetStats() interface{}
|
GetStats() interface{}
|
||||||
|
|
||||||
NewPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType StreamType, bitrate int, mediaTypes MediaType, initiator McuInitiator) (McuPublisher, error)
|
NewPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType StreamType, bitrate int, mediaTypes MediaType, initiator McuInitiator) (McuPublisher, error)
|
||||||
NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType) (McuSubscriber, error)
|
NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType, initiator McuInitiator) (McuSubscriber, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PublisherStream contains the available properties when creating a
|
||||||
|
// remote publisher in Janus.
|
||||||
|
type PublisherStream struct {
|
||||||
|
Mid string `json:"mid"`
|
||||||
|
Mindex int `json:"mindex"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
|
||||||
|
Description string `json:"description,omitempty"`
|
||||||
|
Disabled bool `json:"disabled,omitempty"`
|
||||||
|
|
||||||
|
// For types "audio" and "video"
|
||||||
|
Codec string `json:"codec,omitempty"`
|
||||||
|
|
||||||
|
// For type "audio"
|
||||||
|
Stereo bool `json:"stereo,omitempty"`
|
||||||
|
Fec bool `json:"fec,omitempty"`
|
||||||
|
Dtx bool `json:"dtx,omitempty"`
|
||||||
|
|
||||||
|
// For type "video"
|
||||||
|
Simulcast bool `json:"simulcast,omitempty"`
|
||||||
|
Svc bool `json:"svc,omitempty"`
|
||||||
|
|
||||||
|
ProfileH264 string `json:"h264_profile,omitempty"`
|
||||||
|
ProfileVP9 string `json:"vp9_profile,omitempty"`
|
||||||
|
|
||||||
|
ExtIdVideoOrientation int `json:"videoorient_ext_id,omitempty"`
|
||||||
|
ExtIdPlayoutDelay int `json:"playoutdelay_ext_id,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type RemotePublisherController interface {
|
||||||
|
PublisherId() string
|
||||||
|
|
||||||
|
StartPublishing(ctx context.Context, publisher McuRemotePublisherProperties) error
|
||||||
|
GetStreams(ctx context.Context) ([]PublisherStream, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type RemoteMcu interface {
|
||||||
|
NewRemotePublisher(ctx context.Context, listener McuListener, controller RemotePublisherController, streamType StreamType) (McuRemotePublisher, error)
|
||||||
|
NewRemoteSubscriber(ctx context.Context, listener McuListener, publisher McuRemotePublisher) (McuRemoteSubscriber, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
type StreamType string
|
type StreamType string
|
||||||
|
@ -116,6 +157,10 @@ type McuPublisher interface {
|
||||||
|
|
||||||
HasMedia(MediaType) bool
|
HasMedia(MediaType) bool
|
||||||
SetMedia(MediaType)
|
SetMedia(MediaType)
|
||||||
|
|
||||||
|
GetStreams(ctx context.Context) ([]PublisherStream, error)
|
||||||
|
PublishRemote(ctx context.Context, remoteId string, hostname string, port int, rtcpPort int) error
|
||||||
|
UnpublishRemote(ctx context.Context, remoteId string) error
|
||||||
}
|
}
|
||||||
|
|
||||||
type McuSubscriber interface {
|
type McuSubscriber interface {
|
||||||
|
@ -123,3 +168,18 @@ type McuSubscriber interface {
|
||||||
|
|
||||||
Publisher() string
|
Publisher() string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type McuRemotePublisherProperties interface {
|
||||||
|
Port() int
|
||||||
|
RtcpPort() int
|
||||||
|
}
|
||||||
|
|
||||||
|
type McuRemotePublisher interface {
|
||||||
|
McuClient
|
||||||
|
|
||||||
|
McuRemotePublisherProperties
|
||||||
|
}
|
||||||
|
|
||||||
|
type McuRemoteSubscriber interface {
|
||||||
|
McuSubscriber
|
||||||
|
}
|
||||||
|
|
|
@ -28,3 +28,43 @@ import (
|
||||||
func TestCommonMcuStats(t *testing.T) {
|
func TestCommonMcuStats(t *testing.T) {
|
||||||
collectAndLint(t, commonMcuStats...)
|
collectAndLint(t, commonMcuStats...)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type MockMcuListener struct {
|
||||||
|
publicId string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockMcuListener) PublicId() string {
|
||||||
|
return m.publicId
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockMcuListener) OnUpdateOffer(client McuClient, offer map[string]interface{}) {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockMcuListener) OnIceCandidate(client McuClient, candidate interface{}) {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockMcuListener) OnIceCompleted(client McuClient) {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockMcuListener) SubscriberSidUpdated(subscriber McuSubscriber) {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockMcuListener) PublisherClosed(publisher McuPublisher) {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockMcuListener) SubscriberClosed(subscriber McuSubscriber) {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
type MockMcuInitiator struct {
|
||||||
|
country string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockMcuInitiator) Country() string {
|
||||||
|
return m.country
|
||||||
|
}
|
||||||
|
|
1055
mcu_janus.go
1055
mcu_janus.go
File diff suppressed because it is too large
Load diff
216
mcu_janus_client.go
Normal file
216
mcu_janus_client.go
Normal file
|
@ -0,0 +1,216 @@
|
||||||
|
/**
|
||||||
|
* Standalone signaling server for the Nextcloud Spreed app.
|
||||||
|
* Copyright (C) 2017 struktur AG
|
||||||
|
*
|
||||||
|
* @author Joachim Bauch <bauch@struktur.de>
|
||||||
|
*
|
||||||
|
* @license GNU AGPL version 3 or any later version
|
||||||
|
*
|
||||||
|
* This program is free software: you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License as published by
|
||||||
|
* the Free Software Foundation, either version 3 of the License, or
|
||||||
|
* (at your option) any later version.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful,
|
||||||
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
* GNU Affero General Public License for more details.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
package signaling
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"log"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/notedit/janus-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
type mcuJanusClient struct {
|
||||||
|
mcu *mcuJanus
|
||||||
|
listener McuListener
|
||||||
|
mu sync.Mutex // nolint
|
||||||
|
|
||||||
|
id uint64
|
||||||
|
session uint64
|
||||||
|
roomId uint64
|
||||||
|
sid string
|
||||||
|
streamType StreamType
|
||||||
|
maxBitrate int
|
||||||
|
|
||||||
|
handle *JanusHandle
|
||||||
|
handleId uint64
|
||||||
|
closeChan chan struct{}
|
||||||
|
deferred chan func()
|
||||||
|
|
||||||
|
handleEvent func(event *janus.EventMsg)
|
||||||
|
handleHangup func(event *janus.HangupMsg)
|
||||||
|
handleDetached func(event *janus.DetachedMsg)
|
||||||
|
handleConnected func(event *janus.WebRTCUpMsg)
|
||||||
|
handleSlowLink func(event *janus.SlowLinkMsg)
|
||||||
|
handleMedia func(event *janus.MediaMsg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *mcuJanusClient) Id() string {
|
||||||
|
return strconv.FormatUint(c.id, 10)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *mcuJanusClient) Sid() string {
|
||||||
|
return c.sid
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *mcuJanusClient) StreamType() StreamType {
|
||||||
|
return c.streamType
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *mcuJanusClient) MaxBitrate() int {
|
||||||
|
return c.maxBitrate
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *mcuJanusClient) Close(ctx context.Context) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *mcuJanusClient) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *mcuJanusClient) closeClient(ctx context.Context) bool {
|
||||||
|
if handle := c.handle; handle != nil {
|
||||||
|
c.handle = nil
|
||||||
|
close(c.closeChan)
|
||||||
|
if _, err := handle.Detach(ctx); err != nil {
|
||||||
|
if e, ok := err.(*janus.ErrorMsg); !ok || e.Err.Code != JANUS_ERROR_HANDLE_NOT_FOUND {
|
||||||
|
log.Println("Could not detach client", handle.Id, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *mcuJanusClient) run(handle *JanusHandle, closeChan <-chan struct{}) {
|
||||||
|
loop:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case msg := <-handle.Events:
|
||||||
|
switch t := msg.(type) {
|
||||||
|
case *janus.EventMsg:
|
||||||
|
c.handleEvent(t)
|
||||||
|
case *janus.HangupMsg:
|
||||||
|
c.handleHangup(t)
|
||||||
|
case *janus.DetachedMsg:
|
||||||
|
c.handleDetached(t)
|
||||||
|
case *janus.MediaMsg:
|
||||||
|
c.handleMedia(t)
|
||||||
|
case *janus.WebRTCUpMsg:
|
||||||
|
c.handleConnected(t)
|
||||||
|
case *janus.SlowLinkMsg:
|
||||||
|
c.handleSlowLink(t)
|
||||||
|
case *TrickleMsg:
|
||||||
|
c.handleTrickle(t)
|
||||||
|
default:
|
||||||
|
log.Println("Received unsupported event type", msg, reflect.TypeOf(msg))
|
||||||
|
}
|
||||||
|
case f := <-c.deferred:
|
||||||
|
f()
|
||||||
|
case <-closeChan:
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *mcuJanusClient) sendOffer(ctx context.Context, offer map[string]interface{}, callback func(error, map[string]interface{})) {
|
||||||
|
handle := c.handle
|
||||||
|
if handle == nil {
|
||||||
|
callback(ErrNotConnected, nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
configure_msg := map[string]interface{}{
|
||||||
|
"request": "configure",
|
||||||
|
"audio": true,
|
||||||
|
"video": true,
|
||||||
|
"data": true,
|
||||||
|
}
|
||||||
|
answer_msg, err := handle.Message(ctx, configure_msg, offer)
|
||||||
|
if err != nil {
|
||||||
|
callback(err, nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
callback(nil, answer_msg.Jsep)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *mcuJanusClient) sendAnswer(ctx context.Context, answer map[string]interface{}, callback func(error, map[string]interface{})) {
|
||||||
|
handle := c.handle
|
||||||
|
if handle == nil {
|
||||||
|
callback(ErrNotConnected, nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
start_msg := map[string]interface{}{
|
||||||
|
"request": "start",
|
||||||
|
"room": c.roomId,
|
||||||
|
}
|
||||||
|
start_response, err := handle.Message(ctx, start_msg, answer)
|
||||||
|
if err != nil {
|
||||||
|
callback(err, nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Println("Started listener", start_response)
|
||||||
|
callback(nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *mcuJanusClient) sendCandidate(ctx context.Context, candidate interface{}, callback func(error, map[string]interface{})) {
|
||||||
|
handle := c.handle
|
||||||
|
if handle == nil {
|
||||||
|
callback(ErrNotConnected, nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := handle.Trickle(ctx, candidate); err != nil {
|
||||||
|
callback(err, nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
callback(nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *mcuJanusClient) handleTrickle(event *TrickleMsg) {
|
||||||
|
if event.Candidate.Completed {
|
||||||
|
c.listener.OnIceCompleted(c)
|
||||||
|
} else {
|
||||||
|
c.listener.OnIceCandidate(c, event.Candidate)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *mcuJanusClient) selectStream(ctx context.Context, stream *streamSelection, callback func(error, map[string]interface{})) {
|
||||||
|
handle := c.handle
|
||||||
|
if handle == nil {
|
||||||
|
callback(ErrNotConnected, nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if stream == nil || !stream.HasValues() {
|
||||||
|
callback(nil, nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
configure_msg := map[string]interface{}{
|
||||||
|
"request": "configure",
|
||||||
|
}
|
||||||
|
if stream != nil {
|
||||||
|
stream.AddToMessage(configure_msg)
|
||||||
|
}
|
||||||
|
_, err := handle.Message(ctx, configure_msg, nil)
|
||||||
|
if err != nil {
|
||||||
|
callback(err, nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
callback(nil, nil)
|
||||||
|
}
|
457
mcu_janus_publisher.go
Normal file
457
mcu_janus_publisher.go
Normal file
|
@ -0,0 +1,457 @@
|
||||||
|
/**
|
||||||
|
* Standalone signaling server for the Nextcloud Spreed app.
|
||||||
|
* Copyright (C) 2017 struktur AG
|
||||||
|
*
|
||||||
|
* @author Joachim Bauch <bauch@struktur.de>
|
||||||
|
*
|
||||||
|
* @license GNU AGPL version 3 or any later version
|
||||||
|
*
|
||||||
|
* This program is free software: you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License as published by
|
||||||
|
* the Free Software Foundation, either version 3 of the License, or
|
||||||
|
* (at your option) any later version.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful,
|
||||||
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
* GNU Affero General Public License for more details.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
package signaling
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"github.com/notedit/janus-go"
|
||||||
|
"github.com/pion/sdp/v3"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
ExtensionUrlPlayoutDelay = "http://www.webrtc.org/experiments/rtp-hdrext/playout-delay"
|
||||||
|
ExtensionUrlVideoOrientation = "urn:3gpp:video-orientation"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
sdpHasOffer = 1
|
||||||
|
sdpHasAnswer = 2
|
||||||
|
)
|
||||||
|
|
||||||
|
type mcuJanusPublisher struct {
|
||||||
|
mcuJanusClient
|
||||||
|
|
||||||
|
id string
|
||||||
|
bitrate int
|
||||||
|
mediaTypes MediaType
|
||||||
|
stats publisherStatsCounter
|
||||||
|
sdpFlags Flags
|
||||||
|
sdpReady *Closer
|
||||||
|
offerSdp atomic.Pointer[sdp.SessionDescription]
|
||||||
|
answerSdp atomic.Pointer[sdp.SessionDescription]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusPublisher) handleEvent(event *janus.EventMsg) {
|
||||||
|
if videoroom := getPluginStringValue(event.Plugindata, pluginVideoRoom, "videoroom"); videoroom != "" {
|
||||||
|
ctx := context.TODO()
|
||||||
|
switch videoroom {
|
||||||
|
case "destroyed":
|
||||||
|
log.Printf("Publisher %d: associated room has been destroyed, closing", p.handleId)
|
||||||
|
go p.Close(ctx)
|
||||||
|
case "slow_link":
|
||||||
|
// Ignore, processed through "handleSlowLink" in the general events.
|
||||||
|
default:
|
||||||
|
log.Printf("Unsupported videoroom publisher event in %d: %+v", p.handleId, event)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Printf("Unsupported publisher event in %d: %+v", p.handleId, event)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusPublisher) handleHangup(event *janus.HangupMsg) {
|
||||||
|
log.Printf("Publisher %d received hangup (%s), closing", p.handleId, event.Reason)
|
||||||
|
go p.Close(context.Background())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusPublisher) handleDetached(event *janus.DetachedMsg) {
|
||||||
|
log.Printf("Publisher %d received detached, closing", p.handleId)
|
||||||
|
go p.Close(context.Background())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusPublisher) handleConnected(event *janus.WebRTCUpMsg) {
|
||||||
|
log.Printf("Publisher %d received connected", p.handleId)
|
||||||
|
p.mcu.publisherConnected.Notify(getStreamId(p.id, p.streamType))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusPublisher) handleSlowLink(event *janus.SlowLinkMsg) {
|
||||||
|
if event.Uplink {
|
||||||
|
log.Printf("Publisher %s (%d) is reporting %d lost packets on the uplink (Janus -> client)", p.listener.PublicId(), p.handleId, event.Lost)
|
||||||
|
} else {
|
||||||
|
log.Printf("Publisher %s (%d) is reporting %d lost packets on the downlink (client -> Janus)", p.listener.PublicId(), p.handleId, event.Lost)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusPublisher) handleMedia(event *janus.MediaMsg) {
|
||||||
|
mediaType := StreamType(event.Type)
|
||||||
|
if mediaType == StreamTypeVideo && p.streamType == StreamTypeScreen {
|
||||||
|
// We want to differentiate between audio, video and screensharing
|
||||||
|
mediaType = p.streamType
|
||||||
|
}
|
||||||
|
|
||||||
|
p.stats.EnableStream(mediaType, event.Receiving)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusPublisher) HasMedia(mt MediaType) bool {
|
||||||
|
return (p.mediaTypes & mt) == mt
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusPublisher) SetMedia(mt MediaType) {
|
||||||
|
p.mediaTypes = mt
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusPublisher) NotifyReconnected() {
|
||||||
|
ctx := context.TODO()
|
||||||
|
handle, session, roomId, _, err := p.mcu.getOrCreatePublisherHandle(ctx, p.id, p.streamType, p.bitrate)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Could not reconnect publisher %s: %s", p.id, err)
|
||||||
|
// TODO(jojo): Retry
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
p.handle = handle
|
||||||
|
p.handleId = handle.Id
|
||||||
|
p.session = session
|
||||||
|
p.roomId = roomId
|
||||||
|
|
||||||
|
log.Printf("Publisher %s reconnected on handle %d", p.id, p.handleId)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusPublisher) Close(ctx context.Context) {
|
||||||
|
notify := false
|
||||||
|
p.mu.Lock()
|
||||||
|
if handle := p.handle; handle != nil && p.roomId != 0 {
|
||||||
|
destroy_msg := map[string]interface{}{
|
||||||
|
"request": "destroy",
|
||||||
|
"room": p.roomId,
|
||||||
|
}
|
||||||
|
if _, err := handle.Request(ctx, destroy_msg); err != nil {
|
||||||
|
log.Printf("Error destroying room %d: %s", p.roomId, err)
|
||||||
|
} else {
|
||||||
|
log.Printf("Room %d destroyed", p.roomId)
|
||||||
|
}
|
||||||
|
p.mcu.mu.Lock()
|
||||||
|
delete(p.mcu.publishers, getStreamId(p.id, p.streamType))
|
||||||
|
p.mcu.mu.Unlock()
|
||||||
|
p.roomId = 0
|
||||||
|
notify = true
|
||||||
|
}
|
||||||
|
p.closeClient(ctx)
|
||||||
|
p.mu.Unlock()
|
||||||
|
|
||||||
|
p.stats.Reset()
|
||||||
|
|
||||||
|
if notify {
|
||||||
|
statsPublishersCurrent.WithLabelValues(string(p.streamType)).Dec()
|
||||||
|
p.mcu.unregisterClient(p)
|
||||||
|
p.listener.PublisherClosed(p)
|
||||||
|
}
|
||||||
|
p.mcuJanusClient.Close(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusPublisher) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) {
|
||||||
|
statsMcuMessagesTotal.WithLabelValues(data.Type).Inc()
|
||||||
|
jsep_msg := data.Payload
|
||||||
|
switch data.Type {
|
||||||
|
case "offer":
|
||||||
|
p.deferred <- func() {
|
||||||
|
if data.offerSdp == nil {
|
||||||
|
// Should have been checked before.
|
||||||
|
go callback(errors.New("No sdp found in offer"), nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
p.offerSdp.Store(data.offerSdp)
|
||||||
|
p.sdpFlags.Add(sdpHasOffer)
|
||||||
|
if p.sdpFlags.Get() == sdpHasAnswer|sdpHasOffer {
|
||||||
|
p.sdpReady.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO Tear down previous publisher and get a new one if sid does
|
||||||
|
// not match?
|
||||||
|
msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
p.sendOffer(msgctx, jsep_msg, func(err error, jsep map[string]interface{}) {
|
||||||
|
if err != nil {
|
||||||
|
callback(err, jsep)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
sdpData, found := jsep["sdp"]
|
||||||
|
if !found {
|
||||||
|
log.Printf("No sdp found in answer %+v", jsep)
|
||||||
|
} else {
|
||||||
|
sdpString, ok := sdpData.(string)
|
||||||
|
if !ok {
|
||||||
|
log.Printf("Invalid sdp found in answer %+v", jsep)
|
||||||
|
} else {
|
||||||
|
var answerSdp sdp.SessionDescription
|
||||||
|
if err := answerSdp.UnmarshalString(sdpString); err != nil {
|
||||||
|
log.Printf("Error parsing answer sdp %+v: %s", sdpString, err)
|
||||||
|
p.answerSdp.Store(nil)
|
||||||
|
p.sdpFlags.Remove(sdpHasAnswer)
|
||||||
|
} else {
|
||||||
|
p.answerSdp.Store(&answerSdp)
|
||||||
|
p.sdpFlags.Add(sdpHasAnswer)
|
||||||
|
if p.sdpFlags.Get() == sdpHasAnswer|sdpHasOffer {
|
||||||
|
p.sdpReady.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
callback(nil, jsep)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
case "candidate":
|
||||||
|
p.deferred <- func() {
|
||||||
|
msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
if data.Sid == "" || data.Sid == p.Sid() {
|
||||||
|
p.sendCandidate(msgctx, jsep_msg["candidate"], callback)
|
||||||
|
} else {
|
||||||
|
go callback(fmt.Errorf("Candidate message sid (%s) does not match publisher sid (%s)", data.Sid, p.Sid()), nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case "endOfCandidates":
|
||||||
|
// Ignore
|
||||||
|
default:
|
||||||
|
go callback(fmt.Errorf("Unsupported message type: %s", data.Type), nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getFmtpValue(fmtp string, key string) (string, bool) {
|
||||||
|
parts := strings.Split(fmtp, ";")
|
||||||
|
for _, part := range parts {
|
||||||
|
kv := strings.SplitN(part, "=", 2)
|
||||||
|
if len(kv) != 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.EqualFold(strings.TrimSpace(kv[0]), key) {
|
||||||
|
return strings.TrimSpace(kv[1]), true
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusPublisher) GetStreams(ctx context.Context) ([]PublisherStream, error) {
|
||||||
|
offerSdp := p.offerSdp.Load()
|
||||||
|
answerSdp := p.answerSdp.Load()
|
||||||
|
if offerSdp == nil || answerSdp == nil {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return nil, ctx.Err()
|
||||||
|
case <-p.sdpReady.C:
|
||||||
|
offerSdp = p.offerSdp.Load()
|
||||||
|
answerSdp = p.answerSdp.Load()
|
||||||
|
if offerSdp == nil || answerSdp == nil {
|
||||||
|
// Only can happen on invalid SDPs.
|
||||||
|
return nil, errors.New("no offer and/or answer processed yet")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var streams []PublisherStream
|
||||||
|
for idx, m := range answerSdp.MediaDescriptions {
|
||||||
|
mid, found := m.Attribute(sdp.AttrKeyMID)
|
||||||
|
if !found {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
s := PublisherStream{
|
||||||
|
Mid: mid,
|
||||||
|
Mindex: idx,
|
||||||
|
Type: m.MediaName.Media,
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(m.MediaName.Formats) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.EqualFold(s.Type, "application") && strings.EqualFold(m.MediaName.Formats[0], "webrtc-datachannel") {
|
||||||
|
s.Type = "data"
|
||||||
|
streams = append(streams, s)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
pt, err := strconv.ParseInt(m.MediaName.Formats[0], 10, 8)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
answerCodec, err := answerSdp.GetCodecForPayloadType(uint8(pt))
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.EqualFold(s.Type, "audio") {
|
||||||
|
s.Codec = answerCodec.Name
|
||||||
|
if value, found := getFmtpValue(answerCodec.Fmtp, "useinbandfec"); found && value == "1" {
|
||||||
|
s.Fec = true
|
||||||
|
}
|
||||||
|
if value, found := getFmtpValue(answerCodec.Fmtp, "usedtx"); found && value == "1" {
|
||||||
|
s.Dtx = true
|
||||||
|
}
|
||||||
|
if value, found := getFmtpValue(answerCodec.Fmtp, "stereo"); found && value == "1" {
|
||||||
|
s.Stereo = true
|
||||||
|
}
|
||||||
|
} else if strings.EqualFold(s.Type, "video") {
|
||||||
|
s.Codec = answerCodec.Name
|
||||||
|
// TODO: Determine if SVC is used.
|
||||||
|
s.Svc = false
|
||||||
|
|
||||||
|
if strings.EqualFold(answerCodec.Name, "vp9") {
|
||||||
|
// Parse VP9 profile from "profile-id=XXX"
|
||||||
|
// Exampe: "a=fmtp:98 profile-id=0"
|
||||||
|
if profile, found := getFmtpValue(answerCodec.Fmtp, "profile-id"); found {
|
||||||
|
s.ProfileVP9 = profile
|
||||||
|
}
|
||||||
|
} else if strings.EqualFold(answerCodec.Name, "h264") {
|
||||||
|
// Parse H.264 profile from "profile-level-id=XXX"
|
||||||
|
// Example: "a=fmtp:104 level-asymmetry-allowed=1;packetization-mode=0;profile-level-id=42001f"
|
||||||
|
if profile, found := getFmtpValue(answerCodec.Fmtp, "profile-level-id"); found {
|
||||||
|
s.ProfileH264 = profile
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var extmap sdp.ExtMap
|
||||||
|
for _, a := range m.Attributes {
|
||||||
|
switch a.Key {
|
||||||
|
case sdp.AttrKeyExtMap:
|
||||||
|
if err := extmap.Unmarshal(extmap.Name() + ":" + a.Value); err != nil {
|
||||||
|
log.Printf("Error parsing extmap %s: %s", a.Value, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch extmap.URI.String() {
|
||||||
|
case ExtensionUrlPlayoutDelay:
|
||||||
|
s.ExtIdPlayoutDelay = extmap.Value
|
||||||
|
case ExtensionUrlVideoOrientation:
|
||||||
|
s.ExtIdVideoOrientation = extmap.Value
|
||||||
|
}
|
||||||
|
case "simulcast":
|
||||||
|
s.Simulcast = true
|
||||||
|
case sdp.AttrKeySSRCGroup:
|
||||||
|
if strings.HasPrefix(a.Value, "SIM ") {
|
||||||
|
s.Simulcast = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, a := range offerSdp.MediaDescriptions[idx].Attributes {
|
||||||
|
switch a.Key {
|
||||||
|
case "simulcast":
|
||||||
|
s.Simulcast = true
|
||||||
|
case sdp.AttrKeySSRCGroup:
|
||||||
|
if strings.HasPrefix(a.Value, "SIM ") {
|
||||||
|
s.Simulcast = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
} else if strings.EqualFold(s.Type, "data") { // nolint
|
||||||
|
// Already handled above.
|
||||||
|
} else {
|
||||||
|
log.Printf("Skip type %s", s.Type)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
streams = append(streams, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
return streams, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getPublisherRemoteId(id string, remoteId string) string {
|
||||||
|
return fmt.Sprintf("%s@%s", id, remoteId)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusPublisher) PublishRemote(ctx context.Context, remoteId string, hostname string, port int, rtcpPort int) error {
|
||||||
|
msg := map[string]interface{}{
|
||||||
|
"request": "publish_remotely",
|
||||||
|
"room": p.roomId,
|
||||||
|
"publisher_id": streamTypeUserIds[p.streamType],
|
||||||
|
"remote_id": getPublisherRemoteId(p.id, remoteId),
|
||||||
|
"host": hostname,
|
||||||
|
"port": port,
|
||||||
|
"rtcp_port": rtcpPort,
|
||||||
|
}
|
||||||
|
response, err := p.handle.Request(ctx, msg)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
errorMessage := getPluginStringValue(response.PluginData, pluginVideoRoom, "error")
|
||||||
|
errorCode := getPluginIntValue(response.PluginData, pluginVideoRoom, "error_code")
|
||||||
|
if errorMessage != "" || errorCode != 0 {
|
||||||
|
if errorCode == 0 {
|
||||||
|
errorCode = 500
|
||||||
|
}
|
||||||
|
if errorMessage == "" {
|
||||||
|
errorMessage = "unknown error"
|
||||||
|
}
|
||||||
|
|
||||||
|
return &janus.ErrorMsg{
|
||||||
|
Err: janus.ErrorData{
|
||||||
|
Code: int(errorCode),
|
||||||
|
Reason: errorMessage,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("Publishing %s to %s (port=%d, rtcpPort=%d) for %s", p.id, hostname, port, rtcpPort, remoteId)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusPublisher) UnpublishRemote(ctx context.Context, remoteId string) error {
|
||||||
|
msg := map[string]interface{}{
|
||||||
|
"request": "unpublish_remotely",
|
||||||
|
"room": p.roomId,
|
||||||
|
"publisher_id": streamTypeUserIds[p.streamType],
|
||||||
|
"remote_id": getPublisherRemoteId(p.id, remoteId),
|
||||||
|
}
|
||||||
|
response, err := p.handle.Request(ctx, msg)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
errorMessage := getPluginStringValue(response.PluginData, pluginVideoRoom, "error")
|
||||||
|
errorCode := getPluginIntValue(response.PluginData, pluginVideoRoom, "error_code")
|
||||||
|
if errorMessage != "" || errorCode != 0 {
|
||||||
|
if errorCode == 0 {
|
||||||
|
errorCode = 500
|
||||||
|
}
|
||||||
|
if errorMessage == "" {
|
||||||
|
errorMessage = "unknown error"
|
||||||
|
}
|
||||||
|
|
||||||
|
return &janus.ErrorMsg{
|
||||||
|
Err: janus.ErrorData{
|
||||||
|
Code: int(errorCode),
|
||||||
|
Reason: errorMessage,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("Unpublished remote %s for %s", p.id, remoteId)
|
||||||
|
return nil
|
||||||
|
}
|
92
mcu_janus_publisher_test.go
Normal file
92
mcu_janus_publisher_test.go
Normal file
|
@ -0,0 +1,92 @@
|
||||||
|
/**
|
||||||
|
* Standalone signaling server for the Nextcloud Spreed app.
|
||||||
|
* Copyright (C) 2024 struktur AG
|
||||||
|
*
|
||||||
|
* @author Joachim Bauch <bauch@struktur.de>
|
||||||
|
*
|
||||||
|
* @license GNU AGPL version 3 or any later version
|
||||||
|
*
|
||||||
|
* This program is free software: you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License as published by
|
||||||
|
* the Free Software Foundation, either version 3 of the License, or
|
||||||
|
* (at your option) any later version.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful,
|
||||||
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
* GNU Affero General Public License for more details.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
package signaling
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGetFmtpValueH264(t *testing.T) {
|
||||||
|
testcases := []struct {
|
||||||
|
fmtp string
|
||||||
|
profile string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"",
|
||||||
|
"",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"level-asymmetry-allowed=1;packetization-mode=0;profile-level-id=42001f",
|
||||||
|
"42001f",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"level-asymmetry-allowed=1;packetization-mode=0",
|
||||||
|
"",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"level-asymmetry-allowed=1; packetization-mode=0; profile-level-id = 42001f",
|
||||||
|
"42001f",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testcases {
|
||||||
|
value, found := getFmtpValue(tc.fmtp, "profile-level-id")
|
||||||
|
if !found && tc.profile != "" {
|
||||||
|
t.Errorf("did not find profile \"%s\" in \"%s\"", tc.profile, tc.fmtp)
|
||||||
|
} else if found && tc.profile == "" {
|
||||||
|
t.Errorf("did not expect profile in \"%s\" but got \"%s\"", tc.fmtp, value)
|
||||||
|
} else if found && tc.profile != value {
|
||||||
|
t.Errorf("expected profile \"%s\" in \"%s\" but got \"%s\"", tc.profile, tc.fmtp, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetFmtpValueVP9(t *testing.T) {
|
||||||
|
testcases := []struct {
|
||||||
|
fmtp string
|
||||||
|
profile string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"",
|
||||||
|
"",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"profile-id=0",
|
||||||
|
"0",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"profile-id = 0",
|
||||||
|
"0",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testcases {
|
||||||
|
value, found := getFmtpValue(tc.fmtp, "profile-id")
|
||||||
|
if !found && tc.profile != "" {
|
||||||
|
t.Errorf("did not find profile \"%s\" in \"%s\"", tc.profile, tc.fmtp)
|
||||||
|
} else if found && tc.profile == "" {
|
||||||
|
t.Errorf("did not expect profile in \"%s\" but got \"%s\"", tc.fmtp, value)
|
||||||
|
} else if found && tc.profile != value {
|
||||||
|
t.Errorf("expected profile \"%s\" in \"%s\" but got \"%s\"", tc.profile, tc.fmtp, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
150
mcu_janus_remote_publisher.go
Normal file
150
mcu_janus_remote_publisher.go
Normal file
|
@ -0,0 +1,150 @@
|
||||||
|
/**
|
||||||
|
* Standalone signaling server for the Nextcloud Spreed app.
|
||||||
|
* Copyright (C) 2024 struktur AG
|
||||||
|
*
|
||||||
|
* @author Joachim Bauch <bauch@struktur.de>
|
||||||
|
*
|
||||||
|
* @license GNU AGPL version 3 or any later version
|
||||||
|
*
|
||||||
|
* This program is free software: you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License as published by
|
||||||
|
* the Free Software Foundation, either version 3 of the License, or
|
||||||
|
* (at your option) any later version.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful,
|
||||||
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
* GNU Affero General Public License for more details.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
package signaling
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"log"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"github.com/notedit/janus-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
type mcuJanusRemotePublisher struct {
|
||||||
|
mcuJanusPublisher
|
||||||
|
|
||||||
|
ref atomic.Int64
|
||||||
|
|
||||||
|
port int
|
||||||
|
rtcpPort int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusRemotePublisher) addRef() int64 {
|
||||||
|
return p.ref.Add(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusRemotePublisher) release() bool {
|
||||||
|
return p.ref.Add(-1) == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusRemotePublisher) Port() int {
|
||||||
|
return p.port
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusRemotePublisher) RtcpPort() int {
|
||||||
|
return p.rtcpPort
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusRemotePublisher) handleEvent(event *janus.EventMsg) {
|
||||||
|
if videoroom := getPluginStringValue(event.Plugindata, pluginVideoRoom, "videoroom"); videoroom != "" {
|
||||||
|
ctx := context.TODO()
|
||||||
|
switch videoroom {
|
||||||
|
case "destroyed":
|
||||||
|
log.Printf("Remote publisher %d: associated room has been destroyed, closing", p.handleId)
|
||||||
|
go p.Close(ctx)
|
||||||
|
case "slow_link":
|
||||||
|
// Ignore, processed through "handleSlowLink" in the general events.
|
||||||
|
default:
|
||||||
|
log.Printf("Unsupported videoroom remote publisher event in %d: %+v", p.handleId, event)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Printf("Unsupported remote publisher event in %d: %+v", p.handleId, event)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusRemotePublisher) handleHangup(event *janus.HangupMsg) {
|
||||||
|
log.Printf("Remote publisher %d received hangup (%s), closing", p.handleId, event.Reason)
|
||||||
|
go p.Close(context.Background())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusRemotePublisher) handleDetached(event *janus.DetachedMsg) {
|
||||||
|
log.Printf("Remote publisher %d received detached, closing", p.handleId)
|
||||||
|
go p.Close(context.Background())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusRemotePublisher) handleConnected(event *janus.WebRTCUpMsg) {
|
||||||
|
log.Printf("Remote publisher %d received connected", p.handleId)
|
||||||
|
p.mcu.publisherConnected.Notify(getStreamId(p.id, p.streamType))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusRemotePublisher) handleSlowLink(event *janus.SlowLinkMsg) {
|
||||||
|
if event.Uplink {
|
||||||
|
log.Printf("Remote publisher %s (%d) is reporting %d lost packets on the uplink (Janus -> client)", p.listener.PublicId(), p.handleId, event.Lost)
|
||||||
|
} else {
|
||||||
|
log.Printf("Remote publisher %s (%d) is reporting %d lost packets on the downlink (client -> Janus)", p.listener.PublicId(), p.handleId, event.Lost)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusRemotePublisher) NotifyReconnected() {
|
||||||
|
ctx := context.TODO()
|
||||||
|
handle, session, roomId, _, err := p.mcu.getOrCreatePublisherHandle(ctx, p.id, p.streamType, p.bitrate)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Could not reconnect remote publisher %s: %s", p.id, err)
|
||||||
|
// TODO(jojo): Retry
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
p.handle = handle
|
||||||
|
p.handleId = handle.Id
|
||||||
|
p.session = session
|
||||||
|
p.roomId = roomId
|
||||||
|
|
||||||
|
log.Printf("Remote publisher %s reconnected on handle %d", p.id, p.handleId)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *mcuJanusRemotePublisher) Close(ctx context.Context) {
|
||||||
|
if !p.release() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
p.mu.Lock()
|
||||||
|
if handle := p.handle; handle != nil {
|
||||||
|
response, err := p.handle.Request(ctx, map[string]interface{}{
|
||||||
|
"request": "remove_remote_publisher",
|
||||||
|
"room": p.roomId,
|
||||||
|
"id": streamTypeUserIds[p.streamType],
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Error removing remote publisher %s in room %d: %s", p.id, p.roomId, err)
|
||||||
|
} else {
|
||||||
|
log.Printf("Removed remote publisher: %+v", response)
|
||||||
|
}
|
||||||
|
if p.roomId != 0 {
|
||||||
|
destroy_msg := map[string]interface{}{
|
||||||
|
"request": "destroy",
|
||||||
|
"room": p.roomId,
|
||||||
|
}
|
||||||
|
if _, err := handle.Request(ctx, destroy_msg); err != nil {
|
||||||
|
log.Printf("Error destroying room %d: %s", p.roomId, err)
|
||||||
|
} else {
|
||||||
|
log.Printf("Room %d destroyed", p.roomId)
|
||||||
|
}
|
||||||
|
p.mcu.mu.Lock()
|
||||||
|
delete(p.mcu.remotePublishers, getStreamId(p.id, p.streamType))
|
||||||
|
p.mcu.mu.Unlock()
|
||||||
|
p.roomId = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
p.closeClient(ctx)
|
||||||
|
p.mu.Unlock()
|
||||||
|
}
|
115
mcu_janus_remote_subscriber.go
Normal file
115
mcu_janus_remote_subscriber.go
Normal file
|
@ -0,0 +1,115 @@
|
||||||
|
/**
|
||||||
|
* Standalone signaling server for the Nextcloud Spreed app.
|
||||||
|
* Copyright (C) 2024 struktur AG
|
||||||
|
*
|
||||||
|
* @author Joachim Bauch <bauch@struktur.de>
|
||||||
|
*
|
||||||
|
* @license GNU AGPL version 3 or any later version
|
||||||
|
*
|
||||||
|
* This program is free software: you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License as published by
|
||||||
|
* the Free Software Foundation, either version 3 of the License, or
|
||||||
|
* (at your option) any later version.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful,
|
||||||
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
* GNU Affero General Public License for more details.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
package signaling
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"log"
|
||||||
|
"strconv"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"github.com/notedit/janus-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
// mcuJanusRemoteSubscriber is a subscriber that receives its media from a
// remote publisher, i.e. a publisher forwarded from another Janus instance.
// It behaves like a regular subscriber but additionally holds a reference on
// the remote publisher that is released when the subscriber is closed.
type mcuJanusRemoteSubscriber struct {
	mcuJanusSubscriber

	// remote is the remote publisher this subscriber receives from. It is
	// atomically swapped to nil in Close so the reference is released
	// exactly once even with concurrent close paths.
	remote atomic.Pointer[mcuJanusRemotePublisher]
}

// handleEvent processes videoroom plugin events for this remote subscriber.
// Room destruction closes the subscriber; renegotiation offers are forwarded
// to the listener.
func (p *mcuJanusRemoteSubscriber) handleEvent(event *janus.EventMsg) {
	if videoroom := getPluginStringValue(event.Plugindata, pluginVideoRoom, "videoroom"); videoroom != "" {
		ctx := context.TODO()
		switch videoroom {
		case "destroyed":
			log.Printf("Remote subscriber %d: associated room has been destroyed, closing", p.handleId)
			go p.Close(ctx)
		case "event":
			// Handle renegotiations, but ignore other events like selected
			// substream / temporal layer.
			if getPluginStringValue(event.Plugindata, pluginVideoRoom, "configured") == "ok" &&
				event.Jsep != nil && event.Jsep["type"] == "offer" && event.Jsep["sdp"] != nil {
				p.listener.OnUpdateOffer(p, event.Jsep)
			}
		case "slow_link":
			// Ignore, processed through "handleSlowLink" in the general events.
		default:
			log.Printf("Unsupported videoroom event %s for remote subscriber %d: %+v", videoroom, p.handleId, event)
		}
	} else {
		log.Printf("Unsupported event for remote subscriber %d: %+v", p.handleId, event)
	}
}

// handleHangup closes the subscriber when the peer connection was hung up.
func (p *mcuJanusRemoteSubscriber) handleHangup(event *janus.HangupMsg) {
	log.Printf("Remote subscriber %d received hangup (%s), closing", p.handleId, event.Reason)
	go p.Close(context.Background())
}

// handleDetached closes the subscriber when its plugin handle was detached.
func (p *mcuJanusRemoteSubscriber) handleDetached(event *janus.DetachedMsg) {
	log.Printf("Remote subscriber %d received detached, closing", p.handleId)
	go p.Close(context.Background())
}

// handleConnected reports the established subscriber connection to the MCU.
func (p *mcuJanusRemoteSubscriber) handleConnected(event *janus.WebRTCUpMsg) {
	log.Printf("Remote subscriber %d received connected", p.handleId)
	p.mcu.SubscriberConnected(p.Id(), p.publisher, p.streamType)
}

// handleSlowLink logs packet loss reported by Janus for this subscriber.
func (p *mcuJanusRemoteSubscriber) handleSlowLink(event *janus.SlowLinkMsg) {
	if event.Uplink {
		log.Printf("Remote subscriber %s (%d) is reporting %d lost packets on the uplink (Janus -> client)", p.listener.PublicId(), p.handleId, event.Lost)
	} else {
		log.Printf("Remote subscriber %s (%d) is reporting %d lost packets on the downlink (client -> Janus)", p.listener.PublicId(), p.handleId, event.Lost)
	}
}

// handleMedia is a no-op for subscribers.
func (p *mcuJanusRemoteSubscriber) handleMedia(event *janus.MediaMsg) {
	// Only triggered for publishers
}

// NotifyReconnected re-creates the subscriber handle after the connection to
// Janus has been re-established. On failure the subscriber is closed.
func (p *mcuJanusRemoteSubscriber) NotifyReconnected() {
	ctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
	defer cancel()
	handle, pub, err := p.mcu.getOrCreateSubscriberHandle(ctx, p.publisher, p.streamType)
	if err != nil {
		// TODO(jojo): Retry?
		log.Printf("Could not reconnect remote subscriber for publisher %s: %s", p.publisher, err)
		p.Close(context.Background())
		return
	}

	p.handle = handle
	p.handleId = handle.Id
	p.roomId = pub.roomId
	// The sid is derived from the (new) handle id; tell the listener.
	p.sid = strconv.FormatUint(handle.Id, 10)
	p.listener.SubscriberSidUpdated(p)
	log.Printf("Subscriber %d for publisher %s reconnected on handle %d", p.id, p.publisher, p.handleId)
}

// Close closes the underlying subscriber and releases the reference on the
// remote publisher (at most once, guarded by the atomic swap).
func (p *mcuJanusRemoteSubscriber) Close(ctx context.Context) {
	p.mcuJanusSubscriber.Close(ctx)

	if remote := p.remote.Swap(nil); remote != nil {
		remote.Close(context.Background())
	}
}
|
110
mcu_janus_stream_selection.go
Normal file
110
mcu_janus_stream_selection.go
Normal file
|
@ -0,0 +1,110 @@
|
||||||
|
/**
|
||||||
|
* Standalone signaling server for the Nextcloud Spreed app.
|
||||||
|
* Copyright (C) 2017 struktur AG
|
||||||
|
*
|
||||||
|
* @author Joachim Bauch <bauch@struktur.de>
|
||||||
|
*
|
||||||
|
* @license GNU AGPL version 3 or any later version
|
||||||
|
*
|
||||||
|
* This program is free software: you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License as published by
|
||||||
|
* the Free Software Foundation, either version 3 of the License, or
|
||||||
|
* (at your option) any later version.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful,
|
||||||
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
* GNU Affero General Public License for more details.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
package signaling
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// streamSelection holds the optional stream parameters a subscriber may
// request: simulcast substream / temporal layer selection and audio / video
// toggles. Fields not present in the client payload stay invalid and are
// omitted from the Janus request.
type streamSelection struct {
	substream sql.NullInt16
	temporal  sql.NullInt16
	audio     sql.NullBool
	video     sql.NullBool
}

// HasValues reports whether at least one selection field was set.
func (s *streamSelection) HasValues() bool {
	return s.substream.Valid || s.temporal.Valid || s.audio.Valid || s.video.Valid
}

// AddToMessage adds all valid selection fields to the given request message.
func (s *streamSelection) AddToMessage(message map[string]interface{}) {
	if s.substream.Valid {
		message["substream"] = s.substream.Int16
	}
	if s.temporal.Valid {
		message["temporal"] = s.temporal.Int16
	}
	if s.audio.Valid {
		message["audio"] = s.audio.Bool
	}
	if s.video.Valid {
		message["video"] = s.video.Bool
	}
}

// parseSelectionInt16 stores a numeric payload value in result. Both integer
// and float variants are accepted because different decoders deliver JSON
// numbers differently. The name is only used for the error message.
func parseSelectionInt16(name string, value interface{}, result *sql.NullInt16) error {
	switch value := value.(type) {
	case int:
		result.Valid = true
		result.Int16 = int16(value)
	case float32:
		result.Valid = true
		result.Int16 = int16(value)
	case float64:
		result.Valid = true
		result.Int16 = int16(value)
	default:
		return fmt.Errorf("Unsupported %s value: %v", name, value)
	}
	return nil
}

// parseSelectionBool stores a boolean payload value in result. The name is
// only used for the error message.
func parseSelectionBool(name string, value interface{}, result *sql.NullBool) error {
	b, ok := value.(bool)
	if !ok {
		return fmt.Errorf("Unsupported %s value: %v", name, value)
	}
	result.Valid = true
	result.Bool = b
	return nil
}

// parseStreamSelection extracts the stream selection fields from a client
// payload. Fields that are absent are left unset; a present field with an
// unsupported type results in an error.
func parseStreamSelection(payload map[string]interface{}) (*streamSelection, error) {
	var stream streamSelection
	if value, found := payload["substream"]; found {
		if err := parseSelectionInt16("substream", value, &stream.substream); err != nil {
			return nil, err
		}
	}

	if value, found := payload["temporal"]; found {
		if err := parseSelectionInt16("temporal", value, &stream.temporal); err != nil {
			return nil, err
		}
	}

	if value, found := payload["audio"]; found {
		if err := parseSelectionBool("audio", value, &stream.audio); err != nil {
			return nil, err
		}
	}

	if value, found := payload["video"]; found {
		if err := parseSelectionBool("video", value, &stream.video); err != nil {
			return nil, err
		}
	}

	return &stream, nil
}
|
321
mcu_janus_subscriber.go
Normal file
321
mcu_janus_subscriber.go
Normal file
|
@ -0,0 +1,321 @@
|
||||||
|
/**
|
||||||
|
* Standalone signaling server for the Nextcloud Spreed app.
|
||||||
|
* Copyright (C) 2017 struktur AG
|
||||||
|
*
|
||||||
|
* @author Joachim Bauch <bauch@struktur.de>
|
||||||
|
*
|
||||||
|
* @license GNU AGPL version 3 or any later version
|
||||||
|
*
|
||||||
|
* This program is free software: you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License as published by
|
||||||
|
* the Free Software Foundation, either version 3 of the License, or
|
||||||
|
* (at your option) any later version.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful,
|
||||||
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
* GNU Affero General Public License for more details.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
package signaling
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/notedit/janus-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
// mcuJanusSubscriber receives a single publisher's stream from the Janus
// videoroom plugin and relays signaling (offers, answers, candidates) between
// the client and Janus.
type mcuJanusSubscriber struct {
	mcuJanusClient

	// publisher is the id of the publisher this subscriber receives from.
	publisher string
}

// Publisher returns the id of the publisher this subscriber receives from.
func (p *mcuJanusSubscriber) Publisher() string {
	return p.publisher
}

// handleEvent processes videoroom plugin events. Room destruction closes the
// subscriber; renegotiation offers are forwarded to the listener.
func (p *mcuJanusSubscriber) handleEvent(event *janus.EventMsg) {
	if videoroom := getPluginStringValue(event.Plugindata, pluginVideoRoom, "videoroom"); videoroom != "" {
		ctx := context.TODO()
		switch videoroom {
		case "destroyed":
			log.Printf("Subscriber %d: associated room has been destroyed, closing", p.handleId)
			go p.Close(ctx)
		case "event":
			// Handle renegotiations, but ignore other events like selected
			// substream / temporal layer.
			if getPluginStringValue(event.Plugindata, pluginVideoRoom, "configured") == "ok" &&
				event.Jsep != nil && event.Jsep["type"] == "offer" && event.Jsep["sdp"] != nil {
				p.listener.OnUpdateOffer(p, event.Jsep)
			}
		case "slow_link":
			// Ignore, processed through "handleSlowLink" in the general events.
		default:
			log.Printf("Unsupported videoroom event %s for subscriber %d: %+v", videoroom, p.handleId, event)
		}
	} else {
		log.Printf("Unsupported event for subscriber %d: %+v", p.handleId, event)
	}
}

// handleHangup closes the subscriber when the peer connection was hung up.
func (p *mcuJanusSubscriber) handleHangup(event *janus.HangupMsg) {
	log.Printf("Subscriber %d received hangup (%s), closing", p.handleId, event.Reason)
	go p.Close(context.Background())
}

// handleDetached closes the subscriber when its plugin handle was detached.
func (p *mcuJanusSubscriber) handleDetached(event *janus.DetachedMsg) {
	log.Printf("Subscriber %d received detached, closing", p.handleId)
	go p.Close(context.Background())
}

// handleConnected reports the established subscriber connection to the MCU.
func (p *mcuJanusSubscriber) handleConnected(event *janus.WebRTCUpMsg) {
	log.Printf("Subscriber %d received connected", p.handleId)
	p.mcu.SubscriberConnected(p.Id(), p.publisher, p.streamType)
}

// handleSlowLink logs packet loss reported by Janus for this subscriber.
func (p *mcuJanusSubscriber) handleSlowLink(event *janus.SlowLinkMsg) {
	if event.Uplink {
		log.Printf("Subscriber %s (%d) is reporting %d lost packets on the uplink (Janus -> client)", p.listener.PublicId(), p.handleId, event.Lost)
	} else {
		log.Printf("Subscriber %s (%d) is reporting %d lost packets on the downlink (client -> Janus)", p.listener.PublicId(), p.handleId, event.Lost)
	}
}

// handleMedia is a no-op for subscribers.
func (p *mcuJanusSubscriber) handleMedia(event *janus.MediaMsg) {
	// Only triggered for publishers
}

// NotifyReconnected re-creates the subscriber handle after the connection to
// Janus has been re-established. On failure the subscriber is closed.
func (p *mcuJanusSubscriber) NotifyReconnected() {
	ctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
	defer cancel()
	handle, pub, err := p.mcu.getOrCreateSubscriberHandle(ctx, p.publisher, p.streamType)
	if err != nil {
		// TODO(jojo): Retry?
		log.Printf("Could not reconnect subscriber for publisher %s: %s", p.publisher, err)
		p.Close(context.Background())
		return
	}

	p.handle = handle
	p.handleId = handle.Id
	p.roomId = pub.roomId
	// The sid is derived from the (new) handle id; tell the listener.
	p.sid = strconv.FormatUint(handle.Id, 10)
	p.listener.SubscriberSidUpdated(p)
	log.Printf("Subscriber %d for publisher %s reconnected on handle %d", p.id, p.publisher, p.handleId)
}

// Close closes the client connection, updates connection statistics if a
// connection was actually torn down, and unregisters the subscriber.
func (p *mcuJanusSubscriber) Close(ctx context.Context) {
	p.mu.Lock()
	closed := p.closeClient(ctx)
	p.mu.Unlock()

	if closed {
		p.mcu.SubscriberDisconnected(p.Id(), p.publisher, p.streamType)
		statsSubscribersCurrent.WithLabelValues(string(p.streamType)).Dec()
	}
	p.mcu.unregisterClient(p)
	p.listener.SubscriberClosed(p)
	p.mcuJanusClient.Close(ctx)
}

// joinRoom joins the publisher's videoroom as a subscriber and passes the
// resulting offer JSEP to the callback. Handles two retryable error cases:
// "already joined" (tear down the old session and re-join) and "room/feed
// missing yet" (wait for the publisher to connect, then retry).
func (p *mcuJanusSubscriber) joinRoom(ctx context.Context, stream *streamSelection, callback func(error, map[string]interface{})) {
	handle := p.handle
	if handle == nil {
		callback(ErrNotConnected, nil)
		return
	}

	// Register for publisher-connected notifications up front so a
	// publisher appearing between the join attempt and the wait below is
	// not missed.
	waiter := p.mcu.publisherConnected.NewWaiter(getStreamId(p.publisher, p.streamType))
	defer p.mcu.publisherConnected.Release(waiter)

	loggedNotPublishingYet := false
retry:
	join_msg := map[string]interface{}{
		"request": "join",
		"ptype":   "subscriber",
		"room":    p.roomId,
	}
	// Multistream Janus expects a "streams" list; older versions a "feed".
	if p.mcu.isMultistream() {
		join_msg["streams"] = []map[string]interface{}{
			{
				"feed": streamTypeUserIds[p.streamType],
			},
		}
	} else {
		join_msg["feed"] = streamTypeUserIds[p.streamType]
	}
	if stream != nil {
		stream.AddToMessage(join_msg)
	}
	join_response, err := handle.Message(ctx, join_msg, nil)
	if err != nil {
		callback(err, nil)
		return
	}

	if error_code := getPluginIntValue(join_response.Plugindata, pluginVideoRoom, "error_code"); error_code > 0 {
		switch error_code {
		case JANUS_VIDEOROOM_ERROR_ALREADY_JOINED:
			// The subscriber is already connected to the room. This can happen
			// if a client leaves a call but keeps the subscriber objects active.
			// On joining the call again, the subscriber tries to join on the
			// MCU which will fail because he is still connected.
			// To get a new Offer SDP, we have to tear down the session on the
			// MCU and join again.
			p.mu.Lock()
			p.closeClient(ctx)
			p.mu.Unlock()

			var pub *mcuJanusPublisher
			handle, pub, err = p.mcu.getOrCreateSubscriberHandle(ctx, p.publisher, p.streamType)
			if err != nil {
				// Reconnection didn't work, need to unregister/remove subscriber
				// so a new object will be created if the request is retried.
				p.mcu.unregisterClient(p)
				p.listener.SubscriberClosed(p)
				callback(fmt.Errorf("Already connected as subscriber for %s, error during re-joining: %s", p.streamType, err), nil)
				return
			}

			p.handle = handle
			p.handleId = handle.Id
			p.roomId = pub.roomId
			p.sid = strconv.FormatUint(handle.Id, 10)
			p.listener.SubscriberSidUpdated(p)
			// Start a fresh event loop for the new handle.
			p.closeChan = make(chan struct{}, 1)
			go p.run(p.handle, p.closeChan)
			log.Printf("Already connected subscriber %d for %s, leaving and re-joining on handle %d", p.id, p.streamType, p.handleId)
			goto retry
		case JANUS_VIDEOROOM_ERROR_NO_SUCH_ROOM:
			fallthrough
		case JANUS_VIDEOROOM_ERROR_NO_SUCH_FEED:
			// The publisher hasn't created its room / started sending yet.
			switch error_code {
			case JANUS_VIDEOROOM_ERROR_NO_SUCH_ROOM:
				log.Printf("Publisher %s not created yet for %s, wait and retry to join room %d as subscriber", p.publisher, p.streamType, p.roomId)
			case JANUS_VIDEOROOM_ERROR_NO_SUCH_FEED:
				log.Printf("Publisher %s not sending yet for %s, wait and retry to join room %d as subscriber", p.publisher, p.streamType, p.roomId)
			}

			// Count each waiting subscriber only once per join attempt.
			if !loggedNotPublishingYet {
				loggedNotPublishingYet = true
				statsWaitingForPublisherTotal.WithLabelValues(string(p.streamType)).Inc()
			}

			if err := waiter.Wait(ctx); err != nil {
				callback(err, nil)
				return
			}
			log.Printf("Retry subscribing %s from %s", p.streamType, p.publisher)
			goto retry
		default:
			// TODO(jojo): Should we handle other errors, too?
			callback(fmt.Errorf("Error joining room as subscriber: %+v", join_response), nil)
			return
		}
	}
	//log.Println("Joined as listener", join_response)

	p.session = join_response.Session
	callback(nil, join_response.Jsep)
}

// update sends a "configure" renegotiation request with the given stream
// selection and passes the resulting JSEP (if any) to the callback.
func (p *mcuJanusSubscriber) update(ctx context.Context, stream *streamSelection, callback func(error, map[string]interface{})) {
	handle := p.handle
	if handle == nil {
		callback(ErrNotConnected, nil)
		return
	}

	configure_msg := map[string]interface{}{
		"request": "configure",
		"update":  true,
	}
	if stream != nil {
		stream.AddToMessage(configure_msg)
	}
	configure_response, err := handle.Message(ctx, configure_msg, nil)
	if err != nil {
		callback(err, nil)
		return
	}

	callback(nil, configure_response.Jsep)
}

// SendMessage dispatches a client signaling message. Work is queued on the
// deferred channel so messages are processed in order by the client's event
// loop; each queued item uses its own timeout context. Messages carrying a
// sid that does not match the current subscriber sid are rejected (answer,
// candidate) or trigger a fresh room join (requestoffer, sendoffer).
func (p *mcuJanusSubscriber) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) {
	statsMcuMessagesTotal.WithLabelValues(data.Type).Inc()
	jsep_msg := data.Payload
	switch data.Type {
	case "requestoffer":
		fallthrough
	case "sendoffer":
		p.deferred <- func() {
			msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
			defer cancel()

			stream, err := parseStreamSelection(jsep_msg)
			if err != nil {
				go callback(err, nil)
				return
			}

			if data.Sid == "" || data.Sid != p.Sid() {
				p.joinRoom(msgctx, stream, callback)
			} else {
				p.update(msgctx, stream, callback)
			}
		}
	case "answer":
		p.deferred <- func() {
			msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
			defer cancel()

			if data.Sid == "" || data.Sid == p.Sid() {
				p.sendAnswer(msgctx, jsep_msg, callback)
			} else {
				go callback(fmt.Errorf("Answer message sid (%s) does not match subscriber sid (%s)", data.Sid, p.Sid()), nil)
			}
		}
	case "candidate":
		p.deferred <- func() {
			msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
			defer cancel()

			if data.Sid == "" || data.Sid == p.Sid() {
				p.sendCandidate(msgctx, jsep_msg["candidate"], callback)
			} else {
				go callback(fmt.Errorf("Candidate message sid (%s) does not match subscriber sid (%s)", data.Sid, p.Sid()), nil)
			}
		}
	case "endOfCandidates":
		// Ignore
	case "selectStream":
		stream, err := parseStreamSelection(jsep_msg)
		if err != nil {
			go callback(err, nil)
			return
		}

		if stream == nil || !stream.HasValues() {
			// Nothing to do
			go callback(nil, nil)
			return
		}

		p.deferred <- func() {
			msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
			defer cancel()

			p.selectStream(msgctx, stream, callback)
		}
	default:
		// Return error asynchronously
		go callback(fmt.Errorf("Unsupported message type: %s", data.Type), nil)
	}
}
|
663
mcu_proxy.go
663
mcu_proxy.go
|
@ -162,6 +162,7 @@ func (p *mcuProxyPublisher) SetMedia(mt MediaType) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *mcuProxyPublisher) NotifyClosed() {
|
func (p *mcuProxyPublisher) NotifyClosed() {
|
||||||
|
log.Printf("Publisher %s at %s was closed", p.proxyId, p.conn)
|
||||||
p.listener.PublisherClosed(p)
|
p.listener.PublisherClosed(p)
|
||||||
p.conn.removePublisher(p)
|
p.conn.removePublisher(p)
|
||||||
}
|
}
|
||||||
|
@ -185,7 +186,7 @@ func (p *mcuProxyPublisher) Close(ctx context.Context) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("Delete publisher %s at %s", p.proxyId, p.conn)
|
log.Printf("Deleted publisher %s at %s", p.proxyId, p.conn)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *mcuProxyPublisher) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) {
|
func (p *mcuProxyPublisher) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) {
|
||||||
|
@ -217,13 +218,26 @@ func (p *mcuProxyPublisher) ProcessEvent(msg *EventProxyServerMessage) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetStreams is not implemented for proxy publishers; always returns an error.
func (p *mcuProxyPublisher) GetStreams(ctx context.Context) ([]PublisherStream, error) {
	return nil, errors.New("not implemented")
}

// PublishRemote is not supported for proxy publishers; always returns an error.
func (p *mcuProxyPublisher) PublishRemote(ctx context.Context, remoteId string, hostname string, port int, rtcpPort int) error {
	return errors.New("remote publishing not supported for proxy publishers")
}

// UnpublishRemote is not supported for proxy publishers; always returns an error.
func (p *mcuProxyPublisher) UnpublishRemote(ctx context.Context, remoteId string) error {
	return errors.New("remote publishing not supported for proxy publishers")
}
|
||||||
|
|
||||||
type mcuProxySubscriber struct {
|
type mcuProxySubscriber struct {
|
||||||
mcuProxyPubSubCommon
|
mcuProxyPubSubCommon
|
||||||
|
|
||||||
publisherId string
|
publisherId string
|
||||||
|
publisherConn *mcuProxyConnection
|
||||||
}
|
}
|
||||||
|
|
||||||
func newMcuProxySubscriber(publisherId string, sid string, streamType StreamType, maxBitrate int, proxyId string, conn *mcuProxyConnection, listener McuListener) *mcuProxySubscriber {
|
func newMcuProxySubscriber(publisherId string, sid string, streamType StreamType, maxBitrate int, proxyId string, conn *mcuProxyConnection, listener McuListener, publisherConn *mcuProxyConnection) *mcuProxySubscriber {
|
||||||
return &mcuProxySubscriber{
|
return &mcuProxySubscriber{
|
||||||
mcuProxyPubSubCommon: mcuProxyPubSubCommon{
|
mcuProxyPubSubCommon: mcuProxyPubSubCommon{
|
||||||
sid: sid,
|
sid: sid,
|
||||||
|
@ -234,7 +248,8 @@ func newMcuProxySubscriber(publisherId string, sid string, streamType StreamType
|
||||||
listener: listener,
|
listener: listener,
|
||||||
},
|
},
|
||||||
|
|
||||||
publisherId: publisherId,
|
publisherId: publisherId,
|
||||||
|
publisherConn: publisherConn,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -243,6 +258,11 @@ func (s *mcuProxySubscriber) Publisher() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *mcuProxySubscriber) NotifyClosed() {
|
func (s *mcuProxySubscriber) NotifyClosed() {
|
||||||
|
if s.publisherConn != nil {
|
||||||
|
log.Printf("Remote subscriber %s at %s (forwarded to %s) was closed", s.proxyId, s.conn, s.publisherConn)
|
||||||
|
} else {
|
||||||
|
log.Printf("Subscriber %s at %s was closed", s.proxyId, s.conn)
|
||||||
|
}
|
||||||
s.listener.SubscriberClosed(s)
|
s.listener.SubscriberClosed(s)
|
||||||
s.conn.removeSubscriber(s)
|
s.conn.removeSubscriber(s)
|
||||||
}
|
}
|
||||||
|
@ -259,14 +279,26 @@ func (s *mcuProxySubscriber) Close(ctx context.Context) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if response, err := s.conn.performSyncRequest(ctx, msg); err != nil {
|
if response, err := s.conn.performSyncRequest(ctx, msg); err != nil {
|
||||||
log.Printf("Could not delete subscriber %s at %s: %s", s.proxyId, s.conn, err)
|
if s.publisherConn != nil {
|
||||||
|
log.Printf("Could not delete remote subscriber %s at %s (forwarded to %s): %s", s.proxyId, s.conn, s.publisherConn, err)
|
||||||
|
} else {
|
||||||
|
log.Printf("Could not delete subscriber %s at %s: %s", s.proxyId, s.conn, err)
|
||||||
|
}
|
||||||
return
|
return
|
||||||
} else if response.Type == "error" {
|
} else if response.Type == "error" {
|
||||||
log.Printf("Could not delete subscriber %s at %s: %s", s.proxyId, s.conn, response.Error)
|
if s.publisherConn != nil {
|
||||||
|
log.Printf("Could not delete remote subscriber %s at %s (forwarded to %s): %s", s.proxyId, s.conn, s.publisherConn, response.Error)
|
||||||
|
} else {
|
||||||
|
log.Printf("Could not delete subscriber %s at %s: %s", s.proxyId, s.conn, response.Error)
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("Delete subscriber %s at %s", s.proxyId, s.conn)
|
if s.publisherConn != nil {
|
||||||
|
log.Printf("Deleted remote subscriber %s at %s (forwarded to %s)", s.proxyId, s.conn, s.publisherConn)
|
||||||
|
} else {
|
||||||
|
log.Printf("Deleted subscriber %s at %s", s.proxyId, s.conn)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *mcuProxySubscriber) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) {
|
func (s *mcuProxySubscriber) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) {
|
||||||
|
@ -308,6 +340,7 @@ type mcuProxyConnection struct {
|
||||||
ip net.IP
|
ip net.IP
|
||||||
|
|
||||||
load atomic.Int64
|
load atomic.Int64
|
||||||
|
bandwidth atomic.Pointer[EventProxyServerBandwidth]
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
closer *Closer
|
closer *Closer
|
||||||
closedDone *Closer
|
closedDone *Closer
|
||||||
|
@ -326,7 +359,7 @@ type mcuProxyConnection struct {
|
||||||
|
|
||||||
msgId atomic.Int64
|
msgId atomic.Int64
|
||||||
helloMsgId string
|
helloMsgId string
|
||||||
sessionId string
|
sessionId atomic.Value
|
||||||
country atomic.Value
|
country atomic.Value
|
||||||
|
|
||||||
callbacks map[string]func(*ProxyServerMessage)
|
callbacks map[string]func(*ProxyServerMessage)
|
||||||
|
@ -359,6 +392,7 @@ func newMcuProxyConnection(proxy *mcuProxy, baseUrl string, ip net.IP) (*mcuProx
|
||||||
}
|
}
|
||||||
conn.reconnectInterval.Store(int64(initialReconnectInterval))
|
conn.reconnectInterval.Store(int64(initialReconnectInterval))
|
||||||
conn.load.Store(loadNotConnected)
|
conn.load.Store(loadNotConnected)
|
||||||
|
conn.bandwidth.Store(nil)
|
||||||
conn.country.Store("")
|
conn.country.Store("")
|
||||||
return conn, nil
|
return conn, nil
|
||||||
}
|
}
|
||||||
|
@ -371,6 +405,54 @@ func (c *mcuProxyConnection) String() string {
|
||||||
return c.rawUrl
|
return c.rawUrl
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *mcuProxyConnection) IsSameCountry(initiator McuInitiator) bool {
|
||||||
|
if initiator == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
initiatorCountry := initiator.Country()
|
||||||
|
if initiatorCountry == "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
connCountry := c.Country()
|
||||||
|
if connCountry == "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return initiatorCountry == connCountry
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *mcuProxyConnection) IsSameContinent(initiator McuInitiator) bool {
|
||||||
|
if initiator == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
initiatorCountry := initiator.Country()
|
||||||
|
if initiatorCountry == "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
connCountry := c.Country()
|
||||||
|
if connCountry == "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
initiatorContinents, found := ContinentMap[initiatorCountry]
|
||||||
|
if found {
|
||||||
|
m := c.proxy.getContinentsMap()
|
||||||
|
// Map continents to other continents (e.g. use Europe for Africa).
|
||||||
|
for _, continent := range initiatorContinents {
|
||||||
|
if toAdd, found := m[continent]; found {
|
||||||
|
initiatorContinents = append(initiatorContinents, toAdd...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
connContinents := ContinentMap[connCountry]
|
||||||
|
return ContinentsOverlap(initiatorContinents, connContinents)
|
||||||
|
}
|
||||||
|
|
||||||
type mcuProxyConnectionStats struct {
|
type mcuProxyConnectionStats struct {
|
||||||
Url string `json:"url"`
|
Url string `json:"url"`
|
||||||
IP net.IP `json:"ip,omitempty"`
|
IP net.IP `json:"ip,omitempty"`
|
||||||
|
@ -414,10 +496,29 @@ func (c *mcuProxyConnection) Load() int64 {
|
||||||
return c.load.Load()
|
return c.load.Load()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *mcuProxyConnection) Bandwidth() *EventProxyServerBandwidth {
|
||||||
|
return c.bandwidth.Load()
|
||||||
|
}
|
||||||
|
|
||||||
func (c *mcuProxyConnection) Country() string {
|
func (c *mcuProxyConnection) Country() string {
|
||||||
return c.country.Load().(string)
|
return c.country.Load().(string)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *mcuProxyConnection) SessionId() string {
|
||||||
|
sid := c.sessionId.Load()
|
||||||
|
if sid == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return sid.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *mcuProxyConnection) IsConnected() bool {
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
return c.conn != nil && c.SessionId() != ""
|
||||||
|
}
|
||||||
|
|
||||||
func (c *mcuProxyConnection) IsTemporary() bool {
|
func (c *mcuProxyConnection) IsTemporary() bool {
|
||||||
return c.temporary.Load()
|
return c.temporary.Load()
|
||||||
}
|
}
|
||||||
|
@ -443,7 +544,10 @@ func (c *mcuProxyConnection) readPump() {
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
defer c.close()
|
defer c.close()
|
||||||
defer c.load.Store(loadNotConnected)
|
defer func() {
|
||||||
|
c.load.Store(loadNotConnected)
|
||||||
|
c.bandwidth.Store(nil)
|
||||||
|
}()
|
||||||
|
|
||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
conn := c.conn
|
conn := c.conn
|
||||||
|
@ -744,8 +848,9 @@ func (c *mcuProxyConnection) clearPublishers() {
|
||||||
publisher.NotifyClosed()
|
publisher.NotifyClosed()
|
||||||
}
|
}
|
||||||
}(c.publishers)
|
}(c.publishers)
|
||||||
|
// Can't use clear(...) here as the map is processed by the goroutine above.
|
||||||
c.publishers = make(map[string]*mcuProxyPublisher)
|
c.publishers = make(map[string]*mcuProxyPublisher)
|
||||||
c.publisherIds = make(map[string]string)
|
clear(c.publisherIds)
|
||||||
|
|
||||||
if c.closeScheduled.Load() || c.IsTemporary() {
|
if c.closeScheduled.Load() || c.IsTemporary() {
|
||||||
go c.closeIfEmpty()
|
go c.closeIfEmpty()
|
||||||
|
@ -775,6 +880,7 @@ func (c *mcuProxyConnection) clearSubscribers() {
|
||||||
subscriber.NotifyClosed()
|
subscriber.NotifyClosed()
|
||||||
}
|
}
|
||||||
}(c.subscribers)
|
}(c.subscribers)
|
||||||
|
// Can't use clear(...) here as the map is processed by the goroutine above.
|
||||||
c.subscribers = make(map[string]*mcuProxySubscriber)
|
c.subscribers = make(map[string]*mcuProxySubscriber)
|
||||||
|
|
||||||
if c.closeScheduled.Load() || c.IsTemporary() {
|
if c.closeScheduled.Load() || c.IsTemporary() {
|
||||||
|
@ -786,7 +892,7 @@ func (c *mcuProxyConnection) clearCallbacks() {
|
||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
defer c.mu.Unlock()
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
c.callbacks = make(map[string]func(*ProxyServerMessage))
|
clear(c.callbacks)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *mcuProxyConnection) getCallback(id string) func(*ProxyServerMessage) {
|
func (c *mcuProxyConnection) getCallback(id string) func(*ProxyServerMessage) {
|
||||||
|
@ -806,11 +912,11 @@ func (c *mcuProxyConnection) processMessage(msg *ProxyServerMessage) {
|
||||||
switch msg.Type {
|
switch msg.Type {
|
||||||
case "error":
|
case "error":
|
||||||
if msg.Error.Code == "no_such_session" {
|
if msg.Error.Code == "no_such_session" {
|
||||||
log.Printf("Session %s could not be resumed on %s, registering new", c.sessionId, c)
|
log.Printf("Session %s could not be resumed on %s, registering new", c.SessionId(), c)
|
||||||
c.clearPublishers()
|
c.clearPublishers()
|
||||||
c.clearSubscribers()
|
c.clearSubscribers()
|
||||||
c.clearCallbacks()
|
c.clearCallbacks()
|
||||||
c.sessionId = ""
|
c.sessionId.Store("")
|
||||||
if err := c.sendHello(); err != nil {
|
if err := c.sendHello(); err != nil {
|
||||||
log.Printf("Could not send hello request to %s: %s", c, err)
|
log.Printf("Could not send hello request to %s: %s", c, err)
|
||||||
c.scheduleReconnect()
|
c.scheduleReconnect()
|
||||||
|
@ -821,8 +927,8 @@ func (c *mcuProxyConnection) processMessage(msg *ProxyServerMessage) {
|
||||||
log.Printf("Hello connection to %s failed with %+v, reconnecting", c, msg.Error)
|
log.Printf("Hello connection to %s failed with %+v, reconnecting", c, msg.Error)
|
||||||
c.scheduleReconnect()
|
c.scheduleReconnect()
|
||||||
case "hello":
|
case "hello":
|
||||||
resumed := c.sessionId == msg.Hello.SessionId
|
resumed := c.SessionId() == msg.Hello.SessionId
|
||||||
c.sessionId = msg.Hello.SessionId
|
c.sessionId.Store(msg.Hello.SessionId)
|
||||||
country := ""
|
country := ""
|
||||||
if msg.Hello.Server != nil {
|
if msg.Hello.Server != nil {
|
||||||
if country = msg.Hello.Server.Country; country != "" && !IsValidCountry(country) {
|
if country = msg.Hello.Server.Country; country != "" && !IsValidCountry(country) {
|
||||||
|
@ -832,11 +938,11 @@ func (c *mcuProxyConnection) processMessage(msg *ProxyServerMessage) {
|
||||||
}
|
}
|
||||||
c.country.Store(country)
|
c.country.Store(country)
|
||||||
if resumed {
|
if resumed {
|
||||||
log.Printf("Resumed session %s on %s", c.sessionId, c)
|
log.Printf("Resumed session %s on %s", c.SessionId(), c)
|
||||||
} else if country != "" {
|
} else if country != "" {
|
||||||
log.Printf("Received session %s from %s (in %s)", c.sessionId, c, country)
|
log.Printf("Received session %s from %s (in %s)", c.SessionId(), c, country)
|
||||||
} else {
|
} else {
|
||||||
log.Printf("Received session %s from %s", c.sessionId, c)
|
log.Printf("Received session %s from %s", c.SessionId(), c)
|
||||||
}
|
}
|
||||||
if c.trackClose.CompareAndSwap(false, true) {
|
if c.trackClose.CompareAndSwap(false, true) {
|
||||||
statsConnectedProxyBackendsCurrent.WithLabelValues(c.Country()).Inc()
|
statsConnectedProxyBackendsCurrent.WithLabelValues(c.Country()).Inc()
|
||||||
|
@ -907,9 +1013,10 @@ func (c *mcuProxyConnection) processEvent(msg *ProxyServerMessage) {
|
||||||
return
|
return
|
||||||
case "update-load":
|
case "update-load":
|
||||||
if proxyDebugMessages {
|
if proxyDebugMessages {
|
||||||
log.Printf("Load of %s now at %d", c, event.Load)
|
log.Printf("Load of %s now at %d (%s)", c, event.Load, event.Bandwidth)
|
||||||
}
|
}
|
||||||
c.load.Store(event.Load)
|
c.load.Store(event.Load)
|
||||||
|
c.bandwidth.Store(event.Bandwidth)
|
||||||
statsProxyBackendLoadCurrent.WithLabelValues(c.url.String()).Set(float64(event.Load))
|
statsProxyBackendLoadCurrent.WithLabelValues(c.url.String()).Set(float64(event.Load))
|
||||||
return
|
return
|
||||||
case "shutdown-scheduled":
|
case "shutdown-scheduled":
|
||||||
|
@ -944,8 +1051,8 @@ func (c *mcuProxyConnection) processBye(msg *ProxyServerMessage) {
|
||||||
bye := msg.Bye
|
bye := msg.Bye
|
||||||
switch bye.Reason {
|
switch bye.Reason {
|
||||||
case "session_resumed":
|
case "session_resumed":
|
||||||
log.Printf("Session %s on %s was resumed by other client, resetting", c.sessionId, c)
|
log.Printf("Session %s on %s was resumed by other client, resetting", c.SessionId(), c)
|
||||||
c.sessionId = ""
|
c.sessionId.Store("")
|
||||||
default:
|
default:
|
||||||
log.Printf("Received bye with unsupported reason from %s %+v", c, bye)
|
log.Printf("Received bye with unsupported reason from %s %+v", c, bye)
|
||||||
}
|
}
|
||||||
|
@ -960,17 +1067,10 @@ func (c *mcuProxyConnection) sendHello() error {
|
||||||
Version: "1.0",
|
Version: "1.0",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
if c.sessionId != "" {
|
if sessionId := c.SessionId(); sessionId != "" {
|
||||||
msg.Hello.ResumeId = c.sessionId
|
msg.Hello.ResumeId = sessionId
|
||||||
} else {
|
} else {
|
||||||
claims := &TokenClaims{
|
tokenString, err := c.proxy.createToken("")
|
||||||
jwt.RegisteredClaims{
|
|
||||||
IssuedAt: jwt.NewNumericDate(time.Now()),
|
|
||||||
Issuer: c.proxy.tokenId,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
|
|
||||||
tokenString, err := token.SignedString(c.proxy.tokenKey)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -1091,7 +1191,48 @@ func (c *mcuProxyConnection) newSubscriber(ctx context.Context, listener McuList
|
||||||
|
|
||||||
proxyId := response.Command.Id
|
proxyId := response.Command.Id
|
||||||
log.Printf("Created %s subscriber %s on %s for %s", streamType, proxyId, c, publisherSessionId)
|
log.Printf("Created %s subscriber %s on %s for %s", streamType, proxyId, c, publisherSessionId)
|
||||||
subscriber := newMcuProxySubscriber(publisherSessionId, response.Command.Sid, streamType, response.Command.Bitrate, proxyId, c, listener)
|
subscriber := newMcuProxySubscriber(publisherSessionId, response.Command.Sid, streamType, response.Command.Bitrate, proxyId, c, listener, nil)
|
||||||
|
c.subscribersLock.Lock()
|
||||||
|
c.subscribers[proxyId] = subscriber
|
||||||
|
c.subscribersLock.Unlock()
|
||||||
|
statsSubscribersCurrent.WithLabelValues(string(streamType)).Inc()
|
||||||
|
statsSubscribersTotal.WithLabelValues(string(streamType)).Inc()
|
||||||
|
return subscriber, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *mcuProxyConnection) newRemoteSubscriber(ctx context.Context, listener McuListener, publisherId string, publisherSessionId string, streamType StreamType, publisherConn *mcuProxyConnection) (McuSubscriber, error) {
|
||||||
|
if c == publisherConn {
|
||||||
|
return c.newSubscriber(ctx, listener, publisherId, publisherSessionId, streamType)
|
||||||
|
}
|
||||||
|
|
||||||
|
remoteToken, err := c.proxy.createToken(publisherId)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
msg := &ProxyClientMessage{
|
||||||
|
Type: "command",
|
||||||
|
Command: &CommandProxyClientMessage{
|
||||||
|
Type: "create-subscriber",
|
||||||
|
StreamType: streamType,
|
||||||
|
PublisherId: publisherId,
|
||||||
|
|
||||||
|
RemoteUrl: publisherConn.rawUrl,
|
||||||
|
RemoteToken: remoteToken,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
response, err := c.performSyncRequest(ctx, msg)
|
||||||
|
if err != nil {
|
||||||
|
// TODO: Cancel request
|
||||||
|
return nil, err
|
||||||
|
} else if response.Type == "error" {
|
||||||
|
return nil, fmt.Errorf("Error creating remote %s subscriber for %s on %s (forwarded to %s): %+v", streamType, publisherSessionId, c, publisherConn, response.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
proxyId := response.Command.Id
|
||||||
|
log.Printf("Created remote %s subscriber %s on %s for %s (forwarded to %s)", streamType, proxyId, c, publisherSessionId, publisherConn)
|
||||||
|
subscriber := newMcuProxySubscriber(publisherSessionId, response.Command.Sid, streamType, response.Command.Bitrate, proxyId, c, listener, publisherConn)
|
||||||
c.subscribersLock.Lock()
|
c.subscribersLock.Lock()
|
||||||
c.subscribers[proxyId] = subscriber
|
c.subscribers[proxyId] = subscriber
|
||||||
c.subscribersLock.Unlock()
|
c.subscribersLock.Unlock()
|
||||||
|
@ -1114,8 +1255,8 @@ type mcuProxy struct {
|
||||||
connRequests atomic.Int64
|
connRequests atomic.Int64
|
||||||
nextSort atomic.Int64
|
nextSort atomic.Int64
|
||||||
|
|
||||||
maxStreamBitrate int
|
maxStreamBitrate atomic.Int32
|
||||||
maxScreenBitrate int
|
maxScreenBitrate atomic.Int32
|
||||||
|
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
publishers map[string]*mcuProxyConnection
|
publishers map[string]*mcuProxyConnection
|
||||||
|
@ -1178,14 +1319,14 @@ func NewMcuProxy(config *goconf.ConfigFile, etcdClient *EtcdClient, rpcClients *
|
||||||
connectionsMap: make(map[string][]*mcuProxyConnection),
|
connectionsMap: make(map[string][]*mcuProxyConnection),
|
||||||
proxyTimeout: proxyTimeout,
|
proxyTimeout: proxyTimeout,
|
||||||
|
|
||||||
maxStreamBitrate: maxStreamBitrate,
|
|
||||||
maxScreenBitrate: maxScreenBitrate,
|
|
||||||
|
|
||||||
publishers: make(map[string]*mcuProxyConnection),
|
publishers: make(map[string]*mcuProxyConnection),
|
||||||
|
|
||||||
rpcClients: rpcClients,
|
rpcClients: rpcClients,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
mcu.maxStreamBitrate.Store(int32(maxStreamBitrate))
|
||||||
|
mcu.maxScreenBitrate.Store(int32(maxScreenBitrate))
|
||||||
|
|
||||||
if err := mcu.loadContinentsMap(config); err != nil {
|
if err := mcu.loadContinentsMap(config); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -1254,9 +1395,9 @@ func (m *mcuProxy) loadContinentsMap(config *goconf.ConfigFile) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *mcuProxy) Start() error {
|
func (m *mcuProxy) Start(ctx context.Context) error {
|
||||||
log.Printf("Maximum bandwidth %d bits/sec per publishing stream", m.maxStreamBitrate)
|
log.Printf("Maximum bandwidth %d bits/sec per publishing stream", m.maxStreamBitrate.Load())
|
||||||
log.Printf("Maximum bandwidth %d bits/sec per screensharing stream", m.maxScreenBitrate)
|
log.Printf("Maximum bandwidth %d bits/sec per screensharing stream", m.maxScreenBitrate.Load())
|
||||||
|
|
||||||
return m.config.Start()
|
return m.config.Start()
|
||||||
}
|
}
|
||||||
|
@ -1274,6 +1415,48 @@ func (m *mcuProxy) Stop() {
|
||||||
m.config.Stop()
|
m.config.Stop()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (m *mcuProxy) createToken(subject string) (string, error) {
|
||||||
|
claims := &TokenClaims{
|
||||||
|
jwt.RegisteredClaims{
|
||||||
|
IssuedAt: jwt.NewNumericDate(time.Now()),
|
||||||
|
Issuer: m.tokenId,
|
||||||
|
Subject: subject,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
|
||||||
|
tokenString, err := token.SignedString(m.tokenKey)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return tokenString, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mcuProxy) hasConnections() bool {
|
||||||
|
m.connectionsMu.RLock()
|
||||||
|
defer m.connectionsMu.RUnlock()
|
||||||
|
for _, conn := range m.connections {
|
||||||
|
if conn.IsConnected() {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mcuProxy) WaitForConnections(ctx context.Context) error {
|
||||||
|
ticker := time.NewTicker(10 * time.Millisecond)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
for !m.hasConnections() {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
case <-ticker.C:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func (m *mcuProxy) AddConnection(ignoreErrors bool, url string, ips ...net.IP) error {
|
func (m *mcuProxy) AddConnection(ignoreErrors bool, url string, ips ...net.IP) error {
|
||||||
m.connectionsMu.Lock()
|
m.connectionsMu.Lock()
|
||||||
defer m.connectionsMu.Unlock()
|
defer m.connectionsMu.Unlock()
|
||||||
|
@ -1373,6 +1556,20 @@ func (m *mcuProxy) KeepConnection(url string, ips ...net.IP) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *mcuProxy) Reload(config *goconf.ConfigFile) {
|
func (m *mcuProxy) Reload(config *goconf.ConfigFile) {
|
||||||
|
maxStreamBitrate, _ := config.GetInt("mcu", "maxstreambitrate")
|
||||||
|
if maxStreamBitrate <= 0 {
|
||||||
|
maxStreamBitrate = defaultMaxStreamBitrate
|
||||||
|
}
|
||||||
|
log.Printf("Maximum bandwidth %d bits/sec per publishing stream", m.maxStreamBitrate.Load())
|
||||||
|
m.maxStreamBitrate.Store(int32(maxStreamBitrate))
|
||||||
|
|
||||||
|
maxScreenBitrate, _ := config.GetInt("mcu", "maxscreenbitrate")
|
||||||
|
if maxScreenBitrate <= 0 {
|
||||||
|
maxScreenBitrate = defaultMaxScreenBitrate
|
||||||
|
}
|
||||||
|
log.Printf("Maximum bandwidth %d bits/sec per screensharing stream", m.maxScreenBitrate.Load())
|
||||||
|
m.maxScreenBitrate.Store(int32(maxScreenBitrate))
|
||||||
|
|
||||||
if err := m.loadContinentsMap(config); err != nil {
|
if err := m.loadContinentsMap(config); err != nil {
|
||||||
log.Printf("Error loading continents map: %s", err)
|
log.Printf("Error loading continents map: %s", err)
|
||||||
}
|
}
|
||||||
|
@ -1565,27 +1762,27 @@ func (m *mcuProxy) removePublisher(publisher *mcuProxyPublisher) {
|
||||||
delete(m.publishers, getStreamId(publisher.id, publisher.StreamType()))
|
delete(m.publishers, getStreamId(publisher.id, publisher.StreamType()))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *mcuProxy) NewPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType StreamType, bitrate int, mediaTypes MediaType, initiator McuInitiator) (McuPublisher, error) {
|
func (m *mcuProxy) createPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType StreamType, bitrate int, mediaTypes MediaType, initiator McuInitiator, connections []*mcuProxyConnection, isAllowed func(c *mcuProxyConnection) bool) McuPublisher {
|
||||||
connections := m.getSortedConnections(initiator)
|
var maxBitrate int
|
||||||
|
if streamType == StreamTypeScreen {
|
||||||
|
maxBitrate = int(m.maxScreenBitrate.Load())
|
||||||
|
} else {
|
||||||
|
maxBitrate = int(m.maxStreamBitrate.Load())
|
||||||
|
}
|
||||||
|
if bitrate <= 0 {
|
||||||
|
bitrate = maxBitrate
|
||||||
|
} else {
|
||||||
|
bitrate = min(bitrate, maxBitrate)
|
||||||
|
}
|
||||||
|
|
||||||
for _, conn := range connections {
|
for _, conn := range connections {
|
||||||
if conn.IsShutdownScheduled() || conn.IsTemporary() {
|
if !isAllowed(conn) || conn.IsShutdownScheduled() || conn.IsTemporary() {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
subctx, cancel := context.WithTimeout(ctx, m.proxyTimeout)
|
subctx, cancel := context.WithTimeout(ctx, m.proxyTimeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
var maxBitrate int
|
|
||||||
if streamType == StreamTypeScreen {
|
|
||||||
maxBitrate = m.maxScreenBitrate
|
|
||||||
} else {
|
|
||||||
maxBitrate = m.maxStreamBitrate
|
|
||||||
}
|
|
||||||
if bitrate <= 0 {
|
|
||||||
bitrate = maxBitrate
|
|
||||||
} else {
|
|
||||||
bitrate = min(bitrate, maxBitrate)
|
|
||||||
}
|
|
||||||
publisher, err := conn.newPublisher(subctx, listener, id, sid, streamType, bitrate, mediaTypes)
|
publisher, err := conn.newPublisher(subctx, listener, id, sid, streamType, bitrate, mediaTypes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("Could not create %s publisher for %s on %s: %s", streamType, id, conn, err)
|
log.Printf("Could not create %s publisher for %s on %s: %s", streamType, id, conn, err)
|
||||||
|
@ -1596,11 +1793,61 @@ func (m *mcuProxy) NewPublisher(ctx context.Context, listener McuListener, id st
|
||||||
m.publishers[getStreamId(id, streamType)] = conn
|
m.publishers[getStreamId(id, streamType)] = conn
|
||||||
m.mu.Unlock()
|
m.mu.Unlock()
|
||||||
m.publisherWaiters.Wakeup()
|
m.publisherWaiters.Wakeup()
|
||||||
return publisher, nil
|
return publisher
|
||||||
}
|
}
|
||||||
|
|
||||||
statsProxyNobackendAvailableTotal.WithLabelValues(string(streamType)).Inc()
|
return nil
|
||||||
return nil, fmt.Errorf("No MCU connection available")
|
}
|
||||||
|
|
||||||
|
func (m *mcuProxy) NewPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType StreamType, bitrate int, mediaTypes MediaType, initiator McuInitiator) (McuPublisher, error) {
|
||||||
|
connections := m.getSortedConnections(initiator)
|
||||||
|
publisher := m.createPublisher(ctx, listener, id, sid, streamType, bitrate, mediaTypes, initiator, connections, func(c *mcuProxyConnection) bool {
|
||||||
|
bw := c.Bandwidth()
|
||||||
|
return bw == nil || bw.AllowIncoming()
|
||||||
|
})
|
||||||
|
if publisher == nil {
|
||||||
|
// No proxy has available bandwidth, select one with the lowest currently used bandwidth.
|
||||||
|
connections2 := make([]*mcuProxyConnection, 0, len(connections))
|
||||||
|
for _, c := range connections {
|
||||||
|
if c.Bandwidth() != nil {
|
||||||
|
connections2 = append(connections2, c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
SlicesSortFunc(connections2, func(a *mcuProxyConnection, b *mcuProxyConnection) int {
|
||||||
|
var incoming_a *float64
|
||||||
|
if bw := a.Bandwidth(); bw != nil {
|
||||||
|
incoming_a = bw.Incoming
|
||||||
|
}
|
||||||
|
|
||||||
|
var incoming_b *float64
|
||||||
|
if bw := b.Bandwidth(); bw != nil {
|
||||||
|
incoming_b = bw.Incoming
|
||||||
|
}
|
||||||
|
|
||||||
|
if incoming_a == nil && incoming_b == nil {
|
||||||
|
return 0
|
||||||
|
} else if incoming_a == nil && incoming_b != nil {
|
||||||
|
return -1
|
||||||
|
} else if incoming_a != nil && incoming_b == nil {
|
||||||
|
return -1
|
||||||
|
} else if *incoming_a < *incoming_b {
|
||||||
|
return -1
|
||||||
|
} else if *incoming_a > *incoming_b {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
})
|
||||||
|
publisher = m.createPublisher(ctx, listener, id, sid, streamType, bitrate, mediaTypes, initiator, connections2, func(c *mcuProxyConnection) bool {
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
if publisher == nil {
|
||||||
|
statsProxyNobackendAvailableTotal.WithLabelValues(string(streamType)).Inc()
|
||||||
|
return nil, fmt.Errorf("No MCU connection available")
|
||||||
|
}
|
||||||
|
|
||||||
|
return publisher, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *mcuProxy) getPublisherConnection(publisher string, streamType StreamType) *mcuProxyConnection {
|
func (m *mcuProxy) getPublisherConnection(publisher string, streamType StreamType) *mcuProxyConnection {
|
||||||
|
@ -1641,7 +1888,38 @@ func (m *mcuProxy) waitForPublisherConnection(ctx context.Context, publisher str
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *mcuProxy) NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType) (McuSubscriber, error) {
|
type proxyPublisherInfo struct {
|
||||||
|
id string
|
||||||
|
conn *mcuProxyConnection
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mcuProxy) createSubscriber(ctx context.Context, listener McuListener, id string, publisher string, streamType StreamType, publisherConn *mcuProxyConnection, connections []*mcuProxyConnection, isAllowed func(c *mcuProxyConnection) bool) McuSubscriber {
|
||||||
|
for _, conn := range connections {
|
||||||
|
if !isAllowed(conn) || conn.IsShutdownScheduled() || conn.IsTemporary() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var subscriber McuSubscriber
|
||||||
|
var err error
|
||||||
|
if conn == publisherConn {
|
||||||
|
subscriber, err = conn.newSubscriber(ctx, listener, id, publisher, streamType)
|
||||||
|
} else {
|
||||||
|
subscriber, err = conn.newRemoteSubscriber(ctx, listener, id, publisher, streamType, publisherConn)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Could not create subscriber for %s publisher %s on %s: %s", streamType, publisher, conn, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
return subscriber
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mcuProxy) NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType, initiator McuInitiator) (McuSubscriber, error) {
|
||||||
|
var publisherInfo *proxyPublisherInfo
|
||||||
if conn := m.getPublisherConnection(publisher, streamType); conn != nil {
|
if conn := m.getPublisherConnection(publisher, streamType); conn != nil {
|
||||||
// Fast common path: publisher is available locally.
|
// Fast common path: publisher is available locally.
|
||||||
conn.publishersLock.Lock()
|
conn.publishersLock.Lock()
|
||||||
|
@ -1651,113 +1929,190 @@ func (m *mcuProxy) NewSubscriber(ctx context.Context, listener McuListener, publ
|
||||||
return nil, fmt.Errorf("Unknown publisher %s", publisher)
|
return nil, fmt.Errorf("Unknown publisher %s", publisher)
|
||||||
}
|
}
|
||||||
|
|
||||||
return conn.newSubscriber(ctx, listener, id, publisher, streamType)
|
publisherInfo = &proxyPublisherInfo{
|
||||||
}
|
id: id,
|
||||||
|
conn: conn,
|
||||||
log.Printf("No %s publisher %s found yet, deferring", streamType, publisher)
|
|
||||||
ch := make(chan McuSubscriber)
|
|
||||||
getctx, cancel := context.WithCancel(ctx)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
// Wait for publisher to be created locally.
|
|
||||||
go func() {
|
|
||||||
if conn := m.waitForPublisherConnection(getctx, publisher, streamType); conn != nil {
|
|
||||||
cancel() // Cancel pending RPC calls.
|
|
||||||
|
|
||||||
conn.publishersLock.Lock()
|
|
||||||
id, found := conn.publisherIds[getStreamId(publisher, streamType)]
|
|
||||||
conn.publishersLock.Unlock()
|
|
||||||
if !found {
|
|
||||||
log.Printf("Unknown id for local %s publisher %s", streamType, publisher)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
subscriber, err := conn.newSubscriber(ctx, listener, id, publisher, streamType)
|
|
||||||
if subscriber != nil {
|
|
||||||
ch <- subscriber
|
|
||||||
} else if err != nil {
|
|
||||||
log.Printf("Error creating local subscriber for %s publisher %s: %s", streamType, publisher, err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}()
|
} else {
|
||||||
|
log.Printf("No %s publisher %s found yet, deferring", streamType, publisher)
|
||||||
|
ch := make(chan *proxyPublisherInfo, 1)
|
||||||
|
getctx, cancel := context.WithCancel(ctx)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
// Wait for publisher to be created on one of the other servers in the cluster.
|
var wg sync.WaitGroup
|
||||||
if clients := m.rpcClients.GetClients(); len(clients) > 0 {
|
|
||||||
for _, client := range clients {
|
|
||||||
go func(client *GrpcClient) {
|
|
||||||
id, url, ip, err := client.GetPublisherId(getctx, publisher, streamType)
|
|
||||||
if errors.Is(err, context.Canceled) {
|
|
||||||
return
|
|
||||||
} else if err != nil {
|
|
||||||
log.Printf("Error getting %s publisher id %s from %s: %s", streamType, publisher, client.Target(), err)
|
|
||||||
return
|
|
||||||
} else if id == "" {
|
|
||||||
// Publisher not found on other server
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
|
// Wait for publisher to be created locally.
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
if conn := m.waitForPublisherConnection(getctx, publisher, streamType); conn != nil {
|
||||||
cancel() // Cancel pending RPC calls.
|
cancel() // Cancel pending RPC calls.
|
||||||
log.Printf("Found publisher id %s through %s on proxy %s", id, client.Target(), url)
|
|
||||||
|
|
||||||
m.connectionsMu.RLock()
|
conn.publishersLock.Lock()
|
||||||
connections := m.connections
|
id, found := conn.publisherIds[getStreamId(publisher, streamType)]
|
||||||
m.connectionsMu.RUnlock()
|
conn.publishersLock.Unlock()
|
||||||
var publisherConn *mcuProxyConnection
|
if !found {
|
||||||
for _, conn := range connections {
|
ch <- &proxyPublisherInfo{
|
||||||
if conn.rawUrl != url || !ip.Equal(conn.ip) {
|
err: fmt.Errorf("Unknown id for local %s publisher %s", streamType, publisher),
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Simple case, signaling server has a connection to the same endpoint
|
|
||||||
publisherConn = conn
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if publisherConn == nil {
|
|
||||||
publisherConn, err = newMcuProxyConnection(m, url, ip)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Could not create temporary connection to %s for %s publisher %s: %s", url, streamType, publisher, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
publisherConn.setTemporary()
|
|
||||||
publisherConn.start()
|
|
||||||
if err := publisherConn.waitUntilConnected(ctx); err != nil {
|
|
||||||
log.Printf("Could not establish new connection to %s: %s", publisherConn, err)
|
|
||||||
publisherConn.closeIfEmpty()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
m.connectionsMu.Lock()
|
|
||||||
m.connections = append(m.connections, publisherConn)
|
|
||||||
conns, found := m.connectionsMap[url]
|
|
||||||
if found {
|
|
||||||
conns = append(conns, publisherConn)
|
|
||||||
} else {
|
|
||||||
conns = []*mcuProxyConnection{publisherConn}
|
|
||||||
}
|
|
||||||
m.connectionsMap[url] = conns
|
|
||||||
m.connectionsMu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
subscriber, err := publisherConn.newSubscriber(ctx, listener, id, publisher, streamType)
|
|
||||||
if err != nil {
|
|
||||||
if publisherConn.IsTemporary() {
|
|
||||||
publisherConn.closeIfEmpty()
|
|
||||||
}
|
|
||||||
log.Printf("Could not create subscriber for %s publisher %s: %s", streamType, publisher, err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
ch <- subscriber
|
ch <- &proxyPublisherInfo{
|
||||||
}(client)
|
id: id,
|
||||||
|
conn: conn,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for publisher to be created on one of the other servers in the cluster.
|
||||||
|
if clients := m.rpcClients.GetClients(); len(clients) > 0 {
|
||||||
|
for _, client := range clients {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(client *GrpcClient) {
|
||||||
|
defer wg.Done()
|
||||||
|
id, url, ip, err := client.GetPublisherId(getctx, publisher, streamType)
|
||||||
|
if errors.Is(err, context.Canceled) {
|
||||||
|
return
|
||||||
|
} else if err != nil {
|
||||||
|
log.Printf("Error getting %s publisher id %s from %s: %s", streamType, publisher, client.Target(), err)
|
||||||
|
return
|
||||||
|
} else if id == "" {
|
||||||
|
// Publisher not found on other server
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
cancel() // Cancel pending RPC calls.
|
||||||
|
log.Printf("Found publisher id %s through %s on proxy %s", id, client.Target(), url)
|
||||||
|
|
||||||
|
m.connectionsMu.RLock()
|
||||||
|
connections := m.connections
|
||||||
|
m.connectionsMu.RUnlock()
|
||||||
|
var publisherConn *mcuProxyConnection
|
||||||
|
for _, conn := range connections {
|
||||||
|
if conn.rawUrl != url || !ip.Equal(conn.ip) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Simple case, signaling server has a connection to the same endpoint
|
||||||
|
publisherConn = conn
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if publisherConn == nil {
|
||||||
|
publisherConn, err = newMcuProxyConnection(m, url, ip)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Could not create temporary connection to %s for %s publisher %s: %s", url, streamType, publisher, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
publisherConn.setTemporary()
|
||||||
|
publisherConn.start()
|
||||||
|
if err := publisherConn.waitUntilConnected(ctx); err != nil {
|
||||||
|
log.Printf("Could not establish new connection to %s: %s", publisherConn, err)
|
||||||
|
publisherConn.closeIfEmpty()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
m.connectionsMu.Lock()
|
||||||
|
m.connections = append(m.connections, publisherConn)
|
||||||
|
conns, found := m.connectionsMap[url]
|
||||||
|
if found {
|
||||||
|
conns = append(conns, publisherConn)
|
||||||
|
} else {
|
||||||
|
conns = []*mcuProxyConnection{publisherConn}
|
||||||
|
}
|
||||||
|
m.connectionsMap[url] = conns
|
||||||
|
m.connectionsMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
ch <- &proxyPublisherInfo{
|
||||||
|
id: id,
|
||||||
|
conn: publisherConn,
|
||||||
|
}
|
||||||
|
}(client)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
select {
|
||||||
|
case ch <- &proxyPublisherInfo{
|
||||||
|
err: fmt.Errorf("No %s publisher %s found", streamType, publisher),
|
||||||
|
}:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case info := <-ch:
|
||||||
|
publisherInfo = info
|
||||||
|
case <-ctx.Done():
|
||||||
|
return nil, fmt.Errorf("No %s publisher %s found", streamType, publisher)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
select {
|
if publisherInfo.err != nil {
|
||||||
case subscriber := <-ch:
|
return nil, publisherInfo.err
|
||||||
return subscriber, nil
|
|
||||||
case <-ctx.Done():
|
|
||||||
return nil, fmt.Errorf("No %s publisher %s found", streamType, publisher)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bw := publisherInfo.conn.Bandwidth()
|
||||||
|
allowOutgoing := bw == nil || bw.AllowOutgoing()
|
||||||
|
if !allowOutgoing || !publisherInfo.conn.IsSameCountry(initiator) {
|
||||||
|
connections := m.getSortedConnections(initiator)
|
||||||
|
if !allowOutgoing || len(connections) > 0 && !connections[0].IsSameCountry(publisherInfo.conn) {
|
||||||
|
// Connect to remote publisher through "closer" gateway.
|
||||||
|
subscriber := m.createSubscriber(ctx, listener, publisherInfo.id, publisher, streamType, publisherInfo.conn, connections, func(c *mcuProxyConnection) bool {
|
||||||
|
bw := c.Bandwidth()
|
||||||
|
return bw == nil || bw.AllowOutgoing()
|
||||||
|
})
|
||||||
|
if subscriber == nil {
|
||||||
|
connections2 := make([]*mcuProxyConnection, 0, len(connections))
|
||||||
|
for _, c := range connections {
|
||||||
|
if c.Bandwidth() != nil {
|
||||||
|
connections2 = append(connections2, c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
SlicesSortFunc(connections2, func(a *mcuProxyConnection, b *mcuProxyConnection) int {
|
||||||
|
var outgoing_a *float64
|
||||||
|
if bw := a.Bandwidth(); bw != nil {
|
||||||
|
outgoing_a = bw.Outgoing
|
||||||
|
}
|
||||||
|
|
||||||
|
var outgoing_b *float64
|
||||||
|
if bw := b.Bandwidth(); bw != nil {
|
||||||
|
outgoing_b = bw.Outgoing
|
||||||
|
}
|
||||||
|
|
||||||
|
if outgoing_a == nil && outgoing_b == nil {
|
||||||
|
return 0
|
||||||
|
} else if outgoing_a == nil && outgoing_b != nil {
|
||||||
|
return -1
|
||||||
|
} else if outgoing_a != nil && outgoing_b == nil {
|
||||||
|
return -1
|
||||||
|
} else if *outgoing_a < *outgoing_b {
|
||||||
|
return -1
|
||||||
|
} else if *outgoing_a > *outgoing_b {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
})
|
||||||
|
subscriber = m.createSubscriber(ctx, listener, publisherInfo.id, publisher, streamType, publisherInfo.conn, connections2, func(c *mcuProxyConnection) bool {
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if subscriber != nil {
|
||||||
|
return subscriber, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
subscriber, err := publisherInfo.conn.newSubscriber(ctx, listener, publisherInfo.id, publisher, streamType)
|
||||||
|
if err != nil {
|
||||||
|
if publisherInfo.conn.IsTemporary() {
|
||||||
|
publisherInfo.conn.closeIfEmpty()
|
||||||
|
}
|
||||||
|
log.Printf("Could not create subscriber for %s publisher %s on %s: %s", streamType, publisher, publisherInfo.conn, err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return subscriber, nil
|
||||||
}
|
}
|
||||||
|
|
1558
mcu_proxy_test.go
1558
mcu_proxy_test.go
File diff suppressed because it is too large
Load diff
19
mcu_test.go
19
mcu_test.go
|
@ -23,6 +23,7 @@ package signaling
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"sync"
|
"sync"
|
||||||
|
@ -49,7 +50,7 @@ func NewTestMCU() (*TestMCU, error) {
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TestMCU) Start() error {
|
func (m *TestMCU) Start(ctx context.Context) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -117,7 +118,7 @@ func (m *TestMCU) GetPublisher(id string) *TestMCUPublisher {
|
||||||
return m.publishers[id]
|
return m.publishers[id]
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *TestMCU) NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType) (McuSubscriber, error) {
|
func (m *TestMCU) NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType, initiator McuInitiator) (McuSubscriber, error) {
|
||||||
m.mu.Lock()
|
m.mu.Lock()
|
||||||
defer m.mu.Unlock()
|
defer m.mu.Unlock()
|
||||||
|
|
||||||
|
@ -222,6 +223,18 @@ func (p *TestMCUPublisher) SendMessage(ctx context.Context, message *MessageClie
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *TestMCUPublisher) GetStreams(ctx context.Context) ([]PublisherStream, error) {
|
||||||
|
return nil, errors.New("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *TestMCUPublisher) PublishRemote(ctx context.Context, remoteId string, hostname string, port int, rtcpPort int) error {
|
||||||
|
return errors.New("remote publishing not supported")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *TestMCUPublisher) UnpublishRemote(ctx context.Context, remoteId string) error {
|
||||||
|
return errors.New("remote publishing not supported")
|
||||||
|
}
|
||||||
|
|
||||||
type TestMCUSubscriber struct {
|
type TestMCUSubscriber struct {
|
||||||
TestMCUClient
|
TestMCUClient
|
||||||
|
|
||||||
|
@ -253,6 +266,8 @@ func (s *TestMCUSubscriber) SendMessage(ctx context.Context, message *MessageCli
|
||||||
"type": "offer",
|
"type": "offer",
|
||||||
"sdp": sdp,
|
"sdp": sdp,
|
||||||
})
|
})
|
||||||
|
case "answer":
|
||||||
|
callback(nil, nil)
|
||||||
default:
|
default:
|
||||||
callback(fmt.Errorf("Message type %s is not implemented", data.Type), nil)
|
callback(fmt.Errorf("Message type %s is not implemented", data.Type), nil)
|
||||||
}
|
}
|
||||||
|
|
|
@ -104,6 +104,7 @@ func testNatsClient_Subscribe(t *testing.T, client NatsClient) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNatsClient_Subscribe(t *testing.T) {
|
func TestNatsClient_Subscribe(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
||||||
client := CreateLocalNatsClientForTest(t)
|
client := CreateLocalNatsClientForTest(t)
|
||||||
|
|
||||||
|
@ -120,6 +121,7 @@ func testNatsClient_PublishAfterClose(t *testing.T, client NatsClient) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNatsClient_PublishAfterClose(t *testing.T) {
|
func TestNatsClient_PublishAfterClose(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
||||||
client := CreateLocalNatsClientForTest(t)
|
client := CreateLocalNatsClientForTest(t)
|
||||||
|
|
||||||
|
@ -137,6 +139,7 @@ func testNatsClient_SubscribeAfterClose(t *testing.T, client NatsClient) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNatsClient_SubscribeAfterClose(t *testing.T) {
|
func TestNatsClient_SubscribeAfterClose(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
||||||
client := CreateLocalNatsClientForTest(t)
|
client := CreateLocalNatsClientForTest(t)
|
||||||
|
|
||||||
|
@ -159,6 +162,7 @@ func testNatsClient_BadSubjects(t *testing.T, client NatsClient) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNatsClient_BadSubjects(t *testing.T) {
|
func TestNatsClient_BadSubjects(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
ensureNoGoroutinesLeak(t, func(t *testing.T) {
|
||||||
client := CreateLocalNatsClientForTest(t)
|
client := CreateLocalNatsClientForTest(t)
|
||||||
|
|
||||||
|
|
|
@ -118,6 +118,7 @@ func TestNotifierResetWillNotify(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNotifierDuplicate(t *testing.T) {
|
func TestNotifierDuplicate(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
var notifier Notifier
|
var notifier Notifier
|
||||||
var wgStart sync.WaitGroup
|
var wgStart sync.WaitGroup
|
||||||
var wgEnd sync.WaitGroup
|
var wgEnd sync.WaitGroup
|
||||||
|
|
|
@ -8,6 +8,12 @@
|
||||||
# See "https://golang.org/pkg/net/http/pprof/" for further information.
|
# See "https://golang.org/pkg/net/http/pprof/" for further information.
|
||||||
#debug = false
|
#debug = false
|
||||||
|
|
||||||
|
# Comma separated list of trusted proxies (IPs or CIDR networks) that may set
|
||||||
|
# the "X-Real-Ip" or "X-Forwarded-For" headers. If both are provided, the
|
||||||
|
# "X-Real-Ip" header will take precedence (if valid).
|
||||||
|
# Leave empty to allow loopback and local addresses.
|
||||||
|
#trustedproxies =
|
||||||
|
|
||||||
# ISO 3166 country this proxy is located at. This will be used by the signaling
|
# ISO 3166 country this proxy is located at. This will be used by the signaling
|
||||||
# servers to determine the closest proxy for publishers.
|
# servers to determine the closest proxy for publishers.
|
||||||
#country = DE
|
#country = DE
|
||||||
|
@ -20,6 +26,36 @@
|
||||||
# - etcd: Token information are retrieved from an etcd cluster (see below).
|
# - etcd: Token information are retrieved from an etcd cluster (see below).
|
||||||
tokentype = static
|
tokentype = static
|
||||||
|
|
||||||
|
# The external hostname for remote streams. Leaving this empty will autodetect
|
||||||
|
# and use the first public IP found on the available network interfaces.
|
||||||
|
#hostname =
|
||||||
|
|
||||||
|
# The token id to use when connecting remote stream.
|
||||||
|
#token_id = server1
|
||||||
|
|
||||||
|
# The private key for the configured token id to use when connecting remote
|
||||||
|
# streams.
|
||||||
|
#token_key = privkey.pem
|
||||||
|
|
||||||
|
# If set to "true", certificate validation of remote stream requests will be
|
||||||
|
# skipped. This should only be enabled during development, e.g. to work with
|
||||||
|
# self-signed certificates.
|
||||||
|
#skipverify = false
|
||||||
|
|
||||||
|
[bandwidth]
|
||||||
|
# Target bandwidth limit for incoming streams (in megabits per second).
|
||||||
|
# Set to 0 to disable the limit. If the limit is reached, the proxy notifies
|
||||||
|
# the signaling servers that another proxy should be used for publishing if
|
||||||
|
# possible.
|
||||||
|
#incoming = 1024
|
||||||
|
|
||||||
|
# Target bandwidth limit for outgoing streams (in megabits per second).
|
||||||
|
# Set to 0 to disable the limit. If the limit is reached, the proxy notifies
|
||||||
|
# the signaling servers that another proxy should be used for subscribing if
|
||||||
|
# possible. Note that this might require additional outgoing bandwidth for the
|
||||||
|
# remote streams.
|
||||||
|
#outgoing = 1024
|
||||||
|
|
||||||
[tokens]
|
[tokens]
|
||||||
# For token type "static": Mapping of <tokenid> = <publickey> of signaling
|
# For token type "static": Mapping of <tokenid> = <publickey> of signaling
|
||||||
# servers allowed to connect.
|
# servers allowed to connect.
|
||||||
|
|
|
@ -36,6 +36,8 @@ import (
|
||||||
|
|
||||||
"github.com/dlintw/goconf"
|
"github.com/dlintw/goconf"
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
|
|
||||||
|
signaling "github.com/strukturag/nextcloud-spreed-signaling"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -90,7 +92,7 @@ func main() {
|
||||||
}
|
}
|
||||||
defer proxy.Stop()
|
defer proxy.Stop()
|
||||||
|
|
||||||
if addr, _ := config.GetString("http", "listen"); addr != "" {
|
if addr, _ := signaling.GetStringOptionWithEnv(config, "http", "listen"); addr != "" {
|
||||||
readTimeout, _ := config.GetInt("http", "readtimeout")
|
readTimeout, _ := config.GetInt("http", "readtimeout")
|
||||||
if readTimeout <= 0 {
|
if readTimeout <= 0 {
|
||||||
readTimeout = defaultReadTimeout
|
readTimeout = defaultReadTimeout
|
||||||
|
|
|
@ -53,18 +53,18 @@ func (c *ProxyClient) SetSession(session *ProxySession) {
|
||||||
c.session.Store(session)
|
c.session.Store(session)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *ProxyClient) OnClosed(client *signaling.Client) {
|
func (c *ProxyClient) OnClosed(client signaling.HandlerClient) {
|
||||||
if session := c.GetSession(); session != nil {
|
if session := c.GetSession(); session != nil {
|
||||||
session.MarkUsed()
|
session.MarkUsed()
|
||||||
}
|
}
|
||||||
c.proxy.clientClosed(&c.Client)
|
c.proxy.clientClosed(&c.Client)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *ProxyClient) OnMessageReceived(client *signaling.Client, data []byte) {
|
func (c *ProxyClient) OnMessageReceived(client signaling.HandlerClient, data []byte) {
|
||||||
c.proxy.processMessage(c, data)
|
c.proxy.processMessage(c, data)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *ProxyClient) OnRTTReceived(client *signaling.Client, rtt time.Duration) {
|
func (c *ProxyClient) OnRTTReceived(client signaling.HandlerClient, rtt time.Duration) {
|
||||||
if session := c.GetSession(); session != nil {
|
if session := c.GetSession(); session != nil {
|
||||||
session.MarkUsed()
|
session.MarkUsed()
|
||||||
}
|
}
|
||||||
|
|
490
proxy/proxy_remote.go
Normal file
490
proxy/proxy_remote.go
Normal file
|
@ -0,0 +1,490 @@
|
||||||
|
/**
|
||||||
|
* Standalone signaling server for the Nextcloud Spreed app.
|
||||||
|
* Copyright (C) 2024 struktur AG
|
||||||
|
*
|
||||||
|
* @author Joachim Bauch <bauch@struktur.de>
|
||||||
|
*
|
||||||
|
* @license GNU AGPL version 3 or any later version
|
||||||
|
*
|
||||||
|
* This program is free software: you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License as published by
|
||||||
|
* the Free Software Foundation, either version 3 of the License, or
|
||||||
|
* (at your option) any later version.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful,
|
||||||
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
* GNU Affero General Public License for more details.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/rsa"
|
||||||
|
"crypto/tls"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang-jwt/jwt/v4"
|
||||||
|
"github.com/gorilla/websocket"
|
||||||
|
|
||||||
|
signaling "github.com/strukturag/nextcloud-spreed-signaling"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
initialReconnectInterval = 1 * time.Second
|
||||||
|
maxReconnectInterval = 32 * time.Second
|
||||||
|
|
||||||
|
// Time allowed to write a message to the peer.
|
||||||
|
writeWait = 10 * time.Second
|
||||||
|
|
||||||
|
// Time allowed to read the next pong message from the peer.
|
||||||
|
pongWait = 60 * time.Second
|
||||||
|
|
||||||
|
// Send pings to peer with this period. Must be less than pongWait.
|
||||||
|
pingPeriod = (pongWait * 9) / 10
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrNotConnected = errors.New("not connected")
|
||||||
|
)
|
||||||
|
|
||||||
|
type RemoteConnection struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
url *url.URL
|
||||||
|
conn *websocket.Conn
|
||||||
|
closer *signaling.Closer
|
||||||
|
closed atomic.Bool
|
||||||
|
|
||||||
|
tokenId string
|
||||||
|
tokenKey *rsa.PrivateKey
|
||||||
|
tlsConfig *tls.Config
|
||||||
|
|
||||||
|
connectedSince time.Time
|
||||||
|
reconnectTimer *time.Timer
|
||||||
|
reconnectInterval atomic.Int64
|
||||||
|
|
||||||
|
msgId atomic.Int64
|
||||||
|
helloMsgId string
|
||||||
|
sessionId string
|
||||||
|
|
||||||
|
pendingMessages []*signaling.ProxyClientMessage
|
||||||
|
messageCallbacks map[string]chan *signaling.ProxyServerMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewRemoteConnection(proxyUrl string, tokenId string, tokenKey *rsa.PrivateKey, tlsConfig *tls.Config) (*RemoteConnection, error) {
|
||||||
|
u, err := url.Parse(proxyUrl)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
result := &RemoteConnection{
|
||||||
|
url: u,
|
||||||
|
closer: signaling.NewCloser(),
|
||||||
|
|
||||||
|
tokenId: tokenId,
|
||||||
|
tokenKey: tokenKey,
|
||||||
|
tlsConfig: tlsConfig,
|
||||||
|
|
||||||
|
reconnectTimer: time.NewTimer(0),
|
||||||
|
|
||||||
|
messageCallbacks: make(map[string]chan *signaling.ProxyServerMessage),
|
||||||
|
}
|
||||||
|
result.reconnectInterval.Store(int64(initialReconnectInterval))
|
||||||
|
|
||||||
|
go result.writePump()
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RemoteConnection) String() string {
|
||||||
|
return c.url.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RemoteConnection) reconnect() {
|
||||||
|
u, err := c.url.Parse("proxy")
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Could not resolve url to proxy at %s: %s", c, err)
|
||||||
|
c.scheduleReconnect()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if u.Scheme == "http" {
|
||||||
|
u.Scheme = "ws"
|
||||||
|
} else if u.Scheme == "https" {
|
||||||
|
u.Scheme = "wss"
|
||||||
|
}
|
||||||
|
|
||||||
|
dialer := websocket.Dialer{
|
||||||
|
Proxy: http.ProxyFromEnvironment,
|
||||||
|
TLSClientConfig: c.tlsConfig,
|
||||||
|
}
|
||||||
|
|
||||||
|
conn, _, err := dialer.DialContext(context.TODO(), u.String(), nil)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Error connecting to proxy at %s: %s", c, err)
|
||||||
|
c.scheduleReconnect()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("Connected to %s", c)
|
||||||
|
c.closed.Store(false)
|
||||||
|
|
||||||
|
c.mu.Lock()
|
||||||
|
c.connectedSince = time.Now()
|
||||||
|
c.conn = conn
|
||||||
|
c.mu.Unlock()
|
||||||
|
|
||||||
|
c.reconnectInterval.Store(int64(initialReconnectInterval))
|
||||||
|
|
||||||
|
if err := c.sendHello(); err != nil {
|
||||||
|
log.Printf("Error sending hello request to proxy at %s: %s", c, err)
|
||||||
|
c.scheduleReconnect()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if !c.sendPing() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
go c.readPump(conn)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RemoteConnection) scheduleReconnect() {
|
||||||
|
if err := c.sendClose(); err != nil && err != ErrNotConnected {
|
||||||
|
log.Printf("Could not send close message to %s: %s", c, err)
|
||||||
|
}
|
||||||
|
c.close()
|
||||||
|
|
||||||
|
interval := c.reconnectInterval.Load()
|
||||||
|
c.reconnectTimer.Reset(time.Duration(interval))
|
||||||
|
|
||||||
|
interval = interval * 2
|
||||||
|
if interval > int64(maxReconnectInterval) {
|
||||||
|
interval = int64(maxReconnectInterval)
|
||||||
|
}
|
||||||
|
c.reconnectInterval.Store(interval)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RemoteConnection) sendHello() error {
|
||||||
|
c.helloMsgId = strconv.FormatInt(c.msgId.Add(1), 10)
|
||||||
|
msg := &signaling.ProxyClientMessage{
|
||||||
|
Id: c.helloMsgId,
|
||||||
|
Type: "hello",
|
||||||
|
Hello: &signaling.HelloProxyClientMessage{
|
||||||
|
Version: "1.0",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if sessionId := c.sessionId; sessionId != "" {
|
||||||
|
msg.Hello.ResumeId = sessionId
|
||||||
|
} else {
|
||||||
|
tokenString, err := c.createToken("")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
msg.Hello.Token = tokenString
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.SendMessage(msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RemoteConnection) sendClose() error {
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
|
if c.conn == nil {
|
||||||
|
return ErrNotConnected
|
||||||
|
}
|
||||||
|
|
||||||
|
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
|
||||||
|
return c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RemoteConnection) close() {
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
|
if c.conn != nil {
|
||||||
|
c.conn.Close()
|
||||||
|
c.conn = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RemoteConnection) Close() error {
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
c.reconnectTimer.Stop()
|
||||||
|
if c.conn == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
c.sendClose()
|
||||||
|
err1 := c.conn.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), time.Time{})
|
||||||
|
err2 := c.conn.Close()
|
||||||
|
c.conn = nil
|
||||||
|
if err1 != nil {
|
||||||
|
return err1
|
||||||
|
}
|
||||||
|
return err2
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RemoteConnection) createToken(subject string) (string, error) {
|
||||||
|
claims := &signaling.TokenClaims{
|
||||||
|
RegisteredClaims: jwt.RegisteredClaims{
|
||||||
|
IssuedAt: jwt.NewNumericDate(time.Now()),
|
||||||
|
Issuer: c.tokenId,
|
||||||
|
Subject: subject,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
|
||||||
|
tokenString, err := token.SignedString(c.tokenKey)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return tokenString, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RemoteConnection) SendMessage(msg *signaling.ProxyClientMessage) error {
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
|
return c.sendMessageLocked(context.Background(), msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RemoteConnection) deferMessage(ctx context.Context, msg *signaling.ProxyClientMessage) {
|
||||||
|
c.pendingMessages = append(c.pendingMessages, msg)
|
||||||
|
if ctx.Done() != nil {
|
||||||
|
go func() {
|
||||||
|
<-ctx.Done()
|
||||||
|
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
for idx, m := range c.pendingMessages {
|
||||||
|
if m == msg {
|
||||||
|
c.pendingMessages[idx] = nil
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RemoteConnection) sendMessageLocked(ctx context.Context, msg *signaling.ProxyClientMessage) error {
|
||||||
|
if c.conn == nil {
|
||||||
|
// Defer until connected.
|
||||||
|
c.deferMessage(ctx, msg)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.helloMsgId != "" && c.helloMsgId != msg.Id {
|
||||||
|
// Hello request is still inflight, defer.
|
||||||
|
c.deferMessage(ctx, msg)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
|
||||||
|
return c.conn.WriteJSON(msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RemoteConnection) readPump(conn *websocket.Conn) {
|
||||||
|
defer func() {
|
||||||
|
if !c.closed.Load() {
|
||||||
|
c.scheduleReconnect()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
defer c.close()
|
||||||
|
|
||||||
|
for {
|
||||||
|
msgType, msg, err := conn.ReadMessage()
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, websocket.ErrCloseSent) {
|
||||||
|
break
|
||||||
|
} else if _, ok := err.(*websocket.CloseError); !ok || websocket.IsUnexpectedCloseError(err,
|
||||||
|
websocket.CloseNormalClosure,
|
||||||
|
websocket.CloseGoingAway,
|
||||||
|
websocket.CloseNoStatusReceived) {
|
||||||
|
log.Printf("Error reading from %s: %v", c, err)
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if msgType != websocket.TextMessage {
|
||||||
|
log.Printf("unexpected message type %q (%s)", msgType, string(msg))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var message signaling.ProxyServerMessage
|
||||||
|
if err := json.Unmarshal(msg, &message); err != nil {
|
||||||
|
log.Printf("could not decode message %s: %s", string(msg), err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
c.mu.Lock()
|
||||||
|
helloMsgId := c.helloMsgId
|
||||||
|
c.mu.Unlock()
|
||||||
|
|
||||||
|
if helloMsgId != "" && message.Id == helloMsgId {
|
||||||
|
c.processHello(&message)
|
||||||
|
} else {
|
||||||
|
c.processMessage(&message)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RemoteConnection) sendPing() bool {
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
if c.conn == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
msg := strconv.FormatInt(now.UnixNano(), 10)
|
||||||
|
c.conn.SetWriteDeadline(now.Add(writeWait)) // nolint
|
||||||
|
if err := c.conn.WriteMessage(websocket.PingMessage, []byte(msg)); err != nil {
|
||||||
|
log.Printf("Could not send ping to proxy at %s: %v", c, err)
|
||||||
|
go c.scheduleReconnect()
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RemoteConnection) writePump() {
|
||||||
|
ticker := time.NewTicker(pingPeriod)
|
||||||
|
defer func() {
|
||||||
|
ticker.Stop()
|
||||||
|
}()
|
||||||
|
|
||||||
|
defer c.reconnectTimer.Stop()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-c.reconnectTimer.C:
|
||||||
|
c.reconnect()
|
||||||
|
case <-ticker.C:
|
||||||
|
c.sendPing()
|
||||||
|
case <-c.closer.C:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RemoteConnection) processHello(msg *signaling.ProxyServerMessage) {
|
||||||
|
c.helloMsgId = ""
|
||||||
|
switch msg.Type {
|
||||||
|
case "error":
|
||||||
|
if msg.Error.Code == "no_such_session" {
|
||||||
|
log.Printf("Session %s could not be resumed on %s, registering new", c.sessionId, c)
|
||||||
|
c.sessionId = ""
|
||||||
|
if err := c.sendHello(); err != nil {
|
||||||
|
log.Printf("Could not send hello request to %s: %s", c, err)
|
||||||
|
c.scheduleReconnect()
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("Hello connection to %s failed with %+v, reconnecting", c, msg.Error)
|
||||||
|
c.scheduleReconnect()
|
||||||
|
case "hello":
|
||||||
|
resumed := c.sessionId == msg.Hello.SessionId
|
||||||
|
c.sessionId = msg.Hello.SessionId
|
||||||
|
country := ""
|
||||||
|
if msg.Hello.Server != nil {
|
||||||
|
if country = msg.Hello.Server.Country; country != "" && !signaling.IsValidCountry(country) {
|
||||||
|
log.Printf("Proxy %s sent invalid country %s in hello response", c, country)
|
||||||
|
country = ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if resumed {
|
||||||
|
log.Printf("Resumed session %s on %s", c.sessionId, c)
|
||||||
|
} else if country != "" {
|
||||||
|
log.Printf("Received session %s from %s (in %s)", c.sessionId, c, country)
|
||||||
|
} else {
|
||||||
|
log.Printf("Received session %s from %s", c.sessionId, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
pending := c.pendingMessages
|
||||||
|
c.pendingMessages = nil
|
||||||
|
for _, m := range pending {
|
||||||
|
if m == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := c.sendMessageLocked(context.Background(), m); err != nil {
|
||||||
|
log.Printf("Could not send pending message %+v to %s: %s", m, c, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
log.Printf("Received unsupported hello response %+v from %s, reconnecting", msg, c)
|
||||||
|
c.scheduleReconnect()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RemoteConnection) processMessage(msg *signaling.ProxyServerMessage) {
|
||||||
|
if msg.Id != "" {
|
||||||
|
c.mu.Lock()
|
||||||
|
ch, found := c.messageCallbacks[msg.Id]
|
||||||
|
if found {
|
||||||
|
delete(c.messageCallbacks, msg.Id)
|
||||||
|
c.mu.Unlock()
|
||||||
|
ch <- msg
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
switch msg.Type {
|
||||||
|
case "event":
|
||||||
|
c.processEvent(msg)
|
||||||
|
default:
|
||||||
|
log.Printf("Received unsupported message %+v from %s", msg, c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *RemoteConnection) processEvent(msg *signaling.ProxyServerMessage) {
|
||||||
|
switch msg.Event.Type {
|
||||||
|
case "update-load":
|
||||||
|
default:
|
||||||
|
log.Printf("Received unsupported event %+v from %s", msg, c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestMessage sends msg to the remote proxy and waits for the matching
// response, or until ctx is done. A unique message id is assigned so the
// response can be routed back through messageCallbacks (see processMessage).
// A response of type "error" is unwrapped and returned as the error value.
func (c *RemoteConnection) RequestMessage(ctx context.Context, msg *signaling.ProxyServerMessage) (*signaling.ProxyServerMessage, error) {
	// Monotonically increasing id; collisions with concurrent requests are
	// impossible because msgId is an atomic counter.
	msg.Id = strconv.FormatInt(c.msgId.Add(1), 10)

	c.mu.Lock()
	// Careful: this defer pairs with the explicit Unlock further down and the
	// re-Lock in the cleanup defer. Deferred calls run LIFO, so on return the
	// cleanup defer re-acquires the mutex first and this one releases it last.
	defer c.mu.Unlock()

	if err := c.sendMessageLocked(ctx, msg); err != nil {
		return nil, err
	}
	// Buffered with capacity 1 so processMessage can deliver the response
	// without blocking, even if we have already given up waiting.
	ch := make(chan *signaling.ProxyServerMessage, 1)
	c.messageCallbacks[msg.Id] = ch
	// Release the mutex while waiting: processMessage needs it to look up and
	// deliver to the callback channel.
	c.mu.Unlock()
	defer func() {
		// Re-acquire before removing the callback; the outer deferred Unlock
		// above releases the mutex afterwards.
		c.mu.Lock()
		delete(c.messageCallbacks, msg.Id)
	}()

	select {
	case <-ctx.Done():
		// TODO: Cancel request.
		return nil, ctx.Err()
	case response := <-ch:
		if response.Type == "error" {
			return nil, response.Error
		}
		return response, nil
	}
}
|
|
@ -24,7 +24,10 @@ package main
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
|
"crypto/rsa"
|
||||||
|
"crypto/tls"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log"
|
"log"
|
||||||
|
@ -45,6 +48,7 @@ import (
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
"github.com/gorilla/securecookie"
|
"github.com/gorilla/securecookie"
|
||||||
"github.com/gorilla/websocket"
|
"github.com/gorilla/websocket"
|
||||||
|
"github.com/notedit/janus-go"
|
||||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||||
|
|
||||||
signaling "github.com/strukturag/nextcloud-spreed-signaling"
|
signaling "github.com/strukturag/nextcloud-spreed-signaling"
|
||||||
|
@ -63,6 +67,16 @@ const (
|
||||||
|
|
||||||
// Maximum age a token may have to prevent reuse of old tokens.
|
// Maximum age a token may have to prevent reuse of old tokens.
|
||||||
maxTokenAge = 5 * time.Minute
|
maxTokenAge = 5 * time.Minute
|
||||||
|
|
||||||
|
remotePublisherTimeout = 5 * time.Second
|
||||||
|
|
||||||
|
ProxyFeatureRemoteStreams = "remote-streams"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
defaultProxyFeatures = []string{
|
||||||
|
ProxyFeatureRemoteStreams,
|
||||||
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
type ContextKey string
|
type ContextKey string
|
||||||
|
@ -70,35 +84,44 @@ type ContextKey string
|
||||||
var (
|
var (
|
||||||
ContextKeySession = ContextKey("session")
|
ContextKeySession = ContextKey("session")
|
||||||
|
|
||||||
TimeoutCreatingPublisher = signaling.NewError("timeout", "Timeout creating publisher.")
|
TimeoutCreatingPublisher = signaling.NewError("timeout", "Timeout creating publisher.")
|
||||||
TimeoutCreatingSubscriber = signaling.NewError("timeout", "Timeout creating subscriber.")
|
TimeoutCreatingSubscriber = signaling.NewError("timeout", "Timeout creating subscriber.")
|
||||||
TokenAuthFailed = signaling.NewError("auth_failed", "The token could not be authenticated.")
|
TokenAuthFailed = signaling.NewError("auth_failed", "The token could not be authenticated.")
|
||||||
TokenExpired = signaling.NewError("token_expired", "The token is expired.")
|
TokenExpired = signaling.NewError("token_expired", "The token is expired.")
|
||||||
TokenNotValidYet = signaling.NewError("token_not_valid_yet", "The token is not valid yet.")
|
TokenNotValidYet = signaling.NewError("token_not_valid_yet", "The token is not valid yet.")
|
||||||
UnknownClient = signaling.NewError("unknown_client", "Unknown client id given.")
|
UnknownClient = signaling.NewError("unknown_client", "Unknown client id given.")
|
||||||
UnsupportedCommand = signaling.NewError("bad_request", "Unsupported command received.")
|
UnsupportedCommand = signaling.NewError("bad_request", "Unsupported command received.")
|
||||||
UnsupportedMessage = signaling.NewError("bad_request", "Unsupported message received.")
|
UnsupportedMessage = signaling.NewError("bad_request", "Unsupported message received.")
|
||||||
UnsupportedPayload = signaling.NewError("unsupported_payload", "Unsupported payload type.")
|
UnsupportedPayload = signaling.NewError("unsupported_payload", "Unsupported payload type.")
|
||||||
ShutdownScheduled = signaling.NewError("shutdown_scheduled", "The server is scheduled to shutdown.")
|
ShutdownScheduled = signaling.NewError("shutdown_scheduled", "The server is scheduled to shutdown.")
|
||||||
|
RemoteSubscribersNotSupported = signaling.NewError("unsupported_subscriber", "Remote subscribers are not supported.")
|
||||||
)
|
)
|
||||||
|
|
||||||
type ProxyServer struct {
|
type ProxyServer struct {
|
||||||
version string
|
version string
|
||||||
country string
|
country string
|
||||||
welcomeMessage string
|
welcomeMessage string
|
||||||
|
welcomeMsg *signaling.WelcomeServerMessage
|
||||||
|
config *goconf.ConfigFile
|
||||||
|
|
||||||
url string
|
url string
|
||||||
mcu signaling.Mcu
|
mcu signaling.Mcu
|
||||||
stopped atomic.Bool
|
stopped atomic.Bool
|
||||||
load atomic.Int64
|
load atomic.Int64
|
||||||
|
|
||||||
|
maxIncoming atomic.Int64
|
||||||
|
currentIncoming atomic.Int64
|
||||||
|
maxOutgoing atomic.Int64
|
||||||
|
currentOutgoing atomic.Int64
|
||||||
|
|
||||||
shutdownChannel chan struct{}
|
shutdownChannel chan struct{}
|
||||||
shutdownScheduled atomic.Bool
|
shutdownScheduled atomic.Bool
|
||||||
|
|
||||||
upgrader websocket.Upgrader
|
upgrader websocket.Upgrader
|
||||||
|
|
||||||
tokens ProxyTokens
|
tokens ProxyTokens
|
||||||
statsAllowedIps *signaling.AllowedIps
|
statsAllowedIps atomic.Pointer[signaling.AllowedIps]
|
||||||
|
trustedProxies atomic.Pointer[signaling.AllowedIps]
|
||||||
|
|
||||||
sid atomic.Uint64
|
sid atomic.Uint64
|
||||||
cookie *securecookie.SecureCookie
|
cookie *securecookie.SecureCookie
|
||||||
|
@ -108,6 +131,71 @@ type ProxyServer struct {
|
||||||
clients map[string]signaling.McuClient
|
clients map[string]signaling.McuClient
|
||||||
clientIds map[string]string
|
clientIds map[string]string
|
||||||
clientsLock sync.RWMutex
|
clientsLock sync.RWMutex
|
||||||
|
|
||||||
|
tokenId string
|
||||||
|
tokenKey *rsa.PrivateKey
|
||||||
|
remoteTlsConfig *tls.Config
|
||||||
|
remoteHostname string
|
||||||
|
remoteConnections map[string]*RemoteConnection
|
||||||
|
remoteConnectionsLock sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func IsPublicIP(IP net.IP) bool {
|
||||||
|
if IP.IsLoopback() || IP.IsLinkLocalMulticast() || IP.IsLinkLocalUnicast() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if ip4 := IP.To4(); ip4 != nil {
|
||||||
|
switch {
|
||||||
|
case ip4[0] == 10:
|
||||||
|
return false
|
||||||
|
case ip4[0] == 172 && ip4[1] >= 16 && ip4[1] <= 31:
|
||||||
|
return false
|
||||||
|
case ip4[0] == 192 && ip4[1] == 168:
|
||||||
|
return false
|
||||||
|
default:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetLocalIP() (string, error) {
|
||||||
|
addrs, err := net.InterfaceAddrs()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, address := range addrs {
|
||||||
|
if ipnet, ok := address.(*net.IPNet); ok && IsPublicIP(ipnet.IP) {
|
||||||
|
if ipnet.IP.To4() != nil {
|
||||||
|
return ipnet.IP.String(), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTargetBandwidths(config *goconf.ConfigFile) (int, int) {
|
||||||
|
maxIncoming, _ := config.GetInt("bandwidth", "incoming")
|
||||||
|
if maxIncoming < 0 {
|
||||||
|
maxIncoming = 0
|
||||||
|
}
|
||||||
|
if maxIncoming > 0 {
|
||||||
|
log.Printf("Target bandwidth for incoming streams: %d MBit/s", maxIncoming)
|
||||||
|
} else {
|
||||||
|
log.Printf("Target bandwidth for incoming streams: unlimited")
|
||||||
|
}
|
||||||
|
maxOutgoing, _ := config.GetInt("bandwidth", "outgoing")
|
||||||
|
if maxOutgoing < 0 {
|
||||||
|
maxOutgoing = 0
|
||||||
|
}
|
||||||
|
if maxIncoming > 0 {
|
||||||
|
log.Printf("Target bandwidth for outgoing streams: %d MBit/s", maxOutgoing)
|
||||||
|
} else {
|
||||||
|
log.Printf("Target bandwidth for outgoing streams: unlimited")
|
||||||
|
}
|
||||||
|
|
||||||
|
return maxIncoming, maxOutgoing
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewProxyServer(r *mux.Router, version string, config *goconf.ConfigFile) (*ProxyServer, error) {
|
func NewProxyServer(r *mux.Router, version string, config *goconf.ConfigFile) (*ProxyServer, error) {
|
||||||
|
@ -153,6 +241,19 @@ func NewProxyServer(r *mux.Router, version string, config *goconf.ConfigFile) (*
|
||||||
statsAllowedIps = signaling.DefaultAllowedIps()
|
statsAllowedIps = signaling.DefaultAllowedIps()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
trustedProxies, _ := config.GetString("app", "trustedproxies")
|
||||||
|
trustedProxiesIps, err := signaling.ParseAllowedIps(trustedProxies)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !trustedProxiesIps.Empty() {
|
||||||
|
log.Printf("Trusted proxies: %s", trustedProxiesIps)
|
||||||
|
} else {
|
||||||
|
trustedProxiesIps = signaling.DefaultTrustedProxies
|
||||||
|
log.Printf("No trusted proxies configured, only allowing for %s", trustedProxiesIps)
|
||||||
|
}
|
||||||
|
|
||||||
country, _ := config.GetString("app", "country")
|
country, _ := config.GetString("app", "country")
|
||||||
country = strings.ToUpper(country)
|
country = strings.ToUpper(country)
|
||||||
if signaling.IsValidCountry(country) {
|
if signaling.IsValidCountry(country) {
|
||||||
|
@ -173,10 +274,61 @@ func NewProxyServer(r *mux.Router, version string, config *goconf.ConfigFile) (*
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
tokenId, _ := config.GetString("app", "token_id")
|
||||||
|
var tokenKey *rsa.PrivateKey
|
||||||
|
var remoteHostname string
|
||||||
|
var remoteTlsConfig *tls.Config
|
||||||
|
if tokenId != "" {
|
||||||
|
tokenKeyFilename, _ := config.GetString("app", "token_key")
|
||||||
|
if tokenKeyFilename == "" {
|
||||||
|
return nil, fmt.Errorf("No token key configured")
|
||||||
|
}
|
||||||
|
tokenKeyData, err := os.ReadFile(tokenKeyFilename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Could not read private key from %s: %s", tokenKeyFilename, err)
|
||||||
|
}
|
||||||
|
tokenKey, err = jwt.ParseRSAPrivateKeyFromPEM(tokenKeyData)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Could not parse private key from %s: %s", tokenKeyFilename, err)
|
||||||
|
}
|
||||||
|
log.Printf("Using \"%s\" as token id for remote streams", tokenId)
|
||||||
|
|
||||||
|
remoteHostname, _ = config.GetString("app", "hostname")
|
||||||
|
if remoteHostname == "" {
|
||||||
|
remoteHostname, err = GetLocalIP()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("could not get local ip: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if remoteHostname == "" {
|
||||||
|
log.Printf("WARNING: Could not determine hostname for remote streams, will be disabled. Please configure manually.")
|
||||||
|
} else {
|
||||||
|
log.Printf("Using \"%s\" as hostname for remote streams", remoteHostname)
|
||||||
|
}
|
||||||
|
|
||||||
|
skipverify, _ := config.GetBool("backend", "skipverify")
|
||||||
|
if skipverify {
|
||||||
|
log.Println("WARNING: Remote stream requests verification is disabled!")
|
||||||
|
remoteTlsConfig = &tls.Config{
|
||||||
|
InsecureSkipVerify: skipverify,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Printf("No token id configured, remote streams will be disabled")
|
||||||
|
}
|
||||||
|
|
||||||
|
maxIncoming, maxOutgoing := getTargetBandwidths(config)
|
||||||
|
|
||||||
result := &ProxyServer{
|
result := &ProxyServer{
|
||||||
version: version,
|
version: version,
|
||||||
country: country,
|
country: country,
|
||||||
welcomeMessage: string(welcomeMessage) + "\n",
|
welcomeMessage: string(welcomeMessage) + "\n",
|
||||||
|
welcomeMsg: &signaling.WelcomeServerMessage{
|
||||||
|
Version: version,
|
||||||
|
Country: country,
|
||||||
|
Features: defaultProxyFeatures,
|
||||||
|
},
|
||||||
|
config: config,
|
||||||
|
|
||||||
shutdownChannel: make(chan struct{}),
|
shutdownChannel: make(chan struct{}),
|
||||||
|
|
||||||
|
@ -185,16 +337,25 @@ func NewProxyServer(r *mux.Router, version string, config *goconf.ConfigFile) (*
|
||||||
WriteBufferSize: websocketWriteBufferSize,
|
WriteBufferSize: websocketWriteBufferSize,
|
||||||
},
|
},
|
||||||
|
|
||||||
tokens: tokens,
|
tokens: tokens,
|
||||||
statsAllowedIps: statsAllowedIps,
|
|
||||||
|
|
||||||
cookie: securecookie.New(hashKey, blockKey).MaxAge(0),
|
cookie: securecookie.New(hashKey, blockKey).MaxAge(0),
|
||||||
sessions: make(map[uint64]*ProxySession),
|
sessions: make(map[uint64]*ProxySession),
|
||||||
|
|
||||||
clients: make(map[string]signaling.McuClient),
|
clients: make(map[string]signaling.McuClient),
|
||||||
clientIds: make(map[string]string),
|
clientIds: make(map[string]string),
|
||||||
|
|
||||||
|
tokenId: tokenId,
|
||||||
|
tokenKey: tokenKey,
|
||||||
|
remoteTlsConfig: remoteTlsConfig,
|
||||||
|
remoteHostname: remoteHostname,
|
||||||
|
remoteConnections: make(map[string]*RemoteConnection),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
result.maxIncoming.Store(int64(maxIncoming) * 1024 * 1024)
|
||||||
|
result.maxOutgoing.Store(int64(maxOutgoing) * 1024 * 1024)
|
||||||
|
result.statsAllowedIps.Store(statsAllowedIps)
|
||||||
|
result.trustedProxies.Store(trustedProxiesIps)
|
||||||
result.upgrader.CheckOrigin = result.checkOrigin
|
result.upgrader.CheckOrigin = result.checkOrigin
|
||||||
|
|
||||||
if debug, _ := config.GetBool("app", "debug"); debug {
|
if debug, _ := config.GetBool("app", "debug"); debug {
|
||||||
|
@ -223,7 +384,7 @@ func (s *ProxyServer) checkOrigin(r *http.Request) bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ProxyServer) Start(config *goconf.ConfigFile) error {
|
func (s *ProxyServer) Start(config *goconf.ConfigFile) error {
|
||||||
s.url, _ = config.GetString("mcu", "url")
|
s.url, _ = signaling.GetStringOptionWithEnv(config, "mcu", "url")
|
||||||
if s.url == "" {
|
if s.url == "" {
|
||||||
return fmt.Errorf("No MCU server url configured")
|
return fmt.Errorf("No MCU server url configured")
|
||||||
}
|
}
|
||||||
|
@ -245,7 +406,7 @@ func (s *ProxyServer) Start(config *goconf.ConfigFile) error {
|
||||||
for {
|
for {
|
||||||
switch mcuType {
|
switch mcuType {
|
||||||
case signaling.McuTypeJanus:
|
case signaling.McuTypeJanus:
|
||||||
mcu, err = signaling.NewMcuJanus(s.url, config)
|
mcu, err = signaling.NewMcuJanus(ctx, s.url, config)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
signaling.RegisterJanusMcuStats()
|
signaling.RegisterJanusMcuStats()
|
||||||
}
|
}
|
||||||
|
@ -255,7 +416,7 @@ func (s *ProxyServer) Start(config *goconf.ConfigFile) error {
|
||||||
if err == nil {
|
if err == nil {
|
||||||
mcu.SetOnConnected(s.onMcuConnected)
|
mcu.SetOnConnected(s.onMcuConnected)
|
||||||
mcu.SetOnDisconnected(s.onMcuDisconnected)
|
mcu.SetOnDisconnected(s.onMcuDisconnected)
|
||||||
err = mcu.Start()
|
err = mcu.Start(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("Could not create %s MCU at %s: %s", mcuType, s.url, err)
|
log.Printf("Could not create %s MCU at %s: %s", mcuType, s.url, err)
|
||||||
}
|
}
|
||||||
|
@ -298,18 +459,7 @@ loop:
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ProxyServer) updateLoad() {
|
func (s *ProxyServer) newLoadEvent(load int64, incoming int64, outgoing int64) *signaling.ProxyServerMessage {
|
||||||
load := s.GetClientsLoad()
|
|
||||||
if load == s.load.Load() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s.load.Store(load)
|
|
||||||
if s.shutdownScheduled.Load() {
|
|
||||||
// Server is scheduled to shutdown, no need to update clients with current load.
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
msg := &signaling.ProxyServerMessage{
|
msg := &signaling.ProxyServerMessage{
|
||||||
Type: "event",
|
Type: "event",
|
||||||
Event: &signaling.EventProxyServerMessage{
|
Event: &signaling.EventProxyServerMessage{
|
||||||
|
@ -317,7 +467,41 @@ func (s *ProxyServer) updateLoad() {
|
||||||
Load: load,
|
Load: load,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
maxIncoming := s.maxIncoming.Load()
|
||||||
|
maxOutgoing := s.maxOutgoing.Load()
|
||||||
|
if maxIncoming > 0 || maxOutgoing > 0 {
|
||||||
|
msg.Event.Bandwidth = &signaling.EventProxyServerBandwidth{}
|
||||||
|
if maxIncoming > 0 {
|
||||||
|
value := float64(incoming) / float64(maxIncoming) * 100
|
||||||
|
msg.Event.Bandwidth.Incoming = &value
|
||||||
|
}
|
||||||
|
if maxOutgoing > 0 {
|
||||||
|
value := float64(outgoing) / float64(maxOutgoing) * 100
|
||||||
|
msg.Event.Bandwidth.Outgoing = &value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return msg
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ProxyServer) updateLoad() {
|
||||||
|
load, incoming, outgoing := s.GetClientsLoad()
|
||||||
|
oldLoad := s.load.Swap(load)
|
||||||
|
oldIncoming := s.currentIncoming.Swap(incoming)
|
||||||
|
oldOutgoing := s.currentOutgoing.Swap(outgoing)
|
||||||
|
if oldLoad == load && oldIncoming == incoming && oldOutgoing == outgoing {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
s.sendLoadToAll(load, incoming, outgoing)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ProxyServer) sendLoadToAll(load int64, incoming int64, outgoing int64) {
|
||||||
|
if s.shutdownScheduled.Load() {
|
||||||
|
// Server is scheduled to shutdown, no need to update clients with current load.
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
msg := s.newLoadEvent(load, incoming, outgoing)
|
||||||
s.IterateSessions(func(session *ProxySession) {
|
s.IterateSessions(func(session *ProxySession) {
|
||||||
session.sendMessage(msg)
|
session.sendMessage(msg)
|
||||||
})
|
})
|
||||||
|
@ -388,7 +572,42 @@ func (s *ProxyServer) ScheduleShutdown() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ProxyServer) Reload(config *goconf.ConfigFile) {
|
func (s *ProxyServer) Reload(config *goconf.ConfigFile) {
|
||||||
|
statsAllowed, _ := config.GetString("stats", "allowed_ips")
|
||||||
|
if statsAllowedIps, err := signaling.ParseAllowedIps(statsAllowed); err == nil {
|
||||||
|
if !statsAllowedIps.Empty() {
|
||||||
|
log.Printf("Only allowing access to the stats endpoint from %s", statsAllowed)
|
||||||
|
} else {
|
||||||
|
log.Printf("No IPs configured for the stats endpoint, only allowing access from 127.0.0.1")
|
||||||
|
statsAllowedIps = signaling.DefaultAllowedIps()
|
||||||
|
}
|
||||||
|
s.statsAllowedIps.Store(statsAllowedIps)
|
||||||
|
} else {
|
||||||
|
log.Printf("Error parsing allowed stats ips from \"%s\": %s", statsAllowedIps, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
trustedProxies, _ := config.GetString("app", "trustedproxies")
|
||||||
|
if trustedProxiesIps, err := signaling.ParseAllowedIps(trustedProxies); err == nil {
|
||||||
|
if !trustedProxiesIps.Empty() {
|
||||||
|
log.Printf("Trusted proxies: %s", trustedProxiesIps)
|
||||||
|
} else {
|
||||||
|
trustedProxiesIps = signaling.DefaultTrustedProxies
|
||||||
|
log.Printf("No trusted proxies configured, only allowing for %s", trustedProxiesIps)
|
||||||
|
}
|
||||||
|
s.trustedProxies.Store(trustedProxiesIps)
|
||||||
|
} else {
|
||||||
|
log.Printf("Error parsing trusted proxies from \"%s\": %s", trustedProxies, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
maxIncoming, maxOutgoing := getTargetBandwidths(config)
|
||||||
|
oldIncoming := s.maxIncoming.Swap(int64(maxIncoming))
|
||||||
|
oldOutgoing := s.maxOutgoing.Swap(int64(maxOutgoing))
|
||||||
|
if oldIncoming != int64(maxIncoming) || oldOutgoing != int64(maxOutgoing) {
|
||||||
|
// Notify sessions about updated load / bandwidth usage.
|
||||||
|
go s.sendLoadToAll(s.load.Load(), s.currentIncoming.Load(), s.currentOutgoing.Load())
|
||||||
|
}
|
||||||
|
|
||||||
s.tokens.Reload(config)
|
s.tokens.Reload(config)
|
||||||
|
s.mcu.Reload(config)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ProxyServer) setCommonHeaders(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
|
func (s *ProxyServer) setCommonHeaders(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
|
||||||
|
@ -398,24 +617,6 @@ func (s *ProxyServer) setCommonHeaders(f func(http.ResponseWriter, *http.Request
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func getRealUserIP(r *http.Request) string {
|
|
||||||
// Note this function assumes it is running behind a trusted proxy, so
|
|
||||||
// the headers can be trusted.
|
|
||||||
if ip := r.Header.Get("X-Real-IP"); ip != "" {
|
|
||||||
return ip
|
|
||||||
}
|
|
||||||
|
|
||||||
if ip := r.Header.Get("X-Forwarded-For"); ip != "" {
|
|
||||||
// Result could be a list "clientip, proxy1, proxy2", so only use first element.
|
|
||||||
if pos := strings.Index(ip, ","); pos >= 0 {
|
|
||||||
ip = strings.TrimSpace(ip[:pos])
|
|
||||||
}
|
|
||||||
return ip
|
|
||||||
}
|
|
||||||
|
|
||||||
return r.RemoteAddr
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ProxyServer) welcomeHandler(w http.ResponseWriter, r *http.Request) {
|
func (s *ProxyServer) welcomeHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||||
w.WriteHeader(http.StatusOK)
|
w.WriteHeader(http.StatusOK)
|
||||||
|
@ -423,8 +624,11 @@ func (s *ProxyServer) welcomeHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ProxyServer) proxyHandler(w http.ResponseWriter, r *http.Request) {
|
func (s *ProxyServer) proxyHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
addr := getRealUserIP(r)
|
addr := signaling.GetRealUserIP(r, s.trustedProxies.Load())
|
||||||
conn, err := s.upgrader.Upgrade(w, r, nil)
|
header := http.Header{}
|
||||||
|
header.Set("Server", "nextcloud-spreed-signaling-proxy/"+s.version)
|
||||||
|
header.Set("X-Spreed-Signaling-Features", strings.Join(s.welcomeMsg.Features, ", "))
|
||||||
|
conn, err := s.upgrader.Upgrade(w, r, header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("Could not upgrade request from %s: %s", addr, err)
|
log.Printf("Could not upgrade request from %s: %s", addr, err)
|
||||||
return
|
return
|
||||||
|
@ -479,13 +683,7 @@ func (s *ProxyServer) onMcuDisconnected() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ProxyServer) sendCurrentLoad(session *ProxySession) {
|
func (s *ProxyServer) sendCurrentLoad(session *ProxySession) {
|
||||||
msg := &signaling.ProxyServerMessage{
|
msg := s.newLoadEvent(s.load.Load(), s.currentIncoming.Load(), s.currentOutgoing.Load())
|
||||||
Type: "event",
|
|
||||||
Event: &signaling.EventProxyServerMessage{
|
|
||||||
Type: "update-load",
|
|
||||||
Load: s.load.Load(),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
session.sendMessage(msg)
|
session.sendMessage(msg)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -579,10 +777,7 @@ func (s *ProxyServer) processMessage(client *ProxyClient, data []byte) {
|
||||||
Hello: &signaling.HelloProxyServerMessage{
|
Hello: &signaling.HelloProxyServerMessage{
|
||||||
Version: signaling.HelloVersionV1,
|
Version: signaling.HelloVersionV1,
|
||||||
SessionId: session.PublicId(),
|
SessionId: session.PublicId(),
|
||||||
Server: &signaling.WelcomeServerMessage{
|
Server: s.welcomeMsg,
|
||||||
Version: s.version,
|
|
||||||
Country: s.country,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
client.SendMessage(response)
|
client.SendMessage(response)
|
||||||
|
@ -613,6 +808,59 @@ func (i *emptyInitiator) Country() string {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type proxyRemotePublisher struct {
|
||||||
|
proxy *ProxyServer
|
||||||
|
remoteUrl string
|
||||||
|
|
||||||
|
publisherId string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *proxyRemotePublisher) PublisherId() string {
|
||||||
|
return p.publisherId
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *proxyRemotePublisher) StartPublishing(ctx context.Context, publisher signaling.McuRemotePublisherProperties) error {
|
||||||
|
conn, err := p.proxy.getRemoteConnection(p.remoteUrl)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := conn.RequestMessage(ctx, &signaling.ProxyClientMessage{
|
||||||
|
Type: "command",
|
||||||
|
Command: &signaling.CommandProxyClientMessage{
|
||||||
|
Type: "publish-remote",
|
||||||
|
ClientId: p.publisherId,
|
||||||
|
Hostname: p.proxy.remoteHostname,
|
||||||
|
Port: publisher.Port(),
|
||||||
|
RtcpPort: publisher.RtcpPort(),
|
||||||
|
},
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *proxyRemotePublisher) GetStreams(ctx context.Context) ([]signaling.PublisherStream, error) {
|
||||||
|
conn, err := p.proxy.getRemoteConnection(p.remoteUrl)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
response, err := conn.RequestMessage(ctx, &signaling.ProxyClientMessage{
|
||||||
|
Type: "command",
|
||||||
|
Command: &signaling.CommandProxyClientMessage{
|
||||||
|
Type: "get-publisher-streams",
|
||||||
|
ClientId: p.publisherId,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return response.Command.Streams, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (s *ProxyServer) processCommand(ctx context.Context, client *ProxyClient, session *ProxySession, message *signaling.ProxyClientMessage) {
|
func (s *ProxyServer) processCommand(ctx context.Context, client *ProxyClient, session *ProxySession, message *signaling.ProxyClientMessage) {
|
||||||
cmd := message.Command
|
cmd := message.Command
|
||||||
|
|
||||||
|
@ -655,18 +903,89 @@ func (s *ProxyServer) processCommand(ctx context.Context, client *ProxyClient, s
|
||||||
case "create-subscriber":
|
case "create-subscriber":
|
||||||
id := uuid.New().String()
|
id := uuid.New().String()
|
||||||
publisherId := cmd.PublisherId
|
publisherId := cmd.PublisherId
|
||||||
subscriber, err := s.mcu.NewSubscriber(ctx, session, publisherId, cmd.StreamType)
|
var subscriber signaling.McuSubscriber
|
||||||
if err == context.DeadlineExceeded {
|
var err error
|
||||||
log.Printf("Timeout while creating %s subscriber on %s for %s", cmd.StreamType, publisherId, session.PublicId())
|
|
||||||
session.sendMessage(message.NewErrorServerMessage(TimeoutCreatingSubscriber))
|
handleCreateError := func(err error) {
|
||||||
return
|
if err == context.DeadlineExceeded {
|
||||||
} else if err != nil {
|
log.Printf("Timeout while creating %s subscriber on %s for %s", cmd.StreamType, publisherId, session.PublicId())
|
||||||
|
session.sendMessage(message.NewErrorServerMessage(TimeoutCreatingSubscriber))
|
||||||
|
return
|
||||||
|
} else if errors.Is(err, signaling.ErrRemoteStreamsNotSupported) {
|
||||||
|
session.sendMessage(message.NewErrorServerMessage(RemoteSubscribersNotSupported))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
log.Printf("Error while creating %s subscriber on %s for %s: %s", cmd.StreamType, publisherId, session.PublicId(), err)
|
log.Printf("Error while creating %s subscriber on %s for %s: %s", cmd.StreamType, publisherId, session.PublicId(), err)
|
||||||
session.sendMessage(message.NewWrappedErrorServerMessage(err))
|
session.sendMessage(message.NewWrappedErrorServerMessage(err))
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("Created %s subscriber %s as %s for %s", cmd.StreamType, subscriber.Id(), id, session.PublicId())
|
if cmd.RemoteUrl != "" {
|
||||||
|
if s.tokenId == "" || s.tokenKey == nil || s.remoteHostname == "" {
|
||||||
|
session.sendMessage(message.NewErrorServerMessage(RemoteSubscribersNotSupported))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
remoteMcu, ok := s.mcu.(signaling.RemoteMcu)
|
||||||
|
if !ok {
|
||||||
|
session.sendMessage(message.NewErrorServerMessage(RemoteSubscribersNotSupported))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
claims, _, err := s.parseToken(cmd.RemoteToken)
|
||||||
|
if err != nil {
|
||||||
|
if e, ok := err.(*signaling.Error); ok {
|
||||||
|
client.SendMessage(message.NewErrorServerMessage(e))
|
||||||
|
} else {
|
||||||
|
client.SendMessage(message.NewWrappedErrorServerMessage(err))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if claims.Subject != publisherId {
|
||||||
|
session.sendMessage(message.NewErrorServerMessage(TokenAuthFailed))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
subCtx, cancel := context.WithTimeout(ctx, remotePublisherTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
log.Printf("Creating remote subscriber for %s on %s", publisherId, cmd.RemoteUrl)
|
||||||
|
|
||||||
|
controller := &proxyRemotePublisher{
|
||||||
|
proxy: s,
|
||||||
|
remoteUrl: cmd.RemoteUrl,
|
||||||
|
publisherId: publisherId,
|
||||||
|
}
|
||||||
|
|
||||||
|
var publisher signaling.McuRemotePublisher
|
||||||
|
publisher, err = remoteMcu.NewRemotePublisher(subCtx, session, controller, cmd.StreamType)
|
||||||
|
if err != nil {
|
||||||
|
handleCreateError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
go publisher.Close(context.Background())
|
||||||
|
}()
|
||||||
|
|
||||||
|
subscriber, err = remoteMcu.NewRemoteSubscriber(subCtx, session, publisher)
|
||||||
|
if err != nil {
|
||||||
|
handleCreateError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("Created remote %s subscriber %s as %s for %s on %s", cmd.StreamType, subscriber.Id(), id, session.PublicId(), cmd.RemoteUrl)
|
||||||
|
} else {
|
||||||
|
subscriber, err = s.mcu.NewSubscriber(ctx, session, publisherId, cmd.StreamType, &emptyInitiator{})
|
||||||
|
if err != nil {
|
||||||
|
handleCreateError(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("Created %s subscriber %s as %s for %s", cmd.StreamType, subscriber.Id(), id, session.PublicId())
|
||||||
|
}
|
||||||
|
|
||||||
session.StoreSubscriber(ctx, id, subscriber)
|
session.StoreSubscriber(ctx, id, subscriber)
|
||||||
s.StoreClient(id, subscriber)
|
s.StoreClient(id, subscriber)
|
||||||
|
|
||||||
|
@ -751,6 +1070,77 @@ func (s *ProxyServer) processCommand(ctx context.Context, client *ProxyClient, s
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
session.sendMessage(response)
|
session.sendMessage(response)
|
||||||
|
case "publish-remote":
|
||||||
|
client := s.GetClient(cmd.ClientId)
|
||||||
|
if client == nil {
|
||||||
|
session.sendMessage(message.NewErrorServerMessage(UnknownClient))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
publisher, ok := client.(signaling.McuPublisher)
|
||||||
|
if !ok {
|
||||||
|
session.sendMessage(message.NewErrorServerMessage(UnknownClient))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := publisher.PublishRemote(ctx, session.PublicId(), cmd.Hostname, cmd.Port, cmd.RtcpPort); err != nil {
|
||||||
|
var je *janus.ErrorMsg
|
||||||
|
if !errors.As(err, &je) || je.Err.Code != signaling.JANUS_VIDEOROOM_ERROR_ID_EXISTS {
|
||||||
|
log.Printf("Error publishing %s %s to remote %s (port=%d, rtcpPort=%d): %s", publisher.StreamType(), cmd.ClientId, cmd.Hostname, cmd.Port, cmd.RtcpPort, err)
|
||||||
|
session.sendMessage(message.NewWrappedErrorServerMessage(err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := publisher.UnpublishRemote(ctx, session.PublicId()); err != nil {
|
||||||
|
log.Printf("Error unpublishing old %s %s to remote %s (port=%d, rtcpPort=%d): %s", publisher.StreamType(), cmd.ClientId, cmd.Hostname, cmd.Port, cmd.RtcpPort, err)
|
||||||
|
session.sendMessage(message.NewWrappedErrorServerMessage(err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := publisher.PublishRemote(ctx, session.PublicId(), cmd.Hostname, cmd.Port, cmd.RtcpPort); err != nil {
|
||||||
|
log.Printf("Error publishing %s %s to remote %s (port=%d, rtcpPort=%d): %s", publisher.StreamType(), cmd.ClientId, cmd.Hostname, cmd.Port, cmd.RtcpPort, err)
|
||||||
|
session.sendMessage(message.NewWrappedErrorServerMessage(err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
response := &signaling.ProxyServerMessage{
|
||||||
|
Id: message.Id,
|
||||||
|
Type: "command",
|
||||||
|
Command: &signaling.CommandProxyServerMessage{
|
||||||
|
Id: cmd.ClientId,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
session.sendMessage(response)
|
||||||
|
case "get-publisher-streams":
|
||||||
|
client := s.GetClient(cmd.ClientId)
|
||||||
|
if client == nil {
|
||||||
|
session.sendMessage(message.NewErrorServerMessage(UnknownClient))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
publisher, ok := client.(signaling.McuPublisher)
|
||||||
|
if !ok {
|
||||||
|
session.sendMessage(message.NewErrorServerMessage(UnknownClient))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
streams, err := publisher.GetStreams(ctx)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Could not get streams of publisher %s: %s", publisher.Id(), err)
|
||||||
|
session.sendMessage(message.NewWrappedErrorServerMessage(err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
response := &signaling.ProxyServerMessage{
|
||||||
|
Id: message.Id,
|
||||||
|
Type: "command",
|
||||||
|
Command: &signaling.CommandProxyServerMessage{
|
||||||
|
Id: cmd.ClientId,
|
||||||
|
Streams: streams,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
session.sendMessage(response)
|
||||||
default:
|
default:
|
||||||
log.Printf("Unsupported command %+v", message.Command)
|
log.Printf("Unsupported command %+v", message.Command)
|
||||||
session.sendMessage(message.NewErrorServerMessage(UnsupportedCommand))
|
session.sendMessage(message.NewErrorServerMessage(UnsupportedCommand))
|
||||||
|
@ -777,9 +1167,10 @@ func (s *ProxyServer) processPayload(ctx context.Context, client *ProxyClient, s
|
||||||
fallthrough
|
fallthrough
|
||||||
case "candidate":
|
case "candidate":
|
||||||
mcuData = &signaling.MessageClientMessageData{
|
mcuData = &signaling.MessageClientMessageData{
|
||||||
Type: payload.Type,
|
RoomType: string(mcuClient.StreamType()),
|
||||||
Sid: payload.Sid,
|
Type: payload.Type,
|
||||||
Payload: payload.Payload,
|
Sid: payload.Sid,
|
||||||
|
Payload: payload.Payload,
|
||||||
}
|
}
|
||||||
case "endOfCandidates":
|
case "endOfCandidates":
|
||||||
// Ignore but confirm, not passed along to Janus anyway.
|
// Ignore but confirm, not passed along to Janus anyway.
|
||||||
|
@ -796,14 +1187,21 @@ func (s *ProxyServer) processPayload(ctx context.Context, client *ProxyClient, s
|
||||||
fallthrough
|
fallthrough
|
||||||
case "sendoffer":
|
case "sendoffer":
|
||||||
mcuData = &signaling.MessageClientMessageData{
|
mcuData = &signaling.MessageClientMessageData{
|
||||||
Type: payload.Type,
|
RoomType: string(mcuClient.StreamType()),
|
||||||
Sid: payload.Sid,
|
Type: payload.Type,
|
||||||
|
Sid: payload.Sid,
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
session.sendMessage(message.NewErrorServerMessage(UnsupportedPayload))
|
session.sendMessage(message.NewErrorServerMessage(UnsupportedPayload))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := mcuData.CheckValid(); err != nil {
|
||||||
|
log.Printf("Received invalid payload %+v for %s client %s: %s", mcuData, mcuClient.StreamType(), payload.ClientId, err)
|
||||||
|
session.sendMessage(message.NewErrorServerMessage(UnsupportedPayload))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
mcuClient.SendMessage(ctx, nil, mcuData, func(err error, response map[string]interface{}) {
|
mcuClient.SendMessage(ctx, nil, mcuData, func(err error, response map[string]interface{}) {
|
||||||
var responseMsg *signaling.ProxyServerMessage
|
var responseMsg *signaling.ProxyServerMessage
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -825,13 +1223,9 @@ func (s *ProxyServer) processPayload(ctx context.Context, client *ProxyClient, s
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ProxyServer) NewSession(hello *signaling.HelloProxyClientMessage) (*ProxySession, error) {
|
func (s *ProxyServer) parseToken(tokenValue string) (*signaling.TokenClaims, string, error) {
|
||||||
if proxyDebugMessages {
|
|
||||||
log.Printf("Hello: %+v", hello)
|
|
||||||
}
|
|
||||||
|
|
||||||
reason := "auth-failed"
|
reason := "auth-failed"
|
||||||
token, err := jwt.ParseWithClaims(hello.Token, &signaling.TokenClaims{}, func(token *jwt.Token) (interface{}, error) {
|
token, err := jwt.ParseWithClaims(tokenValue, &signaling.TokenClaims{}, func(token *jwt.Token) (interface{}, error) {
|
||||||
// Don't forget to validate the alg is what you expect:
|
// Don't forget to validate the alg is what you expect:
|
||||||
if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
|
if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
|
||||||
log.Printf("Unexpected signing method: %v", token.Header["alg"])
|
log.Printf("Unexpected signing method: %v", token.Header["alg"])
|
||||||
|
@ -863,25 +1257,35 @@ func (s *ProxyServer) NewSession(hello *signaling.HelloProxyClientMessage) (*Pro
|
||||||
})
|
})
|
||||||
if err, ok := err.(*jwt.ValidationError); ok {
|
if err, ok := err.(*jwt.ValidationError); ok {
|
||||||
if err.Errors&jwt.ValidationErrorIssuedAt == jwt.ValidationErrorIssuedAt {
|
if err.Errors&jwt.ValidationErrorIssuedAt == jwt.ValidationErrorIssuedAt {
|
||||||
statsTokenErrorsTotal.WithLabelValues("not-valid-yet").Inc()
|
return nil, "not-valid-yet", TokenNotValidYet
|
||||||
return nil, TokenNotValidYet
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
statsTokenErrorsTotal.WithLabelValues(reason).Inc()
|
return nil, reason, TokenAuthFailed
|
||||||
return nil, TokenAuthFailed
|
|
||||||
}
|
}
|
||||||
|
|
||||||
claims, ok := token.Claims.(*signaling.TokenClaims)
|
claims, ok := token.Claims.(*signaling.TokenClaims)
|
||||||
if !ok || !token.Valid {
|
if !ok || !token.Valid {
|
||||||
statsTokenErrorsTotal.WithLabelValues("auth-failed").Inc()
|
return nil, "auth-failed", TokenAuthFailed
|
||||||
return nil, TokenAuthFailed
|
|
||||||
}
|
}
|
||||||
|
|
||||||
minIssuedAt := time.Now().Add(-maxTokenAge)
|
minIssuedAt := time.Now().Add(-maxTokenAge)
|
||||||
if issuedAt := claims.IssuedAt; issuedAt != nil && issuedAt.Before(minIssuedAt) {
|
if issuedAt := claims.IssuedAt; issuedAt != nil && issuedAt.Before(minIssuedAt) {
|
||||||
statsTokenErrorsTotal.WithLabelValues("expired").Inc()
|
return nil, "expired", TokenExpired
|
||||||
return nil, TokenExpired
|
}
|
||||||
|
|
||||||
|
return claims, "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ProxyServer) NewSession(hello *signaling.HelloProxyClientMessage) (*ProxySession, error) {
|
||||||
|
if proxyDebugMessages {
|
||||||
|
log.Printf("Hello: %+v", hello)
|
||||||
|
}
|
||||||
|
|
||||||
|
claims, reason, err := s.parseToken(hello.Token)
|
||||||
|
if err != nil {
|
||||||
|
statsTokenErrorsTotal.WithLabelValues(reason).Inc()
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
sid := s.sid.Add(1)
|
sid := s.sid.Add(1)
|
||||||
|
@ -977,15 +1381,21 @@ func (s *ProxyServer) HasClients() bool {
|
||||||
return len(s.clients) > 0
|
return len(s.clients) > 0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ProxyServer) GetClientsLoad() int64 {
|
func (s *ProxyServer) GetClientsLoad() (load int64, incoming int64, outgoing int64) {
|
||||||
s.clientsLock.RLock()
|
s.clientsLock.RLock()
|
||||||
defer s.clientsLock.RUnlock()
|
defer s.clientsLock.RUnlock()
|
||||||
|
|
||||||
var load int64
|
|
||||||
for _, c := range s.clients {
|
for _, c := range s.clients {
|
||||||
load += int64(c.MaxBitrate())
|
bitrate := int64(c.MaxBitrate())
|
||||||
|
load += bitrate
|
||||||
|
if _, ok := c.(signaling.McuPublisher); ok {
|
||||||
|
incoming += bitrate
|
||||||
|
} else if _, ok := c.(signaling.McuSubscriber); ok {
|
||||||
|
outgoing += bitrate
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return load / 1024
|
load = load / 1024
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ProxyServer) GetClient(id string) signaling.McuClient {
|
func (s *ProxyServer) GetClient(id string) signaling.McuClient {
|
||||||
|
@ -994,6 +1404,22 @@ func (s *ProxyServer) GetClient(id string) signaling.McuClient {
|
||||||
return s.clients[id]
|
return s.clients[id]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *ProxyServer) GetPublisher(publisherId string) signaling.McuPublisher {
|
||||||
|
s.clientsLock.RLock()
|
||||||
|
defer s.clientsLock.RUnlock()
|
||||||
|
for _, c := range s.clients {
|
||||||
|
pub, ok := c.(signaling.McuPublisher)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if pub.Id() == publisherId {
|
||||||
|
return pub
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func (s *ProxyServer) GetClientId(client signaling.McuClient) string {
|
func (s *ProxyServer) GetClientId(client signaling.McuClient) string {
|
||||||
s.clientsLock.RLock()
|
s.clientsLock.RLock()
|
||||||
defer s.clientsLock.RUnlock()
|
defer s.clientsLock.RUnlock()
|
||||||
|
@ -1010,19 +1436,14 @@ func (s *ProxyServer) getStats() map[string]interface{} {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ProxyServer) allowStatsAccess(r *http.Request) bool {
|
func (s *ProxyServer) allowStatsAccess(r *http.Request) bool {
|
||||||
addr := getRealUserIP(r)
|
addr := signaling.GetRealUserIP(r, s.trustedProxies.Load())
|
||||||
if strings.Contains(addr, ":") {
|
|
||||||
if host, _, err := net.SplitHostPort(addr); err == nil {
|
|
||||||
addr = host
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ip := net.ParseIP(addr)
|
ip := net.ParseIP(addr)
|
||||||
if ip == nil {
|
if len(ip) == 0 {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
return s.statsAllowedIps.Allowed(ip)
|
allowed := s.statsAllowedIps.Load()
|
||||||
|
return allowed != nil && allowed.Allowed(ip)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ProxyServer) validateStatsRequest(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
|
func (s *ProxyServer) validateStatsRequest(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
|
||||||
|
@ -1055,3 +1476,21 @@ func (s *ProxyServer) metricsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
// Expose prometheus metrics at "/metrics".
|
// Expose prometheus metrics at "/metrics".
|
||||||
promhttp.Handler().ServeHTTP(w, r)
|
promhttp.Handler().ServeHTTP(w, r)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *ProxyServer) getRemoteConnection(url string) (*RemoteConnection, error) {
|
||||||
|
s.remoteConnectionsLock.Lock()
|
||||||
|
defer s.remoteConnectionsLock.Unlock()
|
||||||
|
|
||||||
|
conn, found := s.remoteConnections[url]
|
||||||
|
if found {
|
||||||
|
return conn, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
conn, err := NewRemoteConnection(url, s.tokenId, s.tokenKey, s.remoteTlsConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
s.remoteConnections[url] = conn
|
||||||
|
return conn, nil
|
||||||
|
}
|
||||||
|
|
|
@ -22,17 +22,22 @@
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"crypto/rsa"
|
"crypto/rsa"
|
||||||
"crypto/x509"
|
"crypto/x509"
|
||||||
"encoding/pem"
|
"encoding/pem"
|
||||||
|
"net"
|
||||||
|
"net/http/httptest"
|
||||||
"os"
|
"os"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/dlintw/goconf"
|
"github.com/dlintw/goconf"
|
||||||
"github.com/golang-jwt/jwt/v4"
|
"github.com/golang-jwt/jwt/v4"
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
|
"github.com/gorilla/websocket"
|
||||||
signaling "github.com/strukturag/nextcloud-spreed-signaling"
|
signaling "github.com/strukturag/nextcloud-spreed-signaling"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -41,12 +46,22 @@ const (
|
||||||
TokenIdForTest = "foo"
|
TokenIdForTest = "foo"
|
||||||
)
|
)
|
||||||
|
|
||||||
func newProxyServerForTest(t *testing.T) (*ProxyServer, *rsa.PrivateKey) {
|
func getWebsocketUrl(url string) string {
|
||||||
|
if strings.HasPrefix(url, "http://") {
|
||||||
|
return "ws://" + url[7:] + "/proxy"
|
||||||
|
} else if strings.HasPrefix(url, "https://") {
|
||||||
|
return "wss://" + url[8:] + "/proxy"
|
||||||
|
} else {
|
||||||
|
panic("Unsupported URL: " + url)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newProxyServerForTest(t *testing.T) (*ProxyServer, *rsa.PrivateKey, *httptest.Server) {
|
||||||
tempdir := t.TempDir()
|
tempdir := t.TempDir()
|
||||||
var server *ProxyServer
|
var proxy *ProxyServer
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
if server != nil {
|
if proxy != nil {
|
||||||
server.Stop()
|
proxy.Stop()
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
@ -86,14 +101,107 @@ func newProxyServerForTest(t *testing.T) (*ProxyServer, *rsa.PrivateKey) {
|
||||||
config := goconf.NewConfigFile()
|
config := goconf.NewConfigFile()
|
||||||
config.AddOption("tokens", TokenIdForTest, pubkey.Name())
|
config.AddOption("tokens", TokenIdForTest, pubkey.Name())
|
||||||
|
|
||||||
if server, err = NewProxyServer(r, "0.0", config); err != nil {
|
if proxy, err = NewProxyServer(r, "0.0", config); err != nil {
|
||||||
t.Fatalf("could not create server: %s", err)
|
t.Fatalf("could not create proxy server: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
server := httptest.NewServer(r)
|
||||||
|
t.Cleanup(func() {
|
||||||
|
server.Close()
|
||||||
|
})
|
||||||
|
|
||||||
|
return proxy, key, server
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTokenValid(t *testing.T) {
|
||||||
|
signaling.CatchLogForTest(t)
|
||||||
|
proxy, key, _ := newProxyServerForTest(t)
|
||||||
|
|
||||||
|
claims := &signaling.TokenClaims{
|
||||||
|
RegisteredClaims: jwt.RegisteredClaims{
|
||||||
|
IssuedAt: jwt.NewNumericDate(time.Now().Add(-maxTokenAge / 2)),
|
||||||
|
Issuer: TokenIdForTest,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
|
||||||
|
tokenString, err := token.SignedString(key)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("could not create token: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
hello := &signaling.HelloProxyClientMessage{
|
||||||
|
Version: "1.0",
|
||||||
|
Token: tokenString,
|
||||||
|
}
|
||||||
|
session, err := proxy.NewSession(hello)
|
||||||
|
if session != nil {
|
||||||
|
defer session.Close()
|
||||||
|
} else if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTokenNotSigned(t *testing.T) {
|
||||||
|
signaling.CatchLogForTest(t)
|
||||||
|
proxy, _, _ := newProxyServerForTest(t)
|
||||||
|
|
||||||
|
claims := &signaling.TokenClaims{
|
||||||
|
RegisteredClaims: jwt.RegisteredClaims{
|
||||||
|
IssuedAt: jwt.NewNumericDate(time.Now().Add(-maxTokenAge / 2)),
|
||||||
|
Issuer: TokenIdForTest,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
token := jwt.NewWithClaims(jwt.SigningMethodNone, claims)
|
||||||
|
tokenString, err := token.SignedString(jwt.UnsafeAllowNoneSignatureType)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("could not create token: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
hello := &signaling.HelloProxyClientMessage{
|
||||||
|
Version: "1.0",
|
||||||
|
Token: tokenString,
|
||||||
|
}
|
||||||
|
session, err := proxy.NewSession(hello)
|
||||||
|
if session != nil {
|
||||||
|
defer session.Close()
|
||||||
|
t.Errorf("should not have created session")
|
||||||
|
} else if err != TokenAuthFailed {
|
||||||
|
t.Errorf("could have failed with TokenAuthFailed, got %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTokenUnknown(t *testing.T) {
|
||||||
|
signaling.CatchLogForTest(t)
|
||||||
|
proxy, key, _ := newProxyServerForTest(t)
|
||||||
|
|
||||||
|
claims := &signaling.TokenClaims{
|
||||||
|
RegisteredClaims: jwt.RegisteredClaims{
|
||||||
|
IssuedAt: jwt.NewNumericDate(time.Now().Add(-maxTokenAge / 2)),
|
||||||
|
Issuer: TokenIdForTest + "2",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
|
||||||
|
tokenString, err := token.SignedString(key)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("could not create token: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
hello := &signaling.HelloProxyClientMessage{
|
||||||
|
Version: "1.0",
|
||||||
|
Token: tokenString,
|
||||||
|
}
|
||||||
|
session, err := proxy.NewSession(hello)
|
||||||
|
if session != nil {
|
||||||
|
defer session.Close()
|
||||||
|
t.Errorf("should not have created session")
|
||||||
|
} else if err != TokenAuthFailed {
|
||||||
|
t.Errorf("could have failed with TokenAuthFailed, got %s", err)
|
||||||
}
|
}
|
||||||
return server, key
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTokenInFuture(t *testing.T) {
|
func TestTokenInFuture(t *testing.T) {
|
||||||
server, key := newProxyServerForTest(t)
|
signaling.CatchLogForTest(t)
|
||||||
|
proxy, key, _ := newProxyServerForTest(t)
|
||||||
|
|
||||||
claims := &signaling.TokenClaims{
|
claims := &signaling.TokenClaims{
|
||||||
RegisteredClaims: jwt.RegisteredClaims{
|
RegisteredClaims: jwt.RegisteredClaims{
|
||||||
|
@ -111,7 +219,7 @@ func TestTokenInFuture(t *testing.T) {
|
||||||
Version: "1.0",
|
Version: "1.0",
|
||||||
Token: tokenString,
|
Token: tokenString,
|
||||||
}
|
}
|
||||||
session, err := server.NewSession(hello)
|
session, err := proxy.NewSession(hello)
|
||||||
if session != nil {
|
if session != nil {
|
||||||
defer session.Close()
|
defer session.Close()
|
||||||
t.Errorf("should not have created session")
|
t.Errorf("should not have created session")
|
||||||
|
@ -119,3 +227,103 @@ func TestTokenInFuture(t *testing.T) {
|
||||||
t.Errorf("could have failed with TokenNotValidYet, got %s", err)
|
t.Errorf("could have failed with TokenNotValidYet, got %s", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestTokenExpired(t *testing.T) {
|
||||||
|
signaling.CatchLogForTest(t)
|
||||||
|
proxy, key, _ := newProxyServerForTest(t)
|
||||||
|
|
||||||
|
claims := &signaling.TokenClaims{
|
||||||
|
RegisteredClaims: jwt.RegisteredClaims{
|
||||||
|
IssuedAt: jwt.NewNumericDate(time.Now().Add(-maxTokenAge * 2)),
|
||||||
|
Issuer: TokenIdForTest,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
|
||||||
|
tokenString, err := token.SignedString(key)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("could not create token: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
hello := &signaling.HelloProxyClientMessage{
|
||||||
|
Version: "1.0",
|
||||||
|
Token: tokenString,
|
||||||
|
}
|
||||||
|
session, err := proxy.NewSession(hello)
|
||||||
|
if session != nil {
|
||||||
|
defer session.Close()
|
||||||
|
t.Errorf("should not have created session")
|
||||||
|
} else if err != TokenExpired {
|
||||||
|
t.Errorf("could have failed with TokenExpired, got %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPublicIPs(t *testing.T) {
|
||||||
|
public := []string{
|
||||||
|
"8.8.8.8",
|
||||||
|
"172.15.1.2",
|
||||||
|
"172.32.1.2",
|
||||||
|
"192.167.0.1",
|
||||||
|
"192.169.0.1",
|
||||||
|
}
|
||||||
|
private := []string{
|
||||||
|
"127.0.0.1",
|
||||||
|
"10.1.2.3",
|
||||||
|
"172.16.1.2",
|
||||||
|
"172.31.1.2",
|
||||||
|
"192.168.0.1",
|
||||||
|
"192.168.254.254",
|
||||||
|
}
|
||||||
|
for _, s := range public {
|
||||||
|
ip := net.ParseIP(s)
|
||||||
|
if len(ip) == 0 {
|
||||||
|
t.Errorf("invalid IP: %s", s)
|
||||||
|
} else if !IsPublicIP(ip) {
|
||||||
|
t.Errorf("should be public IP: %s", s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, s := range private {
|
||||||
|
ip := net.ParseIP(s)
|
||||||
|
if len(ip) == 0 {
|
||||||
|
t.Errorf("invalid IP: %s", s)
|
||||||
|
} else if IsPublicIP(ip) {
|
||||||
|
t.Errorf("should be private IP: %s", s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWebsocketFeatures(t *testing.T) {
|
||||||
|
signaling.CatchLogForTest(t)
|
||||||
|
_, _, server := newProxyServerForTest(t)
|
||||||
|
|
||||||
|
conn, response, err := websocket.DefaultDialer.DialContext(context.Background(), getWebsocketUrl(server.URL), nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer conn.Close() // nolint
|
||||||
|
|
||||||
|
if server := response.Header.Get("Server"); !strings.HasPrefix(server, "nextcloud-spreed-signaling-proxy/") {
|
||||||
|
t.Errorf("expected valid server header, got \"%s\"", server)
|
||||||
|
}
|
||||||
|
features := response.Header.Get("X-Spreed-Signaling-Features")
|
||||||
|
featuresList := make(map[string]bool)
|
||||||
|
for _, f := range strings.Split(features, ",") {
|
||||||
|
f = strings.TrimSpace(f)
|
||||||
|
if f != "" {
|
||||||
|
if _, found := featuresList[f]; found {
|
||||||
|
t.Errorf("duplicate feature id \"%s\" in \"%s\"", f, features)
|
||||||
|
}
|
||||||
|
featuresList[f] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(featuresList) == 0 {
|
||||||
|
t.Errorf("expected valid features header, got \"%s\"", features)
|
||||||
|
}
|
||||||
|
if _, found := featuresList["remote-streams"]; !found {
|
||||||
|
t.Errorf("expected feature \"remote-streams\", got \"%s\"", features)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := conn.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), time.Time{}); err != nil {
|
||||||
|
t.Errorf("could not write close message: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -299,8 +299,9 @@ func (s *ProxySession) clearPublishers() {
|
||||||
publisher.Close(context.Background())
|
publisher.Close(context.Background())
|
||||||
}
|
}
|
||||||
}(s.publishers)
|
}(s.publishers)
|
||||||
|
// Can't use clear(...) here as the map is processed by the goroutine above.
|
||||||
s.publishers = make(map[string]signaling.McuPublisher)
|
s.publishers = make(map[string]signaling.McuPublisher)
|
||||||
s.publisherIds = make(map[signaling.McuPublisher]string)
|
clear(s.publisherIds)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ProxySession) clearSubscribers() {
|
func (s *ProxySession) clearSubscribers() {
|
||||||
|
@ -315,8 +316,9 @@ func (s *ProxySession) clearSubscribers() {
|
||||||
subscriber.Close(context.Background())
|
subscriber.Close(context.Background())
|
||||||
}
|
}
|
||||||
}(s.subscribers)
|
}(s.subscribers)
|
||||||
|
// Can't use clear(...) here as the map is processed by the goroutine above.
|
||||||
s.subscribers = make(map[string]signaling.McuSubscriber)
|
s.subscribers = make(map[string]signaling.McuSubscriber)
|
||||||
s.subscriberIds = make(map[signaling.McuSubscriber]string)
|
clear(s.subscriberIds)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ProxySession) NotifyDisconnected() {
|
func (s *ProxySession) NotifyDisconnected() {
|
||||||
|
|
|
@ -39,6 +39,8 @@ import (
|
||||||
"github.com/dlintw/goconf"
|
"github.com/dlintw/goconf"
|
||||||
"go.etcd.io/etcd/server/v3/embed"
|
"go.etcd.io/etcd/server/v3/embed"
|
||||||
"go.etcd.io/etcd/server/v3/lease"
|
"go.etcd.io/etcd/server/v3/lease"
|
||||||
|
|
||||||
|
signaling "github.com/strukturag/nextcloud-spreed-signaling"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -100,6 +102,7 @@ func newEtcdForTesting(t *testing.T) *embed.Etcd {
|
||||||
|
|
||||||
t.Cleanup(func() {
|
t.Cleanup(func() {
|
||||||
etcd.Close()
|
etcd.Close()
|
||||||
|
<-etcd.Server.StopNotify()
|
||||||
})
|
})
|
||||||
// Wait for server to be ready.
|
// Wait for server to be ready.
|
||||||
<-etcd.Server.ReadyNotify()
|
<-etcd.Server.ReadyNotify()
|
||||||
|
@ -160,6 +163,7 @@ func generateAndSaveKey(t *testing.T, etcd *embed.Etcd, name string) *rsa.Privat
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestProxyTokensEtcd(t *testing.T) {
|
func TestProxyTokensEtcd(t *testing.T) {
|
||||||
|
signaling.CatchLogForTest(t)
|
||||||
tokens, etcd := newTokensEtcdForTesting(t)
|
tokens, etcd := newTokensEtcdForTesting(t)
|
||||||
|
|
||||||
key1 := generateAndSaveKey(t, etcd, "/foo")
|
key1 := generateAndSaveKey(t, etcd, "/foo")
|
||||||
|
|
|
@ -41,6 +41,9 @@ type proxyConfigEtcd struct {
|
||||||
keyPrefix string
|
keyPrefix string
|
||||||
keyInfos map[string]*ProxyInformationEtcd
|
keyInfos map[string]*ProxyInformationEtcd
|
||||||
urlToKey map[string]string
|
urlToKey map[string]string
|
||||||
|
|
||||||
|
closeCtx context.Context
|
||||||
|
closeFunc context.CancelFunc
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewProxyConfigEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient, proxy McuProxy) (ProxyConfig, error) {
|
func NewProxyConfigEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient, proxy McuProxy) (ProxyConfig, error) {
|
||||||
|
@ -48,12 +51,17 @@ func NewProxyConfigEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient, proxy
|
||||||
return nil, errors.New("No etcd endpoints configured")
|
return nil, errors.New("No etcd endpoints configured")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
closeCtx, closeFunc := context.WithCancel(context.Background())
|
||||||
|
|
||||||
result := &proxyConfigEtcd{
|
result := &proxyConfigEtcd{
|
||||||
proxy: proxy,
|
proxy: proxy,
|
||||||
|
|
||||||
client: etcdClient,
|
client: etcdClient,
|
||||||
keyInfos: make(map[string]*ProxyInformationEtcd),
|
keyInfos: make(map[string]*ProxyInformationEtcd),
|
||||||
urlToKey: make(map[string]string),
|
urlToKey: make(map[string]string),
|
||||||
|
|
||||||
|
closeCtx: closeCtx,
|
||||||
|
closeFunc: closeFunc,
|
||||||
}
|
}
|
||||||
if err := result.configure(config, false); err != nil {
|
if err := result.configure(config, false); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -83,17 +91,16 @@ func (p *proxyConfigEtcd) Reload(config *goconf.ConfigFile) error {
|
||||||
|
|
||||||
func (p *proxyConfigEtcd) Stop() {
|
func (p *proxyConfigEtcd) Stop() {
|
||||||
p.client.RemoveListener(p)
|
p.client.RemoveListener(p)
|
||||||
|
p.closeFunc()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *proxyConfigEtcd) EtcdClientCreated(client *EtcdClient) {
|
func (p *proxyConfigEtcd) EtcdClientCreated(client *EtcdClient) {
|
||||||
go func() {
|
go func() {
|
||||||
if err := client.Watch(context.Background(), p.keyPrefix, p, clientv3.WithPrefix()); err != nil {
|
if err := client.WaitForConnection(p.closeCtx); err != nil {
|
||||||
log.Printf("Error processing watch for %s: %s", p.keyPrefix, err)
|
if errors.Is(err, context.Canceled) {
|
||||||
}
|
return
|
||||||
}()
|
}
|
||||||
|
|
||||||
go func() {
|
|
||||||
if err := client.WaitForConnection(context.Background()); err != nil {
|
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -101,23 +108,47 @@ func (p *proxyConfigEtcd) EtcdClientCreated(client *EtcdClient) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
for {
|
|
||||||
response, err := p.getProxyUrls(client, p.keyPrefix)
|
var nextRevision int64
|
||||||
|
for p.closeCtx.Err() == nil {
|
||||||
|
response, err := p.getProxyUrls(p.closeCtx, client, p.keyPrefix)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == context.DeadlineExceeded {
|
if errors.Is(err, context.Canceled) {
|
||||||
|
return
|
||||||
|
} else if errors.Is(err, context.DeadlineExceeded) {
|
||||||
log.Printf("Timeout getting initial list of proxy URLs, retry in %s", backoff.NextWait())
|
log.Printf("Timeout getting initial list of proxy URLs, retry in %s", backoff.NextWait())
|
||||||
} else {
|
} else {
|
||||||
log.Printf("Could not get initial list of proxy URLs, retry in %s: %s", backoff.NextWait(), err)
|
log.Printf("Could not get initial list of proxy URLs, retry in %s: %s", backoff.NextWait(), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
backoff.Wait(context.Background())
|
backoff.Wait(p.closeCtx)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, ev := range response.Kvs {
|
for _, ev := range response.Kvs {
|
||||||
p.EtcdKeyUpdated(client, string(ev.Key), ev.Value)
|
p.EtcdKeyUpdated(client, string(ev.Key), ev.Value, nil)
|
||||||
|
}
|
||||||
|
nextRevision = response.Header.Revision + 1
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
prevRevision := nextRevision
|
||||||
|
backoff.Reset()
|
||||||
|
for p.closeCtx.Err() == nil {
|
||||||
|
var err error
|
||||||
|
if nextRevision, err = client.Watch(p.closeCtx, p.keyPrefix, nextRevision, p, clientv3.WithPrefix()); err != nil {
|
||||||
|
log.Printf("Error processing watch for %s (%s), retry in %s", p.keyPrefix, err, backoff.NextWait())
|
||||||
|
backoff.Wait(p.closeCtx)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if nextRevision != prevRevision {
|
||||||
|
backoff.Reset()
|
||||||
|
prevRevision = nextRevision
|
||||||
|
} else {
|
||||||
|
log.Printf("Processing watch for %s interrupted, retry in %s", p.keyPrefix, backoff.NextWait())
|
||||||
|
backoff.Wait(p.closeCtx)
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
@ -125,14 +156,14 @@ func (p *proxyConfigEtcd) EtcdClientCreated(client *EtcdClient) {
|
||||||
func (p *proxyConfigEtcd) EtcdWatchCreated(client *EtcdClient, key string) {
|
func (p *proxyConfigEtcd) EtcdWatchCreated(client *EtcdClient, key string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *proxyConfigEtcd) getProxyUrls(client *EtcdClient, keyPrefix string) (*clientv3.GetResponse, error) {
|
func (p *proxyConfigEtcd) getProxyUrls(ctx context.Context, client *EtcdClient, keyPrefix string) (*clientv3.GetResponse, error) {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
return client.Get(ctx, keyPrefix, clientv3.WithPrefix())
|
return client.Get(ctx, keyPrefix, clientv3.WithPrefix())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *proxyConfigEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data []byte) {
|
func (p *proxyConfigEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data []byte, prevValue []byte) {
|
||||||
var info ProxyInformationEtcd
|
var info ProxyInformationEtcd
|
||||||
if err := json.Unmarshal(data, &info); err != nil {
|
if err := json.Unmarshal(data, &info); err != nil {
|
||||||
log.Printf("Could not decode proxy information %s: %s", string(data), err)
|
log.Printf("Could not decode proxy information %s: %s", string(data), err)
|
||||||
|
@ -173,7 +204,7 @@ func (p *proxyConfigEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data []
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *proxyConfigEtcd) EtcdKeyDeleted(client *EtcdClient, key string) {
|
func (p *proxyConfigEtcd) EtcdKeyDeleted(client *EtcdClient, key string, prevValue []byte) {
|
||||||
p.mu.Lock()
|
p.mu.Lock()
|
||||||
defer p.mu.Unlock()
|
defer p.mu.Unlock()
|
||||||
|
|
||||||
|
|
|
@ -62,6 +62,8 @@ func SetEtcdProxy(t *testing.T, etcd *embed.Etcd, path string, proxy *TestProxyI
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestProxyConfigEtcd(t *testing.T) {
|
func TestProxyConfigEtcd(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
proxy := newMcuProxyForConfig(t)
|
proxy := newMcuProxyForConfig(t)
|
||||||
etcd, config := newProxyConfigEtcd(t, proxy)
|
etcd, config := newProxyConfigEtcd(t, proxy)
|
||||||
|
|
||||||
|
|
|
@ -86,7 +86,7 @@ func (p *proxyConfigStatic) configure(config *goconf.ConfigFile, fromReload bool
|
||||||
remove[u] = ips
|
remove[u] = ips
|
||||||
}
|
}
|
||||||
|
|
||||||
mcuUrl, _ := config.GetString("mcu", "url")
|
mcuUrl, _ := GetStringOptionWithEnv(config, "mcu", "url")
|
||||||
for _, u := range strings.Split(mcuUrl, " ") {
|
for _, u := range strings.Split(mcuUrl, " ") {
|
||||||
u = strings.TrimSpace(u)
|
u = strings.TrimSpace(u)
|
||||||
if u == "" {
|
if u == "" {
|
||||||
|
|
|
@ -59,6 +59,7 @@ func updateProxyConfigStatic(t *testing.T, config ProxyConfig, dns bool, urls ..
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestProxyConfigStaticSimple(t *testing.T) {
|
func TestProxyConfigStaticSimple(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
proxy := newMcuProxyForConfig(t)
|
proxy := newMcuProxyForConfig(t)
|
||||||
config, _ := newProxyConfigStatic(t, proxy, false, "https://foo/")
|
config, _ := newProxyConfigStatic(t, proxy, false, "https://foo/")
|
||||||
proxy.Expect("add", "https://foo/")
|
proxy.Expect("add", "https://foo/")
|
||||||
|
@ -77,6 +78,7 @@ func TestProxyConfigStaticSimple(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestProxyConfigStaticDNS(t *testing.T) {
|
func TestProxyConfigStaticDNS(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
lookup := newMockDnsLookupForTest(t)
|
lookup := newMockDnsLookupForTest(t)
|
||||||
proxy := newMcuProxyForConfig(t)
|
proxy := newMcuProxyForConfig(t)
|
||||||
config, dnsMonitor := newProxyConfigStatic(t, proxy, true, "https://foo/")
|
config, dnsMonitor := newProxyConfigStatic(t, proxy, true, "https://foo/")
|
||||||
|
|
99
publisher_stats_counter.go
Normal file
99
publisher_stats_counter.go
Normal file
|
@ -0,0 +1,99 @@
|
||||||
|
/**
|
||||||
|
* Standalone signaling server for the Nextcloud Spreed app.
|
||||||
|
* Copyright (C) 2021 struktur AG
|
||||||
|
*
|
||||||
|
* @author Joachim Bauch <bauch@struktur.de>
|
||||||
|
*
|
||||||
|
* @license GNU AGPL version 3 or any later version
|
||||||
|
*
|
||||||
|
* This program is free software: you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License as published by
|
||||||
|
* the Free Software Foundation, either version 3 of the License, or
|
||||||
|
* (at your option) any later version.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful,
|
||||||
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
* GNU Affero General Public License for more details.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
package signaling
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
type publisherStatsCounter struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
|
||||||
|
streamTypes map[StreamType]bool
|
||||||
|
subscribers map[string]bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *publisherStatsCounter) Reset() {
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
|
count := len(c.subscribers)
|
||||||
|
for streamType := range c.streamTypes {
|
||||||
|
statsMcuPublisherStreamTypesCurrent.WithLabelValues(string(streamType)).Dec()
|
||||||
|
statsMcuSubscriberStreamTypesCurrent.WithLabelValues(string(streamType)).Sub(float64(count))
|
||||||
|
}
|
||||||
|
c.streamTypes = nil
|
||||||
|
c.subscribers = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *publisherStatsCounter) EnableStream(streamType StreamType, enable bool) {
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
|
if enable == c.streamTypes[streamType] {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if enable {
|
||||||
|
if c.streamTypes == nil {
|
||||||
|
c.streamTypes = make(map[StreamType]bool)
|
||||||
|
}
|
||||||
|
c.streamTypes[streamType] = true
|
||||||
|
statsMcuPublisherStreamTypesCurrent.WithLabelValues(string(streamType)).Inc()
|
||||||
|
statsMcuSubscriberStreamTypesCurrent.WithLabelValues(string(streamType)).Add(float64(len(c.subscribers)))
|
||||||
|
} else {
|
||||||
|
delete(c.streamTypes, streamType)
|
||||||
|
statsMcuPublisherStreamTypesCurrent.WithLabelValues(string(streamType)).Dec()
|
||||||
|
statsMcuSubscriberStreamTypesCurrent.WithLabelValues(string(streamType)).Sub(float64(len(c.subscribers)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *publisherStatsCounter) AddSubscriber(id string) {
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
|
if c.subscribers[id] {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.subscribers == nil {
|
||||||
|
c.subscribers = make(map[string]bool)
|
||||||
|
}
|
||||||
|
c.subscribers[id] = true
|
||||||
|
for streamType := range c.streamTypes {
|
||||||
|
statsMcuSubscriberStreamTypesCurrent.WithLabelValues(string(streamType)).Inc()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *publisherStatsCounter) RemoveSubscriber(id string) {
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
|
if !c.subscribers[id] {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
delete(c.subscribers, id)
|
||||||
|
for streamType := range c.streamTypes {
|
||||||
|
statsMcuSubscriberStreamTypesCurrent.WithLabelValues(string(streamType)).Dec()
|
||||||
|
}
|
||||||
|
}
|
154
remotesession.go
Normal file
154
remotesession.go
Normal file
|
@ -0,0 +1,154 @@
|
||||||
|
/**
|
||||||
|
* Standalone signaling server for the Nextcloud Spreed app.
|
||||||
|
* Copyright (C) 2024 struktur AG
|
||||||
|
*
|
||||||
|
* @author Joachim Bauch <bauch@struktur.de>
|
||||||
|
*
|
||||||
|
* @license GNU AGPL version 3 or any later version
|
||||||
|
*
|
||||||
|
* This program is free software: you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License as published by
|
||||||
|
* the Free Software Foundation, either version 3 of the License, or
|
||||||
|
* (at your option) any later version.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful,
|
||||||
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
* GNU Affero General Public License for more details.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
package signaling
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type RemoteSession struct {
|
||||||
|
hub *Hub
|
||||||
|
client *Client
|
||||||
|
remoteClient *GrpcClient
|
||||||
|
sessionId string
|
||||||
|
|
||||||
|
proxy atomic.Pointer[SessionProxy]
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewRemoteSession(hub *Hub, client *Client, remoteClient *GrpcClient, sessionId string) (*RemoteSession, error) {
|
||||||
|
remoteSession := &RemoteSession{
|
||||||
|
hub: hub,
|
||||||
|
client: client,
|
||||||
|
remoteClient: remoteClient,
|
||||||
|
sessionId: sessionId,
|
||||||
|
}
|
||||||
|
|
||||||
|
client.SetSessionId(sessionId)
|
||||||
|
client.SetHandler(remoteSession)
|
||||||
|
|
||||||
|
// Don't use "client.Context()" here as it could close the proxy connection
|
||||||
|
// before any final messages are forwarded to the remote end.
|
||||||
|
proxy, err := remoteClient.ProxySession(context.Background(), sessionId, remoteSession)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
remoteSession.proxy.Store(proxy)
|
||||||
|
|
||||||
|
return remoteSession, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *RemoteSession) Country() string {
|
||||||
|
return s.client.Country()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *RemoteSession) RemoteAddr() string {
|
||||||
|
return s.client.RemoteAddr()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *RemoteSession) UserAgent() string {
|
||||||
|
return s.client.UserAgent()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *RemoteSession) IsConnected() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *RemoteSession) Start(message *ClientMessage) error {
|
||||||
|
return s.sendMessage(message)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *RemoteSession) OnProxyMessage(msg *ServerSessionMessage) error {
|
||||||
|
var message *ServerMessage
|
||||||
|
if err := json.Unmarshal(msg.Message, &message); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !s.client.SendMessage(message) {
|
||||||
|
return fmt.Errorf("could not send message to client")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *RemoteSession) OnProxyClose(err error) {
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Proxy connection for session %s to %s was closed with error: %s", s.sessionId, s.remoteClient.Target(), err)
|
||||||
|
}
|
||||||
|
s.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *RemoteSession) SendMessage(message WritableClientMessage) bool {
|
||||||
|
return s.sendMessage(message) == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *RemoteSession) sendProxyMessage(message []byte) error {
|
||||||
|
proxy := s.proxy.Load()
|
||||||
|
if proxy == nil {
|
||||||
|
return errors.New("proxy already closed")
|
||||||
|
}
|
||||||
|
|
||||||
|
msg := &ClientSessionMessage{
|
||||||
|
Message: message,
|
||||||
|
}
|
||||||
|
return proxy.Send(msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *RemoteSession) sendMessage(message interface{}) error {
|
||||||
|
data, err := json.Marshal(message)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.sendProxyMessage(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *RemoteSession) Close() {
|
||||||
|
if proxy := s.proxy.Swap(nil); proxy != nil {
|
||||||
|
proxy.Close()
|
||||||
|
}
|
||||||
|
s.hub.unregisterRemoteSession(s)
|
||||||
|
s.client.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *RemoteSession) OnLookupCountry(client HandlerClient) string {
|
||||||
|
return s.hub.OnLookupCountry(client)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *RemoteSession) OnClosed(client HandlerClient) {
|
||||||
|
s.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *RemoteSession) OnMessageReceived(client HandlerClient, message []byte) {
|
||||||
|
if err := s.sendProxyMessage(message); err != nil {
|
||||||
|
log.Printf("Error sending %s to the proxy for session %s: %s", string(message), s.sessionId, err)
|
||||||
|
s.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *RemoteSession) OnRTTReceived(client HandlerClient, rtt time.Duration) {
|
||||||
|
}
|
22
room.go
22
room.go
|
@ -65,7 +65,7 @@ type Room struct {
|
||||||
events AsyncEvents
|
events AsyncEvents
|
||||||
backend *Backend
|
backend *Backend
|
||||||
|
|
||||||
properties *json.RawMessage
|
properties json.RawMessage
|
||||||
|
|
||||||
closer *Closer
|
closer *Closer
|
||||||
mu *sync.RWMutex
|
mu *sync.RWMutex
|
||||||
|
@ -95,7 +95,7 @@ func getRoomIdForBackend(id string, backend *Backend) string {
|
||||||
return backend.Id() + "|" + id
|
return backend.Id() + "|" + id
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewRoom(roomId string, properties *json.RawMessage, hub *Hub, events AsyncEvents, backend *Backend) (*Room, error) {
|
func NewRoom(roomId string, properties json.RawMessage, hub *Hub, events AsyncEvents, backend *Backend) (*Room, error) {
|
||||||
room := &Room{
|
room := &Room{
|
||||||
id: roomId,
|
id: roomId,
|
||||||
hub: hub,
|
hub: hub,
|
||||||
|
@ -136,7 +136,7 @@ func (r *Room) Id() string {
|
||||||
return r.id
|
return r.id
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Room) Properties() *json.RawMessage {
|
func (r *Room) Properties() json.RawMessage {
|
||||||
r.mu.RLock()
|
r.mu.RLock()
|
||||||
defer r.mu.RUnlock()
|
defer r.mu.RUnlock()
|
||||||
return r.properties
|
return r.properties
|
||||||
|
@ -270,12 +270,12 @@ func (r *Room) processBackendRoomRequestAsyncRoom(message *AsyncRoomMessage) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Room) AddSession(session Session, sessionData *json.RawMessage) {
|
func (r *Room) AddSession(session Session, sessionData json.RawMessage) {
|
||||||
var roomSessionData *RoomSessionData
|
var roomSessionData *RoomSessionData
|
||||||
if sessionData != nil && len(*sessionData) > 0 {
|
if len(sessionData) > 0 {
|
||||||
roomSessionData = &RoomSessionData{}
|
roomSessionData = &RoomSessionData{}
|
||||||
if err := json.Unmarshal(*sessionData, roomSessionData); err != nil {
|
if err := json.Unmarshal(sessionData, roomSessionData); err != nil {
|
||||||
log.Printf("Error decoding room session data \"%s\": %s", string(*sessionData), err)
|
log.Printf("Error decoding room session data \"%s\": %s", string(sessionData), err)
|
||||||
roomSessionData = nil
|
roomSessionData = nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -480,11 +480,11 @@ func (r *Room) publish(message *ServerMessage) error {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Room) UpdateProperties(properties *json.RawMessage) {
|
func (r *Room) UpdateProperties(properties json.RawMessage) {
|
||||||
r.mu.Lock()
|
r.mu.Lock()
|
||||||
defer r.mu.Unlock()
|
defer r.mu.Unlock()
|
||||||
if (r.properties == nil && properties == nil) ||
|
if (len(r.properties) == 0 && len(properties) == 0) ||
|
||||||
(r.properties != nil && properties != nil && bytes.Equal(*r.properties, *properties)) {
|
(len(r.properties) > 0 && len(properties) > 0 && bytes.Equal(r.properties, properties)) {
|
||||||
// Don't notify if properties didn't change.
|
// Don't notify if properties didn't change.
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -769,7 +769,7 @@ func (r *Room) PublishUsersInCallChangedAll(inCall int) {
|
||||||
Type: "update",
|
Type: "update",
|
||||||
Update: &RoomEventServerMessage{
|
Update: &RoomEventServerMessage{
|
||||||
RoomId: r.id,
|
RoomId: r.id,
|
||||||
InCall: &inCallMsg,
|
InCall: inCallMsg,
|
||||||
All: true,
|
All: true,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
|
@ -63,6 +63,7 @@ func NewRoomPingForTest(t *testing.T) (*url.URL, *RoomPing) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSingleRoomPing(t *testing.T) {
|
func TestSingleRoomPing(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
u, ping := NewRoomPingForTest(t)
|
u, ping := NewRoomPingForTest(t)
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
||||||
|
@ -113,6 +114,7 @@ func TestSingleRoomPing(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMultiRoomPing(t *testing.T) {
|
func TestMultiRoomPing(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
u, ping := NewRoomPingForTest(t)
|
u, ping := NewRoomPingForTest(t)
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
||||||
|
@ -159,6 +161,7 @@ func TestMultiRoomPing(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMultiRoomPing_Separate(t *testing.T) {
|
func TestMultiRoomPing_Separate(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
u, ping := NewRoomPingForTest(t)
|
u, ping := NewRoomPingForTest(t)
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
||||||
|
@ -201,6 +204,7 @@ func TestMultiRoomPing_Separate(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMultiRoomPing_DeleteRoom(t *testing.T) {
|
func TestMultiRoomPing_DeleteRoom(t *testing.T) {
|
||||||
|
CatchLogForTest(t)
|
||||||
u, ping := NewRoomPingForTest(t)
|
u, ping := NewRoomPingForTest(t)
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
|
||||||
|
|
16
room_test.go
16
room_test.go
|
@ -73,6 +73,8 @@ func TestRoom_InCall(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRoom_Update(t *testing.T) {
|
func TestRoom_Update(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
hub, _, router, server := CreateHubForTest(t)
|
hub, _, router, server := CreateHubForTest(t)
|
||||||
|
|
||||||
config, err := getTestConfig(server)
|
config, err := getTestConfig(server)
|
||||||
|
@ -123,7 +125,7 @@ func TestRoom_Update(t *testing.T) {
|
||||||
UserIds: []string{
|
UserIds: []string{
|
||||||
testDefaultUserId,
|
testDefaultUserId,
|
||||||
},
|
},
|
||||||
Properties: &roomProperties,
|
Properties: roomProperties,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -164,13 +166,13 @@ func TestRoom_Update(t *testing.T) {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
} else if msg.RoomId != roomId {
|
} else if msg.RoomId != roomId {
|
||||||
t.Errorf("Expected room id %s, got %+v", roomId, msg)
|
t.Errorf("Expected room id %s, got %+v", roomId, msg)
|
||||||
} else if msg.Properties == nil || !bytes.Equal(*msg.Properties, roomProperties) {
|
} else if len(msg.Properties) == 0 || !bytes.Equal(msg.Properties, roomProperties) {
|
||||||
t.Errorf("Expected room properties %s, got %+v", string(roomProperties), msg)
|
t.Errorf("Expected room properties %s, got %+v", string(roomProperties), msg)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if msg.RoomId != roomId {
|
if msg.RoomId != roomId {
|
||||||
t.Errorf("Expected room id %s, got %+v", roomId, msg)
|
t.Errorf("Expected room id %s, got %+v", roomId, msg)
|
||||||
} else if msg.Properties == nil || !bytes.Equal(*msg.Properties, roomProperties) {
|
} else if len(msg.Properties) == 0 || !bytes.Equal(msg.Properties, roomProperties) {
|
||||||
t.Errorf("Expected room properties %s, got %+v", string(roomProperties), msg)
|
t.Errorf("Expected room properties %s, got %+v", string(roomProperties), msg)
|
||||||
}
|
}
|
||||||
if err := checkMessageRoomId(message2, roomId); err != nil {
|
if err := checkMessageRoomId(message2, roomId); err != nil {
|
||||||
|
@ -191,7 +193,7 @@ loop:
|
||||||
// The internal room has been updated with the new properties.
|
// The internal room has been updated with the new properties.
|
||||||
if room := hub.getRoom(roomId); room == nil {
|
if room := hub.getRoom(roomId); room == nil {
|
||||||
err = fmt.Errorf("Room %s not found in hub", roomId)
|
err = fmt.Errorf("Room %s not found in hub", roomId)
|
||||||
} else if room.Properties() == nil || !bytes.Equal(*room.Properties(), roomProperties) {
|
} else if len(room.Properties()) == 0 || !bytes.Equal(room.Properties(), roomProperties) {
|
||||||
err = fmt.Errorf("Expected room properties %s, got %+v", string(roomProperties), room.Properties())
|
err = fmt.Errorf("Expected room properties %s, got %+v", string(roomProperties), room.Properties())
|
||||||
} else {
|
} else {
|
||||||
err = nil
|
err = nil
|
||||||
|
@ -210,6 +212,8 @@ loop:
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRoom_Delete(t *testing.T) {
|
func TestRoom_Delete(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
hub, _, router, server := CreateHubForTest(t)
|
hub, _, router, server := CreateHubForTest(t)
|
||||||
|
|
||||||
config, err := getTestConfig(server)
|
config, err := getTestConfig(server)
|
||||||
|
@ -352,6 +356,8 @@ loop:
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRoom_RoomSessionData(t *testing.T) {
|
func TestRoom_RoomSessionData(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
hub, _, router, server := CreateHubForTest(t)
|
hub, _, router, server := CreateHubForTest(t)
|
||||||
|
|
||||||
config, err := getTestConfig(server)
|
config, err := getTestConfig(server)
|
||||||
|
@ -421,6 +427,8 @@ func TestRoom_RoomSessionData(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRoom_InCallAll(t *testing.T) {
|
func TestRoom_InCallAll(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
CatchLogForTest(t)
|
||||||
hub, _, router, server := CreateHubForTest(t)
|
hub, _, router, server := CreateHubForTest(t)
|
||||||
|
|
||||||
config, err := getTestConfig(server)
|
config, err := getTestConfig(server)
|
||||||
|
|
|
@ -22,17 +22,21 @@
|
||||||
package signaling
|
package signaling
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"net/url"
|
"net/url"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type DummySession struct {
|
type DummySession struct {
|
||||||
publicId string
|
publicId string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *DummySession) Context() context.Context {
|
||||||
|
return context.Background()
|
||||||
|
}
|
||||||
|
|
||||||
func (s *DummySession) PrivateId() string {
|
func (s *DummySession) PrivateId() string {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
@ -53,7 +57,7 @@ func (s *DummySession) UserId() string {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *DummySession) UserData() *json.RawMessage {
|
func (s *DummySession) UserData() json.RawMessage {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -80,10 +84,6 @@ func (s *DummySession) LeaveRoom(notify bool) *Room {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *DummySession) IsExpired(now time.Time) bool {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *DummySession) Close() {
|
func (s *DummySession) Close() {
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -91,6 +91,14 @@ func (s *DummySession) HasPermission(permission Permission) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *DummySession) SendError(e *Error) bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *DummySession) SendMessage(message *ServerMessage) bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
func checkSession(t *testing.T, sessions RoomSessions, sessionId string, roomSessionId string) Session {
|
func checkSession(t *testing.T, sessions RoomSessions, sessionId string, roomSessionId string) Session {
|
||||||
session := &DummySession{
|
session := &DummySession{
|
||||||
publicId: sessionId,
|
publicId: sessionId,
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue