Compare commits


No commits in common. "master" and "v1.2.4" have entirely different histories.

118 changed files with 2114 additions and 10061 deletions

View file

@ -73,7 +73,7 @@ jobs:
- name: Build and push
id: docker_build
uses: docker/build-push-action@v6
uses: docker/build-push-action@v5
with:
context: .
file: ./docker/server/Dockerfile
@ -143,7 +143,7 @@ jobs:
- name: Build and push
id: docker_build
uses: docker/build-push-action@v6
uses: docker/build-push-action@v5
with:
context: .
file: ./docker/proxy/Dockerfile

View file

@ -29,8 +29,12 @@ jobs:
uses: docker/setup-buildx-action@v3
- name: Build Docker image
uses: docker/build-push-action@v6
uses: docker/build-push-action@v5
with:
context: docker/janus
load: true
tags: ${{ env.TEST_TAG }}
- name: Test Docker image
run: |
docker run --rm ${{ env.TEST_TAG }} /usr/local/bin/janus --version

View file

@ -42,7 +42,7 @@ jobs:
uses: docker/setup-buildx-action@v3
- name: Build Docker image
uses: docker/build-push-action@v6
uses: docker/build-push-action@v5
with:
context: .
file: docker/server/Dockerfile
@ -61,7 +61,7 @@ jobs:
uses: docker/setup-buildx-action@v3
- name: Build Docker image
uses: docker/build-push-action@v6
uses: docker/build-push-action@v5
with:
context: .
file: docker/proxy/Dockerfile

View file

@ -1,46 +0,0 @@
name: Go Vulnerability Checker
on:
push:
branches: [ master ]
paths:
- '.github/workflows/govuln.yml'
- '**.go'
- 'go.*'
pull_request:
branches: [ master ]
paths:
- '.github/workflows/govuln.yml'
- '**.go'
- 'go.*'
schedule:
- cron: "0 2 * * SUN"
permissions:
contents: read
jobs:
run:
runs-on: ubuntu-latest
strategy:
matrix:
go-version:
- "1.21"
- "1.22"
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- run: date
- name: Install dependencies
run: |
sudo apt -y update && sudo apt -y install protobuf-compiler
make common
- name: Install and run govulncheck
run: |
set -euo pipefail
go install golang.org/x/vuln/cmd/govulncheck@latest
govulncheck ./...

View file

@ -28,7 +28,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: "1.21"
go-version: "1.20"
- name: Install dependencies
run: |
@ -36,11 +36,13 @@ jobs:
make common
- name: lint
uses: golangci/golangci-lint-action@v6.0.1
uses: golangci/golangci-lint-action@v4.0.0
with:
version: latest
args: --timeout=2m0s
skip-cache: true
skip-pkg-cache: true
skip-build-cache: true
dependencies:
name: dependencies
@ -54,7 +56,7 @@ jobs:
- name: Check minimum supported version of Go
run: |
go mod tidy -go=1.21 -compat=1.21
go mod tidy -go=1.20 -compat=1.20
- name: Check go.mod / go.sum
run: |

View file

@ -1,27 +0,0 @@
name: shellcheck
on:
push:
branches: [ master ]
paths:
- '.github/workflows/shellcheck.yml'
- '**.sh'
pull_request:
branches: [ master ]
paths:
- '.github/workflows/shellcheck.yml'
- '**.sh'
permissions:
contents: read
jobs:
lint:
name: shellcheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: shellcheck
run: |
find -name "*.sh" | xargs shellcheck

View file

@ -24,6 +24,7 @@ jobs:
strategy:
matrix:
go-version:
- "1.20"
- "1.21"
- "1.22"
runs-on: ubuntu-latest
@ -52,6 +53,7 @@ jobs:
strategy:
matrix:
go-version:
- "1.20"
- "1.21"
- "1.22"
runs-on: ubuntu-latest
@ -77,12 +79,15 @@ jobs:
[ -d "tmp/vendor" ] || exit 1
- name: Build
env:
GOPROXY: off
run: |
echo "Building with $(nproc) threads"
make -C tmp build -j$(nproc)
- name: Run tests
env:
GOPROXY: off
USE_DB_IP_GEOIP_DATABASE: "1"
run: |
make -C tmp test TIMEOUT=120s

View file

@ -27,6 +27,7 @@ jobs:
strategy:
matrix:
go-version:
- "1.20"
- "1.21"
- "1.22"
runs-on: ubuntu-latest
@ -63,7 +64,7 @@ jobs:
outfile: cover.lcov
- name: Coveralls Parallel
uses: coverallsapp/github-action@v2.3.0
uses: coverallsapp/github-action@v2.2.3
env:
COVERALLS_FLAG_NAME: run-${{ matrix.go-version }}
with:
@ -78,7 +79,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Coveralls Finished
uses: coverallsapp/github-action@v2.3.0
uses: coverallsapp/github-action@v2.2.3
with:
github-token: ${{ secrets.github_token }}
parallel-finished: true

View file

@ -2,122 +2,6 @@
All notable changes to this project will be documented in this file.
## 1.3.1 - 2024-05-23
### Changed
- Bump alpine from 3.19 to 3.20 in /docker/janus
[#746](https://github.com/strukturag/nextcloud-spreed-signaling/pull/746)
- CI: Remove deprecated options from lint workflow.
[#748](https://github.com/strukturag/nextcloud-spreed-signaling/pull/748)
- docker: Update Janus in example image to 1.2.2
[#749](https://github.com/strukturag/nextcloud-spreed-signaling/pull/749)
- Improve detection of actual client IP.
[#747](https://github.com/strukturag/nextcloud-spreed-signaling/pull/747)
### Fixed
- docker: Fix proxy entrypoint.
[#745](https://github.com/strukturag/nextcloud-spreed-signaling/pull/745)
## 1.3.0 - 2024-05-22
### Added
- Support resuming remote sessions
[#715](https://github.com/strukturag/nextcloud-spreed-signaling/pull/715)
- Gracefully shut down signaling server on SIGUSR1.
[#706](https://github.com/strukturag/nextcloud-spreed-signaling/pull/706)
- docker: Add helper scripts to gracefully stop / wait for server.
[#722](https://github.com/strukturag/nextcloud-spreed-signaling/pull/722)
- Support environment variables in some configuration.
[#721](https://github.com/strukturag/nextcloud-spreed-signaling/pull/721)
- Add Context to clients / sessions.
[#732](https://github.com/strukturag/nextcloud-spreed-signaling/pull/732)
- Drop support for Golang 1.20
[#737](https://github.com/strukturag/nextcloud-spreed-signaling/pull/737)
- CI: Run "govulncheck".
[#694](https://github.com/strukturag/nextcloud-spreed-signaling/pull/694)
- Make trusted proxies configurable and default to loopback / private IPs.
[#738](https://github.com/strukturag/nextcloud-spreed-signaling/pull/738)
- Add support for remote streams (preview)
[#708](https://github.com/strukturag/nextcloud-spreed-signaling/pull/708)
- Add throttler for backend requests
[#744](https://github.com/strukturag/nextcloud-spreed-signaling/pull/744)
### Changed
- build(deps): Bump github.com/nats-io/nats.go from 1.34.0 to 1.34.1
[#697](https://github.com/strukturag/nextcloud-spreed-signaling/pull/697)
- build(deps): Bump google.golang.org/grpc from 1.62.1 to 1.63.0
[#699](https://github.com/strukturag/nextcloud-spreed-signaling/pull/699)
- build(deps): Bump google.golang.org/grpc from 1.63.0 to 1.63.2
[#700](https://github.com/strukturag/nextcloud-spreed-signaling/pull/700)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.12 to 2.10.14
[#702](https://github.com/strukturag/nextcloud-spreed-signaling/pull/702)
- Include previous value with etcd watch events.
[#704](https://github.com/strukturag/nextcloud-spreed-signaling/pull/704)
- build(deps): Bump go.uber.org/zap from 1.17.0 to 1.27.0
[#705](https://github.com/strukturag/nextcloud-spreed-signaling/pull/705)
- Improve support for Janus 1.x
[#669](https://github.com/strukturag/nextcloud-spreed-signaling/pull/669)
- build(deps): Bump sphinx from 7.2.6 to 7.3.5 in /docs
[#709](https://github.com/strukturag/nextcloud-spreed-signaling/pull/709)
- build(deps): Bump sphinx from 7.3.5 to 7.3.7 in /docs
[#712](https://github.com/strukturag/nextcloud-spreed-signaling/pull/712)
- build(deps): Bump golang.org/x/net from 0.21.0 to 0.23.0
[#711](https://github.com/strukturag/nextcloud-spreed-signaling/pull/711)
- Don't keep expiration timestamp in each session.
[#713](https://github.com/strukturag/nextcloud-spreed-signaling/pull/713)
- build(deps): Bump mkdocs from 1.5.3 to 1.6.0 in /docs
[#714](https://github.com/strukturag/nextcloud-spreed-signaling/pull/714)
- Speedup tests by running in parallel
[#718](https://github.com/strukturag/nextcloud-spreed-signaling/pull/718)
- build(deps): Bump golangci/golangci-lint-action from 4.0.0 to 5.0.0
[#719](https://github.com/strukturag/nextcloud-spreed-signaling/pull/719)
- build(deps): Bump golangci/golangci-lint-action from 5.0.0 to 5.1.0
[#720](https://github.com/strukturag/nextcloud-spreed-signaling/pull/720)
- build(deps): Bump coverallsapp/github-action from 2.2.3 to 2.3.0
[#728](https://github.com/strukturag/nextcloud-spreed-signaling/pull/728)
- build(deps): Bump jinja2 from 3.1.3 to 3.1.4 in /docs
[#726](https://github.com/strukturag/nextcloud-spreed-signaling/pull/726)
- build(deps): Bump google.golang.org/protobuf from 1.33.0 to 1.34.1
[#725](https://github.com/strukturag/nextcloud-spreed-signaling/pull/725)
- build(deps): Bump github.com/prometheus/client_golang from 1.19.0 to 1.19.1
[#730](https://github.com/strukturag/nextcloud-spreed-signaling/pull/730)
- build(deps): Bump golangci/golangci-lint-action from 5.1.0 to 6.0.1
[#729](https://github.com/strukturag/nextcloud-spreed-signaling/pull/729)
- build(deps): Bump google.golang.org/grpc from 1.63.2 to 1.64.0
[#734](https://github.com/strukturag/nextcloud-spreed-signaling/pull/734)
- Validate received SDP earlier.
[#707](https://github.com/strukturag/nextcloud-spreed-signaling/pull/707)
- Log something if mcu publisher / subscriber was closed.
[#736](https://github.com/strukturag/nextcloud-spreed-signaling/pull/736)
- build(deps): Bump the etcd group with 4 updates
[#693](https://github.com/strukturag/nextcloud-spreed-signaling/pull/693)
- build(deps): Bump github.com/nats-io/nats.go from 1.34.1 to 1.35.0
[#740](https://github.com/strukturag/nextcloud-spreed-signaling/pull/740)
- Don't use unnecessary pointer to "json.RawMessage".
[#739](https://github.com/strukturag/nextcloud-spreed-signaling/pull/739)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.14 to 2.10.15
[#741](https://github.com/strukturag/nextcloud-spreed-signaling/pull/741)
- build(deps): Bump github.com/nats-io/nats-server/v2 from 2.10.15 to 2.10.16
[#743](https://github.com/strukturag/nextcloud-spreed-signaling/pull/743)
### Fixed
- Improve detecting renames in file watcher.
[#698](https://github.com/strukturag/nextcloud-spreed-signaling/pull/698)
- Update etcd watch handling.
[#701](https://github.com/strukturag/nextcloud-spreed-signaling/pull/701)
- Prevent goroutine leaks in GRPC tests.
[#716](https://github.com/strukturag/nextcloud-spreed-signaling/pull/716)
- Fix potential race in capabilities test.
[#731](https://github.com/strukturag/nextcloud-spreed-signaling/pull/731)
- Don't log read error after we closed the connection.
[#735](https://github.com/strukturag/nextcloud-spreed-signaling/pull/735)
- Fix lock order inversion when leaving room / publishing room sessions.
[#742](https://github.com/strukturag/nextcloud-spreed-signaling/pull/742)
- Relax "MessageClientMessageData" validation.
[#733](https://github.com/strukturag/nextcloud-spreed-signaling/pull/733)
## 1.2.4 - 2024-04-03
### Added

View file

@ -52,14 +52,6 @@ ifneq ($(COUNT),)
TESTARGS := $(TESTARGS) -count $(COUNT)
endif
ifneq ($(PARALLEL),)
TESTARGS := $(TESTARGS) -parallel $(PARALLEL)
endif
ifneq ($(VERBOSE),)
TESTARGS := $(TESTARGS) -v
endif
ifeq ($(GOARCH), amd64)
GOPATHBIN := $(GOPATH)/bin
else
@ -70,12 +62,14 @@ hook:
[ ! -d "$(CURDIR)/.git/hooks" ] || ln -sf "$(CURDIR)/scripts/pre-commit.hook" "$(CURDIR)/.git/hooks/pre-commit"
$(GOPATHBIN)/easyjson: go.mod go.sum
[ "$(GOPROXY)" = "off" ] || $(GO) get -d github.com/mailru/easyjson/...
$(GO) install github.com/mailru/easyjson/...
$(GOPATHBIN)/protoc-gen-go: go.mod go.sum
$(GO) install google.golang.org/protobuf/cmd/protoc-gen-go
$(GOPATHBIN)/protoc-gen-go-grpc: go.mod go.sum
[ "$(GOPROXY)" = "off" ] || $(GO) get -d google.golang.org/grpc/cmd/protoc-gen-go-grpc
$(GO) install google.golang.org/grpc/cmd/protoc-gen-go-grpc
continentmap.go:
@ -99,18 +93,18 @@ vet: common
$(GO) vet $(ALL_PACKAGES)
test: vet common
$(GO) test -timeout $(TIMEOUT) $(TESTARGS) $(ALL_PACKAGES)
$(GO) test -v -timeout $(TIMEOUT) $(TESTARGS) $(ALL_PACKAGES)
cover: vet common
rm -f cover.out && \
$(GO) test -timeout $(TIMEOUT) -coverprofile cover.out $(ALL_PACKAGES) && \
$(GO) test -v -timeout $(TIMEOUT) -coverprofile cover.out $(ALL_PACKAGES) && \
sed -i "/_easyjson/d" cover.out && \
sed -i "/\.pb\.go/d" cover.out && \
$(GO) tool cover -func=cover.out
coverhtml: vet common
rm -f cover.out && \
$(GO) test -timeout $(TIMEOUT) -coverprofile cover.out $(ALL_PACKAGES) && \
$(GO) test -v -timeout $(TIMEOUT) -coverprofile cover.out $(ALL_PACKAGES) && \
sed -i "/_easyjson/d" cover.out && \
sed -i "/\.pb\.go/d" cover.out && \
$(GO) tool cover -html=cover.out -o coverage.html
@ -153,6 +147,7 @@ build: server proxy
vendor: go.mod go.sum common
set -e ;\
rm -rf $(VENDORDIR)
$(GO) mod tidy; \
$(GO) mod vendor
tarball: vendor

View file

@ -1,6 +1,6 @@
# Spreed standalone signaling server
![Build Status](https://github.com/strukturag/nextcloud-spreed-signaling/actions/workflows/test.yml/badge.svg)
![Build Status](https://github.com/strukturag/nextcloud-spreed-signaling/workflows/test/badge.svg)
[![Coverage Status](https://coveralls.io/repos/github/strukturag/nextcloud-spreed-signaling/badge.svg?branch=master)](https://coveralls.io/github/strukturag/nextcloud-spreed-signaling?branch=master)
[![Documentation Status](https://readthedocs.org/projects/nextcloud-spreed-signaling/badge/?version=latest)](https://nextcloud-spreed-signaling.readthedocs.io/en/latest/?badge=latest)
[![Go Report](https://goreportcard.com/badge/github.com/strukturag/nextcloud-spreed-signaling)](https://goreportcard.com/report/github.com/strukturag/nextcloud-spreed-signaling)
@ -17,7 +17,7 @@ information on the API of the signaling server.
The following tools are required for building the signaling server.
- git
- go >= 1.21
- go >= 1.20
- make
- protobuf-compiler >= 3
@ -171,17 +171,7 @@ proxy process gracefully after all clients have been disconnected. No new
publishers will be accepted in this case.
### Remote streams (preview)
With Janus 1.1.0 or newer, remote streams are supported, i.e. a subscriber can
receive a published stream from any server. For this, you need to configure
`hostname`, `token_id` and `token_key` in the proxy configuration. Each proxy
server also supports configuring maximum `incoming` and `outgoing` bandwidth
settings, which will also be used to select remote streams.
See `proxy.conf.in` in section `app` for details.
## Clustering
### Clustering
The signaling server supports a clustering mode where multiple running servers
can be interconnected to form a single "virtual" server. This can be used to
@ -309,8 +299,6 @@ interface on port `8080` below):
# Enable proxying Websocket requests to the standalone signaling server.
ProxyPass "/standalone-signaling/" "ws://127.0.0.1:8080/"
RequestHeader set X-Real-IP %{REMOTE_ADDR}s
RewriteEngine On
# Websocket connections from the clients.
RewriteRule ^/standalone-signaling/spreed/$ - [L]
@ -346,7 +334,6 @@ myserver.domain.invalid {
route /standalone-signaling/* {
uri strip_prefix /standalone-signaling
reverse_proxy http://127.0.0.1:8080
header_up X-Real-IP {remote_host}
}
}
```

View file

@ -22,7 +22,6 @@
package signaling
import (
"bytes"
"fmt"
"net"
"strings"
@ -32,19 +31,6 @@ type AllowedIps struct {
allowed []*net.IPNet
}
func (a *AllowedIps) String() string {
var b bytes.Buffer
b.WriteString("[")
for idx, n := range a.allowed {
if idx > 0 {
b.WriteString(", ")
}
b.WriteString(n.String())
}
b.WriteString("]")
return b.String()
}
func (a *AllowedIps) Empty() bool {
return len(a.allowed) == 0
}
@ -113,22 +99,3 @@ func DefaultAllowedIps() *AllowedIps {
}
return result
}
var (
privateIpNets = []string{
// Loopback addresses.
"127.0.0.0/8",
// Private addresses.
"10.0.0.0/8",
"172.16.0.0/12",
"192.168.0.0/16",
}
)
func DefaultPrivateIps() *AllowedIps {
allowed, err := ParseAllowedIps(strings.Join(privateIpNets, ","))
if err != nil {
panic(fmt.Errorf("could not parse private ips %+v: %w", privateIpNets, err))
}
return allowed
}

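The `AllowedIps` diff above removes the `String()` helper and the private-range defaults. For readers unfamiliar with the type, here is a minimal, self-contained sketch of how such a CIDR allow-list can be parsed and checked; the test below confirms that bare addresses such as `127.0.0.1` become `/32` networks. The function names are illustrative, not the project's actual implementation.

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// parseAllowedIps is an illustrative sketch: entries in the
// comma-separated list may be bare IPv4 addresses (treated as /32,
// matching only that address) or CIDR ranges.
func parseAllowedIps(s string) ([]*net.IPNet, error) {
	var allowed []*net.IPNet
	for _, entry := range strings.Split(s, ",") {
		entry = strings.TrimSpace(entry)
		if entry == "" {
			continue
		}
		if !strings.Contains(entry, "/") {
			entry += "/32" // bare address: match exactly
		}
		_, ipNet, err := net.ParseCIDR(entry)
		if err != nil {
			return nil, err
		}
		allowed = append(allowed, ipNet)
	}
	return allowed, nil
}

func main() {
	allowed, err := parseAllowedIps("127.0.0.1, 192.168.1.0/24")
	if err != nil {
		panic(err)
	}
	ip := net.ParseIP("192.168.1.42")
	for _, n := range allowed {
		fmt.Printf("%s contains %s: %v\n", n, ip, n.Contains(ip))
	}
}
```
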
View file

@ -34,9 +34,6 @@ func TestAllowedIps(t *testing.T) {
if a.Empty() {
t.Fatal("should not be empty")
}
if expected := `[127.0.0.1/32, 192.168.0.1/32, 192.168.1.0/24]`; a.String() != expected {
t.Errorf("expected %s, got %s", expected, a.String())
}
allowed := []string{
"127.0.0.1",

View file

@ -118,8 +118,8 @@ type BackendRoomInviteRequest struct {
UserIds []string `json:"userids,omitempty"`
// TODO(jojo): We should get rid of "AllUserIds" and find a better way to
// notify existing users the room has changed and they need to update it.
AllUserIds []string `json:"alluserids,omitempty"`
Properties json.RawMessage `json:"properties,omitempty"`
AllUserIds []string `json:"alluserids,omitempty"`
Properties *json.RawMessage `json:"properties,omitempty"`
}
type BackendRoomDisinviteRequest struct {
@ -127,13 +127,13 @@ type BackendRoomDisinviteRequest struct {
SessionIds []string `json:"sessionids,omitempty"`
// TODO(jojo): We should get rid of "AllUserIds" and find a better way to
// notify existing users the room has changed and they need to update it.
AllUserIds []string `json:"alluserids,omitempty"`
Properties json.RawMessage `json:"properties,omitempty"`
AllUserIds []string `json:"alluserids,omitempty"`
Properties *json.RawMessage `json:"properties,omitempty"`
}
type BackendRoomUpdateRequest struct {
UserIds []string `json:"userids,omitempty"`
Properties json.RawMessage `json:"properties,omitempty"`
UserIds []string `json:"userids,omitempty"`
Properties *json.RawMessage `json:"properties,omitempty"`
}
type BackendRoomDeleteRequest struct {
@ -154,7 +154,7 @@ type BackendRoomParticipantsRequest struct {
}
type BackendRoomMessageRequest struct {
Data json.RawMessage `json:"data,omitempty"`
Data *json.RawMessage `json:"data,omitempty"`
}
type BackendRoomSwitchToSessionsList []string
@ -169,7 +169,7 @@ type BackendRoomSwitchToMessageRequest struct {
// In the map, the key is the session id, the value additional details
// (or null) for the session. The details will be included in the request
// to the connected client.
Sessions json.RawMessage `json:"sessions,omitempty"`
Sessions *json.RawMessage `json:"sessions,omitempty"`
// Internal properties
SessionsList BackendRoomSwitchToSessionsList `json:"sessionslist,omitempty"`
@ -237,8 +237,8 @@ type BackendRoomDialoutResponse struct {
// Requests from the signaling server to the Nextcloud backend.
type BackendClientAuthRequest struct {
Version string `json:"version"`
Params json.RawMessage `json:"params"`
Version string `json:"version"`
Params *json.RawMessage `json:"params"`
}
type BackendClientRequest struct {
@ -256,7 +256,7 @@ type BackendClientRequest struct {
Session *BackendClientSessionRequest `json:"session,omitempty"`
}
func NewBackendClientAuthRequest(params json.RawMessage) *BackendClientRequest {
func NewBackendClientAuthRequest(params *json.RawMessage) *BackendClientRequest {
return &BackendClientRequest{
Type: "auth",
Auth: &BackendClientAuthRequest{
@ -284,9 +284,9 @@ type BackendClientResponse struct {
}
type BackendClientAuthResponse struct {
Version string `json:"version"`
UserId string `json:"userid"`
User json.RawMessage `json:"user"`
Version string `json:"version"`
UserId string `json:"userid"`
User *json.RawMessage `json:"user"`
}
type BackendClientRoomRequest struct {
@ -315,14 +315,14 @@ func NewBackendClientRoomRequest(roomid string, userid string, sessionid string)
}
type BackendClientRoomResponse struct {
Version string `json:"version"`
RoomId string `json:"roomid"`
Properties json.RawMessage `json:"properties"`
Version string `json:"version"`
RoomId string `json:"roomid"`
Properties *json.RawMessage `json:"properties"`
// Optional information about the Nextcloud Talk session. Can be used for
// example to define a "userid" for otherwise anonymous users.
// See "RoomSessionData" for a possible content.
Session json.RawMessage `json:"session,omitempty"`
Session *json.RawMessage `json:"session,omitempty"`
Permissions *[]Permission `json:"permissions,omitempty"`
}
@ -359,12 +359,12 @@ type BackendClientRingResponse struct {
}
type BackendClientSessionRequest struct {
Version string `json:"version"`
RoomId string `json:"roomid"`
Action string `json:"action"`
SessionId string `json:"sessionid"`
UserId string `json:"userid,omitempty"`
User json.RawMessage `json:"user,omitempty"`
Version string `json:"version"`
RoomId string `json:"roomid"`
Action string `json:"action"`
SessionId string `json:"sessionid"`
UserId string `json:"userid,omitempty"`
User *json.RawMessage `json:"user,omitempty"`
}
type BackendClientSessionResponse struct {
@ -396,8 +396,8 @@ type OcsMeta struct {
}
type OcsBody struct {
Meta OcsMeta `json:"meta"`
Data json.RawMessage `json:"data"`
Meta OcsMeta `json:"meta"`
Data *json.RawMessage `json:"data"`
}
type OcsResponse struct {

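The hunks above revert `json.RawMessage` fields to the older `*json.RawMessage` form. Since Go 1.8, `json.RawMessage` marshals correctly by value, so both declarations produce the same JSON and `omitempty` elides an empty value either way; the value form simply avoids the pointer indirection. A minimal sketch (struct names are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative structs mirroring the pattern above: one field uses the
// value form, the other the pointer form. Both round-trip raw JSON
// unchanged and both honor "omitempty".
type withValue struct {
	Properties json.RawMessage `json:"properties,omitempty"`
}

type withPointer struct {
	Properties *json.RawMessage `json:"properties,omitempty"`
}

func main() {
	raw := json.RawMessage(`{"name":"room"}`)

	v, _ := json.Marshal(withValue{Properties: raw})
	p, _ := json.Marshal(withPointer{Properties: &raw})
	fmt.Println(string(v)) // {"properties":{"name":"room"}}
	fmt.Println(string(p)) // {"properties":{"name":"room"}}

	// An unset field is omitted in both variants.
	empty, _ := json.Marshal(withValue{})
	fmt.Println(string(empty)) // {}
}
```
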
View file

@ -27,7 +27,6 @@ import (
)
func TestBackendChecksum(t *testing.T) {
t.Parallel()
rnd := newRandomString(32)
body := []byte{1, 2, 3, 4, 5}
secret := []byte("shared-secret")
@ -59,7 +58,6 @@ func TestBackendChecksum(t *testing.T) {
}
func TestValidNumbers(t *testing.T) {
t.Parallel()
valid := []string{
"+12",
"+12345",

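`TestBackendChecksum` above exercises the shared-secret signing of backend requests. Per the project's API documentation, the checksum is a hex-encoded HMAC-SHA256 over the random string followed by the request body, keyed with the backend secret; a minimal sketch of that calculation (the function name is illustrative):

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// calculateBackendChecksum sketches the scheme the test exercises:
// HMAC-SHA256(secret, random || body), hex-encoded.
func calculateBackendChecksum(random string, body []byte, secret []byte) string {
	mac := hmac.New(sha256.New, secret)
	mac.Write([]byte(random))
	mac.Write(body)
	return hex.EncodeToString(mac.Sum(nil))
}

func main() {
	sum := calculateBackendChecksum("random-string", []byte{1, 2, 3, 4, 5}, []byte("shared-secret"))
	fmt.Println(sum)
}
```
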
View file

@ -24,7 +24,6 @@ package signaling
import (
"encoding/json"
"fmt"
"net/url"
"github.com/golang-jwt/jwt/v4"
)
@ -49,14 +48,6 @@ type ProxyClientMessage struct {
Payload *PayloadProxyClientMessage `json:"payload,omitempty"`
}
func (m *ProxyClientMessage) String() string {
data, err := json.Marshal(m)
if err != nil {
return fmt.Sprintf("Could not serialize %#v: %s", m, err)
}
return string(data)
}
func (m *ProxyClientMessage) CheckValid() error {
switch m.Type {
case "":
@ -124,14 +115,6 @@ type ProxyServerMessage struct {
Event *EventProxyServerMessage `json:"event,omitempty"`
}
func (r *ProxyServerMessage) String() string {
data, err := json.Marshal(r)
if err != nil {
return fmt.Sprintf("Could not serialize %#v: %s", r, err)
}
return string(data)
}
func (r *ProxyServerMessage) CloseAfterSend(session Session) bool {
switch r.Type {
case "bye":
@ -202,14 +185,6 @@ type CommandProxyClientMessage struct {
ClientId string `json:"clientId,omitempty"`
Bitrate int `json:"bitrate,omitempty"`
MediaTypes MediaType `json:"mediatypes,omitempty"`
RemoteUrl string `json:"remoteUrl,omitempty"`
remoteUrl *url.URL
RemoteToken string `json:"remoteToken,omitempty"`
Hostname string `json:"hostname,omitempty"`
Port int `json:"port,omitempty"`
RtcpPort int `json:"rtcpPort,omitempty"`
}
func (m *CommandProxyClientMessage) CheckValid() error {
@ -227,17 +202,6 @@ func (m *CommandProxyClientMessage) CheckValid() error {
if m.StreamType == "" {
return fmt.Errorf("stream type missing")
}
if m.RemoteUrl != "" {
if m.RemoteToken == "" {
return fmt.Errorf("remote token missing")
}
remoteUrl, err := url.Parse(m.RemoteUrl)
if err != nil {
return fmt.Errorf("invalid remote url: %w", err)
}
m.remoteUrl = remoteUrl
}
case "delete-publisher":
fallthrough
case "delete-subscriber":
@ -253,8 +217,6 @@ type CommandProxyServerMessage struct {
Sid string `json:"sid,omitempty"`
Bitrate int `json:"bitrate,omitempty"`
Streams []PublisherStream `json:"streams,omitempty"`
}
// Type "payload"
@ -299,41 +261,12 @@ type PayloadProxyServerMessage struct {
// Type "event"
type EventProxyServerBandwidth struct {
// Incoming is the bandwidth utilization for publishers in percent.
Incoming *float64 `json:"incoming,omitempty"`
// Outgoing is the bandwidth utilization for subscribers in percent.
Outgoing *float64 `json:"outgoing,omitempty"`
}
func (b *EventProxyServerBandwidth) String() string {
if b.Incoming != nil && b.Outgoing != nil {
return fmt.Sprintf("bandwidth: incoming=%.3f%%, outgoing=%.3f%%", *b.Incoming, *b.Outgoing)
} else if b.Incoming != nil {
return fmt.Sprintf("bandwidth: incoming=%.3f%%, outgoing=unlimited", *b.Incoming)
} else if b.Outgoing != nil {
return fmt.Sprintf("bandwidth: incoming=unlimited, outgoing=%.3f%%", *b.Outgoing)
} else {
return "bandwidth: incoming=unlimited, outgoing=unlimited"
}
}
func (b EventProxyServerBandwidth) AllowIncoming() bool {
return b.Incoming == nil || *b.Incoming < 100
}
func (b EventProxyServerBandwidth) AllowOutgoing() bool {
return b.Outgoing == nil || *b.Outgoing < 100
}
type EventProxyServerMessage struct {
Type string `json:"type"`
ClientId string `json:"clientId,omitempty"`
Load int64 `json:"load,omitempty"`
Sid string `json:"sid,omitempty"`
Bandwidth *EventProxyServerBandwidth `json:"bandwidth,omitempty"`
}
// Information on a proxy in the etcd cluster.

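The removed `EventProxyServerBandwidth` above reports bandwidth utilization as percentages, where a nil pointer means no limit is configured; a proxy only remains eligible for new publishers or subscribers while the respective utilization stays below 100%. A short usage sketch of those semantics:

```go
package main

import "fmt"

// bandwidth mirrors EventProxyServerBandwidth from the hunk above:
// nil means "unlimited", values are utilization in percent.
type bandwidth struct {
	Incoming *float64
	Outgoing *float64
}

func (b bandwidth) AllowIncoming() bool { return b.Incoming == nil || *b.Incoming < 100 }
func (b bandwidth) AllowOutgoing() bool { return b.Outgoing == nil || *b.Outgoing < 100 }

func main() {
	in, out := 42.0, 100.0
	b := bandwidth{Incoming: &in, Outgoing: &out}
	// Publishers are still allowed, subscribers are not.
	fmt.Println(b.AllowIncoming(), b.AllowOutgoing()) // true false
}
```
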
View file

@ -32,7 +32,6 @@ import (
"time"
"github.com/golang-jwt/jwt/v4"
"github.com/pion/sdp/v3"
)
const (
@ -43,11 +42,6 @@ const (
HelloVersionV2 = "2.0"
)
var (
ErrNoSdp = NewError("no_sdp", "Payload does not contain a SDP.")
ErrInvalidSdp = NewError("invalid_sdp", "Payload does not contain a valid SDP.")
)
// ClientMessage is a message that is sent from a client to the server.
type ClientMessage struct {
json.Marshaler
@ -198,12 +192,12 @@ func (r *ServerMessage) CloseAfterSend(session Session) bool {
}
func (r *ServerMessage) IsChatRefresh() bool {
if r.Type != "message" || r.Message == nil || len(r.Message.Data) == 0 {
if r.Type != "message" || r.Message == nil || r.Message.Data == nil || len(*r.Message.Data) == 0 {
return false
}
var data MessageServerMessageData
if err := json.Unmarshal(r.Message.Data, &data); err != nil {
if err := json.Unmarshal(*r.Message.Data, &data); err != nil {
return false
}
@ -366,7 +360,7 @@ func (p *HelloV2AuthParams) CheckValid() error {
type HelloV2TokenClaims struct {
jwt.RegisteredClaims
UserData json.RawMessage `json:"userdata,omitempty"`
UserData *json.RawMessage `json:"userdata,omitempty"`
}
type HelloClientMessageAuth struct {
@ -374,7 +368,7 @@ type HelloClientMessageAuth struct {
// "HelloClientTypeClient"
Type string `json:"type,omitempty"`
Params json.RawMessage `json:"params"`
Params *json.RawMessage `json:"params"`
Url string `json:"url"`
parsedUrl *url.URL
@ -393,7 +387,7 @@ type HelloClientMessage struct {
Features []string `json:"features,omitempty"`
// The authentication credentials.
Auth *HelloClientMessageAuth `json:"auth,omitempty"`
Auth HelloClientMessageAuth `json:"auth"`
}
func (m *HelloClientMessage) CheckValid() error {
@ -401,7 +395,7 @@ func (m *HelloClientMessage) CheckValid() error {
return InvalidHelloVersion
}
if m.ResumeId == "" {
if m.Auth == nil || len(m.Auth.Params) == 0 {
if m.Auth.Params == nil || len(*m.Auth.Params) == 0 {
return fmt.Errorf("params missing")
}
if m.Auth.Type == "" {
@ -425,14 +419,14 @@ func (m *HelloClientMessage) CheckValid() error {
case HelloVersionV1:
// No additional validation necessary.
case HelloVersionV2:
if err := json.Unmarshal(m.Auth.Params, &m.Auth.helloV2Params); err != nil {
if err := json.Unmarshal(*m.Auth.Params, &m.Auth.helloV2Params); err != nil {
return err
} else if err := m.Auth.helloV2Params.CheckValid(); err != nil {
return err
}
}
case HelloClientTypeInternal:
if err := json.Unmarshal(m.Auth.Params, &m.Auth.internalParams); err != nil {
if err := json.Unmarshal(*m.Auth.Params, &m.Auth.internalParams); err != nil {
return err
} else if err := m.Auth.internalParams.CheckValid(); err != nil {
return err
@ -534,8 +528,8 @@ func (m *RoomClientMessage) CheckValid() error {
}
type RoomServerMessage struct {
RoomId string `json:"roomid"`
Properties json.RawMessage `json:"properties,omitempty"`
RoomId string `json:"roomid"`
Properties *json.RawMessage `json:"properties,omitempty"`
}
type RoomErrorDetails struct {
@ -560,7 +554,7 @@ type MessageClientMessageRecipient struct {
type MessageClientMessage struct {
Recipient MessageClientMessageRecipient `json:"recipient"`
Data json.RawMessage `json:"data"`
Data *json.RawMessage `json:"data"`
}
type MessageClientMessageData struct {
@ -569,44 +563,17 @@ type MessageClientMessageData struct {
RoomType string `json:"roomType"`
Bitrate int `json:"bitrate,omitempty"`
Payload map[string]interface{} `json:"payload"`
offerSdp *sdp.SessionDescription // Only set if Type == "offer"
answerSdp *sdp.SessionDescription // Only set if Type == "answer"
}
func (m *MessageClientMessageData) CheckValid() error {
if m.RoomType != "" && !IsValidStreamType(m.RoomType) {
if !IsValidStreamType(m.RoomType) {
return fmt.Errorf("invalid room type: %s", m.RoomType)
}
if m.Type == "offer" || m.Type == "answer" {
sdpValue, found := m.Payload["sdp"]
if !found {
return ErrNoSdp
}
sdpText, ok := sdpValue.(string)
if !ok {
return ErrInvalidSdp
}
var sdp sdp.SessionDescription
if err := sdp.Unmarshal([]byte(sdpText)); err != nil {
return NewErrorDetail("invalid_sdp", "Error parsing SDP from payload.", map[string]interface{}{
"error": err.Error(),
})
}
switch m.Type {
case "offer":
m.offerSdp = &sdp
case "answer":
m.answerSdp = &sdp
}
}
return nil
}
func (m *MessageClientMessage) CheckValid() error {
if len(m.Data) == 0 {
if m.Data == nil || len(*m.Data) == 0 {
return fmt.Errorf("message empty")
}
switch m.Recipient.Type {
@ -647,7 +614,7 @@ type MessageServerMessage struct {
Sender *MessageServerMessageSender `json:"sender"`
Recipient *MessageClientMessageRecipient `json:"recipient,omitempty"`
Data json.RawMessage `json:"data"`
Data *json.RawMessage `json:"data"`
}
// Type "control"
@ -664,7 +631,7 @@ type ControlServerMessage struct {
Sender *MessageServerMessageSender `json:"sender"`
Recipient *MessageClientMessageRecipient `json:"recipient,omitempty"`
Data json.RawMessage `json:"data"`
Data *json.RawMessage `json:"data"`
}
// Type "internal"
@ -693,10 +660,10 @@ type AddSessionOptions struct {
type AddSessionInternalClientMessage struct {
CommonSessionInternalClientMessage
UserId string `json:"userid,omitempty"`
User json.RawMessage `json:"user,omitempty"`
Flags uint32 `json:"flags,omitempty"`
InCall *int `json:"incall,omitempty"`
UserId string `json:"userid,omitempty"`
User *json.RawMessage `json:"user,omitempty"`
Flags uint32 `json:"flags,omitempty"`
InCall *int `json:"incall,omitempty"`
Options *AddSessionOptions `json:"options,omitempty"`
}
@ -848,10 +815,10 @@ type InternalServerMessage struct {
// Type "event"
type RoomEventServerMessage struct {
RoomId string `json:"roomid"`
Properties json.RawMessage `json:"properties,omitempty"`
RoomId string `json:"roomid"`
Properties *json.RawMessage `json:"properties,omitempty"`
// TODO(jojo): Change "InCall" to "int" when #914 has landed in NC Talk.
InCall json.RawMessage `json:"incall,omitempty"`
InCall *json.RawMessage `json:"incall,omitempty"`
Changed []map[string]interface{} `json:"changed,omitempty"`
Users []map[string]interface{} `json:"users,omitempty"`
@ -878,8 +845,8 @@ type RoomDisinviteEventServerMessage struct {
}
type RoomEventMessage struct {
RoomId string `json:"roomid"`
Data json.RawMessage `json:"data,omitempty"`
RoomId string `json:"roomid"`
Data *json.RawMessage `json:"data,omitempty"`
}
type RoomFlagsServerMessage struct {
@ -929,10 +896,10 @@ func (m *EventServerMessage) String() string {
}
type EventServerMessageSessionEntry struct {
SessionId string `json:"sessionid"`
UserId string `json:"userid"`
User json.RawMessage `json:"user,omitempty"`
RoomSessionId string `json:"roomsessionid,omitempty"`
SessionId string `json:"sessionid"`
UserId string `json:"userid"`
User *json.RawMessage `json:"user,omitempty"`
RoomSessionId string `json:"roomsessionid,omitempty"`
}
func (e *EventServerMessageSessionEntry) Clone() *EventServerMessageSessionEntry {
@ -965,9 +932,9 @@ type AnswerOfferMessage struct {
type TransientDataClientMessage struct {
Type string `json:"type"`
Key string `json:"key,omitempty"`
Value json.RawMessage `json:"value,omitempty"`
TTL time.Duration `json:"ttl,omitempty"`
Key string `json:"key,omitempty"`
Value *json.RawMessage `json:"value,omitempty"`
TTL time.Duration `json:"ttl,omitempty"`
}
func (m *TransientDataClientMessage) CheckValid() error {

View file

@ -81,7 +81,6 @@ func testMessages(t *testing.T, messageType string, valid_messages []testCheckVa
}
func TestClientMessage(t *testing.T) {
t.Parallel()
// The message needs a type.
msg := ClientMessage{}
if err := msg.CheckValid(); err == nil {
@ -90,31 +89,30 @@ func TestClientMessage(t *testing.T) {
}
func TestHelloClientMessage(t *testing.T) {
t.Parallel()
internalAuthParams := []byte("{\"backend\":\"https://domain.invalid\"}")
tokenAuthParams := []byte("{\"token\":\"invalid-token\"}")
valid_messages := []testCheckValid{
// Hello version 1
&HelloClientMessage{
Version: HelloVersionV1,
Auth: &HelloClientMessageAuth{
Params: json.RawMessage("{}"),
Auth: HelloClientMessageAuth{
Params: &json.RawMessage{'{', '}'},
Url: "https://domain.invalid",
},
},
&HelloClientMessage{
Version: HelloVersionV1,
Auth: &HelloClientMessageAuth{
Auth: HelloClientMessageAuth{
Type: "client",
Params: json.RawMessage("{}"),
Params: &json.RawMessage{'{', '}'},
Url: "https://domain.invalid",
},
},
&HelloClientMessage{
Version: HelloVersionV1,
Auth: &HelloClientMessageAuth{
Auth: HelloClientMessageAuth{
Type: "internal",
Params: internalAuthParams,
Params: (*json.RawMessage)(&internalAuthParams),
},
},
&HelloClientMessage{
@ -124,16 +122,16 @@ func TestHelloClientMessage(t *testing.T) {
// Hello version 2
&HelloClientMessage{
Version: HelloVersionV2,
Auth: &HelloClientMessageAuth{
Params: tokenAuthParams,
Auth: HelloClientMessageAuth{
Params: (*json.RawMessage)(&tokenAuthParams),
Url: "https://domain.invalid",
},
},
&HelloClientMessage{
Version: HelloVersionV2,
Auth: &HelloClientMessageAuth{
Auth: HelloClientMessageAuth{
Type: "client",
Params: tokenAuthParams,
Params: (*json.RawMessage)(&tokenAuthParams),
Url: "https://domain.invalid",
},
},
@ -149,75 +147,75 @@ func TestHelloClientMessage(t *testing.T) {
&HelloClientMessage{Version: HelloVersionV1},
&HelloClientMessage{
Version: HelloVersionV1,
Auth: &HelloClientMessageAuth{
Params: json.RawMessage("{}"),
Auth: HelloClientMessageAuth{
Params: &json.RawMessage{'{', '}'},
Type: "invalid-type",
},
},
&HelloClientMessage{
Version: HelloVersionV1,
Auth: &HelloClientMessageAuth{
Auth: HelloClientMessageAuth{
Url: "https://domain.invalid",
},
},
&HelloClientMessage{
Version: HelloVersionV1,
Auth: &HelloClientMessageAuth{
Params: json.RawMessage("{}"),
Auth: HelloClientMessageAuth{
Params: &json.RawMessage{'{', '}'},
},
},
&HelloClientMessage{
Version: HelloVersionV1,
Auth: &HelloClientMessageAuth{
Params: json.RawMessage("{}"),
Auth: HelloClientMessageAuth{
Params: &json.RawMessage{'{', '}'},
Url: "invalid-url",
},
},
&HelloClientMessage{
Version: HelloVersionV1,
Auth: &HelloClientMessageAuth{
Auth: HelloClientMessageAuth{
Type: "internal",
Params: json.RawMessage("{}"),
Params: &json.RawMessage{'{', '}'},
},
},
&HelloClientMessage{
Version: HelloVersionV1,
Auth: &HelloClientMessageAuth{
Auth: HelloClientMessageAuth{
Type: "internal",
Params: json.RawMessage("xyz"), // Invalid JSON.
Params: &json.RawMessage{'x', 'y', 'z'}, // Invalid JSON.
},
},
// Hello version 2
&HelloClientMessage{
Version: HelloVersionV2,
Auth: &HelloClientMessageAuth{
Auth: HelloClientMessageAuth{
Url: "https://domain.invalid",
},
},
&HelloClientMessage{
Version: HelloVersionV2,
Auth: &HelloClientMessageAuth{
Params: tokenAuthParams,
Auth: HelloClientMessageAuth{
Params: (*json.RawMessage)(&tokenAuthParams),
},
},
&HelloClientMessage{
Version: HelloVersionV2,
Auth: &HelloClientMessageAuth{
Params: tokenAuthParams,
Auth: HelloClientMessageAuth{
Params: (*json.RawMessage)(&tokenAuthParams),
Url: "invalid-url",
},
},
&HelloClientMessage{
Version: HelloVersionV2,
Auth: &HelloClientMessageAuth{
Params: internalAuthParams,
Auth: HelloClientMessageAuth{
Params: (*json.RawMessage)(&internalAuthParams),
Url: "https://domain.invalid",
},
},
&HelloClientMessage{
Version: HelloVersionV2,
Auth: &HelloClientMessageAuth{
Params: json.RawMessage("xyz"), // Invalid JSON.
Auth: HelloClientMessageAuth{
Params: &json.RawMessage{'x', 'y', 'z'}, // Invalid JSON.
Url: "https://domain.invalid",
},
},
@ -235,27 +233,26 @@ func TestHelloClientMessage(t *testing.T) {
}
func TestMessageClientMessage(t *testing.T) {
t.Parallel()
valid_messages := []testCheckValid{
&MessageClientMessage{
Recipient: MessageClientMessageRecipient{
Type: "session",
SessionId: "the-session-id",
},
Data: json.RawMessage("{}"),
Data: &json.RawMessage{'{', '}'},
},
&MessageClientMessage{
Recipient: MessageClientMessageRecipient{
Type: "user",
UserId: "the-user-id",
},
Data: json.RawMessage("{}"),
Data: &json.RawMessage{'{', '}'},
},
&MessageClientMessage{
Recipient: MessageClientMessageRecipient{
Type: "room",
},
Data: json.RawMessage("{}"),
Data: &json.RawMessage{'{', '}'},
},
}
invalid_messages := []testCheckValid{
@ -270,20 +267,20 @@ func TestMessageClientMessage(t *testing.T) {
Recipient: MessageClientMessageRecipient{
Type: "session",
},
Data: json.RawMessage("{}"),
Data: &json.RawMessage{'{', '}'},
},
&MessageClientMessage{
Recipient: MessageClientMessageRecipient{
Type: "session",
UserId: "the-user-id",
},
Data: json.RawMessage("{}"),
Data: &json.RawMessage{'{', '}'},
},
&MessageClientMessage{
Recipient: MessageClientMessageRecipient{
Type: "user",
},
Data: json.RawMessage("{}"),
Data: &json.RawMessage{'{', '}'},
},
&MessageClientMessage{
Recipient: MessageClientMessageRecipient{
@ -296,13 +293,13 @@ func TestMessageClientMessage(t *testing.T) {
Type: "user",
SessionId: "the-user-id",
},
Data: json.RawMessage("{}"),
Data: &json.RawMessage{'{', '}'},
},
&MessageClientMessage{
Recipient: MessageClientMessageRecipient{
Type: "unknown-type",
},
Data: json.RawMessage("{}"),
Data: &json.RawMessage{'{', '}'},
},
}
testMessages(t, "message", valid_messages, invalid_messages)
@ -317,7 +314,6 @@ func TestMessageClientMessage(t *testing.T) {
}
func TestByeClientMessage(t *testing.T) {
t.Parallel()
// Any "bye" message is valid.
valid_messages := []testCheckValid{
&ByeClientMessage{},
@ -336,7 +332,6 @@ func TestByeClientMessage(t *testing.T) {
}
func TestRoomClientMessage(t *testing.T) {
t.Parallel()
// Any "room" message is valid.
valid_messages := []testCheckValid{
&RoomClientMessage{},
@ -355,7 +350,6 @@ func TestRoomClientMessage(t *testing.T) {
}
func TestErrorMessages(t *testing.T) {
t.Parallel()
id := "request-id"
msg := ClientMessage{
Id: id,
@ -388,13 +382,12 @@ func TestErrorMessages(t *testing.T) {
}
func TestIsChatRefresh(t *testing.T) {
t.Parallel()
var msg ServerMessage
data_true := []byte("{\"type\":\"chat\",\"chat\":{\"refresh\":true}}")
msg = ServerMessage{
Type: "message",
Message: &MessageServerMessage{
Data: data_true,
Data: (*json.RawMessage)(&data_true),
},
}
if !msg.IsChatRefresh() {
@ -405,7 +398,7 @@ func TestIsChatRefresh(t *testing.T) {
msg = ServerMessage{
Type: "message",
Message: &MessageServerMessage{
Data: data_false,
Data: (*json.RawMessage)(&data_false),
},
}
if msg.IsChatRefresh() {
@ -433,7 +426,6 @@ func assertEqualStrings(t *testing.T, expected, result []string) {
}
func Test_Welcome_AddRemoveFeature(t *testing.T) {
t.Parallel()
var msg WelcomeServerMessage
assertEqualStrings(t, []string{}, msg.Features)

View file

@ -280,8 +280,6 @@ func (e *asyncEventsNats) Close() {
sub.close()
}
}(e.sessionSubscriptions)
// Can't use clear(...) here as the maps are processed asynchronously by the
// goroutines above.
e.backendRoomSubscriptions = make(map[string]*asyncBackendRoomSubscriberNats)
e.roomSubscriptions = make(map[string]*asyncRoomSubscriberNats)
e.userSubscriptions = make(map[string]*asyncUserSubscriberNats)

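The comment removed in the hunk above documents a subtle point: `Close()` hands each subscription map to a goroutine that is still iterating it, so the struct fields must be replaced with fresh maps rather than cleared in place with `clear(...)`. A minimal sketch of the pattern (types are illustrative):

```go
package main

import "sync"

type subscriber struct{}

func (s *subscriber) close() {}

type asyncEvents struct {
	mu            sync.Mutex
	subscriptions map[string]*subscriber
}

func (e *asyncEvents) Close() {
	e.mu.Lock()
	defer e.mu.Unlock()

	// Hand the old map to a goroutine for asynchronous cleanup...
	go func(subs map[string]*subscriber) {
		for _, sub := range subs {
			sub.close()
		}
	}(e.subscriptions)

	// ...and install a fresh map. Using clear(e.subscriptions) here
	// would mutate the map the goroutine is still ranging over.
	e.subscriptions = make(map[string]*subscriber)
}

func main() {
	e := &asyncEvents{subscriptions: map[string]*subscriber{"session": {}}}
	e.Close()
}
```
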
View file

@ -194,7 +194,7 @@ func (b *BackendClient) PerformJSONRequest(ctx context.Context, u *url.URL, requ
if err := json.Unmarshal(body, &ocs); err != nil {
log.Printf("Could not decode OCS response %s from %s: %s", string(body), req.URL, err)
return err
} else if ocs.Ocs == nil || len(ocs.Ocs.Data) == 0 {
} else if ocs.Ocs == nil || ocs.Ocs.Data == nil {
log.Printf("Incomplete OCS response %s from %s", string(body), req.URL)
return ErrIncompleteResponse
}
@ -205,8 +205,8 @@ func (b *BackendClient) PerformJSONRequest(ctx context.Context, u *url.URL, requ
return ErrThrottledResponse
}
if err := json.Unmarshal(ocs.Ocs.Data, response); err != nil {
log.Printf("Could not decode OCS response body %s from %s: %s", string(ocs.Ocs.Data), req.URL, err)
if err := json.Unmarshal(*ocs.Ocs.Data, response); err != nil {
log.Printf("Could not decode OCS response body %s from %s: %s", string(*ocs.Ocs.Data), req.URL, err)
return err
}
} else if err := json.Unmarshal(body, response); err != nil {

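`PerformJSONRequest` above unwraps the standard Nextcloud OCS envelope: the interesting payload sits under `ocs.data` and needs a second `json.Unmarshal` step. A self-contained sketch mirroring the `OcsMeta`/`OcsBody` types from the API definitions earlier in this diff:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type ocsMeta struct {
	Status     string `json:"status"`
	StatusCode int    `json:"statuscode"`
	Message    string `json:"message"`
}

type ocsBody struct {
	Meta ocsMeta         `json:"meta"`
	Data json.RawMessage `json:"data"`
}

type ocsResponse struct {
	Ocs *ocsBody `json:"ocs"`
}

func main() {
	body := []byte(`{"ocs":{"meta":{"status":"ok","statuscode":200,"message":"OK"},"data":{"version":"1.0"}}}`)

	// First pass: decode the envelope, keeping "data" as raw JSON.
	var ocs ocsResponse
	if err := json.Unmarshal(body, &ocs); err != nil {
		panic(err)
	}

	// Second pass: decode the actual payload from ocs.data.
	var data struct {
		Version string `json:"version"`
	}
	if err := json.Unmarshal(ocs.Ocs.Data, &data); err != nil {
		panic(err)
	}
	fmt.Println(data.Version) // 1.0
}
```
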
View file

@ -45,7 +45,7 @@ func returnOCS(t *testing.T, w http.ResponseWriter, body []byte) {
StatusCode: http.StatusOK,
Message: "OK",
},
Data: body,
Data: (*json.RawMessage)(&body),
},
}
if strings.Contains(t.Name(), "Throttled") {
@ -70,8 +70,6 @@ func returnOCS(t *testing.T, w http.ResponseWriter, body []byte) {
}
func TestPostOnRedirect(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
r := mux.NewRouter()
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/ocs/v2.php/two", http.StatusTemporaryRedirect)
@ -127,8 +125,6 @@ func TestPostOnRedirect(t *testing.T) {
}
func TestPostOnRedirectDifferentHost(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
r := mux.NewRouter()
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "http://domain.invalid/ocs/v2.php/two", http.StatusTemporaryRedirect)
@ -169,8 +165,6 @@ func TestPostOnRedirectDifferentHost(t *testing.T) {
}
func TestPostOnRedirectStatusFound(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
r := mux.NewRouter()
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/ocs/v2.php/two", http.StatusFound)
@ -223,8 +217,6 @@ func TestPostOnRedirectStatusFound(t *testing.T) {
}
func TestHandleThrottled(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
r := mux.NewRouter()
r.HandleFunc("/ocs/v2.php/one", func(w http.ResponseWriter, r *http.Request) {
returnOCS(t, w, []byte("[]"))

View file

@ -92,7 +92,6 @@ func testBackends(t *testing.T, config *BackendConfiguration, valid_urls [][]str
}
func TestIsUrlAllowed_Compat(t *testing.T) {
CatchLogForTest(t)
// Old-style configuration
valid_urls := []string{
"http://domain.invalid",
@ -115,7 +114,6 @@ func TestIsUrlAllowed_Compat(t *testing.T) {
}
func TestIsUrlAllowed_CompatForceHttps(t *testing.T) {
CatchLogForTest(t)
// Old-style configuration, force HTTPS
valid_urls := []string{
"https://domain.invalid",
@ -137,7 +135,6 @@ func TestIsUrlAllowed_CompatForceHttps(t *testing.T) {
}
func TestIsUrlAllowed(t *testing.T) {
CatchLogForTest(t)
valid_urls := [][]string{
{"https://domain.invalid/foo", string(testBackendSecret) + "-foo"},
{"https://domain.invalid/foo/", string(testBackendSecret) + "-foo"},
@ -183,7 +180,6 @@ func TestIsUrlAllowed(t *testing.T) {
}
func TestIsUrlAllowed_EmptyAllowlist(t *testing.T) {
CatchLogForTest(t)
valid_urls := []string{}
invalid_urls := []string{
"http://domain.invalid",
@ -201,7 +197,6 @@ func TestIsUrlAllowed_EmptyAllowlist(t *testing.T) {
}
func TestIsUrlAllowed_AllowAll(t *testing.T) {
CatchLogForTest(t)
valid_urls := []string{
"http://domain.invalid",
"https://domain.invalid",
@ -227,7 +222,6 @@ type ParseBackendIdsTestcase struct {
}
func TestParseBackendIds(t *testing.T) {
CatchLogForTest(t)
testcases := []ParseBackendIdsTestcase{
{"", nil},
{"backend1", []string{"backend1"}},
@ -247,7 +241,6 @@ func TestParseBackendIds(t *testing.T) {
}
func TestBackendReloadNoChange(t *testing.T) {
CatchLogForTest(t)
current := testutil.ToFloat64(statsBackendsCurrent)
original_config := goconf.NewConfigFile()
original_config.AddOption("backend", "backends", "backend1, backend2")
@ -283,7 +276,6 @@ func TestBackendReloadNoChange(t *testing.T) {
}
func TestBackendReloadChangeExistingURL(t *testing.T) {
CatchLogForTest(t)
current := testutil.ToFloat64(statsBackendsCurrent)
original_config := goconf.NewConfigFile()
original_config.AddOption("backend", "backends", "backend1, backend2")
@ -324,7 +316,6 @@ func TestBackendReloadChangeExistingURL(t *testing.T) {
}
func TestBackendReloadChangeSecret(t *testing.T) {
CatchLogForTest(t)
current := testutil.ToFloat64(statsBackendsCurrent)
original_config := goconf.NewConfigFile()
original_config.AddOption("backend", "backends", "backend1, backend2")
@ -363,7 +354,6 @@ func TestBackendReloadChangeSecret(t *testing.T) {
}
func TestBackendReloadAddBackend(t *testing.T) {
CatchLogForTest(t)
current := testutil.ToFloat64(statsBackendsCurrent)
original_config := goconf.NewConfigFile()
original_config.AddOption("backend", "backends", "backend1")
@ -404,7 +394,6 @@ func TestBackendReloadAddBackend(t *testing.T) {
}
func TestBackendReloadRemoveHost(t *testing.T) {
CatchLogForTest(t)
current := testutil.ToFloat64(statsBackendsCurrent)
original_config := goconf.NewConfigFile()
original_config.AddOption("backend", "backends", "backend1, backend2")
@ -442,7 +431,6 @@ func TestBackendReloadRemoveHost(t *testing.T) {
}
func TestBackendReloadRemoveBackendFromSharedHost(t *testing.T) {
CatchLogForTest(t)
current := testutil.ToFloat64(statsBackendsCurrent)
original_config := goconf.NewConfigFile()
original_config.AddOption("backend", "backends", "backend1, backend2")
@ -498,8 +486,6 @@ func mustParse(s string) *url.URL {
}
func TestBackendConfiguration_Etcd(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
etcd, client := NewEtcdClientForTest(t)
url1 := "https://domain1.invalid/foo"
@ -633,8 +619,6 @@ func TestBackendConfiguration_Etcd(t *testing.T) {
}
func TestBackendCommonSecret(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
u1, err := url.Parse("http://domain1.invalid")
if err != nil {
t.Fatal(err)

View file

@ -68,7 +68,7 @@ type BackendServer struct {
turnvalid time.Duration
turnservers []string
statsAllowedIps atomic.Pointer[AllowedIps]
statsAllowedIps *AllowedIps
invalidSecret []byte
}
@ -120,7 +120,7 @@ func NewBackendServer(config *goconf.ConfigFile, hub *Hub, version string) (*Bac
return nil, err
}
result := &BackendServer{
return &BackendServer{
hub: hub,
events: hub.events,
roomSessions: hub.roomSessions,
@ -131,27 +131,9 @@ func NewBackendServer(config *goconf.ConfigFile, hub *Hub, version string) (*Bac
turnvalid: turnvalid,
turnservers: turnserverslist,
invalidSecret: invalidSecret,
}
result.statsAllowedIps.Store(statsAllowedIps)
return result, nil
}
func (b *BackendServer) Reload(config *goconf.ConfigFile) {
statsAllowed, _ := config.GetString("stats", "allowed_ips")
if statsAllowedIps, err := ParseAllowedIps(statsAllowed); err == nil {
if !statsAllowedIps.Empty() {
log.Printf("Only allowing access to the stats endpoint from %s", statsAllowed)
} else {
log.Printf("No IPs configured for the stats endpoint, only allowing access from 127.0.0.1")
statsAllowedIps = DefaultAllowedIps()
}
b.statsAllowedIps.Store(statsAllowedIps)
} else {
log.Printf("Error parsing allowed stats ips from \"%s\": %s", statsAllowedIps, err)
}
statsAllowedIps: statsAllowedIps,
invalidSecret: invalidSecret,
}, nil
}
func (b *BackendServer) Start(r *mux.Router) error {
@ -295,7 +277,7 @@ func (b *BackendServer) parseRequestBody(f func(http.ResponseWriter, *http.Reque
}
}
func (b *BackendServer) sendRoomInvite(roomid string, backend *Backend, userids []string, properties json.RawMessage) {
func (b *BackendServer) sendRoomInvite(roomid string, backend *Backend, userids []string, properties *json.RawMessage) {
msg := &AsyncMessage{
Type: "message",
Message: &ServerMessage{
@ -365,7 +347,7 @@ func (b *BackendServer) sendRoomDisinvite(roomid string, backend *Backend, reaso
wg.Wait()
}
func (b *BackendServer) sendRoomUpdate(roomid string, backend *Backend, notified_userids []string, all_userids []string, properties json.RawMessage) {
func (b *BackendServer) sendRoomUpdate(roomid string, backend *Backend, notified_userids []string, all_userids []string, properties *json.RawMessage) {
msg := &AsyncMessage{
Type: "message",
Message: &ServerMessage{
@ -571,11 +553,11 @@ func (b *BackendServer) sendRoomSwitchTo(roomid string, backend *Backend, reques
var wg sync.WaitGroup
var mu sync.Mutex
if len(request.SwitchTo.Sessions) > 0 {
if request.SwitchTo.Sessions != nil {
// We support both a list of sessions or a map with additional details per session.
if request.SwitchTo.Sessions[0] == '[' {
if (*request.SwitchTo.Sessions)[0] == '[' {
var sessionsList BackendRoomSwitchToSessionsList
if err := json.Unmarshal(request.SwitchTo.Sessions, &sessionsList); err != nil {
if err := json.Unmarshal(*request.SwitchTo.Sessions, &sessionsList); err != nil {
return err
}
@ -613,7 +595,7 @@ func (b *BackendServer) sendRoomSwitchTo(roomid string, backend *Backend, reques
request.SwitchTo.SessionsMap = nil
} else {
var sessionsMap BackendRoomSwitchToSessionsMap
if err := json.Unmarshal(request.SwitchTo.Sessions, &sessionsMap); err != nil {
if err := json.Unmarshal(*request.SwitchTo.Sessions, &sessionsMap); err != nil {
return err
}
@ -779,16 +761,6 @@ func (b *BackendServer) startDialout(roomid string, backend *Backend, backendUrl
}
func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body []byte) {
throttle, err := b.hub.throttler.CheckBruteforce(r.Context(), b.hub.getRealUserIP(r), "BackendRoomAuth")
if err == ErrBruteforceDetected {
http.Error(w, "Too many requests", http.StatusTooManyRequests)
return
} else if err != nil {
log.Printf("Error checking for bruteforce: %s", err)
http.Error(w, "Could not check for bruteforce", http.StatusInternalServerError)
return
}
v := mux.Vars(r)
roomid := v["roomid"]
@ -801,7 +773,6 @@ func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body
if backend == nil {
// Unknown backend URL passed, return immediately.
throttle(r.Context())
http.Error(w, "Authentication check failed", http.StatusForbidden)
return
}
@ -823,14 +794,12 @@ func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body
}
if backend == nil {
throttle(r.Context())
http.Error(w, "Authentication check failed", http.StatusForbidden)
return
}
}
if !ValidateBackendChecksum(r, body, backend.Secret()) {
throttle(r.Context())
http.Error(w, "Authentication check failed", http.StatusForbidden)
return
}
@ -845,6 +814,7 @@ func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body
request.ReceivedTime = time.Now().UnixNano()
var response any
var err error
switch request.Type {
case "invite":
b.sendRoomInvite(roomid, backend, request.Invite.UserIds, request.Invite.Properties)
@ -911,14 +881,19 @@ func (b *BackendServer) roomHandler(w http.ResponseWriter, r *http.Request, body
}
func (b *BackendServer) allowStatsAccess(r *http.Request) bool {
addr := b.hub.getRealUserIP(r)
addr := getRealUserIP(r)
if strings.Contains(addr, ":") {
if host, _, err := net.SplitHostPort(addr); err == nil {
addr = host
}
}
ip := net.ParseIP(addr)
if len(ip) == 0 {
if ip == nil {
return false
}
allowed := b.statsAllowedIps.Load()
return allowed != nil && allowed.Allowed(ip)
return b.statsAllowedIps.Allowed(ip)
}
func (b *BackendServer) validateStatsRequest(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {

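The `sendRoomSwitchTo` hunk above dispatches on the first byte of the raw `sessions` value: a JSON array is treated as a plain list of session ids, anything else as a map from session id to optional per-session details. A minimal sketch of that dispatch:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// parseSwitchToSessions sketches the list-or-map handling above: the
// first byte of the raw JSON decides which shape to decode.
func parseSwitchToSessions(raw json.RawMessage) error {
	if len(raw) == 0 {
		return nil
	}
	if raw[0] == '[' {
		var list []string
		if err := json.Unmarshal(raw, &list); err != nil {
			return err
		}
		fmt.Println("list of sessions:", list)
		return nil
	}
	var sessions map[string]json.RawMessage
	if err := json.Unmarshal(raw, &sessions); err != nil {
		return err
	}
	fmt.Println("sessions with details:", len(sessions))
	return nil
}

func main() {
	_ = parseSwitchToSessions(json.RawMessage(`["session1","session2"]`))
	_ = parseSwitchToSessions(json.RawMessage(`{"session1":{"foo":"bar"},"session2":null}`))
}
```
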
View file

@ -30,7 +30,6 @@ import (
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/http/httptest"
"net/textproto"
@ -276,8 +275,6 @@ func expectRoomlistEvent(ch chan *AsyncMessage, msgType string) (*EventServerMes
}
func TestBackendServer_NoAuth(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, _, _, server := CreateBackendServerForTest(t)
roomId := "the-room-id"
@ -304,8 +301,6 @@ func TestBackendServer_NoAuth(t *testing.T) {
}
func TestBackendServer_InvalidAuth(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, _, _, server := CreateBackendServerForTest(t)
roomId := "the-room-id"
@ -334,8 +329,6 @@ func TestBackendServer_InvalidAuth(t *testing.T) {
}
func TestBackendServer_OldCompatAuth(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, _, _, server := CreateBackendServerForTest(t)
roomId := "the-room-id"
@ -350,7 +343,7 @@ func TestBackendServer_OldCompatAuth(t *testing.T) {
AllUserIds: []string{
userid,
},
Properties: roomProperties,
Properties: &roomProperties,
},
}
@ -385,8 +378,6 @@ func TestBackendServer_OldCompatAuth(t *testing.T) {
}
func TestBackendServer_InvalidBody(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, _, _, server := CreateBackendServerForTest(t)
roomId := "the-room-id"
@ -406,8 +397,6 @@ func TestBackendServer_InvalidBody(t *testing.T) {
}
func TestBackendServer_UnsupportedRequest(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, _, _, server := CreateBackendServerForTest(t)
msg := &BackendServerRoomRequest{
@ -434,10 +423,8 @@ func TestBackendServer_UnsupportedRequest(t *testing.T) {
}
func TestBackendServer_RoomInvite(t *testing.T) {
CatchLogForTest(t)
for _, backend := range eventBackendsForTest {
t.Run(backend, func(t *testing.T) {
t.Parallel()
RunTestBackendServer_RoomInvite(t)
})
}
@ -481,7 +468,7 @@ func RunTestBackendServer_RoomInvite(t *testing.T) {
AllUserIds: []string{
userid,
},
Properties: roomProperties,
Properties: &roomProperties,
},
}
@ -510,16 +497,14 @@ func RunTestBackendServer_RoomInvite(t *testing.T) {
t.Errorf("Expected invite, got %+v", event)
} else if event.Invite.RoomId != roomId {
t.Errorf("Expected room %s, got %+v", roomId, event)
} else if !bytes.Equal(event.Invite.Properties, roomProperties) {
t.Errorf("Room properties don't match: expected %s, got %s", string(roomProperties), string(event.Invite.Properties))
} else if event.Invite.Properties == nil || !bytes.Equal(*event.Invite.Properties, roomProperties) {
t.Errorf("Room properties don't match: expected %s, got %s", string(roomProperties), string(*event.Invite.Properties))
}
}
func TestBackendServer_RoomDisinvite(t *testing.T) {
CatchLogForTest(t)
for _, backend := range eventBackendsForTest {
t.Run(backend, func(t *testing.T) {
t.Parallel()
RunTestBackendServer_RoomDisinvite(t)
})
}
@ -583,7 +568,7 @@ func RunTestBackendServer_RoomDisinvite(t *testing.T) {
roomId + "-" + hello.Hello.SessionId,
},
AllUserIds: []string{},
Properties: roomProperties,
Properties: &roomProperties,
},
}
@ -611,8 +596,8 @@ func RunTestBackendServer_RoomDisinvite(t *testing.T) {
t.Errorf("Expected disinvite, got %+v", event)
} else if event.Disinvite.RoomId != roomId {
t.Errorf("Expected room %s, got %+v", roomId, event)
} else if len(event.Disinvite.Properties) > 0 {
t.Errorf("Room properties should be omitted, got %s", string(event.Disinvite.Properties))
} else if event.Disinvite.Properties != nil {
t.Errorf("Room properties should be omitted, got %s", string(*event.Disinvite.Properties))
} else if event.Disinvite.Reason != "disinvited" {
t.Errorf("Reason should be disinvited, got %s", event.Disinvite.Reason)
}
@ -631,8 +616,6 @@ func RunTestBackendServer_RoomDisinvite(t *testing.T) {
}
func TestBackendServer_RoomDisinviteDifferentRooms(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, hub, _, server := CreateBackendServerForTest(t)
client1 := NewTestClient(t, server, hub)
@ -729,7 +712,7 @@ func TestBackendServer_RoomDisinviteDifferentRooms(t *testing.T) {
UserIds: []string{
testDefaultUserId,
},
Properties: testRoomProperties,
Properties: (*json.RawMessage)(&testRoomProperties),
},
}
@ -758,10 +741,8 @@ func TestBackendServer_RoomDisinviteDifferentRooms(t *testing.T) {
}
func TestBackendServer_RoomUpdate(t *testing.T) {
CatchLogForTest(t)
for _, backend := range eventBackendsForTest {
t.Run(backend, func(t *testing.T) {
t.Parallel()
RunTestBackendServer_RoomUpdate(t)
})
}
@ -781,7 +762,7 @@ func RunTestBackendServer_RoomUpdate(t *testing.T) {
if backend == nil {
t.Fatalf("Did not find backend")
}
room, err := hub.createRoom(roomId, emptyProperties, backend)
room, err := hub.createRoom(roomId, &emptyProperties, backend)
if err != nil {
t.Fatalf("Could not create room: %s", err)
}
@ -805,7 +786,7 @@ func RunTestBackendServer_RoomUpdate(t *testing.T) {
UserIds: []string{
userid,
},
Properties: roomProperties,
Properties: &roomProperties,
},
}
@ -833,8 +814,8 @@ func RunTestBackendServer_RoomUpdate(t *testing.T) {
t.Errorf("Expected update, got %+v", event)
} else if event.Update.RoomId != roomId {
t.Errorf("Expected room %s, got %+v", roomId, event)
} else if !bytes.Equal(event.Update.Properties, roomProperties) {
t.Errorf("Room properties don't match: expected %s, got %s", string(roomProperties), string(event.Update.Properties))
} else if event.Update.Properties == nil || !bytes.Equal(*event.Update.Properties, roomProperties) {
t.Errorf("Room properties don't match: expected %s, got %s", string(roomProperties), string(*event.Update.Properties))
}
// TODO: Use event to wait for asynchronous messages.
@ -844,16 +825,14 @@ func RunTestBackendServer_RoomUpdate(t *testing.T) {
if room == nil {
t.Fatalf("Room %s does not exist", roomId)
}
if string(room.Properties()) != string(roomProperties) {
t.Errorf("Expected properties %s for room %s, got %s", string(roomProperties), room.Id(), string(room.Properties()))
if string(*room.Properties()) != string(roomProperties) {
t.Errorf("Expected properties %s for room %s, got %s", string(roomProperties), room.Id(), string(*room.Properties()))
}
}
func TestBackendServer_RoomDelete(t *testing.T) {
CatchLogForTest(t)
for _, backend := range eventBackendsForTest {
t.Run(backend, func(t *testing.T) {
t.Parallel()
RunTestBackendServer_RoomDelete(t)
})
}
@ -873,7 +852,7 @@ func RunTestBackendServer_RoomDelete(t *testing.T) {
if backend == nil {
t.Fatalf("Did not find backend")
}
if _, err := hub.createRoom(roomId, emptyProperties, backend); err != nil {
if _, err := hub.createRoom(roomId, &emptyProperties, backend); err != nil {
t.Fatalf("Could not create room: %s", err)
}
@ -921,8 +900,8 @@ func RunTestBackendServer_RoomDelete(t *testing.T) {
t.Errorf("Expected disinvite, got %+v", event)
} else if event.Disinvite.RoomId != roomId {
t.Errorf("Expected room %s, got %+v", roomId, event)
} else if len(event.Disinvite.Properties) > 0 {
t.Errorf("Room properties should be omitted, got %s", string(event.Disinvite.Properties))
} else if event.Disinvite.Properties != nil {
t.Errorf("Room properties should be omitted, got %s", string(*event.Disinvite.Properties))
} else if event.Disinvite.Reason != "deleted" {
t.Errorf("Reason should be deleted, got %s", event.Disinvite.Reason)
}
@ -937,10 +916,8 @@ func RunTestBackendServer_RoomDelete(t *testing.T) {
}
func TestBackendServer_ParticipantsUpdatePermissions(t *testing.T) {
CatchLogForTest(t)
for _, subtest := range clusteredTests {
t.Run(subtest, func(t *testing.T) {
t.Parallel()
var hub1 *Hub
var hub2 *Hub
var server1 *httptest.Server
@ -1070,8 +1047,6 @@ func TestBackendServer_ParticipantsUpdatePermissions(t *testing.T) {
}
func TestBackendServer_ParticipantsUpdateEmptyPermissions(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, hub, _, server := CreateBackendServerForTest(t)
client := NewTestClient(t, server, hub)
@ -1157,8 +1132,6 @@ func TestBackendServer_ParticipantsUpdateEmptyPermissions(t *testing.T) {
}
func TestBackendServer_ParticipantsUpdateTimeout(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, hub, _, server := CreateBackendServerForTest(t)
client1 := NewTestClient(t, server, hub)
@ -1372,10 +1345,8 @@ func TestBackendServer_ParticipantsUpdateTimeout(t *testing.T) {
}
func TestBackendServer_InCallAll(t *testing.T) {
CatchLogForTest(t)
for _, subtest := range clusteredTests {
t.Run(subtest, func(t *testing.T) {
t.Parallel()
var hub1 *Hub
var hub2 *Hub
var server1 *httptest.Server
@ -1500,8 +1471,8 @@ func TestBackendServer_InCallAll(t *testing.T) {
t.Error(err)
} else if !in_call_1.All {
t.Errorf("All flag not set in message %+v", in_call_1)
} else if !bytes.Equal(in_call_1.InCall, []byte("7")) {
t.Errorf("Expected inCall flag 7, got %s", string(in_call_1.InCall))
} else if !bytes.Equal(*in_call_1.InCall, []byte("7")) {
t.Errorf("Expected inCall flag 7, got %s", string(*in_call_1.InCall))
}
if msg2_a, err := client2.RunUntilMessage(ctx); err != nil {
@ -1510,8 +1481,8 @@ func TestBackendServer_InCallAll(t *testing.T) {
t.Error(err)
} else if !in_call_1.All {
t.Errorf("All flag not set in message %+v", in_call_1)
} else if !bytes.Equal(in_call_1.InCall, []byte("7")) {
t.Errorf("Expected inCall flag 7, got %s", string(in_call_1.InCall))
} else if !bytes.Equal(*in_call_1.InCall, []byte("7")) {
t.Errorf("Expected inCall flag 7, got %s", string(*in_call_1.InCall))
}
if !room1.IsSessionInCall(session1) {
@ -1581,8 +1552,8 @@ func TestBackendServer_InCallAll(t *testing.T) {
t.Error(err)
} else if !in_call_1.All {
t.Errorf("All flag not set in message %+v", in_call_1)
} else if !bytes.Equal(in_call_1.InCall, []byte("0")) {
t.Errorf("Expected inCall flag 0, got %s", string(in_call_1.InCall))
} else if !bytes.Equal(*in_call_1.InCall, []byte("0")) {
t.Errorf("Expected inCall flag 0, got %s", string(*in_call_1.InCall))
}
if msg2_a, err := client2.RunUntilMessage(ctx); err != nil {
@ -1591,8 +1562,8 @@ func TestBackendServer_InCallAll(t *testing.T) {
t.Error(err)
} else if !in_call_1.All {
t.Errorf("All flag not set in message %+v", in_call_1)
} else if !bytes.Equal(in_call_1.InCall, []byte("0")) {
t.Errorf("Expected inCall flag 0, got %s", string(in_call_1.InCall))
} else if !bytes.Equal(*in_call_1.InCall, []byte("0")) {
t.Errorf("Expected inCall flag 0, got %s", string(*in_call_1.InCall))
}
if room1.IsSessionInCall(session1) {
@ -1624,8 +1595,6 @@ func TestBackendServer_InCallAll(t *testing.T) {
}
func TestBackendServer_RoomMessage(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, hub, _, server := CreateBackendServerForTest(t)
client := NewTestClient(t, server, hub)
@ -1659,7 +1628,7 @@ func TestBackendServer_RoomMessage(t *testing.T) {
msg := &BackendServerRoomRequest{
Type: "message",
Message: &BackendRoomMessageRequest{
Data: messageData,
Data: &messageData,
},
}
@ -1685,14 +1654,12 @@ func TestBackendServer_RoomMessage(t *testing.T) {
t.Error(err)
} else if message.RoomId != roomId {
t.Errorf("Expected message for room %s, got %s", roomId, message.RoomId)
} else if !bytes.Equal(messageData, message.Data) {
t.Errorf("Expected message data %s, got %s", string(messageData), string(message.Data))
} else if !bytes.Equal(messageData, *message.Data) {
t.Errorf("Expected message data %s, got %s", string(messageData), string(*message.Data))
}
}
func TestBackendServer_TurnCredentials(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, _, _, server := CreateBackendServerForTestWithTurn(t)
q := make(url.Values)
@ -1736,9 +1703,7 @@ func TestBackendServer_TurnCredentials(t *testing.T) {
}
func TestBackendServer_StatsAllowedIps(t *testing.T) {
CatchLogForTest(t)
config := goconf.NewConfigFile()
config.AddOption("app", "trustedproxies", "1.2.3.4")
config.AddOption("stats", "allowed_ips", "127.0.0.1, 192.168.0.1, 192.168.1.1/24")
_, backend, _, _, _, _ := CreateBackendServerForTestFromConfig(t, config)
@ -1755,9 +1720,7 @@ func TestBackendServer_StatsAllowedIps(t *testing.T) {
}
for _, addr := range allowed {
addr := addr
t.Run(addr, func(t *testing.T) {
t.Parallel()
r1 := &http.Request{
RemoteAddr: addr,
}
@ -1765,10 +1728,6 @@ func TestBackendServer_StatsAllowedIps(t *testing.T) {
t.Errorf("should allow %s", addr)
}
if host, _, err := net.SplitHostPort(addr); err == nil {
addr = host
}
r2 := &http.Request{
RemoteAddr: "1.2.3.4:12345",
Header: http.Header{
@ -1802,9 +1761,7 @@ func TestBackendServer_StatsAllowedIps(t *testing.T) {
}
for _, addr := range notAllowed {
addr := addr
t.Run(addr, func(t *testing.T) {
t.Parallel()
r := &http.Request{
RemoteAddr: addr,
}
@ -1816,7 +1773,6 @@ func TestBackendServer_StatsAllowedIps(t *testing.T) {
}
func Test_IsNumeric(t *testing.T) {
t.Parallel()
numeric := []string{
"0",
"1",
@ -1846,8 +1802,6 @@ func Test_IsNumeric(t *testing.T) {
}
func TestBackendServer_DialoutNoSipBridge(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, hub, _, server := CreateBackendServerForTest(t)
client := NewTestClient(t, server, hub)
@ -1906,8 +1860,6 @@ func TestBackendServer_DialoutNoSipBridge(t *testing.T) {
}
func TestBackendServer_DialoutAccepted(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, hub, _, server := CreateBackendServerForTest(t)
client := NewTestClient(t, server, hub)
@ -2014,8 +1966,6 @@ func TestBackendServer_DialoutAccepted(t *testing.T) {
}
func TestBackendServer_DialoutAcceptedCompat(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, hub, _, server := CreateBackendServerForTest(t)
client := NewTestClient(t, server, hub)
@ -2122,8 +2072,6 @@ func TestBackendServer_DialoutAcceptedCompat(t *testing.T) {
}
func TestBackendServer_DialoutRejected(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
_, _, _, hub, _, server := CreateBackendServerForTest(t)
client := NewTestClient(t, server, hub)
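
Several hunks above change properties and message data from json.RawMessage to *json.RawMessage, which is why the omission checks flip between `len(p) > 0` and `p != nil`. A minimal sketch of the difference the tests rely on (the struct names are invented for illustration):

package main

import (
	"encoding/json"
	"fmt"
)

// eventByValue mirrors the master branch (raw bytes by value),
// eventByPointer mirrors v1.2.4 (raw bytes behind a pointer). Both rely on
// "omitempty" to drop absent properties from the wire format.
type eventByValue struct {
	RoomId     string          `json:"roomid"`
	Properties json.RawMessage `json:"properties,omitempty"`
}

type eventByPointer struct {
	RoomId     string           `json:"roomid"`
	Properties *json.RawMessage `json:"properties,omitempty"`
}

func main() {
	v, _ := json.Marshal(eventByValue{RoomId: "room"})
	p, _ := json.Marshal(eventByPointer{RoomId: "room"})
	fmt.Println(string(v)) // {"roomid":"room"} - empty slice is omitted
	fmt.Println(string(p)) // {"roomid":"room"} - nil pointer is omitted

	// After decoding, absence shows up differently: a zero-length slice for
	// the value type, a nil pointer for the pointer type - hence
	// "len(props) > 0" on master vs "props != nil" on v1.2.4.
	var dv eventByValue
	var dp eventByPointer
	json.Unmarshal(v, &dv)
	json.Unmarshal(p, &dp)
	fmt.Println(len(dv.Properties) == 0, dp.Properties == nil) // true true
}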

View file

@ -24,10 +24,10 @@ package signaling
import (
"context"
"encoding/json"
"errors"
"fmt"
"log"
"net/url"
"sync"
"time"
"github.com/dlintw/goconf"
@ -43,10 +43,8 @@ type backendStorageEtcd struct {
initializedCtx context.Context
initializedFunc context.CancelFunc
initializedWg sync.WaitGroup
wakeupChanForTesting chan struct{}
closeCtx context.Context
closeFunc context.CancelFunc
}
func NewBackendStorageEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient) (BackendStorage, error) {
@ -60,7 +58,6 @@ func NewBackendStorageEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient) (B
}
initializedCtx, initializedFunc := context.WithCancel(context.Background())
closeCtx, closeFunc := context.WithCancel(context.Background())
result := &backendStorageEtcd{
backendStorageCommon: backendStorageCommon{
backends: make(map[string][]*Backend),
@ -71,8 +68,6 @@ func NewBackendStorageEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient) (B
initializedCtx: initializedCtx,
initializedFunc: initializedFunc,
closeCtx: closeCtx,
closeFunc: closeFunc,
}
etcdClient.AddListener(result)
@ -100,12 +95,15 @@ func (s *backendStorageEtcd) wakeupForTesting() {
}
func (s *backendStorageEtcd) EtcdClientCreated(client *EtcdClient) {
s.initializedWg.Add(1)
go func() {
if err := client.WaitForConnection(s.closeCtx); err != nil {
if errors.Is(err, context.Canceled) {
return
}
if err := client.Watch(context.Background(), s.keyPrefix, s, clientv3.WithPrefix()); err != nil {
log.Printf("Error processing watch for %s: %s", s.keyPrefix, err)
}
}()
go func() {
if err := client.WaitForConnection(context.Background()); err != nil {
panic(err)
}
@ -113,61 +111,41 @@ func (s *backendStorageEtcd) EtcdClientCreated(client *EtcdClient) {
if err != nil {
panic(err)
}
for s.closeCtx.Err() == nil {
response, err := s.getBackends(s.closeCtx, client, s.keyPrefix)
for {
response, err := s.getBackends(client, s.keyPrefix)
if err != nil {
if errors.Is(err, context.Canceled) {
return
} else if errors.Is(err, context.DeadlineExceeded) {
if err == context.DeadlineExceeded {
log.Printf("Timeout getting initial list of backends, retry in %s", backoff.NextWait())
} else {
log.Printf("Could not get initial list of backends, retry in %s: %s", backoff.NextWait(), err)
}
backoff.Wait(s.closeCtx)
backoff.Wait(context.Background())
continue
}
for _, ev := range response.Kvs {
s.EtcdKeyUpdated(client, string(ev.Key), ev.Value, nil)
s.EtcdKeyUpdated(client, string(ev.Key), ev.Value)
}
s.initializedWg.Wait()
s.initializedFunc()
nextRevision := response.Header.Revision + 1
prevRevision := nextRevision
backoff.Reset()
for s.closeCtx.Err() == nil {
var err error
if nextRevision, err = client.Watch(s.closeCtx, s.keyPrefix, nextRevision, s, clientv3.WithPrefix()); err != nil {
log.Printf("Error processing watch for %s (%s), retry in %s", s.keyPrefix, err, backoff.NextWait())
backoff.Wait(s.closeCtx)
continue
}
if nextRevision != prevRevision {
backoff.Reset()
prevRevision = nextRevision
} else {
log.Printf("Processing watch for %s interrupted, retry in %s", s.keyPrefix, backoff.NextWait())
backoff.Wait(s.closeCtx)
}
}
return
}
}()
}
func (s *backendStorageEtcd) EtcdWatchCreated(client *EtcdClient, key string) {
s.initializedWg.Done()
}
func (s *backendStorageEtcd) getBackends(ctx context.Context, client *EtcdClient, keyPrefix string) (*clientv3.GetResponse, error) {
ctx, cancel := context.WithTimeout(ctx, time.Second)
func (s *backendStorageEtcd) getBackends(client *EtcdClient, keyPrefix string) (*clientv3.GetResponse, error) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
return client.Get(ctx, keyPrefix, clientv3.WithPrefix())
}
func (s *backendStorageEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data []byte, prevValue []byte) {
func (s *backendStorageEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data []byte) {
var info BackendInformationEtcd
if err := json.Unmarshal(data, &info); err != nil {
log.Printf("Could not decode backend information %s: %s", string(data), err)
@ -227,7 +205,7 @@ func (s *backendStorageEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data
s.wakeupForTesting()
}
func (s *backendStorageEtcd) EtcdKeyDeleted(client *EtcdClient, key string, prevValue []byte) {
func (s *backendStorageEtcd) EtcdKeyDeleted(client *EtcdClient, key string) {
s.mu.Lock()
defer s.mu.Unlock()
@ -263,7 +241,6 @@ func (s *backendStorageEtcd) EtcdKeyDeleted(client *EtcdClient, key string, prev
func (s *backendStorageEtcd) Close() {
s.etcdClient.RemoveListener(s)
s.closeFunc()
}
func (s *backendStorageEtcd) Reload(config *goconf.ConfigFile) {
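
The master side of this hunk resumes the watch from the last seen revision instead of restarting from scratch, so a reconnect neither replays nor misses updates. A self-contained sketch of that resume pattern against the etcd clientv3 API (the endpoint and prefix are placeholders; backoff and shutdown handling are left to the caller):

package main

import (
	"context"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// watchWithResume consumes watch events while tracking the next revision, so
// a restarted watch continues exactly where the previous one stopped.
func watchWithResume(ctx context.Context, client *clientv3.Client, prefix string, rev int64) (int64, error) {
	opts := []clientv3.OpOption{
		clientv3.WithPrefix(),
		clientv3.WithRev(rev),
		clientv3.WithPrevKV(), // deliver the previous value on updates/deletes
	}
	// WithRequireLeader aborts the watch if the connected member loses quorum.
	ch := client.Watch(clientv3.WithRequireLeader(ctx), prefix, opts...)
	for response := range ch {
		if err := response.Err(); err != nil {
			return rev, err // caller retries from the last known revision
		}
		rev = response.Header.Revision + 1
		for _, ev := range response.Events {
			log.Printf("%s %q -> %q", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
	return rev, nil
}

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}}) // assumes a local etcd
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	if _, err := watchWithResume(context.Background(), cli, "/backends", 1); err != nil {
		log.Fatal(err)
	}
}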

View file

@ -21,13 +21,6 @@
*/
package signaling
import (
"testing"
"github.com/dlintw/goconf"
"go.etcd.io/etcd/server/v3/embed"
)
func (s *backendStorageEtcd) getWakeupChannelForTesting() <-chan struct{} {
s.mu.Lock()
defer s.mu.Unlock()
@ -40,38 +33,3 @@ func (s *backendStorageEtcd) getWakeupChannelForTesting() <-chan struct{} {
s.wakeupChanForTesting = ch
return ch
}
type testListener struct {
etcd *embed.Etcd
closed chan struct{}
}
func (tl *testListener) EtcdClientCreated(client *EtcdClient) {
tl.etcd.Server.Stop()
close(tl.closed)
}
func Test_BackendStorageEtcdNoLeak(t *testing.T) {
CatchLogForTest(t)
ensureNoGoroutinesLeak(t, func(t *testing.T) {
etcd, client := NewEtcdClientForTest(t)
tl := &testListener{
etcd: etcd,
closed: make(chan struct{}),
}
client.AddListener(tl)
defer client.RemoveListener(tl)
config := goconf.NewConfigFile()
config.AddOption("backend", "backendtype", "etcd")
config.AddOption("backend", "backendprefix", "/backends")
cfg, err := NewBackendConfiguration(config, client)
if err != nil {
t.Fatal(err)
}
<-tl.closed
cfg.Close()
})
}

View file

@ -28,7 +28,6 @@ import (
)
func TestBackoff_Exponential(t *testing.T) {
t.Parallel()
backoff, err := NewExponentialBackoff(100*time.Millisecond, 500*time.Millisecond)
if err != nil {
t.Fatal(err)

View file

@ -48,6 +48,9 @@ const (
maxInvalidateInterval = time.Minute
)
// Can be overwritten by tests.
var getCapabilitiesNow = time.Now
type capabilitiesEntry struct {
nextUpdate time.Time
capabilities map[string]interface{}
@ -56,9 +59,6 @@ type capabilitiesEntry struct {
type Capabilities struct {
mu sync.RWMutex
// Can be overwritten by tests.
getNow func() time.Time
version string
pool *HttpClientPool
entries map[string]*capabilitiesEntry
@ -67,8 +67,6 @@ type Capabilities struct {
func NewCapabilities(version string, pool *HttpClientPool) (*Capabilities, error) {
result := &Capabilities{
getNow: time.Now,
version: version,
pool: pool,
entries: make(map[string]*capabilitiesEntry),
@ -88,15 +86,15 @@ type CapabilitiesVersion struct {
}
type CapabilitiesResponse struct {
Version CapabilitiesVersion `json:"version"`
Capabilities map[string]json.RawMessage `json:"capabilities"`
Version CapabilitiesVersion `json:"version"`
Capabilities map[string]*json.RawMessage `json:"capabilities"`
}
func (c *Capabilities) getCapabilities(key string) (map[string]interface{}, bool) {
c.mu.RLock()
defer c.mu.RUnlock()
now := c.getNow()
now := getCapabilitiesNow()
if entry, found := c.entries[key]; found && entry.nextUpdate.After(now) {
return entry.capabilities, true
}
@ -105,15 +103,14 @@ func (c *Capabilities) getCapabilities(key string) (map[string]interface{}, bool
}
func (c *Capabilities) setCapabilities(key string, capabilities map[string]interface{}) {
c.mu.Lock()
defer c.mu.Unlock()
now := c.getNow()
now := getCapabilitiesNow()
entry := &capabilitiesEntry{
nextUpdate: now.Add(CapabilitiesCacheDuration),
capabilities: capabilities,
}
c.mu.Lock()
defer c.mu.Unlock()
c.entries[key] = entry
}
@ -121,7 +118,7 @@ func (c *Capabilities) invalidateCapabilities(key string) {
c.mu.Lock()
defer c.mu.Unlock()
now := c.getNow()
now := getCapabilitiesNow()
if entry, found := c.nextInvalidate[key]; found && entry.After(now) {
return
}
@ -191,25 +188,25 @@ func (c *Capabilities) loadCapabilities(ctx context.Context, u *url.URL) (map[st
if err := json.Unmarshal(body, &ocs); err != nil {
log.Printf("Could not decode OCS response %s from %s: %s", string(body), capUrl.String(), err)
return nil, false, err
} else if ocs.Ocs == nil || len(ocs.Ocs.Data) == 0 {
} else if ocs.Ocs == nil || ocs.Ocs.Data == nil {
log.Printf("Incomplete OCS response %s from %s", string(body), u)
return nil, false, fmt.Errorf("incomplete OCS response")
}
var response CapabilitiesResponse
if err := json.Unmarshal(ocs.Ocs.Data, &response); err != nil {
log.Printf("Could not decode OCS response body %s from %s: %s", string(ocs.Ocs.Data), capUrl.String(), err)
if err := json.Unmarshal(*ocs.Ocs.Data, &response); err != nil {
log.Printf("Could not decode OCS response body %s from %s: %s", string(*ocs.Ocs.Data), capUrl.String(), err)
return nil, false, err
}
capaObj, found := response.Capabilities[AppNameSpreed]
if !found || len(capaObj) == 0 {
if !found || capaObj == nil {
log.Printf("No capabilities received for app spreed from %s: %+v", capUrl.String(), response)
return nil, false, nil
}
var capa map[string]interface{}
if err := json.Unmarshal(capaObj, &capa); err != nil {
if err := json.Unmarshal(*capaObj, &capa); err != nil {
log.Printf("Unsupported capabilities received for app spreed from %s: %+v", capUrl.String(), response)
return nil, false, nil
}
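
The `getNow` field on master replaces the package-level `getCapabilitiesNow` variable, so each Capabilities instance carries its own clock and parallel tests can advance time independently. A minimal sketch of this injectable-clock pattern (the cache type is invented for illustration):

package main

import (
	"fmt"
	"sync"
	"time"
)

// cache keeps its own "now" function instead of calling time.Now directly,
// so tests can substitute a fake clock without sleeping or data races.
type cache struct {
	mu      sync.RWMutex
	getNow  func() time.Time
	expires time.Time
	value   string
}

func (c *cache) get() (string, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	if c.getNow().After(c.expires) {
		return "", false
	}
	return c.value, true
}

func (c *cache) set(value string, ttl time.Duration) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.value = value
	c.expires = c.getNow().Add(ttl)
}

func main() {
	now := time.Now()
	c := &cache{getNow: func() time.Time { return now }}
	c.set("capabilities", time.Minute)
	_, ok := c.get()
	fmt.Println(ok) // true

	now = now.Add(2 * time.Minute) // "advance" the clock, no sleeping needed
	_, ok = c.get()
	fmt.Println(ok) // false - the entry expired
}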

View file

@ -80,9 +80,9 @@ func NewCapabilitiesForTestWithCallback(t *testing.T, callback func(*Capabilitie
Version: CapabilitiesVersion{
Major: 20,
},
Capabilities: map[string]json.RawMessage{
"anotherApp": emptyArray,
"spreed": spreedCapa,
Capabilities: map[string]*json.RawMessage{
"anotherApp": (*json.RawMessage)(&emptyArray),
"spreed": (*json.RawMessage)(&spreedCapa),
},
}
@ -102,7 +102,7 @@ func NewCapabilitiesForTestWithCallback(t *testing.T, callback func(*Capabilitie
StatusCode: http.StatusOK,
Message: http.StatusText(http.StatusOK),
},
Data: data,
Data: (*json.RawMessage)(&data),
}
if data, err = json.Marshal(ocs); err != nil {
t.Fatal(err)
@ -120,25 +120,16 @@ func NewCapabilitiesForTest(t *testing.T) (*url.URL, *Capabilities) {
return NewCapabilitiesForTestWithCallback(t, nil)
}
func SetCapabilitiesGetNow(t *testing.T, capabilities *Capabilities, f func() time.Time) {
capabilities.mu.Lock()
defer capabilities.mu.Unlock()
old := capabilities.getNow
func SetCapabilitiesGetNow(t *testing.T, f func() time.Time) {
old := getCapabilitiesNow
t.Cleanup(func() {
capabilities.mu.Lock()
defer capabilities.mu.Unlock()
capabilities.getNow = old
getCapabilitiesNow = old
})
capabilities.getNow = f
getCapabilitiesNow = f
}
func TestCapabilities(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
url, capabilities := NewCapabilitiesForTest(t)
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
@ -201,8 +192,6 @@ func TestCapabilities(t *testing.T) {
}
func TestInvalidateCapabilities(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
var called atomic.Uint32
url, capabilities := NewCapabilitiesForTestWithCallback(t, func(cr *CapabilitiesResponse) {
called.Add(1)
@ -255,7 +244,7 @@ func TestInvalidateCapabilities(t *testing.T) {
}
// At a later time, invalidating can be done again.
SetCapabilitiesGetNow(t, capabilities, func() time.Time {
SetCapabilitiesGetNow(t, func() time.Time {
return time.Now().Add(2 * time.Minute)
})

View file

@ -66,11 +66,6 @@ func NewCertificateReloader(certFile string, keyFile string) (*CertificateReload
return reloader, nil
}
func (r *CertificateReloader) Close() {
r.keyWatcher.Close()
r.certWatcher.Close()
}
func (r *CertificateReloader) reload(filename string) {
log.Printf("reloading certificate from %s with %s", r.certFile, r.keyFile)
pair, err := tls.LoadX509KeyPair(r.certFile, r.keyFile)
@ -140,10 +135,6 @@ func NewCertPoolReloader(certFile string) (*CertPoolReloader, error) {
return reloader, nil
}
func (r *CertPoolReloader) Close() {
r.certWatcher.Close()
}
func (r *CertPoolReloader) reload(filename string) {
log.Printf("reloading certificate pool from %s", r.certFile)
pool, err := loadCertPool(r.certFile)

View file

@ -28,9 +28,6 @@ import (
)
func UpdateCertificateCheckIntervalForTest(t *testing.T, interval time.Duration) {
t.Helper()
// Make sure test is not executed with "t.Parallel()"
t.Setenv("PARALLEL_CHECK", "1")
old := deduplicateWatchEvents.Load()
t.Cleanup(func() {
deduplicateWatchEvents.Store(old)
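
The `t.Setenv` call that master adds here doubles as a guard: t.Setenv panics in tests that run with t.Parallel (or have a parallel ancestor), so setting a dummy variable turns an accidental parallel caller into an immediate failure. A sketch of the same trick as a reusable helper (names are illustrative):

package signaling

import "testing"

// requireSequential aborts tests that were (directly or via an ancestor)
// marked parallel, by exploiting the t.Setenv/t.Parallel incompatibility.
func requireSequential(t *testing.T) {
	t.Helper()
	t.Setenv("PARALLEL_CHECK", "1")
}

func TestMustRunSequentially(t *testing.T) {
	requireSequential(t)
	// ... test code that mutates process-global state safely ...
}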

client.go
View file

@ -23,11 +23,8 @@ package signaling
import (
"bytes"
"context"
"encoding/json"
"errors"
"log"
"net"
"strconv"
"strings"
"sync"
@ -95,49 +92,26 @@ type WritableClientMessage interface {
CloseAfterSend(session Session) bool
}
type HandlerClient interface {
Context() context.Context
RemoteAddr() string
Country() string
UserAgent() string
IsConnected() bool
IsAuthenticated() bool
GetSession() Session
SetSession(session Session)
SendError(e *Error) bool
SendByeResponse(message *ClientMessage) bool
SendByeResponseWithReason(message *ClientMessage, reason string) bool
SendMessage(message WritableClientMessage) bool
Close()
}
type ClientHandler interface {
OnClosed(HandlerClient)
OnMessageReceived(HandlerClient, []byte)
OnRTTReceived(HandlerClient, time.Duration)
OnClosed(*Client)
OnMessageReceived(*Client, []byte)
OnRTTReceived(*Client, time.Duration)
}
type ClientGeoIpHandler interface {
OnLookupCountry(HandlerClient) string
OnLookupCountry(*Client) string
}
type Client struct {
ctx context.Context
conn *websocket.Conn
addr string
handler ClientHandler
agent string
closed atomic.Int32
country *string
logRTT bool
handlerMu sync.RWMutex
handler ClientHandler
session atomic.Pointer[Session]
sessionId atomic.Pointer[string]
session atomic.Pointer[ClientSession]
mu sync.Mutex
@ -147,7 +121,7 @@ type Client struct {
messageChan chan *bytes.Buffer
}
func NewClient(ctx context.Context, conn *websocket.Conn, remoteAddress string, agent string, handler ClientHandler) (*Client, error) {
func NewClient(conn *websocket.Conn, remoteAddress string, agent string, handler ClientHandler) (*Client, error) {
remoteAddress = strings.TrimSpace(remoteAddress)
if remoteAddress == "" {
remoteAddress = "unknown remote address"
@ -158,7 +132,6 @@ func NewClient(ctx context.Context, conn *websocket.Conn, remoteAddress string,
}
client := &Client{
ctx: ctx,
agent: agent,
logRTT: true,
}
@ -169,28 +142,12 @@ func NewClient(ctx context.Context, conn *websocket.Conn, remoteAddress string,
func (c *Client) SetConn(conn *websocket.Conn, remoteAddress string, handler ClientHandler) {
c.conn = conn
c.addr = remoteAddress
c.SetHandler(handler)
c.handler = handler
c.closer = NewCloser()
c.messageChan = make(chan *bytes.Buffer, 16)
c.messagesDone = make(chan struct{})
}
func (c *Client) SetHandler(handler ClientHandler) {
c.handlerMu.Lock()
defer c.handlerMu.Unlock()
c.handler = handler
}
func (c *Client) getHandler() ClientHandler {
c.handlerMu.RLock()
defer c.handlerMu.RUnlock()
return c.handler
}
func (c *Client) Context() context.Context {
return c.ctx
}
func (c *Client) IsConnected() bool {
return c.closed.Load() == 0
}
@ -199,39 +156,12 @@ func (c *Client) IsAuthenticated() bool {
return c.GetSession() != nil
}
func (c *Client) GetSession() Session {
session := c.session.Load()
if session == nil {
return nil
}
return *session
func (c *Client) GetSession() *ClientSession {
return c.session.Load()
}
func (c *Client) SetSession(session Session) {
if session == nil {
c.session.Store(nil)
} else {
c.session.Store(&session)
}
}
func (c *Client) SetSessionId(sessionId string) {
c.sessionId.Store(&sessionId)
}
func (c *Client) GetSessionId() string {
sessionId := c.sessionId.Load()
if sessionId == nil {
session := c.GetSession()
if session == nil {
return ""
}
return session.PublicId()
}
return *sessionId
func (c *Client) SetSession(session *ClientSession) {
c.session.Store(session)
}
func (c *Client) RemoteAddr() string {
@ -245,7 +175,7 @@ func (c *Client) UserAgent() string {
func (c *Client) Country() string {
if c.country == nil {
var country string
if handler, ok := c.getHandler().(ClientGeoIpHandler); ok {
if handler, ok := c.handler.(ClientGeoIpHandler); ok {
country = handler.OnLookupCountry(c)
} else {
country = unknownCountry
@ -284,7 +214,7 @@ func (c *Client) doClose() {
c.closer.Close()
<-c.messagesDone
c.getHandler().OnClosed(c)
c.handler.OnClosed(c)
c.SetSession(nil)
}
}
@ -304,14 +234,12 @@ func (c *Client) SendByeResponse(message *ClientMessage) bool {
func (c *Client) SendByeResponseWithReason(message *ClientMessage, reason string) bool {
response := &ServerMessage{
Type: "bye",
Bye: &ByeServerMessage{},
}
if message != nil {
response.Id = message.Id
}
if reason != "" {
if response.Bye == nil {
response.Bye = &ByeServerMessage{}
}
response.Bye.Reason = reason
}
return c.SendMessage(response)
@ -349,13 +277,13 @@ func (c *Client) ReadPump() {
rtt := now.Sub(time.Unix(0, ts))
if c.logRTT {
rtt_ms := rtt.Nanoseconds() / time.Millisecond.Nanoseconds()
if sessionId := c.GetSessionId(); sessionId != "" {
log.Printf("Client %s has RTT of %d ms (%s)", sessionId, rtt_ms, rtt)
if session := c.GetSession(); session != nil {
log.Printf("Client %s has RTT of %d ms (%s)", session.PublicId(), rtt_ms, rtt)
} else {
log.Printf("Client from %s has RTT of %d ms (%s)", addr, rtt_ms, rtt)
}
}
c.getHandler().OnRTTReceived(c, rtt)
c.handler.OnRTTReceived(c, rtt)
}
return nil
})
@ -364,15 +292,12 @@ func (c *Client) ReadPump() {
conn.SetReadDeadline(time.Now().Add(pongWait)) // nolint
messageType, reader, err := conn.NextReader()
if err != nil {
// Gorilla websocket hides the original net.Error, so also compare error messages
if errors.Is(err, net.ErrClosed) || strings.Contains(err.Error(), net.ErrClosed.Error()) {
break
} else if _, ok := err.(*websocket.CloseError); !ok || websocket.IsUnexpectedCloseError(err,
if _, ok := err.(*websocket.CloseError); !ok || websocket.IsUnexpectedCloseError(err,
websocket.CloseNormalClosure,
websocket.CloseGoingAway,
websocket.CloseNoStatusReceived) {
if sessionId := c.GetSessionId(); sessionId != "" {
log.Printf("Error reading from client %s: %v", sessionId, err)
if session := c.GetSession(); session != nil {
log.Printf("Error reading from client %s: %v", session.PublicId(), err)
} else {
log.Printf("Error reading from %s: %v", addr, err)
}
@ -381,8 +306,8 @@ func (c *Client) ReadPump() {
}
if messageType != websocket.TextMessage {
if sessionId := c.GetSessionId(); sessionId != "" {
log.Printf("Unsupported message type %v from client %s", messageType, sessionId)
if session := c.GetSession(); session != nil {
log.Printf("Unsupported message type %v from client %s", messageType, session.PublicId())
} else {
log.Printf("Unsupported message type %v from %s", messageType, addr)
}
@ -394,8 +319,8 @@ func (c *Client) ReadPump() {
decodeBuffer.Reset()
if _, err := decodeBuffer.ReadFrom(reader); err != nil {
bufferPool.Put(decodeBuffer)
if sessionId := c.GetSessionId(); sessionId != "" {
log.Printf("Error reading message from client %s: %v", sessionId, err)
if session := c.GetSession(); session != nil {
log.Printf("Error reading message from client %s: %v", session.PublicId(), err)
} else {
log.Printf("Error reading message from %s: %v", addr, err)
}
@ -419,7 +344,7 @@ func (c *Client) processMessages() {
break
}
c.getHandler().OnMessageReceived(c, buffer.Bytes())
c.handler.OnMessageReceived(c, buffer.Bytes())
bufferPool.Put(buffer)
}
@ -448,8 +373,8 @@ func (c *Client) writeInternal(message json.Marshaler) bool {
return false
}
if sessionId := c.GetSessionId(); sessionId != "" {
log.Printf("Could not send message %+v to client %s: %v", message, sessionId, err)
if session := c.GetSession(); session != nil {
log.Printf("Could not send message %+v to client %s: %v", message, session.PublicId(), err)
} else {
log.Printf("Could not send message %+v to %s: %v", message, c.RemoteAddr(), err)
}
@ -461,8 +386,8 @@ func (c *Client) writeInternal(message json.Marshaler) bool {
close:
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
if err := c.conn.WriteMessage(websocket.CloseMessage, closeData); err != nil {
if sessionId := c.GetSessionId(); sessionId != "" {
log.Printf("Could not send close message to client %s: %v", sessionId, err)
if session := c.GetSession(); session != nil {
log.Printf("Could not send close message to client %s: %v", session.PublicId(), err)
} else {
log.Printf("Could not send close message to %s: %v", c.RemoteAddr(), err)
}
@ -488,8 +413,8 @@ func (c *Client) writeError(e error) bool { // nolint
closeData := websocket.FormatCloseMessage(websocket.CloseInternalServerErr, e.Error())
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
if err := c.conn.WriteMessage(websocket.CloseMessage, closeData); err != nil {
if sessionId := c.GetSessionId(); sessionId != "" {
log.Printf("Could not send close message to client %s: %v", sessionId, err)
if session := c.GetSession(); session != nil {
log.Printf("Could not send close message to client %s: %v", session.PublicId(), err)
} else {
log.Printf("Could not send close message to %s: %v", c.RemoteAddr(), err)
}
@ -520,6 +445,7 @@ func (c *Client) writeMessageLocked(message WritableClientMessage) bool {
go session.Close()
}
go c.Close()
return false
}
return true
@ -536,8 +462,8 @@ func (c *Client) sendPing() bool {
msg := strconv.FormatInt(now, 10)
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
if err := c.conn.WriteMessage(websocket.PingMessage, []byte(msg)); err != nil {
if sessionId := c.GetSessionId(); sessionId != "" {
log.Printf("Could not send ping to client %s: %v", sessionId, err)
if session := c.GetSession(); session != nil {
log.Printf("Could not send ping to client %s: %v", session.PublicId(), err)
} else {
log.Printf("Could not send ping to %s: %v", c.RemoteAddr(), err)
}
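
The read loop on master additionally treats net.ErrClosed as a silent shutdown, falling back to a string comparison because gorilla/websocket does not always wrap the original error. A self-contained sketch of that check:

package main

import (
	"errors"
	"fmt"
	"net"
	"strings"
)

// isConnClosed prefers errors.Is, but also matches the error text for
// libraries that return errors which no longer wrap net.ErrClosed.
func isConnClosed(err error) bool {
	if err == nil {
		return false
	}
	return errors.Is(err, net.ErrClosed) ||
		strings.Contains(err.Error(), net.ErrClosed.Error())
}

func main() {
	wrapped := fmt.Errorf("read failed: %w", net.ErrClosed)
	flattened := errors.New("websocket: use of closed network connection")
	fmt.Println(isConnClosed(wrapped))   // true via errors.Is
	fmt.Println(isConnClosed(flattened)) // true via the string fallback
}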

View file

@ -248,7 +248,7 @@ func (c *SignalingClient) PublicSessionId() string {
func (c *SignalingClient) processMessageMessage(message *signaling.ServerMessage) {
var msg MessagePayload
if err := json.Unmarshal(message.Message.Data, &msg); err != nil {
if err := json.Unmarshal(*message.Message.Data, &msg); err != nil {
log.Println("Error in unmarshal", err)
return
}
@ -404,7 +404,7 @@ func (c *SignalingClient) SendMessages(clients []*SignalingClient) {
Type: "session",
SessionId: sessionIds[recipient],
},
Data: data,
Data: (*json.RawMessage)(&data),
},
}
sender.Send(msg)
@ -461,7 +461,7 @@ func registerAuthHandler(router *mux.Router) {
StatusCode: http.StatusOK,
Message: http.StatusText(http.StatusOK),
},
Data: rawdata,
Data: &rawdata,
},
}
@ -601,9 +601,9 @@ func main() {
Type: "hello",
Hello: &signaling.HelloClientMessage{
Version: signaling.HelloVersionV1,
Auth: &signaling.HelloClientMessageAuth{
Auth: signaling.HelloClientMessageAuth{
Url: backendUrl + "/auth",
Params: json.RawMessage("{}"),
Params: &json.RawMessage{'{', '}'},
},
},
}

View file

@ -36,6 +36,9 @@ import (
)
var (
// Sessions expire 30 seconds after the connection closed.
sessionExpireDuration = 30 * time.Second
// Warn if a session has 32 or more pending messages.
warnPendingMessagesCount = 32
@ -51,13 +54,11 @@ type ClientSession struct {
privateId string
publicId string
data *SessionIdData
ctx context.Context
closeFunc context.CancelFunc
clientType string
features []string
userId string
userData json.RawMessage
userData *json.RawMessage
inCall Flags
supportsPermissions bool
@ -67,14 +68,14 @@ type ClientSession struct {
backendUrl string
parsedBackendUrl *url.URL
expires time.Time
mu sync.Mutex
client HandlerClient
room atomic.Pointer[Room]
roomJoinTime atomic.Int64
roomSessionIdLock sync.RWMutex
roomSessionId string
client *Client
room atomic.Pointer[Room]
roomJoinTime atomic.Int64
roomSessionId string
publisherWaiters ChannelWaiters
@ -95,15 +96,12 @@ type ClientSession struct {
}
func NewClientSession(hub *Hub, privateId string, publicId string, data *SessionIdData, backend *Backend, hello *HelloClientMessage, auth *BackendClientAuthResponse) (*ClientSession, error) {
ctx, closeFunc := context.WithCancel(context.Background())
s := &ClientSession{
hub: hub,
events: hub.events,
privateId: privateId,
publicId: publicId,
data: data,
ctx: ctx,
closeFunc: closeFunc,
clientType: hello.Auth.Type,
features: hello.Features,
@ -147,10 +145,6 @@ func NewClientSession(hub *Hub, privateId string, publicId string, data *Session
return s, nil
}
func (s *ClientSession) Context() context.Context {
return s.ctx
}
func (s *ClientSession) PrivateId() string {
return s.privateId
}
@ -160,8 +154,8 @@ func (s *ClientSession) PublicId() string {
}
func (s *ClientSession) RoomSessionId() string {
s.roomSessionIdLock.RLock()
defer s.roomSessionIdLock.RUnlock()
s.mu.Lock()
defer s.mu.Unlock()
return s.roomSessionId
}
@ -315,10 +309,25 @@ func (s *ClientSession) UserId() string {
return userId
}
func (s *ClientSession) UserData() json.RawMessage {
func (s *ClientSession) UserData() *json.RawMessage {
return s.userData
}
func (s *ClientSession) StartExpire() {
// The hub mutex must be held when calling this method.
s.expires = time.Now().Add(sessionExpireDuration)
s.hub.expiredSessions[s] = true
}
func (s *ClientSession) StopExpire() {
// The hub mutex must be held when calling this method.
delete(s.hub.expiredSessions, s)
}
func (s *ClientSession) IsExpired(now time.Time) bool {
return now.After(s.expires)
}
func (s *ClientSession) SetRoom(room *Room) {
s.room.Store(room)
if room != nil {
@ -348,7 +357,7 @@ func (s *ClientSession) getRoomJoinTime() time.Time {
func (s *ClientSession) releaseMcuObjects() {
if len(s.publishers) > 0 {
go func(publishers map[StreamType]McuPublisher) {
ctx := context.Background()
ctx := context.TODO()
for _, publisher := range publishers {
publisher.Close(ctx)
}
@ -357,7 +366,7 @@ func (s *ClientSession) releaseMcuObjects() {
}
if len(s.subscribers) > 0 {
go func(subscribers map[string]McuSubscriber) {
ctx := context.Background()
ctx := context.TODO()
for _, subscriber := range subscribers {
subscriber.Close(ctx)
}
@ -371,7 +380,6 @@ func (s *ClientSession) Close() {
}
func (s *ClientSession) closeAndWait(wait bool) {
s.closeFunc()
s.hub.removeSession(s)
s.mu.Lock()
@ -405,8 +413,8 @@ func (s *ClientSession) SubscribeEvents() error {
}
func (s *ClientSession) UpdateRoomSessionId(roomSessionId string) error {
s.roomSessionIdLock.Lock()
defer s.roomSessionIdLock.Unlock()
s.mu.Lock()
defer s.mu.Unlock()
if s.roomSessionId == roomSessionId {
return nil
@ -435,8 +443,8 @@ func (s *ClientSession) UpdateRoomSessionId(roomSessionId string) error {
}
func (s *ClientSession) SubscribeRoomEvents(roomid string, roomSessionId string) error {
s.roomSessionIdLock.Lock()
defer s.roomSessionIdLock.Unlock()
s.mu.Lock()
defer s.mu.Unlock()
if err := s.events.RegisterRoomListener(roomid, s.backend, s); err != nil {
return err
@ -495,9 +503,6 @@ func (s *ClientSession) doUnsubscribeRoomEvents(notify bool) {
s.events.UnregisterRoomListener(room.Id(), s.Backend(), s)
}
s.hub.roomSessions.DeleteRoomSession(s)
s.roomSessionIdLock.Lock()
defer s.roomSessionIdLock.Unlock()
if notify && room != nil && s.roomSessionId != "" {
// Notify
go func(sid string) {
@ -515,14 +520,14 @@ func (s *ClientSession) doUnsubscribeRoomEvents(notify bool) {
s.roomSessionId = ""
}
func (s *ClientSession) ClearClient(client HandlerClient) {
func (s *ClientSession) ClearClient(client *Client) {
s.mu.Lock()
defer s.mu.Unlock()
s.clearClientLocked(client)
}
func (s *ClientSession) clearClientLocked(client HandlerClient) {
func (s *ClientSession) clearClientLocked(client *Client) {
if s.client == nil {
return
} else if client != nil && s.client != client {
@ -535,18 +540,18 @@ func (s *ClientSession) clearClientLocked(client HandlerClient) {
prevClient.SetSession(nil)
}
func (s *ClientSession) GetClient() HandlerClient {
func (s *ClientSession) GetClient() *Client {
s.mu.Lock()
defer s.mu.Unlock()
return s.getClientUnlocked()
}
func (s *ClientSession) getClientUnlocked() HandlerClient {
func (s *ClientSession) getClientUnlocked() *Client {
return s.client
}
func (s *ClientSession) SetClient(client HandlerClient) HandlerClient {
func (s *ClientSession) SetClient(client *Client) *Client {
if client == nil {
panic("Use ClearClient to set the client to nil")
}
@ -589,7 +594,7 @@ func (s *ClientSession) sendOffer(client McuClient, sender string, streamType St
Type: "session",
SessionId: sender,
},
Data: offer_data,
Data: (*json.RawMessage)(&offer_data),
},
}
@ -619,7 +624,7 @@ func (s *ClientSession) sendCandidate(client McuClient, sender string, streamTyp
Type: "session",
SessionId: sender,
},
Data: candidate_data,
Data: (*json.RawMessage)(&candidate_data),
},
}
@ -735,6 +740,23 @@ func (s *ClientSession) SubscriberClosed(subscriber McuSubscriber) {
}
}
type SdpError struct {
message string
}
func (e *SdpError) Error() string {
return e.message
}
type WrappedSdpError struct {
SdpError
err error
}
func (e *WrappedSdpError) Unwrap() error {
return e.err
}
type PermissionError struct {
permission Permission
}
@ -747,10 +769,23 @@ func (e *PermissionError) Error() string {
return fmt.Sprintf("permission \"%s\" not found", e.permission)
}
func (s *ClientSession) isSdpAllowedToSendLocked(sdp *sdp.SessionDescription) (MediaType, error) {
if sdp == nil {
// Should have already been checked when data was validated.
return 0, ErrNoSdp
func (s *ClientSession) isSdpAllowedToSendLocked(payload map[string]interface{}) (MediaType, error) {
sdpValue, found := payload["sdp"]
if !found {
return 0, &SdpError{"payload does not contain a sdp"}
}
sdpText, ok := sdpValue.(string)
if !ok {
return 0, &SdpError{"payload does not contain a valid sdp"}
}
var sdp sdp.SessionDescription
if err := sdp.Unmarshal([]byte(sdpText)); err != nil {
return 0, &WrappedSdpError{
SdpError: SdpError{
message: fmt.Sprintf("could not parse sdp: %s", err),
},
err: err,
}
}
var mediaTypes MediaType
@ -788,8 +823,8 @@ func (s *ClientSession) IsAllowedToSend(data *MessageClientMessageData) error {
// Client is allowed to publish any media (audio / video).
return nil
} else if data != nil && data.Type == "offer" {
// Check what user is trying to publish and check permissions accordingly.
if _, err := s.isSdpAllowedToSendLocked(data.offerSdp); err != nil {
// Parse SDP to check what user is trying to publish and check permissions accordingly.
if _, err := s.isSdpAllowedToSendLocked(data.Payload); err != nil {
return err
}
@ -819,7 +854,7 @@ func (s *ClientSession) checkOfferTypeLocked(streamType StreamType, data *Messag
return MediaTypeScreen, nil
} else if data != nil && data.Type == "offer" {
mediaTypes, err := s.isSdpAllowedToSendLocked(data.offerSdp)
mediaTypes, err := s.isSdpAllowedToSendLocked(data.Payload)
if err != nil {
return 0, err
}
@ -870,7 +905,7 @@ func (s *ClientSession) GetOrCreatePublisher(ctx context.Context, mcu Mcu, strea
if prev, found := s.publishers[streamType]; found {
// Another thread created the publisher while we were waiting.
go func(pub McuPublisher) {
closeCtx := context.Background()
closeCtx := context.TODO()
pub.Close(closeCtx)
}(publisher)
publisher = prev
@ -934,10 +969,9 @@ func (s *ClientSession) GetOrCreateSubscriber(ctx context.Context, mcu Mcu, id s
subscriber, found := s.subscribers[getStreamId(id, streamType)]
if !found {
client := s.getClientUnlocked()
s.mu.Unlock()
var err error
subscriber, err = mcu.NewSubscriber(ctx, s, id, streamType, client)
subscriber, err = mcu.NewSubscriber(ctx, s, id, streamType)
s.mu.Lock()
if err != nil {
return nil, err
@ -948,7 +982,7 @@ func (s *ClientSession) GetOrCreateSubscriber(ctx context.Context, mcu Mcu, id s
if prev, found := s.subscribers[getStreamId(id, streamType)]; found {
// Another thread created the subscriber while we were waiting.
go func(sub McuSubscriber) {
closeCtx := context.Background()
closeCtx := context.TODO()
sub.Close(closeCtx)
}(subscriber)
subscriber = prev
@ -1022,7 +1056,7 @@ func (s *ClientSession) processAsyncMessage(message *AsyncMessage) {
case "sendoffer":
// Process asynchronously to not block other messages received.
go func() {
ctx, cancel := context.WithTimeout(s.Context(), s.hub.mcuTimeout)
ctx, cancel := context.WithTimeout(context.Background(), s.hub.mcuTimeout)
defer cancel()
mc, err := s.GetOrCreateSubscriber(ctx, s.hub.mcu, message.SendOffer.SessionId, StreamType(message.SendOffer.Data.RoomType))
@ -1054,7 +1088,7 @@ func (s *ClientSession) processAsyncMessage(message *AsyncMessage) {
return
}
mc.SendMessage(s.Context(), nil, message.SendOffer.Data, func(err error, response map[string]interface{}) {
mc.SendMessage(context.TODO(), nil, message.SendOffer.Data, func(err error, response map[string]interface{}) {
if err != nil {
log.Printf("Could not send MCU message %+v for session %s to %s: %s", message.SendOffer.Data, message.SendOffer.SessionId, s.PublicId(), err)
if err := s.events.PublishSessionMessage(message.SendOffer.SessionId, s.backend, &AsyncMessage{
@ -1112,13 +1146,13 @@ func (s *ClientSession) storePendingMessage(message *ServerMessage) {
func filterDisplayNames(events []*EventServerMessageSessionEntry) []*EventServerMessageSessionEntry {
result := make([]*EventServerMessageSessionEntry, 0, len(events))
for _, event := range events {
if len(event.User) == 0 {
if event.User == nil {
result = append(result, event)
continue
}
var userdata map[string]interface{}
if err := json.Unmarshal(event.User, &userdata); err != nil {
if err := json.Unmarshal(*event.User, &userdata); err != nil {
result = append(result, event)
continue
}
@ -1144,7 +1178,7 @@ func filterDisplayNames(events []*EventServerMessageSessionEntry) []*EventServer
}
e := event.Clone()
e.User = data
e.User = (*json.RawMessage)(&data)
result = append(result, e)
}
return result
@ -1239,12 +1273,12 @@ func (s *ClientSession) filterMessage(message *ServerMessage) *ServerMessage {
delete(s.seenJoinedEvents, e)
}
case "message":
if message.Event.Message == nil || len(message.Event.Message.Data) == 0 || !s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) {
if message.Event.Message == nil || message.Event.Message.Data == nil || len(*message.Event.Message.Data) == 0 || !s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) {
return message
}
var data RoomEventMessageData
if err := json.Unmarshal(message.Event.Message.Data, &data); err != nil {
if err := json.Unmarshal(*message.Event.Message.Data, &data); err != nil {
return message
}
@ -1261,7 +1295,7 @@ func (s *ClientSession) filterMessage(message *ServerMessage) *ServerMessage {
Target: message.Event.Target,
Message: &RoomEventMessage{
RoomId: message.Event.Message.RoomId,
Data: encoded,
Data: (*json.RawMessage)(&encoded),
},
},
}
@ -1271,9 +1305,9 @@ func (s *ClientSession) filterMessage(message *ServerMessage) *ServerMessage {
}
}
case "message":
if message.Message != nil && len(message.Message.Data) > 0 && s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) {
if message.Message != nil && message.Message.Data != nil && len(*message.Message.Data) > 0 && s.HasPermission(PERMISSION_HIDE_DISPLAYNAMES) {
var data MessageServerMessageData
if err := json.Unmarshal(message.Message.Data, &data); err != nil {
if err := json.Unmarshal(*message.Message.Data, &data); err != nil {
return message
}
@ -1327,7 +1361,7 @@ func (s *ClientSession) filterAsyncMessage(msg *AsyncMessage) *ServerMessage {
}
}
func (s *ClientSession) NotifySessionResumed(client HandlerClient) {
func (s *ClientSession) NotifySessionResumed(client *Client) {
s.mu.Lock()
if len(s.pendingClientMessages) == 0 {
s.mu.Unlock()
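
On the v1.2.4 side, `isSdpAllowedToSendLocked` extracts the SDP from the message payload and parses it to see which media sections the client wants to publish (master moves the parsing into message validation and passes the parsed `offerSdp` in). A minimal sketch of the media-type extraction, assuming the pion/sdp/v3 package; the sample offer is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/pion/sdp/v3"
)

// mediaKinds parses an SDP offer and reports which media sections it
// contains, so permissions can be checked before the offer is forwarded.
func mediaKinds(raw string) (map[string]bool, error) {
	var desc sdp.SessionDescription
	if err := desc.Unmarshal([]byte(raw)); err != nil {
		return nil, fmt.Errorf("could not parse sdp: %w", err)
	}
	kinds := make(map[string]bool)
	for _, m := range desc.MediaDescriptions {
		kinds[m.MediaName.Media] = true // "audio", "video", "application", ...
	}
	return kinds, nil
}

func main() {
	const offer = "v=0\r\n" +
		"o=- 0 0 IN IP4 127.0.0.1\r\n" +
		"s=-\r\n" +
		"t=0 0\r\n" +
		"m=audio 9 UDP/TLS/RTP/SAVPF 111\r\n"
	kinds, err := mediaKinds(offer)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(kinds["audio"], kinds["video"]) // true false
}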

View file

@ -117,7 +117,6 @@ func Test_permissionsEqual(t *testing.T) {
for idx, test := range tests {
test := test
t.Run(strconv.Itoa(idx), func(t *testing.T) {
t.Parallel()
equal := permissionsEqual(test.a, test.b)
if equal != test.equal {
t.Errorf("Expected %+v to be %s to %+v but was %s", test.a, equalStrings[test.equal], test.b, equalStrings[equal])
@ -127,17 +126,12 @@ func Test_permissionsEqual(t *testing.T) {
}
func TestBandwidth_Client(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
hub, _, _, server := CreateHubForTest(t)
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
mcu, err := NewTestMCU()
if err != nil {
t.Fatal(err)
} else if err := mcu.Start(ctx); err != nil {
} else if err := mcu.Start(); err != nil {
t.Fatal(err)
}
defer mcu.Stop()
@ -151,6 +145,9 @@ func TestBandwidth_Client(t *testing.T) {
t.Fatal(err)
}
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
hello, err := client.RunUntilHello(ctx)
if err != nil {
t.Fatal(err)
@ -201,8 +198,6 @@ func TestBandwidth_Client(t *testing.T) {
}
func TestBandwidth_Backend(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
hub, _, _, server := CreateHubWithMultipleBackendsForTest(t)
u, err := url.Parse(server.URL + "/one")
@ -217,13 +212,10 @@ func TestBandwidth_Backend(t *testing.T) {
backend.maxScreenBitrate = 1000
backend.maxStreamBitrate = 2000
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
mcu, err := NewTestMCU()
if err != nil {
t.Fatal(err)
} else if err := mcu.Start(ctx); err != nil {
} else if err := mcu.Start(); err != nil {
t.Fatal(err)
}
defer mcu.Stop()
@ -235,6 +227,9 @@ func TestBandwidth_Backend(t *testing.T) {
StreamTypeScreen,
}
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
for _, streamType := range streamTypes {
t.Run(string(streamType), func(t *testing.T) {
client := NewTestClient(t, server, hub)

View file

@ -23,40 +23,10 @@ package signaling
import (
"errors"
"os"
"regexp"
"github.com/dlintw/goconf"
)
var (
searchVarsRegexp = regexp.MustCompile(`\$\([A-Za-z][A-Za-z0-9_]*\)`)
)
func replaceEnvVars(s string) string {
return searchVarsRegexp.ReplaceAllStringFunc(s, func(name string) string {
name = name[2 : len(name)-1]
value, found := os.LookupEnv(name)
if !found {
return name
}
return value
})
}
// GetStringOptionWithEnv will get the string option and resolve any environment
// variable references in the form "$(VAR)".
func GetStringOptionWithEnv(config *goconf.ConfigFile, section string, option string) (string, error) {
value, err := config.GetString(section, option)
if err != nil {
return "", err
}
value = replaceEnvVars(value)
return value, nil
}
func GetStringOptions(config *goconf.ConfigFile, section string, ignoreErrors bool) (map[string]string, error) {
options, _ := config.GetOptions(section)
if len(options) == 0 {
@ -65,7 +35,7 @@ func GetStringOptions(config *goconf.ConfigFile, section string, ignoreErrors bo
result := make(map[string]string)
for _, option := range options {
value, err := GetStringOptionWithEnv(config, section, option)
value, err := config.GetString(section, option)
if err != nil {
if ignoreErrors {
continue
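
The behavior of the `GetStringOptionWithEnv` helper (present on master, dropped in v1.2.4) is easiest to see in isolation; this self-contained version reproduces the corner cases the deleted test below exercised: unset variables degrade to the bare name, and malformed references pass through unchanged.

package main

import (
	"fmt"
	"os"
	"regexp"
)

var searchVarsRegexp = regexp.MustCompile(`\$\([A-Za-z][A-Za-z0-9_]*\)`)

// replaceEnvVars resolves well-formed "$(NAME)" references against the
// environment; anything the regexp does not match is left untouched.
func replaceEnvVars(s string) string {
	return searchVarsRegexp.ReplaceAllStringFunc(s, func(name string) string {
		name = name[2 : len(name)-1] // strip "$(" and ")"
		value, found := os.LookupEnv(name)
		if !found {
			return name
		}
		return value
	})
}

func main() {
	os.Setenv("FOO", "foo")
	fmt.Println(replaceEnvVars("http://$(FOO)/1")) // http://foo/1
	fmt.Println(replaceEnvVars("http://$(BAZ)/4")) // http://BAZ/4 - unset, bare name
	fmt.Println(replaceEnvVars("http://$(F.OO)"))  // unchanged - not a valid reference
}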

View file

@ -29,19 +29,13 @@ import (
)
func TestStringOptions(t *testing.T) {
t.Setenv("FOO", "foo")
expected := map[string]string{
"one": "1",
"two": "2",
"foo": "http://foo/1",
}
config := goconf.NewConfigFile()
for k, v := range expected {
if k == "foo" {
config.AddOption("foo", k, "http://$(FOO)/1")
} else {
config.AddOption("foo", k, v)
}
config.AddOption("foo", k, v)
}
config.AddOption("default", "three", "3")
@ -54,39 +48,3 @@ func TestStringOptions(t *testing.T) {
t.Errorf("expected %+v, got %+v", expected, options)
}
}
func TestStringOptionWithEnv(t *testing.T) {
t.Setenv("FOO", "foo")
t.Setenv("BAR", "")
t.Setenv("BA_R", "bar")
config := goconf.NewConfigFile()
config.AddOption("test", "foo", "http://$(FOO)/1")
config.AddOption("test", "bar", "http://$(BAR)/2")
config.AddOption("test", "bar2", "http://$(BA_R)/3")
config.AddOption("test", "baz", "http://$(BAZ)/4")
config.AddOption("test", "inv1", "http://$(FOO")
config.AddOption("test", "inv2", "http://$FOO)")
config.AddOption("test", "inv3", "http://$((FOO)")
config.AddOption("test", "inv4", "http://$(F.OO)")
expected := map[string]string{
"foo": "http://foo/1",
"bar": "http:///2",
"bar2": "http://bar/3",
"baz": "http://BAZ/4",
"inv1": "http://$(FOO",
"inv2": "http://$FOO)",
"inv3": "http://$((FOO)",
"inv4": "http://$(F.OO)",
}
for k, v := range expected {
value, err := GetStringOptionWithEnv(config, "test", k)
if err != nil {
t.Errorf("expected value for %s, got %s", k, err)
} else if value != v {
t.Errorf("expected value %s for %s, got %s", v, k, value)
}
}
}

View file

@ -35,7 +35,6 @@ func TestDeferredExecutor_MultiClose(t *testing.T) {
}
func TestDeferredExecutor_QueueSize(t *testing.T) {
t.Parallel()
e := NewDeferredExecutor(0)
defer e.waitForStop()
defer e.Close()
@ -101,7 +100,6 @@ func TestDeferredExecutor_CloseFromFunc(t *testing.T) {
}
func TestDeferredExecutor_DeferAfterClose(t *testing.T) {
CatchLogForTest(t)
e := NewDeferredExecutor(64)
defer e.waitForStop()

View file

@ -55,7 +55,6 @@ The running container can be configured through different environment variables:
- `GEOIP_OVERRIDES`: Optional space-separated list of overrides for GeoIP lookups.
- `CONTINENT_OVERRIDES`: Optional space-separated list of overrides for continent mappings.
- `STATS_IPS`: Comma-separated list of IP addresses that are allowed to access the stats endpoint.
- `TRUSTED_PROXIES`: Comma-separated list of IPs / networks that are trusted proxies.
- `GRPC_LISTEN`: IP and port to listen on for GRPC requests.
- `GRPC_SERVER_CERTIFICATE`: Certificate to use for the GRPC server.
- `GRPC_SERVER_KEY`: Private key to use for the GRPC server.
@ -100,16 +99,9 @@ The running container can be configured through different environment variables:
- `CONFIG`: Optional name of configuration file to use.
- `HTTP_LISTEN`: Address of HTTP listener.
- `COUNTRY`: Optional ISO 3166 country this proxy is located at.
- `EXTERNAL_HOSTNAME`: The external hostname for remote streams. Will try to autodetect if omitted.
- `TOKEN_ID`: Id of the token to use when connecting remote streams.
- `TOKEN_KEY`: Private key for the configured token id.
- `BANDWIDTH_INCOMING`: Optional incoming target bandwidth (in megabits per second).
- `BANDWIDTH_OUTGOING`: Optional outgoing target bandwidth (in megabits per second).
- `JANUS_URL`: URL of the Janus server.
- `MAX_STREAM_BITRATE`: Optional maximum bitrate for audio/video streams.
- `MAX_SCREEN_BITRATE`: Optional maximum bitrate for screensharing streams.
- `STATS_IPS`: Comma-separated list of IP addresses that are allowed to access the stats endpoint.
- `TRUSTED_PROXIES`: Comma-separated list of IPs / networks that are trusted proxies.
- `ETCD_ENDPOINTS`: Static list of etcd endpoints (if etcd should be used).
- `ETCD_DISCOVERY_SRV`: Alternative domain to use for DNS SRV configuration of etcd endpoints (if etcd should be used).
- `ETCD_DISCOVERY_SERVICE`: Optional service name for DNS SRV configuration of etcd.

View file

@ -1,5 +1,5 @@
# Modified from https://gitlab.com/powerpaul17/nc_talk_backend/-/blob/dcbb918d8716dad1eb72a889d1e6aa1e3a543641/docker/janus/Dockerfile
FROM alpine:3.20
FROM alpine:3.19
RUN apk add --no-cache curl autoconf automake libtool pkgconf build-base \
glib-dev libconfig-dev libnice-dev jansson-dev openssl-dev zlib libsrtp-dev \
@ -15,30 +15,30 @@ RUN cd /tmp && \
git checkout $USRSCTP_VERSION && \
./bootstrap && \
./configure --prefix=/usr && \
make -j$(nproc) && make install
make && make install
# libsrtp
ARG LIBSRTP_VERSION=2.6.0
ARG LIBSRTP_VERSION=2.4.2
RUN cd /tmp && \
wget https://github.com/cisco/libsrtp/archive/v$LIBSRTP_VERSION.tar.gz && \
tar xfv v$LIBSRTP_VERSION.tar.gz && \
cd libsrtp-$LIBSRTP_VERSION && \
./configure --prefix=/usr --enable-openssl && \
make shared_library -j$(nproc) && \
make shared_library && \
make install && \
rm -fr /libsrtp-$LIBSRTP_VERSION && \
rm -f /v$LIBSRTP_VERSION.tar.gz
# JANUS
ARG JANUS_VERSION=1.2.2
ARG JANUS_VERSION=0.14.1
RUN mkdir -p /usr/src/janus && \
cd /usr/src/janus && \
curl -L https://github.com/meetecho/janus-gateway/archive/v$JANUS_VERSION.tar.gz | tar -xz && \
cd /usr/src/janus/janus-gateway-$JANUS_VERSION && \
./autogen.sh && \
./configure --disable-rabbitmq --disable-mqtt --disable-boringssl && \
make -j$(nproc) && \
make && \
make install && \
make configs

View file

@ -7,7 +7,8 @@ WORKDIR /workdir
COPY . .
RUN touch /.dockerenv && \
apk add --no-cache bash git build-base protobuf && \
GOOS=${TARGETOS} GOARCH=${TARGETARCH} make proxy
if [ -d "vendor" ]; then GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOPROXY=off make proxy; else \
GOOS=${TARGETOS} GOARCH=${TARGETARCH} make proxy; fi
FROM alpine:3

View file

@ -44,22 +44,6 @@ if [ ! -f "$CONFIG" ]; then
sed -i "s|#country =.*|country = $COUNTRY|" "$CONFIG"
fi
if [ -n "$EXTERNAL_HOSTNAME" ]; then
sed -i "s|#hostname =.*|hostname = $EXTERNAL_HOSTNAME|" "$CONFIG"
fi
if [ -n "$TOKEN_ID" ]; then
sed -i "s|#token_id =.*|token_id = $TOKEN_ID|" "$CONFIG"
fi
if [ -n "$TOKEN_KEY" ]; then
sed -i "s|#token_key =.*|token_key = $TOKEN_KEY|" "$CONFIG"
fi
if [ -n "$BANDWIDTH_INCOMING" ]; then
sed -i "s|#incoming =.*|incoming = $BANDWIDTH_INCOMING|" "$CONFIG"
fi
if [ -n "$BANDWIDTH_OUTGOING" ]; then
sed -i "s|#outgoing =.*|outgoing = $BANDWIDTH_OUTGOING|" "$CONFIG"
fi
HAS_ETCD=
if [ -n "$ETCD_ENDPOINTS" ]; then
sed -i "s|#endpoints =.*|endpoints = $ETCD_ENDPOINTS|" "$CONFIG"
@ -125,10 +109,6 @@ if [ ! -f "$CONFIG" ]; then
if [ -n "$STATS_IPS" ]; then
sed -i "s|#allowed_ips =.*|allowed_ips = $STATS_IPS|" "$CONFIG"
fi
if [ -n "$TRUSTED_PROXIES" ]; then
sed -i "s|#trustedproxies =.*|trustedproxies = $TRUSTED_PROXIES|" "$CONFIG"
fi
fi
echo "Starting signaling proxy with $CONFIG ..."

View file

@ -7,7 +7,8 @@ WORKDIR /workdir
COPY . .
RUN touch /.dockerenv && \
apk add --no-cache bash git build-base protobuf && \
GOOS=${TARGETOS} GOARCH=${TARGETARCH} make server
if [ -d "vendor" ]; then GOOS=${TARGETOS} GOARCH=${TARGETARCH} GOPROXY=off make server; else \
GOOS=${TARGETOS} GOARCH=${TARGETARCH} make server; fi
FROM alpine:3
@ -18,12 +19,9 @@ RUN adduser -D spreedbackend && \
COPY --from=builder /workdir/bin/signaling /usr/bin/nextcloud-spreed-signaling
COPY ./server.conf.in /config/server.conf.in
COPY ./docker/server/entrypoint.sh /
COPY ./docker/server/stop.sh /
COPY ./docker/server/wait.sh /
RUN chown spreedbackend /config
RUN /usr/bin/nextcloud-spreed-signaling -version
USER spreedbackend
STOPSIGNAL SIGUSR1
ENTRYPOINT [ "/entrypoint.sh" ]

View file

@ -157,10 +157,6 @@ if [ ! -f "$CONFIG" ]; then
sed -i "s|#allowed_ips =.*|allowed_ips = $STATS_IPS|" "$CONFIG"
fi
if [ -n "$TRUSTED_PROXIES" ]; then
sed -i "s|#trustedproxies =.*|trustedproxies = $TRUSTED_PROXIES|" "$CONFIG"
fi
if [ -n "$GRPC_LISTEN" ]; then
sed -i "s|#listen = 0.0.0.0:9090|listen = $GRPC_LISTEN|" "$CONFIG"

View file

@ -1,26 +0,0 @@
#!/bin/bash
#
# Standalone signaling server for the Nextcloud Spreed app.
# Copyright (C) 2024 struktur AG
#
# @author Joachim Bauch <bauch@struktur.de>
#
# @license GNU AGPL version 3 or any later version
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
set -e
echo "Schedule signaling server to shutdown ..."
exec killall -USR1 nextcloud-spreed-signaling

View file

@ -1,33 +0,0 @@
#!/bin/bash
#
# Standalone signaling server for the Nextcloud Spreed app.
# Copyright (C) 2024 struktur AG
#
# @author Joachim Bauch <bauch@struktur.de>
#
# @license GNU AGPL version 3 or any later version
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
set -e
echo "Waiting for signaling server to shutdown ..."
while true
do
if ! pgrep nextcloud-spreed-signaling > /dev/null ; then
echo "Signaling server has stopped"
exit 0
fi
sleep 1
done

View file

@ -49,5 +49,3 @@ The following metrics are available:
| `signaling_grpc_client_calls_total` | Counter | 1.0.0 | The total number of GRPC client calls | `method` |
| `signaling_grpc_server_calls_total` | Counter | 1.0.0 | The total number of GRPC server calls | `method` |
| `signaling_http_client_pool_connections` | Gauge | 1.2.4 | The current number of HTTP client connections per host | `host` |
| `signaling_throttle_delayed_total` | Counter | 1.2.5 | The total number of delayed requests | `action`, `delay` |
| `signaling_throttle_bruteforce_total` | Counter | 1.2.5 | The total number of rejected bruteforce requests | `action` |

View file

@ -1,6 +1,6 @@
jinja2==3.1.4
jinja2==3.1.3
markdown==3.6
mkdocs==1.6.0
mkdocs==1.5.3
readthedocs-sphinx-search==0.3.2
sphinx==7.3.7
sphinx==7.2.6
sphinx_rtd_theme==2.0.0

View file

@ -23,7 +23,6 @@ package signaling
import (
"context"
"errors"
"fmt"
"log"
"strings"
@ -35,8 +34,6 @@ import (
"go.etcd.io/etcd/client/pkg/v3/srv"
"go.etcd.io/etcd/client/pkg/v3/transport"
clientv3 "go.etcd.io/etcd/client/v3"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
type EtcdClientListener interface {
@ -45,8 +42,8 @@ type EtcdClientListener interface {
type EtcdClientWatcher interface {
EtcdWatchCreated(client *EtcdClient, key string)
EtcdKeyUpdated(client *EtcdClient, key string, value []byte, prevValue []byte)
EtcdKeyDeleted(client *EtcdClient, key string, prevValue []byte)
EtcdKeyUpdated(client *EtcdClient, key string, value []byte)
EtcdKeyDeleted(client *EtcdClient, key string)
}
type EtcdClient struct {
@ -115,17 +112,6 @@ func (c *EtcdClient) load(config *goconf.ConfigFile, ignoreErrors bool) error {
DialTimeout: time.Second,
}
if logLevel, _ := config.GetString("etcd", "loglevel"); logLevel != "" {
var l zapcore.Level
if err := l.Set(logLevel); err != nil {
return fmt.Errorf("Unsupported etcd log level %s: %w", logLevel, err)
}
logConfig := zap.NewProductionConfig()
logConfig.Level = zap.NewAtomicLevelAt(l)
cfg.LogConfig = &logConfig
}
clientKey := c.getConfigStringWithFallback(config, "clientkey")
clientCert := c.getConfigStringWithFallback(config, "clientcert")
caCert := c.getConfigStringWithFallback(config, "cacert")
@ -190,8 +176,8 @@ func (c *EtcdClient) getEtcdClient() *clientv3.Client {
return client.(*clientv3.Client)
}
func (c *EtcdClient) syncClient(ctx context.Context) error {
ctx, cancel := context.WithTimeout(ctx, time.Second)
func (c *EtcdClient) syncClient() error {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
return c.getEtcdClient().Sync(ctx)
@ -237,10 +223,8 @@ func (c *EtcdClient) WaitForConnection(ctx context.Context) error {
return err
}
if err := c.syncClient(ctx); err != nil {
if errors.Is(err, context.Canceled) {
return err
} else if errors.Is(err, context.DeadlineExceeded) {
if err := c.syncClient(); err != nil {
if err == context.DeadlineExceeded {
log.Printf("Timeout waiting for etcd client to connect to the cluster, retry in %s", backoff.NextWait())
} else {
log.Printf("Could not sync etcd client with the cluster, retry in %s: %s", backoff.NextWait(), err)
@ -259,37 +243,27 @@ func (c *EtcdClient) Get(ctx context.Context, key string, opts ...clientv3.OpOpt
return c.getEtcdClient().Get(ctx, key, opts...)
}
func (c *EtcdClient) Watch(ctx context.Context, key string, nextRevision int64, watcher EtcdClientWatcher, opts ...clientv3.OpOption) (int64, error) {
log.Printf("Wait for leader and start watching on %s (rev=%d)", key, nextRevision)
opts = append(opts, clientv3.WithRev(nextRevision), clientv3.WithPrevKV())
func (c *EtcdClient) Watch(ctx context.Context, key string, watcher EtcdClientWatcher, opts ...clientv3.OpOption) error {
log.Printf("Wait for leader and start watching on %s", key)
ch := c.getEtcdClient().Watch(clientv3.WithRequireLeader(ctx), key, opts...)
log.Printf("Watch created for %s", key)
watcher.EtcdWatchCreated(c, key)
for response := range ch {
if err := response.Err(); err != nil {
return nextRevision, err
return err
}
nextRevision = response.Header.Revision + 1
for _, ev := range response.Events {
switch ev.Type {
case clientv3.EventTypePut:
var prevValue []byte
if ev.PrevKv != nil {
prevValue = ev.PrevKv.Value
}
watcher.EtcdKeyUpdated(c, string(ev.Kv.Key), ev.Kv.Value, prevValue)
watcher.EtcdKeyUpdated(c, string(ev.Kv.Key), ev.Kv.Value)
case clientv3.EventTypeDelete:
var prevValue []byte
if ev.PrevKv != nil {
prevValue = ev.PrevKv.Value
}
watcher.EtcdKeyDeleted(c, string(ev.Kv.Key), prevValue)
watcher.EtcdKeyDeleted(c, string(ev.Kv.Key))
default:
log.Printf("Unsupported watch event %s %q -> %q", ev.Type, ev.Kv.Key, ev.Kv.Value)
}
}
}
return nextRevision, nil
return nil
}
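
Untangled from the interleaved old/new lines above: the master branch resumes watches at a tracked revision and requests previous values, while v1.2.4 uses a plain watch. A rough sketch of the resumable variant, assuming the standard clientv3 API:

    opts = append(opts, clientv3.WithRev(nextRevision), clientv3.WithPrevKV())
    ch := client.Watch(clientv3.WithRequireLeader(ctx), key, opts...)
    for response := range ch {
        if err := response.Err(); err != nil {
            // The caller can retry the watch from the last seen revision.
            return nextRevision, err
        }
        nextRevision = response.Header.Revision + 1
        for _, ev := range response.Events {
            var prevValue []byte
            if ev.PrevKv != nil { // only populated when WithPrevKV was requested
                prevValue = ev.PrevKv.Value
            }
            // Dispatch ev.Kv.Key, ev.Kv.Value and prevValue to the watcher callbacks.
        }
    }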

View file

@ -29,6 +29,7 @@ import (
"os"
"runtime"
"strconv"
"sync"
"syscall"
"testing"
"time"
@ -38,8 +39,6 @@ import (
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/embed"
"go.etcd.io/etcd/server/v3/lease"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
var (
@ -90,7 +89,6 @@ func NewEtcdForTest(t *testing.T) *embed.Etcd {
cfg.ListenPeerUrls = []url.URL{*peerListener}
cfg.AdvertisePeerUrls = []url.URL{*peerListener}
cfg.InitialCluster = "default=" + peerListener.String()
cfg.ZapLoggerBuilder = embed.NewZapLoggerBuilder(zaptest.NewLogger(t, zaptest.Level(zap.WarnLevel)))
etcd, err = embed.StartEtcd(cfg)
if isErrorAddressAlreadyInUse(err) {
continue
@ -105,7 +103,6 @@ func NewEtcdForTest(t *testing.T) *embed.Etcd {
t.Cleanup(func() {
etcd.Close()
<-etcd.Server.StopNotify()
})
// Wait for server to be ready.
<-etcd.Server.ReadyNotify()
@ -118,7 +115,6 @@ func NewEtcdClientForTest(t *testing.T) (*embed.Etcd, *EtcdClient) {
config := goconf.NewConfigFile()
config.AddOption("etcd", "endpoints", etcd.Config().ListenClientUrls[0].String())
config.AddOption("etcd", "loglevel", "error")
client, err := NewEtcdClient(config, "")
if err != nil {
@ -147,8 +143,6 @@ func DeleteEtcdValue(etcd *embed.Etcd, key string) {
}
func Test_EtcdClient_Get(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
etcd, client := NewEtcdClientForTest(t)
if response, err := client.Get(context.Background(), "foo"); err != nil {
@ -171,8 +165,6 @@ func Test_EtcdClient_Get(t *testing.T) {
}
func Test_EtcdClient_GetPrefix(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
etcd, client := NewEtcdClientForTest(t)
if response, err := client.Get(context.Background(), "foo"); err != nil {
@ -204,8 +196,6 @@ type etcdEvent struct {
t mvccpb.Event_EventType
key string
value string
prevValue string
}
type EtcdClientTestListener struct {
@ -214,8 +204,9 @@ type EtcdClientTestListener struct {
ctx context.Context
cancel context.CancelFunc
initial chan struct{}
events chan etcdEvent
initial chan struct{}
initialWg sync.WaitGroup
events chan etcdEvent
}
func NewEtcdClientTestListener(ctx context.Context, t *testing.T) *EtcdClientTestListener {
@ -236,7 +227,15 @@ func (l *EtcdClientTestListener) Close() {
}
func (l *EtcdClientTestListener) EtcdClientCreated(client *EtcdClient) {
l.initialWg.Add(1)
go func() {
if err := client.Watch(clientv3.WithRequireLeader(l.ctx), "foo", l, clientv3.WithPrefix()); err != nil {
l.t.Error(err)
}
}()
go func() {
defer close(l.initial)
if err := client.WaitForConnection(l.ctx); err != nil {
l.t.Errorf("error waiting for connection: %s", err)
return
@ -245,8 +244,7 @@ func (l *EtcdClientTestListener) EtcdClientCreated(client *EtcdClient) {
ctx, cancel := context.WithTimeout(l.ctx, time.Second)
defer cancel()
response, err := client.Get(ctx, "foo", clientv3.WithPrefix())
if err != nil {
if response, err := client.Get(ctx, "foo", clientv3.WithPrefix()); err != nil {
l.t.Error(err)
} else if response.Count != 1 {
l.t.Errorf("expected 1 responses, got %d", response.Count)
@ -255,47 +253,30 @@ func (l *EtcdClientTestListener) EtcdClientCreated(client *EtcdClient) {
} else if string(response.Kvs[0].Value) != "1" {
l.t.Errorf("expected value \"1\", got \"%s\"", string(response.Kvs[0].Value))
}
close(l.initial)
nextRevision := response.Header.Revision + 1
for l.ctx.Err() == nil {
var err error
if nextRevision, err = client.Watch(clientv3.WithRequireLeader(l.ctx), "foo", nextRevision, l, clientv3.WithPrefix()); err != nil {
l.t.Error(err)
}
}
l.initialWg.Wait()
}()
}
func (l *EtcdClientTestListener) EtcdWatchCreated(client *EtcdClient, key string) {
l.initialWg.Done()
}
func (l *EtcdClientTestListener) EtcdKeyUpdated(client *EtcdClient, key string, value []byte, prevValue []byte) {
evt := etcdEvent{
func (l *EtcdClientTestListener) EtcdKeyUpdated(client *EtcdClient, key string, value []byte) {
l.events <- etcdEvent{
t: clientv3.EventTypePut,
key: string(key),
value: string(value),
}
if len(prevValue) > 0 {
evt.prevValue = string(prevValue)
}
l.events <- evt
}
func (l *EtcdClientTestListener) EtcdKeyDeleted(client *EtcdClient, key string, prevValue []byte) {
evt := etcdEvent{
func (l *EtcdClientTestListener) EtcdKeyDeleted(client *EtcdClient, key string) {
l.events <- etcdEvent{
t: clientv3.EventTypeDelete,
key: string(key),
}
if len(prevValue) > 0 {
evt.prevValue = string(prevValue)
}
l.events <- evt
}
func Test_EtcdClient_Watch(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
etcd, client := NewEtcdClientForTest(t)
SetEtcdValue(etcd, "foo/a", []byte("1"))
@ -318,23 +299,11 @@ func Test_EtcdClient_Watch(t *testing.T) {
t.Errorf("expected value %s, got %s", "2", event.value)
}
SetEtcdValue(etcd, "foo/a", []byte("3"))
event = <-listener.events
if event.t != clientv3.EventTypePut {
t.Errorf("expected type %d, got %d", clientv3.EventTypePut, event.t)
} else if event.key != "foo/a" {
t.Errorf("expected key %s, got %s", "foo/a", event.key)
} else if event.value != "3" {
t.Errorf("expected value %s, got %s", "3", event.value)
}
DeleteEtcdValue(etcd, "foo/a")
event = <-listener.events
if event.t != clientv3.EventTypeDelete {
t.Errorf("expected type %d, got %d", clientv3.EventTypeDelete, event.t)
} else if event.key != "foo/a" {
t.Errorf("expected key %s, got %s", "foo/a", event.key)
} else if event.prevValue != "3" {
t.Errorf("expected previous value %s, got %s", "3", event.prevValue)
}
}

View file

@ -22,7 +22,6 @@
package signaling
import (
"context"
"errors"
"log"
"os"
@ -55,9 +54,7 @@ type FileWatcher struct {
target string
callback FileWatcherCallback
watcher *fsnotify.Watcher
closeCtx context.Context
closeFunc context.CancelFunc
watcher *fsnotify.Watcher
}
func NewFileWatcher(filename string, callback FileWatcherCallback) (*FileWatcher, error) {
@ -76,28 +73,24 @@ func NewFileWatcher(filename string, callback FileWatcherCallback) (*FileWatcher
return nil, err
}
if err := watcher.Add(path.Dir(filename)); err != nil {
watcher.Close() // nolint
return nil, err
if filename != realFilename {
if err := watcher.Add(path.Dir(filename)); err != nil {
watcher.Close() // nolint
return nil, err
}
}
closeCtx, closeFunc := context.WithCancel(context.Background())
w := &FileWatcher{
filename: filename,
target: realFilename,
callback: callback,
watcher: watcher,
closeCtx: closeCtx,
closeFunc: closeFunc,
}
go w.run()
return w, nil
}
func (f *FileWatcher) Close() error {
f.closeFunc()
return f.watcher.Close()
}
@ -161,8 +154,6 @@ func (f *FileWatcher) run() {
}
log.Printf("Error watching %s: %s", f.filename, err)
case <-f.closeCtx.Done():
return
}
}
}
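
Both versions register the watch on the containing directory rather than on the file itself; this is the usual fsnotify pattern for catching atomic rename-style replacements, which would otherwise orphan a watch on the old inode. A minimal sketch of the setup, assuming fsnotify's documented API:

    watcher, err := fsnotify.NewWatcher()
    if err != nil {
        return nil, err
    }
    // Watch the parent directory: writers typically replace config
    // files via rename, so a watch on the file itself would be lost.
    if err := watcher.Add(path.Dir(filename)); err != nil {
        watcher.Close() // nolint
        return nil, err
    }
    // run() then selects on watcher.Events, watcher.Errors and, in the
    // master branch, a cancellable closeCtx so Close() unblocks the loop.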

View file

@ -47,53 +47,6 @@ func TestFileWatcher_NotExist(t *testing.T) {
}
func TestFileWatcher_File(t *testing.T) {
ensureNoGoroutinesLeak(t, func(t *testing.T) {
tmpdir := t.TempDir()
filename := path.Join(tmpdir, "test.txt")
if err := os.WriteFile(filename, []byte("Hello world!"), 0644); err != nil {
t.Fatal(err)
}
modified := make(chan struct{})
w, err := NewFileWatcher(filename, func(filename string) {
modified <- struct{}{}
})
if err != nil {
t.Fatal(err)
}
defer w.Close()
if err := os.WriteFile(filename, []byte("Updated"), 0644); err != nil {
t.Fatal(err)
}
<-modified
ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
defer cancel()
select {
case <-modified:
t.Error("should not have received another event")
case <-ctxTimeout.Done():
}
if err := os.WriteFile(filename, []byte("Updated"), 0644); err != nil {
t.Fatal(err)
}
<-modified
ctxTimeout, cancel = context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
defer cancel()
select {
case <-modified:
t.Error("should not have received another event")
case <-ctxTimeout.Done():
}
})
}
func TestFileWatcher_Rename(t *testing.T) {
tmpdir := t.TempDir()
filename := path.Join(tmpdir, "test.txt")
if err := os.WriteFile(filename, []byte("Hello world!"), 0644); err != nil {
@ -109,10 +62,10 @@ func TestFileWatcher_Rename(t *testing.T) {
}
defer w.Close()
filename2 := path.Join(tmpdir, "test.txt.tmp")
if err := os.WriteFile(filename2, []byte("Updated"), 0644); err != nil {
if err := os.WriteFile(filename, []byte("Updated"), 0644); err != nil {
t.Fatal(err)
}
<-modified
ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
defer cancel()
@ -123,7 +76,7 @@ func TestFileWatcher_Rename(t *testing.T) {
case <-ctxTimeout.Done():
}
if err := os.Rename(filename2, filename); err != nil {
if err := os.WriteFile(filename, []byte("Updated"), 0644); err != nil {
t.Fatal(err)
}
<-modified
@ -258,53 +211,3 @@ func TestFileWatcher_OtherSymlink(t *testing.T) {
case <-ctxTimeout.Done():
}
}
func TestFileWatcher_RenameSymlinkTarget(t *testing.T) {
tmpdir := t.TempDir()
sourceFilename1 := path.Join(tmpdir, "test1.txt")
if err := os.WriteFile(sourceFilename1, []byte("Hello world!"), 0644); err != nil {
t.Fatal(err)
}
filename := path.Join(tmpdir, "test.txt")
if err := os.Symlink(sourceFilename1, filename); err != nil {
t.Fatal(err)
}
modified := make(chan struct{})
w, err := NewFileWatcher(filename, func(filename string) {
modified <- struct{}{}
})
if err != nil {
t.Fatal(err)
}
defer w.Close()
sourceFilename2 := path.Join(tmpdir, "test1.txt.tmp")
if err := os.WriteFile(sourceFilename2, []byte("Updated"), 0644); err != nil {
t.Fatal(err)
}
ctxTimeout, cancel := context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
defer cancel()
select {
case <-modified:
t.Error("should not have received another event")
case <-ctxTimeout.Done():
}
if err := os.Rename(sourceFilename2, sourceFilename1); err != nil {
t.Fatal(err)
}
<-modified
ctxTimeout, cancel = context.WithTimeout(context.Background(), testWatcherNoEventTimeout)
defer cancel()
select {
case <-modified:
t.Error("should not have received another event")
case <-ctxTimeout.Done():
}
}

View file

@ -97,7 +97,6 @@ func runConcurrentFlags(t *testing.T, count int, f func()) {
}
func TestFlagsConcurrentAdd(t *testing.T) {
t.Parallel()
var flags Flags
var added atomic.Int32
@ -112,7 +111,6 @@ func TestFlagsConcurrentAdd(t *testing.T) {
}
func TestFlagsConcurrentRemove(t *testing.T) {
t.Parallel()
var flags Flags
flags.Set(1)
@ -128,7 +126,6 @@ func TestFlagsConcurrentRemove(t *testing.T) {
}
func TestFlagsConcurrentSet(t *testing.T) {
t.Parallel()
var flags Flags
var set atomic.Int32

View file

@ -35,7 +35,6 @@ import (
"sync"
"time"
"github.com/dlintw/goconf"
"github.com/oschwald/maxminddb-golang"
)
@ -277,63 +276,3 @@ func IsValidContinent(continent string) bool {
return false
}
}
func LoadGeoIPOverrides(config *goconf.ConfigFile, ignoreErrors bool) (map[*net.IPNet]string, error) {
options, _ := GetStringOptions(config, "geoip-overrides", true)
if len(options) == 0 {
return nil, nil
}
var err error
geoipOverrides := make(map[*net.IPNet]string, len(options))
for option, value := range options {
var ip net.IP
var ipNet *net.IPNet
if strings.Contains(option, "/") {
_, ipNet, err = net.ParseCIDR(option)
if err != nil {
if ignoreErrors {
log.Printf("could not parse CIDR %s (%s), skipping", option, err)
continue
}
return nil, fmt.Errorf("could not parse CIDR %s: %s", option, err)
}
} else {
ip = net.ParseIP(option)
if ip == nil {
if ignoreErrors {
log.Printf("could not parse IP %s, skipping", option)
continue
}
return nil, fmt.Errorf("could not parse IP %s", option)
}
var mask net.IPMask
if ipv4 := ip.To4(); ipv4 != nil {
mask = net.CIDRMask(32, 32)
} else {
mask = net.CIDRMask(128, 128)
}
ipNet = &net.IPNet{
IP: ip,
Mask: mask,
}
}
value = strings.ToUpper(strings.TrimSpace(value))
if value == "" {
log.Printf("IP %s doesn't have a country assigned, skipping", option)
continue
} else if !IsValidCountry(value) {
log.Printf("Country %s for IP %s is invalid, skipping", value, option)
continue
}
log.Printf("Using country %s for %s", value, ipNet)
geoipOverrides[ipNet] = value
}
return geoipOverrides, nil
}
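
The overrides map built above is keyed by *net.IPNet, so resolving a country for a given address is a containment scan. A hypothetical helper (not part of this hunk) could look like:

    func lookupOverride(overrides map[*net.IPNet]string, ip net.IP) (string, bool) {
        for ipNet, country := range overrides {
            if ipNet.Contains(ip) {
                return country, true
            }
        }
        return "", false
    }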

View file

@ -78,7 +78,6 @@ func GetGeoIpUrlForTest(t *testing.T) string {
}
func TestGeoLookup(t *testing.T) {
CatchLogForTest(t)
reader, err := NewGeoLookupFromUrl(GetGeoIpUrlForTest(t))
if err != nil {
t.Fatal(err)
@ -93,7 +92,6 @@ func TestGeoLookup(t *testing.T) {
}
func TestGeoLookupCaching(t *testing.T) {
CatchLogForTest(t)
reader, err := NewGeoLookupFromUrl(GetGeoIpUrlForTest(t))
if err != nil {
t.Fatal(err)
@ -140,7 +138,6 @@ func TestGeoLookupContinent(t *testing.T) {
}
func TestGeoLookupCloseEmpty(t *testing.T) {
CatchLogForTest(t)
reader, err := NewGeoLookupFromUrl("ignore-url")
if err != nil {
t.Fatal(err)
@ -149,7 +146,6 @@ func TestGeoLookupCloseEmpty(t *testing.T) {
}
func TestGeoLookupFromFile(t *testing.T) {
CatchLogForTest(t)
geoIpUrl := GetGeoIpUrlForTest(t)
resp, err := http.Get(geoIpUrl)

go.mod
View file

@ -1,6 +1,6 @@
module github.com/strukturag/nextcloud-spreed-signaling
go 1.21
go 1.20
require (
github.com/dlintw/goconf v0.0.0-20120228082610-dcc070983490
@ -9,22 +9,21 @@ require (
github.com/google/uuid v1.6.0
github.com/gorilla/mux v1.8.1
github.com/gorilla/securecookie v1.1.2
github.com/gorilla/websocket v1.5.3
github.com/gorilla/websocket v1.5.1
github.com/mailru/easyjson v0.7.7
github.com/nats-io/nats-server/v2 v2.10.16
github.com/nats-io/nats.go v1.36.0
github.com/nats-io/nats-server/v2 v2.10.12
github.com/nats-io/nats.go v1.34.0
github.com/notedit/janus-go v0.0.0-20200517101215-10eb8b95d1a0
github.com/oschwald/maxminddb-golang v1.13.0
github.com/oschwald/maxminddb-golang v1.12.0
github.com/pion/sdp/v3 v3.0.9
github.com/prometheus/client_golang v1.19.1
go.etcd.io/etcd/api/v3 v3.5.14
go.etcd.io/etcd/client/pkg/v3 v3.5.14
go.etcd.io/etcd/client/v3 v3.5.14
go.etcd.io/etcd/server/v3 v3.5.14
go.uber.org/zap v1.27.0
google.golang.org/grpc v1.64.0
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.4.0
google.golang.org/protobuf v1.34.2
github.com/prometheus/client_golang v1.19.0
go.etcd.io/etcd/api/v3 v3.5.12
go.etcd.io/etcd/client/pkg/v3 v3.5.12
go.etcd.io/etcd/client/v3 v3.5.12
go.etcd.io/etcd/server/v3 v3.5.12
google.golang.org/grpc v1.62.1
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0
google.golang.org/protobuf v1.33.0
)
require (
@ -47,26 +46,26 @@ require (
github.com/jonboulle/clockwork v0.2.2 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.8 // indirect
github.com/klauspost/compress v1.17.7 // indirect
github.com/minio/highwayhash v1.0.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/nats-io/jwt/v2 v2.5.7 // indirect
github.com/nats-io/jwt/v2 v2.5.5 // indirect
github.com/nats-io/nkeys v0.4.7 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/pion/randutil v0.1.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sirupsen/logrus v1.7.0 // indirect
github.com/soheilhy/cmux v0.1.5 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
go.etcd.io/bbolt v1.3.10 // indirect
go.etcd.io/etcd/client/v2 v2.305.14 // indirect
go.etcd.io/etcd/pkg/v3 v3.5.14 // indirect
go.etcd.io/etcd/raft/v3 v3.5.14 // indirect
go.etcd.io/bbolt v1.3.8 // indirect
go.etcd.io/etcd/client/v2 v2.305.12 // indirect
go.etcd.io/etcd/pkg/v3 v3.5.12 // indirect
go.etcd.io/etcd/raft/v3 v3.5.12 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect
go.opentelemetry.io/otel v1.20.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect
@ -75,15 +74,17 @@ require (
go.opentelemetry.io/otel/sdk v1.20.0 // indirect
go.opentelemetry.io/otel/trace v1.20.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.uber.org/multierr v1.10.0 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/text v0.15.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.17.0 // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/net v0.21.0 // indirect
golang.org/x/sys v0.18.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect

go.sum
View file

@ -1,10 +1,8 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU=
cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls=
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
@ -17,10 +15,8 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
@ -37,7 +33,6 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@ -56,7 +51,6 @@ github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOW
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -68,10 +62,8 @@ github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@ -79,8 +71,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA=
github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
@ -97,14 +89,12 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
@ -114,12 +104,12 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/nats-io/jwt/v2 v2.5.7 h1:j5lH1fUXCnJnY8SsQeB/a/z9Azgu2bYIDvtPVNdxe2c=
github.com/nats-io/jwt/v2 v2.5.7/go.mod h1:ZdWS1nZa6WMZfFwwgpEaqBV8EPGVgOTDHN/wTbz0Y5A=
github.com/nats-io/nats-server/v2 v2.10.16 h1:2jXaiydp5oB/nAx/Ytf9fdCi9QN6ItIc9eehX8kwVV0=
github.com/nats-io/nats-server/v2 v2.10.16/go.mod h1:Pksi38H2+6xLe1vQx0/EA4bzetM0NqyIHcIbmgXSkIU=
github.com/nats-io/nats.go v1.36.0 h1:suEUPuWzTSse/XhESwqLxXGuj8vGRuPRoG7MoRN/qyU=
github.com/nats-io/nats.go v1.36.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/jwt/v2 v2.5.5 h1:ROfXb50elFq5c9+1ztaUbdlrArNFl2+fQWP6B8HGEq4=
github.com/nats-io/jwt/v2 v2.5.5/go.mod h1:ZdWS1nZa6WMZfFwwgpEaqBV8EPGVgOTDHN/wTbz0Y5A=
github.com/nats-io/nats-server/v2 v2.10.12 h1:G6u+RDrHkw4bkwn7I911O5jqys7jJVRY6MwgndyUsnE=
github.com/nats-io/nats-server/v2 v2.10.12/go.mod h1:H1n6zXtYLFCgXcf/SF8QNTSIFuS8tyZQMN9NguUHdEs=
github.com/nats-io/nats.go v1.34.0 h1:fnxnPCNiwIG5w08rlMcEKTUw4AV/nKyGCOJE8TdhSPk=
github.com/nats-io/nats.go v1.34.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
@ -127,17 +117,18 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS
github.com/notedit/janus-go v0.0.0-20200517101215-10eb8b95d1a0 h1:EFU9iv8BMPyBo8iFMHvQleYlF5M3PY6zpAbxsngImjE=
github.com/notedit/janus-go v0.0.0-20200517101215-10eb8b95d1a0/go.mod h1:BN/Txse3qz8tZOmCm2OfajB2wHVujWmX3o9nVdsI6gE=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/oschwald/maxminddb-golang v1.13.0 h1:R8xBorY71s84yO06NgTmQvqvTvlS/bnYZrrWX1MElnU=
github.com/oschwald/maxminddb-golang v1.13.0/go.mod h1:BU0z8BfFVhi1LQaonTwwGQlsHUEu9pWNdMfmq4ztm0o=
github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq50AS6wALUMYs=
github.com/oschwald/maxminddb-golang v1.12.0/go.mod h1:q0Nob5lTCqyQ8WT6FYgS1L7PXKVVbgiymefNwIjPzgY=
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY=
github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
@ -147,10 +138,9 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@ -175,22 +165,22 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
go.etcd.io/etcd/api/v3 v3.5.14 h1:vHObSCxyB9zlF60w7qzAdTcGaglbJOpSj1Xj9+WGxq0=
go.etcd.io/etcd/api/v3 v3.5.14/go.mod h1:BmtWcRlQvwa1h3G2jvKYwIQy4PkHlDej5t7uLMUdJUU=
go.etcd.io/etcd/client/pkg/v3 v3.5.14 h1:SaNH6Y+rVEdxfpA2Jr5wkEvN6Zykme5+YnbCkxvuWxQ=
go.etcd.io/etcd/client/pkg/v3 v3.5.14/go.mod h1:8uMgAokyG1czCtIdsq+AGyYQMvpIKnSvPjFMunkgeZI=
go.etcd.io/etcd/client/v2 v2.305.14 h1:v5ASLyFuMlVd/gKU6uf6Cod+vSWKa4Rsv9+eghl0Nwk=
go.etcd.io/etcd/client/v2 v2.305.14/go.mod h1:AWYT0lLEkBuqVaGw0UVMtA4rxCb3/oGE8PxZ8cUS4tI=
go.etcd.io/etcd/client/v3 v3.5.14 h1:CWfRs4FDaDoSz81giL7zPpZH2Z35tbOrAJkkjMqOupg=
go.etcd.io/etcd/client/v3 v3.5.14/go.mod h1:k3XfdV/VIHy/97rqWjoUzrj9tk7GgJGH9J8L4dNXmAk=
go.etcd.io/etcd/pkg/v3 v3.5.14 h1:keuxhJiDCPjTKpW77GxJnnVVD5n4IsfvkDaqiqUMNEQ=
go.etcd.io/etcd/pkg/v3 v3.5.14/go.mod h1:7o+DL6a7DYz9KSjWByX+NGmQPYinoH3D36VAu/B3JqA=
go.etcd.io/etcd/raft/v3 v3.5.14 h1:mHnpbljpBBftmK+YUfp+49ivaCc126aBPLAnwDw0DnE=
go.etcd.io/etcd/raft/v3 v3.5.14/go.mod h1:WnIK5blyJGRKsHA3efovdNoLv9QELTZHzpDOVIAuL2s=
go.etcd.io/etcd/server/v3 v3.5.14 h1:l/3gdiSSoGU6MyKAYiL+8WSOMq9ySG+NqQ04euLtZfY=
go.etcd.io/etcd/server/v3 v3.5.14/go.mod h1:SPh0rUtGNDgOZd/aTbkAUYZV+5FFHw5sdbGnO2/byw0=
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.etcd.io/etcd/api/v3 v3.5.12 h1:W4sw5ZoU2Juc9gBWuLk5U6fHfNVyY1WC5g9uiXZio/c=
go.etcd.io/etcd/api/v3 v3.5.12/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4=
go.etcd.io/etcd/client/pkg/v3 v3.5.12 h1:EYDL6pWwyOsylrQyLp2w+HkQ46ATiOvoEdMarindU2A=
go.etcd.io/etcd/client/pkg/v3 v3.5.12/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4=
go.etcd.io/etcd/client/v2 v2.305.12 h1:0m4ovXYo1CHaA/Mp3X/Fak5sRNIWf01wk/X1/G3sGKI=
go.etcd.io/etcd/client/v2 v2.305.12/go.mod h1:aQ/yhsxMu+Oht1FOupSr60oBvcS9cKXHrzBpDsPTf9E=
go.etcd.io/etcd/client/v3 v3.5.12 h1:v5lCPXn1pf1Uu3M4laUE2hp/geOTc5uPcYYsNe1lDxg=
go.etcd.io/etcd/client/v3 v3.5.12/go.mod h1:tSbBCakoWmmddL+BKVAJHa9km+O/E+bumDe9mSbPiqw=
go.etcd.io/etcd/pkg/v3 v3.5.12 h1:OK2fZKI5hX/+BTK76gXSTyZMrbnARyX9S643GenNGb8=
go.etcd.io/etcd/pkg/v3 v3.5.12/go.mod h1:UVwg/QIMoJncyeb/YxvJBJCE/NEwtHWashqc8A1nj/M=
go.etcd.io/etcd/raft/v3 v3.5.12 h1:7r22RufdDsq2z3STjoR7Msz6fYH8tmbkdheGfwJNRmU=
go.etcd.io/etcd/raft/v3 v3.5.12/go.mod h1:ERQuZVe79PI6vcC3DlKBukDCLja/L7YMu29B74Iwj4U=
go.etcd.io/etcd/server/v3 v3.5.12 h1:EtMjsbfyfkwZuA2JlKOiBfuGkFCekv5H178qjXypbG8=
go.etcd.io/etcd/server/v3 v3.5.12/go.mod h1:axB0oCjMy+cemo5290/CutIjoxlfA6KVYKD1w0uue10=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M=
go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc=
@ -208,19 +198,20 @@ go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCD
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@ -238,34 +229,31 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -283,32 +271,30 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4=
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU=
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.4.0 h1:9SxA29VM43MF5Z9dQu694wmY5t8E/Gxr7s+RSxiIDmc=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.4.0/go.mod h1:yZOK5zhQMiALmuweVdIVoQPa6eIJyXn2B9g5dJDhqX4=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@ -317,6 +303,7 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View file

@ -24,9 +24,7 @@ package signaling
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net"
"net/url"
@ -39,8 +37,6 @@ import (
clientv3 "go.etcd.io/etcd/client/v3"
"google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
status "google.golang.org/grpc/status"
)
@ -53,8 +49,6 @@ const (
)
var (
ErrNoSuchResumeId = fmt.Errorf("unknown resume id")
customResolverPrefix atomic.Uint64
)
@ -142,9 +136,9 @@ func NewGrpcClient(target string, ip net.IP, opts ...grpc.DialOption) (*GrpcClie
hostname: hostname,
}
opts = append(opts, grpc.WithResolvers(resolver))
conn, err = grpc.NewClient(fmt.Sprintf("%s://%s", resolver.Scheme(), target), opts...)
conn, err = grpc.Dial(fmt.Sprintf("%s://%s", resolver.Scheme(), target), opts...)
} else {
conn, err = grpc.NewClient(target, opts...)
conn, err = grpc.Dial(target, opts...)
}
if err != nil {
return nil, err
@ -189,26 +183,6 @@ func (c *GrpcClient) GetServerId(ctx context.Context) (string, error) {
return response.GetServerId(), nil
}
func (c *GrpcClient) LookupResumeId(ctx context.Context, resumeId string) (*LookupResumeIdReply, error) {
statsGrpcClientCalls.WithLabelValues("LookupResumeId").Inc()
// TODO: Remove debug logging
log.Printf("Lookup resume id %s on %s", resumeId, c.Target())
response, err := c.impl.LookupResumeId(ctx, &LookupResumeIdRequest{
ResumeId: resumeId,
}, grpc.WaitForReady(true))
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
return nil, ErrNoSuchResumeId
} else if err != nil {
return nil, err
}
if sessionId := response.GetSessionId(); sessionId == "" {
return nil, ErrNoSuchResumeId
}
return response, nil
}
func (c *GrpcClient) LookupSessionId(ctx context.Context, roomSessionId string, disconnectReason string) (string, error) {
statsGrpcClientCalls.WithLabelValues("LookupSessionId").Inc()
// TODO: Remove debug logging
@ -282,86 +256,6 @@ func (c *GrpcClient) GetSessionCount(ctx context.Context, u *url.URL) (uint32, e
return response.GetCount(), nil
}
type ProxySessionReceiver interface {
RemoteAddr() string
Country() string
UserAgent() string
OnProxyMessage(message *ServerSessionMessage) error
OnProxyClose(err error)
}
type SessionProxy struct {
sessionId string
receiver ProxySessionReceiver
sendMu sync.Mutex
client RpcSessions_ProxySessionClient
}
func (p *SessionProxy) recvPump() {
var closeError error
defer func() {
p.receiver.OnProxyClose(closeError)
if err := p.Close(); err != nil {
log.Printf("Error closing proxy for session %s: %s", p.sessionId, err)
}
}()
for {
msg, err := p.client.Recv()
if err != nil {
if errors.Is(err, io.EOF) {
break
}
log.Printf("Error receiving message from proxy for session %s: %s", p.sessionId, err)
closeError = err
break
}
if err := p.receiver.OnProxyMessage(msg); err != nil {
log.Printf("Error processing message %+v from proxy for session %s: %s", msg, p.sessionId, err)
}
}
}
func (p *SessionProxy) Send(message *ClientSessionMessage) error {
p.sendMu.Lock()
defer p.sendMu.Unlock()
return p.client.Send(message)
}
func (p *SessionProxy) Close() error {
p.sendMu.Lock()
defer p.sendMu.Unlock()
return p.client.CloseSend()
}
func (c *GrpcClient) ProxySession(ctx context.Context, sessionId string, receiver ProxySessionReceiver) (*SessionProxy, error) {
statsGrpcClientCalls.WithLabelValues("ProxySession").Inc()
md := metadata.Pairs(
"sessionId", sessionId,
"remoteAddr", receiver.RemoteAddr(),
"country", receiver.Country(),
"userAgent", receiver.UserAgent(),
)
client, err := c.impl.ProxySession(metadata.NewOutgoingContext(ctx, md), grpc.WaitForReady(true))
if err != nil {
return nil, err
}
proxy := &SessionProxy{
sessionId: sessionId,
receiver: receiver,
client: client,
}
go proxy.recvPump()
return proxy, nil
}
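
The removed ProxySession wiring above attaches per-session details as gRPC metadata before opening the bidirectional stream, then pumps received messages in a dedicated goroutine while serializing sends through a mutex. The metadata part, as a minimal sketch using the standard google.golang.org/grpc/metadata API:

    md := metadata.Pairs(
        "sessionId", sessionId,
        "remoteAddr", receiver.RemoteAddr(),
        "country", receiver.Country(),
        "userAgent", receiver.UserAgent(),
    )
    client, err := c.impl.ProxySession(metadata.NewOutgoingContext(ctx, md), grpc.WaitForReady(true))
    if err != nil {
        return nil, err
    }
    // recvPump() drains client.Recv() until io.EOF; Send() and
    // CloseSend() share a mutex so writes never interleave.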
type grpcClientsList struct {
clients []*GrpcClient
entry *DnsMonitorEntry
@ -380,27 +274,21 @@ type GrpcClients struct {
targetPrefix string
targetInformation map[string]*GrpcTargetInformationEtcd
dialOptions atomic.Value // []grpc.DialOption
creds credentials.TransportCredentials
initializedCtx context.Context
initializedFunc context.CancelFunc
initializedWg sync.WaitGroup
wakeupChanForTesting chan struct{}
selfCheckWaitGroup sync.WaitGroup
closeCtx context.Context
closeFunc context.CancelFunc
}
func NewGrpcClients(config *goconf.ConfigFile, etcdClient *EtcdClient, dnsMonitor *DnsMonitor) (*GrpcClients, error) {
initializedCtx, initializedFunc := context.WithCancel(context.Background())
closeCtx, closeFunc := context.WithCancel(context.Background())
result := &GrpcClients{
dnsMonitor: dnsMonitor,
etcdClient: etcdClient,
initializedCtx: initializedCtx,
initializedFunc: initializedFunc,
closeCtx: closeCtx,
closeFunc: closeFunc,
}
if err := result.load(config, false); err != nil {
return nil, err
@ -414,13 +302,6 @@ func (c *GrpcClients) load(config *goconf.ConfigFile, fromReload bool) error {
return err
}
if c.creds != nil {
if cr, ok := c.creds.(*reloadableCredentials); ok {
cr.Close()
}
}
c.creds = creds
opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)}
c.dialOptions.Store(opts)
@ -494,10 +375,6 @@ loop:
id, err := c.getServerIdWithTimeout(ctx, client)
if err != nil {
if errors.Is(err, context.Canceled) {
return
}
if status.Code(err) != codes.Canceled {
log.Printf("Error checking GRPC server id of %s, retrying in %s: %s", client.Target(), backoff.NextWait(), err)
}
@ -597,13 +474,12 @@ func (c *GrpcClients) loadTargetsStatic(config *goconf.ConfigFile, fromReload bo
}
c.selfCheckWaitGroup.Add(1)
go c.checkIsSelf(c.closeCtx, target, client)
go c.checkIsSelf(context.Background(), target, client)
log.Printf("Adding %s as GRPC target", client.Target())
entry, found := clientsMap[target]
if !found {
entry = &grpcClientsList{}
clientsMap[target] = entry
}
entry.clients = append(entry.clients, client)
clients = append(clients, client)
@ -672,7 +548,7 @@ func (c *GrpcClients) onLookup(entry *DnsMonitorEntry, all []net.IP, added []net
}
c.selfCheckWaitGroup.Add(1)
go c.checkIsSelf(c.closeCtx, target, client)
go c.checkIsSelf(context.Background(), target, client)
log.Printf("Adding %s as GRPC target", client.Target())
newClients = append(newClients, client)
@ -710,72 +586,54 @@ func (c *GrpcClients) loadTargetsEtcd(config *goconf.ConfigFile, fromReload bool
}
func (c *GrpcClients) EtcdClientCreated(client *EtcdClient) {
c.initializedWg.Add(1)
go func() {
if err := client.WaitForConnection(c.closeCtx); err != nil {
if errors.Is(err, context.Canceled) {
return
}
if err := client.Watch(context.Background(), c.targetPrefix, c, clientv3.WithPrefix()); err != nil {
log.Printf("Error processing watch for %s: %s", c.targetPrefix, err)
}
}()
go func() {
if err := client.WaitForConnection(context.Background()); err != nil {
panic(err)
}
backoff, _ := NewExponentialBackoff(initialWaitDelay, maxWaitDelay)
var nextRevision int64
for c.closeCtx.Err() == nil {
response, err := c.getGrpcTargets(c.closeCtx, client, c.targetPrefix)
for {
response, err := c.getGrpcTargets(client, c.targetPrefix)
if err != nil {
if errors.Is(err, context.Canceled) {
return
} else if errors.Is(err, context.DeadlineExceeded) {
if err == context.DeadlineExceeded {
log.Printf("Timeout getting initial list of GRPC targets, retry in %s", backoff.NextWait())
} else {
log.Printf("Could not get initial list of GRPC targets, retry in %s: %s", backoff.NextWait(), err)
}
backoff.Wait(c.closeCtx)
backoff.Wait(context.Background())
continue
}
for _, ev := range response.Kvs {
c.EtcdKeyUpdated(client, string(ev.Key), ev.Value, nil)
c.EtcdKeyUpdated(client, string(ev.Key), ev.Value)
}
c.initializedWg.Wait()
c.initializedFunc()
nextRevision = response.Header.Revision + 1
break
}
prevRevision := nextRevision
backoff.Reset()
for c.closeCtx.Err() == nil {
var err error
if nextRevision, err = client.Watch(c.closeCtx, c.targetPrefix, nextRevision, c, clientv3.WithPrefix()); err != nil {
log.Printf("Error processing watch for %s (%s), retry in %s", c.targetPrefix, err, backoff.NextWait())
backoff.Wait(c.closeCtx)
continue
}
if nextRevision != prevRevision {
backoff.Reset()
prevRevision = nextRevision
} else {
log.Printf("Processing watch for %s interrupted, retry in %s", c.targetPrefix, backoff.NextWait())
backoff.Wait(c.closeCtx)
}
return
}
}()
}
func (c *GrpcClients) EtcdWatchCreated(client *EtcdClient, key string) {
c.initializedWg.Done()
}
func (c *GrpcClients) getGrpcTargets(ctx context.Context, client *EtcdClient, targetPrefix string) (*clientv3.GetResponse, error) {
ctx, cancel := context.WithTimeout(ctx, time.Second)
func (c *GrpcClients) getGrpcTargets(client *EtcdClient, targetPrefix string) (*clientv3.GetResponse, error) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
return client.Get(ctx, targetPrefix, clientv3.WithPrefix())
}
func (c *GrpcClients) EtcdKeyUpdated(client *EtcdClient, key string, data []byte, prevValue []byte) {
func (c *GrpcClients) EtcdKeyUpdated(client *EtcdClient, key string, data []byte) {
var info GrpcTargetInformationEtcd
if err := json.Unmarshal(data, &info); err != nil {
log.Printf("Could not decode GRPC target %s=%s: %s", key, string(data), err)
@ -808,7 +666,7 @@ func (c *GrpcClients) EtcdKeyUpdated(client *EtcdClient, key string, data []byte
}
c.selfCheckWaitGroup.Add(1)
go c.checkIsSelf(c.closeCtx, info.Address, cl)
go c.checkIsSelf(context.Background(), info.Address, cl)
log.Printf("Adding %s as GRPC target", cl.Target())
@ -824,7 +682,7 @@ func (c *GrpcClients) EtcdKeyUpdated(client *EtcdClient, key string, data []byte
c.wakeupForTesting()
}
func (c *GrpcClients) EtcdKeyDeleted(client *EtcdClient, key string, prevValue []byte) {
func (c *GrpcClients) EtcdKeyDeleted(client *EtcdClient, key string) {
c.mu.Lock()
defer c.mu.Unlock()
@ -908,12 +766,6 @@ func (c *GrpcClients) Close() {
if c.etcdClient != nil {
c.etcdClient.RemoveListener(c)
}
if c.creds != nil {
if cr, ok := c.creds.(*reloadableCredentials); ok {
cr.Close()
}
}
c.closeFunc()
}
func (c *GrpcClients) GetClients() []*GrpcClient {

View file

@ -112,32 +112,27 @@ func waitForEvent(ctx context.Context, t *testing.T, ch <-chan struct{}) {
}
func Test_GrpcClients_EtcdInitial(t *testing.T) {
CatchLogForTest(t)
ensureNoGoroutinesLeak(t, func(t *testing.T) {
_, addr1 := NewGrpcServerForTest(t)
_, addr2 := NewGrpcServerForTest(t)
_, addr1 := NewGrpcServerForTest(t)
_, addr2 := NewGrpcServerForTest(t)
etcd := NewEtcdForTest(t)
etcd := NewEtcdForTest(t)
SetEtcdValue(etcd, "/grpctargets/one", []byte("{\"address\":\""+addr1+"\"}"))
SetEtcdValue(etcd, "/grpctargets/two", []byte("{\"address\":\""+addr2+"\"}"))
SetEtcdValue(etcd, "/grpctargets/one", []byte("{\"address\":\""+addr1+"\"}"))
SetEtcdValue(etcd, "/grpctargets/two", []byte("{\"address\":\""+addr2+"\"}"))
client, _ := NewGrpcClientsWithEtcdForTest(t, etcd)
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
if err := client.WaitForInitialized(ctx); err != nil {
t.Fatal(err)
}
client, _ := NewGrpcClientsWithEtcdForTest(t, etcd)
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
if err := client.WaitForInitialized(ctx); err != nil {
t.Fatal(err)
}
if clients := client.GetClients(); len(clients) != 2 {
t.Errorf("Expected two clients, got %+v", clients)
}
})
if clients := client.GetClients(); len(clients) != 2 {
t.Errorf("Expected two clients, got %+v", clients)
}
}
func Test_GrpcClients_EtcdUpdate(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
etcd := NewEtcdForTest(t)
client, _ := NewGrpcClientsWithEtcdForTest(t, etcd)
ch := client.getWakeupChannelForTesting()
@ -192,8 +187,6 @@ func Test_GrpcClients_EtcdUpdate(t *testing.T) {
}
func Test_GrpcClients_EtcdIgnoreSelf(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
etcd := NewEtcdForTest(t)
client, _ := NewGrpcClientsWithEtcdForTest(t, etcd)
ch := client.getWakeupChannelForTesting()
@ -238,65 +231,60 @@ func Test_GrpcClients_EtcdIgnoreSelf(t *testing.T) {
}
func Test_GrpcClients_DnsDiscovery(t *testing.T) {
CatchLogForTest(t)
ensureNoGoroutinesLeak(t, func(t *testing.T) {
lookup := newMockDnsLookupForTest(t)
target := "testgrpc:12345"
ip1 := net.ParseIP("192.168.0.1")
ip2 := net.ParseIP("192.168.0.2")
targetWithIp1 := fmt.Sprintf("%s (%s)", target, ip1)
targetWithIp2 := fmt.Sprintf("%s (%s)", target, ip2)
lookup.Set("testgrpc", []net.IP{ip1})
client, dnsMonitor := NewGrpcClientsForTest(t, target)
ch := client.getWakeupChannelForTesting()
lookup := newMockDnsLookupForTest(t)
target := "testgrpc:12345"
ip1 := net.ParseIP("192.168.0.1")
ip2 := net.ParseIP("192.168.0.2")
targetWithIp1 := fmt.Sprintf("%s (%s)", target, ip1)
targetWithIp2 := fmt.Sprintf("%s (%s)", target, ip2)
lookup.Set("testgrpc", []net.IP{ip1})
client, dnsMonitor := NewGrpcClientsForTest(t, target)
ch := client.getWakeupChannelForTesting()
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
defer cancel()
dnsMonitor.checkHostnames()
if clients := client.GetClients(); len(clients) != 1 {
t.Errorf("Expected one client, got %+v", clients)
} else if clients[0].Target() != targetWithIp1 {
t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target())
} else if !clients[0].ip.Equal(ip1) {
t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip)
}
dnsMonitor.checkHostnames()
if clients := client.GetClients(); len(clients) != 1 {
t.Errorf("Expected one client, got %+v", clients)
} else if clients[0].Target() != targetWithIp1 {
t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target())
} else if !clients[0].ip.Equal(ip1) {
t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip)
}
lookup.Set("testgrpc", []net.IP{ip1, ip2})
drainWakeupChannel(ch)
dnsMonitor.checkHostnames()
waitForEvent(ctx, t, ch)
lookup.Set("testgrpc", []net.IP{ip1, ip2})
drainWakeupChannel(ch)
dnsMonitor.checkHostnames()
waitForEvent(ctx, t, ch)
if clients := client.GetClients(); len(clients) != 2 {
t.Errorf("Expected two client, got %+v", clients)
} else if clients[0].Target() != targetWithIp1 {
t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target())
} else if !clients[0].ip.Equal(ip1) {
t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip)
} else if clients[1].Target() != targetWithIp2 {
t.Errorf("Expected target %s, got %s", targetWithIp2, clients[1].Target())
} else if !clients[1].ip.Equal(ip2) {
t.Errorf("Expected IP %s, got %s", ip2, clients[1].ip)
}
if clients := client.GetClients(); len(clients) != 2 {
t.Errorf("Expected two client, got %+v", clients)
} else if clients[0].Target() != targetWithIp1 {
t.Errorf("Expected target %s, got %s", targetWithIp1, clients[0].Target())
} else if !clients[0].ip.Equal(ip1) {
t.Errorf("Expected IP %s, got %s", ip1, clients[0].ip)
} else if clients[1].Target() != targetWithIp2 {
t.Errorf("Expected target %s, got %s", targetWithIp2, clients[1].Target())
} else if !clients[1].ip.Equal(ip2) {
t.Errorf("Expected IP %s, got %s", ip2, clients[1].ip)
}
lookup.Set("testgrpc", []net.IP{ip2})
drainWakeupChannel(ch)
dnsMonitor.checkHostnames()
waitForEvent(ctx, t, ch)
lookup.Set("testgrpc", []net.IP{ip2})
drainWakeupChannel(ch)
dnsMonitor.checkHostnames()
waitForEvent(ctx, t, ch)
if clients := client.GetClients(); len(clients) != 1 {
t.Errorf("Expected one client, got %+v", clients)
} else if clients[0].Target() != targetWithIp2 {
t.Errorf("Expected target %s, got %s", targetWithIp2, clients[0].Target())
} else if !clients[0].ip.Equal(ip2) {
t.Errorf("Expected IP %s, got %s", ip2, clients[0].ip)
}
})
if clients := client.GetClients(); len(clients) != 1 {
t.Errorf("Expected one client, got %+v", clients)
} else if clients[0].Target() != targetWithIp2 {
t.Errorf("Expected target %s, got %s", targetWithIp2, clients[0].Target())
} else if !clients[0].ip.Equal(ip2) {
t.Errorf("Expected IP %s, got %s", ip2, clients[0].ip)
}
}
func Test_GrpcClients_DnsDiscoveryInitialFailed(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
lookup := newMockDnsLookupForTest(t)
target := "testgrpc:12345"
ip1 := net.ParseIP("192.168.0.1")
@ -332,58 +320,55 @@ func Test_GrpcClients_DnsDiscoveryInitialFailed(t *testing.T) {
}
func Test_GrpcClients_Encryption(t *testing.T) {
CatchLogForTest(t)
ensureNoGoroutinesLeak(t, func(t *testing.T) {
serverKey, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
serverKey, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
t.Fatal(err)
}
clientKey, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
t.Fatal(err)
}
serverCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Server cert", serverKey)
clientCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Testing client", clientKey)
dir := t.TempDir()
serverPrivkeyFile := path.Join(dir, "server-privkey.pem")
serverPubkeyFile := path.Join(dir, "server-pubkey.pem")
serverCertFile := path.Join(dir, "server-cert.pem")
WritePrivateKey(serverKey, serverPrivkeyFile) // nolint
WritePublicKey(&serverKey.PublicKey, serverPubkeyFile) // nolint
os.WriteFile(serverCertFile, serverCert, 0755) // nolint
clientPrivkeyFile := path.Join(dir, "client-privkey.pem")
clientPubkeyFile := path.Join(dir, "client-pubkey.pem")
clientCertFile := path.Join(dir, "client-cert.pem")
WritePrivateKey(clientKey, clientPrivkeyFile) // nolint
WritePublicKey(&clientKey.PublicKey, clientPubkeyFile) // nolint
os.WriteFile(clientCertFile, clientCert, 0755) // nolint
serverConfig := goconf.NewConfigFile()
serverConfig.AddOption("grpc", "servercertificate", serverCertFile)
serverConfig.AddOption("grpc", "serverkey", serverPrivkeyFile)
serverConfig.AddOption("grpc", "clientca", clientCertFile)
_, addr := NewGrpcServerForTestWithConfig(t, serverConfig)
clientConfig := goconf.NewConfigFile()
clientConfig.AddOption("grpc", "targets", addr)
clientConfig.AddOption("grpc", "clientcertificate", clientCertFile)
clientConfig.AddOption("grpc", "clientkey", clientPrivkeyFile)
clientConfig.AddOption("grpc", "serverca", serverCertFile)
clients, _ := NewGrpcClientsForTestWithConfig(t, clientConfig, nil)
ctx, cancel1 := context.WithTimeout(context.Background(), time.Second)
defer cancel1()
if err := clients.WaitForInitialized(ctx); err != nil {
t.Fatal(err)
}
for _, client := range clients.GetClients() {
if _, err := client.GetServerId(ctx); err != nil {
t.Fatal(err)
}
clientKey, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
t.Fatal(err)
}
serverCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Server cert", serverKey)
clientCert := GenerateSelfSignedCertificateForTesting(t, 1024, "Testing client", clientKey)
dir := t.TempDir()
serverPrivkeyFile := path.Join(dir, "server-privkey.pem")
serverPubkeyFile := path.Join(dir, "server-pubkey.pem")
serverCertFile := path.Join(dir, "server-cert.pem")
WritePrivateKey(serverKey, serverPrivkeyFile) // nolint
WritePublicKey(&serverKey.PublicKey, serverPubkeyFile) // nolint
os.WriteFile(serverCertFile, serverCert, 0755) // nolint
clientPrivkeyFile := path.Join(dir, "client-privkey.pem")
clientPubkeyFile := path.Join(dir, "client-pubkey.pem")
clientCertFile := path.Join(dir, "client-cert.pem")
WritePrivateKey(clientKey, clientPrivkeyFile) // nolint
WritePublicKey(&clientKey.PublicKey, clientPubkeyFile) // nolint
os.WriteFile(clientCertFile, clientCert, 0755) // nolint
serverConfig := goconf.NewConfigFile()
serverConfig.AddOption("grpc", "servercertificate", serverCertFile)
serverConfig.AddOption("grpc", "serverkey", serverPrivkeyFile)
serverConfig.AddOption("grpc", "clientca", clientCertFile)
_, addr := NewGrpcServerForTestWithConfig(t, serverConfig)
clientConfig := goconf.NewConfigFile()
clientConfig.AddOption("grpc", "targets", addr)
clientConfig.AddOption("grpc", "clientcertificate", clientCertFile)
clientConfig.AddOption("grpc", "clientkey", clientPrivkeyFile)
clientConfig.AddOption("grpc", "serverca", serverCertFile)
clients, _ := NewGrpcClientsForTestWithConfig(t, clientConfig, nil)
ctx, cancel1 := context.WithTimeout(context.Background(), time.Second)
defer cancel1()
if err := clients.WaitForInitialized(ctx); err != nil {
t.Fatal(err)
}
for _, client := range clients.GetClients() {
if _, err := client.GetServerId(ctx); err != nil {
t.Fatal(err)
}
}
})
}
}

View file

@ -125,15 +125,6 @@ func (c *reloadableCredentials) OverrideServerName(serverName string) error {
return nil
}
func (c *reloadableCredentials) Close() {
if c.loader != nil {
c.loader.Close()
}
if c.pool != nil {
c.pool.Close()
}
}
func NewReloadableCredentials(config *goconf.ConfigFile, server bool) (credentials.TransportCredentials, error) {
var prefix string
var caPrefix string

View file

@ -1,229 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2024 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"sync/atomic"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
const (
grpcRemoteClientMessageQueue = 16
)
func getMD(md metadata.MD, key string) string {
if values := md.Get(key); len(values) > 0 {
return values[0]
}
return ""
}
// remoteGrpcClient is a remote client connecting from a GRPC proxy to a Hub.
type remoteGrpcClient struct {
hub *Hub
client RpcSessions_ProxySessionServer
sessionId string
remoteAddr string
country string
userAgent string
closeCtx context.Context
closeFunc context.CancelCauseFunc
session atomic.Pointer[Session]
messages chan WritableClientMessage
}
func newRemoteGrpcClient(hub *Hub, request RpcSessions_ProxySessionServer) (*remoteGrpcClient, error) {
md, found := metadata.FromIncomingContext(request.Context())
if !found {
return nil, errors.New("no metadata provided")
}
closeCtx, closeFunc := context.WithCancelCause(context.Background())
result := &remoteGrpcClient{
hub: hub,
client: request,
sessionId: getMD(md, "sessionId"),
remoteAddr: getMD(md, "remoteAddr"),
country: getMD(md, "country"),
userAgent: getMD(md, "userAgent"),
closeCtx: closeCtx,
closeFunc: closeFunc,
messages: make(chan WritableClientMessage, grpcRemoteClientMessageQueue),
}
return result, nil
}
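newRemoteGrpcClient reads the session metadata from the incoming stream context, so the proxy side has to attach those keys when it opens the stream. A hedged sketch of the calling side, assuming the protoc-generated RpcSessionsClient for the ProxySession RPC defined further below (variable names are illustrative):
// Sketch: opening a ProxySession stream with the metadata expected by
// newRemoteGrpcClient. "client" is an RpcSessionsClient; the values
// are illustrative.
ctx := metadata.AppendToOutgoingContext(context.Background(),
	"sessionId", sessionId,
	"remoteAddr", remoteAddr,
	"country", country,
	"userAgent", userAgent,
)
stream, err := client.ProxySession(ctx)
if err != nil {
	return err
}
// Payloads travel as raw JSON wrapped in ClientSessionMessage.
if err := stream.Send(&ClientSessionMessage{Message: data}); err != nil {
	return err
}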
func (c *remoteGrpcClient) readPump() {
var closeError error
defer func() {
c.closeFunc(closeError)
c.hub.OnClosed(c)
}()
for {
msg, err := c.client.Recv()
if err != nil {
if errors.Is(err, io.EOF) {
// Connection was closed locally.
break
}
if status.Code(err) != codes.Canceled {
log.Printf("Error reading from remote client for session %s: %s", c.sessionId, err)
closeError = err
}
break
}
c.hub.OnMessageReceived(c, msg.Message)
}
}
func (c *remoteGrpcClient) Context() context.Context {
return c.client.Context()
}
func (c *remoteGrpcClient) RemoteAddr() string {
return c.remoteAddr
}
func (c *remoteGrpcClient) UserAgent() string {
return c.userAgent
}
func (c *remoteGrpcClient) Country() string {
return c.country
}
func (c *remoteGrpcClient) IsConnected() bool {
return true
}
func (c *remoteGrpcClient) IsAuthenticated() bool {
return c.GetSession() != nil
}
func (c *remoteGrpcClient) GetSession() Session {
session := c.session.Load()
if session == nil {
return nil
}
return *session
}
func (c *remoteGrpcClient) SetSession(session Session) {
if session == nil {
c.session.Store(nil)
} else {
c.session.Store(&session)
}
}
func (c *remoteGrpcClient) SendError(e *Error) bool {
message := &ServerMessage{
Type: "error",
Error: e,
}
return c.SendMessage(message)
}
func (c *remoteGrpcClient) SendByeResponse(message *ClientMessage) bool {
return c.SendByeResponseWithReason(message, "")
}
func (c *remoteGrpcClient) SendByeResponseWithReason(message *ClientMessage, reason string) bool {
response := &ServerMessage{
Type: "bye",
}
if message != nil {
response.Id = message.Id
}
if reason != "" {
if response.Bye == nil {
response.Bye = &ByeServerMessage{}
}
response.Bye.Reason = reason
}
return c.SendMessage(response)
}
func (c *remoteGrpcClient) SendMessage(message WritableClientMessage) bool {
if c.closeCtx.Err() != nil {
return false
}
select {
case c.messages <- message:
return true
default:
log.Printf("Message queue for remote client of session %s is full, not sending %+v", c.sessionId, message)
return false
}
}
func (c *remoteGrpcClient) Close() {
c.closeFunc(nil)
}
func (c *remoteGrpcClient) run() error {
go c.readPump()
for {
select {
case <-c.closeCtx.Done():
if err := context.Cause(c.closeCtx); err != context.Canceled {
return err
}
return nil
case msg := <-c.messages:
data, err := json.Marshal(msg)
if err != nil {
log.Printf("Error marshalling %+v for remote client for session %s: %s", msg, c.sessionId, err)
continue
}
if err := c.client.Send(&ServerSessionMessage{
Message: data,
}); err != nil {
return fmt.Errorf("error sending %+v to remote client for session %s: %w", msg, c.sessionId, err)
}
}
}
}

View file

@ -55,14 +55,6 @@ func init() {
GrpcServerId = hex.EncodeToString(md.Sum(nil))
}
type GrpcServerHub interface {
GetSessionByResumeId(resumeId string) Session
GetSessionByPublicId(sessionId string) Session
GetSessionIdByRoomSessionId(roomSessionId string) (string, error)
GetBackend(u *url.URL) *Backend
}
type GrpcServer struct {
UnimplementedRpcBackendServer
UnimplementedRpcInternalServer
@ -74,12 +66,12 @@ type GrpcServer struct {
listener net.Listener
serverId string // can be overwritten from tests
hub GrpcServerHub
hub *Hub
}
func NewGrpcServer(config *goconf.ConfigFile) (*GrpcServer, error) {
var listener net.Listener
if addr, _ := GetStringOptionWithEnv(config, "grpc", "listen"); addr != "" {
if addr, _ := config.GetString("grpc", "listen"); addr != "" {
var err error
listener, err = net.Listen("tcp", addr)
if err != nil {
@ -116,30 +108,13 @@ func (s *GrpcServer) Run() error {
func (s *GrpcServer) Close() {
s.conn.GracefulStop()
if cr, ok := s.creds.(*reloadableCredentials); ok {
cr.Close()
}
}
func (s *GrpcServer) LookupResumeId(ctx context.Context, request *LookupResumeIdRequest) (*LookupResumeIdReply, error) {
statsGrpcServerCalls.WithLabelValues("LookupResumeId").Inc()
// TODO: Remove debug logging
log.Printf("Lookup session for resume id %s", request.ResumeId)
session := s.hub.GetSessionByResumeId(request.ResumeId)
if session == nil {
return nil, status.Error(codes.NotFound, "no such room session id")
}
return &LookupResumeIdReply{
SessionId: session.PublicId(),
}, nil
}
func (s *GrpcServer) LookupSessionId(ctx context.Context, request *LookupSessionIdRequest) (*LookupSessionIdReply, error) {
statsGrpcServerCalls.WithLabelValues("LookupSessionId").Inc()
// TODO: Remove debug logging
log.Printf("Lookup session id for room session id %s", request.RoomSessionId)
sid, err := s.hub.GetSessionIdByRoomSessionId(request.RoomSessionId)
sid, err := s.hub.roomSessions.GetSessionId(request.RoomSessionId)
if errors.Is(err, ErrNoSuchRoomSession) {
return nil, status.Error(codes.NotFound, "no such room session id")
} else if err != nil {
@ -229,7 +204,7 @@ func (s *GrpcServer) GetSessionCount(ctx context.Context, request *GetSessionCou
return nil, status.Error(codes.InvalidArgument, "invalid url")
}
backend := s.hub.GetBackend(u)
backend := s.hub.backend.GetBackend(u)
if backend == nil {
return nil, status.Error(codes.NotFound, "no such backend")
}
@ -238,21 +213,3 @@ func (s *GrpcServer) GetSessionCount(ctx context.Context, request *GetSessionCou
Count: uint32(backend.Len()),
}, nil
}
func (s *GrpcServer) ProxySession(request RpcSessions_ProxySessionServer) error {
statsGrpcServerCalls.WithLabelValues("ProxySession").Inc()
hub, ok := s.hub.(*Hub)
if !ok {
return status.Error(codes.Internal, "invalid hub type")
}
client, err := newRemoteGrpcClient(hub, request)
if err != nil {
return err
}
sid := hub.registerClient(client)
defer hub.unregisterClient(sid)
return client.run()
}

View file

@ -98,7 +98,6 @@ func NewGrpcServerForTest(t *testing.T) (server *GrpcServer, addr string) {
}
func Test_GrpcServer_ReloadCerts(t *testing.T) {
CatchLogForTest(t)
key, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
t.Fatal(err)
@ -179,7 +178,6 @@ func Test_GrpcServer_ReloadCerts(t *testing.T) {
}
func Test_GrpcServer_ReloadCA(t *testing.T) {
CatchLogForTest(t)
serverKey, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
t.Fatal(err)

View file

@ -26,18 +26,8 @@ option go_package = "github.com/strukturag/nextcloud-spreed-signaling;signaling"
package signaling;
service RpcSessions {
rpc LookupResumeId(LookupResumeIdRequest) returns (LookupResumeIdReply) {}
rpc LookupSessionId(LookupSessionIdRequest) returns (LookupSessionIdReply) {}
rpc IsSessionInCall(IsSessionInCallRequest) returns (IsSessionInCallReply) {}
rpc ProxySession(stream ClientSessionMessage) returns (stream ServerSessionMessage) {}
}
message LookupResumeIdRequest {
string resumeId = 1;
}
message LookupResumeIdReply {
string sessionId = 1;
}
message LookupSessionIdRequest {
@ -59,11 +49,3 @@ message IsSessionInCallRequest {
message IsSessionInCallReply {
bool inCall = 1;
}
message ClientSessionMessage {
bytes message = 1;
}
message ServerSessionMessage {
bytes message = 1;
}

View file

@ -29,7 +29,6 @@ import (
)
func TestHttpClientPool(t *testing.T) {
t.Parallel()
if _, err := NewHttpClientPool(0, false); err == nil {
t.Error("should not be possible to create empty pool")
}

hub.go (646 changes)

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -258,8 +258,8 @@ type JanusGateway struct {
// return gateway, nil
// }
func NewJanusGateway(ctx context.Context, wsURL string, listener GatewayListener) (*JanusGateway, error) {
conn, _, err := janusDialer.DialContext(ctx, wsURL, nil)
func NewJanusGateway(wsURL string, listener GatewayListener) (*JanusGateway, error) {
conn, _, err := janusDialer.Dial(wsURL, nil)
if err != nil {
return nil, err
}
@ -310,7 +310,7 @@ func (gateway *JanusGateway) cancelTransactions() {
t.quit()
}(t)
}
clear(gateway.transactions)
gateway.transactions = make(map[uint64]*transaction)
gateway.Unlock()
}

View file

@ -66,7 +66,7 @@ type McuInitiator interface {
}
type Mcu interface {
Start(ctx context.Context) error
Start() error
Stop()
Reload(config *goconf.ConfigFile)
@ -76,48 +76,7 @@ type Mcu interface {
GetStats() interface{}
NewPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType StreamType, bitrate int, mediaTypes MediaType, initiator McuInitiator) (McuPublisher, error)
NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType, initiator McuInitiator) (McuSubscriber, error)
}
// PublisherStream contains the available properties when creating a
// remote publisher in Janus.
type PublisherStream struct {
Mid string `json:"mid"`
Mindex int `json:"mindex"`
Type string `json:"type"`
Description string `json:"description,omitempty"`
Disabled bool `json:"disabled,omitempty"`
// For types "audio" and "video"
Codec string `json:"codec,omitempty"`
// For type "audio"
Stereo bool `json:"stereo,omitempty"`
Fec bool `json:"fec,omitempty"`
Dtx bool `json:"dtx,omitempty"`
// For type "video"
Simulcast bool `json:"simulcast,omitempty"`
Svc bool `json:"svc,omitempty"`
ProfileH264 string `json:"h264_profile,omitempty"`
ProfileVP9 string `json:"vp9_profile,omitempty"`
ExtIdVideoOrientation int `json:"videoorient_ext_id,omitempty"`
ExtIdPlayoutDelay int `json:"playoutdelay_ext_id,omitempty"`
}
type RemotePublisherController interface {
PublisherId() string
StartPublishing(ctx context.Context, publisher McuRemotePublisherProperties) error
GetStreams(ctx context.Context) ([]PublisherStream, error)
}
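RemotePublisherController is the hook through which a remote MCU learns about and starts a publisher that is owned elsewhere. A hedged stub of an implementation (type name and method bodies are placeholders, not the project's actual controller):
// Hypothetical RemotePublisherController stub; bodies are placeholders.
type sampleRemoteController struct {
	publisherId string
}

func (c *sampleRemoteController) PublisherId() string {
	return c.publisherId
}

func (c *sampleRemoteController) StartPublishing(ctx context.Context, publisher McuRemotePublisherProperties) error {
	// Would ask the owning server to forward media to
	// publisher.Port() and publisher.RtcpPort() on this host.
	return nil
}

func (c *sampleRemoteController) GetStreams(ctx context.Context) ([]PublisherStream, error) {
	// Would return the streams negotiated with the remote publisher.
	return nil, nil
}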
type RemoteMcu interface {
NewRemotePublisher(ctx context.Context, listener McuListener, controller RemotePublisherController, streamType StreamType) (McuRemotePublisher, error)
NewRemoteSubscriber(ctx context.Context, listener McuListener, publisher McuRemotePublisher) (McuRemoteSubscriber, error)
NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType) (McuSubscriber, error)
}
type StreamType string
@ -157,10 +116,6 @@ type McuPublisher interface {
HasMedia(MediaType) bool
SetMedia(MediaType)
GetStreams(ctx context.Context) ([]PublisherStream, error)
PublishRemote(ctx context.Context, remoteId string, hostname string, port int, rtcpPort int) error
UnpublishRemote(ctx context.Context, remoteId string) error
}
type McuSubscriber interface {
@ -168,18 +123,3 @@ type McuSubscriber interface {
Publisher() string
}
type McuRemotePublisherProperties interface {
Port() int
RtcpPort() int
}
type McuRemotePublisher interface {
McuClient
McuRemotePublisherProperties
}
type McuRemoteSubscriber interface {
McuSubscriber
}

View file

@ -28,43 +28,3 @@ import (
func TestCommonMcuStats(t *testing.T) {
collectAndLint(t, commonMcuStats...)
}
type MockMcuListener struct {
publicId string
}
func (m *MockMcuListener) PublicId() string {
return m.publicId
}
func (m *MockMcuListener) OnUpdateOffer(client McuClient, offer map[string]interface{}) {
}
func (m *MockMcuListener) OnIceCandidate(client McuClient, candidate interface{}) {
}
func (m *MockMcuListener) OnIceCompleted(client McuClient) {
}
func (m *MockMcuListener) SubscriberSidUpdated(subscriber McuSubscriber) {
}
func (m *MockMcuListener) PublisherClosed(publisher McuPublisher) {
}
func (m *MockMcuListener) SubscriberClosed(subscriber McuSubscriber) {
}
type MockMcuInitiator struct {
country string
}
func (m *MockMcuInitiator) Country() string {
return m.country
}

File diff suppressed because it is too large

View file

@ -1,216 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2017 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"context"
"log"
"reflect"
"strconv"
"sync"
"github.com/notedit/janus-go"
)
type mcuJanusClient struct {
mcu *mcuJanus
listener McuListener
mu sync.Mutex // nolint
id uint64
session uint64
roomId uint64
sid string
streamType StreamType
maxBitrate int
handle *JanusHandle
handleId uint64
closeChan chan struct{}
deferred chan func()
handleEvent func(event *janus.EventMsg)
handleHangup func(event *janus.HangupMsg)
handleDetached func(event *janus.DetachedMsg)
handleConnected func(event *janus.WebRTCUpMsg)
handleSlowLink func(event *janus.SlowLinkMsg)
handleMedia func(event *janus.MediaMsg)
}
func (c *mcuJanusClient) Id() string {
return strconv.FormatUint(c.id, 10)
}
func (c *mcuJanusClient) Sid() string {
return c.sid
}
func (c *mcuJanusClient) StreamType() StreamType {
return c.streamType
}
func (c *mcuJanusClient) MaxBitrate() int {
return c.maxBitrate
}
func (c *mcuJanusClient) Close(ctx context.Context) {
}
func (c *mcuJanusClient) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) {
}
func (c *mcuJanusClient) closeClient(ctx context.Context) bool {
if handle := c.handle; handle != nil {
c.handle = nil
close(c.closeChan)
if _, err := handle.Detach(ctx); err != nil {
if e, ok := err.(*janus.ErrorMsg); !ok || e.Err.Code != JANUS_ERROR_HANDLE_NOT_FOUND {
log.Println("Could not detach client", handle.Id, err)
}
}
return true
}
return false
}
func (c *mcuJanusClient) run(handle *JanusHandle, closeChan <-chan struct{}) {
loop:
for {
select {
case msg := <-handle.Events:
switch t := msg.(type) {
case *janus.EventMsg:
c.handleEvent(t)
case *janus.HangupMsg:
c.handleHangup(t)
case *janus.DetachedMsg:
c.handleDetached(t)
case *janus.MediaMsg:
c.handleMedia(t)
case *janus.WebRTCUpMsg:
c.handleConnected(t)
case *janus.SlowLinkMsg:
c.handleSlowLink(t)
case *TrickleMsg:
c.handleTrickle(t)
default:
log.Println("Received unsupported event type", msg, reflect.TypeOf(msg))
}
case f := <-c.deferred:
f()
case <-closeChan:
break loop
}
}
}
func (c *mcuJanusClient) sendOffer(ctx context.Context, offer map[string]interface{}, callback func(error, map[string]interface{})) {
handle := c.handle
if handle == nil {
callback(ErrNotConnected, nil)
return
}
configure_msg := map[string]interface{}{
"request": "configure",
"audio": true,
"video": true,
"data": true,
}
answer_msg, err := handle.Message(ctx, configure_msg, offer)
if err != nil {
callback(err, nil)
return
}
callback(nil, answer_msg.Jsep)
}
func (c *mcuJanusClient) sendAnswer(ctx context.Context, answer map[string]interface{}, callback func(error, map[string]interface{})) {
handle := c.handle
if handle == nil {
callback(ErrNotConnected, nil)
return
}
start_msg := map[string]interface{}{
"request": "start",
"room": c.roomId,
}
start_response, err := handle.Message(ctx, start_msg, answer)
if err != nil {
callback(err, nil)
return
}
log.Println("Started listener", start_response)
callback(nil, nil)
}
func (c *mcuJanusClient) sendCandidate(ctx context.Context, candidate interface{}, callback func(error, map[string]interface{})) {
handle := c.handle
if handle == nil {
callback(ErrNotConnected, nil)
return
}
if _, err := handle.Trickle(ctx, candidate); err != nil {
callback(err, nil)
return
}
callback(nil, nil)
}
func (c *mcuJanusClient) handleTrickle(event *TrickleMsg) {
if event.Candidate.Completed {
c.listener.OnIceCompleted(c)
} else {
c.listener.OnIceCandidate(c, event.Candidate)
}
}
func (c *mcuJanusClient) selectStream(ctx context.Context, stream *streamSelection, callback func(error, map[string]interface{})) {
handle := c.handle
if handle == nil {
callback(ErrNotConnected, nil)
return
}
if stream == nil || !stream.HasValues() {
callback(nil, nil)
return
}
configure_msg := map[string]interface{}{
"request": "configure",
}
if stream != nil {
stream.AddToMessage(configure_msg)
}
_, err := handle.Message(ctx, configure_msg, nil)
if err != nil {
callback(err, nil)
return
}
callback(nil, nil)
}

View file

@ -1,457 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2017 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"context"
"errors"
"fmt"
"log"
"strconv"
"strings"
"sync/atomic"
"github.com/notedit/janus-go"
"github.com/pion/sdp/v3"
)
const (
ExtensionUrlPlayoutDelay = "http://www.webrtc.org/experiments/rtp-hdrext/playout-delay"
ExtensionUrlVideoOrientation = "urn:3gpp:video-orientation"
)
const (
sdpHasOffer = 1
sdpHasAnswer = 2
)
type mcuJanusPublisher struct {
mcuJanusClient
id string
bitrate int
mediaTypes MediaType
stats publisherStatsCounter
sdpFlags Flags
sdpReady *Closer
offerSdp atomic.Pointer[sdp.SessionDescription]
answerSdp atomic.Pointer[sdp.SessionDescription]
}
func (p *mcuJanusPublisher) handleEvent(event *janus.EventMsg) {
if videoroom := getPluginStringValue(event.Plugindata, pluginVideoRoom, "videoroom"); videoroom != "" {
ctx := context.TODO()
switch videoroom {
case "destroyed":
log.Printf("Publisher %d: associated room has been destroyed, closing", p.handleId)
go p.Close(ctx)
case "slow_link":
// Ignore, processed through "handleSlowLink" in the general events.
default:
log.Printf("Unsupported videoroom publisher event in %d: %+v", p.handleId, event)
}
} else {
log.Printf("Unsupported publisher event in %d: %+v", p.handleId, event)
}
}
func (p *mcuJanusPublisher) handleHangup(event *janus.HangupMsg) {
log.Printf("Publisher %d received hangup (%s), closing", p.handleId, event.Reason)
go p.Close(context.Background())
}
func (p *mcuJanusPublisher) handleDetached(event *janus.DetachedMsg) {
log.Printf("Publisher %d received detached, closing", p.handleId)
go p.Close(context.Background())
}
func (p *mcuJanusPublisher) handleConnected(event *janus.WebRTCUpMsg) {
log.Printf("Publisher %d received connected", p.handleId)
p.mcu.publisherConnected.Notify(getStreamId(p.id, p.streamType))
}
func (p *mcuJanusPublisher) handleSlowLink(event *janus.SlowLinkMsg) {
if event.Uplink {
log.Printf("Publisher %s (%d) is reporting %d lost packets on the uplink (Janus -> client)", p.listener.PublicId(), p.handleId, event.Lost)
} else {
log.Printf("Publisher %s (%d) is reporting %d lost packets on the downlink (client -> Janus)", p.listener.PublicId(), p.handleId, event.Lost)
}
}
func (p *mcuJanusPublisher) handleMedia(event *janus.MediaMsg) {
mediaType := StreamType(event.Type)
if mediaType == StreamTypeVideo && p.streamType == StreamTypeScreen {
// We want to differentiate between audio, video and screensharing
mediaType = p.streamType
}
p.stats.EnableStream(mediaType, event.Receiving)
}
func (p *mcuJanusPublisher) HasMedia(mt MediaType) bool {
return (p.mediaTypes & mt) == mt
}
func (p *mcuJanusPublisher) SetMedia(mt MediaType) {
p.mediaTypes = mt
}
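HasMedia treats MediaType as a bit mask and reports whether every requested bit is set, so a single query can cover several media types at once. A small illustration, assuming the project's bit-flag constants (the names MediaTypeAudio, MediaTypeVideo and MediaTypeScreen are assumed here):
// Illustration of the HasMedia bit-mask check; the constant names are
// assumed bit flags (e.g. MediaTypeAudio = 1<<0, MediaTypeVideo = 1<<1).
p.SetMedia(MediaTypeAudio | MediaTypeVideo)
p.HasMedia(MediaTypeAudio)                   // true: the audio bit is set
p.HasMedia(MediaTypeAudio | MediaTypeVideo)  // true: both bits are set
p.HasMedia(MediaTypeVideo | MediaTypeScreen) // false: the screen bit is missing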
func (p *mcuJanusPublisher) NotifyReconnected() {
ctx := context.TODO()
handle, session, roomId, _, err := p.mcu.getOrCreatePublisherHandle(ctx, p.id, p.streamType, p.bitrate)
if err != nil {
log.Printf("Could not reconnect publisher %s: %s", p.id, err)
// TODO(jojo): Retry
return
}
p.handle = handle
p.handleId = handle.Id
p.session = session
p.roomId = roomId
log.Printf("Publisher %s reconnected on handle %d", p.id, p.handleId)
}
func (p *mcuJanusPublisher) Close(ctx context.Context) {
notify := false
p.mu.Lock()
if handle := p.handle; handle != nil && p.roomId != 0 {
destroy_msg := map[string]interface{}{
"request": "destroy",
"room": p.roomId,
}
if _, err := handle.Request(ctx, destroy_msg); err != nil {
log.Printf("Error destroying room %d: %s", p.roomId, err)
} else {
log.Printf("Room %d destroyed", p.roomId)
}
p.mcu.mu.Lock()
delete(p.mcu.publishers, getStreamId(p.id, p.streamType))
p.mcu.mu.Unlock()
p.roomId = 0
notify = true
}
p.closeClient(ctx)
p.mu.Unlock()
p.stats.Reset()
if notify {
statsPublishersCurrent.WithLabelValues(string(p.streamType)).Dec()
p.mcu.unregisterClient(p)
p.listener.PublisherClosed(p)
}
p.mcuJanusClient.Close(ctx)
}
func (p *mcuJanusPublisher) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) {
statsMcuMessagesTotal.WithLabelValues(data.Type).Inc()
jsep_msg := data.Payload
switch data.Type {
case "offer":
p.deferred <- func() {
if data.offerSdp == nil {
// Should have been checked before.
go callback(errors.New("No sdp found in offer"), nil)
return
}
p.offerSdp.Store(data.offerSdp)
p.sdpFlags.Add(sdpHasOffer)
if p.sdpFlags.Get() == sdpHasAnswer|sdpHasOffer {
p.sdpReady.Close()
}
// TODO Tear down previous publisher and get a new one if sid does
// not match?
msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
defer cancel()
p.sendOffer(msgctx, jsep_msg, func(err error, jsep map[string]interface{}) {
if err != nil {
callback(err, jsep)
return
}
sdpData, found := jsep["sdp"]
if !found {
log.Printf("No sdp found in answer %+v", jsep)
} else {
sdpString, ok := sdpData.(string)
if !ok {
log.Printf("Invalid sdp found in answer %+v", jsep)
} else {
var answerSdp sdp.SessionDescription
if err := answerSdp.UnmarshalString(sdpString); err != nil {
log.Printf("Error parsing answer sdp %+v: %s", sdpString, err)
p.answerSdp.Store(nil)
p.sdpFlags.Remove(sdpHasAnswer)
} else {
p.answerSdp.Store(&answerSdp)
p.sdpFlags.Add(sdpHasAnswer)
if p.sdpFlags.Get() == sdpHasAnswer|sdpHasOffer {
p.sdpReady.Close()
}
}
}
}
callback(nil, jsep)
})
}
case "candidate":
p.deferred <- func() {
msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
defer cancel()
if data.Sid == "" || data.Sid == p.Sid() {
p.sendCandidate(msgctx, jsep_msg["candidate"], callback)
} else {
go callback(fmt.Errorf("Candidate message sid (%s) does not match publisher sid (%s)", data.Sid, p.Sid()), nil)
}
}
case "endOfCandidates":
// Ignore
default:
go callback(fmt.Errorf("Unsupported message type: %s", data.Type), nil)
}
}
func getFmtpValue(fmtp string, key string) (string, bool) {
parts := strings.Split(fmtp, ";")
for _, part := range parts {
kv := strings.SplitN(part, "=", 2)
if len(kv) != 2 {
continue
}
if strings.EqualFold(strings.TrimSpace(kv[0]), key) {
return strings.TrimSpace(kv[1]), true
}
}
return "", false
}
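getFmtpValue does a case-insensitive key lookup and trims whitespace around both key and value, which the tests further below exercise. For example:
// Examples (mirroring the test cases further below):
v, ok := getFmtpValue("level-asymmetry-allowed=1;packetization-mode=0;profile-level-id=42001f", "profile-level-id")
// v == "42001f", ok == true
v, ok = getFmtpValue("profile-id = 0", "profile-id")
// v == "0", ok == true
v, ok = getFmtpValue("level-asymmetry-allowed=1;packetization-mode=0", "profile-level-id")
// v == "", ok == false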
func (p *mcuJanusPublisher) GetStreams(ctx context.Context) ([]PublisherStream, error) {
offerSdp := p.offerSdp.Load()
answerSdp := p.answerSdp.Load()
if offerSdp == nil || answerSdp == nil {
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-p.sdpReady.C:
offerSdp = p.offerSdp.Load()
answerSdp = p.answerSdp.Load()
if offerSdp == nil || answerSdp == nil {
// Can only happen with invalid SDPs.
return nil, errors.New("no offer and/or answer processed yet")
}
}
}
var streams []PublisherStream
for idx, m := range answerSdp.MediaDescriptions {
mid, found := m.Attribute(sdp.AttrKeyMID)
if !found {
continue
}
s := PublisherStream{
Mid: mid,
Mindex: idx,
Type: m.MediaName.Media,
}
if len(m.MediaName.Formats) == 0 {
continue
}
if strings.EqualFold(s.Type, "application") && strings.EqualFold(m.MediaName.Formats[0], "webrtc-datachannel") {
s.Type = "data"
streams = append(streams, s)
continue
}
pt, err := strconv.ParseInt(m.MediaName.Formats[0], 10, 8)
if err != nil {
continue
}
answerCodec, err := answerSdp.GetCodecForPayloadType(uint8(pt))
if err != nil {
continue
}
if strings.EqualFold(s.Type, "audio") {
s.Codec = answerCodec.Name
if value, found := getFmtpValue(answerCodec.Fmtp, "useinbandfec"); found && value == "1" {
s.Fec = true
}
if value, found := getFmtpValue(answerCodec.Fmtp, "usedtx"); found && value == "1" {
s.Dtx = true
}
if value, found := getFmtpValue(answerCodec.Fmtp, "stereo"); found && value == "1" {
s.Stereo = true
}
} else if strings.EqualFold(s.Type, "video") {
s.Codec = answerCodec.Name
// TODO: Determine if SVC is used.
s.Svc = false
if strings.EqualFold(answerCodec.Name, "vp9") {
// Parse VP9 profile from "profile-id=XXX"
// Example: "a=fmtp:98 profile-id=0"
if profile, found := getFmtpValue(answerCodec.Fmtp, "profile-id"); found {
s.ProfileVP9 = profile
}
} else if strings.EqualFold(answerCodec.Name, "h264") {
// Parse H.264 profile from "profile-level-id=XXX"
// Example: "a=fmtp:104 level-asymmetry-allowed=1;packetization-mode=0;profile-level-id=42001f"
if profile, found := getFmtpValue(answerCodec.Fmtp, "profile-level-id"); found {
s.ProfileH264 = profile
}
}
var extmap sdp.ExtMap
for _, a := range m.Attributes {
switch a.Key {
case sdp.AttrKeyExtMap:
if err := extmap.Unmarshal(extmap.Name() + ":" + a.Value); err != nil {
log.Printf("Error parsing extmap %s: %s", a.Value, err)
continue
}
switch extmap.URI.String() {
case ExtensionUrlPlayoutDelay:
s.ExtIdPlayoutDelay = extmap.Value
case ExtensionUrlVideoOrientation:
s.ExtIdVideoOrientation = extmap.Value
}
case "simulcast":
s.Simulcast = true
case sdp.AttrKeySSRCGroup:
if strings.HasPrefix(a.Value, "SIM ") {
s.Simulcast = true
}
}
}
for _, a := range offerSdp.MediaDescriptions[idx].Attributes {
switch a.Key {
case "simulcast":
s.Simulcast = true
case sdp.AttrKeySSRCGroup:
if strings.HasPrefix(a.Value, "SIM ") {
s.Simulcast = true
}
}
}
} else if strings.EqualFold(s.Type, "data") { // nolint
// Already handled above.
} else {
log.Printf("Skip type %s", s.Type)
continue
}
streams = append(streams, s)
}
return streams, nil
}
func getPublisherRemoteId(id string, remoteId string) string {
return fmt.Sprintf("%s@%s", id, remoteId)
}
func (p *mcuJanusPublisher) PublishRemote(ctx context.Context, remoteId string, hostname string, port int, rtcpPort int) error {
msg := map[string]interface{}{
"request": "publish_remotely",
"room": p.roomId,
"publisher_id": streamTypeUserIds[p.streamType],
"remote_id": getPublisherRemoteId(p.id, remoteId),
"host": hostname,
"port": port,
"rtcp_port": rtcpPort,
}
response, err := p.handle.Request(ctx, msg)
if err != nil {
return err
}
errorMessage := getPluginStringValue(response.PluginData, pluginVideoRoom, "error")
errorCode := getPluginIntValue(response.PluginData, pluginVideoRoom, "error_code")
if errorMessage != "" || errorCode != 0 {
if errorCode == 0 {
errorCode = 500
}
if errorMessage == "" {
errorMessage = "unknown error"
}
return &janus.ErrorMsg{
Err: janus.ErrorData{
Code: int(errorCode),
Reason: errorMessage,
},
}
}
log.Printf("Publishing %s to %s (port=%d, rtcpPort=%d) for %s", p.id, hostname, port, rtcpPort, remoteId)
return nil
}
func (p *mcuJanusPublisher) UnpublishRemote(ctx context.Context, remoteId string) error {
msg := map[string]interface{}{
"request": "unpublish_remotely",
"room": p.roomId,
"publisher_id": streamTypeUserIds[p.streamType],
"remote_id": getPublisherRemoteId(p.id, remoteId),
}
response, err := p.handle.Request(ctx, msg)
if err != nil {
return err
}
errorMessage := getPluginStringValue(response.PluginData, pluginVideoRoom, "error")
errorCode := getPluginIntValue(response.PluginData, pluginVideoRoom, "error_code")
if errorMessage != "" || errorCode != 0 {
if errorCode == 0 {
errorCode = 500
}
if errorMessage == "" {
errorMessage = "unknown error"
}
return &janus.ErrorMsg{
Err: janus.ErrorData{
Code: int(errorCode),
Reason: errorMessage,
},
}
}
log.Printf("Unpublished remote %s for %s", p.id, remoteId)
return nil
}
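PublishRemote and UnpublishRemote wrap the videoroom plugin's publish_remotely / unpublish_remotely requests and translate plugin errors into janus.ErrorMsg values. A hedged usage sketch (the remote id, host and ports are illustrative values):
// Sketch: forward a publisher to another Janus instance, then undo it.
// "node2", the host and the ports are illustrative values.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := publisher.PublishRemote(ctx, "node2", "10.0.0.2", 10000, 10001); err != nil {
	log.Printf("publish_remotely failed: %s", err)
	return
}
defer func() {
	if err := publisher.UnpublishRemote(ctx, "node2"); err != nil {
		log.Printf("unpublish_remotely failed: %s", err)
	}
}()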

View file

@ -1,92 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2024 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"testing"
)
func TestGetFmtpValueH264(t *testing.T) {
testcases := []struct {
fmtp string
profile string
}{
{
"",
"",
},
{
"level-asymmetry-allowed=1;packetization-mode=0;profile-level-id=42001f",
"42001f",
},
{
"level-asymmetry-allowed=1;packetization-mode=0",
"",
},
{
"level-asymmetry-allowed=1; packetization-mode=0; profile-level-id = 42001f",
"42001f",
},
}
for _, tc := range testcases {
value, found := getFmtpValue(tc.fmtp, "profile-level-id")
if !found && tc.profile != "" {
t.Errorf("did not find profile \"%s\" in \"%s\"", tc.profile, tc.fmtp)
} else if found && tc.profile == "" {
t.Errorf("did not expect profile in \"%s\" but got \"%s\"", tc.fmtp, value)
} else if found && tc.profile != value {
t.Errorf("expected profile \"%s\" in \"%s\" but got \"%s\"", tc.profile, tc.fmtp, value)
}
}
}
func TestGetFmtpValueVP9(t *testing.T) {
testcases := []struct {
fmtp string
profile string
}{
{
"",
"",
},
{
"profile-id=0",
"0",
},
{
"profile-id = 0",
"0",
},
}
for _, tc := range testcases {
value, found := getFmtpValue(tc.fmtp, "profile-id")
if !found && tc.profile != "" {
t.Errorf("did not find profile \"%s\" in \"%s\"", tc.profile, tc.fmtp)
} else if found && tc.profile == "" {
t.Errorf("did not expect profile in \"%s\" but got \"%s\"", tc.fmtp, value)
} else if found && tc.profile != value {
t.Errorf("expected profile \"%s\" in \"%s\" but got \"%s\"", tc.profile, tc.fmtp, value)
}
}
}

View file

@ -1,150 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2024 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"context"
"log"
"sync/atomic"
"github.com/notedit/janus-go"
)
type mcuJanusRemotePublisher struct {
mcuJanusPublisher
ref atomic.Int64
port int
rtcpPort int
}
func (p *mcuJanusRemotePublisher) addRef() int64 {
return p.ref.Add(1)
}
func (p *mcuJanusRemotePublisher) release() bool {
return p.ref.Add(-1) == 0
}
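A remote publisher can be shared by several subscribers, so it is reference counted: Close (below) returns early until release drops the count to zero. A minimal analog of that contract using sync/atomic (the teardown callback is a placeholder for the actual detach logic):
// Minimal analog of the addRef/release contract; "teardown" is a
// placeholder for the actual detach logic.
type refCounted struct {
	ref atomic.Int64
}

func (r *refCounted) addRef() int64 { return r.ref.Add(1) }

func (r *refCounted) close(teardown func()) {
	if r.ref.Add(-1) == 0 {
		teardown() // only the last holder tears the resource down
	}
}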
func (p *mcuJanusRemotePublisher) Port() int {
return p.port
}
func (p *mcuJanusRemotePublisher) RtcpPort() int {
return p.rtcpPort
}
func (p *mcuJanusRemotePublisher) handleEvent(event *janus.EventMsg) {
if videoroom := getPluginStringValue(event.Plugindata, pluginVideoRoom, "videoroom"); videoroom != "" {
ctx := context.TODO()
switch videoroom {
case "destroyed":
log.Printf("Remote publisher %d: associated room has been destroyed, closing", p.handleId)
go p.Close(ctx)
case "slow_link":
// Ignore, processed through "handleSlowLink" in the general events.
default:
log.Printf("Unsupported videoroom remote publisher event in %d: %+v", p.handleId, event)
}
} else {
log.Printf("Unsupported remote publisher event in %d: %+v", p.handleId, event)
}
}
func (p *mcuJanusRemotePublisher) handleHangup(event *janus.HangupMsg) {
log.Printf("Remote publisher %d received hangup (%s), closing", p.handleId, event.Reason)
go p.Close(context.Background())
}
func (p *mcuJanusRemotePublisher) handleDetached(event *janus.DetachedMsg) {
log.Printf("Remote publisher %d received detached, closing", p.handleId)
go p.Close(context.Background())
}
func (p *mcuJanusRemotePublisher) handleConnected(event *janus.WebRTCUpMsg) {
log.Printf("Remote publisher %d received connected", p.handleId)
p.mcu.publisherConnected.Notify(getStreamId(p.id, p.streamType))
}
func (p *mcuJanusRemotePublisher) handleSlowLink(event *janus.SlowLinkMsg) {
if event.Uplink {
log.Printf("Remote publisher %s (%d) is reporting %d lost packets on the uplink (Janus -> client)", p.listener.PublicId(), p.handleId, event.Lost)
} else {
log.Printf("Remote publisher %s (%d) is reporting %d lost packets on the downlink (client -> Janus)", p.listener.PublicId(), p.handleId, event.Lost)
}
}
func (p *mcuJanusRemotePublisher) NotifyReconnected() {
ctx := context.TODO()
handle, session, roomId, _, err := p.mcu.getOrCreatePublisherHandle(ctx, p.id, p.streamType, p.bitrate)
if err != nil {
log.Printf("Could not reconnect remote publisher %s: %s", p.id, err)
// TODO(jojo): Retry
return
}
p.handle = handle
p.handleId = handle.Id
p.session = session
p.roomId = roomId
log.Printf("Remote publisher %s reconnected on handle %d", p.id, p.handleId)
}
func (p *mcuJanusRemotePublisher) Close(ctx context.Context) {
if !p.release() {
return
}
p.mu.Lock()
if handle := p.handle; handle != nil {
response, err := p.handle.Request(ctx, map[string]interface{}{
"request": "remove_remote_publisher",
"room": p.roomId,
"id": streamTypeUserIds[p.streamType],
})
if err != nil {
log.Printf("Error removing remote publisher %s in room %d: %s", p.id, p.roomId, err)
} else {
log.Printf("Removed remote publisher: %+v", response)
}
if p.roomId != 0 {
destroy_msg := map[string]interface{}{
"request": "destroy",
"room": p.roomId,
}
if _, err := handle.Request(ctx, destroy_msg); err != nil {
log.Printf("Error destroying room %d: %s", p.roomId, err)
} else {
log.Printf("Room %d destroyed", p.roomId)
}
p.mcu.mu.Lock()
delete(p.mcu.remotePublishers, getStreamId(p.id, p.streamType))
p.mcu.mu.Unlock()
p.roomId = 0
}
}
p.closeClient(ctx)
p.mu.Unlock()
}

View file

@ -1,115 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2024 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"context"
"log"
"strconv"
"sync/atomic"
"github.com/notedit/janus-go"
)
type mcuJanusRemoteSubscriber struct {
mcuJanusSubscriber
remote atomic.Pointer[mcuJanusRemotePublisher]
}
func (p *mcuJanusRemoteSubscriber) handleEvent(event *janus.EventMsg) {
if videoroom := getPluginStringValue(event.Plugindata, pluginVideoRoom, "videoroom"); videoroom != "" {
ctx := context.TODO()
switch videoroom {
case "destroyed":
log.Printf("Remote subscriber %d: associated room has been destroyed, closing", p.handleId)
go p.Close(ctx)
case "event":
// Handle renegotiations, but ignore other events like selected
// substream / temporal layer.
if getPluginStringValue(event.Plugindata, pluginVideoRoom, "configured") == "ok" &&
event.Jsep != nil && event.Jsep["type"] == "offer" && event.Jsep["sdp"] != nil {
p.listener.OnUpdateOffer(p, event.Jsep)
}
case "slow_link":
// Ignore, processed through "handleSlowLink" in the general events.
default:
log.Printf("Unsupported videoroom event %s for remote subscriber %d: %+v", videoroom, p.handleId, event)
}
} else {
log.Printf("Unsupported event for remote subscriber %d: %+v", p.handleId, event)
}
}
func (p *mcuJanusRemoteSubscriber) handleHangup(event *janus.HangupMsg) {
log.Printf("Remote subscriber %d received hangup (%s), closing", p.handleId, event.Reason)
go p.Close(context.Background())
}
func (p *mcuJanusRemoteSubscriber) handleDetached(event *janus.DetachedMsg) {
log.Printf("Remote subscriber %d received detached, closing", p.handleId)
go p.Close(context.Background())
}
func (p *mcuJanusRemoteSubscriber) handleConnected(event *janus.WebRTCUpMsg) {
log.Printf("Remote subscriber %d received connected", p.handleId)
p.mcu.SubscriberConnected(p.Id(), p.publisher, p.streamType)
}
func (p *mcuJanusRemoteSubscriber) handleSlowLink(event *janus.SlowLinkMsg) {
if event.Uplink {
log.Printf("Remote subscriber %s (%d) is reporting %d lost packets on the uplink (Janus -> client)", p.listener.PublicId(), p.handleId, event.Lost)
} else {
log.Printf("Remote subscriber %s (%d) is reporting %d lost packets on the downlink (client -> Janus)", p.listener.PublicId(), p.handleId, event.Lost)
}
}
func (p *mcuJanusRemoteSubscriber) handleMedia(event *janus.MediaMsg) {
// Only triggered for publishers
}
func (p *mcuJanusRemoteSubscriber) NotifyReconnected() {
ctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
defer cancel()
handle, pub, err := p.mcu.getOrCreateSubscriberHandle(ctx, p.publisher, p.streamType)
if err != nil {
// TODO(jojo): Retry?
log.Printf("Could not reconnect remote subscriber for publisher %s: %s", p.publisher, err)
p.Close(context.Background())
return
}
p.handle = handle
p.handleId = handle.Id
p.roomId = pub.roomId
p.sid = strconv.FormatUint(handle.Id, 10)
p.listener.SubscriberSidUpdated(p)
log.Printf("Subscriber %d for publisher %s reconnected on handle %d", p.id, p.publisher, p.handleId)
}
func (p *mcuJanusRemoteSubscriber) Close(ctx context.Context) {
p.mcuJanusSubscriber.Close(ctx)
if remote := p.remote.Swap(nil); remote != nil {
remote.Close(context.Background())
}
}

View file

@ -1,110 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2017 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"database/sql"
"fmt"
)
type streamSelection struct {
substream sql.NullInt16
temporal sql.NullInt16
audio sql.NullBool
video sql.NullBool
}
func (s *streamSelection) HasValues() bool {
return s.substream.Valid || s.temporal.Valid || s.audio.Valid || s.video.Valid
}
func (s *streamSelection) AddToMessage(message map[string]interface{}) {
if s.substream.Valid {
message["substream"] = s.substream.Int16
}
if s.temporal.Valid {
message["temporal"] = s.temporal.Int16
}
if s.audio.Valid {
message["audio"] = s.audio.Bool
}
if s.video.Valid {
message["video"] = s.video.Bool
}
}
func parseStreamSelection(payload map[string]interface{}) (*streamSelection, error) {
var stream streamSelection
if value, found := payload["substream"]; found {
switch value := value.(type) {
case int:
stream.substream.Valid = true
stream.substream.Int16 = int16(value)
case float32:
stream.substream.Valid = true
stream.substream.Int16 = int16(value)
case float64:
stream.substream.Valid = true
stream.substream.Int16 = int16(value)
default:
return nil, fmt.Errorf("Unsupported substream value: %v", value)
}
}
if value, found := payload["temporal"]; found {
switch value := value.(type) {
case int:
stream.temporal.Valid = true
stream.temporal.Int16 = int16(value)
case float32:
stream.temporal.Valid = true
stream.temporal.Int16 = int16(value)
case float64:
stream.temporal.Valid = true
stream.temporal.Int16 = int16(value)
default:
return nil, fmt.Errorf("Unsupported temporal value: %v", value)
}
}
if value, found := payload["audio"]; found {
switch value := value.(type) {
case bool:
stream.audio.Valid = true
stream.audio.Bool = value
default:
return nil, fmt.Errorf("Unsupported audio value: %v", value)
}
}
if value, found := payload["video"]; found {
switch value := value.(type) {
case bool:
stream.video.Valid = true
stream.video.Bool = value
default:
return nil, fmt.Errorf("Unsupported video value: %v", value)
}
}
return &stream, nil
}
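Because JSON numbers decode to float64 in Go, the numeric branches accept int, float32 and float64 alike. A usage example:
// Example: a payload as decoded from JSON (numbers arrive as float64).
payload := map[string]interface{}{
	"substream": float64(1),
	"audio":     true,
}
stream, err := parseStreamSelection(payload)
if err != nil {
	log.Fatal(err)
}
msg := map[string]interface{}{"request": "configure"}
stream.AddToMessage(msg)
// msg now also contains "substream": 1 and "audio": true.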

View file

@ -1,321 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2017 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"context"
"fmt"
"log"
"strconv"
"github.com/notedit/janus-go"
)
type mcuJanusSubscriber struct {
mcuJanusClient
publisher string
}
func (p *mcuJanusSubscriber) Publisher() string {
return p.publisher
}
func (p *mcuJanusSubscriber) handleEvent(event *janus.EventMsg) {
if videoroom := getPluginStringValue(event.Plugindata, pluginVideoRoom, "videoroom"); videoroom != "" {
ctx := context.TODO()
switch videoroom {
case "destroyed":
log.Printf("Subscriber %d: associated room has been destroyed, closing", p.handleId)
go p.Close(ctx)
case "event":
// Handle renegotiations, but ignore other events like selected
// substream / temporal layer.
if getPluginStringValue(event.Plugindata, pluginVideoRoom, "configured") == "ok" &&
event.Jsep != nil && event.Jsep["type"] == "offer" && event.Jsep["sdp"] != nil {
p.listener.OnUpdateOffer(p, event.Jsep)
}
case "slow_link":
// Ignore, processed through "handleSlowLink" in the general events.
default:
log.Printf("Unsupported videoroom event %s for subscriber %d: %+v", videoroom, p.handleId, event)
}
} else {
log.Printf("Unsupported event for subscriber %d: %+v", p.handleId, event)
}
}
func (p *mcuJanusSubscriber) handleHangup(event *janus.HangupMsg) {
log.Printf("Subscriber %d received hangup (%s), closing", p.handleId, event.Reason)
go p.Close(context.Background())
}
func (p *mcuJanusSubscriber) handleDetached(event *janus.DetachedMsg) {
log.Printf("Subscriber %d received detached, closing", p.handleId)
go p.Close(context.Background())
}
func (p *mcuJanusSubscriber) handleConnected(event *janus.WebRTCUpMsg) {
log.Printf("Subscriber %d received connected", p.handleId)
p.mcu.SubscriberConnected(p.Id(), p.publisher, p.streamType)
}
func (p *mcuJanusSubscriber) handleSlowLink(event *janus.SlowLinkMsg) {
if event.Uplink {
log.Printf("Subscriber %s (%d) is reporting %d lost packets on the uplink (Janus -> client)", p.listener.PublicId(), p.handleId, event.Lost)
} else {
log.Printf("Subscriber %s (%d) is reporting %d lost packets on the downlink (client -> Janus)", p.listener.PublicId(), p.handleId, event.Lost)
}
}
func (p *mcuJanusSubscriber) handleMedia(event *janus.MediaMsg) {
// Only triggered for publishers
}
func (p *mcuJanusSubscriber) NotifyReconnected() {
ctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
defer cancel()
handle, pub, err := p.mcu.getOrCreateSubscriberHandle(ctx, p.publisher, p.streamType)
if err != nil {
// TODO(jojo): Retry?
log.Printf("Could not reconnect subscriber for publisher %s: %s", p.publisher, err)
p.Close(context.Background())
return
}
p.handle = handle
p.handleId = handle.Id
p.roomId = pub.roomId
p.sid = strconv.FormatUint(handle.Id, 10)
p.listener.SubscriberSidUpdated(p)
log.Printf("Subscriber %d for publisher %s reconnected on handle %d", p.id, p.publisher, p.handleId)
}
func (p *mcuJanusSubscriber) Close(ctx context.Context) {
p.mu.Lock()
closed := p.closeClient(ctx)
p.mu.Unlock()
if closed {
p.mcu.SubscriberDisconnected(p.Id(), p.publisher, p.streamType)
statsSubscribersCurrent.WithLabelValues(string(p.streamType)).Dec()
}
p.mcu.unregisterClient(p)
p.listener.SubscriberClosed(p)
p.mcuJanusClient.Close(ctx)
}
func (p *mcuJanusSubscriber) joinRoom(ctx context.Context, stream *streamSelection, callback func(error, map[string]interface{})) {
handle := p.handle
if handle == nil {
callback(ErrNotConnected, nil)
return
}
waiter := p.mcu.publisherConnected.NewWaiter(getStreamId(p.publisher, p.streamType))
defer p.mcu.publisherConnected.Release(waiter)
loggedNotPublishingYet := false
retry:
join_msg := map[string]interface{}{
"request": "join",
"ptype": "subscriber",
"room": p.roomId,
}
if p.mcu.isMultistream() {
join_msg["streams"] = []map[string]interface{}{
{
"feed": streamTypeUserIds[p.streamType],
},
}
} else {
join_msg["feed"] = streamTypeUserIds[p.streamType]
}
if stream != nil {
stream.AddToMessage(join_msg)
}
join_response, err := handle.Message(ctx, join_msg, nil)
if err != nil {
callback(err, nil)
return
}
if error_code := getPluginIntValue(join_response.Plugindata, pluginVideoRoom, "error_code"); error_code > 0 {
switch error_code {
case JANUS_VIDEOROOM_ERROR_ALREADY_JOINED:
// The subscriber is already connected to the room. This can happen
// if a client leaves a call but keeps the subscriber objects active.
// On joining the call again, the subscriber tries to join on the
// MCU, which will fail because it is still connected.
// To get a new Offer SDP, we have to tear down the session on the
// MCU and join again.
p.mu.Lock()
p.closeClient(ctx)
p.mu.Unlock()
var pub *mcuJanusPublisher
handle, pub, err = p.mcu.getOrCreateSubscriberHandle(ctx, p.publisher, p.streamType)
if err != nil {
// Reconnection didn't work, need to unregister/remove subscriber
// so a new object will be created if the request is retried.
p.mcu.unregisterClient(p)
p.listener.SubscriberClosed(p)
callback(fmt.Errorf("Already connected as subscriber for %s, error during re-joining: %s", p.streamType, err), nil)
return
}
p.handle = handle
p.handleId = handle.Id
p.roomId = pub.roomId
p.sid = strconv.FormatUint(handle.Id, 10)
p.listener.SubscriberSidUpdated(p)
p.closeChan = make(chan struct{}, 1)
go p.run(p.handle, p.closeChan)
log.Printf("Already connected subscriber %d for %s, leaving and re-joining on handle %d", p.id, p.streamType, p.handleId)
goto retry
case JANUS_VIDEOROOM_ERROR_NO_SUCH_ROOM:
fallthrough
case JANUS_VIDEOROOM_ERROR_NO_SUCH_FEED:
switch error_code {
case JANUS_VIDEOROOM_ERROR_NO_SUCH_ROOM:
log.Printf("Publisher %s not created yet for %s, wait and retry to join room %d as subscriber", p.publisher, p.streamType, p.roomId)
case JANUS_VIDEOROOM_ERROR_NO_SUCH_FEED:
log.Printf("Publisher %s not sending yet for %s, wait and retry to join room %d as subscriber", p.publisher, p.streamType, p.roomId)
}
if !loggedNotPublishingYet {
loggedNotPublishingYet = true
statsWaitingForPublisherTotal.WithLabelValues(string(p.streamType)).Inc()
}
if err := waiter.Wait(ctx); err != nil {
callback(err, nil)
return
}
log.Printf("Retry subscribing %s from %s", p.streamType, p.publisher)
goto retry
default:
// TODO(jojo): Should we handle other errors, too?
callback(fmt.Errorf("Error joining room as subscriber: %+v", join_response), nil)
return
}
}
//log.Println("Joined as listener", join_response)
p.session = join_response.Session
callback(nil, join_response.Jsep)
}
func (p *mcuJanusSubscriber) update(ctx context.Context, stream *streamSelection, callback func(error, map[string]interface{})) {
handle := p.handle
if handle == nil {
callback(ErrNotConnected, nil)
return
}
configure_msg := map[string]interface{}{
"request": "configure",
"update": true,
}
if stream != nil {
stream.AddToMessage(configure_msg)
}
configure_response, err := handle.Message(ctx, configure_msg, nil)
if err != nil {
callback(err, nil)
return
}
callback(nil, configure_response.Jsep)
}
func (p *mcuJanusSubscriber) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) {
statsMcuMessagesTotal.WithLabelValues(data.Type).Inc()
jsep_msg := data.Payload
switch data.Type {
case "requestoffer":
fallthrough
case "sendoffer":
p.deferred <- func() {
msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
defer cancel()
stream, err := parseStreamSelection(jsep_msg)
if err != nil {
go callback(err, nil)
return
}
if data.Sid == "" || data.Sid != p.Sid() {
p.joinRoom(msgctx, stream, callback)
} else {
p.update(msgctx, stream, callback)
}
}
case "answer":
p.deferred <- func() {
msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
defer cancel()
if data.Sid == "" || data.Sid == p.Sid() {
p.sendAnswer(msgctx, jsep_msg, callback)
} else {
go callback(fmt.Errorf("Answer message sid (%s) does not match subscriber sid (%s)", data.Sid, p.Sid()), nil)
}
}
case "candidate":
p.deferred <- func() {
msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
defer cancel()
if data.Sid == "" || data.Sid == p.Sid() {
p.sendCandidate(msgctx, jsep_msg["candidate"], callback)
} else {
go callback(fmt.Errorf("Candidate message sid (%s) does not match subscriber sid (%s)", data.Sid, p.Sid()), nil)
}
}
case "endOfCandidates":
// Ignore
case "selectStream":
stream, err := parseStreamSelection(jsep_msg)
if err != nil {
go callback(err, nil)
return
}
if stream == nil || !stream.HasValues() {
// Nothing to do
go callback(nil, nil)
return
}
p.deferred <- func() {
msgctx, cancel := context.WithTimeout(context.Background(), p.mcu.mcuTimeout)
defer cancel()
p.selectStream(msgctx, stream, callback)
}
default:
// Return error asynchronously
go callback(fmt.Errorf("Unsupported message type: %s", data.Type), nil)
}
}
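
Aside, not part of the diff: SendMessage never talks to Janus directly; every operation is pushed as a closure into p.deferred so that all work for one subscriber runs sequentially. A minimal sketch of that pattern, assuming the run loop referenced above (p.run) drains the channel roughly like this:

	deferred := make(chan func(), 64)
	closeChan := make(chan struct{})
	go func() {
		// Worker goroutine: serializes joinRoom/update/sendAnswer/sendCandidate.
		for {
			select {
			case f := <-deferred:
				f()
			case <-closeChan:
				return
			}
		}
	}()
	deferred <- func() { log.Println("runs on the worker goroutine") }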

View file

@ -162,7 +162,6 @@ func (p *mcuProxyPublisher) SetMedia(mt MediaType) {
}
func (p *mcuProxyPublisher) NotifyClosed() {
log.Printf("Publisher %s at %s was closed", p.proxyId, p.conn)
p.listener.PublisherClosed(p)
p.conn.removePublisher(p)
}
@ -186,7 +185,7 @@ func (p *mcuProxyPublisher) Close(ctx context.Context) {
return
}
log.Printf("Deleted publisher %s at %s", p.proxyId, p.conn)
log.Printf("Delete publisher %s at %s", p.proxyId, p.conn)
}
func (p *mcuProxyPublisher) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) {
@ -218,26 +217,13 @@ func (p *mcuProxyPublisher) ProcessEvent(msg *EventProxyServerMessage) {
}
}
func (p *mcuProxyPublisher) GetStreams(ctx context.Context) ([]PublisherStream, error) {
return nil, errors.New("not implemented")
}
func (p *mcuProxyPublisher) PublishRemote(ctx context.Context, remoteId string, hostname string, port int, rtcpPort int) error {
return errors.New("remote publishing not supported for proxy publishers")
}
func (p *mcuProxyPublisher) UnpublishRemote(ctx context.Context, remoteId string) error {
return errors.New("remote publishing not supported for proxy publishers")
}
type mcuProxySubscriber struct {
mcuProxyPubSubCommon
publisherId string
publisherConn *mcuProxyConnection
publisherId string
}
func newMcuProxySubscriber(publisherId string, sid string, streamType StreamType, maxBitrate int, proxyId string, conn *mcuProxyConnection, listener McuListener, publisherConn *mcuProxyConnection) *mcuProxySubscriber {
func newMcuProxySubscriber(publisherId string, sid string, streamType StreamType, maxBitrate int, proxyId string, conn *mcuProxyConnection, listener McuListener) *mcuProxySubscriber {
return &mcuProxySubscriber{
mcuProxyPubSubCommon: mcuProxyPubSubCommon{
sid: sid,
@ -248,8 +234,7 @@ func newMcuProxySubscriber(publisherId string, sid string, streamType StreamType
listener: listener,
},
publisherId: publisherId,
publisherConn: publisherConn,
publisherId: publisherId,
}
}
@ -258,11 +243,6 @@ func (s *mcuProxySubscriber) Publisher() string {
}
func (s *mcuProxySubscriber) NotifyClosed() {
if s.publisherConn != nil {
log.Printf("Remote subscriber %s at %s (forwarded to %s) was closed", s.proxyId, s.conn, s.publisherConn)
} else {
log.Printf("Subscriber %s at %s was closed", s.proxyId, s.conn)
}
s.listener.SubscriberClosed(s)
s.conn.removeSubscriber(s)
}
@ -279,26 +259,14 @@ func (s *mcuProxySubscriber) Close(ctx context.Context) {
}
if response, err := s.conn.performSyncRequest(ctx, msg); err != nil {
if s.publisherConn != nil {
log.Printf("Could not delete remote subscriber %s at %s (forwarded to %s): %s", s.proxyId, s.conn, s.publisherConn, err)
} else {
log.Printf("Could not delete subscriber %s at %s: %s", s.proxyId, s.conn, err)
}
log.Printf("Could not delete subscriber %s at %s: %s", s.proxyId, s.conn, err)
return
} else if response.Type == "error" {
if s.publisherConn != nil {
log.Printf("Could not delete remote subscriber %s at %s (forwarded to %s): %s", s.proxyId, s.conn, s.publisherConn, response.Error)
} else {
log.Printf("Could not delete subscriber %s at %s: %s", s.proxyId, s.conn, response.Error)
}
log.Printf("Could not delete subscriber %s at %s: %s", s.proxyId, s.conn, response.Error)
return
}
if s.publisherConn != nil {
log.Printf("Deleted remote subscriber %s at %s (forwarded to %s)", s.proxyId, s.conn, s.publisherConn)
} else {
log.Printf("Deleted subscriber %s at %s", s.proxyId, s.conn)
}
log.Printf("Delete subscriber %s at %s", s.proxyId, s.conn)
}
func (s *mcuProxySubscriber) SendMessage(ctx context.Context, message *MessageClientMessage, data *MessageClientMessageData, callback func(error, map[string]interface{})) {
@ -340,7 +308,6 @@ type mcuProxyConnection struct {
ip net.IP
load atomic.Int64
bandwidth atomic.Pointer[EventProxyServerBandwidth]
mu sync.Mutex
closer *Closer
closedDone *Closer
@ -359,7 +326,7 @@ type mcuProxyConnection struct {
msgId atomic.Int64
helloMsgId string
sessionId atomic.Value
sessionId string
country atomic.Value
callbacks map[string]func(*ProxyServerMessage)
@ -392,7 +359,6 @@ func newMcuProxyConnection(proxy *mcuProxy, baseUrl string, ip net.IP) (*mcuProx
}
conn.reconnectInterval.Store(int64(initialReconnectInterval))
conn.load.Store(loadNotConnected)
conn.bandwidth.Store(nil)
conn.country.Store("")
return conn, nil
}
@ -405,54 +371,6 @@ func (c *mcuProxyConnection) String() string {
return c.rawUrl
}
func (c *mcuProxyConnection) IsSameCountry(initiator McuInitiator) bool {
if initiator == nil {
return true
}
initiatorCountry := initiator.Country()
if initiatorCountry == "" {
return true
}
connCountry := c.Country()
if connCountry == "" {
return true
}
return initiatorCountry == connCountry
}
func (c *mcuProxyConnection) IsSameContinent(initiator McuInitiator) bool {
if initiator == nil {
return true
}
initiatorCountry := initiator.Country()
if initiatorCountry == "" {
return true
}
connCountry := c.Country()
if connCountry == "" {
return true
}
initiatorContinents, found := ContinentMap[initiatorCountry]
if found {
m := c.proxy.getContinentsMap()
// Map continents to other continents (e.g. use Europe for Africa).
for _, continent := range initiatorContinents {
if toAdd, found := m[continent]; found {
initiatorContinents = append(initiatorContinents, toAdd...)
}
}
}
connContinents := ContinentMap[connCountry]
return ContinentsOverlap(initiatorContinents, connContinents)
}
type mcuProxyConnectionStats struct {
Url string `json:"url"`
IP net.IP `json:"ip,omitempty"`
@ -496,29 +414,10 @@ func (c *mcuProxyConnection) Load() int64 {
return c.load.Load()
}
func (c *mcuProxyConnection) Bandwidth() *EventProxyServerBandwidth {
return c.bandwidth.Load()
}
func (c *mcuProxyConnection) Country() string {
return c.country.Load().(string)
}
func (c *mcuProxyConnection) SessionId() string {
sid := c.sessionId.Load()
if sid == nil {
return ""
}
return sid.(string)
}
func (c *mcuProxyConnection) IsConnected() bool {
c.mu.Lock()
defer c.mu.Unlock()
return c.conn != nil && c.SessionId() != ""
}
func (c *mcuProxyConnection) IsTemporary() bool {
return c.temporary.Load()
}
@ -544,10 +443,7 @@ func (c *mcuProxyConnection) readPump() {
}
}()
defer c.close()
defer func() {
c.load.Store(loadNotConnected)
c.bandwidth.Store(nil)
}()
defer c.load.Store(loadNotConnected)
c.mu.Lock()
conn := c.conn
@ -848,9 +744,8 @@ func (c *mcuProxyConnection) clearPublishers() {
publisher.NotifyClosed()
}
}(c.publishers)
// Can't use clear(...) here as the map is processed by the goroutine above.
c.publishers = make(map[string]*mcuProxyPublisher)
clear(c.publisherIds)
c.publisherIds = make(map[string]string)
if c.closeScheduled.Load() || c.IsTemporary() {
go c.closeIfEmpty()
@ -880,7 +775,6 @@ func (c *mcuProxyConnection) clearSubscribers() {
subscriber.NotifyClosed()
}
}(c.subscribers)
// Can't use clear(...) here as the map is processed by the goroutine above.
c.subscribers = make(map[string]*mcuProxySubscriber)
if c.closeScheduled.Load() || c.IsTemporary() {
@ -892,7 +786,7 @@ func (c *mcuProxyConnection) clearCallbacks() {
c.mu.Lock()
defer c.mu.Unlock()
clear(c.callbacks)
c.callbacks = make(map[string]func(*ProxyServerMessage))
}
func (c *mcuProxyConnection) getCallback(id string) func(*ProxyServerMessage) {
@ -912,11 +806,11 @@ func (c *mcuProxyConnection) processMessage(msg *ProxyServerMessage) {
switch msg.Type {
case "error":
if msg.Error.Code == "no_such_session" {
log.Printf("Session %s could not be resumed on %s, registering new", c.SessionId(), c)
log.Printf("Session %s could not be resumed on %s, registering new", c.sessionId, c)
c.clearPublishers()
c.clearSubscribers()
c.clearCallbacks()
c.sessionId.Store("")
c.sessionId = ""
if err := c.sendHello(); err != nil {
log.Printf("Could not send hello request to %s: %s", c, err)
c.scheduleReconnect()
@ -927,8 +821,8 @@ func (c *mcuProxyConnection) processMessage(msg *ProxyServerMessage) {
log.Printf("Hello connection to %s failed with %+v, reconnecting", c, msg.Error)
c.scheduleReconnect()
case "hello":
resumed := c.SessionId() == msg.Hello.SessionId
c.sessionId.Store(msg.Hello.SessionId)
resumed := c.sessionId == msg.Hello.SessionId
c.sessionId = msg.Hello.SessionId
country := ""
if msg.Hello.Server != nil {
if country = msg.Hello.Server.Country; country != "" && !IsValidCountry(country) {
@ -938,11 +832,11 @@ func (c *mcuProxyConnection) processMessage(msg *ProxyServerMessage) {
}
c.country.Store(country)
if resumed {
log.Printf("Resumed session %s on %s", c.SessionId(), c)
log.Printf("Resumed session %s on %s", c.sessionId, c)
} else if country != "" {
log.Printf("Received session %s from %s (in %s)", c.SessionId(), c, country)
log.Printf("Received session %s from %s (in %s)", c.sessionId, c, country)
} else {
log.Printf("Received session %s from %s", c.SessionId(), c)
log.Printf("Received session %s from %s", c.sessionId, c)
}
if c.trackClose.CompareAndSwap(false, true) {
statsConnectedProxyBackendsCurrent.WithLabelValues(c.Country()).Inc()
@ -1013,10 +907,9 @@ func (c *mcuProxyConnection) processEvent(msg *ProxyServerMessage) {
return
case "update-load":
if proxyDebugMessages {
log.Printf("Load of %s now at %d (%s)", c, event.Load, event.Bandwidth)
log.Printf("Load of %s now at %d", c, event.Load)
}
c.load.Store(event.Load)
c.bandwidth.Store(event.Bandwidth)
statsProxyBackendLoadCurrent.WithLabelValues(c.url.String()).Set(float64(event.Load))
return
case "shutdown-scheduled":
@ -1051,8 +944,8 @@ func (c *mcuProxyConnection) processBye(msg *ProxyServerMessage) {
bye := msg.Bye
switch bye.Reason {
case "session_resumed":
log.Printf("Session %s on %s was resumed by other client, resetting", c.SessionId(), c)
c.sessionId.Store("")
log.Printf("Session %s on %s was resumed by other client, resetting", c.sessionId, c)
c.sessionId = ""
default:
log.Printf("Received bye with unsupported reason from %s %+v", c, bye)
}
@ -1067,10 +960,17 @@ func (c *mcuProxyConnection) sendHello() error {
Version: "1.0",
},
}
if sessionId := c.SessionId(); sessionId != "" {
msg.Hello.ResumeId = sessionId
if c.sessionId != "" {
msg.Hello.ResumeId = c.sessionId
} else {
tokenString, err := c.proxy.createToken("")
claims := &TokenClaims{
jwt.RegisteredClaims{
IssuedAt: jwt.NewNumericDate(time.Now()),
Issuer: c.proxy.tokenId,
},
}
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
tokenString, err := token.SignedString(c.proxy.tokenKey)
if err != nil {
return err
}
@ -1191,48 +1091,7 @@ func (c *mcuProxyConnection) newSubscriber(ctx context.Context, listener McuList
proxyId := response.Command.Id
log.Printf("Created %s subscriber %s on %s for %s", streamType, proxyId, c, publisherSessionId)
subscriber := newMcuProxySubscriber(publisherSessionId, response.Command.Sid, streamType, response.Command.Bitrate, proxyId, c, listener, nil)
c.subscribersLock.Lock()
c.subscribers[proxyId] = subscriber
c.subscribersLock.Unlock()
statsSubscribersCurrent.WithLabelValues(string(streamType)).Inc()
statsSubscribersTotal.WithLabelValues(string(streamType)).Inc()
return subscriber, nil
}
func (c *mcuProxyConnection) newRemoteSubscriber(ctx context.Context, listener McuListener, publisherId string, publisherSessionId string, streamType StreamType, publisherConn *mcuProxyConnection) (McuSubscriber, error) {
if c == publisherConn {
return c.newSubscriber(ctx, listener, publisherId, publisherSessionId, streamType)
}
remoteToken, err := c.proxy.createToken(publisherId)
if err != nil {
return nil, err
}
msg := &ProxyClientMessage{
Type: "command",
Command: &CommandProxyClientMessage{
Type: "create-subscriber",
StreamType: streamType,
PublisherId: publisherId,
RemoteUrl: publisherConn.rawUrl,
RemoteToken: remoteToken,
},
}
response, err := c.performSyncRequest(ctx, msg)
if err != nil {
// TODO: Cancel request
return nil, err
} else if response.Type == "error" {
return nil, fmt.Errorf("Error creating remote %s subscriber for %s on %s (forwarded to %s): %+v", streamType, publisherSessionId, c, publisherConn, response.Error)
}
proxyId := response.Command.Id
log.Printf("Created remote %s subscriber %s on %s for %s (forwarded to %s)", streamType, proxyId, c, publisherSessionId, publisherConn)
subscriber := newMcuProxySubscriber(publisherSessionId, response.Command.Sid, streamType, response.Command.Bitrate, proxyId, c, listener, publisherConn)
subscriber := newMcuProxySubscriber(publisherSessionId, response.Command.Sid, streamType, response.Command.Bitrate, proxyId, c, listener)
c.subscribersLock.Lock()
c.subscribers[proxyId] = subscriber
c.subscribersLock.Unlock()
@ -1255,8 +1114,8 @@ type mcuProxy struct {
connRequests atomic.Int64
nextSort atomic.Int64
maxStreamBitrate atomic.Int32
maxScreenBitrate atomic.Int32
maxStreamBitrate int
maxScreenBitrate int
mu sync.RWMutex
publishers map[string]*mcuProxyConnection
@ -1319,14 +1178,14 @@ func NewMcuProxy(config *goconf.ConfigFile, etcdClient *EtcdClient, rpcClients *
connectionsMap: make(map[string][]*mcuProxyConnection),
proxyTimeout: proxyTimeout,
maxStreamBitrate: maxStreamBitrate,
maxScreenBitrate: maxScreenBitrate,
publishers: make(map[string]*mcuProxyConnection),
rpcClients: rpcClients,
}
mcu.maxStreamBitrate.Store(int32(maxStreamBitrate))
mcu.maxScreenBitrate.Store(int32(maxScreenBitrate))
if err := mcu.loadContinentsMap(config); err != nil {
return nil, err
}
@ -1395,9 +1254,9 @@ func (m *mcuProxy) loadContinentsMap(config *goconf.ConfigFile) error {
return nil
}
func (m *mcuProxy) Start(ctx context.Context) error {
log.Printf("Maximum bandwidth %d bits/sec per publishing stream", m.maxStreamBitrate.Load())
log.Printf("Maximum bandwidth %d bits/sec per screensharing stream", m.maxScreenBitrate.Load())
func (m *mcuProxy) Start() error {
log.Printf("Maximum bandwidth %d bits/sec per publishing stream", m.maxStreamBitrate)
log.Printf("Maximum bandwidth %d bits/sec per screensharing stream", m.maxScreenBitrate)
return m.config.Start()
}
@ -1415,48 +1274,6 @@ func (m *mcuProxy) Stop() {
m.config.Stop()
}
func (m *mcuProxy) createToken(subject string) (string, error) {
claims := &TokenClaims{
jwt.RegisteredClaims{
IssuedAt: jwt.NewNumericDate(time.Now()),
Issuer: m.tokenId,
Subject: subject,
},
}
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
tokenString, err := token.SignedString(m.tokenKey)
if err != nil {
return "", err
}
return tokenString, nil
}
func (m *mcuProxy) hasConnections() bool {
m.connectionsMu.RLock()
defer m.connectionsMu.RUnlock()
for _, conn := range m.connections {
if conn.IsConnected() {
return true
}
}
return false
}
func (m *mcuProxy) WaitForConnections(ctx context.Context) error {
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()
for !m.hasConnections() {
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
}
}
return nil
}
func (m *mcuProxy) AddConnection(ignoreErrors bool, url string, ips ...net.IP) error {
m.connectionsMu.Lock()
defer m.connectionsMu.Unlock()
@ -1556,20 +1373,6 @@ func (m *mcuProxy) KeepConnection(url string, ips ...net.IP) {
}
func (m *mcuProxy) Reload(config *goconf.ConfigFile) {
maxStreamBitrate, _ := config.GetInt("mcu", "maxstreambitrate")
if maxStreamBitrate <= 0 {
maxStreamBitrate = defaultMaxStreamBitrate
}
log.Printf("Maximum bandwidth %d bits/sec per publishing stream", m.maxStreamBitrate.Load())
m.maxStreamBitrate.Store(int32(maxStreamBitrate))
maxScreenBitrate, _ := config.GetInt("mcu", "maxscreenbitrate")
if maxScreenBitrate <= 0 {
maxScreenBitrate = defaultMaxScreenBitrate
}
log.Printf("Maximum bandwidth %d bits/sec per screensharing stream", m.maxScreenBitrate.Load())
m.maxScreenBitrate.Store(int32(maxScreenBitrate))
if err := m.loadContinentsMap(config); err != nil {
log.Printf("Error loading continents map: %s", err)
}
@ -1762,27 +1565,27 @@ func (m *mcuProxy) removePublisher(publisher *mcuProxyPublisher) {
delete(m.publishers, getStreamId(publisher.id, publisher.StreamType()))
}
func (m *mcuProxy) createPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType StreamType, bitrate int, mediaTypes MediaType, initiator McuInitiator, connections []*mcuProxyConnection, isAllowed func(c *mcuProxyConnection) bool) McuPublisher {
var maxBitrate int
if streamType == StreamTypeScreen {
maxBitrate = int(m.maxScreenBitrate.Load())
} else {
maxBitrate = int(m.maxStreamBitrate.Load())
}
if bitrate <= 0 {
bitrate = maxBitrate
} else {
bitrate = min(bitrate, maxBitrate)
}
func (m *mcuProxy) NewPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType StreamType, bitrate int, mediaTypes MediaType, initiator McuInitiator) (McuPublisher, error) {
connections := m.getSortedConnections(initiator)
for _, conn := range connections {
if !isAllowed(conn) || conn.IsShutdownScheduled() || conn.IsTemporary() {
if conn.IsShutdownScheduled() || conn.IsTemporary() {
continue
}
subctx, cancel := context.WithTimeout(ctx, m.proxyTimeout)
defer cancel()
var maxBitrate int
if streamType == StreamTypeScreen {
maxBitrate = m.maxScreenBitrate
} else {
maxBitrate = m.maxStreamBitrate
}
if bitrate <= 0 {
bitrate = maxBitrate
} else {
bitrate = min(bitrate, maxBitrate)
}
publisher, err := conn.newPublisher(subctx, listener, id, sid, streamType, bitrate, mediaTypes)
if err != nil {
log.Printf("Could not create %s publisher for %s on %s: %s", streamType, id, conn, err)
@ -1793,61 +1596,11 @@ func (m *mcuProxy) createPublisher(ctx context.Context, listener McuListener, id
m.publishers[getStreamId(id, streamType)] = conn
m.mu.Unlock()
m.publisherWaiters.Wakeup()
return publisher
return publisher, nil
}
return nil
}
func (m *mcuProxy) NewPublisher(ctx context.Context, listener McuListener, id string, sid string, streamType StreamType, bitrate int, mediaTypes MediaType, initiator McuInitiator) (McuPublisher, error) {
connections := m.getSortedConnections(initiator)
publisher := m.createPublisher(ctx, listener, id, sid, streamType, bitrate, mediaTypes, initiator, connections, func(c *mcuProxyConnection) bool {
bw := c.Bandwidth()
return bw == nil || bw.AllowIncoming()
})
if publisher == nil {
// No proxy has available bandwidth; fall back to the one with the lowest currently used bandwidth.
connections2 := make([]*mcuProxyConnection, 0, len(connections))
for _, c := range connections {
if c.Bandwidth() != nil {
connections2 = append(connections2, c)
}
}
SlicesSortFunc(connections2, func(a *mcuProxyConnection, b *mcuProxyConnection) int {
var incoming_a *float64
if bw := a.Bandwidth(); bw != nil {
incoming_a = bw.Incoming
}
var incoming_b *float64
if bw := b.Bandwidth(); bw != nil {
incoming_b = bw.Incoming
}
if incoming_a == nil && incoming_b == nil {
return 0
} else if incoming_a == nil && incoming_b != nil {
return -1
} else if incoming_a != nil && incoming_b == nil {
return -1
} else if *incoming_a < *incoming_b {
return -1
} else if *incoming_a > *incoming_b {
return 1
}
return 0
})
publisher = m.createPublisher(ctx, listener, id, sid, streamType, bitrate, mediaTypes, initiator, connections2, func(c *mcuProxyConnection) bool {
return true
})
}
if publisher == nil {
statsProxyNobackendAvailableTotal.WithLabelValues(string(streamType)).Inc()
return nil, fmt.Errorf("No MCU connection available")
}
return publisher, nil
statsProxyNobackendAvailableTotal.WithLabelValues(string(streamType)).Inc()
return nil, fmt.Errorf("No MCU connection available")
}
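
Aside, not part of the diff: the fallback sort above compares optional bandwidth readings, where nil means "no measurement reported yet". For the comparator to be consistent, nil must sort to the same side from both arguments; a compact equivalent using the standard cmp package (Go 1.21+), with unknown readings treated as most attractive:

	import "cmp"

	// compareOptional orders unknown (nil) readings first, then by value.
	func compareOptional(a, b *float64) int {
		switch {
		case a == nil && b == nil:
			return 0
		case a == nil:
			return -1
		case b == nil:
			return 1
		default:
			return cmp.Compare(*a, *b)
		}
	}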
func (m *mcuProxy) getPublisherConnection(publisher string, streamType StreamType) *mcuProxyConnection {
@ -1888,38 +1641,7 @@ func (m *mcuProxy) waitForPublisherConnection(ctx context.Context, publisher str
}
}
type proxyPublisherInfo struct {
id string
conn *mcuProxyConnection
err error
}
func (m *mcuProxy) createSubscriber(ctx context.Context, listener McuListener, id string, publisher string, streamType StreamType, publisherConn *mcuProxyConnection, connections []*mcuProxyConnection, isAllowed func(c *mcuProxyConnection) bool) McuSubscriber {
for _, conn := range connections {
if !isAllowed(conn) || conn.IsShutdownScheduled() || conn.IsTemporary() {
continue
}
var subscriber McuSubscriber
var err error
if conn == publisherConn {
subscriber, err = conn.newSubscriber(ctx, listener, id, publisher, streamType)
} else {
subscriber, err = conn.newRemoteSubscriber(ctx, listener, id, publisher, streamType, publisherConn)
}
if err != nil {
log.Printf("Could not create subscriber for %s publisher %s on %s: %s", streamType, publisher, conn, err)
continue
}
return subscriber
}
return nil
}
func (m *mcuProxy) NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType, initiator McuInitiator) (McuSubscriber, error) {
var publisherInfo *proxyPublisherInfo
func (m *mcuProxy) NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType) (McuSubscriber, error) {
if conn := m.getPublisherConnection(publisher, streamType); conn != nil {
// Fast common path: publisher is available locally.
conn.publishersLock.Lock()
@ -1929,190 +1651,113 @@ func (m *mcuProxy) NewSubscriber(ctx context.Context, listener McuListener, publ
return nil, fmt.Errorf("Unknown publisher %s", publisher)
}
publisherInfo = &proxyPublisherInfo{
id: id,
conn: conn,
return conn.newSubscriber(ctx, listener, id, publisher, streamType)
}
log.Printf("No %s publisher %s found yet, deferring", streamType, publisher)
ch := make(chan McuSubscriber)
getctx, cancel := context.WithCancel(ctx)
defer cancel()
// Wait for publisher to be created locally.
go func() {
if conn := m.waitForPublisherConnection(getctx, publisher, streamType); conn != nil {
cancel() // Cancel pending RPC calls.
conn.publishersLock.Lock()
id, found := conn.publisherIds[getStreamId(publisher, streamType)]
conn.publishersLock.Unlock()
if !found {
log.Printf("Unknown id for local %s publisher %s", streamType, publisher)
return
}
subscriber, err := conn.newSubscriber(ctx, listener, id, publisher, streamType)
if subscriber != nil {
ch <- subscriber
} else if err != nil {
log.Printf("Error creating local subscriber for %s publisher %s: %s", streamType, publisher, err)
}
}
} else {
log.Printf("No %s publisher %s found yet, deferring", streamType, publisher)
ch := make(chan *proxyPublisherInfo, 1)
getctx, cancel := context.WithCancel(ctx)
defer cancel()
}()
var wg sync.WaitGroup
// Wait for publisher to be created locally.
wg.Add(1)
go func() {
defer wg.Done()
if conn := m.waitForPublisherConnection(getctx, publisher, streamType); conn != nil {
cancel() // Cancel pending RPC calls.
conn.publishersLock.Lock()
id, found := conn.publisherIds[getStreamId(publisher, streamType)]
conn.publishersLock.Unlock()
if !found {
ch <- &proxyPublisherInfo{
err: fmt.Errorf("Unknown id for local %s publisher %s", streamType, publisher),
}
// Wait for publisher to be created on one of the other servers in the cluster.
if clients := m.rpcClients.GetClients(); len(clients) > 0 {
for _, client := range clients {
go func(client *GrpcClient) {
id, url, ip, err := client.GetPublisherId(getctx, publisher, streamType)
if errors.Is(err, context.Canceled) {
return
} else if err != nil {
log.Printf("Error getting %s publisher id %s from %s: %s", streamType, publisher, client.Target(), err)
return
} else if id == "" {
// Publisher not found on other server
return
}
ch <- &proxyPublisherInfo{
id: id,
conn: conn,
cancel() // Cancel pending RPC calls.
log.Printf("Found publisher id %s through %s on proxy %s", id, client.Target(), url)
m.connectionsMu.RLock()
connections := m.connections
m.connectionsMu.RUnlock()
var publisherConn *mcuProxyConnection
for _, conn := range connections {
if conn.rawUrl != url || !ip.Equal(conn.ip) {
continue
}
// Simple case, signaling server has a connection to the same endpoint
publisherConn = conn
break
}
}
}()
// Wait for publisher to be created on one of the other servers in the cluster.
if clients := m.rpcClients.GetClients(); len(clients) > 0 {
for _, client := range clients {
wg.Add(1)
go func(client *GrpcClient) {
defer wg.Done()
id, url, ip, err := client.GetPublisherId(getctx, publisher, streamType)
if errors.Is(err, context.Canceled) {
return
} else if err != nil {
log.Printf("Error getting %s publisher id %s from %s: %s", streamType, publisher, client.Target(), err)
return
} else if id == "" {
// Publisher not found on other server
if publisherConn == nil {
publisherConn, err = newMcuProxyConnection(m, url, ip)
if err != nil {
log.Printf("Could not create temporary connection to %s for %s publisher %s: %s", url, streamType, publisher, err)
return
}
cancel() // Cancel pending RPC calls.
log.Printf("Found publisher id %s through %s on proxy %s", id, client.Target(), url)
m.connectionsMu.RLock()
connections := m.connections
m.connectionsMu.RUnlock()
var publisherConn *mcuProxyConnection
for _, conn := range connections {
if conn.rawUrl != url || !ip.Equal(conn.ip) {
continue
}
// Simple case, signaling server has a connection to the same endpoint
publisherConn = conn
break
publisherConn.setTemporary()
publisherConn.start()
if err := publisherConn.waitUntilConnected(ctx); err != nil {
log.Printf("Could not establish new connection to %s: %s", publisherConn, err)
publisherConn.closeIfEmpty()
return
}
if publisherConn == nil {
publisherConn, err = newMcuProxyConnection(m, url, ip)
if err != nil {
log.Printf("Could not create temporary connection to %s for %s publisher %s: %s", url, streamType, publisher, err)
return
}
publisherConn.setTemporary()
publisherConn.start()
if err := publisherConn.waitUntilConnected(ctx); err != nil {
log.Printf("Could not establish new connection to %s: %s", publisherConn, err)
publisherConn.closeIfEmpty()
return
}
m.connectionsMu.Lock()
m.connections = append(m.connections, publisherConn)
conns, found := m.connectionsMap[url]
if found {
conns = append(conns, publisherConn)
} else {
conns = []*mcuProxyConnection{publisherConn}
}
m.connectionsMap[url] = conns
m.connectionsMu.Unlock()
}
ch <- &proxyPublisherInfo{
id: id,
conn: publisherConn,
}
}(client)
}
}
wg.Wait()
select {
case ch <- &proxyPublisherInfo{
err: fmt.Errorf("No %s publisher %s found", streamType, publisher),
}:
default:
}
select {
case info := <-ch:
publisherInfo = info
case <-ctx.Done():
return nil, fmt.Errorf("No %s publisher %s found", streamType, publisher)
}
}
if publisherInfo.err != nil {
return nil, publisherInfo.err
}
bw := publisherInfo.conn.Bandwidth()
allowOutgoing := bw == nil || bw.AllowOutgoing()
if !allowOutgoing || !publisherInfo.conn.IsSameCountry(initiator) {
connections := m.getSortedConnections(initiator)
if !allowOutgoing || len(connections) > 0 && !connections[0].IsSameCountry(publisherInfo.conn) {
// Connect to remote publisher through "closer" gateway.
subscriber := m.createSubscriber(ctx, listener, publisherInfo.id, publisher, streamType, publisherInfo.conn, connections, func(c *mcuProxyConnection) bool {
bw := c.Bandwidth()
return bw == nil || bw.AllowOutgoing()
})
if subscriber == nil {
connections2 := make([]*mcuProxyConnection, 0, len(connections))
for _, c := range connections {
if c.Bandwidth() != nil {
connections2 = append(connections2, c)
m.connectionsMu.Lock()
m.connections = append(m.connections, publisherConn)
conns, found := m.connectionsMap[url]
if found {
conns = append(conns, publisherConn)
} else {
conns = []*mcuProxyConnection{publisherConn}
}
m.connectionsMap[url] = conns
m.connectionsMu.Unlock()
}
SlicesSortFunc(connections2, func(a *mcuProxyConnection, b *mcuProxyConnection) int {
var outgoing_a *float64
if bw := a.Bandwidth(); bw != nil {
outgoing_a = bw.Outgoing
}
var outgoing_b *float64
if bw := b.Bandwidth(); bw != nil {
outgoing_b = bw.Outgoing
subscriber, err := publisherConn.newSubscriber(ctx, listener, id, publisher, streamType)
if err != nil {
if publisherConn.IsTemporary() {
publisherConn.closeIfEmpty()
}
log.Printf("Could not create subscriber for %s publisher %s: %s", streamType, publisher, err)
return
}
if outgoing_a == nil && outgoing_b == nil {
return 0
} else if outgoing_a == nil && outgoing_b != nil {
return -1
} else if outgoing_a != nil && outgoing_b == nil {
return -1
} else if *outgoing_a < *outgoing_b {
return -1
} else if *outgoing_a > *outgoing_b {
return 1
}
return 0
})
subscriber = m.createSubscriber(ctx, listener, publisherInfo.id, publisher, streamType, publisherInfo.conn, connections2, func(c *mcuProxyConnection) bool {
return true
})
}
if subscriber != nil {
return subscriber, nil
}
ch <- subscriber
}(client)
}
}
subscriber, err := publisherInfo.conn.newSubscriber(ctx, listener, publisherInfo.id, publisher, streamType)
if err != nil {
if publisherInfo.conn.IsTemporary() {
publisherInfo.conn.closeIfEmpty()
}
log.Printf("Could not create subscriber for %s publisher %s on %s: %s", streamType, publisher, publisherInfo.conn, err)
return nil, err
select {
case subscriber := <-ch:
return subscriber, nil
case <-ctx.Done():
return nil, fmt.Errorf("No %s publisher %s found", streamType, publisher)
}
return subscriber, nil
}

File diff suppressed because it is too large

View file

@ -23,7 +23,6 @@ package signaling
import (
"context"
"errors"
"fmt"
"log"
"sync"
@ -50,7 +49,7 @@ func NewTestMCU() (*TestMCU, error) {
}, nil
}
func (m *TestMCU) Start(ctx context.Context) error {
func (m *TestMCU) Start() error {
return nil
}
@ -118,7 +117,7 @@ func (m *TestMCU) GetPublisher(id string) *TestMCUPublisher {
return m.publishers[id]
}
func (m *TestMCU) NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType, initiator McuInitiator) (McuSubscriber, error) {
func (m *TestMCU) NewSubscriber(ctx context.Context, listener McuListener, publisher string, streamType StreamType) (McuSubscriber, error) {
m.mu.Lock()
defer m.mu.Unlock()
@ -223,18 +222,6 @@ func (p *TestMCUPublisher) SendMessage(ctx context.Context, message *MessageClie
}()
}
func (p *TestMCUPublisher) GetStreams(ctx context.Context) ([]PublisherStream, error) {
return nil, errors.New("not implemented")
}
func (p *TestMCUPublisher) PublishRemote(ctx context.Context, remoteId string, hostname string, port int, rtcpPort int) error {
return errors.New("remote publishing not supported")
}
func (p *TestMCUPublisher) UnpublishRemote(ctx context.Context, remoteId string) error {
return errors.New("remote publishing not supported")
}
type TestMCUSubscriber struct {
TestMCUClient
@ -266,8 +253,6 @@ func (s *TestMCUSubscriber) SendMessage(ctx context.Context, message *MessageCli
"type": "offer",
"sdp": sdp,
})
case "answer":
callback(nil, nil)
default:
callback(fmt.Errorf("Message type %s is not implemented", data.Type), nil)
}

View file

@ -104,7 +104,6 @@ func testNatsClient_Subscribe(t *testing.T, client NatsClient) {
}
func TestNatsClient_Subscribe(t *testing.T) {
CatchLogForTest(t)
ensureNoGoroutinesLeak(t, func(t *testing.T) {
client := CreateLocalNatsClientForTest(t)
@ -121,7 +120,6 @@ func testNatsClient_PublishAfterClose(t *testing.T, client NatsClient) {
}
func TestNatsClient_PublishAfterClose(t *testing.T) {
CatchLogForTest(t)
ensureNoGoroutinesLeak(t, func(t *testing.T) {
client := CreateLocalNatsClientForTest(t)
@ -139,7 +137,6 @@ func testNatsClient_SubscribeAfterClose(t *testing.T, client NatsClient) {
}
func TestNatsClient_SubscribeAfterClose(t *testing.T) {
CatchLogForTest(t)
ensureNoGoroutinesLeak(t, func(t *testing.T) {
client := CreateLocalNatsClientForTest(t)
@ -162,7 +159,6 @@ func testNatsClient_BadSubjects(t *testing.T, client NatsClient) {
}
func TestNatsClient_BadSubjects(t *testing.T) {
CatchLogForTest(t)
ensureNoGoroutinesLeak(t, func(t *testing.T) {
client := CreateLocalNatsClientForTest(t)

View file

@ -118,7 +118,6 @@ func TestNotifierResetWillNotify(t *testing.T) {
}
func TestNotifierDuplicate(t *testing.T) {
t.Parallel()
var notifier Notifier
var wgStart sync.WaitGroup
var wgEnd sync.WaitGroup

View file

@ -8,12 +8,6 @@
# See "https://golang.org/pkg/net/http/pprof/" for further information.
#debug = false
# Comma separated list of trusted proxies (IPs or CIDR networks) that may set
# the "X-Real-Ip" or "X-Forwarded-For" headers. If both are provided, the
# "X-Real-Ip" header will take precedence (if valid).
# Leave empty to allow loopback and local addresses.
#trustedproxies =
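
Aside, not part of the diff: a hypothetical Go sketch of the precedence documented above, assuming the signaling package's AllowedIps type exposes an Allowed(net.IP) check; realClientIP is an invented helper, not part of this codebase.

	func realClientIP(r *http.Request, trusted *signaling.AllowedIps) net.IP {
		host, _, _ := net.SplitHostPort(r.RemoteAddr)
		ip := net.ParseIP(host)
		if ip == nil || !trusted.Allowed(ip) {
			// Direct client connection (or untrusted proxy): ignore the headers.
			return ip
		}
		if real := net.ParseIP(r.Header.Get("X-Real-Ip")); real != nil {
			return real // "X-Real-Ip" takes precedence if valid.
		}
		if fwd := r.Header.Get("X-Forwarded-For"); fwd != "" {
			first, _, _ := strings.Cut(fwd, ",")
			if parsed := net.ParseIP(strings.TrimSpace(first)); parsed != nil {
				return parsed
			}
		}
		return ip
	}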
# ISO 3166 country this proxy is located at. This will be used by the signaling
# servers to determine the closest proxy for publishers.
#country = DE
@ -26,36 +20,6 @@
# - etcd: Token information are retrieved from an etcd cluster (see below).
tokentype = static
# The external hostname for remote streams. Leaving this empty will autodetect
# and use the first public IP found on the available network interfaces.
#hostname =
# The token id to use when connecting remote stream.
#token_id = server1
# The private key for the configured token id to use when connecting remote
# streams.
#token_key = privkey.pem
# If set to "true", certificate validation of remote stream requests will be
# skipped. This should only be enabled during development, e.g. to work with
# self-signed certificates.
#skipverify = false
[bandwidth]
# Target bandwidth limit for incoming streams (in megabits per second).
# Set to 0 to disable the limit. If the limit is reached, the proxy notifies
# the signaling servers that another proxy should be used for publishing if
# possible.
#incoming = 1024
# Target bandwidth limit for outgoing streams (in megabits per second).
# Set to 0 to disable the limit. If the limit is reached, the proxy notifies
# the signaling servers that another proxy should be used for subscribing if
# possible. Note that this might require additional outgoing bandwidth for the
# remote streams.
#outgoing = 1024
[tokens]
# For token type "static": Mapping of <tokenid> = <publickey> of signaling
# servers allowed to connect.

View file

@ -36,8 +36,6 @@ import (
"github.com/dlintw/goconf"
"github.com/gorilla/mux"
signaling "github.com/strukturag/nextcloud-spreed-signaling"
)
var (
@ -92,7 +90,7 @@ func main() {
}
defer proxy.Stop()
if addr, _ := signaling.GetStringOptionWithEnv(config, "http", "listen"); addr != "" {
if addr, _ := config.GetString("http", "listen"); addr != "" {
readTimeout, _ := config.GetInt("http", "readtimeout")
if readTimeout <= 0 {
readTimeout = defaultReadTimeout

View file

@ -53,18 +53,18 @@ func (c *ProxyClient) SetSession(session *ProxySession) {
c.session.Store(session)
}
func (c *ProxyClient) OnClosed(client signaling.HandlerClient) {
func (c *ProxyClient) OnClosed(client *signaling.Client) {
if session := c.GetSession(); session != nil {
session.MarkUsed()
}
c.proxy.clientClosed(&c.Client)
}
func (c *ProxyClient) OnMessageReceived(client signaling.HandlerClient, data []byte) {
func (c *ProxyClient) OnMessageReceived(client *signaling.Client, data []byte) {
c.proxy.processMessage(c, data)
}
func (c *ProxyClient) OnRTTReceived(client signaling.HandlerClient, rtt time.Duration) {
func (c *ProxyClient) OnRTTReceived(client *signaling.Client, rtt time.Duration) {
if session := c.GetSession(); session != nil {
session.MarkUsed()
}

View file

@ -1,490 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2024 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package main
import (
"context"
"crypto/rsa"
"crypto/tls"
"encoding/json"
"errors"
"log"
"net/http"
"net/url"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/golang-jwt/jwt/v4"
"github.com/gorilla/websocket"
signaling "github.com/strukturag/nextcloud-spreed-signaling"
)
const (
initialReconnectInterval = 1 * time.Second
maxReconnectInterval = 32 * time.Second
// Time allowed to write a message to the peer.
writeWait = 10 * time.Second
// Time allowed to read the next pong message from the peer.
pongWait = 60 * time.Second
// Send pings to peer with this period. Must be less than pongWait.
pingPeriod = (pongWait * 9) / 10
)
var (
ErrNotConnected = errors.New("not connected")
)
type RemoteConnection struct {
mu sync.Mutex
url *url.URL
conn *websocket.Conn
closer *signaling.Closer
closed atomic.Bool
tokenId string
tokenKey *rsa.PrivateKey
tlsConfig *tls.Config
connectedSince time.Time
reconnectTimer *time.Timer
reconnectInterval atomic.Int64
msgId atomic.Int64
helloMsgId string
sessionId string
pendingMessages []*signaling.ProxyClientMessage
messageCallbacks map[string]chan *signaling.ProxyServerMessage
}
func NewRemoteConnection(proxyUrl string, tokenId string, tokenKey *rsa.PrivateKey, tlsConfig *tls.Config) (*RemoteConnection, error) {
u, err := url.Parse(proxyUrl)
if err != nil {
return nil, err
}
result := &RemoteConnection{
url: u,
closer: signaling.NewCloser(),
tokenId: tokenId,
tokenKey: tokenKey,
tlsConfig: tlsConfig,
reconnectTimer: time.NewTimer(0),
messageCallbacks: make(map[string]chan *signaling.ProxyServerMessage),
}
result.reconnectInterval.Store(int64(initialReconnectInterval))
go result.writePump()
return result, nil
}
func (c *RemoteConnection) String() string {
return c.url.String()
}
func (c *RemoteConnection) reconnect() {
u, err := c.url.Parse("proxy")
if err != nil {
log.Printf("Could not resolve url to proxy at %s: %s", c, err)
c.scheduleReconnect()
return
}
if u.Scheme == "http" {
u.Scheme = "ws"
} else if u.Scheme == "https" {
u.Scheme = "wss"
}
dialer := websocket.Dialer{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: c.tlsConfig,
}
conn, _, err := dialer.DialContext(context.TODO(), u.String(), nil)
if err != nil {
log.Printf("Error connecting to proxy at %s: %s", c, err)
c.scheduleReconnect()
return
}
log.Printf("Connected to %s", c)
c.closed.Store(false)
c.mu.Lock()
c.connectedSince = time.Now()
c.conn = conn
c.mu.Unlock()
c.reconnectInterval.Store(int64(initialReconnectInterval))
if err := c.sendHello(); err != nil {
log.Printf("Error sending hello request to proxy at %s: %s", c, err)
c.scheduleReconnect()
return
}
if !c.sendPing() {
return
}
go c.readPump(conn)
}
func (c *RemoteConnection) scheduleReconnect() {
if err := c.sendClose(); err != nil && err != ErrNotConnected {
log.Printf("Could not send close message to %s: %s", c, err)
}
c.close()
interval := c.reconnectInterval.Load()
c.reconnectTimer.Reset(time.Duration(interval))
interval = interval * 2
if interval > int64(maxReconnectInterval) {
interval = int64(maxReconnectInterval)
}
c.reconnectInterval.Store(interval)
}
func (c *RemoteConnection) sendHello() error {
c.helloMsgId = strconv.FormatInt(c.msgId.Add(1), 10)
msg := &signaling.ProxyClientMessage{
Id: c.helloMsgId,
Type: "hello",
Hello: &signaling.HelloProxyClientMessage{
Version: "1.0",
},
}
if sessionId := c.sessionId; sessionId != "" {
msg.Hello.ResumeId = sessionId
} else {
tokenString, err := c.createToken("")
if err != nil {
return err
}
msg.Hello.Token = tokenString
}
return c.SendMessage(msg)
}
func (c *RemoteConnection) sendClose() error {
c.mu.Lock()
defer c.mu.Unlock()
if c.conn == nil {
return ErrNotConnected
}
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
return c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
}
func (c *RemoteConnection) close() {
c.mu.Lock()
defer c.mu.Unlock()
if c.conn != nil {
c.conn.Close()
c.conn = nil
}
}
func (c *RemoteConnection) Close() error {
c.mu.Lock()
defer c.mu.Unlock()
c.reconnectTimer.Stop()
if c.conn == nil {
return nil
}
c.sendClose()
err1 := c.conn.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), time.Time{})
err2 := c.conn.Close()
c.conn = nil
if err1 != nil {
return err1
}
return err2
}
func (c *RemoteConnection) createToken(subject string) (string, error) {
claims := &signaling.TokenClaims{
RegisteredClaims: jwt.RegisteredClaims{
IssuedAt: jwt.NewNumericDate(time.Now()),
Issuer: c.tokenId,
Subject: subject,
},
}
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
tokenString, err := token.SignedString(c.tokenKey)
if err != nil {
return "", err
}
return tokenString, nil
}
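
Aside, not part of the diff: a hypothetical sketch of the verifying side of this token, assuming the receiving proxy maps the issuer to a public key from its [tokens] configuration; publicKeyForIssuer is an invented helper.

	claims := &signaling.TokenClaims{}
	token, err := jwt.ParseWithClaims(tokenString, claims, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodRSA); !ok {
			return nil, fmt.Errorf("unexpected signing method %v", t.Header["alg"])
		}
		return publicKeyForIssuer(claims.Issuer) // hypothetical lookup helper
	})
	if err != nil || !token.Valid {
		// Reject the hello request.
	}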
func (c *RemoteConnection) SendMessage(msg *signaling.ProxyClientMessage) error {
c.mu.Lock()
defer c.mu.Unlock()
return c.sendMessageLocked(context.Background(), msg)
}
func (c *RemoteConnection) deferMessage(ctx context.Context, msg *signaling.ProxyClientMessage) {
c.pendingMessages = append(c.pendingMessages, msg)
if ctx.Done() != nil {
go func() {
<-ctx.Done()
c.mu.Lock()
defer c.mu.Unlock()
for idx, m := range c.pendingMessages {
if m == msg {
c.pendingMessages[idx] = nil
break
}
}
}()
}
}
func (c *RemoteConnection) sendMessageLocked(ctx context.Context, msg *signaling.ProxyClientMessage) error {
if c.conn == nil {
// Defer until connected.
c.deferMessage(ctx, msg)
return nil
}
if c.helloMsgId != "" && c.helloMsgId != msg.Id {
// Hello request is still inflight, defer.
c.deferMessage(ctx, msg)
return nil
}
c.conn.SetWriteDeadline(time.Now().Add(writeWait)) // nolint
return c.conn.WriteJSON(msg)
}
func (c *RemoteConnection) readPump(conn *websocket.Conn) {
defer func() {
if !c.closed.Load() {
c.scheduleReconnect()
}
}()
defer c.close()
for {
msgType, msg, err := conn.ReadMessage()
if err != nil {
if errors.Is(err, websocket.ErrCloseSent) {
break
} else if _, ok := err.(*websocket.CloseError); !ok || websocket.IsUnexpectedCloseError(err,
websocket.CloseNormalClosure,
websocket.CloseGoingAway,
websocket.CloseNoStatusReceived) {
log.Printf("Error reading from %s: %v", c, err)
}
break
}
if msgType != websocket.TextMessage {
log.Printf("unexpected message type %q (%s)", msgType, string(msg))
continue
}
var message signaling.ProxyServerMessage
if err := json.Unmarshal(msg, &message); err != nil {
log.Printf("could not decode message %s: %s", string(msg), err)
continue
}
c.mu.Lock()
helloMsgId := c.helloMsgId
c.mu.Unlock()
if helloMsgId != "" && message.Id == helloMsgId {
c.processHello(&message)
} else {
c.processMessage(&message)
}
}
}
func (c *RemoteConnection) sendPing() bool {
c.mu.Lock()
defer c.mu.Unlock()
if c.conn == nil {
return false
}
now := time.Now()
msg := strconv.FormatInt(now.UnixNano(), 10)
c.conn.SetWriteDeadline(now.Add(writeWait)) // nolint
if err := c.conn.WriteMessage(websocket.PingMessage, []byte(msg)); err != nil {
log.Printf("Could not send ping to proxy at %s: %v", c, err)
go c.scheduleReconnect()
return false
}
return true
}
func (c *RemoteConnection) writePump() {
ticker := time.NewTicker(pingPeriod)
defer func() {
ticker.Stop()
}()
defer c.reconnectTimer.Stop()
for {
select {
case <-c.reconnectTimer.C:
c.reconnect()
case <-ticker.C:
c.sendPing()
case <-c.closer.C:
return
}
}
}
func (c *RemoteConnection) processHello(msg *signaling.ProxyServerMessage) {
c.helloMsgId = ""
switch msg.Type {
case "error":
if msg.Error.Code == "no_such_session" {
log.Printf("Session %s could not be resumed on %s, registering new", c.sessionId, c)
c.sessionId = ""
if err := c.sendHello(); err != nil {
log.Printf("Could not send hello request to %s: %s", c, err)
c.scheduleReconnect()
}
return
}
log.Printf("Hello connection to %s failed with %+v, reconnecting", c, msg.Error)
c.scheduleReconnect()
case "hello":
resumed := c.sessionId == msg.Hello.SessionId
c.sessionId = msg.Hello.SessionId
country := ""
if msg.Hello.Server != nil {
if country = msg.Hello.Server.Country; country != "" && !signaling.IsValidCountry(country) {
log.Printf("Proxy %s sent invalid country %s in hello response", c, country)
country = ""
}
}
if resumed {
log.Printf("Resumed session %s on %s", c.sessionId, c)
} else if country != "" {
log.Printf("Received session %s from %s (in %s)", c.sessionId, c, country)
} else {
log.Printf("Received session %s from %s", c.sessionId, c)
}
pending := c.pendingMessages
c.pendingMessages = nil
for _, m := range pending {
if m == nil {
continue
}
if err := c.sendMessageLocked(context.Background(), m); err != nil {
log.Printf("Could not send pending message %+v to %s: %s", m, c, err)
}
}
default:
log.Printf("Received unsupported hello response %+v from %s, reconnecting", msg, c)
c.scheduleReconnect()
}
}
func (c *RemoteConnection) processMessage(msg *signaling.ProxyServerMessage) {
if msg.Id != "" {
c.mu.Lock()
ch, found := c.messageCallbacks[msg.Id]
if found {
delete(c.messageCallbacks, msg.Id)
c.mu.Unlock()
ch <- msg
return
}
c.mu.Unlock()
}
switch msg.Type {
case "event":
c.processEvent(msg)
default:
log.Printf("Received unsupported message %+v from %s", msg, c)
}
}
func (c *RemoteConnection) processEvent(msg *signaling.ProxyServerMessage) {
switch msg.Event.Type {
case "update-load":
default:
log.Printf("Received unsupported event %+v from %s", msg, c)
}
}
func (c *RemoteConnection) RequestMessage(ctx context.Context, msg *signaling.ProxyClientMessage) (*signaling.ProxyServerMessage, error) {
msg.Id = strconv.FormatInt(c.msgId.Add(1), 10)
c.mu.Lock()
defer c.mu.Unlock()
if err := c.sendMessageLocked(ctx, msg); err != nil {
return nil, err
}
ch := make(chan *signaling.ProxyServerMessage, 1)
c.messageCallbacks[msg.Id] = ch
c.mu.Unlock()
defer func() {
c.mu.Lock()
delete(c.messageCallbacks, msg.Id)
}()
select {
case <-ctx.Done():
// TODO: Cancel request.
return nil, ctx.Err()
case response := <-ch:
if response.Type == "error" {
return nil, response.Error
}
return response, nil
}
}
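
Aside, not part of the diff: a hypothetical usage sketch for RemoteConnection, with placeholder values throughout (the URL, token id, the *rsa.PrivateKey in privateKey, the publisher id, and the assumption that signaling.StreamTypeVideo exists are all illustrative):

	conn, err := NewRemoteConnection("https://other-proxy.example.org/", "server1", privateKey, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	response, err := conn.RequestMessage(ctx, &signaling.ProxyClientMessage{
		Type: "command",
		Command: &signaling.CommandProxyClientMessage{
			Type:        "create-subscriber",
			PublisherId: "placeholder-publisher-id",
			StreamType:  signaling.StreamTypeVideo,
		},
	})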

View file

@ -24,10 +24,7 @@ package main
import (
"context"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"log"
@ -48,7 +45,6 @@ import (
"github.com/gorilla/mux"
"github.com/gorilla/securecookie"
"github.com/gorilla/websocket"
"github.com/notedit/janus-go"
"github.com/prometheus/client_golang/prometheus/promhttp"
signaling "github.com/strukturag/nextcloud-spreed-signaling"
@ -67,16 +63,6 @@ const (
// Maximum age a token may have to prevent reuse of old tokens.
maxTokenAge = 5 * time.Minute
remotePublisherTimeout = 5 * time.Second
ProxyFeatureRemoteStreams = "remote-streams"
)
var (
defaultProxyFeatures = []string{
ProxyFeatureRemoteStreams,
}
)
type ContextKey string
@ -84,44 +70,35 @@ type ContextKey string
var (
ContextKeySession = ContextKey("session")
TimeoutCreatingPublisher = signaling.NewError("timeout", "Timeout creating publisher.")
TimeoutCreatingSubscriber = signaling.NewError("timeout", "Timeout creating subscriber.")
TokenAuthFailed = signaling.NewError("auth_failed", "The token could not be authenticated.")
TokenExpired = signaling.NewError("token_expired", "The token is expired.")
TokenNotValidYet = signaling.NewError("token_not_valid_yet", "The token is not valid yet.")
UnknownClient = signaling.NewError("unknown_client", "Unknown client id given.")
UnsupportedCommand = signaling.NewError("bad_request", "Unsupported command received.")
UnsupportedMessage = signaling.NewError("bad_request", "Unsupported message received.")
UnsupportedPayload = signaling.NewError("unsupported_payload", "Unsupported payload type.")
ShutdownScheduled = signaling.NewError("shutdown_scheduled", "The server is scheduled to shutdown.")
RemoteSubscribersNotSupported = signaling.NewError("unsupported_subscriber", "Remote subscribers are not supported.")
TimeoutCreatingPublisher = signaling.NewError("timeout", "Timeout creating publisher.")
TimeoutCreatingSubscriber = signaling.NewError("timeout", "Timeout creating subscriber.")
TokenAuthFailed = signaling.NewError("auth_failed", "The token could not be authenticated.")
TokenExpired = signaling.NewError("token_expired", "The token is expired.")
TokenNotValidYet = signaling.NewError("token_not_valid_yet", "The token is not valid yet.")
UnknownClient = signaling.NewError("unknown_client", "Unknown client id given.")
UnsupportedCommand = signaling.NewError("bad_request", "Unsupported command received.")
UnsupportedMessage = signaling.NewError("bad_request", "Unsupported message received.")
UnsupportedPayload = signaling.NewError("unsupported_payload", "Unsupported payload type.")
ShutdownScheduled = signaling.NewError("shutdown_scheduled", "The server is scheduled to shutdown.")
)
type ProxyServer struct {
version string
country string
welcomeMessage string
welcomeMsg *signaling.WelcomeServerMessage
config *goconf.ConfigFile
url string
mcu signaling.Mcu
stopped atomic.Bool
load atomic.Int64
maxIncoming atomic.Int64
currentIncoming atomic.Int64
maxOutgoing atomic.Int64
currentOutgoing atomic.Int64
shutdownChannel chan struct{}
shutdownScheduled atomic.Bool
upgrader websocket.Upgrader
tokens ProxyTokens
statsAllowedIps atomic.Pointer[signaling.AllowedIps]
trustedProxies atomic.Pointer[signaling.AllowedIps]
statsAllowedIps *signaling.AllowedIps
sid atomic.Uint64
cookie *securecookie.SecureCookie
@ -131,71 +108,6 @@ type ProxyServer struct {
clients map[string]signaling.McuClient
clientIds map[string]string
clientsLock sync.RWMutex
tokenId string
tokenKey *rsa.PrivateKey
remoteTlsConfig *tls.Config
remoteHostname string
remoteConnections map[string]*RemoteConnection
remoteConnectionsLock sync.Mutex
}
func IsPublicIP(IP net.IP) bool {
if IP.IsLoopback() || IP.IsLinkLocalMulticast() || IP.IsLinkLocalUnicast() {
return false
}
if ip4 := IP.To4(); ip4 != nil {
switch {
case ip4[0] == 10:
return false
case ip4[0] == 172 && ip4[1] >= 16 && ip4[1] <= 31:
return false
case ip4[0] == 192 && ip4[1] == 168:
return false
default:
return true
}
}
return false
}
func GetLocalIP() (string, error) {
addrs, err := net.InterfaceAddrs()
if err != nil {
return "", err
}
for _, address := range addrs {
if ipnet, ok := address.(*net.IPNet); ok && IsPublicIP(ipnet.IP) {
if ipnet.IP.To4() != nil {
return ipnet.IP.String(), nil
}
}
}
return "", nil
}
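
IsPublicIP above hand-codes the loopback, link-local and RFC 1918 checks. For comparison only (not part of this diff), Go 1.17+ exposes the same classification via net.IP.IsPrivate, so an equivalent IPv4 check could be sketched as:

package main

import (
	"fmt"
	"net"
)

// isPublicIPv4 mirrors the semantics of IsPublicIP using the standard
// library: loopback, link-local and private (RFC 1918) addresses are
// non-public; any other IPv4 address is public.
func isPublicIPv4(ip net.IP) bool {
	if ip.To4() == nil {
		return false // like GetLocalIP, only IPv4 is considered here
	}
	return !ip.IsLoopback() && !ip.IsLinkLocalUnicast() &&
		!ip.IsLinkLocalMulticast() && !ip.IsPrivate()
}

func main() {
	fmt.Println(isPublicIPv4(net.ParseIP("8.8.8.8")))     // true
	fmt.Println(isPublicIPv4(net.ParseIP("192.168.0.1"))) // false
}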
func getTargetBandwidths(config *goconf.ConfigFile) (int, int) {
maxIncoming, _ := config.GetInt("bandwidth", "incoming")
if maxIncoming < 0 {
maxIncoming = 0
}
if maxIncoming > 0 {
log.Printf("Target bandwidth for incoming streams: %d MBit/s", maxIncoming)
} else {
log.Printf("Target bandwidth for incoming streams: unlimited")
}
maxOutgoing, _ := config.GetInt("bandwidth", "outgoing")
if maxOutgoing < 0 {
maxOutgoing = 0
}
if maxOutgoing > 0 {
log.Printf("Target bandwidth for outgoing streams: %d MBit/s", maxOutgoing)
} else {
log.Printf("Target bandwidth for outgoing streams: unlimited")
}
return maxIncoming, maxOutgoing
}
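
getTargetBandwidths reads both targets in MBit/s from the [bandwidth] section, clamping negative values to 0, where 0 means unlimited. A hypothetical proxy.conf snippet (file name assumed):

[bandwidth]
# Target bandwidth for incoming streams in MBit/s, 0 = unlimited.
incoming = 1024
# Target bandwidth for outgoing streams in MBit/s, 0 = unlimited.
outgoing = 2048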
func NewProxyServer(r *mux.Router, version string, config *goconf.ConfigFile) (*ProxyServer, error) {
@ -241,19 +153,6 @@ func NewProxyServer(r *mux.Router, version string, config *goconf.ConfigFile) (*
statsAllowedIps = signaling.DefaultAllowedIps()
}
trustedProxies, _ := config.GetString("app", "trustedproxies")
trustedProxiesIps, err := signaling.ParseAllowedIps(trustedProxies)
if err != nil {
return nil, err
}
if !trustedProxiesIps.Empty() {
log.Printf("Trusted proxies: %s", trustedProxiesIps)
} else {
trustedProxiesIps = signaling.DefaultTrustedProxies
log.Printf("No trusted proxies configured, only allowing for %s", trustedProxiesIps)
}
country, _ := config.GetString("app", "country")
country = strings.ToUpper(country)
if signaling.IsValidCountry(country) {
@ -274,61 +173,10 @@ func NewProxyServer(r *mux.Router, version string, config *goconf.ConfigFile) (*
return nil, err
}
tokenId, _ := config.GetString("app", "token_id")
var tokenKey *rsa.PrivateKey
var remoteHostname string
var remoteTlsConfig *tls.Config
if tokenId != "" {
tokenKeyFilename, _ := config.GetString("app", "token_key")
if tokenKeyFilename == "" {
return nil, fmt.Errorf("No token key configured")
}
tokenKeyData, err := os.ReadFile(tokenKeyFilename)
if err != nil {
return nil, fmt.Errorf("Could not read private key from %s: %s", tokenKeyFilename, err)
}
tokenKey, err = jwt.ParseRSAPrivateKeyFromPEM(tokenKeyData)
if err != nil {
return nil, fmt.Errorf("Could not parse private key from %s: %s", tokenKeyFilename, err)
}
log.Printf("Using \"%s\" as token id for remote streams", tokenId)
remoteHostname, _ = config.GetString("app", "hostname")
if remoteHostname == "" {
remoteHostname, err = GetLocalIP()
if err != nil {
return nil, fmt.Errorf("could not get local ip: %w", err)
}
}
if remoteHostname == "" {
log.Printf("WARNING: Could not determine hostname for remote streams, will be disabled. Please configure manually.")
} else {
log.Printf("Using \"%s\" as hostname for remote streams", remoteHostname)
}
skipverify, _ := config.GetBool("backend", "skipverify")
if skipverify {
log.Println("WARNING: Remote stream requests verification is disabled!")
remoteTlsConfig = &tls.Config{
InsecureSkipVerify: skipverify,
}
}
} else {
log.Printf("No token id configured, remote streams will be disabled")
}
maxIncoming, maxOutgoing := getTargetBandwidths(config)
result := &ProxyServer{
version: version,
country: country,
welcomeMessage: string(welcomeMessage) + "\n",
welcomeMsg: &signaling.WelcomeServerMessage{
Version: version,
Country: country,
Features: defaultProxyFeatures,
},
config: config,
shutdownChannel: make(chan struct{}),
@ -337,25 +185,16 @@ func NewProxyServer(r *mux.Router, version string, config *goconf.ConfigFile) (*
WriteBufferSize: websocketWriteBufferSize,
},
tokens: tokens,
tokens: tokens,
statsAllowedIps: statsAllowedIps,
cookie: securecookie.New(hashKey, blockKey).MaxAge(0),
sessions: make(map[uint64]*ProxySession),
clients: make(map[string]signaling.McuClient),
clientIds: make(map[string]string),
tokenId: tokenId,
tokenKey: tokenKey,
remoteTlsConfig: remoteTlsConfig,
remoteHostname: remoteHostname,
remoteConnections: make(map[string]*RemoteConnection),
}
result.maxIncoming.Store(int64(maxIncoming) * 1024 * 1024)
result.maxOutgoing.Store(int64(maxOutgoing) * 1024 * 1024)
result.statsAllowedIps.Store(statsAllowedIps)
result.trustedProxies.Store(trustedProxiesIps)
result.upgrader.CheckOrigin = result.checkOrigin
if debug, _ := config.GetBool("app", "debug"); debug {
@ -384,7 +223,7 @@ func (s *ProxyServer) checkOrigin(r *http.Request) bool {
}
func (s *ProxyServer) Start(config *goconf.ConfigFile) error {
s.url, _ = signaling.GetStringOptionWithEnv(config, "mcu", "url")
s.url, _ = config.GetString("mcu", "url")
if s.url == "" {
return fmt.Errorf("No MCU server url configured")
}
@ -406,7 +245,7 @@ func (s *ProxyServer) Start(config *goconf.ConfigFile) error {
for {
switch mcuType {
case signaling.McuTypeJanus:
mcu, err = signaling.NewMcuJanus(ctx, s.url, config)
mcu, err = signaling.NewMcuJanus(s.url, config)
if err == nil {
signaling.RegisterJanusMcuStats()
}
@ -416,7 +255,7 @@ func (s *ProxyServer) Start(config *goconf.ConfigFile) error {
if err == nil {
mcu.SetOnConnected(s.onMcuConnected)
mcu.SetOnDisconnected(s.onMcuDisconnected)
err = mcu.Start(ctx)
err = mcu.Start()
if err != nil {
log.Printf("Could not create %s MCU at %s: %s", mcuType, s.url, err)
}
@ -459,7 +298,18 @@ loop:
}
}
func (s *ProxyServer) newLoadEvent(load int64, incoming int64, outgoing int64) *signaling.ProxyServerMessage {
func (s *ProxyServer) updateLoad() {
load := s.GetClientsLoad()
if load == s.load.Load() {
return
}
s.load.Store(load)
if s.shutdownScheduled.Load() {
// Server is scheduled to shutdown, no need to update clients with current load.
return
}
msg := &signaling.ProxyServerMessage{
Type: "event",
Event: &signaling.EventProxyServerMessage{
@ -467,41 +317,7 @@ func (s *ProxyServer) newLoadEvent(load int64, incoming int64, outgoing int64) *
Load: load,
},
}
maxIncoming := s.maxIncoming.Load()
maxOutgoing := s.maxOutgoing.Load()
if maxIncoming > 0 || maxOutgoing > 0 {
msg.Event.Bandwidth = &signaling.EventProxyServerBandwidth{}
if maxIncoming > 0 {
value := float64(incoming) / float64(maxIncoming) * 100
msg.Event.Bandwidth.Incoming = &value
}
if maxOutgoing > 0 {
value := float64(outgoing) / float64(maxOutgoing) * 100
msg.Event.Bandwidth.Outgoing = &value
}
}
return msg
}
func (s *ProxyServer) updateLoad() {
load, incoming, outgoing := s.GetClientsLoad()
oldLoad := s.load.Swap(load)
oldIncoming := s.currentIncoming.Swap(incoming)
oldOutgoing := s.currentOutgoing.Swap(outgoing)
if oldLoad == load && oldIncoming == incoming && oldOutgoing == outgoing {
return
}
s.sendLoadToAll(load, incoming, outgoing)
}
func (s *ProxyServer) sendLoadToAll(load int64, incoming int64, outgoing int64) {
if s.shutdownScheduled.Load() {
// Server is scheduled to shutdown, no need to update clients with current load.
return
}
msg := s.newLoadEvent(load, incoming, outgoing)
s.IterateSessions(func(session *ProxySession) {
session.sendMessage(msg)
})
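
newLoadEvent above reports bandwidth usage as a percentage of the configured maximum; both sides are in bits per second, since NewProxyServer stores the configured MBit/s value multiplied by 1024 * 1024. A worked example under those assumptions:

package main

import "fmt"

func main() {
	maxIncoming := int64(100) * 1024 * 1024 // 100 MBit/s, as stored by NewProxyServer
	incoming := int64(52428800)             // summed publisher bitrates: 50 MBit/s
	value := float64(incoming) / float64(maxIncoming) * 100
	fmt.Printf("incoming bandwidth usage: %.0f%%\n", value) // 50%
}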
@ -572,42 +388,7 @@ func (s *ProxyServer) ScheduleShutdown() {
}
func (s *ProxyServer) Reload(config *goconf.ConfigFile) {
statsAllowed, _ := config.GetString("stats", "allowed_ips")
if statsAllowedIps, err := signaling.ParseAllowedIps(statsAllowed); err == nil {
if !statsAllowedIps.Empty() {
log.Printf("Only allowing access to the stats endpoint from %s", statsAllowed)
} else {
log.Printf("No IPs configured for the stats endpoint, only allowing access from 127.0.0.1")
statsAllowedIps = signaling.DefaultAllowedIps()
}
s.statsAllowedIps.Store(statsAllowedIps)
} else {
log.Printf("Error parsing allowed stats ips from \"%s\": %s", statsAllowedIps, err)
}
trustedProxies, _ := config.GetString("app", "trustedproxies")
if trustedProxiesIps, err := signaling.ParseAllowedIps(trustedProxies); err == nil {
if !trustedProxiesIps.Empty() {
log.Printf("Trusted proxies: %s", trustedProxiesIps)
} else {
trustedProxiesIps = signaling.DefaultTrustedProxies
log.Printf("No trusted proxies configured, only allowing for %s", trustedProxiesIps)
}
s.trustedProxies.Store(trustedProxiesIps)
} else {
log.Printf("Error parsing trusted proxies from \"%s\": %s", trustedProxies, err)
}
maxIncoming, maxOutgoing := getTargetBandwidths(config)
oldIncoming := s.maxIncoming.Swap(int64(maxIncoming))
oldOutgoing := s.maxOutgoing.Swap(int64(maxOutgoing))
if oldIncoming != int64(maxIncoming) || oldOutgoing != int64(maxOutgoing) {
// Notify sessions about updated load / bandwidth usage.
go s.sendLoadToAll(s.load.Load(), s.currentIncoming.Load(), s.currentOutgoing.Load())
}
s.tokens.Reload(config)
s.mcu.Reload(config)
}
func (s *ProxyServer) setCommonHeaders(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
@ -617,6 +398,24 @@ func (s *ProxyServer) setCommonHeaders(f func(http.ResponseWriter, *http.Request
}
}
func getRealUserIP(r *http.Request) string {
// Note this function assumes it is running behind a trusted proxy, so
// the headers can be trusted.
if ip := r.Header.Get("X-Real-IP"); ip != "" {
return ip
}
if ip := r.Header.Get("X-Forwarded-For"); ip != "" {
// Result could be a list "clientip, proxy1, proxy2", so only use first element.
if pos := strings.Index(ip, ","); pos >= 0 {
ip = strings.TrimSpace(ip[:pos])
}
return ip
}
return r.RemoteAddr
}
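
For illustration, this is how the X-Forwarded-For branch of getRealUserIP resolves a multi-proxy header (values hypothetical):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// firstForwardedFor extracts the client address the same way
// getRealUserIP does: take the first element of the list.
func firstForwardedFor(h http.Header) string {
	ip := h.Get("X-Forwarded-For")
	if pos := strings.Index(ip, ","); pos >= 0 {
		ip = strings.TrimSpace(ip[:pos])
	}
	return ip
}

func main() {
	h := http.Header{}
	h.Set("X-Forwarded-For", "203.0.113.7, 10.0.0.1, 10.0.0.2")
	fmt.Println(firstForwardedFor(h)) // 203.0.113.7
}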
func (s *ProxyServer) welcomeHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.WriteHeader(http.StatusOK)
@ -624,11 +423,8 @@ func (s *ProxyServer) welcomeHandler(w http.ResponseWriter, r *http.Request) {
}
func (s *ProxyServer) proxyHandler(w http.ResponseWriter, r *http.Request) {
addr := signaling.GetRealUserIP(r, s.trustedProxies.Load())
header := http.Header{}
header.Set("Server", "nextcloud-spreed-signaling-proxy/"+s.version)
header.Set("X-Spreed-Signaling-Features", strings.Join(s.welcomeMsg.Features, ", "))
conn, err := s.upgrader.Upgrade(w, r, header)
addr := getRealUserIP(r)
conn, err := s.upgrader.Upgrade(w, r, nil)
if err != nil {
log.Printf("Could not upgrade request from %s: %s", addr, err)
return
@ -683,7 +479,13 @@ func (s *ProxyServer) onMcuDisconnected() {
}
func (s *ProxyServer) sendCurrentLoad(session *ProxySession) {
msg := s.newLoadEvent(s.load.Load(), s.currentIncoming.Load(), s.currentOutgoing.Load())
msg := &signaling.ProxyServerMessage{
Type: "event",
Event: &signaling.EventProxyServerMessage{
Type: "update-load",
Load: s.load.Load(),
},
}
session.sendMessage(msg)
}
@ -777,7 +579,10 @@ func (s *ProxyServer) processMessage(client *ProxyClient, data []byte) {
Hello: &signaling.HelloProxyServerMessage{
Version: signaling.HelloVersionV1,
SessionId: session.PublicId(),
Server: s.welcomeMsg,
Server: &signaling.WelcomeServerMessage{
Version: s.version,
Country: s.country,
},
},
}
client.SendMessage(response)
@ -808,59 +613,6 @@ func (i *emptyInitiator) Country() string {
return ""
}
type proxyRemotePublisher struct {
proxy *ProxyServer
remoteUrl string
publisherId string
}
func (p *proxyRemotePublisher) PublisherId() string {
return p.publisherId
}
func (p *proxyRemotePublisher) StartPublishing(ctx context.Context, publisher signaling.McuRemotePublisherProperties) error {
conn, err := p.proxy.getRemoteConnection(p.remoteUrl)
if err != nil {
return err
}
if _, err := conn.RequestMessage(ctx, &signaling.ProxyClientMessage{
Type: "command",
Command: &signaling.CommandProxyClientMessage{
Type: "publish-remote",
ClientId: p.publisherId,
Hostname: p.proxy.remoteHostname,
Port: publisher.Port(),
RtcpPort: publisher.RtcpPort(),
},
}); err != nil {
return err
}
return nil
}
func (p *proxyRemotePublisher) GetStreams(ctx context.Context) ([]signaling.PublisherStream, error) {
conn, err := p.proxy.getRemoteConnection(p.remoteUrl)
if err != nil {
return nil, err
}
response, err := conn.RequestMessage(ctx, &signaling.ProxyClientMessage{
Type: "command",
Command: &signaling.CommandProxyClientMessage{
Type: "get-publisher-streams",
ClientId: p.publisherId,
},
})
if err != nil {
return nil, err
}
return response.Command.Streams, nil
}
func (s *ProxyServer) processCommand(ctx context.Context, client *ProxyClient, session *ProxySession, message *signaling.ProxyClientMessage) {
cmd := message.Command
@ -903,89 +655,18 @@ func (s *ProxyServer) processCommand(ctx context.Context, client *ProxyClient, s
case "create-subscriber":
id := uuid.New().String()
publisherId := cmd.PublisherId
var subscriber signaling.McuSubscriber
var err error
handleCreateError := func(err error) {
if err == context.DeadlineExceeded {
log.Printf("Timeout while creating %s subscriber on %s for %s", cmd.StreamType, publisherId, session.PublicId())
session.sendMessage(message.NewErrorServerMessage(TimeoutCreatingSubscriber))
return
} else if errors.Is(err, signaling.ErrRemoteStreamsNotSupported) {
session.sendMessage(message.NewErrorServerMessage(RemoteSubscribersNotSupported))
return
}
subscriber, err := s.mcu.NewSubscriber(ctx, session, publisherId, cmd.StreamType)
if err == context.DeadlineExceeded {
log.Printf("Timeout while creating %s subscriber on %s for %s", cmd.StreamType, publisherId, session.PublicId())
session.sendMessage(message.NewErrorServerMessage(TimeoutCreatingSubscriber))
return
} else if err != nil {
log.Printf("Error while creating %s subscriber on %s for %s: %s", cmd.StreamType, publisherId, session.PublicId(), err)
session.sendMessage(message.NewWrappedErrorServerMessage(err))
return
}
if cmd.RemoteUrl != "" {
if s.tokenId == "" || s.tokenKey == nil || s.remoteHostname == "" {
session.sendMessage(message.NewErrorServerMessage(RemoteSubscribersNotSupported))
return
}
remoteMcu, ok := s.mcu.(signaling.RemoteMcu)
if !ok {
session.sendMessage(message.NewErrorServerMessage(RemoteSubscribersNotSupported))
return
}
claims, _, err := s.parseToken(cmd.RemoteToken)
if err != nil {
if e, ok := err.(*signaling.Error); ok {
client.SendMessage(message.NewErrorServerMessage(e))
} else {
client.SendMessage(message.NewWrappedErrorServerMessage(err))
}
return
}
if claims.Subject != publisherId {
session.sendMessage(message.NewErrorServerMessage(TokenAuthFailed))
return
}
subCtx, cancel := context.WithTimeout(ctx, remotePublisherTimeout)
defer cancel()
log.Printf("Creating remote subscriber for %s on %s", publisherId, cmd.RemoteUrl)
controller := &proxyRemotePublisher{
proxy: s,
remoteUrl: cmd.RemoteUrl,
publisherId: publisherId,
}
var publisher signaling.McuRemotePublisher
publisher, err = remoteMcu.NewRemotePublisher(subCtx, session, controller, cmd.StreamType)
if err != nil {
handleCreateError(err)
return
}
defer func() {
go publisher.Close(context.Background())
}()
subscriber, err = remoteMcu.NewRemoteSubscriber(subCtx, session, publisher)
if err != nil {
handleCreateError(err)
return
}
log.Printf("Created remote %s subscriber %s as %s for %s on %s", cmd.StreamType, subscriber.Id(), id, session.PublicId(), cmd.RemoteUrl)
} else {
subscriber, err = s.mcu.NewSubscriber(ctx, session, publisherId, cmd.StreamType, &emptyInitiator{})
if err != nil {
handleCreateError(err)
return
}
log.Printf("Created %s subscriber %s as %s for %s", cmd.StreamType, subscriber.Id(), id, session.PublicId())
}
log.Printf("Created %s subscriber %s as %s for %s", cmd.StreamType, subscriber.Id(), id, session.PublicId())
session.StoreSubscriber(ctx, id, subscriber)
s.StoreClient(id, subscriber)
@ -1070,77 +751,6 @@ func (s *ProxyServer) processCommand(ctx context.Context, client *ProxyClient, s
},
}
session.sendMessage(response)
case "publish-remote":
client := s.GetClient(cmd.ClientId)
if client == nil {
session.sendMessage(message.NewErrorServerMessage(UnknownClient))
return
}
publisher, ok := client.(signaling.McuPublisher)
if !ok {
session.sendMessage(message.NewErrorServerMessage(UnknownClient))
return
}
if err := publisher.PublishRemote(ctx, session.PublicId(), cmd.Hostname, cmd.Port, cmd.RtcpPort); err != nil {
var je *janus.ErrorMsg
if !errors.As(err, &je) || je.Err.Code != signaling.JANUS_VIDEOROOM_ERROR_ID_EXISTS {
log.Printf("Error publishing %s %s to remote %s (port=%d, rtcpPort=%d): %s", publisher.StreamType(), cmd.ClientId, cmd.Hostname, cmd.Port, cmd.RtcpPort, err)
session.sendMessage(message.NewWrappedErrorServerMessage(err))
return
}
if err := publisher.UnpublishRemote(ctx, session.PublicId()); err != nil {
log.Printf("Error unpublishing old %s %s to remote %s (port=%d, rtcpPort=%d): %s", publisher.StreamType(), cmd.ClientId, cmd.Hostname, cmd.Port, cmd.RtcpPort, err)
session.sendMessage(message.NewWrappedErrorServerMessage(err))
return
}
if err := publisher.PublishRemote(ctx, session.PublicId(), cmd.Hostname, cmd.Port, cmd.RtcpPort); err != nil {
log.Printf("Error publishing %s %s to remote %s (port=%d, rtcpPort=%d): %s", publisher.StreamType(), cmd.ClientId, cmd.Hostname, cmd.Port, cmd.RtcpPort, err)
session.sendMessage(message.NewWrappedErrorServerMessage(err))
return
}
}
response := &signaling.ProxyServerMessage{
Id: message.Id,
Type: "command",
Command: &signaling.CommandProxyServerMessage{
Id: cmd.ClientId,
},
}
session.sendMessage(response)
case "get-publisher-streams":
client := s.GetClient(cmd.ClientId)
if client == nil {
session.sendMessage(message.NewErrorServerMessage(UnknownClient))
return
}
publisher, ok := client.(signaling.McuPublisher)
if !ok {
session.sendMessage(message.NewErrorServerMessage(UnknownClient))
return
}
streams, err := publisher.GetStreams(ctx)
if err != nil {
log.Printf("Could not get streams of publisher %s: %s", publisher.Id(), err)
session.sendMessage(message.NewWrappedErrorServerMessage(err))
return
}
response := &signaling.ProxyServerMessage{
Id: message.Id,
Type: "command",
Command: &signaling.CommandProxyServerMessage{
Id: cmd.ClientId,
Streams: streams,
},
}
session.sendMessage(response)
default:
log.Printf("Unsupported command %+v", message.Command)
session.sendMessage(message.NewErrorServerMessage(UnsupportedCommand))
@ -1167,10 +777,9 @@ func (s *ProxyServer) processPayload(ctx context.Context, client *ProxyClient, s
fallthrough
case "candidate":
mcuData = &signaling.MessageClientMessageData{
RoomType: string(mcuClient.StreamType()),
Type: payload.Type,
Sid: payload.Sid,
Payload: payload.Payload,
Type: payload.Type,
Sid: payload.Sid,
Payload: payload.Payload,
}
case "endOfCandidates":
// Ignore but confirm, not passed along to Janus anyway.
@ -1187,21 +796,14 @@ func (s *ProxyServer) processPayload(ctx context.Context, client *ProxyClient, s
fallthrough
case "sendoffer":
mcuData = &signaling.MessageClientMessageData{
RoomType: string(mcuClient.StreamType()),
Type: payload.Type,
Sid: payload.Sid,
Type: payload.Type,
Sid: payload.Sid,
}
default:
session.sendMessage(message.NewErrorServerMessage(UnsupportedPayload))
return
}
if err := mcuData.CheckValid(); err != nil {
log.Printf("Received invalid payload %+v for %s client %s: %s", mcuData, mcuClient.StreamType(), payload.ClientId, err)
session.sendMessage(message.NewErrorServerMessage(UnsupportedPayload))
return
}
mcuClient.SendMessage(ctx, nil, mcuData, func(err error, response map[string]interface{}) {
var responseMsg *signaling.ProxyServerMessage
if err != nil {
@ -1223,9 +825,13 @@ func (s *ProxyServer) processPayload(ctx context.Context, client *ProxyClient, s
})
}
func (s *ProxyServer) parseToken(tokenValue string) (*signaling.TokenClaims, string, error) {
func (s *ProxyServer) NewSession(hello *signaling.HelloProxyClientMessage) (*ProxySession, error) {
if proxyDebugMessages {
log.Printf("Hello: %+v", hello)
}
reason := "auth-failed"
token, err := jwt.ParseWithClaims(tokenValue, &signaling.TokenClaims{}, func(token *jwt.Token) (interface{}, error) {
token, err := jwt.ParseWithClaims(hello.Token, &signaling.TokenClaims{}, func(token *jwt.Token) (interface{}, error) {
// Don't forget to validate the alg is what you expect:
if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
log.Printf("Unexpected signing method: %v", token.Header["alg"])
@ -1257,35 +863,25 @@ func (s *ProxyServer) parseToken(tokenValue string) (*signaling.TokenClaims, str
})
if err, ok := err.(*jwt.ValidationError); ok {
if err.Errors&jwt.ValidationErrorIssuedAt == jwt.ValidationErrorIssuedAt {
return nil, "not-valid-yet", TokenNotValidYet
statsTokenErrorsTotal.WithLabelValues("not-valid-yet").Inc()
return nil, TokenNotValidYet
}
}
if err != nil {
return nil, reason, TokenAuthFailed
statsTokenErrorsTotal.WithLabelValues(reason).Inc()
return nil, TokenAuthFailed
}
claims, ok := token.Claims.(*signaling.TokenClaims)
if !ok || !token.Valid {
return nil, "auth-failed", TokenAuthFailed
statsTokenErrorsTotal.WithLabelValues("auth-failed").Inc()
return nil, TokenAuthFailed
}
minIssuedAt := time.Now().Add(-maxTokenAge)
if issuedAt := claims.IssuedAt; issuedAt != nil && issuedAt.Before(minIssuedAt) {
return nil, "expired", TokenExpired
}
return claims, "", nil
}
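
The client-side counterpart to parseToken is signing a short-lived RS256 token whose issuer is the configured token id; the tests further down build exactly this. A condensed sketch (the generated key stands in for the configured private key):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
	signaling "github.com/strukturag/nextcloud-spreed-signaling"
)

func main() {
	// Throwaway key for the sketch; in practice load the configured key.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	claims := &signaling.TokenClaims{
		RegisteredClaims: jwt.RegisteredClaims{
			IssuedAt: jwt.NewNumericDate(time.Now()),
			Issuer:   "my-token-id", // must match a token id known to the proxy
		},
	}
	token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
	tokenString, err := token.SignedString(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(tokenString)
}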
func (s *ProxyServer) NewSession(hello *signaling.HelloProxyClientMessage) (*ProxySession, error) {
if proxyDebugMessages {
log.Printf("Hello: %+v", hello)
}
claims, reason, err := s.parseToken(hello.Token)
if err != nil {
statsTokenErrorsTotal.WithLabelValues(reason).Inc()
return nil, err
statsTokenErrorsTotal.WithLabelValues("expired").Inc()
return nil, TokenExpired
}
sid := s.sid.Add(1)
@ -1381,21 +977,15 @@ func (s *ProxyServer) HasClients() bool {
return len(s.clients) > 0
}
func (s *ProxyServer) GetClientsLoad() (load int64, incoming int64, outgoing int64) {
func (s *ProxyServer) GetClientsLoad() int64 {
s.clientsLock.RLock()
defer s.clientsLock.RUnlock()
var load int64
for _, c := range s.clients {
bitrate := int64(c.MaxBitrate())
load += bitrate
if _, ok := c.(signaling.McuPublisher); ok {
incoming += bitrate
} else if _, ok := c.(signaling.McuSubscriber); ok {
outgoing += bitrate
}
load += int64(c.MaxBitrate())
}
load = load / 1024
return
return load / 1024
}
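
GetClientsLoad sums the maximum bitrates of all attached clients in bits per second, splits them into incoming (publishers) and outgoing (subscribers), and divides only the combined load by 1024 to report kbit/s. A small worked example under that reading:

package main

import "fmt"

func main() {
	pub1, pub2 := int64(1<<20), int64(1<<20) // two publishers at 1 MBit/s each
	sub := int64(2 << 20)                    // one subscriber at 2 MBit/s
	incoming := pub1 + pub2                  // 2097152 bit/s
	outgoing := sub                          // 2097152 bit/s
	load := (incoming + outgoing) / 1024     // 4096 kbit/s
	fmt.Println(load, incoming, outgoing)
}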
func (s *ProxyServer) GetClient(id string) signaling.McuClient {
@ -1404,22 +994,6 @@ func (s *ProxyServer) GetClient(id string) signaling.McuClient {
return s.clients[id]
}
func (s *ProxyServer) GetPublisher(publisherId string) signaling.McuPublisher {
s.clientsLock.RLock()
defer s.clientsLock.RUnlock()
for _, c := range s.clients {
pub, ok := c.(signaling.McuPublisher)
if !ok {
continue
}
if pub.Id() == publisherId {
return pub
}
}
return nil
}
func (s *ProxyServer) GetClientId(client signaling.McuClient) string {
s.clientsLock.RLock()
defer s.clientsLock.RUnlock()
@ -1436,14 +1010,19 @@ func (s *ProxyServer) getStats() map[string]interface{} {
}
func (s *ProxyServer) allowStatsAccess(r *http.Request) bool {
addr := signaling.GetRealUserIP(r, s.trustedProxies.Load())
addr := getRealUserIP(r)
if strings.Contains(addr, ":") {
if host, _, err := net.SplitHostPort(addr); err == nil {
addr = host
}
}
ip := net.ParseIP(addr)
if len(ip) == 0 {
if ip == nil {
return false
}
allowed := s.statsAllowedIps.Load()
return allowed != nil && allowed.Allowed(ip)
return s.statsAllowedIps.Allowed(ip)
}
func (s *ProxyServer) validateStatsRequest(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
@ -1476,21 +1055,3 @@ func (s *ProxyServer) metricsHandler(w http.ResponseWriter, r *http.Request) {
// Expose prometheus metrics at "/metrics".
promhttp.Handler().ServeHTTP(w, r)
}
func (s *ProxyServer) getRemoteConnection(url string) (*RemoteConnection, error) {
s.remoteConnectionsLock.Lock()
defer s.remoteConnectionsLock.Unlock()
conn, found := s.remoteConnections[url]
if found {
return conn, nil
}
conn, err := NewRemoteConnection(url, s.tokenId, s.tokenKey, s.remoteTlsConfig)
if err != nil {
return nil, err
}
s.remoteConnections[url] = conn
return conn, nil
}

View file

@ -22,22 +22,17 @@
package main
import (
"context"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"net"
"net/http/httptest"
"os"
"strings"
"testing"
"time"
"github.com/dlintw/goconf"
"github.com/golang-jwt/jwt/v4"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
signaling "github.com/strukturag/nextcloud-spreed-signaling"
)
@ -46,22 +41,12 @@ const (
TokenIdForTest = "foo"
)
func getWebsocketUrl(url string) string {
if strings.HasPrefix(url, "http://") {
return "ws://" + url[7:] + "/proxy"
} else if strings.HasPrefix(url, "https://") {
return "wss://" + url[8:] + "/proxy"
} else {
panic("Unsupported URL: " + url)
}
}
func newProxyServerForTest(t *testing.T) (*ProxyServer, *rsa.PrivateKey, *httptest.Server) {
func newProxyServerForTest(t *testing.T) (*ProxyServer, *rsa.PrivateKey) {
tempdir := t.TempDir()
var proxy *ProxyServer
var server *ProxyServer
t.Cleanup(func() {
if proxy != nil {
proxy.Stop()
if server != nil {
server.Stop()
}
})
@ -101,107 +86,14 @@ func newProxyServerForTest(t *testing.T) (*ProxyServer, *rsa.PrivateKey, *httpte
config := goconf.NewConfigFile()
config.AddOption("tokens", TokenIdForTest, pubkey.Name())
if proxy, err = NewProxyServer(r, "0.0", config); err != nil {
t.Fatalf("could not create proxy server: %s", err)
}
server := httptest.NewServer(r)
t.Cleanup(func() {
server.Close()
})
return proxy, key, server
}
func TestTokenValid(t *testing.T) {
signaling.CatchLogForTest(t)
proxy, key, _ := newProxyServerForTest(t)
claims := &signaling.TokenClaims{
RegisteredClaims: jwt.RegisteredClaims{
IssuedAt: jwt.NewNumericDate(time.Now().Add(-maxTokenAge / 2)),
Issuer: TokenIdForTest,
},
}
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
tokenString, err := token.SignedString(key)
if err != nil {
t.Fatalf("could not create token: %s", err)
}
hello := &signaling.HelloProxyClientMessage{
Version: "1.0",
Token: tokenString,
}
session, err := proxy.NewSession(hello)
if session != nil {
defer session.Close()
} else if err != nil {
t.Error(err)
}
}
func TestTokenNotSigned(t *testing.T) {
signaling.CatchLogForTest(t)
proxy, _, _ := newProxyServerForTest(t)
claims := &signaling.TokenClaims{
RegisteredClaims: jwt.RegisteredClaims{
IssuedAt: jwt.NewNumericDate(time.Now().Add(-maxTokenAge / 2)),
Issuer: TokenIdForTest,
},
}
token := jwt.NewWithClaims(jwt.SigningMethodNone, claims)
tokenString, err := token.SignedString(jwt.UnsafeAllowNoneSignatureType)
if err != nil {
t.Fatalf("could not create token: %s", err)
}
hello := &signaling.HelloProxyClientMessage{
Version: "1.0",
Token: tokenString,
}
session, err := proxy.NewSession(hello)
if session != nil {
defer session.Close()
t.Errorf("should not have created session")
} else if err != TokenAuthFailed {
t.Errorf("could have failed with TokenAuthFailed, got %s", err)
}
}
func TestTokenUnknown(t *testing.T) {
signaling.CatchLogForTest(t)
proxy, key, _ := newProxyServerForTest(t)
claims := &signaling.TokenClaims{
RegisteredClaims: jwt.RegisteredClaims{
IssuedAt: jwt.NewNumericDate(time.Now().Add(-maxTokenAge / 2)),
Issuer: TokenIdForTest + "2",
},
}
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
tokenString, err := token.SignedString(key)
if err != nil {
t.Fatalf("could not create token: %s", err)
}
hello := &signaling.HelloProxyClientMessage{
Version: "1.0",
Token: tokenString,
}
session, err := proxy.NewSession(hello)
if session != nil {
defer session.Close()
t.Errorf("should not have created session")
} else if err != TokenAuthFailed {
t.Errorf("could have failed with TokenAuthFailed, got %s", err)
if server, err = NewProxyServer(r, "0.0", config); err != nil {
t.Fatalf("could not create server: %s", err)
}
return server, key
}
func TestTokenInFuture(t *testing.T) {
signaling.CatchLogForTest(t)
proxy, key, _ := newProxyServerForTest(t)
server, key := newProxyServerForTest(t)
claims := &signaling.TokenClaims{
RegisteredClaims: jwt.RegisteredClaims{
@ -219,7 +111,7 @@ func TestTokenInFuture(t *testing.T) {
Version: "1.0",
Token: tokenString,
}
session, err := proxy.NewSession(hello)
session, err := server.NewSession(hello)
if session != nil {
defer session.Close()
t.Errorf("should not have created session")
@ -227,103 +119,3 @@ func TestTokenInFuture(t *testing.T) {
t.Errorf("could have failed with TokenNotValidYet, got %s", err)
}
}
func TestTokenExpired(t *testing.T) {
signaling.CatchLogForTest(t)
proxy, key, _ := newProxyServerForTest(t)
claims := &signaling.TokenClaims{
RegisteredClaims: jwt.RegisteredClaims{
IssuedAt: jwt.NewNumericDate(time.Now().Add(-maxTokenAge * 2)),
Issuer: TokenIdForTest,
},
}
token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
tokenString, err := token.SignedString(key)
if err != nil {
t.Fatalf("could not create token: %s", err)
}
hello := &signaling.HelloProxyClientMessage{
Version: "1.0",
Token: tokenString,
}
session, err := proxy.NewSession(hello)
if session != nil {
defer session.Close()
t.Errorf("should not have created session")
} else if err != TokenExpired {
t.Errorf("could have failed with TokenExpired, got %s", err)
}
}
func TestPublicIPs(t *testing.T) {
public := []string{
"8.8.8.8",
"172.15.1.2",
"172.32.1.2",
"192.167.0.1",
"192.169.0.1",
}
private := []string{
"127.0.0.1",
"10.1.2.3",
"172.16.1.2",
"172.31.1.2",
"192.168.0.1",
"192.168.254.254",
}
for _, s := range public {
ip := net.ParseIP(s)
if len(ip) == 0 {
t.Errorf("invalid IP: %s", s)
} else if !IsPublicIP(ip) {
t.Errorf("should be public IP: %s", s)
}
}
for _, s := range private {
ip := net.ParseIP(s)
if len(ip) == 0 {
t.Errorf("invalid IP: %s", s)
} else if IsPublicIP(ip) {
t.Errorf("should be private IP: %s", s)
}
}
}
func TestWebsocketFeatures(t *testing.T) {
signaling.CatchLogForTest(t)
_, _, server := newProxyServerForTest(t)
conn, response, err := websocket.DefaultDialer.DialContext(context.Background(), getWebsocketUrl(server.URL), nil)
if err != nil {
t.Fatal(err)
}
defer conn.Close() // nolint
if server := response.Header.Get("Server"); !strings.HasPrefix(server, "nextcloud-spreed-signaling-proxy/") {
t.Errorf("expected valid server header, got \"%s\"", server)
}
features := response.Header.Get("X-Spreed-Signaling-Features")
featuresList := make(map[string]bool)
for _, f := range strings.Split(features, ",") {
f = strings.TrimSpace(f)
if f != "" {
if _, found := featuresList[f]; found {
t.Errorf("duplicate feature id \"%s\" in \"%s\"", f, features)
}
featuresList[f] = true
}
}
if len(featuresList) == 0 {
t.Errorf("expected valid features header, got \"%s\"", features)
}
if _, found := featuresList["remote-streams"]; !found {
t.Errorf("expected feature \"remote-streams\", got \"%s\"", features)
}
if err := conn.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), time.Time{}); err != nil {
t.Errorf("could not write close message: %s", err)
}
}

View file

@ -299,9 +299,8 @@ func (s *ProxySession) clearPublishers() {
publisher.Close(context.Background())
}
}(s.publishers)
// Can't use clear(...) here as the map is processed by the goroutine above.
s.publishers = make(map[string]signaling.McuPublisher)
clear(s.publisherIds)
s.publisherIds = make(map[signaling.McuPublisher]string)
}
func (s *ProxySession) clearSubscribers() {
@ -316,9 +315,8 @@ func (s *ProxySession) clearSubscribers() {
subscriber.Close(context.Background())
}
}(s.subscribers)
// Can't use clear(...) here as the map is processed by the goroutine above.
s.subscribers = make(map[string]signaling.McuSubscriber)
clear(s.subscriberIds)
s.subscriberIds = make(map[signaling.McuSubscriber]string)
}
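
Both clear functions hand the old map to a cleanup goroutine and allocate a fresh one; as the comments note, clear(...) would mutate the very map that goroutine is still iterating. The pattern in isolation (a sketch):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	items := map[string]int{"a": 1, "b": 2}

	wg.Add(1)
	go func(closed map[string]int) { // the goroutine owns the old map
		defer wg.Done()
		for k, v := range closed {
			fmt.Println("closing", k, v)
		}
	}(items)

	// Allocate a new map instead of clear(items), which would race
	// with the iteration in the goroutine above.
	items = make(map[string]int)
	fmt.Println(len(items))
	wg.Wait()
}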
func (s *ProxySession) NotifyDisconnected() {

View file

@ -39,8 +39,6 @@ import (
"github.com/dlintw/goconf"
"go.etcd.io/etcd/server/v3/embed"
"go.etcd.io/etcd/server/v3/lease"
signaling "github.com/strukturag/nextcloud-spreed-signaling"
)
var (
@ -102,7 +100,6 @@ func newEtcdForTesting(t *testing.T) *embed.Etcd {
t.Cleanup(func() {
etcd.Close()
<-etcd.Server.StopNotify()
})
// Wait for server to be ready.
<-etcd.Server.ReadyNotify()
@ -163,7 +160,6 @@ func generateAndSaveKey(t *testing.T, etcd *embed.Etcd, name string) *rsa.Privat
}
func TestProxyTokensEtcd(t *testing.T) {
signaling.CatchLogForTest(t)
tokens, etcd := newTokensEtcdForTesting(t)
key1 := generateAndSaveKey(t, etcd, "/foo")

View file

@ -41,9 +41,6 @@ type proxyConfigEtcd struct {
keyPrefix string
keyInfos map[string]*ProxyInformationEtcd
urlToKey map[string]string
closeCtx context.Context
closeFunc context.CancelFunc
}
func NewProxyConfigEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient, proxy McuProxy) (ProxyConfig, error) {
@ -51,17 +48,12 @@ func NewProxyConfigEtcd(config *goconf.ConfigFile, etcdClient *EtcdClient, proxy
return nil, errors.New("No etcd endpoints configured")
}
closeCtx, closeFunc := context.WithCancel(context.Background())
result := &proxyConfigEtcd{
proxy: proxy,
client: etcdClient,
keyInfos: make(map[string]*ProxyInformationEtcd),
urlToKey: make(map[string]string),
closeCtx: closeCtx,
closeFunc: closeFunc,
}
if err := result.configure(config, false); err != nil {
return nil, err
@ -91,16 +83,17 @@ func (p *proxyConfigEtcd) Reload(config *goconf.ConfigFile) error {
func (p *proxyConfigEtcd) Stop() {
p.client.RemoveListener(p)
p.closeFunc()
}
func (p *proxyConfigEtcd) EtcdClientCreated(client *EtcdClient) {
go func() {
if err := client.WaitForConnection(p.closeCtx); err != nil {
if errors.Is(err, context.Canceled) {
return
}
if err := client.Watch(context.Background(), p.keyPrefix, p, clientv3.WithPrefix()); err != nil {
log.Printf("Error processing watch for %s: %s", p.keyPrefix, err)
}
}()
go func() {
if err := client.WaitForConnection(context.Background()); err != nil {
panic(err)
}
@ -108,47 +101,23 @@ func (p *proxyConfigEtcd) EtcdClientCreated(client *EtcdClient) {
if err != nil {
panic(err)
}
var nextRevision int64
for p.closeCtx.Err() == nil {
response, err := p.getProxyUrls(p.closeCtx, client, p.keyPrefix)
for {
response, err := p.getProxyUrls(client, p.keyPrefix)
if err != nil {
if errors.Is(err, context.Canceled) {
return
} else if errors.Is(err, context.DeadlineExceeded) {
if err == context.DeadlineExceeded {
log.Printf("Timeout getting initial list of proxy URLs, retry in %s", backoff.NextWait())
} else {
log.Printf("Could not get initial list of proxy URLs, retry in %s: %s", backoff.NextWait(), err)
}
backoff.Wait(p.closeCtx)
backoff.Wait(context.Background())
continue
}
for _, ev := range response.Kvs {
p.EtcdKeyUpdated(client, string(ev.Key), ev.Value, nil)
}
nextRevision = response.Header.Revision + 1
break
}
prevRevision := nextRevision
backoff.Reset()
for p.closeCtx.Err() == nil {
var err error
if nextRevision, err = client.Watch(p.closeCtx, p.keyPrefix, nextRevision, p, clientv3.WithPrefix()); err != nil {
log.Printf("Error processing watch for %s (%s), retry in %s", p.keyPrefix, err, backoff.NextWait())
backoff.Wait(p.closeCtx)
continue
}
if nextRevision != prevRevision {
backoff.Reset()
prevRevision = nextRevision
} else {
log.Printf("Processing watch for %s interrupted, retry in %s", p.keyPrefix, backoff.NextWait())
backoff.Wait(p.closeCtx)
p.EtcdKeyUpdated(client, string(ev.Key), ev.Value)
}
return
}
}()
}
@ -156,14 +125,14 @@ func (p *proxyConfigEtcd) EtcdClientCreated(client *EtcdClient) {
func (p *proxyConfigEtcd) EtcdWatchCreated(client *EtcdClient, key string) {
}
func (p *proxyConfigEtcd) getProxyUrls(ctx context.Context, client *EtcdClient, keyPrefix string) (*clientv3.GetResponse, error) {
ctx, cancel := context.WithTimeout(ctx, time.Second)
func (p *proxyConfigEtcd) getProxyUrls(client *EtcdClient, keyPrefix string) (*clientv3.GetResponse, error) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
return client.Get(ctx, keyPrefix, clientv3.WithPrefix())
}
func (p *proxyConfigEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data []byte, prevValue []byte) {
func (p *proxyConfigEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data []byte) {
var info ProxyInformationEtcd
if err := json.Unmarshal(data, &info); err != nil {
log.Printf("Could not decode proxy information %s: %s", string(data), err)
@ -204,7 +173,7 @@ func (p *proxyConfigEtcd) EtcdKeyUpdated(client *EtcdClient, key string, data []
}
}
func (p *proxyConfigEtcd) EtcdKeyDeleted(client *EtcdClient, key string, prevValue []byte) {
func (p *proxyConfigEtcd) EtcdKeyDeleted(client *EtcdClient, key string) {
p.mu.Lock()
defer p.mu.Unlock()
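
The revision-tracking variant shown above fetches the initial key set, then resumes the watch from Header.Revision + 1 so updates between the Get and the Watch are not lost. With the raw etcd clientv3 API the same idea looks roughly like this (a sketch, retry/backoff trimmed):

package main

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func watchPrefix(ctx context.Context, client *clientv3.Client, prefix string) error {
	resp, err := client.Get(ctx, prefix, clientv3.WithPrefix())
	if err != nil {
		return err
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("initial: %s = %s\n", kv.Key, kv.Value)
	}

	// Resume exactly after the revision observed by the Get above.
	rev := resp.Header.Revision + 1
	for wresp := range client.Watch(ctx, prefix, clientv3.WithPrefix(), clientv3.WithRev(rev)) {
		if err := wresp.Err(); err != nil {
			return err // the caller retries with backoff, as the loop above does
		}
		for _, ev := range wresp.Events {
			fmt.Printf("%s: %s = %s\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
	return ctx.Err()
}

func main() {
	// Wiring up a client and calling watchPrefix is omitted in this sketch.
}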

View file

@ -62,8 +62,6 @@ func SetEtcdProxy(t *testing.T, etcd *embed.Etcd, path string, proxy *TestProxyI
}
func TestProxyConfigEtcd(t *testing.T) {
t.Parallel()
CatchLogForTest(t)
proxy := newMcuProxyForConfig(t)
etcd, config := newProxyConfigEtcd(t, proxy)

View file

@ -86,7 +86,7 @@ func (p *proxyConfigStatic) configure(config *goconf.ConfigFile, fromReload bool
remove[u] = ips
}
mcuUrl, _ := GetStringOptionWithEnv(config, "mcu", "url")
mcuUrl, _ := config.GetString("mcu", "url")
for _, u := range strings.Split(mcuUrl, " ") {
u = strings.TrimSpace(u)
if u == "" {

View file

@ -59,7 +59,6 @@ func updateProxyConfigStatic(t *testing.T, config ProxyConfig, dns bool, urls ..
}
func TestProxyConfigStaticSimple(t *testing.T) {
CatchLogForTest(t)
proxy := newMcuProxyForConfig(t)
config, _ := newProxyConfigStatic(t, proxy, false, "https://foo/")
proxy.Expect("add", "https://foo/")
@ -78,7 +77,6 @@ func TestProxyConfigStaticSimple(t *testing.T) {
}
func TestProxyConfigStaticDNS(t *testing.T) {
CatchLogForTest(t)
lookup := newMockDnsLookupForTest(t)
proxy := newMcuProxyForConfig(t)
config, dnsMonitor := newProxyConfigStatic(t, proxy, true, "https://foo/")

View file

@ -1,99 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2021 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"sync"
)
type publisherStatsCounter struct {
mu sync.Mutex
streamTypes map[StreamType]bool
subscribers map[string]bool
}
func (c *publisherStatsCounter) Reset() {
c.mu.Lock()
defer c.mu.Unlock()
count := len(c.subscribers)
for streamType := range c.streamTypes {
statsMcuPublisherStreamTypesCurrent.WithLabelValues(string(streamType)).Dec()
statsMcuSubscriberStreamTypesCurrent.WithLabelValues(string(streamType)).Sub(float64(count))
}
c.streamTypes = nil
c.subscribers = nil
}
func (c *publisherStatsCounter) EnableStream(streamType StreamType, enable bool) {
c.mu.Lock()
defer c.mu.Unlock()
if enable == c.streamTypes[streamType] {
return
}
if enable {
if c.streamTypes == nil {
c.streamTypes = make(map[StreamType]bool)
}
c.streamTypes[streamType] = true
statsMcuPublisherStreamTypesCurrent.WithLabelValues(string(streamType)).Inc()
statsMcuSubscriberStreamTypesCurrent.WithLabelValues(string(streamType)).Add(float64(len(c.subscribers)))
} else {
delete(c.streamTypes, streamType)
statsMcuPublisherStreamTypesCurrent.WithLabelValues(string(streamType)).Dec()
statsMcuSubscriberStreamTypesCurrent.WithLabelValues(string(streamType)).Sub(float64(len(c.subscribers)))
}
}
func (c *publisherStatsCounter) AddSubscriber(id string) {
c.mu.Lock()
defer c.mu.Unlock()
if c.subscribers[id] {
return
}
if c.subscribers == nil {
c.subscribers = make(map[string]bool)
}
c.subscribers[id] = true
for streamType := range c.streamTypes {
statsMcuSubscriberStreamTypesCurrent.WithLabelValues(string(streamType)).Inc()
}
}
func (c *publisherStatsCounter) RemoveSubscriber(id string) {
c.mu.Lock()
defer c.mu.Unlock()
if !c.subscribers[id] {
return
}
delete(c.subscribers, id)
for streamType := range c.streamTypes {
statsMcuSubscriberStreamTypesCurrent.WithLabelValues(string(streamType)).Dec()
}
}
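
For context, a hypothetical usage of the publisherStatsCounter shown above, assuming the package's StreamTypeVideo and StreamTypeScreen constants:

package signaling

// examplePublisherStatsUsage is illustrative only and not part of the diff.
func examplePublisherStatsUsage() {
	var counter publisherStatsCounter

	// The publisher starts sending video and screenshare.
	counter.EnableStream(StreamTypeVideo, true)
	counter.EnableStream(StreamTypeScreen, true)

	// Two subscribers attach; each bumps the per-stream-type gauges.
	counter.AddSubscriber("subscriber-1")
	counter.AddSubscriber("subscriber-2")

	// One subscriber leaves, then the publisher stops entirely and
	// Reset releases all remaining gauge values.
	counter.RemoveSubscriber("subscriber-1")
	counter.Reset()
}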

View file

@ -1,154 +0,0 @@
/**
* Standalone signaling server for the Nextcloud Spreed app.
* Copyright (C) 2024 struktur AG
*
* @author Joachim Bauch <bauch@struktur.de>
*
* @license GNU AGPL version 3 or any later version
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package signaling
import (
"context"
"encoding/json"
"errors"
"fmt"
"log"
"sync/atomic"
"time"
)
type RemoteSession struct {
hub *Hub
client *Client
remoteClient *GrpcClient
sessionId string
proxy atomic.Pointer[SessionProxy]
}
func NewRemoteSession(hub *Hub, client *Client, remoteClient *GrpcClient, sessionId string) (*RemoteSession, error) {
remoteSession := &RemoteSession{
hub: hub,
client: client,
remoteClient: remoteClient,
sessionId: sessionId,
}
client.SetSessionId(sessionId)
client.SetHandler(remoteSession)
// Don't use "client.Context()" here as it could close the proxy connection
// before any final messages are forwarded to the remote end.
proxy, err := remoteClient.ProxySession(context.Background(), sessionId, remoteSession)
if err != nil {
return nil, err
}
remoteSession.proxy.Store(proxy)
return remoteSession, nil
}
func (s *RemoteSession) Country() string {
return s.client.Country()
}
func (s *RemoteSession) RemoteAddr() string {
return s.client.RemoteAddr()
}
func (s *RemoteSession) UserAgent() string {
return s.client.UserAgent()
}
func (s *RemoteSession) IsConnected() bool {
return true
}
func (s *RemoteSession) Start(message *ClientMessage) error {
return s.sendMessage(message)
}
func (s *RemoteSession) OnProxyMessage(msg *ServerSessionMessage) error {
var message *ServerMessage
if err := json.Unmarshal(msg.Message, &message); err != nil {
return err
}
if !s.client.SendMessage(message) {
return fmt.Errorf("could not send message to client")
}
return nil
}
func (s *RemoteSession) OnProxyClose(err error) {
if err != nil {
log.Printf("Proxy connection for session %s to %s was closed with error: %s", s.sessionId, s.remoteClient.Target(), err)
}
s.Close()
}
func (s *RemoteSession) SendMessage(message WritableClientMessage) bool {
return s.sendMessage(message) == nil
}
func (s *RemoteSession) sendProxyMessage(message []byte) error {
proxy := s.proxy.Load()
if proxy == nil {
return errors.New("proxy already closed")
}
msg := &ClientSessionMessage{
Message: message,
}
return proxy.Send(msg)
}
func (s *RemoteSession) sendMessage(message interface{}) error {
data, err := json.Marshal(message)
if err != nil {
return err
}
return s.sendProxyMessage(data)
}
func (s *RemoteSession) Close() {
if proxy := s.proxy.Swap(nil); proxy != nil {
proxy.Close()
}
s.hub.unregisterRemoteSession(s)
s.client.Close()
}
func (s *RemoteSession) OnLookupCountry(client HandlerClient) string {
return s.hub.OnLookupCountry(client)
}
func (s *RemoteSession) OnClosed(client HandlerClient) {
s.Close()
}
func (s *RemoteSession) OnMessageReceived(client HandlerClient, message []byte) {
if err := s.sendProxyMessage(message); err != nil {
log.Printf("Error sending %s to the proxy for session %s: %s", string(message), s.sessionId, err)
s.Close()
}
}
func (s *RemoteSession) OnRTTReceived(client HandlerClient, rtt time.Duration) {
}
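
RemoteSession.Close swaps the atomic proxy pointer to nil, so whichever of the several shutdown paths runs first closes the proxy exactly once. The idiom in isolation (a sketch):

package main

import (
	"fmt"
	"sync/atomic"
)

type proxy struct{ name string }

func (p *proxy) Close() { fmt.Println("closed", p.name) }

type session struct {
	proxy atomic.Pointer[proxy]
}

func (s *session) Close() {
	// Swap returns the previous value: only the first caller sees a
	// non-nil proxy, so Close is safe from multiple goroutines/paths.
	if p := s.proxy.Swap(nil); p != nil {
		p.Close()
	}
}

func main() {
	s := &session{}
	s.proxy.Store(&proxy{name: "grpc"})
	s.Close() // closes the proxy
	s.Close() // no-op
}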

room.go (22 changes)
View file

@ -65,7 +65,7 @@ type Room struct {
events AsyncEvents
backend *Backend
properties json.RawMessage
properties *json.RawMessage
closer *Closer
mu *sync.RWMutex
@ -95,7 +95,7 @@ func getRoomIdForBackend(id string, backend *Backend) string {
return backend.Id() + "|" + id
}
func NewRoom(roomId string, properties json.RawMessage, hub *Hub, events AsyncEvents, backend *Backend) (*Room, error) {
func NewRoom(roomId string, properties *json.RawMessage, hub *Hub, events AsyncEvents, backend *Backend) (*Room, error) {
room := &Room{
id: roomId,
hub: hub,
@ -136,7 +136,7 @@ func (r *Room) Id() string {
return r.id
}
func (r *Room) Properties() json.RawMessage {
func (r *Room) Properties() *json.RawMessage {
r.mu.RLock()
defer r.mu.RUnlock()
return r.properties
@ -270,12 +270,12 @@ func (r *Room) processBackendRoomRequestAsyncRoom(message *AsyncRoomMessage) {
}
}
func (r *Room) AddSession(session Session, sessionData json.RawMessage) {
func (r *Room) AddSession(session Session, sessionData *json.RawMessage) {
var roomSessionData *RoomSessionData
if len(sessionData) > 0 {
if sessionData != nil && len(*sessionData) > 0 {
roomSessionData = &RoomSessionData{}
if err := json.Unmarshal(sessionData, roomSessionData); err != nil {
log.Printf("Error decoding room session data \"%s\": %s", string(sessionData), err)
if err := json.Unmarshal(*sessionData, roomSessionData); err != nil {
log.Printf("Error decoding room session data \"%s\": %s", string(*sessionData), err)
roomSessionData = nil
}
}
@ -480,11 +480,11 @@ func (r *Room) publish(message *ServerMessage) error {
})
}
func (r *Room) UpdateProperties(properties json.RawMessage) {
func (r *Room) UpdateProperties(properties *json.RawMessage) {
r.mu.Lock()
defer r.mu.Unlock()
if (len(r.properties) == 0 && len(properties) == 0) ||
(len(r.properties) > 0 && len(properties) > 0 && bytes.Equal(r.properties, properties)) {
if (r.properties == nil && properties == nil) ||
(r.properties != nil && properties != nil && bytes.Equal(*r.properties, *properties)) {
// Don't notify if properties didn't change.
return
}
@ -769,7 +769,7 @@ func (r *Room) PublishUsersInCallChangedAll(inCall int) {
Type: "update",
Update: &RoomEventServerMessage{
RoomId: r.id,
InCall: inCallMsg,
InCall: &inCallMsg,
All: true,
},
},
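
One side of this hunk stores room properties as a plain json.RawMessage instead of *json.RawMessage: the type is just a []byte, so a nil value already expresses "no properties" and comparisons need no dereference. A small illustration (a sketch, not part of the diff):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// sameProperties mirrors the value-typed UpdateProperties check:
// empty and nil are treated alike.
func sameProperties(a, b json.RawMessage) bool {
	if len(a) == 0 && len(b) == 0 {
		return true
	}
	return len(a) > 0 && len(b) > 0 && bytes.Equal(a, b)
}

func main() {
	var none json.RawMessage // nil: no properties
	props := json.RawMessage(`{"name":"room1"}`)
	fmt.Println(sameProperties(none, nil))                                  // true
	fmt.Println(sameProperties(props, none))                                // false
	fmt.Println(sameProperties(props, json.RawMessage(`{"name":"room1"}`))) // true
}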

View file

@ -63,7 +63,6 @@ func NewRoomPingForTest(t *testing.T) (*url.URL, *RoomPing) {
}
func TestSingleRoomPing(t *testing.T) {
CatchLogForTest(t)
u, ping := NewRoomPingForTest(t)
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
@ -114,7 +113,6 @@ func TestSingleRoomPing(t *testing.T) {
}
func TestMultiRoomPing(t *testing.T) {
CatchLogForTest(t)
u, ping := NewRoomPingForTest(t)
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
@ -161,7 +159,6 @@ func TestMultiRoomPing(t *testing.T) {
}
func TestMultiRoomPing_Separate(t *testing.T) {
CatchLogForTest(t)
u, ping := NewRoomPingForTest(t)
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
@ -204,7 +201,6 @@ func TestMultiRoomPing_Separate(t *testing.T) {
}
func TestMultiRoomPing_DeleteRoom(t *testing.T) {
CatchLogForTest(t)
u, ping := NewRoomPingForTest(t)
ctx, cancel := context.WithTimeout(context.Background(), testTimeout)

Some files were not shown because too many files have changed in this diff.