Merge remote-tracking branch 'origin/master' into raja_1833

This commit is contained in:
boks1971
2024-06-20 11:45:46 +05:30
68 changed files with 2215 additions and 499 deletions
Binary file not shown.

Before

Width:  |  Height:  |  Size: 53 KiB

After

Width:  |  Height:  |  Size: 124 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 23 KiB

After

Width:  |  Height:  |  Size: 48 KiB

+2 -2
View File
@@ -43,7 +43,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '>=1.21'
go-version-file: "go.mod"
- name: Download Go modules
run: go mod download
@@ -66,7 +66,7 @@ jobs:
- name: Build and push
id: docker_build
uses: docker/build-push-action@v5
uses: docker/build-push-action@v6
with:
context: .
push: ${{ github.event_name != 'pull_request' }}
+3 -3
View File
@@ -35,13 +35,13 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '>=1.21'
go-version-file: "go.mod"
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser
version: latest
args: release --rm-dist
version: '~> v2'
args: release --clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+3 -1
View File
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
version: 2
before:
hooks:
- go mod tidy
@@ -31,6 +33,7 @@ builds:
goos:
- linux
- windows
archives:
- format_overrides:
- goos: windows
@@ -56,4 +59,3 @@ checksum:
name_template: 'checksums.txt'
snapshot:
name_template: "{{ incpatch .Version }}-next"
+76
View File
@@ -2,6 +2,82 @@
This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [1.6.2] - 2024-06-15
### Added
- Support for optional publisher datachannel (#2693)
- add room/participant name limit (#2704)
- Pass through timestamp in abs capture time (#2715)
- Support SIP transports. (#2724)
### Fixed
- add missing strings.EqualFold for some mimeType comparisons (#2701)
- connection reset without any closing handshake on clientside (#2709)
- Do not propagate RTCP if report is not processed. (#2739)
- Fix DD tracker addition. (#2751)
- Reset tracker on expected layer increase. (#2753)
- Do not add tracker for invalid layers. (#2759)
- Do not compare payload type before bind (#2775)
- fix agent jobs not launching when using the CreateRoom API (#2784)
### Changed
- Performance improvements to forwarding by using condition var. (#2691 #2699)
- Simplify time stamp calculation on switches. (#2688)
- Simplify layer roll back. (#2702)
- ensure room is running before attempting to delete (#2705)
- Redact egress object in CreateRoom request (#2710)
- reduce participant lock scope (#2732)
- Demote some less useful/noisy logs. (#2743)
- Stop probe on probe controller reset (#2744)
- initialize bucket size by publish bitrates (#2763)
- Validate RTP packets. (#2778)
## [1.6.1] - 2024-04-26
This release changes the default behavior when creating or updating WHIP
ingress. WHIP ingress will now default to disabling transcoding and
forwarding media unchanged to the LiveKit subscribers. This behavior can
be changed by using the new `enable_transcoding` option available in
updated SDKs. The behavior of existing ingresses is unchanged.
### Added
- Add support for "abs-capture-time" extension. (#2640)
- Add PropagationDelay API to sender report data (#2646)
- Add support for EnableTranscoding ingress option (#2681)
- Pass new SIP metadata. Update protocol. (#2683)
- Handle UpdateLocalAudioTrack and UpdateLocalVideoTrack. (#2684)
- Forward transcription data packets to the room (#2687)
### Fixed
- backwards compatibility for IsRecorder (#2647)
- Reduce RED weight in half. (#2648)
- add disconnected chan to participant (#2650)
- add typed ops queue (#2655)
- ICE config cache module. (#2654)
- use typed ops queue in pctransport (#2656)
- Use the ingress state updated_at field to ensure that out of order RPC do not overwrite state (#2657)
- Log ICE candidates to debug TCP connection issues. (#2658)
- Debug logging addition of ICE candidate (#2659)
- fix participant, ensure room name matches (#2660)
- replace keyframe ticker with timer (#2661)
- fix key frame timer (#2662)
- Disable dynamic playout delay for screenshare track (#2663)
- Don't log dd invalid template index (#2664)
- Do codec munging when munging RTP header. (#2665)
- Connection quality LOST only if RTCP is also not available. (#2670)
- Handle large jumps in RTCP sender report timestamp. (#2674)
- Bump golang.org/x/net from 0.22.0 to 0.23.0 (#2673)
- do not capture pointers in ops queue closures (#2675)
- Fix SubParticipant twice when participant left (#2672)
- use ttlcache (#2677)
- Detach subscriber datachannel to save memory (#2680)
- Clean up UpdateVideoLayers (#2685)
## [1.6.0] - 2024-04-10
### Added
+2 -2
View File
@@ -305,8 +305,8 @@ LiveKit server is licensed under Apache License v2.0.
<br/><table>
<thead><tr><th colspan="2">LiveKit Ecosystem</th></tr></thead>
<tbody>
<tr><td>Real-time SDKs</td><td><a href="https://github.com/livekit/components-js">React Components</a> · <a href="https://github.com/livekit/client-sdk-js">JavaScript</a> · <a href="https://github.com/livekit/client-sdk-swift">iOS/macOS</a> · <a href="https://github.com/livekit/client-sdk-android">Android</a> · <a href="https://github.com/livekit/client-sdk-flutter">Flutter</a> · <a href="https://github.com/livekit/client-sdk-react-native">React Native</a> · <a href="https://github.com/livekit/client-sdk-rust">Rust</a> · <a href="https://github.com/livekit/client-sdk-python">Python</a> · <a href="https://github.com/livekit/client-sdk-unity-web">Unity (web)</a> · <a href="https://github.com/livekit/client-sdk-unity">Unity (beta)</a></td></tr><tr></tr>
<tr><td>Server APIs</td><td><a href="https://github.com/livekit/server-sdk-js">Node.js</a> · <a href="https://github.com/livekit/server-sdk-go">Golang</a> · <a href="https://github.com/livekit/server-sdk-ruby">Ruby</a> · <a href="https://github.com/livekit/server-sdk-kotlin">Java/Kotlin</a> · <a href="https://github.com/livekit/client-sdk-python">Python</a> · <a href="https://github.com/livekit/client-sdk-rust">Rust</a> · <a href="https://github.com/agence104/livekit-server-sdk-php">PHP (community)</a></td></tr><tr></tr>
<tr><td>Real-time SDKs</td><td><a href="https://github.com/livekit/components-js">React Components</a> · <a href="https://github.com/livekit/client-sdk-js">Browser</a> · <a href="https://github.com/livekit/client-sdk-swift">iOS/macOS</a> · <a href="https://github.com/livekit/client-sdk-android">Android</a> · <a href="https://github.com/livekit/client-sdk-flutter">Flutter</a> · <a href="https://github.com/livekit/client-sdk-react-native">React Native</a> · <a href="https://github.com/livekit/rust-sdks">Rust</a> · <a href="https://github.com/livekit/node-sdks">Node.js</a> · <a href="https://github.com/livekit/python-sdks">Python</a> · <a href="https://github.com/livekit/client-sdk-unity-web">Unity (web)</a> · <a href="https://github.com/livekit/client-sdk-unity">Unity (beta)</a></td></tr><tr></tr>
<tr><td>Server APIs</td><td><a href="https://github.com/livekit/node-sdks">Node.js</a> · <a href="https://github.com/livekit/server-sdk-go">Golang</a> · <a href="https://github.com/livekit/server-sdk-ruby">Ruby</a> · <a href="https://github.com/livekit/server-sdk-kotlin">Java/Kotlin</a> · <a href="https://github.com/livekit/python-sdks">Python</a> · <a href="https://github.com/livekit/rust-sdks">Rust</a> · <a href="https://github.com/agence104/livekit-server-sdk-php">PHP (community)</a></td></tr><tr></tr>
<tr><td>Agents Frameworks</td><td><a href="https://github.com/livekit/agents">Python</a> · <a href="https://github.com/livekit/agent-playground">Playground</a></td></tr><tr></tr>
<tr><td>Services</td><td><b>Livekit server</b> · <a href="https://github.com/livekit/egress">Egress</a> · <a href="https://github.com/livekit/ingress">Ingress</a> · <a href="https://github.com/livekit/sip">SIP</a></td></tr><tr></tr>
<tr><td>Resources</td><td><a href="https://docs.livekit.io">Docs</a> · <a href="https://github.com/livekit-examples">Example apps</a> · <a href="https://livekit.io/cloud">Cloud</a> · <a href="https://docs.livekit.io/oss/deployment">Self-hosting</a> · <a href="https://github.com/livekit/livekit-cli">CLI</a></td></tr>
+8 -2
View File
@@ -183,8 +183,6 @@ keys:
# # allow tracks to be unmuted remotely, defaults to false
# # tracks can always be muted from the Room Service APIs
# enable_remote_unmute: true
# # limit size of room and participant's metadata, 0 for no limit
# max_metadata_size: 0
# # control playout delay in ms of video track (and associated audio track)
# playout_delay:
# enabled: true
@@ -311,3 +309,11 @@ keys:
# # value less or equal than 0 means no limit.
# subscription_limit_video: 0
# subscription_limit_audio: 0
# # limit size of room and participant's metadata, 0 for no limit
# max_metadata_size: 0
# # limit size of participant attributes, 0 for no limit
# max_attributes_size: 0
# # limit length of room names
# max_room_name_length: 0
# # limit length of participant identity
# max_participant_identity_length: 0
+39 -16
View File
@@ -13,20 +13,21 @@ require (
github.com/gammazero/deque v0.2.1
github.com/gammazero/workerpool v1.1.3
github.com/google/wire v0.6.0
github.com/gorilla/websocket v1.5.1
github.com/gorilla/websocket v1.5.3
github.com/hashicorp/go-version v1.7.0
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/jellydator/ttlcache/v3 v3.2.0
github.com/jxskiss/base62 v1.1.0
github.com/livekit/mageutil v0.0.0-20230125210925-54e8a70427c1
github.com/livekit/mediatransportutil v0.0.0-20240501132628-6105557bbb9a
github.com/livekit/protocol v1.17.1-0.20240606023900-429fec77a69b
github.com/livekit/mediatransportutil v0.0.0-20240613015318-84b69facfb75
github.com/livekit/protocol v1.18.0
github.com/livekit/psrpc v0.5.3-0.20240526192918-fbdaf10e6aa5
github.com/mackerelio/go-osstat v0.2.4
github.com/mackerelio/go-osstat v0.2.5
github.com/magefile/mage v1.15.0
github.com/maxbrunsfeld/counterfeiter/v6 v6.8.1
github.com/mitchellh/go-homedir v1.1.0
github.com/olekukonko/tablewriter v0.0.5
github.com/ory/dockertest/v3 v3.10.0
github.com/pion/dtls/v2 v2.2.11
github.com/pion/ice/v2 v2.3.24
github.com/pion/interceptor v0.1.29
@@ -39,35 +40,48 @@ require (
github.com/pion/webrtc/v3 v3.2.40
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.19.1
github.com/redis/go-redis/v9 v9.5.1
github.com/redis/go-redis/v9 v9.5.3
github.com/rs/cors v1.11.0
github.com/stretchr/testify v1.9.0
github.com/thoas/go-funk v0.9.3
github.com/twitchtv/twirp v8.1.3+incompatible
github.com/ua-parser/uap-go v0.0.0-20240113215029-33f8e6d47f38
github.com/ua-parser/uap-go v0.0.0-20240611065828-3a4781585db6
github.com/urfave/cli/v2 v2.27.2
github.com/urfave/negroni/v3 v3.1.0
github.com/urfave/negroni/v3 v3.1.1
go.uber.org/atomic v1.11.0
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20240529005216-23cca8864a10
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8
golang.org/x/sync v0.7.0
google.golang.org/protobuf v1.34.1
google.golang.org/protobuf v1.34.2
gopkg.in/yaml.v3 v3.0.1
)
require (
dario.cat/mergo v1.0.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containerd/continuity v0.4.3 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/docker/cli v26.1.4+incompatible // indirect
github.com/docker/docker v27.0.0+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/eapache/channels v1.1.0 // indirect
github.com/eapache/queue v1.1.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-jose/go-jose/v3 v3.0.3 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/subcommands v1.2.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -80,9 +94,15 @@ require (
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/mdlayher/netlink v1.7.1 // indirect
github.com/mdlayher/socket v0.4.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/nats-io/nats.go v1.35.0 // indirect
github.com/nats-io/nkeys v0.4.7 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/opencontainers/runc v1.1.13 // indirect
github.com/pion/datachannel v1.5.5 // indirect
github.com/pion/logging v0.2.2 // indirect
github.com/pion/mdns v0.0.12 // indirect
@@ -95,16 +115,19 @@ require (
github.com/prometheus/procfs v0.12.0 // indirect
github.com/puzpuzpuz/xsync/v3 v3.1.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
github.com/zeebo/xxh3 v1.0.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap/exp v0.2.0 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/mod v0.17.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/text v0.15.0 // indirect
golang.org/x/tools v0.21.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/mod v0.18.0 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/sys v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/tools v0.22.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect
google.golang.org/grpc v1.64.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
+102 -30
View File
@@ -1,3 +1,11 @@
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA=
github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
@@ -10,15 +18,21 @@ github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
github.com/cilium/ebpf v0.8.1 h1:bLSSEbBLqGPXxls55pGr5qWZaTqcmfDJHhou7t254ao=
github.com/cilium/ebpf v0.8.1/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk=
github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/d5/tengo/v2 v2.17.0 h1:BWUN9NoJzw48jZKiYDXDIF3QrIVZRm1uV1gTzeZ2lqM=
github.com/d5/tengo/v2 v2.17.0/go.mod h1:XRGjEs5I9jYIKTxly6HCF8oiiilk5E/RYXOZ5b0DZC8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -26,6 +40,14 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/docker/cli v26.1.4+incompatible h1:I8PHdc0MtxEADqYJZvhBrW9bo8gawKwwenxRM7/rLu8=
github.com/docker/cli v26.1.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v27.0.0+incompatible h1:JRugTYuelmWlW0M3jakcIadDx2HUoUO6+Tf2C5jVfwA=
github.com/docker/docker v27.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/eapache/channels v1.1.0 h1:F1taHcn7/F0i8DYqKXJnyhJcVpp2kgFcNePxXtnyu4k=
@@ -50,6 +72,10 @@ github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -61,6 +87,8 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -69,8 +97,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI=
github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA=
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
@@ -101,6 +129,8 @@ github.com/jsimonetti/rtnetlink v0.0.0-20211022192332-93da33804786 h1:N527AHMa79
github.com/jsimonetti/rtnetlink v0.0.0-20211022192332-93da33804786/go.mod h1:v4hqbTdfQngbVSZJVWUhGE/lbTFf9jb+ygmNUDQMuOs=
github.com/jxskiss/base62 v1.1.0 h1:A5zbF8v8WXx2xixnAKD2w+abC+sIzYJX+nxmhA6HWFw=
github.com/jxskiss/base62 v1.1.0/go.mod h1:HhWAlUXvxKThfOlZbcuFzsqwtF5TcqS9ru3y5GfjWAc=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
@@ -114,18 +144,20 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2 h1:hRGSmZu7j271trc9sneMrpOW7GN5ngLm8YUZIPzf394=
github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lithammer/shortuuid/v4 v4.0.0 h1:QRbbVkfgNippHOS8PXDkti4NaWeyYfcBTHtw7k08o4c=
github.com/lithammer/shortuuid/v4 v4.0.0/go.mod h1:Zs8puNcrvf2rV9rTH51ZLLcj7ZXqQI3lv67aw4KiB1Y=
github.com/livekit/mageutil v0.0.0-20230125210925-54e8a70427c1 h1:jm09419p0lqTkDaKb5iXdynYrzB84ErPPO4LbRASk58=
github.com/livekit/mageutil v0.0.0-20230125210925-54e8a70427c1/go.mod h1:Rs3MhFwutWhGwmY1VQsygw28z5bWcnEYmS1OG9OxjOQ=
github.com/livekit/mediatransportutil v0.0.0-20240501132628-6105557bbb9a h1:ATbv0x7G5tW2HgiouQ57csFE/G4gekl2oV1cxb2Dy24=
github.com/livekit/mediatransportutil v0.0.0-20240501132628-6105557bbb9a/go.mod h1:jwKUCmObuiEDH0iiuJHaGMXwRs3RjrB4G6qqgkr/5oE=
github.com/livekit/protocol v1.17.1-0.20240606023900-429fec77a69b h1:VZMvqc23x/dXRpJQLc6CIkCuLUjev0HDLFO9NCEqfOk=
github.com/livekit/protocol v1.17.1-0.20240606023900-429fec77a69b/go.mod h1:cN8WmGQR+kWz1+UWcAQdFFUcbW76PnfZDdkLAbYIqd4=
github.com/livekit/mediatransportutil v0.0.0-20240613015318-84b69facfb75 h1:p60OjeixzXnhGFQL8wmdUwWPxijEDe9ZJFMosq+byec=
github.com/livekit/mediatransportutil v0.0.0-20240613015318-84b69facfb75/go.mod h1:jwKUCmObuiEDH0iiuJHaGMXwRs3RjrB4G6qqgkr/5oE=
github.com/livekit/protocol v1.18.0 h1:LLOjKBA8rtnGpVGjAmKUROy7bv/l9q1wyn9hNmj8Sdg=
github.com/livekit/protocol v1.18.0/go.mod h1:cN8WmGQR+kWz1+UWcAQdFFUcbW76PnfZDdkLAbYIqd4=
github.com/livekit/psrpc v0.5.3-0.20240526192918-fbdaf10e6aa5 h1:mTZyrjk5WEWMsvaYtJ42pG7DuxysKj21DKPINpGSIto=
github.com/livekit/psrpc v0.5.3-0.20240526192918-fbdaf10e6aa5/go.mod h1:CQUBSPfYYAaevg1TNCc6/aYsa8DJH4jSRFdCeSZk5u0=
github.com/mackerelio/go-osstat v0.2.4 h1:qxGbdPkFo65PXOb/F/nhDKpF2nGmGaCFDLXoZjJTtUs=
github.com/mackerelio/go-osstat v0.2.4/go.mod h1:Zy+qzGdZs3A9cuIqmgbJvwbmLQH9dJvtio5ZjJTbdlQ=
github.com/mackerelio/go-osstat v0.2.5 h1:+MqTbZUhoIt4m8qzkVoXUJg1EuifwlAJSk4Yl2GXh+o=
github.com/mackerelio/go-osstat v0.2.5/go.mod h1:atxwWF+POUZcdtR1wnsUcQxTytoHG4uhl2AKKzrOajY=
github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=
github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
@@ -153,6 +185,12 @@ github.com/mdlayher/socket v0.4.0 h1:280wsy40IC9M9q1uPGcLBwXpcTQDtoGwVt+BNoITxIw
github.com/mdlayher/socket v0.4.0/go.mod h1:xxFqz5GRCUN3UEOm9CZqEJsAbe1C8OwSK46NlmWuVoc=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/nats-io/nats.go v1.35.0 h1:XFNqNM7v5B+MQMKqVGAyHwYhyKb48jrenXNxIU20ULk=
github.com/nats-io/nats.go v1.35.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
@@ -163,6 +201,14 @@ github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/opencontainers/runc v1.1.13 h1:98S2srgG9vw0zWcDpFMn5TRrh8kLxa/5OFUstuUhmRs=
github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA=
github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4=
github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg=
github.com/pion/datachannel v1.5.5 h1:10ef4kwdjije+M9d7Xm9im2Y3O6A6ccQb0zcqZcJew8=
github.com/pion/datachannel v1.5.5/go.mod h1:iMz+lECmfdCMqFRhXhcA/219B0SQlbpoR2V118yimL0=
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
@@ -223,8 +269,8 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/puzpuzpuz/xsync/v3 v3.1.0 h1:EewKT7/LNac5SLiEblJeUu8z5eERHrmRLnMQL2d7qX4=
github.com/puzpuzpuz/xsync/v3 v3.1.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/redis/go-redis/v9 v9.5.1 h1:H1X4D3yHPaYrkL5X06Wh6xNVM/pX0Ft4RV0vMGvLBh8=
github.com/redis/go-redis/v9 v9.5.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
github.com/redis/go-redis/v9 v9.5.3 h1:fOAp1/uJG+ZtcITgZOfYFmTKPE7n4Vclj1wZFgRciUU=
github.com/redis/go-redis/v9 v9.5.3/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
@@ -234,11 +280,14 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8=
github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -252,14 +301,23 @@ github.com/thoas/go-funk v0.9.3 h1:7+nAEx3kn5ZJcnDm2Bh23N2yOtweO14bi//dvRtgLpw=
github.com/thoas/go-funk v0.9.3/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q=
github.com/twitchtv/twirp v8.1.3+incompatible h1:+F4TdErPgSUbMZMwp13Q/KgDVuI7HJXP61mNV3/7iuU=
github.com/twitchtv/twirp v8.1.3+incompatible/go.mod h1:RRJoFSAmTEh2weEqWtpPE3vFK5YBhA6bqp2l1kfCC5A=
github.com/ua-parser/uap-go v0.0.0-20240113215029-33f8e6d47f38 h1:F04Na0QJP9GJrwmK3vQDuDrCuGllrrfngW8CIeF1aag=
github.com/ua-parser/uap-go v0.0.0-20240113215029-33f8e6d47f38/go.mod h1:BUbeWZiieNxAuuADTBNb3/aeje6on3DhU3rpWsQSB1E=
github.com/ua-parser/uap-go v0.0.0-20240611065828-3a4781585db6 h1:SIKIoA4e/5Y9ZOl0DCe3eVMLPOQzJxgZpfdHHeauNTM=
github.com/ua-parser/uap-go v0.0.0-20240611065828-3a4781585db6/go.mod h1:BUbeWZiieNxAuuADTBNb3/aeje6on3DhU3rpWsQSB1E=
github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI=
github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y42x+4eM=
github.com/urfave/negroni/v3 v3.1.0 h1:lzmuxGSpnJCT/ujgIAjkU3+LW3NX8alCglO/L6KjIGQ=
github.com/urfave/negroni/v3 v3.1.0/go.mod h1:jWvnX03kcSjDBl/ShB0iHvx5uOs7mAzZXW+JvJ5XYAs=
github.com/urfave/negroni/v3 v3.1.1 h1:6MS4nG9Jk/UuCACaUlNXCbiKa0ywF9LXz5dGu09v8hw=
github.com/urfave/negroni/v3 v3.1.1/go.mod h1:jWvnX03kcSjDBl/ShB0iHvx5uOs7mAzZXW+JvJ5XYAs=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw=
github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
@@ -276,6 +334,7 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.uber.org/zap/exp v0.2.0 h1:FtGenNNeCATRB3CmB/yEUnjEFeJWpB/pMcy7e2bKPYs=
go.uber.org/zap/exp v0.2.0/go.mod h1:t0gqAIdh1MfKv9EwN/dLwfZnJxe9ITAZN78HEWPFWDQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
@@ -284,23 +343,27 @@ golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98y
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20240529005216-23cca8864a10 h1:vpzMC/iZhYFAjJzHU0Cfuq+w1vLLsF2vLkDrPjzKYck=
golang.org/x/exp v0.0.0-20240529005216-23cca8864a10/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY=
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201216054612-986b41b23924/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
@@ -318,9 +381,11 @@ golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -348,11 +413,13 @@ golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -365,8 +432,8 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -389,25 +456,28 @@ golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e h1:Elxv5MwEkCI9f5SkoL6afed6NTdxaGoAo39eANBwHL8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -421,3 +491,5 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo=
gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A=
+36 -13
View File
@@ -231,17 +231,20 @@ type VideoConfig struct {
type RoomConfig struct {
// enable rooms to be automatically created
AutoCreate bool `yaml:"auto_create,omitempty"`
EnabledCodecs []CodecSpec `yaml:"enabled_codecs,omitempty"`
MaxParticipants uint32 `yaml:"max_participants,omitempty"`
EmptyTimeout uint32 `yaml:"empty_timeout,omitempty"`
DepartureTimeout uint32 `yaml:"departure_timeout,omitempty"`
EnableRemoteUnmute bool `yaml:"enable_remote_unmute,omitempty"`
MaxMetadataSize uint32 `yaml:"max_metadata_size,omitempty"`
PlayoutDelay PlayoutDelayConfig `yaml:"playout_delay,omitempty"`
SyncStreams bool `yaml:"sync_streams,omitempty"`
MaxRoomNameLength int `yaml:"max_room_name_length,omitempty"`
MaxParticipantIdentityLength int `yaml:"max_participant_identity_length,omitempty"`
AutoCreate bool `yaml:"auto_create,omitempty"`
EnabledCodecs []CodecSpec `yaml:"enabled_codecs,omitempty"`
MaxParticipants uint32 `yaml:"max_participants,omitempty"`
EmptyTimeout uint32 `yaml:"empty_timeout,omitempty"`
DepartureTimeout uint32 `yaml:"departure_timeout,omitempty"`
EnableRemoteUnmute bool `yaml:"enable_remote_unmute,omitempty"`
PlayoutDelay PlayoutDelayConfig `yaml:"playout_delay,omitempty"`
SyncStreams bool `yaml:"sync_streams,omitempty"`
// deprecated, moved to limits
MaxMetadataSize uint32 `yaml:"max_metadata_size,omitempty"`
// deprecated, moved to limits
MaxRoomNameLength int `yaml:"max_room_name_length,omitempty"`
// deprecated, moved to limits
MaxParticipantIdentityLength int `yaml:"max_participant_identity_length,omitempty"`
}
type CodecSpec struct {
@@ -300,6 +303,11 @@ type LimitConfig struct {
BytesPerSec float32 `yaml:"bytes_per_sec,omitempty"`
SubscriptionLimitVideo int32 `yaml:"subscription_limit_video,omitempty"`
SubscriptionLimitAudio int32 `yaml:"subscription_limit_audio,omitempty"`
MaxMetadataSize uint32 `yaml:"max_metadata_size,omitempty"`
// total size of all attributes on a participant
MaxAttributesSize uint32 `yaml:"max_attributes_size,omitempty"`
MaxRoomNameLength int `yaml:"max_room_name_length,omitempty"`
MaxParticipantIdentityLength int `yaml:"max_participant_identity_length,omitempty"`
}
type IngressConfig struct {
@@ -494,8 +502,12 @@ var DefaultConfig = Config{
{Mime: webrtc.MimeTypeVP9},
{Mime: webrtc.MimeTypeAV1},
},
EmptyTimeout: 5 * 60,
DepartureTimeout: 20,
EmptyTimeout: 5 * 60,
DepartureTimeout: 20,
},
Limit: LimitConfig{
MaxMetadataSize: 64000,
MaxAttributesSize: 64000,
MaxRoomNameLength: 256,
MaxParticipantIdentityLength: 256,
},
@@ -585,6 +597,17 @@ func NewConfig(confString string, strictMode bool, c *cli.Context, baseFlags []c
conf.Logging.ComponentLevels["pion"] = conf.Logging.PionLevel
}
// copy over legacy limits
if conf.Room.MaxMetadataSize != 0 {
conf.Limit.MaxMetadataSize = conf.Room.MaxMetadataSize
}
if conf.Room.MaxParticipantIdentityLength != 0 {
conf.Limit.MaxParticipantIdentityLength = conf.Room.MaxParticipantIdentityLength
}
if conf.Room.MaxRoomNameLength != 0 {
conf.Limit.MaxRoomNameLength = conf.Room.MaxRoomNameLength
}
return &conf, nil
}
+6
View File
@@ -20,6 +20,8 @@ import (
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
"github.com/livekit/livekit-server/pkg/config/configtest"
)
func TestConfig_UnmarshalKeys(t *testing.T) {
@@ -80,3 +82,7 @@ func TestGeneratedFlags(t *testing.T) {
require.NotNil(t, conf.RTC.ReconnectOnSubscriptionError)
require.False(t, *conf.RTC.ReconnectOnSubscriptionError)
}
func TestYAMLTag(t *testing.T) {
require.NoError(t, configtest.CheckYAMLTags(Config{}))
}
+64
View File
@@ -0,0 +1,64 @@
package configtest
import (
"fmt"
"reflect"
"slices"
"strings"
"go.uber.org/multierr"
"google.golang.org/protobuf/proto"
)
var protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
func checkYAMLTags(t reflect.Type, seen map[reflect.Type]struct{}) error {
if _, ok := seen[t]; ok {
return nil
}
seen[t] = struct{}{}
switch t.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.Pointer:
return checkYAMLTags(t.Elem(), seen)
case reflect.Struct:
if reflect.PointerTo(t).Implements(protoMessageType) {
// ignore protobuf messages
return nil
}
var errs error
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
if field.Type.Kind() == reflect.Bool {
// ignore boolean fields
continue
}
if field.Tag.Get("config") == "allowempty" {
// ignore configured exceptions
continue
}
parts := strings.Split(field.Tag.Get("yaml"), ",")
if parts[0] == "-" {
// ignore unparsed fields
continue
}
if !slices.Contains(parts, "omitempty") && !slices.Contains(parts, "inline") {
errs = multierr.Append(errs, fmt.Errorf("%s/%s.%s missing omitempty tag", t.PkgPath(), t.Name(), field.Name))
}
errs = multierr.Append(errs, checkYAMLTags(field.Type, seen))
}
return errs
default:
return nil
}
}
func CheckYAMLTags(config any) error {
return checkYAMLTags(reflect.TypeOf(config), map[reflect.Type]struct{}{})
}
-1
View File
@@ -29,7 +29,6 @@ import (
type EgressLauncher interface {
StartEgress(context.Context, *rpc.StartEgressRequest) (*livekit.EgressInfo, error)
StartEgressWithClusterId(ctx context.Context, clusterId string, req *rpc.StartEgressRequest) (*livekit.EgressInfo, error)
}
func StartParticipantEgress(
+4 -1
View File
@@ -14,7 +14,9 @@
package rtc
import "errors"
import (
"errors"
)
var (
ErrRoomClosed = errors.New("room has already closed")
@@ -29,6 +31,7 @@ var (
ErrEmptyParticipantID = errors.New("participant ID cannot be empty")
ErrMissingGrants = errors.New("VideoGrant is missing")
ErrInternalError = errors.New("internal error")
ErrAttributeExceedsLimits = errors.New("attribute size exceeds limits")
// Track subscription related
ErrNoTrackPermission = errors.New("participant is not allowed to subscribe to this track")
+9 -6
View File
@@ -353,11 +353,14 @@ func (t *MediaTrack) AddReceiver(receiver *webrtc.RTPReceiver, track *webrtc.Tra
t.SetSimulcast(true)
}
if t.IsSimulcast() {
t.MediaTrackReceiver.SetLayerSsrc(mime, track.RID(), uint32(track.SSRC()))
var bitrates int
if len(ti.Layers) > int(layer) {
bitrates = int(ti.Layers[layer].GetBitrate())
}
buff.Bind(receiver.GetParameters(), track.Codec().RTPCodecCapability)
t.MediaTrackReceiver.SetLayerSsrc(mime, track.RID(), uint32(track.SSRC()))
buff.Bind(receiver.GetParameters(), track.Codec().RTPCodecCapability, bitrates)
// if subscriber request fps before fps calculated, update them after fps updated.
buff.OnFpsChanged(func() {
@@ -408,13 +411,13 @@ func (t *MediaTrack) Restart() {
}
}
func (t *MediaTrack) Close(willBeResumed bool) {
func (t *MediaTrack) Close(isExpectedToResume bool) {
t.MediaTrackReceiver.SetClosing()
if t.dynacastManager != nil {
t.dynacastManager.Close()
}
t.MediaTrackReceiver.ClearAllReceivers(willBeResumed)
t.MediaTrackReceiver.Close()
t.MediaTrackReceiver.ClearAllReceivers(isExpectedToResume)
t.MediaTrackReceiver.Close(isExpectedToResume)
}
func (t *MediaTrack) SetMuted(muted bool) {
+25 -23
View File
@@ -97,16 +97,16 @@ type MediaTrackReceiverParams struct {
type MediaTrackReceiver struct {
params MediaTrackReceiverParams
lock sync.RWMutex
receivers []*simulcastReceiver
trackInfo *livekit.TrackInfo
potentialCodecs []webrtc.RTPCodecParameters
state mediaTrackReceiverState
willBeResumed bool
lock sync.RWMutex
receivers []*simulcastReceiver
trackInfo *livekit.TrackInfo
potentialCodecs []webrtc.RTPCodecParameters
state mediaTrackReceiverState
isExpectedToResume bool
onSetupReceiver func(mime string)
onMediaLossFeedback func(dt *sfu.DownTrack, report *rtcp.ReceiverReport)
onClose []func()
onClose []func(isExpectedToResume bool)
*MediaTrackSubscriptions
}
@@ -258,7 +258,7 @@ func (t *MediaTrackReceiver) SetPotentialCodecs(codecs []webrtc.RTPCodecParamete
t.lock.Unlock()
}
func (t *MediaTrackReceiver) ClearReceiver(mime string, willBeResumed bool) {
func (t *MediaTrackReceiver) ClearReceiver(mime string, isExpectedToResume bool) {
t.lock.Lock()
receivers := slices.Clone(t.receivers)
for idx, receiver := range receivers {
@@ -272,20 +272,20 @@ func (t *MediaTrackReceiver) ClearReceiver(mime string, willBeResumed bool) {
t.receivers = receivers
t.lock.Unlock()
t.removeAllSubscribersForMime(mime, willBeResumed)
t.removeAllSubscribersForMime(mime, isExpectedToResume)
}
func (t *MediaTrackReceiver) ClearAllReceivers(willBeResumed bool) {
func (t *MediaTrackReceiver) ClearAllReceivers(isExpectedToResume bool) {
t.params.Logger.Debugw("clearing all receivers")
t.lock.Lock()
receivers := t.receivers
t.receivers = nil
t.willBeResumed = willBeResumed
t.isExpectedToResume = isExpectedToResume
t.lock.Unlock()
for _, r := range receivers {
t.removeAllSubscribersForMime(r.Codec().MimeType, willBeResumed)
t.removeAllSubscribersForMime(r.Codec().MimeType, isExpectedToResume)
}
}
@@ -332,16 +332,18 @@ func (t *MediaTrackReceiver) TryClose() bool {
numActiveReceivers++
}
}
isExpectedToResume := t.isExpectedToResume
t.lock.RUnlock()
if numActiveReceivers != 0 {
return false
}
t.Close()
t.Close(isExpectedToResume)
return true
}
func (t *MediaTrackReceiver) Close() {
func (t *MediaTrackReceiver) Close(isExpectedToResume bool) {
t.lock.Lock()
if t.state == mediaTrackReceiverStateClosed {
t.lock.Unlock()
@@ -353,7 +355,7 @@ func (t *MediaTrackReceiver) Close() {
t.lock.Unlock()
for _, f := range onclose {
f()
f(isExpectedToResume)
}
}
@@ -437,7 +439,7 @@ func (t *MediaTrackReceiver) SetMuted(muted bool) {
t.MediaTrackSubscriptions.SetMuted(muted)
}
func (t *MediaTrackReceiver) AddOnClose(f func()) {
func (t *MediaTrackReceiver) AddOnClose(f func(isExpectedToResume bool)) {
if f == nil {
return
}
@@ -499,16 +501,16 @@ func (t *MediaTrackReceiver) AddSubscriber(sub types.LocalParticipant) (types.Su
// media track could have been closed while adding subscription
remove := false
willBeResumed := false
isExpectedToResume := false
t.lock.RLock()
if t.state != mediaTrackReceiverStateOpen {
willBeResumed = t.willBeResumed
isExpectedToResume = t.isExpectedToResume
remove = true
}
t.lock.RUnlock()
if remove {
_ = t.MediaTrackSubscriptions.RemoveSubscriber(sub.ID(), willBeResumed)
_ = t.MediaTrackSubscriptions.RemoveSubscriber(sub.ID(), isExpectedToResume)
return nil, ErrNotOpen
}
@@ -517,14 +519,14 @@ func (t *MediaTrackReceiver) AddSubscriber(sub types.LocalParticipant) (types.Su
// RemoveSubscriber removes participant from subscription
// stop all forwarders to the client
func (t *MediaTrackReceiver) RemoveSubscriber(subscriberID livekit.ParticipantID, willBeResumed bool) {
_ = t.MediaTrackSubscriptions.RemoveSubscriber(subscriberID, willBeResumed)
func (t *MediaTrackReceiver) RemoveSubscriber(subscriberID livekit.ParticipantID, isExpectedToResume bool) {
_ = t.MediaTrackSubscriptions.RemoveSubscriber(subscriberID, isExpectedToResume)
}
func (t *MediaTrackReceiver) removeAllSubscribersForMime(mime string, willBeResumed bool) {
func (t *MediaTrackReceiver) removeAllSubscribersForMime(mime string, isExpectedToResume bool) {
t.params.Logger.Debugw("removing all subscribers for mime", "mime", mime)
for _, subscriberID := range t.MediaTrackSubscriptions.GetAllSubscribersForMime(mime) {
t.RemoveSubscriber(subscriberID, willBeResumed)
t.RemoveSubscriber(subscriberID, isExpectedToResume)
}
}
+12 -12
View File
@@ -296,8 +296,8 @@ func (t *MediaTrackSubscriptions) AddSubscriber(sub types.LocalParticipant, wr *
// But, the subscription could be removed early if the published track is closed
// while adding subscription. In those cases, subscription manager would not have set
// the `OnClose` callback. So, set it here to handle cases of early close.
subTrack.OnClose(func(willBeResumed bool) {
if !willBeResumed {
subTrack.OnClose(func(isExpectedToResume bool) {
if !isExpectedToResume {
if err := sub.RemoveTrackFromSubscriber(sender); err != nil {
t.params.Logger.Warnw("could not remove track from peer connection", err)
}
@@ -306,8 +306,8 @@ func (t *MediaTrackSubscriptions) AddSubscriber(sub types.LocalParticipant, wr *
downTrack.SetTransceiver(transceiver)
downTrack.OnCloseHandler(func(willBeResumed bool) {
go t.downTrackClosed(sub, willBeResumed)
downTrack.OnCloseHandler(func(isExpectedToResume bool) {
go t.downTrackClosed(sub, isExpectedToResume)
})
t.subscribedTracksMu.Lock()
@@ -319,24 +319,24 @@ func (t *MediaTrackSubscriptions) AddSubscriber(sub types.LocalParticipant, wr *
// RemoveSubscriber removes participant from subscription
// stop all forwarders to the client
func (t *MediaTrackSubscriptions) RemoveSubscriber(subscriberID livekit.ParticipantID, willBeResumed bool) error {
func (t *MediaTrackSubscriptions) RemoveSubscriber(subscriberID livekit.ParticipantID, isExpectedToResume bool) error {
subTrack := t.getSubscribedTrack(subscriberID)
if subTrack == nil {
return errNotFound
}
t.params.Logger.Debugw("removing subscriber", "subscriberID", subscriberID, "willBeResumed", willBeResumed)
t.closeSubscribedTrack(subTrack, willBeResumed)
t.params.Logger.Debugw("removing subscriber", "subscriberID", subscriberID, "isExpectedToResume", isExpectedToResume)
t.closeSubscribedTrack(subTrack, isExpectedToResume)
return nil
}
func (t *MediaTrackSubscriptions) closeSubscribedTrack(subTrack types.SubscribedTrack, willBeResumed bool) {
func (t *MediaTrackSubscriptions) closeSubscribedTrack(subTrack types.SubscribedTrack, isExpectedToResume bool) {
dt := subTrack.DownTrack()
if dt == nil {
return
}
if willBeResumed {
if isExpectedToResume {
dt.CloseWithFlush(false)
} else {
// flushing blocks, avoid blocking when publisher removes all its subscribers
@@ -418,7 +418,7 @@ func (t *MediaTrackSubscriptions) DebugInfo() []map[string]interface{} {
func (t *MediaTrackSubscriptions) downTrackClosed(
sub types.LocalParticipant,
willBeResumed bool,
isExpectedToResume bool,
) {
subscriberID := sub.ID()
t.subscribedTracksMu.RLock()
@@ -429,7 +429,7 @@ func (t *MediaTrackSubscriptions) downTrackClosed(
// Cache transceiver for potential re-use on resume.
// To ensure subscription manager does not re-subscribe before caching,
// delete the subscribed track only after caching.
if willBeResumed {
if isExpectedToResume {
dt := subTrack.DownTrack()
tr := dt.GetTransceiver()
if tr != nil {
@@ -442,6 +442,6 @@ func (t *MediaTrackSubscriptions) downTrackClosed(
delete(t.subscribedTracks, subscriberID)
t.subscribedTracksMu.Unlock()
subTrack.Close(willBeResumed)
subTrack.Close(isExpectedToResume)
}
}
+58 -6
View File
@@ -136,6 +136,7 @@ type ParticipantParams struct {
VersionGenerator utils.TimedVersionGenerator
TrackResolver types.MediaTrackResolver
DisableDynacast bool
MaxAttributesSize uint32
SubscriberAllowPause bool
SubscriptionLimitAudio int32
SubscriptionLimitVideo int32
@@ -164,6 +165,7 @@ type ParticipantImpl struct {
isPublisher atomic.Bool
sessionStartRecorded atomic.Bool
lastActiveAt time.Time
// when first connected
connectedAt time.Time
// timer that's set when disconnect is detected on primary PC
@@ -458,6 +460,52 @@ func (p *ParticipantImpl) SetMetadata(metadata string) {
}
}
func (p *ParticipantImpl) SetAttributes(attrs map[string]string) error {
p.lock.Lock()
grants := p.grants.Load().Clone()
if grants.Attributes == nil {
grants.Attributes = make(map[string]string)
}
var keysToDelete []string
for k, v := range attrs {
if v == "" {
keysToDelete = append(keysToDelete, k)
} else {
grants.Attributes[k] = v
}
}
for _, k := range keysToDelete {
delete(grants.Attributes, k)
}
maxAttributesSize := p.params.MaxAttributesSize
if maxAttributesSize > 0 {
total := 0
for k, v := range grants.Attributes {
total += len(k) + len(v)
}
if uint32(total) > maxAttributesSize {
p.lock.Unlock()
return ErrAttributeExceedsLimits
}
}
p.grants.Store(grants)
p.dirty.Store(true)
onParticipantUpdate := p.onParticipantUpdate
onClaimsChanged := p.onClaimsChanged
p.lock.Unlock()
if onParticipantUpdate != nil {
onParticipantUpdate(p)
}
if onClaimsChanged != nil {
onClaimsChanged(p)
}
return nil
}
func (p *ParticipantImpl) ClaimGrants() *auth.ClaimGrants {
return p.grants.Load()
}
@@ -550,6 +598,7 @@ func (p *ParticipantImpl) ToProtoWithVersion() (*livekit.ParticipantInfo, utils.
Version: v,
Permission: grants.Video.ToPermission(),
Metadata: grants.Metadata,
Attributes: grants.Attributes,
Region: p.params.Region,
IsPublisher: p.IsPublisher(),
Kind: grants.GetParticipantKind(),
@@ -779,8 +828,9 @@ func (p *ParticipantImpl) AddTrack(req *livekit.AddTrackRequest) {
return
}
p.lock.Lock()
defer p.lock.Unlock()
p.pendingTracksLock.Lock()
defer p.pendingTracksLock.Unlock()
ti := p.addPendingTrackLocked(req)
if ti == nil {
return
@@ -1374,6 +1424,11 @@ func (p *ParticipantImpl) updateState(state livekit.ParticipantInfo_State) {
return
}
if state == livekit.ParticipantInfo_DISCONNECTED && oldState == livekit.ParticipantInfo_ACTIVE {
prometheus.RecordSessionDuration(int(p.ProtocolVersion()), time.Since(p.lastActiveAt))
} else if state == livekit.ParticipantInfo_ACTIVE {
p.lastActiveAt = time.Now()
}
p.params.Logger.Debugw("updating participant state", "state", state.String())
p.dirty.Store(true)
@@ -1767,9 +1822,6 @@ func (p *ParticipantImpl) onSubscribedMaxQualityChange(
}
func (p *ParticipantImpl) addPendingTrackLocked(req *livekit.AddTrackRequest) *livekit.TrackInfo {
p.pendingTracksLock.Lock()
defer p.pendingTracksLock.Unlock()
if req.Sid != "" {
track := p.GetPublishedTrack(livekit.TrackID(req.Sid))
if track == nil {
@@ -2127,7 +2179,7 @@ func (p *ParticipantImpl) addMediaTrack(signalCid string, sdpCid string, ti *liv
}
trackID := livekit.TrackID(ti.Sid)
mt.AddOnClose(func() {
mt.AddOnClose(func(_isExpectedToRsume bool) {
if p.supervisor != nil {
p.supervisor.ClearPublishedTrack(trackID, mt)
}
+12 -1
View File
@@ -819,13 +819,24 @@ func (r *Room) SetMetadata(metadata string) <-chan struct{} {
return r.protoProxy.MarkDirty(true)
}
func (r *Room) UpdateParticipantMetadata(participant types.LocalParticipant, name string, metadata string) {
func (r *Room) UpdateParticipantMetadata(
participant types.LocalParticipant,
name string,
metadata string,
attributes map[string]string,
) error {
if attributes != nil && len(attributes) > 0 {
if err := participant.SetAttributes(attributes); err != nil {
return err
}
}
if metadata != "" {
participant.SetMetadata(metadata)
}
if name != "" {
participant.SetName(name)
}
return nil
}
func (r *Room) sendRoomUpdate() {
+9 -1
View File
@@ -93,7 +93,15 @@ func HandleParticipantSignal(room types.Room, participant types.LocalParticipant
case *livekit.SignalRequest_UpdateMetadata:
if participant.ClaimGrants().Video.GetCanUpdateOwnMetadata() {
room.UpdateParticipantMetadata(participant, msg.UpdateMetadata.Name, msg.UpdateMetadata.Metadata)
err := room.UpdateParticipantMetadata(
participant,
msg.UpdateMetadata.Name,
msg.UpdateMetadata.Metadata,
msg.UpdateMetadata.Attributes,
)
if err != nil {
pLogger.Warnw("could not update metadata", err)
}
}
case *livekit.SignalRequest_UpdateAudioTrack:
+2 -2
View File
@@ -139,9 +139,9 @@ func (t *SubscribedTrack) Bound(err error) {
}
// for DownTrack callback to notify us that it's closed
func (t *SubscribedTrack) Close(willBeResumed bool) {
func (t *SubscribedTrack) Close(isExpectedToResume bool) {
if onClose := t.onClose.Load(); onClose != nil {
go onClose.(func(bool))(willBeResumed)
go onClose.(func(bool))(isExpectedToResume)
}
}
+8 -8
View File
@@ -91,7 +91,7 @@ func NewSubscriptionManager(params SubscriptionManagerParams) *SubscriptionManag
return m
}
func (m *SubscriptionManager) Close(willBeResumed bool) {
func (m *SubscriptionManager) Close(isExpectedToResume bool) {
m.lock.Lock()
if m.isClosed() {
m.lock.Unlock()
@@ -113,7 +113,7 @@ func (m *SubscriptionManager) Close(willBeResumed bool) {
}
}
if willBeResumed {
if isExpectedToResume {
for _, dt := range downTracksToClose {
dt.CloseWithFlush(false)
}
@@ -523,8 +523,8 @@ func (m *SubscriptionManager) subscribe(s *trackSubscription) error {
)
}
if err == nil && subTrack != nil { // subTrack could be nil if already subscribed
subTrack.OnClose(func(willBeResumed bool) {
m.handleSubscribedTrackClose(s, willBeResumed)
subTrack.OnClose(func(isExpectedToResume bool) {
m.handleSubscribedTrackClose(s, isExpectedToResume)
})
subTrack.AddOnBind(func(err error) {
if err != nil {
@@ -615,10 +615,10 @@ func (m *SubscriptionManager) handleSourceTrackRemoved(trackID livekit.TrackID)
// - subscriber-initiated unsubscribe
// - UpTrack was closed
// - publisher revoked permissions for the participant
func (m *SubscriptionManager) handleSubscribedTrackClose(s *trackSubscription, willBeResumed bool) {
func (m *SubscriptionManager) handleSubscribedTrackClose(s *trackSubscription, isExpectedToResume bool) {
s.logger.Debugw(
"subscribed track closed",
"willBeResumed", willBeResumed,
"isExpectedToResume", isExpectedToResume,
)
wasBound := s.isBound()
subTrack := s.getSubscribedTrack()
@@ -666,7 +666,7 @@ func (m *SubscriptionManager) handleSubscribedTrackClose(s *trackSubscription, w
context.Background(),
m.params.Participant.ID(),
&livekit.TrackInfo{Sid: string(s.trackID), Type: subTrack.MediaTrack().Kind()},
!willBeResumed,
!isExpectedToResume,
)
dt := subTrack.DownTrack()
@@ -684,7 +684,7 @@ func (m *SubscriptionManager) handleSubscribedTrackClose(s *trackSubscription, w
}
}
if !willBeResumed {
if !isExpectedToResume {
sender := subTrack.RTPSender()
if sender != nil {
s.logger.Debugw("removing PeerConnection track",
+14 -14
View File
@@ -214,11 +214,11 @@ func TestUnsubscribe(t *testing.T) {
st, err := res.Track.AddSubscriber(sm.params.Participant)
require.NoError(t, err)
s.subscribedTrack = st
st.OnClose(func(willBeResumed bool) {
sm.handleSubscribedTrackClose(s, willBeResumed)
st.OnClose(func(isExpectedToResume bool) {
sm.handleSubscribedTrackClose(s, isExpectedToResume)
})
res.Track.(*typesfakes.FakeMediaTrack).RemoveSubscriberCalls(func(pID livekit.ParticipantID, willBeResumed bool) {
setTestSubscribedTrackClosed(t, st, willBeResumed)
res.Track.(*typesfakes.FakeMediaTrack).RemoveSubscriberCalls(func(pID livekit.ParticipantID, isExpectedToResume bool) {
setTestSubscribedTrackClosed(t, st, isExpectedToResume)
})
sm.lock.Lock()
@@ -279,18 +279,18 @@ func TestSubscribeStatusChanged(t *testing.T) {
return !s1.needsSubscribe() && !s2.needsSubscribe()
}, subSettleTimeout, subCheckInterval, "track1 and track2 should be subscribed")
st1 := s1.getSubscribedTrack()
st1.OnClose(func(willBeResumed bool) {
sm.handleSubscribedTrackClose(s1, willBeResumed)
st1.OnClose(func(isExpectedToResume bool) {
sm.handleSubscribedTrackClose(s1, isExpectedToResume)
})
st2 := s2.getSubscribedTrack()
st2.OnClose(func(willBeResumed bool) {
sm.handleSubscribedTrackClose(s2, willBeResumed)
st2.OnClose(func(isExpectedToResume bool) {
sm.handleSubscribedTrackClose(s2, isExpectedToResume)
})
st1.MediaTrack().(*typesfakes.FakeMediaTrack).RemoveSubscriberCalls(func(pID livekit.ParticipantID, willBeResumed bool) {
setTestSubscribedTrackClosed(t, st1, willBeResumed)
st1.MediaTrack().(*typesfakes.FakeMediaTrack).RemoveSubscriberCalls(func(pID livekit.ParticipantID, isExpectedToResume bool) {
setTestSubscribedTrackClosed(t, st1, isExpectedToResume)
})
st2.MediaTrack().(*typesfakes.FakeMediaTrack).RemoveSubscriberCalls(func(pID livekit.ParticipantID, willBeResumed bool) {
setTestSubscribedTrackClosed(t, st2, willBeResumed)
st2.MediaTrack().(*typesfakes.FakeMediaTrack).RemoveSubscriberCalls(func(pID livekit.ParticipantID, isExpectedToResume bool) {
setTestSubscribedTrackClosed(t, st2, isExpectedToResume)
})
require.Eventually(t, func() bool {
@@ -533,9 +533,9 @@ func setTestSubscribedTrackBound(t *testing.T, st types.SubscribedTrack) {
}
}
func setTestSubscribedTrackClosed(t *testing.T, st types.SubscribedTrack, willBeResumed bool) {
func setTestSubscribedTrackClosed(t *testing.T, st types.SubscribedTrack, isExpectedToResume bool) {
fst, ok := st.(*typesfakes.FakeSubscribedTrack)
require.True(t, ok)
fst.OnCloseArgsForCall(0)(willBeResumed)
fst.OnCloseArgsForCall(0)(isExpectedToResume)
}
+13 -11
View File
@@ -255,13 +255,10 @@ type Participant interface {
CanSkipBroadcast() bool
ToProto() *livekit.ParticipantInfo
SetName(name string)
SetMetadata(metadata string)
IsPublisher() bool
GetPublishedTrack(trackID livekit.TrackID) MediaTrack
GetPublishedTracks() []MediaTrack
RemovePublishedTrack(track MediaTrack, willBeResumed bool, shouldClose bool)
RemovePublishedTrack(track MediaTrack, isExpectedToResume bool, shouldClose bool)
GetAudioLevel() (smoothedLevel float64, active bool)
@@ -329,6 +326,11 @@ type LocalParticipant interface {
SetSignalSourceValid(valid bool)
HandleSignalSourceClose()
// updates
SetName(name string)
SetMetadata(metadata string)
SetAttributes(attributes map[string]string) error
// permissions
ClaimGrants() *auth.ClaimGrants
SetPermission(permission *livekit.ParticipantPermission) bool
@@ -437,7 +439,7 @@ type Room interface {
SimulateScenario(participant LocalParticipant, scenario *livekit.SimulateScenario) error
ResolveMediaTrackForSubscriber(subIdentity livekit.ParticipantIdentity, trackID livekit.TrackID) MediaResolverResult
GetLocalParticipants() []LocalParticipant
UpdateParticipantMetadata(participant LocalParticipant, name string, metadata string)
UpdateParticipantMetadata(participant LocalParticipant, name string, metadata string, attributes map[string]string) error
}
// MediaTrack represents a media track
@@ -466,15 +468,15 @@ type MediaTrack interface {
GetAudioLevel() (level float64, active bool)
Close(willBeResumed bool)
Close(isExpectedToResume bool)
IsOpen() bool
// callbacks
AddOnClose(func())
AddOnClose(func(isExpectedToResume bool))
// subscribers
AddSubscriber(participant LocalParticipant) (SubscribedTrack, error)
RemoveSubscriber(participantID livekit.ParticipantID, willBeResumed bool)
RemoveSubscriber(participantID livekit.ParticipantID, isExpectedToResume bool)
IsSubscriber(subID livekit.ParticipantID) bool
RevokeDisallowedSubscribers(allowedSubscriberIdentities []livekit.ParticipantIdentity) []livekit.ParticipantIdentity
GetAllSubscribers() []livekit.ParticipantID
@@ -487,7 +489,7 @@ type MediaTrack interface {
GetTemporalLayerForSpatialFps(spatial int32, fps uint32, mime string) int32
Receivers() []sfu.TrackReceiver
ClearAllReceivers(willBeResumed bool)
ClearAllReceivers(isExpectedToResume bool)
IsEncrypted() bool
}
@@ -514,8 +516,8 @@ type LocalMediaTrack interface {
type SubscribedTrack interface {
AddOnBind(f func(error))
IsBound() bool
Close(willBeResumed bool)
OnClose(f func(willBeResumed bool))
Close(isExpectedToResume bool)
OnClose(f func(isExpectedToResume bool))
ID() livekit.TrackID
PublisherID() livekit.ParticipantID
PublisherIdentity() livekit.ParticipantIdentity
@@ -10,10 +10,10 @@ import (
)
type FakeLocalMediaTrack struct {
AddOnCloseStub func(func())
AddOnCloseStub func(func(isExpectedToResume bool))
addOnCloseMutex sync.RWMutex
addOnCloseArgsForCall []struct {
arg1 func()
arg1 func(isExpectedToResume bool)
}
AddSubscriberStub func(types.LocalParticipant) (types.SubscribedTrack, error)
addSubscriberMutex sync.RWMutex
@@ -351,10 +351,10 @@ type FakeLocalMediaTrack struct {
invocationsMutex sync.RWMutex
}
func (fake *FakeLocalMediaTrack) AddOnClose(arg1 func()) {
func (fake *FakeLocalMediaTrack) AddOnClose(arg1 func(isExpectedToResume bool)) {
fake.addOnCloseMutex.Lock()
fake.addOnCloseArgsForCall = append(fake.addOnCloseArgsForCall, struct {
arg1 func()
arg1 func(isExpectedToResume bool)
}{arg1})
stub := fake.AddOnCloseStub
fake.recordInvocation("AddOnClose", []interface{}{arg1})
@@ -370,13 +370,13 @@ func (fake *FakeLocalMediaTrack) AddOnCloseCallCount() int {
return len(fake.addOnCloseArgsForCall)
}
func (fake *FakeLocalMediaTrack) AddOnCloseCalls(stub func(func())) {
func (fake *FakeLocalMediaTrack) AddOnCloseCalls(stub func(func(isExpectedToResume bool))) {
fake.addOnCloseMutex.Lock()
defer fake.addOnCloseMutex.Unlock()
fake.AddOnCloseStub = stub
}
func (fake *FakeLocalMediaTrack) AddOnCloseArgsForCall(i int) func() {
func (fake *FakeLocalMediaTrack) AddOnCloseArgsForCall(i int) func(isExpectedToResume bool) {
fake.addOnCloseMutex.RLock()
defer fake.addOnCloseMutex.RUnlock()
argsForCall := fake.addOnCloseArgsForCall[i]
@@ -733,6 +733,17 @@ type FakeLocalParticipant struct {
sendSpeakerUpdateReturnsOnCall map[int]struct {
result1 error
}
SetAttributesStub func(map[string]string) error
setAttributesMutex sync.RWMutex
setAttributesArgsForCall []struct {
arg1 map[string]string
}
setAttributesReturns struct {
result1 error
}
setAttributesReturnsOnCall map[int]struct {
result1 error
}
SetICEConfigStub func(*livekit.ICEConfig)
setICEConfigMutex sync.RWMutex
setICEConfigArgsForCall []struct {
@@ -4881,6 +4892,67 @@ func (fake *FakeLocalParticipant) SendSpeakerUpdateReturnsOnCall(i int, result1
}{result1}
}
func (fake *FakeLocalParticipant) SetAttributes(arg1 map[string]string) error {
fake.setAttributesMutex.Lock()
ret, specificReturn := fake.setAttributesReturnsOnCall[len(fake.setAttributesArgsForCall)]
fake.setAttributesArgsForCall = append(fake.setAttributesArgsForCall, struct {
arg1 map[string]string
}{arg1})
stub := fake.SetAttributesStub
fakeReturns := fake.setAttributesReturns
fake.recordInvocation("SetAttributes", []interface{}{arg1})
fake.setAttributesMutex.Unlock()
if stub != nil {
return stub(arg1)
}
if specificReturn {
return ret.result1
}
return fakeReturns.result1
}
func (fake *FakeLocalParticipant) SetAttributesCallCount() int {
fake.setAttributesMutex.RLock()
defer fake.setAttributesMutex.RUnlock()
return len(fake.setAttributesArgsForCall)
}
func (fake *FakeLocalParticipant) SetAttributesCalls(stub func(map[string]string) error) {
fake.setAttributesMutex.Lock()
defer fake.setAttributesMutex.Unlock()
fake.SetAttributesStub = stub
}
func (fake *FakeLocalParticipant) SetAttributesArgsForCall(i int) map[string]string {
fake.setAttributesMutex.RLock()
defer fake.setAttributesMutex.RUnlock()
argsForCall := fake.setAttributesArgsForCall[i]
return argsForCall.arg1
}
func (fake *FakeLocalParticipant) SetAttributesReturns(result1 error) {
fake.setAttributesMutex.Lock()
defer fake.setAttributesMutex.Unlock()
fake.SetAttributesStub = nil
fake.setAttributesReturns = struct {
result1 error
}{result1}
}
func (fake *FakeLocalParticipant) SetAttributesReturnsOnCall(i int, result1 error) {
fake.setAttributesMutex.Lock()
defer fake.setAttributesMutex.Unlock()
fake.SetAttributesStub = nil
if fake.setAttributesReturnsOnCall == nil {
fake.setAttributesReturnsOnCall = make(map[int]struct {
result1 error
})
}
fake.setAttributesReturnsOnCall[i] = struct {
result1 error
}{result1}
}
func (fake *FakeLocalParticipant) SetICEConfig(arg1 *livekit.ICEConfig) {
fake.setICEConfigMutex.Lock()
fake.setICEConfigArgsForCall = append(fake.setICEConfigArgsForCall, struct {
@@ -6571,6 +6643,8 @@ func (fake *FakeLocalParticipant) Invocations() map[string][][]interface{} {
defer fake.sendRoomUpdateMutex.RUnlock()
fake.sendSpeakerUpdateMutex.RLock()
defer fake.sendSpeakerUpdateMutex.RUnlock()
fake.setAttributesMutex.RLock()
defer fake.setAttributesMutex.RUnlock()
fake.setICEConfigMutex.RLock()
defer fake.setICEConfigMutex.RUnlock()
fake.setMetadataMutex.RLock()
+6 -6
View File
@@ -10,10 +10,10 @@ import (
)
type FakeMediaTrack struct {
AddOnCloseStub func(func())
AddOnCloseStub func(func(isExpectedToResume bool))
addOnCloseMutex sync.RWMutex
addOnCloseArgsForCall []struct {
arg1 func()
arg1 func(isExpectedToResume bool)
}
AddSubscriberStub func(types.LocalParticipant) (types.SubscribedTrack, error)
addSubscriberMutex sync.RWMutex
@@ -287,10 +287,10 @@ type FakeMediaTrack struct {
invocationsMutex sync.RWMutex
}
func (fake *FakeMediaTrack) AddOnClose(arg1 func()) {
func (fake *FakeMediaTrack) AddOnClose(arg1 func(isExpectedToResume bool)) {
fake.addOnCloseMutex.Lock()
fake.addOnCloseArgsForCall = append(fake.addOnCloseArgsForCall, struct {
arg1 func()
arg1 func(isExpectedToResume bool)
}{arg1})
stub := fake.AddOnCloseStub
fake.recordInvocation("AddOnClose", []interface{}{arg1})
@@ -306,13 +306,13 @@ func (fake *FakeMediaTrack) AddOnCloseCallCount() int {
return len(fake.addOnCloseArgsForCall)
}
func (fake *FakeMediaTrack) AddOnCloseCalls(stub func(func())) {
func (fake *FakeMediaTrack) AddOnCloseCalls(stub func(func(isExpectedToResume bool))) {
fake.addOnCloseMutex.Lock()
defer fake.addOnCloseMutex.Unlock()
fake.AddOnCloseStub = stub
}
func (fake *FakeMediaTrack) AddOnCloseArgsForCall(i int) func() {
func (fake *FakeMediaTrack) AddOnCloseArgsForCall(i int) func(isExpectedToResume bool) {
fake.addOnCloseMutex.RLock()
defer fake.addOnCloseMutex.RUnlock()
argsForCall := fake.addOnCloseArgsForCall[i]
@@ -175,16 +175,6 @@ type FakeParticipant struct {
arg2 bool
arg3 bool
}
SetMetadataStub func(string)
setMetadataMutex sync.RWMutex
setMetadataArgsForCall []struct {
arg1 string
}
SetNameStub func(string)
setNameMutex sync.RWMutex
setNameArgsForCall []struct {
arg1 string
}
StateStub func() livekit.ParticipantInfo_State
stateMutex sync.RWMutex
stateArgsForCall []struct {
@@ -1115,70 +1105,6 @@ func (fake *FakeParticipant) RemovePublishedTrackArgsForCall(i int) (types.Media
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
}
func (fake *FakeParticipant) SetMetadata(arg1 string) {
fake.setMetadataMutex.Lock()
fake.setMetadataArgsForCall = append(fake.setMetadataArgsForCall, struct {
arg1 string
}{arg1})
stub := fake.SetMetadataStub
fake.recordInvocation("SetMetadata", []interface{}{arg1})
fake.setMetadataMutex.Unlock()
if stub != nil {
fake.SetMetadataStub(arg1)
}
}
func (fake *FakeParticipant) SetMetadataCallCount() int {
fake.setMetadataMutex.RLock()
defer fake.setMetadataMutex.RUnlock()
return len(fake.setMetadataArgsForCall)
}
func (fake *FakeParticipant) SetMetadataCalls(stub func(string)) {
fake.setMetadataMutex.Lock()
defer fake.setMetadataMutex.Unlock()
fake.SetMetadataStub = stub
}
func (fake *FakeParticipant) SetMetadataArgsForCall(i int) string {
fake.setMetadataMutex.RLock()
defer fake.setMetadataMutex.RUnlock()
argsForCall := fake.setMetadataArgsForCall[i]
return argsForCall.arg1
}
func (fake *FakeParticipant) SetName(arg1 string) {
fake.setNameMutex.Lock()
fake.setNameArgsForCall = append(fake.setNameArgsForCall, struct {
arg1 string
}{arg1})
stub := fake.SetNameStub
fake.recordInvocation("SetName", []interface{}{arg1})
fake.setNameMutex.Unlock()
if stub != nil {
fake.SetNameStub(arg1)
}
}
func (fake *FakeParticipant) SetNameCallCount() int {
fake.setNameMutex.RLock()
defer fake.setNameMutex.RUnlock()
return len(fake.setNameArgsForCall)
}
func (fake *FakeParticipant) SetNameCalls(stub func(string)) {
fake.setNameMutex.Lock()
defer fake.setNameMutex.Unlock()
fake.SetNameStub = stub
}
func (fake *FakeParticipant) SetNameArgsForCall(i int) string {
fake.setNameMutex.RLock()
defer fake.setNameMutex.RUnlock()
argsForCall := fake.setNameArgsForCall[i]
return argsForCall.arg1
}
func (fake *FakeParticipant) State() livekit.ParticipantInfo_State {
fake.stateMutex.Lock()
ret, specificReturn := fake.stateReturnsOnCall[len(fake.stateArgsForCall)]
@@ -1561,10 +1487,6 @@ func (fake *FakeParticipant) Invocations() map[string][][]interface{} {
defer fake.kindMutex.RUnlock()
fake.removePublishedTrackMutex.RLock()
defer fake.removePublishedTrackMutex.RUnlock()
fake.setMetadataMutex.RLock()
defer fake.setMetadataMutex.RUnlock()
fake.setNameMutex.RLock()
defer fake.setNameMutex.RUnlock()
fake.stateMutex.RLock()
defer fake.stateMutex.RUnlock()
fake.subscriptionPermissionMutex.RLock()
+45 -8
View File
@@ -82,12 +82,19 @@ type FakeRoom struct {
syncStateReturnsOnCall map[int]struct {
result1 error
}
UpdateParticipantMetadataStub func(types.LocalParticipant, string, string)
UpdateParticipantMetadataStub func(types.LocalParticipant, string, string, map[string]string) error
updateParticipantMetadataMutex sync.RWMutex
updateParticipantMetadataArgsForCall []struct {
arg1 types.LocalParticipant
arg2 string
arg3 string
arg4 map[string]string
}
updateParticipantMetadataReturns struct {
result1 error
}
updateParticipantMetadataReturnsOnCall map[int]struct {
result1 error
}
UpdateSubscriptionPermissionStub func(types.LocalParticipant, *livekit.SubscriptionPermission) error
updateSubscriptionPermissionMutex sync.RWMutex
@@ -492,19 +499,26 @@ func (fake *FakeRoom) SyncStateReturnsOnCall(i int, result1 error) {
}{result1}
}
func (fake *FakeRoom) UpdateParticipantMetadata(arg1 types.LocalParticipant, arg2 string, arg3 string) {
func (fake *FakeRoom) UpdateParticipantMetadata(arg1 types.LocalParticipant, arg2 string, arg3 string, arg4 map[string]string) error {
fake.updateParticipantMetadataMutex.Lock()
ret, specificReturn := fake.updateParticipantMetadataReturnsOnCall[len(fake.updateParticipantMetadataArgsForCall)]
fake.updateParticipantMetadataArgsForCall = append(fake.updateParticipantMetadataArgsForCall, struct {
arg1 types.LocalParticipant
arg2 string
arg3 string
}{arg1, arg2, arg3})
arg4 map[string]string
}{arg1, arg2, arg3, arg4})
stub := fake.UpdateParticipantMetadataStub
fake.recordInvocation("UpdateParticipantMetadata", []interface{}{arg1, arg2, arg3})
fakeReturns := fake.updateParticipantMetadataReturns
fake.recordInvocation("UpdateParticipantMetadata", []interface{}{arg1, arg2, arg3, arg4})
fake.updateParticipantMetadataMutex.Unlock()
if stub != nil {
fake.UpdateParticipantMetadataStub(arg1, arg2, arg3)
return stub(arg1, arg2, arg3, arg4)
}
if specificReturn {
return ret.result1
}
return fakeReturns.result1
}
func (fake *FakeRoom) UpdateParticipantMetadataCallCount() int {
@@ -513,17 +527,40 @@ func (fake *FakeRoom) UpdateParticipantMetadataCallCount() int {
return len(fake.updateParticipantMetadataArgsForCall)
}
func (fake *FakeRoom) UpdateParticipantMetadataCalls(stub func(types.LocalParticipant, string, string)) {
func (fake *FakeRoom) UpdateParticipantMetadataCalls(stub func(types.LocalParticipant, string, string, map[string]string) error) {
fake.updateParticipantMetadataMutex.Lock()
defer fake.updateParticipantMetadataMutex.Unlock()
fake.UpdateParticipantMetadataStub = stub
}
func (fake *FakeRoom) UpdateParticipantMetadataArgsForCall(i int) (types.LocalParticipant, string, string) {
func (fake *FakeRoom) UpdateParticipantMetadataArgsForCall(i int) (types.LocalParticipant, string, string, map[string]string) {
fake.updateParticipantMetadataMutex.RLock()
defer fake.updateParticipantMetadataMutex.RUnlock()
argsForCall := fake.updateParticipantMetadataArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4
}
func (fake *FakeRoom) UpdateParticipantMetadataReturns(result1 error) {
fake.updateParticipantMetadataMutex.Lock()
defer fake.updateParticipantMetadataMutex.Unlock()
fake.UpdateParticipantMetadataStub = nil
fake.updateParticipantMetadataReturns = struct {
result1 error
}{result1}
}
func (fake *FakeRoom) UpdateParticipantMetadataReturnsOnCall(i int, result1 error) {
fake.updateParticipantMetadataMutex.Lock()
defer fake.updateParticipantMetadataMutex.Unlock()
fake.UpdateParticipantMetadataStub = nil
if fake.updateParticipantMetadataReturnsOnCall == nil {
fake.updateParticipantMetadataReturnsOnCall = make(map[int]struct {
result1 error
})
}
fake.updateParticipantMetadataReturnsOnCall[i] = struct {
result1 error
}{result1}
}
func (fake *FakeRoom) UpdateSubscriptionPermission(arg1 types.LocalParticipant, arg2 *livekit.SubscriptionPermission) error {
@@ -81,10 +81,10 @@ type FakeSubscribedTrack struct {
needsNegotiationReturnsOnCall map[int]struct {
result1 bool
}
OnCloseStub func(func(willBeResumed bool))
OnCloseStub func(func(isExpectedToResume bool))
onCloseMutex sync.RWMutex
onCloseArgsForCall []struct {
arg1 func(willBeResumed bool)
arg1 func(isExpectedToResume bool)
}
PublisherIDStub func() livekit.ParticipantID
publisherIDMutex sync.RWMutex
@@ -557,10 +557,10 @@ func (fake *FakeSubscribedTrack) NeedsNegotiationReturnsOnCall(i int, result1 bo
}{result1}
}
func (fake *FakeSubscribedTrack) OnClose(arg1 func(willBeResumed bool)) {
func (fake *FakeSubscribedTrack) OnClose(arg1 func(isExpectedToResume bool)) {
fake.onCloseMutex.Lock()
fake.onCloseArgsForCall = append(fake.onCloseArgsForCall, struct {
arg1 func(willBeResumed bool)
arg1 func(isExpectedToResume bool)
}{arg1})
stub := fake.OnCloseStub
fake.recordInvocation("OnClose", []interface{}{arg1})
@@ -576,13 +576,13 @@ func (fake *FakeSubscribedTrack) OnCloseCallCount() int {
return len(fake.onCloseArgsForCall)
}
func (fake *FakeSubscribedTrack) OnCloseCalls(stub func(func(willBeResumed bool))) {
func (fake *FakeSubscribedTrack) OnCloseCalls(stub func(func(isExpectedToResume bool))) {
fake.onCloseMutex.Lock()
defer fake.onCloseMutex.Unlock()
fake.OnCloseStub = stub
}
func (fake *FakeSubscribedTrack) OnCloseArgsForCall(i int) func(willBeResumed bool) {
func (fake *FakeSubscribedTrack) OnCloseArgsForCall(i int) func(isExpectedToResume bool) {
fake.onCloseMutex.RLock()
defer fake.onCloseMutex.RUnlock()
argsForCall := fake.onCloseArgsForCall[i]
+8 -11
View File
@@ -20,6 +20,7 @@ import (
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
"golang.org/x/exp/maps"
"github.com/livekit/livekit-server/pkg/rtc/types"
"github.com/livekit/protocol/utils"
@@ -65,7 +66,7 @@ func NewUpTrackManager(params UpTrackManagerParams) *UpTrackManager {
}
}
func (u *UpTrackManager) Close(willBeResumed bool) {
func (u *UpTrackManager) Close(isExpectedToResume bool) {
u.lock.Lock()
if u.closed {
u.lock.Unlock()
@@ -79,7 +80,7 @@ func (u *UpTrackManager) Close(willBeResumed bool) {
u.lock.Unlock()
for _, t := range publishedTracks {
t.Close(willBeResumed)
t.Close(isExpectedToResume)
}
if onClose := u.getOnUpTrackManagerClose(); onClose != nil {
@@ -147,11 +148,7 @@ func (u *UpTrackManager) GetPublishedTracks() []types.MediaTrack {
u.lock.RLock()
defer u.lock.RUnlock()
tracks := make([]types.MediaTrack, 0, len(u.publishedTracks))
for _, t := range u.publishedTracks {
tracks = append(tracks, t)
}
return tracks
return maps.Values(u.publishedTracks)
}
func (u *UpTrackManager) UpdateSubscriptionPermission(
@@ -277,7 +274,7 @@ func (u *UpTrackManager) AddPublishedTrack(track types.MediaTrack) {
u.lock.Unlock()
u.params.Logger.Debugw("added published track", "trackID", track.ID(), "trackInfo", logger.Proto(track.ToProto()))
track.AddOnClose(func() {
track.AddOnClose(func(_isExpectedToResume bool) {
u.lock.Lock()
delete(u.publishedTracks, track.ID())
// not modifying subscription permissions, will get reset on next update from participant
@@ -285,11 +282,11 @@ func (u *UpTrackManager) AddPublishedTrack(track types.MediaTrack) {
})
}
func (u *UpTrackManager) RemovePublishedTrack(track types.MediaTrack, willBeResumed bool, shouldClose bool) {
func (u *UpTrackManager) RemovePublishedTrack(track types.MediaTrack, isExpectedToResume bool, shouldClose bool) {
if shouldClose {
track.Close(willBeResumed)
track.Close(isExpectedToResume)
} else {
track.ClearAllReceivers(willBeResumed)
track.ClearAllReceivers(isExpectedToResume)
}
u.lock.Lock()
delete(u.publishedTracks, track.ID())
+10 -14
View File
@@ -54,7 +54,16 @@ func NewEgressLauncher(client rpc.EgressClient, io IOClient) rtc.EgressLauncher
}
func (s *egressLauncher) StartEgress(ctx context.Context, req *rpc.StartEgressRequest) (*livekit.EgressInfo, error) {
info, err := s.StartEgressWithClusterId(ctx, "", req)
if s.client == nil {
return nil, ErrEgressNotConnected
}
// Ensure we have an Egress ID
if req.EgressId == "" {
req.EgressId = guid.New(utils.EgressPrefix)
}
info, err := s.client.StartEgress(ctx, "", req)
if err != nil {
return nil, err
}
@@ -66,16 +75,3 @@ func (s *egressLauncher) StartEgress(ctx context.Context, req *rpc.StartEgressRe
return info, nil
}
func (s *egressLauncher) StartEgressWithClusterId(ctx context.Context, clusterId string, req *rpc.StartEgressRequest) (*livekit.EgressInfo, error) {
if s.client == nil {
return nil, ErrEgressNotConnected
}
// Ensure we have an Egress ID
if req.EgressId == "" {
req.EgressId = guid.New(utils.EgressPrefix)
}
return s.client.StartEgress(ctx, clusterId, req)
}
+79
View File
@@ -0,0 +1,79 @@
// Copyright 2024 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package service_test
import (
"fmt"
"log"
"net"
"os"
"sync/atomic"
"testing"
"github.com/ory/dockertest/v3"
)
var Docker *dockertest.Pool
func TestMain(m *testing.M) {
pool, err := dockertest.NewPool("")
if err != nil {
log.Fatalf("Could not construct pool: %s", err)
}
// uses pool to try to connect to Docker
err = pool.Client.Ping()
if err != nil {
log.Fatalf("Could not connect to Docker: %s", err)
}
Docker = pool
code := m.Run()
os.Exit(code)
}
func waitTCPPort(t testing.TB, addr string) {
if err := Docker.Retry(func() error {
conn, err := net.Dial("tcp", addr)
if err != nil {
t.Log(err)
return err
}
_ = conn.Close()
return nil
}); err != nil {
t.Fatal(err)
}
}
var redisLast uint32
func runRedis(t testing.TB) string {
c, err := Docker.RunWithOptions(&dockertest.RunOptions{
Name: fmt.Sprintf("lktest-redis-%d", atomic.AddUint32(&redisLast, 1)),
Repository: "redis", Tag: "latest",
})
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
_ = Docker.Purge(c)
})
addr := c.GetHostPort("6379/tcp")
waitTCPPort(t, addr)
t.Log("Redis running on", addr)
return addr
}
+1
View File
@@ -26,6 +26,7 @@ var (
ErrIngressNotFound = psrpc.NewErrorf(psrpc.NotFound, "ingress does not exist")
ErrIngressNonReusable = psrpc.NewErrorf(psrpc.InvalidArgument, "ingress is not reusable and cannot be modified")
ErrMetadataExceedsLimits = psrpc.NewErrorf(psrpc.InvalidArgument, "metadata size exceeds limits")
ErrAttributeExceedsLimits = psrpc.NewErrorf(psrpc.InvalidArgument, "attribute size exceeds limits")
ErrRoomNameExceedsLimits = psrpc.NewErrorf(psrpc.InvalidArgument, "room name length exceeds limits")
ErrParticipantIdentityExceedsLimits = psrpc.NewErrorf(psrpc.InvalidArgument, "participant identity length exceeds limits")
ErrOperationFailed = psrpc.NewErrorf(psrpc.Internal, "operation cannot be completed")
+6
View File
@@ -80,8 +80,14 @@ type RoomAllocator interface {
//counterfeiter:generate . SIPStore
type SIPStore interface {
StoreSIPTrunk(ctx context.Context, info *livekit.SIPTrunkInfo) error
StoreSIPInboundTrunk(ctx context.Context, info *livekit.SIPInboundTrunkInfo) error
StoreSIPOutboundTrunk(ctx context.Context, info *livekit.SIPOutboundTrunkInfo) error
LoadSIPTrunk(ctx context.Context, sipTrunkID string) (*livekit.SIPTrunkInfo, error)
LoadSIPInboundTrunk(ctx context.Context, sipTrunkID string) (*livekit.SIPInboundTrunkInfo, error)
LoadSIPOutboundTrunk(ctx context.Context, sipTrunkID string) (*livekit.SIPOutboundTrunkInfo, error)
ListSIPTrunk(ctx context.Context) ([]*livekit.SIPTrunkInfo, error)
ListSIPInboundTrunk(ctx context.Context) ([]*livekit.SIPInboundTrunkInfo, error)
ListSIPOutboundTrunk(ctx context.Context) ([]*livekit.SIPOutboundTrunkInfo, error)
DeleteSIPTrunk(ctx context.Context, info *livekit.SIPTrunkInfo) error
StoreSIPDispatchRule(ctx context.Context, info *livekit.SIPDispatchRuleInfo) error
+6 -6
View File
@@ -26,8 +26,8 @@ import (
// matchSIPTrunk finds a SIP Trunk definition matching the request.
// Returns nil if no rules matched or an error if there are conflicting definitions.
func (s *IOInfoService) matchSIPTrunk(ctx context.Context, calling, called string) (*livekit.SIPTrunkInfo, error) {
trunks, err := s.ss.ListSIPTrunk(ctx)
func (s *IOInfoService) matchSIPTrunk(ctx context.Context, calling, called string) (*livekit.SIPInboundTrunkInfo, error) {
trunks, err := s.ss.ListSIPInboundTrunk(ctx)
if err != nil {
return nil, err
}
@@ -36,7 +36,7 @@ func (s *IOInfoService) matchSIPTrunk(ctx context.Context, calling, called strin
// matchSIPDispatchRule finds the best dispatch rule matching the request parameters. Returns an error if no rule matched.
// Trunk parameter can be nil, in which case only wildcard dispatch rules will be effective (ones without Trunk IDs).
func (s *IOInfoService) matchSIPDispatchRule(ctx context.Context, trunk *livekit.SIPTrunkInfo, req *rpc.EvaluateSIPDispatchRulesRequest) (*livekit.SIPDispatchRuleInfo, error) {
func (s *IOInfoService) matchSIPDispatchRule(ctx context.Context, trunk *livekit.SIPInboundTrunkInfo, req *rpc.EvaluateSIPDispatchRulesRequest) (*livekit.SIPDispatchRuleInfo, error) {
// Trunk can still be nil here in case none matched or were defined.
// This is still fine, but only in case we'll match exactly one wildcard dispatch rule.
rules, err := s.ss.ListSIPDispatchRule(ctx)
@@ -74,7 +74,7 @@ func (s *IOInfoService) EvaluateSIPDispatchRules(ctx context.Context, req *rpc.E
return nil, err
}
log.Debugw("SIP dispatch rule matched", "sipRule", best.SipDispatchRuleId)
resp, err := sip.EvaluateDispatchRule(best, req)
resp, err := sip.EvaluateDispatchRule(trunkID, best, req)
if err != nil {
return nil, err
}
@@ -96,7 +96,7 @@ func (s *IOInfoService) GetSIPTrunkAuthentication(ctx context.Context, req *rpc.
log.Debugw("SIP trunk matched for auth", "sipTrunk", trunk.SipTrunkId)
return &rpc.GetSIPTrunkAuthenticationResponse{
SipTrunkId: trunk.SipTrunkId,
Username: trunk.InboundUsername,
Password: trunk.InboundPassword,
Username: trunk.AuthUsername,
Password: trunk.AuthPassword,
}, nil
}
+39 -82
View File
@@ -52,9 +52,6 @@ const (
IngressStatePrefix = "{ingress}_state:"
RoomIngressPrefix = "room_{ingress}:"
SIPTrunkKey = "sip_trunk"
SIPDispatchRuleKey = "sip_dispatch_rule"
// RoomParticipantsPrefix is hash of participant_name => ParticipantInfo
RoomParticipantsPrefix = "room_participants:"
@@ -825,94 +822,54 @@ func (s *RedisStore) DeleteIngress(_ context.Context, info *livekit.IngressInfo)
return nil
}
func (s *RedisStore) loadOne(ctx context.Context, key, id string, info proto.Message, notFoundErr error) error {
func redisStoreOne(ctx context.Context, s *RedisStore, key, id string, p proto.Message) error {
if id == "" {
return errors.New("id is not set")
}
data, err := proto.Marshal(p)
if err != nil {
return err
}
return s.rc.HSet(s.ctx, key, id, data).Err()
}
func redisLoadOne[T any, P interface {
*T
proto.Message
}](ctx context.Context, s *RedisStore, key, id string, notFoundErr error) (P, error) {
data, err := s.rc.HGet(s.ctx, key, id).Result()
switch err {
case nil:
return proto.Unmarshal([]byte(data), info)
case redis.Nil:
return notFoundErr
default:
return err
if err == redis.Nil {
return nil, notFoundErr
} else if err != nil {
return nil, err
}
var p P = new(T)
err = proto.Unmarshal([]byte(data), p)
if err != nil {
return nil, err
}
return p, err
}
func (s *RedisStore) loadMany(ctx context.Context, key string, onResult func() proto.Message) error {
func redisLoadMany[T any, P interface {
*T
proto.Message
}](ctx context.Context, s *RedisStore, key string) ([]P, error) {
data, err := s.rc.HGetAll(s.ctx, key).Result()
if err != nil {
if err == redis.Nil {
return nil
}
return err
if err == redis.Nil {
return nil, nil
} else if err != nil {
return nil, err
}
list := make([]P, 0, len(data))
for _, d := range data {
if err = proto.Unmarshal([]byte(d), onResult()); err != nil {
return err
var p P = new(T)
if err = proto.Unmarshal([]byte(d), p); err != nil {
return list, err
}
list = append(list, p)
}
return nil
}
func (s *RedisStore) StoreSIPTrunk(ctx context.Context, info *livekit.SIPTrunkInfo) error {
data, err := proto.Marshal(info)
if err != nil {
return err
}
return s.rc.HSet(s.ctx, SIPTrunkKey, info.SipTrunkId, data).Err()
}
func (s *RedisStore) LoadSIPTrunk(ctx context.Context, sipTrunkId string) (*livekit.SIPTrunkInfo, error) {
info := &livekit.SIPTrunkInfo{}
if err := s.loadOne(ctx, SIPTrunkKey, sipTrunkId, info, ErrSIPTrunkNotFound); err != nil {
return nil, err
}
return info, nil
}
func (s *RedisStore) DeleteSIPTrunk(ctx context.Context, info *livekit.SIPTrunkInfo) error {
return s.rc.HDel(s.ctx, SIPTrunkKey, info.SipTrunkId).Err()
}
func (s *RedisStore) ListSIPTrunk(ctx context.Context) (infos []*livekit.SIPTrunkInfo, err error) {
err = s.loadMany(ctx, SIPTrunkKey, func() proto.Message {
infos = append(infos, &livekit.SIPTrunkInfo{})
return infos[len(infos)-1]
})
return infos, err
}
func (s *RedisStore) StoreSIPDispatchRule(ctx context.Context, info *livekit.SIPDispatchRuleInfo) error {
data, err := proto.Marshal(info)
if err != nil {
return err
}
return s.rc.HSet(s.ctx, SIPDispatchRuleKey, info.SipDispatchRuleId, data).Err()
}
// LoadSIPDispatchRule fetches a single dispatch rule by ID, mapping a
// missing entry to ErrSIPDispatchRuleNotFound via loadOne.
func (s *RedisStore) LoadSIPDispatchRule(ctx context.Context, sipDispatchRuleId string) (*livekit.SIPDispatchRuleInfo, error) {
	out := &livekit.SIPDispatchRuleInfo{}
	err := s.loadOne(ctx, SIPDispatchRuleKey, sipDispatchRuleId, out, ErrSIPDispatchRuleNotFound)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// DeleteSIPDispatchRule removes the rule's entry from the dispatch-rule
// hash. HDel does not error when the field is absent (idempotent).
func (s *RedisStore) DeleteSIPDispatchRule(ctx context.Context, info *livekit.SIPDispatchRuleInfo) error {
	return s.rc.HDel(s.ctx, SIPDispatchRuleKey, info.SipDispatchRuleId).Err()
}
// ListSIPDispatchRule returns every stored SIP dispatch rule.
// The callback hands loadMany a freshly appended proto to unmarshal
// into, so infos is populated as a side effect of the scan.
//
// Fix: the block contained a stray, unreachable `return list, nil`
// (referencing an undefined `list`) left behind by a merge interleave.
func (s *RedisStore) ListSIPDispatchRule(ctx context.Context) (infos []*livekit.SIPDispatchRuleInfo, err error) {
	err = s.loadMany(ctx, SIPDispatchRuleKey, func() proto.Message {
		infos = append(infos, &livekit.SIPDispatchRuleInfo{})
		return infos[len(infos)-1]
	})
	return infos, err
}
+199
View File
@@ -0,0 +1,199 @@
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package service
import (
"context"
"github.com/livekit/protocol/livekit"
)
// Redis hash keys for the SIP configuration objects stored by RedisStore.
const (
	SIPTrunkKey         = "sip_trunk"          // legacy combined trunks (see loadSIPLegacyTrunk)
	SIPInboundTrunkKey  = "sip_inbound_trunk"  // inbound-only trunks
	SIPOutboundTrunkKey = "sip_outbound_trunk" // outbound-only trunks
	SIPDispatchRuleKey  = "sip_dispatch_rule"  // call dispatch rules
)
// StoreSIPTrunk persists a legacy (combined) trunk, keyed by trunk ID.
// NOTE(review): the caller's ctx is ignored; redisStoreOne receives
// s.ctx — confirm this is intended.
func (s *RedisStore) StoreSIPTrunk(ctx context.Context, info *livekit.SIPTrunkInfo) error {
	return redisStoreOne(s.ctx, s, SIPTrunkKey, info.SipTrunkId, info)
}

// StoreSIPInboundTrunk persists an inbound-only trunk, keyed by trunk ID.
func (s *RedisStore) StoreSIPInboundTrunk(ctx context.Context, info *livekit.SIPInboundTrunkInfo) error {
	return redisStoreOne(s.ctx, s, SIPInboundTrunkKey, info.SipTrunkId, info)
}

// StoreSIPOutboundTrunk persists an outbound-only trunk, keyed by trunk ID.
func (s *RedisStore) StoreSIPOutboundTrunk(ctx context.Context, info *livekit.SIPOutboundTrunkInfo) error {
	return redisStoreOne(s.ctx, s, SIPOutboundTrunkKey, info.SipTrunkId, info)
}
// loadSIPLegacyTrunk reads one trunk from the legacy combined hash.
func (s *RedisStore) loadSIPLegacyTrunk(ctx context.Context, id string) (*livekit.SIPTrunkInfo, error) {
	return redisLoadOne[livekit.SIPTrunkInfo](ctx, s, SIPTrunkKey, id, ErrSIPTrunkNotFound)
}

// loadSIPInboundTrunk reads one trunk from the inbound-only hash.
func (s *RedisStore) loadSIPInboundTrunk(ctx context.Context, id string) (*livekit.SIPInboundTrunkInfo, error) {
	return redisLoadOne[livekit.SIPInboundTrunkInfo](ctx, s, SIPInboundTrunkKey, id, ErrSIPTrunkNotFound)
}

// loadSIPOutboundTrunk reads one trunk from the outbound-only hash.
func (s *RedisStore) loadSIPOutboundTrunk(ctx context.Context, id string) (*livekit.SIPOutboundTrunkInfo, error) {
	return redisLoadOne[livekit.SIPOutboundTrunkInfo](ctx, s, SIPOutboundTrunkKey, id, ErrSIPTrunkNotFound)
}
// LoadSIPTrunk resolves a trunk ID against all three stores: legacy
// first, then inbound and outbound (each converted to the legacy
// representation). Any error other than not-found aborts the chain.
func (s *RedisStore) LoadSIPTrunk(ctx context.Context, id string) (*livekit.SIPTrunkInfo, error) {
	if tr, err := s.loadSIPLegacyTrunk(ctx, id); err == nil {
		return tr, nil
	} else if err != ErrSIPTrunkNotFound {
		return nil, err
	}
	if in, err := s.loadSIPInboundTrunk(ctx, id); err == nil {
		return in.AsTrunkInfo(), nil
	} else if err != ErrSIPTrunkNotFound {
		return nil, err
	}
	if out, err := s.loadSIPOutboundTrunk(ctx, id); err == nil {
		return out.AsTrunkInfo(), nil
	} else if err != ErrSIPTrunkNotFound {
		return nil, err
	}
	return nil, ErrSIPTrunkNotFound
}
// LoadSIPInboundTrunk resolves a trunk ID as an inbound trunk, falling
// back to a legacy trunk converted via AsInbound. Any error other than
// not-found aborts the lookup.
func (s *RedisStore) LoadSIPInboundTrunk(ctx context.Context, id string) (*livekit.SIPInboundTrunkInfo, error) {
	if in, err := s.loadSIPInboundTrunk(ctx, id); err == nil {
		return in, nil
	} else if err != ErrSIPTrunkNotFound {
		return nil, err
	}
	if tr, err := s.loadSIPLegacyTrunk(ctx, id); err == nil {
		return tr.AsInbound(), nil
	} else if err != ErrSIPTrunkNotFound {
		return nil, err
	}
	return nil, ErrSIPTrunkNotFound
}
// LoadSIPOutboundTrunk resolves a trunk ID as an outbound trunk,
// falling back to a legacy trunk converted via AsOutbound. Any error
// other than not-found aborts the lookup.
func (s *RedisStore) LoadSIPOutboundTrunk(ctx context.Context, id string) (*livekit.SIPOutboundTrunkInfo, error) {
	if out, err := s.loadSIPOutboundTrunk(ctx, id); err == nil {
		return out, nil
	} else if err != ErrSIPTrunkNotFound {
		return nil, err
	}
	if tr, err := s.loadSIPLegacyTrunk(ctx, id); err == nil {
		return tr.AsOutbound(), nil
	} else if err != ErrSIPTrunkNotFound {
		return nil, err
	}
	return nil, ErrSIPTrunkNotFound
}
// deleteSIPTrunk removes the trunk ID from all three trunk hashes
// (legacy, inbound, outbound) in one transactional pipeline, so a
// trunk stored under any representation is removed in a single
// round trip.
func (s *RedisStore) deleteSIPTrunk(ctx context.Context, id string) error {
	tx := s.rc.TxPipeline()
	tx.HDel(s.ctx, SIPTrunkKey, id)
	tx.HDel(s.ctx, SIPInboundTrunkKey, id)
	tx.HDel(s.ctx, SIPOutboundTrunkKey, id)
	_, err := tx.Exec(ctx)
	return err
}

// DeleteSIPTrunk removes the trunk with info's ID, regardless of which
// trunk kind it was stored as.
func (s *RedisStore) DeleteSIPTrunk(ctx context.Context, info *livekit.SIPTrunkInfo) error {
	return s.deleteSIPTrunk(ctx, info.SipTrunkId)
}
// listSIPLegacyTrunk lists every trunk in the legacy combined hash.
func (s *RedisStore) listSIPLegacyTrunk(ctx context.Context) ([]*livekit.SIPTrunkInfo, error) {
	return redisLoadMany[livekit.SIPTrunkInfo](ctx, s, SIPTrunkKey)
}

// listSIPInboundTrunk lists every trunk in the inbound-only hash.
func (s *RedisStore) listSIPInboundTrunk(ctx context.Context) ([]*livekit.SIPInboundTrunkInfo, error) {
	return redisLoadMany[livekit.SIPInboundTrunkInfo](ctx, s, SIPInboundTrunkKey)
}

// listSIPOutboundTrunk lists every trunk in the outbound-only hash.
func (s *RedisStore) listSIPOutboundTrunk(ctx context.Context) ([]*livekit.SIPOutboundTrunkInfo, error) {
	return redisLoadMany[livekit.SIPOutboundTrunkInfo](ctx, s, SIPOutboundTrunkKey)
}
// ListSIPTrunk lists all trunks in the legacy representation: legacy
// entries as-is, plus inbound and outbound entries converted via
// AsTrunkInfo. If a later listing fails, the trunks gathered so far
// are returned alongside the error.
func (s *RedisStore) ListSIPTrunk(ctx context.Context) ([]*livekit.SIPTrunkInfo, error) {
	trunks, err := s.listSIPLegacyTrunk(ctx)
	if err != nil {
		return nil, err
	}
	inbound, err := s.listSIPInboundTrunk(ctx)
	if err != nil {
		return trunks, err
	}
	for _, trunk := range inbound {
		trunks = append(trunks, trunk.AsTrunkInfo())
	}
	outbound, err := s.listSIPOutboundTrunk(ctx)
	if err != nil {
		return trunks, err
	}
	for _, trunk := range outbound {
		trunks = append(trunks, trunk.AsTrunkInfo())
	}
	return trunks, nil
}
// ListSIPInboundTrunk lists all inbound trunks, including legacy
// trunks converted via AsInbound. If the legacy listing fails, the
// inbound trunks gathered so far are returned alongside the error,
// matching ListSIPTrunk's partial-result convention.
//
// Fixes: dropped the unused named result parameters (the body never
// assigned them), and made the legacy-listing error path return the
// partial list instead of nil for consistency with ListSIPTrunk.
func (s *RedisStore) ListSIPInboundTrunk(ctx context.Context) ([]*livekit.SIPInboundTrunkInfo, error) {
	in, err := s.listSIPInboundTrunk(ctx)
	if err != nil {
		return in, err
	}
	old, err := s.listSIPLegacyTrunk(ctx)
	if err != nil {
		return in, err
	}
	for _, t := range old {
		in = append(in, t.AsInbound())
	}
	return in, nil
}
// ListSIPOutboundTrunk lists all outbound trunks, including legacy
// trunks converted via AsOutbound. If the legacy listing fails, the
// outbound trunks gathered so far are returned alongside the error,
// matching ListSIPTrunk's partial-result convention.
//
// Fixes: dropped the unused named result parameters (the body never
// assigned them), and made the legacy-listing error path return the
// partial list instead of nil for consistency with ListSIPTrunk.
func (s *RedisStore) ListSIPOutboundTrunk(ctx context.Context) ([]*livekit.SIPOutboundTrunkInfo, error) {
	out, err := s.listSIPOutboundTrunk(ctx)
	if err != nil {
		return out, err
	}
	old, err := s.listSIPLegacyTrunk(ctx)
	if err != nil {
		return out, err
	}
	for _, t := range old {
		out = append(out, t.AsOutbound())
	}
	return out, nil
}
// StoreSIPDispatchRule persists a dispatch rule, keyed by its rule ID.
func (s *RedisStore) StoreSIPDispatchRule(ctx context.Context, info *livekit.SIPDispatchRuleInfo) error {
	return redisStoreOne(ctx, s, SIPDispatchRuleKey, info.SipDispatchRuleId, info)
}

// LoadSIPDispatchRule fetches one dispatch rule by ID, returning
// ErrSIPDispatchRuleNotFound when no entry exists.
func (s *RedisStore) LoadSIPDispatchRule(ctx context.Context, sipDispatchRuleId string) (*livekit.SIPDispatchRuleInfo, error) {
	return redisLoadOne[livekit.SIPDispatchRuleInfo](ctx, s, SIPDispatchRuleKey, sipDispatchRuleId, ErrSIPDispatchRuleNotFound)
}

// DeleteSIPDispatchRule removes the rule's hash entry. HDel does not
// error for a missing field, so deleting twice is safe (see the
// double-delete assertions in TestSIPStoreDispatch).
func (s *RedisStore) DeleteSIPDispatchRule(ctx context.Context, info *livekit.SIPDispatchRuleInfo) error {
	return s.rc.HDel(s.ctx, SIPDispatchRuleKey, info.SipDispatchRuleId).Err()
}
// ListSIPDispatchRule returns every stored SIP dispatch rule.
//
// Fix: removed the named result parameters (infos, err) — the body
// returns redisLoadMany's results directly and never assigned them,
// so the names were dead and misleading.
func (s *RedisStore) ListSIPDispatchRule(ctx context.Context) ([]*livekit.SIPDispatchRuleInfo, error) {
	return redisLoadMany[livekit.SIPDispatchRuleInfo](ctx, s, SIPDispatchRuleKey)
}
+263
View File
@@ -0,0 +1,263 @@
// Copyright 2024 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package service_test
import (
"context"
"slices"
"strings"
"testing"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/utils"
"github.com/livekit/protocol/utils/guid"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
"github.com/livekit/livekit-server/pkg/service"
)
// TestSIPStoreDispatch exercises the full CRUD cycle for SIP dispatch
// rules against a Redis-backed store: empty listing, not-found load,
// rejected store without an ID, then store, load, list, and
// (idempotent) delete. Steps are order-dependent; do not reorder.
func TestSIPStoreDispatch(t *testing.T) {
	ctx := context.Background()
	rs := redisStore(t)
	id := guid.New(utils.SIPDispatchRulePrefix)

	// No dispatch rules initially.
	list, err := rs.ListSIPDispatchRule(ctx)
	require.NoError(t, err)
	require.Empty(t, list)

	// Loading non-existent dispatch should return proper not found error.
	got, err := rs.LoadSIPDispatchRule(ctx, id)
	require.Equal(t, service.ErrSIPDispatchRuleNotFound, err)
	require.Nil(t, got)

	// Creation without ID should fail.
	rule := &livekit.SIPDispatchRuleInfo{
		TrunkIds: []string{"trunk"},
		Rule: &livekit.SIPDispatchRule{Rule: &livekit.SIPDispatchRule_DispatchRuleDirect{
			DispatchRuleDirect: &livekit.SIPDispatchRuleDirect{
				RoomName: "room",
				Pin:      "1234",
			},
		}},
	}
	err = rs.StoreSIPDispatchRule(ctx, rule)
	require.Error(t, err)

	// Creation succeeds once an ID is set.
	rule.SipDispatchRuleId = id
	err = rs.StoreSIPDispatchRule(ctx, rule)
	require.NoError(t, err)

	// Loading returns a proto-equal copy.
	got, err = rs.LoadSIPDispatchRule(ctx, id)
	require.NoError(t, err)
	require.True(t, proto.Equal(rule, got))

	// Listing contains exactly the stored rule.
	list, err = rs.ListSIPDispatchRule(ctx)
	require.NoError(t, err)
	require.Len(t, list, 1)
	require.True(t, proto.Equal(rule, list[0]))

	// Deletion. Should not return error if not exists (second call).
	err = rs.DeleteSIPDispatchRule(ctx, &livekit.SIPDispatchRuleInfo{SipDispatchRuleId: id})
	require.NoError(t, err)
	err = rs.DeleteSIPDispatchRule(ctx, &livekit.SIPDispatchRuleInfo{SipDispatchRuleId: id})
	require.NoError(t, err)

	// Check that it's deleted: list is empty and load reports not-found.
	list, err = rs.ListSIPDispatchRule(ctx)
	require.NoError(t, err)
	require.Empty(t, list)
	got, err = rs.LoadSIPDispatchRule(ctx, id)
	require.Equal(t, service.ErrSIPDispatchRuleNotFound, err)
	require.Nil(t, got)
}
// TestSIPStoreTrunk exercises the trunk store across all three trunk
// representations (legacy, inbound, outbound): empty state, ID-less
// store rejection, direct and cross-representation ("compat") loads,
// merged listings, and idempotent deletion that spans all three hashes.
// Steps are order-dependent; do not reorder.
func TestSIPStoreTrunk(t *testing.T) {
	ctx := context.Background()
	rs := redisStore(t)
	oldID := guid.New(utils.SIPTrunkPrefix)
	inID := guid.New(utils.SIPTrunkPrefix)
	outID := guid.New(utils.SIPTrunkPrefix)

	// No trunks initially. Check legacy, inbound, outbound.
	// Loading non-existent trunk should return proper not found error.
	oldList, err := rs.ListSIPTrunk(ctx)
	require.NoError(t, err)
	require.Empty(t, oldList)
	old, err := rs.LoadSIPTrunk(ctx, oldID)
	require.Equal(t, service.ErrSIPTrunkNotFound, err)
	require.Nil(t, old)
	inList, err := rs.ListSIPInboundTrunk(ctx)
	require.NoError(t, err)
	require.Empty(t, inList)
	in, err := rs.LoadSIPInboundTrunk(ctx, oldID)
	require.Equal(t, service.ErrSIPTrunkNotFound, err)
	require.Nil(t, in)
	outList, err := rs.ListSIPOutboundTrunk(ctx)
	require.NoError(t, err)
	require.Empty(t, outList)
	out, err := rs.LoadSIPOutboundTrunk(ctx, oldID)
	require.Equal(t, service.ErrSIPTrunkNotFound, err)
	require.Nil(t, out)

	// Creation without ID should fail (all three kinds).
	oldT := &livekit.SIPTrunkInfo{
		Name: "Legacy",
	}
	err = rs.StoreSIPTrunk(ctx, oldT)
	require.Error(t, err)
	inT := &livekit.SIPInboundTrunkInfo{
		Name: "Inbound",
	}
	err = rs.StoreSIPInboundTrunk(ctx, inT)
	require.Error(t, err)
	outT := &livekit.SIPOutboundTrunkInfo{
		Name: "Outbound",
	}
	err = rs.StoreSIPOutboundTrunk(ctx, outT)
	require.Error(t, err)

	// Creation succeeds once IDs are assigned.
	oldT.SipTrunkId = oldID
	err = rs.StoreSIPTrunk(ctx, oldT)
	require.NoError(t, err)
	inT.SipTrunkId = inID
	err = rs.StoreSIPInboundTrunk(ctx, inT)
	require.NoError(t, err)
	outT.SipTrunkId = outID
	err = rs.StoreSIPOutboundTrunk(ctx, outT)
	require.NoError(t, err)

	// Loading (with matching kind)
	oldT2, err := rs.LoadSIPTrunk(ctx, oldID)
	require.NoError(t, err)
	require.True(t, proto.Equal(oldT, oldT2))
	inT2, err := rs.LoadSIPInboundTrunk(ctx, inID)
	require.NoError(t, err)
	require.True(t, proto.Equal(inT, inT2))
	outT2, err := rs.LoadSIPOutboundTrunk(ctx, outID)
	require.NoError(t, err)
	require.True(t, proto.Equal(outT, outT2))

	// Loading (compat): each loader falls back to the other
	// representations and converts.
	oldT2, err = rs.LoadSIPTrunk(ctx, inID)
	require.NoError(t, err)
	require.True(t, proto.Equal(inT.AsTrunkInfo(), oldT2))
	oldT2, err = rs.LoadSIPTrunk(ctx, outID)
	require.NoError(t, err)
	require.True(t, proto.Equal(outT.AsTrunkInfo(), oldT2))
	inT2, err = rs.LoadSIPInboundTrunk(ctx, oldID)
	require.NoError(t, err)
	require.True(t, proto.Equal(oldT.AsInbound(), inT2))
	outT2, err = rs.LoadSIPOutboundTrunk(ctx, oldID)
	require.NoError(t, err)
	require.True(t, proto.Equal(oldT.AsOutbound(), outT2))

	// Listing (always shows legacy + new); sorted by name for
	// deterministic comparison.
	listOld, err := rs.ListSIPTrunk(ctx)
	require.NoError(t, err)
	require.Len(t, listOld, 3)
	slices.SortFunc(listOld, func(a, b *livekit.SIPTrunkInfo) int {
		return strings.Compare(a.Name, b.Name)
	})
	require.True(t, proto.Equal(inT.AsTrunkInfo(), listOld[0]))
	require.True(t, proto.Equal(oldT, listOld[1]))
	require.True(t, proto.Equal(outT.AsTrunkInfo(), listOld[2]))
	listIn, err := rs.ListSIPInboundTrunk(ctx)
	require.NoError(t, err)
	require.Len(t, listIn, 2)
	slices.SortFunc(listIn, func(a, b *livekit.SIPInboundTrunkInfo) int {
		return strings.Compare(a.Name, b.Name)
	})
	require.True(t, proto.Equal(inT, listIn[0]))
	require.True(t, proto.Equal(oldT.AsInbound(), listIn[1]))
	listOut, err := rs.ListSIPOutboundTrunk(ctx)
	require.NoError(t, err)
	require.Len(t, listOut, 2)
	slices.SortFunc(listOut, func(a, b *livekit.SIPOutboundTrunkInfo) int {
		return strings.Compare(a.Name, b.Name)
	})
	require.True(t, proto.Equal(oldT.AsOutbound(), listOut[0]))
	require.True(t, proto.Equal(outT, listOut[1]))

	// Deletion. Should not return error if not exists (second call).
	err = rs.DeleteSIPTrunk(ctx, &livekit.SIPTrunkInfo{SipTrunkId: oldID})
	require.NoError(t, err)
	err = rs.DeleteSIPTrunk(ctx, &livekit.SIPTrunkInfo{SipTrunkId: oldID})
	require.NoError(t, err)

	// Other objects are still there.
	inT2, err = rs.LoadSIPInboundTrunk(ctx, inID)
	require.NoError(t, err)
	require.True(t, proto.Equal(inT, inT2))
	outT2, err = rs.LoadSIPOutboundTrunk(ctx, outID)
	require.NoError(t, err)
	require.True(t, proto.Equal(outT, outT2))

	// Delete the rest (DeleteSIPTrunk clears all three hashes per ID).
	err = rs.DeleteSIPTrunk(ctx, &livekit.SIPTrunkInfo{SipTrunkId: inID})
	require.NoError(t, err)
	err = rs.DeleteSIPTrunk(ctx, &livekit.SIPTrunkInfo{SipTrunkId: outID})
	require.NoError(t, err)

	// Check everything is deleted.
	oldList, err = rs.ListSIPTrunk(ctx)
	require.NoError(t, err)
	require.Empty(t, oldList)
	inList, err = rs.ListSIPInboundTrunk(ctx)
	require.NoError(t, err)
	require.Empty(t, inList)
	outList, err = rs.ListSIPOutboundTrunk(ctx)
	require.NoError(t, err)
	require.Empty(t, outList)
	old, err = rs.LoadSIPTrunk(ctx, oldID)
	require.Equal(t, service.ErrSIPTrunkNotFound, err)
	require.Nil(t, old)
	in, err = rs.LoadSIPInboundTrunk(ctx, oldID)
	require.Equal(t, service.ErrSIPTrunkNotFound, err)
	require.Nil(t, in)
	out, err = rs.LoadSIPOutboundTrunk(ctx, oldID)
	require.Equal(t, service.ErrSIPTrunkNotFound, err)
	require.Nil(t, out)
}
+9 -6
View File
@@ -31,9 +31,13 @@ import (
"github.com/livekit/livekit-server/pkg/service"
)
func redisStore(t testing.TB) *service.RedisStore {
return service.NewRedisStore(redisClient(t))
}
func TestRoomInternal(t *testing.T) {
ctx := context.Background()
rs := service.NewRedisStore(redisClient())
rs := redisStore(t)
room := &livekit.Room{
Sid: "123",
@@ -61,7 +65,7 @@ func TestRoomInternal(t *testing.T) {
func TestParticipantPersistence(t *testing.T) {
ctx := context.Background()
rs := service.NewRedisStore(redisClient())
rs := redisStore(t)
roomName := livekit.RoomName("room1")
_ = rs.DeleteRoom(ctx, roomName)
@@ -108,7 +112,7 @@ func TestParticipantPersistence(t *testing.T) {
func TestRoomLock(t *testing.T) {
ctx := context.Background()
rs := service.NewRedisStore(redisClient())
rs := redisStore(t)
lockInterval := 5 * time.Millisecond
roomName := livekit.RoomName("myroom")
@@ -158,8 +162,7 @@ func TestRoomLock(t *testing.T) {
func TestEgressStore(t *testing.T) {
ctx := context.Background()
rc := redisClient()
rs := service.NewRedisStore(rc)
rs := redisStore(t)
roomName := "egress-test"
@@ -229,7 +232,7 @@ func TestEgressStore(t *testing.T) {
func TestIngressStore(t *testing.T) {
ctx := context.Background()
rs := service.NewRedisStore(redisClient())
rs := redisStore(t)
info := &livekit.IngressInfo{
IngressId: "ingressId",
+9 -2
View File
@@ -433,6 +433,7 @@ func (r *RoomManager) StartSession(
AdaptiveStream: pi.AdaptiveStream,
AllowTCPFallback: allowFallback,
TURNSEnabled: r.config.IsTURNSEnabled(),
MaxAttributesSize: r.config.Limit.MaxAttributesSize,
GetParticipantInfo: func(pID livekit.ParticipantID) *livekit.ParticipantInfo {
if p := room.GetParticipantByID(pID); p != nil {
return p.ToProto()
@@ -708,8 +709,14 @@ func (r *RoomManager) UpdateParticipant(ctx context.Context, req *livekit.Update
}
participant.GetLogger().Debugw("updating participant",
"metadata", req.Metadata, "permission", req.Permission)
room.UpdateParticipantMetadata(participant, req.Name, req.Metadata)
"metadata", req.Metadata,
"permission", req.Permission,
"attributes", req.Attributes,
)
err = room.UpdateParticipantMetadata(participant, req.Name, req.Metadata, req.Attributes)
if err != nil {
return nil, err
}
if req.Permission != nil {
participant.SetPermission(req.Permission)
}
+18 -9
View File
@@ -33,9 +33,8 @@ import (
"github.com/livekit/protocol/rpc"
)
// A rooms service that supports a single node
type RoomService struct {
roomConf config.RoomConfig
limitConf config.LimitConfig
apiConf config.APIConfig
psrpcConf rpc.PSRPCConfig
router routing.MessageRouter
@@ -49,7 +48,7 @@ type RoomService struct {
}
func NewRoomService(
roomConf config.RoomConfig,
limitConf config.LimitConfig,
apiConf config.APIConfig,
psrpcConf rpc.PSRPCConfig,
router routing.MessageRouter,
@@ -62,7 +61,7 @@ func NewRoomService(
participantClient rpc.TypedParticipantClient,
) (svc *RoomService, err error) {
svc = &RoomService{
roomConf: roomConf,
limitConf: limitConf,
apiConf: apiConf,
psrpcConf: psrpcConf,
router: router,
@@ -87,7 +86,7 @@ func (s *RoomService) CreateRoom(ctx context.Context, req *livekit.CreateRoomReq
return nil, ErrEgressNotConnected
}
if limit := s.roomConf.MaxRoomNameLength; limit > 0 && len(req.Name) > limit {
if limit := s.limitConf.MaxRoomNameLength; limit > 0 && len(req.Name) > limit {
return nil, fmt.Errorf("%w: max length %d", ErrRoomNameExceedsLimits, limit)
}
@@ -104,7 +103,7 @@ func (s *RoomService) CreateRoom(ctx context.Context, req *livekit.CreateRoomReq
defer done()
if created {
go s.agentClient.LaunchJob(ctx, &agent.JobDescription{
go s.agentClient.LaunchJob(context.WithoutCancel(ctx), &agent.JobDescription{
JobType: livekit.JobType_JT_ROOM,
Room: rm,
})
@@ -232,10 +231,20 @@ func (s *RoomService) MutePublishedTrack(ctx context.Context, req *livekit.MuteR
func (s *RoomService) UpdateParticipant(ctx context.Context, req *livekit.UpdateParticipantRequest) (*livekit.ParticipantInfo, error) {
AppendLogFields(ctx, "room", req.Room, "participant", req.Identity)
maxMetadataSize := int(s.roomConf.MaxMetadataSize)
maxMetadataSize := int(s.limitConf.MaxMetadataSize)
if maxMetadataSize > 0 && len(req.Metadata) > maxMetadataSize {
return nil, twirp.InvalidArgumentError(ErrMetadataExceedsLimits.Error(), strconv.Itoa(maxMetadataSize))
}
maxAttributeSize := int(s.limitConf.MaxAttributesSize)
if maxAttributeSize > 0 {
total := 0
for key, val := range req.Attributes {
total += len(key) + len(val)
}
if total > maxAttributeSize {
return nil, twirp.InvalidArgumentError(ErrAttributeExceedsLimits.Error(), strconv.Itoa(maxAttributeSize))
}
}
if err := EnsureAdminPermission(ctx, livekit.RoomName(req.Room)); err != nil {
return nil, twirpAuthError(err)
@@ -270,7 +279,7 @@ func (s *RoomService) SendData(ctx context.Context, req *livekit.SendDataRequest
func (s *RoomService) UpdateRoomMetadata(ctx context.Context, req *livekit.UpdateRoomMetadataRequest) (*livekit.Room, error) {
AppendLogFields(ctx, "room", req.Room, "size", len(req.Metadata))
maxMetadataSize := int(s.roomConf.MaxMetadataSize)
maxMetadataSize := int(s.limitConf.MaxMetadataSize)
if maxMetadataSize > 0 && len(req.Metadata) > maxMetadataSize {
return nil, twirp.InvalidArgumentError(ErrMetadataExceedsLimits.Error(), strconv.Itoa(maxMetadataSize))
}
@@ -314,7 +323,7 @@ func (s *RoomService) UpdateRoomMetadata(ctx context.Context, req *livekit.Updat
}
if created {
go s.agentClient.LaunchJob(ctx, &agent.JobDescription{
go s.agentClient.LaunchJob(context.WithoutCancel(ctx), &agent.JobDescription{
JobType: livekit.JobType_JT_ROOM,
Room: room,
})
+6 -6
View File
@@ -34,7 +34,7 @@ import (
func TestDeleteRoom(t *testing.T) {
t.Run("missing permissions", func(t *testing.T) {
svc := newTestRoomService(config.RoomConfig{})
svc := newTestRoomService(config.LimitConfig{})
grant := &auth.ClaimGrants{
Video: &auth.VideoGrant{},
}
@@ -48,7 +48,7 @@ func TestDeleteRoom(t *testing.T) {
func TestMetaDataLimits(t *testing.T) {
t.Run("metadata exceed limits", func(t *testing.T) {
svc := newTestRoomService(config.RoomConfig{MaxMetadataSize: 5})
svc := newTestRoomService(config.LimitConfig{MaxMetadataSize: 5})
grant := &auth.ClaimGrants{
Video: &auth.VideoGrant{},
}
@@ -72,8 +72,8 @@ func TestMetaDataLimits(t *testing.T) {
})
notExceedsLimitsSvc := map[string]*TestRoomService{
"metadata noe exceeds limits": newTestRoomService(config.RoomConfig{MaxMetadataSize: 5}),
"metadata no limits": newTestRoomService(config.RoomConfig{}), // no limits
"metadata noe exceeds limits": newTestRoomService(config.LimitConfig{MaxMetadataSize: 5}),
"metadata no limits": newTestRoomService(config.LimitConfig{}), // no limits
}
for n, s := range notExceedsLimitsSvc {
@@ -104,12 +104,12 @@ func TestMetaDataLimits(t *testing.T) {
}
}
func newTestRoomService(conf config.RoomConfig) *TestRoomService {
func newTestRoomService(limitConf config.LimitConfig) *TestRoomService {
router := &routingfakes.FakeRouter{}
allocator := &servicefakes.FakeRoomAllocator{}
store := &servicefakes.FakeServiceStore{}
svc, err := service.NewRoomService(
conf,
limitConf,
config.APIConfig{ExecutionTimeout: 2},
rpc.PSRPCConfig{},
router,
+3 -3
View File
@@ -120,7 +120,7 @@ func (s *RTCService) validate(r *http.Request) (livekit.RoomName, routing.Partic
if claims.Identity == "" {
return "", pi, http.StatusBadRequest, ErrIdentityEmpty
}
if limit := s.config.Room.MaxParticipantIdentityLength; limit > 0 && len(claims.Identity) > limit {
if limit := s.config.Limit.MaxParticipantIdentityLength; limit > 0 && len(claims.Identity) > limit {
return "", pi, http.StatusBadRequest, fmt.Errorf("%w: max length %d", ErrParticipantIdentityExceedsLimits, limit)
}
@@ -136,7 +136,7 @@ func (s *RTCService) validate(r *http.Request) (livekit.RoomName, routing.Partic
if onlyName != "" {
roomName = onlyName
}
if limit := s.config.Room.MaxRoomNameLength; limit > 0 && len(roomName) > limit {
if limit := s.config.Limit.MaxRoomNameLength; limit > 0 && len(roomName) > limit {
return "", pi, http.StatusBadRequest, fmt.Errorf("%w: max length %d", ErrRoomNameExceedsLimits, limit)
}
@@ -508,7 +508,7 @@ func (s *RTCService) DrainConnections(interval time.Duration) {
defer t.Stop()
for c := range conns {
c.Close()
_ = c.Close()
<-t.C
}
}
+472
View File
@@ -47,6 +47,32 @@ type FakeSIPStore struct {
result1 []*livekit.SIPDispatchRuleInfo
result2 error
}
ListSIPInboundTrunkStub func(context.Context) ([]*livekit.SIPInboundTrunkInfo, error)
listSIPInboundTrunkMutex sync.RWMutex
listSIPInboundTrunkArgsForCall []struct {
arg1 context.Context
}
listSIPInboundTrunkReturns struct {
result1 []*livekit.SIPInboundTrunkInfo
result2 error
}
listSIPInboundTrunkReturnsOnCall map[int]struct {
result1 []*livekit.SIPInboundTrunkInfo
result2 error
}
ListSIPOutboundTrunkStub func(context.Context) ([]*livekit.SIPOutboundTrunkInfo, error)
listSIPOutboundTrunkMutex sync.RWMutex
listSIPOutboundTrunkArgsForCall []struct {
arg1 context.Context
}
listSIPOutboundTrunkReturns struct {
result1 []*livekit.SIPOutboundTrunkInfo
result2 error
}
listSIPOutboundTrunkReturnsOnCall map[int]struct {
result1 []*livekit.SIPOutboundTrunkInfo
result2 error
}
ListSIPTrunkStub func(context.Context) ([]*livekit.SIPTrunkInfo, error)
listSIPTrunkMutex sync.RWMutex
listSIPTrunkArgsForCall []struct {
@@ -74,6 +100,34 @@ type FakeSIPStore struct {
result1 *livekit.SIPDispatchRuleInfo
result2 error
}
LoadSIPInboundTrunkStub func(context.Context, string) (*livekit.SIPInboundTrunkInfo, error)
loadSIPInboundTrunkMutex sync.RWMutex
loadSIPInboundTrunkArgsForCall []struct {
arg1 context.Context
arg2 string
}
loadSIPInboundTrunkReturns struct {
result1 *livekit.SIPInboundTrunkInfo
result2 error
}
loadSIPInboundTrunkReturnsOnCall map[int]struct {
result1 *livekit.SIPInboundTrunkInfo
result2 error
}
LoadSIPOutboundTrunkStub func(context.Context, string) (*livekit.SIPOutboundTrunkInfo, error)
loadSIPOutboundTrunkMutex sync.RWMutex
loadSIPOutboundTrunkArgsForCall []struct {
arg1 context.Context
arg2 string
}
loadSIPOutboundTrunkReturns struct {
result1 *livekit.SIPOutboundTrunkInfo
result2 error
}
loadSIPOutboundTrunkReturnsOnCall map[int]struct {
result1 *livekit.SIPOutboundTrunkInfo
result2 error
}
LoadSIPTrunkStub func(context.Context, string) (*livekit.SIPTrunkInfo, error)
loadSIPTrunkMutex sync.RWMutex
loadSIPTrunkArgsForCall []struct {
@@ -100,6 +154,30 @@ type FakeSIPStore struct {
storeSIPDispatchRuleReturnsOnCall map[int]struct {
result1 error
}
StoreSIPInboundTrunkStub func(context.Context, *livekit.SIPInboundTrunkInfo) error
storeSIPInboundTrunkMutex sync.RWMutex
storeSIPInboundTrunkArgsForCall []struct {
arg1 context.Context
arg2 *livekit.SIPInboundTrunkInfo
}
storeSIPInboundTrunkReturns struct {
result1 error
}
storeSIPInboundTrunkReturnsOnCall map[int]struct {
result1 error
}
StoreSIPOutboundTrunkStub func(context.Context, *livekit.SIPOutboundTrunkInfo) error
storeSIPOutboundTrunkMutex sync.RWMutex
storeSIPOutboundTrunkArgsForCall []struct {
arg1 context.Context
arg2 *livekit.SIPOutboundTrunkInfo
}
storeSIPOutboundTrunkReturns struct {
result1 error
}
storeSIPOutboundTrunkReturnsOnCall map[int]struct {
result1 error
}
StoreSIPTrunkStub func(context.Context, *livekit.SIPTrunkInfo) error
storeSIPTrunkMutex sync.RWMutex
storeSIPTrunkArgsForCall []struct {
@@ -304,6 +382,134 @@ func (fake *FakeSIPStore) ListSIPDispatchRuleReturnsOnCall(i int, result1 []*liv
}{result1, result2}
}
func (fake *FakeSIPStore) ListSIPInboundTrunk(arg1 context.Context) ([]*livekit.SIPInboundTrunkInfo, error) {
fake.listSIPInboundTrunkMutex.Lock()
ret, specificReturn := fake.listSIPInboundTrunkReturnsOnCall[len(fake.listSIPInboundTrunkArgsForCall)]
fake.listSIPInboundTrunkArgsForCall = append(fake.listSIPInboundTrunkArgsForCall, struct {
arg1 context.Context
}{arg1})
stub := fake.ListSIPInboundTrunkStub
fakeReturns := fake.listSIPInboundTrunkReturns
fake.recordInvocation("ListSIPInboundTrunk", []interface{}{arg1})
fake.listSIPInboundTrunkMutex.Unlock()
if stub != nil {
return stub(arg1)
}
if specificReturn {
return ret.result1, ret.result2
}
return fakeReturns.result1, fakeReturns.result2
}
func (fake *FakeSIPStore) ListSIPInboundTrunkCallCount() int {
fake.listSIPInboundTrunkMutex.RLock()
defer fake.listSIPInboundTrunkMutex.RUnlock()
return len(fake.listSIPInboundTrunkArgsForCall)
}
func (fake *FakeSIPStore) ListSIPInboundTrunkCalls(stub func(context.Context) ([]*livekit.SIPInboundTrunkInfo, error)) {
fake.listSIPInboundTrunkMutex.Lock()
defer fake.listSIPInboundTrunkMutex.Unlock()
fake.ListSIPInboundTrunkStub = stub
}
func (fake *FakeSIPStore) ListSIPInboundTrunkArgsForCall(i int) context.Context {
fake.listSIPInboundTrunkMutex.RLock()
defer fake.listSIPInboundTrunkMutex.RUnlock()
argsForCall := fake.listSIPInboundTrunkArgsForCall[i]
return argsForCall.arg1
}
func (fake *FakeSIPStore) ListSIPInboundTrunkReturns(result1 []*livekit.SIPInboundTrunkInfo, result2 error) {
fake.listSIPInboundTrunkMutex.Lock()
defer fake.listSIPInboundTrunkMutex.Unlock()
fake.ListSIPInboundTrunkStub = nil
fake.listSIPInboundTrunkReturns = struct {
result1 []*livekit.SIPInboundTrunkInfo
result2 error
}{result1, result2}
}
func (fake *FakeSIPStore) ListSIPInboundTrunkReturnsOnCall(i int, result1 []*livekit.SIPInboundTrunkInfo, result2 error) {
fake.listSIPInboundTrunkMutex.Lock()
defer fake.listSIPInboundTrunkMutex.Unlock()
fake.ListSIPInboundTrunkStub = nil
if fake.listSIPInboundTrunkReturnsOnCall == nil {
fake.listSIPInboundTrunkReturnsOnCall = make(map[int]struct {
result1 []*livekit.SIPInboundTrunkInfo
result2 error
})
}
fake.listSIPInboundTrunkReturnsOnCall[i] = struct {
result1 []*livekit.SIPInboundTrunkInfo
result2 error
}{result1, result2}
}
func (fake *FakeSIPStore) ListSIPOutboundTrunk(arg1 context.Context) ([]*livekit.SIPOutboundTrunkInfo, error) {
fake.listSIPOutboundTrunkMutex.Lock()
ret, specificReturn := fake.listSIPOutboundTrunkReturnsOnCall[len(fake.listSIPOutboundTrunkArgsForCall)]
fake.listSIPOutboundTrunkArgsForCall = append(fake.listSIPOutboundTrunkArgsForCall, struct {
arg1 context.Context
}{arg1})
stub := fake.ListSIPOutboundTrunkStub
fakeReturns := fake.listSIPOutboundTrunkReturns
fake.recordInvocation("ListSIPOutboundTrunk", []interface{}{arg1})
fake.listSIPOutboundTrunkMutex.Unlock()
if stub != nil {
return stub(arg1)
}
if specificReturn {
return ret.result1, ret.result2
}
return fakeReturns.result1, fakeReturns.result2
}
func (fake *FakeSIPStore) ListSIPOutboundTrunkCallCount() int {
fake.listSIPOutboundTrunkMutex.RLock()
defer fake.listSIPOutboundTrunkMutex.RUnlock()
return len(fake.listSIPOutboundTrunkArgsForCall)
}
func (fake *FakeSIPStore) ListSIPOutboundTrunkCalls(stub func(context.Context) ([]*livekit.SIPOutboundTrunkInfo, error)) {
fake.listSIPOutboundTrunkMutex.Lock()
defer fake.listSIPOutboundTrunkMutex.Unlock()
fake.ListSIPOutboundTrunkStub = stub
}
func (fake *FakeSIPStore) ListSIPOutboundTrunkArgsForCall(i int) context.Context {
fake.listSIPOutboundTrunkMutex.RLock()
defer fake.listSIPOutboundTrunkMutex.RUnlock()
argsForCall := fake.listSIPOutboundTrunkArgsForCall[i]
return argsForCall.arg1
}
func (fake *FakeSIPStore) ListSIPOutboundTrunkReturns(result1 []*livekit.SIPOutboundTrunkInfo, result2 error) {
fake.listSIPOutboundTrunkMutex.Lock()
defer fake.listSIPOutboundTrunkMutex.Unlock()
fake.ListSIPOutboundTrunkStub = nil
fake.listSIPOutboundTrunkReturns = struct {
result1 []*livekit.SIPOutboundTrunkInfo
result2 error
}{result1, result2}
}
func (fake *FakeSIPStore) ListSIPOutboundTrunkReturnsOnCall(i int, result1 []*livekit.SIPOutboundTrunkInfo, result2 error) {
fake.listSIPOutboundTrunkMutex.Lock()
defer fake.listSIPOutboundTrunkMutex.Unlock()
fake.ListSIPOutboundTrunkStub = nil
if fake.listSIPOutboundTrunkReturnsOnCall == nil {
fake.listSIPOutboundTrunkReturnsOnCall = make(map[int]struct {
result1 []*livekit.SIPOutboundTrunkInfo
result2 error
})
}
fake.listSIPOutboundTrunkReturnsOnCall[i] = struct {
result1 []*livekit.SIPOutboundTrunkInfo
result2 error
}{result1, result2}
}
func (fake *FakeSIPStore) ListSIPTrunk(arg1 context.Context) ([]*livekit.SIPTrunkInfo, error) {
fake.listSIPTrunkMutex.Lock()
ret, specificReturn := fake.listSIPTrunkReturnsOnCall[len(fake.listSIPTrunkArgsForCall)]
@@ -433,6 +639,136 @@ func (fake *FakeSIPStore) LoadSIPDispatchRuleReturnsOnCall(i int, result1 *livek
}{result1, result2}
}
// LoadSIPInboundTrunk records the invocation, then returns (in order of
// precedence) the stub's result, the per-call configured result, or the
// default configured result.
// NOTE(review): this looks like counterfeiter-generated fake code — prefer
// regenerating over hand-editing.
func (fake *FakeSIPStore) LoadSIPInboundTrunk(arg1 context.Context, arg2 string) (*livekit.SIPInboundTrunkInfo, error) {
	fake.loadSIPInboundTrunkMutex.Lock()
	// Look up the per-call return for this invocation index before appending,
	// so the index equals the call count at entry.
	ret, specificReturn := fake.loadSIPInboundTrunkReturnsOnCall[len(fake.loadSIPInboundTrunkArgsForCall)]
	fake.loadSIPInboundTrunkArgsForCall = append(fake.loadSIPInboundTrunkArgsForCall, struct {
		arg1 context.Context
		arg2 string
	}{arg1, arg2})
	stub := fake.LoadSIPInboundTrunkStub
	fakeReturns := fake.loadSIPInboundTrunkReturns
	fake.recordInvocation("LoadSIPInboundTrunk", []interface{}{arg1, arg2})
	// Unlock before invoking the stub (the stub runs outside the lock).
	fake.loadSIPInboundTrunkMutex.Unlock()
	if stub != nil {
		return stub(arg1, arg2)
	}
	if specificReturn {
		return ret.result1, ret.result2
	}
	return fakeReturns.result1, fakeReturns.result2
}

// LoadSIPInboundTrunkCallCount reports how many times LoadSIPInboundTrunk was invoked.
func (fake *FakeSIPStore) LoadSIPInboundTrunkCallCount() int {
	fake.loadSIPInboundTrunkMutex.RLock()
	defer fake.loadSIPInboundTrunkMutex.RUnlock()
	return len(fake.loadSIPInboundTrunkArgsForCall)
}

// LoadSIPInboundTrunkCalls installs a stub that handles future invocations.
func (fake *FakeSIPStore) LoadSIPInboundTrunkCalls(stub func(context.Context, string) (*livekit.SIPInboundTrunkInfo, error)) {
	fake.loadSIPInboundTrunkMutex.Lock()
	defer fake.loadSIPInboundTrunkMutex.Unlock()
	fake.LoadSIPInboundTrunkStub = stub
}

// LoadSIPInboundTrunkArgsForCall returns the arguments of the i-th (0-based) invocation.
func (fake *FakeSIPStore) LoadSIPInboundTrunkArgsForCall(i int) (context.Context, string) {
	fake.loadSIPInboundTrunkMutex.RLock()
	defer fake.loadSIPInboundTrunkMutex.RUnlock()
	argsForCall := fake.loadSIPInboundTrunkArgsForCall[i]
	return argsForCall.arg1, argsForCall.arg2
}

// LoadSIPInboundTrunkReturns sets the default result for all invocations,
// clearing any stub so the configured value takes effect.
func (fake *FakeSIPStore) LoadSIPInboundTrunkReturns(result1 *livekit.SIPInboundTrunkInfo, result2 error) {
	fake.loadSIPInboundTrunkMutex.Lock()
	defer fake.loadSIPInboundTrunkMutex.Unlock()
	fake.LoadSIPInboundTrunkStub = nil
	fake.loadSIPInboundTrunkReturns = struct {
		result1 *livekit.SIPInboundTrunkInfo
		result2 error
	}{result1, result2}
}

// LoadSIPInboundTrunkReturnsOnCall sets the result for the i-th (0-based)
// invocation, clearing any stub so the configured value takes effect.
func (fake *FakeSIPStore) LoadSIPInboundTrunkReturnsOnCall(i int, result1 *livekit.SIPInboundTrunkInfo, result2 error) {
	fake.loadSIPInboundTrunkMutex.Lock()
	defer fake.loadSIPInboundTrunkMutex.Unlock()
	fake.LoadSIPInboundTrunkStub = nil
	// Lazily allocate the per-call map on first use.
	if fake.loadSIPInboundTrunkReturnsOnCall == nil {
		fake.loadSIPInboundTrunkReturnsOnCall = make(map[int]struct {
			result1 *livekit.SIPInboundTrunkInfo
			result2 error
		})
	}
	fake.loadSIPInboundTrunkReturnsOnCall[i] = struct {
		result1 *livekit.SIPInboundTrunkInfo
		result2 error
	}{result1, result2}
}
// LoadSIPOutboundTrunk records the invocation, then returns (in order of
// precedence) the stub's result, the per-call configured result, or the
// default configured result.
// NOTE(review): this looks like counterfeiter-generated fake code — prefer
// regenerating over hand-editing.
func (fake *FakeSIPStore) LoadSIPOutboundTrunk(arg1 context.Context, arg2 string) (*livekit.SIPOutboundTrunkInfo, error) {
	fake.loadSIPOutboundTrunkMutex.Lock()
	// Look up the per-call return for this invocation index before appending,
	// so the index equals the call count at entry.
	ret, specificReturn := fake.loadSIPOutboundTrunkReturnsOnCall[len(fake.loadSIPOutboundTrunkArgsForCall)]
	fake.loadSIPOutboundTrunkArgsForCall = append(fake.loadSIPOutboundTrunkArgsForCall, struct {
		arg1 context.Context
		arg2 string
	}{arg1, arg2})
	stub := fake.LoadSIPOutboundTrunkStub
	fakeReturns := fake.loadSIPOutboundTrunkReturns
	fake.recordInvocation("LoadSIPOutboundTrunk", []interface{}{arg1, arg2})
	// Unlock before invoking the stub (the stub runs outside the lock).
	fake.loadSIPOutboundTrunkMutex.Unlock()
	if stub != nil {
		return stub(arg1, arg2)
	}
	if specificReturn {
		return ret.result1, ret.result2
	}
	return fakeReturns.result1, fakeReturns.result2
}

// LoadSIPOutboundTrunkCallCount reports how many times LoadSIPOutboundTrunk was invoked.
func (fake *FakeSIPStore) LoadSIPOutboundTrunkCallCount() int {
	fake.loadSIPOutboundTrunkMutex.RLock()
	defer fake.loadSIPOutboundTrunkMutex.RUnlock()
	return len(fake.loadSIPOutboundTrunkArgsForCall)
}

// LoadSIPOutboundTrunkCalls installs a stub that handles future invocations.
func (fake *FakeSIPStore) LoadSIPOutboundTrunkCalls(stub func(context.Context, string) (*livekit.SIPOutboundTrunkInfo, error)) {
	fake.loadSIPOutboundTrunkMutex.Lock()
	defer fake.loadSIPOutboundTrunkMutex.Unlock()
	fake.LoadSIPOutboundTrunkStub = stub
}

// LoadSIPOutboundTrunkArgsForCall returns the arguments of the i-th (0-based) invocation.
func (fake *FakeSIPStore) LoadSIPOutboundTrunkArgsForCall(i int) (context.Context, string) {
	fake.loadSIPOutboundTrunkMutex.RLock()
	defer fake.loadSIPOutboundTrunkMutex.RUnlock()
	argsForCall := fake.loadSIPOutboundTrunkArgsForCall[i]
	return argsForCall.arg1, argsForCall.arg2
}

// LoadSIPOutboundTrunkReturns sets the default result for all invocations,
// clearing any stub so the configured value takes effect.
func (fake *FakeSIPStore) LoadSIPOutboundTrunkReturns(result1 *livekit.SIPOutboundTrunkInfo, result2 error) {
	fake.loadSIPOutboundTrunkMutex.Lock()
	defer fake.loadSIPOutboundTrunkMutex.Unlock()
	fake.LoadSIPOutboundTrunkStub = nil
	fake.loadSIPOutboundTrunkReturns = struct {
		result1 *livekit.SIPOutboundTrunkInfo
		result2 error
	}{result1, result2}
}

// LoadSIPOutboundTrunkReturnsOnCall sets the result for the i-th (0-based)
// invocation, clearing any stub so the configured value takes effect.
func (fake *FakeSIPStore) LoadSIPOutboundTrunkReturnsOnCall(i int, result1 *livekit.SIPOutboundTrunkInfo, result2 error) {
	fake.loadSIPOutboundTrunkMutex.Lock()
	defer fake.loadSIPOutboundTrunkMutex.Unlock()
	fake.LoadSIPOutboundTrunkStub = nil
	// Lazily allocate the per-call map on first use.
	if fake.loadSIPOutboundTrunkReturnsOnCall == nil {
		fake.loadSIPOutboundTrunkReturnsOnCall = make(map[int]struct {
			result1 *livekit.SIPOutboundTrunkInfo
			result2 error
		})
	}
	fake.loadSIPOutboundTrunkReturnsOnCall[i] = struct {
		result1 *livekit.SIPOutboundTrunkInfo
		result2 error
	}{result1, result2}
}
func (fake *FakeSIPStore) LoadSIPTrunk(arg1 context.Context, arg2 string) (*livekit.SIPTrunkInfo, error) {
fake.loadSIPTrunkMutex.Lock()
ret, specificReturn := fake.loadSIPTrunkReturnsOnCall[len(fake.loadSIPTrunkArgsForCall)]
@@ -560,6 +896,130 @@ func (fake *FakeSIPStore) StoreSIPDispatchRuleReturnsOnCall(i int, result1 error
}{result1}
}
// StoreSIPInboundTrunk records the invocation, then returns (in order of
// precedence) the stub's result, the per-call configured result, or the
// default configured result.
// NOTE(review): this looks like counterfeiter-generated fake code — prefer
// regenerating over hand-editing.
func (fake *FakeSIPStore) StoreSIPInboundTrunk(arg1 context.Context, arg2 *livekit.SIPInboundTrunkInfo) error {
	fake.storeSIPInboundTrunkMutex.Lock()
	// Look up the per-call return for this invocation index before appending,
	// so the index equals the call count at entry.
	ret, specificReturn := fake.storeSIPInboundTrunkReturnsOnCall[len(fake.storeSIPInboundTrunkArgsForCall)]
	fake.storeSIPInboundTrunkArgsForCall = append(fake.storeSIPInboundTrunkArgsForCall, struct {
		arg1 context.Context
		arg2 *livekit.SIPInboundTrunkInfo
	}{arg1, arg2})
	stub := fake.StoreSIPInboundTrunkStub
	fakeReturns := fake.storeSIPInboundTrunkReturns
	fake.recordInvocation("StoreSIPInboundTrunk", []interface{}{arg1, arg2})
	// Unlock before invoking the stub (the stub runs outside the lock).
	fake.storeSIPInboundTrunkMutex.Unlock()
	if stub != nil {
		return stub(arg1, arg2)
	}
	if specificReturn {
		return ret.result1
	}
	return fakeReturns.result1
}

// StoreSIPInboundTrunkCallCount reports how many times StoreSIPInboundTrunk was invoked.
func (fake *FakeSIPStore) StoreSIPInboundTrunkCallCount() int {
	fake.storeSIPInboundTrunkMutex.RLock()
	defer fake.storeSIPInboundTrunkMutex.RUnlock()
	return len(fake.storeSIPInboundTrunkArgsForCall)
}

// StoreSIPInboundTrunkCalls installs a stub that handles future invocations.
func (fake *FakeSIPStore) StoreSIPInboundTrunkCalls(stub func(context.Context, *livekit.SIPInboundTrunkInfo) error) {
	fake.storeSIPInboundTrunkMutex.Lock()
	defer fake.storeSIPInboundTrunkMutex.Unlock()
	fake.StoreSIPInboundTrunkStub = stub
}

// StoreSIPInboundTrunkArgsForCall returns the arguments of the i-th (0-based) invocation.
func (fake *FakeSIPStore) StoreSIPInboundTrunkArgsForCall(i int) (context.Context, *livekit.SIPInboundTrunkInfo) {
	fake.storeSIPInboundTrunkMutex.RLock()
	defer fake.storeSIPInboundTrunkMutex.RUnlock()
	argsForCall := fake.storeSIPInboundTrunkArgsForCall[i]
	return argsForCall.arg1, argsForCall.arg2
}

// StoreSIPInboundTrunkReturns sets the default result for all invocations,
// clearing any stub so the configured value takes effect.
func (fake *FakeSIPStore) StoreSIPInboundTrunkReturns(result1 error) {
	fake.storeSIPInboundTrunkMutex.Lock()
	defer fake.storeSIPInboundTrunkMutex.Unlock()
	fake.StoreSIPInboundTrunkStub = nil
	fake.storeSIPInboundTrunkReturns = struct {
		result1 error
	}{result1}
}

// StoreSIPInboundTrunkReturnsOnCall sets the result for the i-th (0-based)
// invocation, clearing any stub so the configured value takes effect.
func (fake *FakeSIPStore) StoreSIPInboundTrunkReturnsOnCall(i int, result1 error) {
	fake.storeSIPInboundTrunkMutex.Lock()
	defer fake.storeSIPInboundTrunkMutex.Unlock()
	fake.StoreSIPInboundTrunkStub = nil
	// Lazily allocate the per-call map on first use.
	if fake.storeSIPInboundTrunkReturnsOnCall == nil {
		fake.storeSIPInboundTrunkReturnsOnCall = make(map[int]struct {
			result1 error
		})
	}
	fake.storeSIPInboundTrunkReturnsOnCall[i] = struct {
		result1 error
	}{result1}
}
// StoreSIPOutboundTrunk records the invocation, then returns (in order of
// precedence) the stub's result, the per-call configured result, or the
// default configured result.
// NOTE(review): this looks like counterfeiter-generated fake code — prefer
// regenerating over hand-editing.
func (fake *FakeSIPStore) StoreSIPOutboundTrunk(arg1 context.Context, arg2 *livekit.SIPOutboundTrunkInfo) error {
	fake.storeSIPOutboundTrunkMutex.Lock()
	// Look up the per-call return for this invocation index before appending,
	// so the index equals the call count at entry.
	ret, specificReturn := fake.storeSIPOutboundTrunkReturnsOnCall[len(fake.storeSIPOutboundTrunkArgsForCall)]
	fake.storeSIPOutboundTrunkArgsForCall = append(fake.storeSIPOutboundTrunkArgsForCall, struct {
		arg1 context.Context
		arg2 *livekit.SIPOutboundTrunkInfo
	}{arg1, arg2})
	stub := fake.StoreSIPOutboundTrunkStub
	fakeReturns := fake.storeSIPOutboundTrunkReturns
	fake.recordInvocation("StoreSIPOutboundTrunk", []interface{}{arg1, arg2})
	// Unlock before invoking the stub (the stub runs outside the lock).
	fake.storeSIPOutboundTrunkMutex.Unlock()
	if stub != nil {
		return stub(arg1, arg2)
	}
	if specificReturn {
		return ret.result1
	}
	return fakeReturns.result1
}

// StoreSIPOutboundTrunkCallCount reports how many times StoreSIPOutboundTrunk was invoked.
func (fake *FakeSIPStore) StoreSIPOutboundTrunkCallCount() int {
	fake.storeSIPOutboundTrunkMutex.RLock()
	defer fake.storeSIPOutboundTrunkMutex.RUnlock()
	return len(fake.storeSIPOutboundTrunkArgsForCall)
}

// StoreSIPOutboundTrunkCalls installs a stub that handles future invocations.
func (fake *FakeSIPStore) StoreSIPOutboundTrunkCalls(stub func(context.Context, *livekit.SIPOutboundTrunkInfo) error) {
	fake.storeSIPOutboundTrunkMutex.Lock()
	defer fake.storeSIPOutboundTrunkMutex.Unlock()
	fake.StoreSIPOutboundTrunkStub = stub
}

// StoreSIPOutboundTrunkArgsForCall returns the arguments of the i-th (0-based) invocation.
func (fake *FakeSIPStore) StoreSIPOutboundTrunkArgsForCall(i int) (context.Context, *livekit.SIPOutboundTrunkInfo) {
	fake.storeSIPOutboundTrunkMutex.RLock()
	defer fake.storeSIPOutboundTrunkMutex.RUnlock()
	argsForCall := fake.storeSIPOutboundTrunkArgsForCall[i]
	return argsForCall.arg1, argsForCall.arg2
}

// StoreSIPOutboundTrunkReturns sets the default result for all invocations,
// clearing any stub so the configured value takes effect.
func (fake *FakeSIPStore) StoreSIPOutboundTrunkReturns(result1 error) {
	fake.storeSIPOutboundTrunkMutex.Lock()
	defer fake.storeSIPOutboundTrunkMutex.Unlock()
	fake.StoreSIPOutboundTrunkStub = nil
	fake.storeSIPOutboundTrunkReturns = struct {
		result1 error
	}{result1}
}

// StoreSIPOutboundTrunkReturnsOnCall sets the result for the i-th (0-based)
// invocation, clearing any stub so the configured value takes effect.
func (fake *FakeSIPStore) StoreSIPOutboundTrunkReturnsOnCall(i int, result1 error) {
	fake.storeSIPOutboundTrunkMutex.Lock()
	defer fake.storeSIPOutboundTrunkMutex.Unlock()
	fake.StoreSIPOutboundTrunkStub = nil
	// Lazily allocate the per-call map on first use.
	if fake.storeSIPOutboundTrunkReturnsOnCall == nil {
		fake.storeSIPOutboundTrunkReturnsOnCall = make(map[int]struct {
			result1 error
		})
	}
	fake.storeSIPOutboundTrunkReturnsOnCall[i] = struct {
		result1 error
	}{result1}
}
func (fake *FakeSIPStore) StoreSIPTrunk(arg1 context.Context, arg2 *livekit.SIPTrunkInfo) error {
fake.storeSIPTrunkMutex.Lock()
ret, specificReturn := fake.storeSIPTrunkReturnsOnCall[len(fake.storeSIPTrunkArgsForCall)]
@@ -631,14 +1091,26 @@ func (fake *FakeSIPStore) Invocations() map[string][][]interface{} {
defer fake.deleteSIPTrunkMutex.RUnlock()
fake.listSIPDispatchRuleMutex.RLock()
defer fake.listSIPDispatchRuleMutex.RUnlock()
fake.listSIPInboundTrunkMutex.RLock()
defer fake.listSIPInboundTrunkMutex.RUnlock()
fake.listSIPOutboundTrunkMutex.RLock()
defer fake.listSIPOutboundTrunkMutex.RUnlock()
fake.listSIPTrunkMutex.RLock()
defer fake.listSIPTrunkMutex.RUnlock()
fake.loadSIPDispatchRuleMutex.RLock()
defer fake.loadSIPDispatchRuleMutex.RUnlock()
fake.loadSIPInboundTrunkMutex.RLock()
defer fake.loadSIPInboundTrunkMutex.RUnlock()
fake.loadSIPOutboundTrunkMutex.RLock()
defer fake.loadSIPOutboundTrunkMutex.RUnlock()
fake.loadSIPTrunkMutex.RLock()
defer fake.loadSIPTrunkMutex.RUnlock()
fake.storeSIPDispatchRuleMutex.RLock()
defer fake.storeSIPDispatchRuleMutex.RUnlock()
fake.storeSIPInboundTrunkMutex.RLock()
defer fake.storeSIPInboundTrunkMutex.RUnlock()
fake.storeSIPOutboundTrunkMutex.RLock()
defer fake.storeSIPOutboundTrunkMutex.RUnlock()
fake.storeSIPTrunkMutex.RLock()
defer fake.storeSIPTrunkMutex.RUnlock()
copiedInvocations := map[string][][]interface{}{}
+86 -5
View File
@@ -16,6 +16,7 @@ package service
import (
"context"
"errors"
"fmt"
"time"
@@ -82,7 +83,38 @@ func (s *SIPService) CreateSIPTrunk(ctx context.Context, req *livekit.CreateSIPT
}
// Validate all trunks including the new one first.
list, err := s.store.ListSIPTrunk(ctx)
list, err := s.store.ListSIPInboundTrunk(ctx)
if err != nil {
return nil, err
}
list = append(list, info.AsInbound())
if err = sip.ValidateTrunks(list); err != nil {
return nil, err
}
// Now we can generate ID and store.
info.SipTrunkId = guid.New(utils.SIPTrunkPrefix)
if err := s.store.StoreSIPTrunk(ctx, info); err != nil {
return nil, err
}
return info, nil
}
func (s *SIPService) CreateSIPInboundTrunk(ctx context.Context, req *livekit.CreateSIPInboundTrunkRequest) (*livekit.SIPInboundTrunkInfo, error) {
if s.store == nil {
return nil, ErrSIPNotConnected
}
info := req.Trunk
if info == nil {
return nil, errors.New("trunk info is required")
} else if info.SipTrunkId != "" {
return nil, errors.New("trunk ID must be empty")
}
// Keep ID empty still, so that validation can print "<new>" instead of a non-existent ID in the error.
// Validate all trunks including the new one first.
list, err := s.store.ListSIPInboundTrunk(ctx)
if err != nil {
return nil, err
}
@@ -93,7 +125,26 @@ func (s *SIPService) CreateSIPTrunk(ctx context.Context, req *livekit.CreateSIPT
// Now we can generate ID and store.
info.SipTrunkId = guid.New(utils.SIPTrunkPrefix)
if err := s.store.StoreSIPTrunk(ctx, info); err != nil {
if err := s.store.StoreSIPInboundTrunk(ctx, info); err != nil {
return nil, err
}
return info, nil
}
func (s *SIPService) CreateSIPOutboundTrunk(ctx context.Context, req *livekit.CreateSIPOutboundTrunkRequest) (*livekit.SIPOutboundTrunkInfo, error) {
if s.store == nil {
return nil, ErrSIPNotConnected
}
info := req.Trunk
if info == nil {
return nil, errors.New("trunk info is required")
} else if info.SipTrunkId != "" {
return nil, errors.New("trunk ID must be empty")
}
// No additional validation needed for outbound.
info.SipTrunkId = guid.New(utils.SIPTrunkPrefix)
if err := s.store.StoreSIPOutboundTrunk(ctx, info); err != nil {
return nil, err
}
return info, nil
@@ -112,6 +163,32 @@ func (s *SIPService) ListSIPTrunk(ctx context.Context, req *livekit.ListSIPTrunk
return &livekit.ListSIPTrunkResponse{Items: trunks}, nil
}
// ListSIPInboundTrunk returns all stored SIP inbound trunks.
// It returns ErrSIPNotConnected when no SIP store is configured.
func (s *SIPService) ListSIPInboundTrunk(ctx context.Context, req *livekit.ListSIPInboundTrunkRequest) (*livekit.ListSIPInboundTrunkResponse, error) {
	if s.store == nil {
		return nil, ErrSIPNotConnected
	}
	items, err := s.store.ListSIPInboundTrunk(ctx)
	if err != nil {
		return nil, err
	}
	resp := &livekit.ListSIPInboundTrunkResponse{Items: items}
	return resp, nil
}
// ListSIPOutboundTrunk returns all stored SIP outbound trunks.
// It returns ErrSIPNotConnected when no SIP store is configured.
func (s *SIPService) ListSIPOutboundTrunk(ctx context.Context, req *livekit.ListSIPOutboundTrunkRequest) (*livekit.ListSIPOutboundTrunkResponse, error) {
	if s.store == nil {
		return nil, ErrSIPNotConnected
	}
	items, err := s.store.ListSIPOutboundTrunk(ctx)
	if err != nil {
		return nil, err
	}
	resp := &livekit.ListSIPOutboundTrunkResponse{Items: items}
	return resp, nil
}
func (s *SIPService) DeleteSIPTrunk(ctx context.Context, req *livekit.DeleteSIPTrunkRequest) (*livekit.SIPTrunkInfo, error) {
if s.store == nil {
return nil, ErrSIPNotConnected
@@ -199,13 +276,17 @@ func (s *SIPService) CreateSIPParticipantWithToken(ctx context.Context, req *liv
log := logger.GetLogger()
log = log.WithValues("callId", callID, "roomName", req.RoomName, "sipTrunk", req.SipTrunkId, "toUser", req.SipCallTo)
trunk, err := s.store.LoadSIPTrunk(ctx, req.SipTrunkId)
trunk, err := s.store.LoadSIPOutboundTrunk(ctx, req.SipTrunkId)
if err != nil {
log.Errorw("cannot get trunk to update sip participant", err)
return nil, err
}
log = log.WithValues("fromUser", trunk.OutboundNumber, "toHost", trunk.OutboundAddress)
ireq := rpc.NewCreateSIPParticipantRequest(callID, wsUrl, token, req, trunk)
ireq, err := rpc.NewCreateSIPParticipantRequest(callID, wsUrl, token, req, trunk)
if err != nil {
log.Errorw("cannot create sip participant request", err)
return nil, err
}
log = log.WithValues("fromUser", ireq.Number, "toHost", trunk.Address)
// CreateSIPParticipant will wait for LiveKit Participant to be created and that can take some time.
// Thus, we must set a higher deadline for it, if it's not set already.
+32 -2
View File
@@ -15,7 +15,9 @@
package service_test
import (
"context"
"testing"
"time"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/require"
@@ -23,10 +25,38 @@ import (
"github.com/livekit/livekit-server/pkg/service"
)
func redisClient() *redis.Client {
return redis.NewClient(&redis.Options{
// redisClient returns a Redis client for tests. It first tries a locally
// running instance on localhost:6379 and falls back to starting Redis in
// Docker via runRedis. The returned client is closed automatically when the
// test finishes; the test fails if neither instance is reachable.
func redisClient(t testing.TB) *redis.Client {
	if cli, err := tryRedisAddr(t, "localhost:6379"); err == nil {
		return cli
	} else {
		t.Logf("local redis not available: %v", err)
	}
	t.Logf("starting redis in docker")
	cli, err := tryRedisAddr(t, runRedis(t))
	if err != nil {
		t.Fatal(err)
	}
	return cli
}

// tryRedisAddr connects to Redis at addr and verifies it with a short ping.
// On success it registers a cleanup to close the client and returns it; on
// failure it closes the client and returns the ping error.
func tryRedisAddr(t testing.TB, addr string) (*redis.Client, error) {
	cli := redis.NewClient(&redis.Options{
		Addr: addr,
	})
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := cli.Ping(ctx).Err(); err != nil {
		_ = cli.Close()
		return nil, err
	}
	t.Cleanup(func() {
		_ = cli.Close()
	})
	return cli, nil
}
func TestIsValidDomain(t *testing.T) {
+3 -3
View File
@@ -54,7 +54,7 @@ func InitializeServer(conf *config.Config, currentNode routing.LocalNode) (*Live
createClientConfiguration,
createForwardStats,
routing.CreateRouter,
getRoomConf,
getLimitConf,
config.DefaultAPIConfig,
wire.Bind(new(routing.MessageRouter), new(routing.Router)),
wire.Bind(new(livekit.RoomService), new(*RoomService)),
@@ -221,8 +221,8 @@ func createClientConfiguration() clientconfiguration.ClientConfigurationManager
return clientconfiguration.NewStaticClientConfigurationManager(clientconfiguration.StaticConfigurations)
}
func getRoomConf(config *config.Config) config.RoomConfig {
return config.Room
func getLimitConf(config *config.Config) config.LimitConfig {
return config.Limit
}
func getSignalRelayConfig(config *config.Config) config.SignalRelayConfig {
+4 -4
View File
@@ -36,7 +36,7 @@ import (
// Injectors from wire.go:
func InitializeServer(conf *config.Config, currentNode routing.LocalNode) (*LivekitServer, error) {
roomConfig := getRoomConf(conf)
limitConfig := getLimitConf(conf)
apiConfig := config.DefaultAPIConfig()
psrpcConfig := getPSRPCConfig(conf)
universalClient, err := createRedisClient(conf)
@@ -96,7 +96,7 @@ func InitializeServer(conf *config.Config, currentNode routing.LocalNode) (*Live
if err != nil {
return nil, err
}
roomService, err := NewRoomService(roomConfig, apiConfig, psrpcConfig, router, roomAllocator, objectStore, client, rtcEgressLauncher, topicFormatter, roomClient, participantClient)
roomService, err := NewRoomService(limitConfig, apiConfig, psrpcConfig, router, roomAllocator, objectStore, client, rtcEgressLauncher, topicFormatter, roomClient, participantClient)
if err != nil {
return nil, err
}
@@ -272,8 +272,8 @@ func createClientConfiguration() clientconfiguration.ClientConfigurationManager
return clientconfiguration.NewStaticClientConfigurationManager(clientconfiguration.StaticConfigurations)
}
func getRoomConf(config2 *config.Config) config.RoomConfig {
return config2.Room
func getLimitConf(config2 *config.Config) config.LimitConfig {
return config2.Limit
}
func getSignalRelayConfig(config2 *config.Config) config.SignalRelayConfig {
+47 -14
View File
@@ -132,6 +132,7 @@ type Buffer struct {
packetNotFoundCount atomic.Uint32
packetTooOldCount atomic.Uint32
extPacketTooMuchCount atomic.Uint32
invalidPacketCount atomic.Uint32
primaryBufferForRTX *Buffer
rtxPktBuf []byte
@@ -194,7 +195,7 @@ func (b *Buffer) SetAudioLossProxying(enable bool) {
b.enableAudioLossProxying = enable
}
func (b *Buffer) Bind(params webrtc.RTPParameters, codec webrtc.RTPCodecCapability) {
func (b *Buffer) Bind(params webrtc.RTPParameters, codec webrtc.RTPCodecCapability, bitrates int) {
b.Lock()
defer b.Unlock()
if b.bound {
@@ -264,6 +265,14 @@ func (b *Buffer) Bind(params webrtc.RTPParameters, codec webrtc.RTPCodecCapabili
}
}
}
if bitrates > 0 {
pps := bitrates / 8 / 1200
for pps > b.bucket.Capacity() {
if b.bucket.Grow() >= b.maxVideoPkts {
break
}
}
}
default:
b.codecType = webrtc.RTPCodecType(0)
@@ -308,6 +317,28 @@ func (b *Buffer) Write(pkt []byte) (n int, err error) {
return
}
if err = utils.ValidateRTPPacket(&rtpPacket, b.payloadType, b.mediaSSRC); err != nil {
invalidPacketCount := b.invalidPacketCount.Inc()
if (invalidPacketCount-1)%100 == 0 {
b.logger.Warnw(
"validating RTP packet failed", err,
"version", rtpPacket.Version,
"padding", rtpPacket.Padding,
"marker", rtpPacket.Marker,
"expectedPayloadType", b.payloadType,
"payloadType", rtpPacket.PayloadType,
"sequenceNumber", rtpPacket.SequenceNumber,
"timestamp", rtpPacket.Timestamp,
"expectedSSRC", b.mediaSSRC,
"ssrc", rtpPacket.SSRC,
"numExtensions", len(rtpPacket.Extensions),
"payloadSize", len(rtpPacket.Payload),
"rtpStats", b.rtpStats,
"snRangeMap", b.snRangeMap,
)
}
}
now := time.Now()
if b.twcc != nil && b.twccExtID != 0 && !b.closed.Load() {
if ext := rtpPacket.GetExtension(b.twccExtID); ext != nil {
@@ -580,12 +611,23 @@ func (b *Buffer) calc(rawPkt []byte, rtpPacket *rtp.Packet, arrivalTime time.Tim
rtpPacket.Header.SequenceNumber = uint16(flowState.ExtSequenceNumber)
_, err = b.bucket.AddPacketWithSequenceNumber(rawPkt, rtpPacket.Header.SequenceNumber)
if err != nil {
if errors.Is(err, bucket.ErrPacketTooOld) {
packetTooOldCount := b.packetTooOldCount.Inc()
if (packetTooOldCount-1)%100 == 0 {
if !flowState.IsDuplicate {
if errors.Is(err, bucket.ErrPacketTooOld) {
packetTooOldCount := b.packetTooOldCount.Inc()
if (packetTooOldCount-1)%100 == 0 {
b.logger.Warnw(
"could not add packet to bucket", err,
"count", packetTooOldCount,
"flowState", &flowState,
"snAdjustment", snAdjustment,
"incomingSequenceNumber", flowState.ExtSequenceNumber+snAdjustment,
"rtpStats", b.rtpStats,
"snRangeMap", b.snRangeMap,
)
}
} else if err != bucket.ErrRTXPacket {
b.logger.Warnw(
"could not add packet to bucket", err,
"count", packetTooOldCount,
"flowState", &flowState,
"snAdjustment", snAdjustment,
"incomingSequenceNumber", flowState.ExtSequenceNumber+snAdjustment,
@@ -593,15 +635,6 @@ func (b *Buffer) calc(rawPkt []byte, rtpPacket *rtp.Packet, arrivalTime time.Tim
"snRangeMap", b.snRangeMap,
)
}
} else if err != bucket.ErrRTXPacket {
b.logger.Warnw(
"could not add packet to bucket", err,
"flowState", &flowState,
"snAdjustment", snAdjustment,
"incomingSequenceNumber", flowState.ExtSequenceNumber+snAdjustment,
"rtpStats", b.rtpStats,
"snRangeMap", b.snRangeMap,
)
}
return
}
+46 -10
View File
@@ -44,7 +44,7 @@ var opusCodec = webrtc.RTPCodecParameters{
MimeType: "audio/opus",
ClockRate: 48000,
},
PayloadType: 96,
PayloadType: 111,
}
func TestNack(t *testing.T) {
@@ -68,7 +68,7 @@ func TestNack(t *testing.T) {
buff.Bind(webrtc.RTPParameters{
HeaderExtensions: nil,
Codecs: []webrtc.RTPCodecParameters{vp8Codec},
}, vp8Codec.RTPCodecCapability)
}, vp8Codec.RTPCodecCapability, 0)
rtt := uint32(20)
buff.nacker.SetRTT(rtt)
for i := 0; i < 15; i++ {
@@ -81,7 +81,13 @@ func TestNack(t *testing.T) {
time.Sleep(500 * time.Millisecond) // even a long wait should not exceed max retries
}
pkt := rtp.Packet{
Header: rtp.Header{SequenceNumber: uint16(i), Timestamp: uint32(i)},
Header: rtp.Header{
Version: 2,
PayloadType: 96,
SequenceNumber: uint16(i),
Timestamp: uint32(i),
SSRC: 123,
},
Payload: []byte{0xff, 0xff, 0xff, 0xfd, 0xb4, 0x9f, 0x94, 0x1},
}
b, err := pkt.Marshal()
@@ -127,7 +133,7 @@ func TestNack(t *testing.T) {
buff.Bind(webrtc.RTPParameters{
HeaderExtensions: nil,
Codecs: []webrtc.RTPCodecParameters{vp8Codec},
}, vp8Codec.RTPCodecCapability)
}, vp8Codec.RTPCodecCapability, 0)
rtt := uint32(30)
buff.nacker.SetRTT(rtt)
for i := 0; i < 15; i++ {
@@ -140,7 +146,13 @@ func TestNack(t *testing.T) {
time.Sleep(500 * time.Millisecond) // even a long wait should not exceed max retries
}
pkt := rtp.Packet{
Header: rtp.Header{SequenceNumber: uint16(i + 65533), Timestamp: uint32(i)},
Header: rtp.Header{
Version: 2,
PayloadType: 96,
SequenceNumber: uint16(i + 65533),
Timestamp: uint32(i),
SSRC: 123,
},
Payload: []byte{0xff, 0xff, 0xff, 0xfd, 0xb4, 0x9f, 0x94, 0x1},
}
b, err := pkt.Marshal()
@@ -166,23 +178,35 @@ func TestNewBuffer(t *testing.T) {
var TestPackets = []*rtp.Packet{
{
Header: rtp.Header{
Version: 2,
PayloadType: 96,
SequenceNumber: 65533,
SSRC: 123,
},
},
{
Header: rtp.Header{
Version: 2,
PayloadType: 96,
SequenceNumber: 65534,
SSRC: 123,
},
Payload: []byte{1},
},
{
Header: rtp.Header{
Version: 2,
PayloadType: 96,
SequenceNumber: 2,
SSRC: 123,
},
},
{
Header: rtp.Header{
Version: 2,
PayloadType: 96,
SequenceNumber: 65535,
SSRC: 123,
},
},
}
@@ -193,7 +217,7 @@ func TestNewBuffer(t *testing.T) {
buff.Bind(webrtc.RTPParameters{
HeaderExtensions: nil,
Codecs: []webrtc.RTPCodecParameters{vp8Codec},
}, vp8Codec.RTPCodecCapability)
}, vp8Codec.RTPCodecCapability, 0)
for _, p := range TestPackets {
buf, _ := p.Marshal()
@@ -229,10 +253,16 @@ func TestFractionLostReport(t *testing.T) {
buff.Bind(webrtc.RTPParameters{
HeaderExtensions: nil,
Codecs: []webrtc.RTPCodecParameters{opusCodec},
}, opusCodec.RTPCodecCapability)
}, opusCodec.RTPCodecCapability, 0)
for i := 0; i < 15; i++ {
pkt := rtp.Packet{
Header: rtp.Header{SequenceNumber: uint16(i), Timestamp: uint32(i)},
Header: rtp.Header{
Version: 2,
PayloadType: 111,
SequenceNumber: uint16(i),
Timestamp: uint32(i),
SSRC: 123,
},
Payload: []byte{0xff, 0xff, 0xff, 0xfd, 0xb4, 0x9f, 0x94, 0x1},
}
b, err := pkt.Marshal()
@@ -261,10 +291,16 @@ func TestFractionLostReport(t *testing.T) {
buff.Bind(webrtc.RTPParameters{
HeaderExtensions: nil,
Codecs: []webrtc.RTPCodecParameters{opusCodec},
}, opusCodec.RTPCodecCapability)
}, opusCodec.RTPCodecCapability, 0)
for i := 0; i < 15; i++ {
pkt := rtp.Packet{
Header: rtp.Header{SequenceNumber: uint16(i), Timestamp: uint32(i)},
Header: rtp.Header{
Version: 2,
PayloadType: 111,
SequenceNumber: uint16(i),
Timestamp: uint32(i),
SSRC: 123,
},
Payload: []byte{0xff, 0xff, 0xff, 0xfd, 0xb4, 0x9f, 0x94, 0x1},
}
b, err := pkt.Marshal()
+14 -3
View File
@@ -37,6 +37,8 @@ const (
cFirstPacketTimeAdjustThreshold = 15 * time.Second
cPassthroughNTPTimestamp = true
cSequenceNumberLargeJumpThreshold = 1000
)
// -------------------------------------------------------
@@ -172,8 +174,9 @@ type rtpStatsBase struct {
startTime time.Time
endTime time.Time
firstTime time.Time
highestTime time.Time
firstTime time.Time
firstTimeAdjustment time.Duration
highestTime time.Time
lastTransit uint64
lastJitterExtTimestamp uint64
@@ -549,6 +552,7 @@ func (r *rtpStatsBase) maybeAdjustFirstPacketTime(srData *RTCPSenderReportData,
r.logger.Infow("adjusting first packet time, too big, ignoring", getFields()...)
} else {
r.logger.Debugw("adjusting first packet time", getFields()...)
r.firstTimeAdjustment += r.firstTime.Sub(firstTime)
r.firstTime = firstTime
}
}
@@ -606,7 +610,13 @@ func (r *rtpStatsBase) deltaInfo(snapshotID uint32, extStartSN uint64, extHighes
// padding packets delta could be higher than expected due to out-of-order padding packets
packetsPadding := now.packetsPadding - then.packetsPadding
if packetsExpected < packetsPadding {
r.logger.Infow("padding packets more than expected", "packetsExpected", packetsExpected, "packetsPadding", packetsPadding)
r.logger.Infow(
"padding packets more than expected",
"packetsExpected", packetsExpected,
"packetsPadding", packetsPadding,
"startSequenceNumber", then.extStartSN,
"endSequenceNumber", now.extStartSN-1,
)
packetsExpected = 0
} else {
packetsExpected -= packetsPadding
@@ -643,6 +653,7 @@ func (r *rtpStatsBase) MarshalLogObject(e zapcore.ObjectEncoder) error {
e.AddTime("startTime", r.startTime)
e.AddTime("endTime", r.endTime)
e.AddTime("firstTime", r.firstTime)
e.AddDuration("firstTimeAdjustment", r.firstTimeAdjustment)
e.AddTime("highestTime", r.highestTime)
e.AddUint64("bytes", r.bytes)
+19 -13
View File
@@ -28,7 +28,7 @@ import (
)
const (
cHistorySize = 4096
cHistorySize = 8192
// RTCP Sender Reports are re-based to SFU time base so that all subscriber side
// can have the same time base (i. e. SFU time base). To convert publisher side
@@ -214,19 +214,11 @@ func (r *RTPStatsReceiver) Update(
"hdrSize", hdrSize,
"payloadSize", payloadSize,
"paddingSize", paddingSize,
"first", r.srFirst,
"last", r.srNewest,
}
}
if gapSN <= 0 { // duplicate OR out-of-order
if -gapSN >= cNumSequenceNumbers/2 {
if r.largeJumpNegativeCount%100 == 0 {
r.logger.Warnw(
"large sequence number gap negative", nil,
append(getLoggingFields(), "count", r.largeJumpNegativeCount)...,
)
}
r.largeJumpNegativeCount++
}
if gapSN != 0 {
r.packetsOutOfOrder++
}
@@ -246,8 +238,18 @@ func (r *RTPStatsReceiver) Update(
flowState.IsOutOfOrder = true
flowState.ExtSequenceNumber = resSN.ExtendedVal
flowState.ExtTimestamp = resTS.ExtendedVal
if !flowState.IsDuplicate && -gapSN >= cSequenceNumberLargeJumpThreshold {
if r.largeJumpNegativeCount%100 == 0 {
r.logger.Warnw(
"large sequence number gap negative", nil,
append(getLoggingFields(), "count", r.largeJumpNegativeCount)...,
)
}
r.largeJumpNegativeCount++
}
} else { // in-order
if gapSN >= cNumSequenceNumbers/2 || resTS.ExtendedVal < resTS.PreExtendedHighest {
if gapSN >= cSequenceNumberLargeJumpThreshold || resTS.ExtendedVal < resTS.PreExtendedHighest {
if r.largeJumpCount%100 == 0 {
r.logger.Warnw(
"large sequence number gap OR time reversed", nil,
@@ -594,10 +596,14 @@ func (r *RTPStatsReceiver) MarshalLogObject(e zapcore.ObjectEncoder) error {
defer r.lock.RUnlock()
e.AddObject("base", r.rtpStatsBase)
e.AddUint64("extendedStartSN", r.sequenceNumber.GetExtendedStart())
e.AddUint64("extStartSN", r.sequenceNumber.GetExtendedStart())
e.AddUint64("extHighestSN", r.sequenceNumber.GetExtendedHighest())
e.AddUint64("extStartTS", r.timestamp.GetExtendedStart())
e.AddUint64("extHighestTS", r.timestamp.GetExtendedHighest())
e.AddDuration("propagationDelay", r.propagationDelay)
e.AddDuration("longTermDeltaPropagationDelay", r.longTermDeltaPropagationDelay)
return nil
}
+5 -7
View File
@@ -296,15 +296,13 @@ func (r *RTPStatsSender) Update(
"startTime", r.startTime.String(),
"firstTime", r.firstTime.String(),
"highestTime", r.highestTime.String(),
"prevSN", r.extHighestSN,
"highestSN", r.extHighestSN,
"currSN", extSequenceNumber,
"gapSN", gapSN,
"prevTS", r.extHighestTS,
"highestTS", r.extHighestTS,
"currTS", extTimestamp,
"gapTS", extTimestamp - r.extHighestTS,
"gapTS", int64(extTimestamp - r.extHighestTS),
"packetTime", packetTime.String(),
"sequenceNumber", extSequenceNumber,
"timestamp", extTimestamp,
"marker", marker,
"hdrSize", hdrSize,
"payloadSize", payloadSize,
@@ -318,7 +316,7 @@ func (r *RTPStatsSender) Update(
// do not start on a padding only packet
return
}
if -gapSN >= cNumSequenceNumbers/2 {
if -gapSN >= cSequenceNumberLargeJumpThreshold {
if r.largeJumpNegativeCount%100 == 0 {
r.logger.Warnw(
"large sequence number gap negative", nil,
@@ -374,7 +372,7 @@ func (r *RTPStatsSender) Update(
r.setSnInfo(extSequenceNumber, r.extHighestSN, uint16(pktSize), uint8(hdrSize), uint16(payloadSize), marker, true)
}
} else { // in-order
if gapSN >= cNumSequenceNumbers/2 || extTimestamp < r.extHighestTS {
if gapSN >= cSequenceNumberLargeJumpThreshold || extTimestamp < r.extHighestTS {
if r.largeJumpCount%100 == 0 {
r.logger.Warnw(
"large sequence number gap OR time reversed", nil,
+3 -3
View File
@@ -296,7 +296,7 @@ type DownTrack struct {
onStatsUpdate func(dt *DownTrack, stat *livekit.AnalyticsStat)
onMaxSubscribedLayerChanged func(dt *DownTrack, layer int32)
onRttUpdate func(dt *DownTrack, rtt uint32)
onCloseHandler func(willBeResumed bool)
onCloseHandler func(isExpectedToResume bool)
createdAt int64
}
@@ -1218,14 +1218,14 @@ func (d *DownTrack) UpTrackBitrateReport(availableLayers []int32, bitrates Bitra
}
// OnCloseHandler method to be called on remote tracked removed
func (d *DownTrack) OnCloseHandler(fn func(willBeResumed bool)) {
func (d *DownTrack) OnCloseHandler(fn func(isExpectedToResume bool)) {
d.cbMu.Lock()
defer d.cbMu.Unlock()
d.onCloseHandler = fn
}
func (d *DownTrack) getOnCloseHandler() func(willBeResumed bool) {
func (d *DownTrack) getOnCloseHandler() func(isExpectedToResume bool) {
d.cbMu.RLock()
defer d.cbMu.RUnlock()
+16 -14
View File
@@ -234,14 +234,15 @@ type Forwarder struct {
pubMuted bool
resumeBehindThreshold float64
started bool
preStartTime time.Time
extFirstTS uint64
lastSSRC uint32
referenceLayerSpatial int32
dummyStartTSOffset uint64
refInfos [buffer.DefaultMaxLayerSpatial + 1]refInfo
refIsSVC bool
started bool
preStartTime time.Time
extFirstTS uint64
lastSSRC uint32
lastSwitchExtIncomingTS uint64
referenceLayerSpatial int32
dummyStartTSOffset uint64
refInfos [buffer.DefaultMaxLayerSpatial + 1]refInfo
refIsSVC bool
provisional *VideoAllocationProvisional
@@ -569,7 +570,7 @@ func (f *Forwarder) GetMaxSubscribedSpatial() int32 {
return layer
}
func (f *Forwarder) getReferenceLayer() (int32, int32) {
func (f *Forwarder) getRefLayer() (int32, int32) {
if f.lastSSRC == 0 {
return buffer.InvalidLayerSpatial, buffer.InvalidLayerSpatial
}
@@ -595,10 +596,10 @@ func (f *Forwarder) SetRefSenderReport(isSVC bool, layer int32, srData *buffer.R
defer f.lock.Unlock()
f.refIsSVC = isSVC
refLayer, _ := f.getReferenceLayer()
refLayer, _ := f.getRefLayer()
if layer >= 0 && int(layer) < len(f.refInfos) {
f.refInfos[layer] = refInfo{srData, 0, false}
if layer == refLayer {
if layer == refLayer && srData.RTPTimestampExt >= f.lastSwitchExtIncomingTS {
f.refInfos[layer].tsOffset = f.rtpMunger.GetTSOffset()
f.refInfos[layer].isTSOffsetValid = true
}
@@ -644,7 +645,7 @@ func (f *Forwarder) GetSenderReportParams() (int32, uint64, *buffer.RTCPSenderRe
f.lock.RLock()
defer f.lock.RUnlock()
refLayer, currentLayerSpatial := f.getReferenceLayer()
refLayer, currentLayerSpatial := f.getRefLayer()
if refLayer == buffer.InvalidLayerSpatial || !f.refInfos[refLayer].isTSOffsetValid {
return buffer.InvalidLayerSpatial, 0, nil
}
@@ -1567,7 +1568,7 @@ func (f *Forwarder) GetTranslationParams(extPkt *buffer.ExtPacket, layer int32)
}, ErrUnknownKind
}
func (f *Forwarder) getReferenceLayerRTPTimestamp(ts uint32, refLayer, targetLayer int32) (uint32, error) {
func (f *Forwarder) getRefLayerRTPTimestamp(ts uint32, refLayer, targetLayer int32) (uint32, error) {
if refLayer < 0 || int(refLayer) > len(f.refInfos) || targetLayer < 0 || int(targetLayer) > len(f.refInfos) {
return 0, fmt.Errorf("invalid layer(s), refLayer: %d, targetLayer: %d", refLayer, targetLayer)
}
@@ -1671,7 +1672,7 @@ func (f *Forwarder) processSourceSwitch(extPkt *buffer.ExtPacket, layer int32) (
switchingAt := time.Now()
if !f.skipReferenceTS {
var err error
refTS, err = f.getReferenceLayerRTPTimestamp(extPkt.Packet.Timestamp, f.referenceLayerSpatial, layer)
refTS, err = f.getRefLayerRTPTimestamp(extPkt.Packet.Timestamp, f.referenceLayerSpatial, layer)
if err != nil {
// error out if refTS is not available. It can happen when there is no sender report
// for the layer being switched to. Can especially happen at the start of the track when layer switches are
@@ -1880,6 +1881,7 @@ func (f *Forwarder) getTranslationParamsCommon(extPkt *buffer.ExtPacket, layer i
f.logger.Debugw("switching feed", "from", f.lastSSRC, "to", extPkt.Packet.SSRC)
f.lastSSRC = extPkt.Packet.SSRC
f.lastSwitchExtIncomingTS = extPkt.ExtTimestamp
}
tpRTP, err := f.rtpMunger.UpdateAndGetSnTs(extPkt, tp.marker)
+16
View File
@@ -714,6 +714,22 @@ func (w *WebRTCReceiver) forwardRTP(layer int32) {
spatialTracker = w.streamTrackerManager.AddTracker(pkt.Spatial)
}
}
if spatialLayer > buffer.DefaultMaxLayerSpatial { // TODO-REMOVE-AFTER-DEBUG
w.logger.Warnw(
"invalid spatial layer", nil,
"mime", w.codec.MimeType,
"layer", layer,
"spatialLayer", spatialLayer,
"sn", pkt.Packet.SequenceNumber,
"esn", pkt.ExtSequenceNumber,
"timestamp", pkt.Packet.Timestamp,
"ets", pkt.ExtTimestamp,
"payloadSize", len(pkt.Packet.Payload),
"rtpVersion", pkt.Packet.Version,
"payloadType", pkt.Packet.PayloadType,
"ssrc", pkt.Packet.SSRC,
)
}
writeCount := w.downTrackSpreader.Broadcast(func(dt TrackSender) {
_ = dt.WriteRTP(pkt, spatialLayer)
-15
View File
@@ -75,7 +75,6 @@ type RTPMunger struct {
extHighestIncomingSN uint64
snRangeMap *utils.RangeMap[uint64, uint64]
extHighestIncomingTS uint64 // TODO-REMOVE-AFTER-DATA-COLLECTION
extLastSN uint64
extSecondLastSN uint64
@@ -139,7 +138,6 @@ func (r *RTPMunger) SeedLast(state RTPMungerState) {
func (r *RTPMunger) SetLastSnTs(extPkt *buffer.ExtPacket) {
r.extHighestIncomingSN = extPkt.ExtSequenceNumber - 1
r.extHighestIncomingTS = extPkt.ExtTimestamp - 1
r.extLastSN = extPkt.ExtSequenceNumber
r.extSecondLastSN = r.extLastSN - 1
@@ -153,7 +151,6 @@ func (r *RTPMunger) SetLastSnTs(extPkt *buffer.ExtPacket) {
func (r *RTPMunger) UpdateSnTsOffsets(extPkt *buffer.ExtPacket, snAdjust uint64, tsAdjust uint64) {
r.extHighestIncomingSN = extPkt.ExtSequenceNumber - 1
r.extHighestIncomingTS = extPkt.ExtTimestamp - 1
r.snRangeMap.ClearAndResetValue(extPkt.ExtSequenceNumber, extPkt.ExtSequenceNumber-r.extLastSN-snAdjust)
r.updateSnOffset()
@@ -194,18 +191,6 @@ func (r *RTPMunger) UpdateAndGetSnTs(extPkt *buffer.ExtPacket, marker bool) (Tra
// in-order - either contiguous packet with payload OR packet following a gap, may or may not have payload
r.extHighestIncomingSN = extPkt.ExtSequenceNumber
// TODO-REMOVE-AFTER-DATA-COLLECTION
tsDiff := int64(extPkt.ExtTimestamp - r.extHighestIncomingTS)
if tsDiff > 24000 { // 1/2 second at audio clock rate
r.logger.Infow(
"big jump in incoming timestamp",
"last", r.extHighestIncomingTS,
"current", extPkt.ExtTimestamp,
"diff", tsDiff,
)
}
r.extHighestIncomingTS = extPkt.ExtTimestamp
ordering := SequenceNumberOrderingContiguous
if diff > 1 {
ordering = SequenceNumberOrderingGap
+8 -1
View File
@@ -264,7 +264,7 @@ func (s *StreamTrackerManager) RemoveAllTrackers() {
s.trackers[layer] = nil
}
s.availableLayers = make([]int32, 0)
s.maxExpectedLayerFromTrackInfo()
s.maxExpectedLayerFromTrackInfoLocked()
s.paused = false
ddTracker := s.ddTracker
s.ddTracker = nil
@@ -530,6 +530,13 @@ func (s *StreamTrackerManager) removeAvailableLayer(layer int32) {
}
func (s *StreamTrackerManager) maxExpectedLayerFromTrackInfo() {
s.lock.Lock()
defer s.lock.Unlock()
s.maxExpectedLayerFromTrackInfoLocked()
}
func (s *StreamTrackerManager) maxExpectedLayerFromTrackInfoLocked() {
s.maxExpectedLayer = buffer.InvalidLayerSpatial
ti := s.trackInfo.Load()
if ti != nil {
+26
View File
@@ -15,9 +15,12 @@
package utils
import (
"errors"
"fmt"
"strings"
"github.com/pion/interceptor"
"github.com/pion/rtp"
"github.com/pion/webrtc/v3"
)
@@ -51,3 +54,26 @@ func GetHeaderExtensionID(extensions []interceptor.RTPHeaderExtension, extension
}
return 0
}
var (
ErrInvalidRTPVersion = errors.New("invalid RTP version")
ErrRTPPayloadTypeMismatch = errors.New("RTP payload type mismatch")
ErrRTPSSRCMismatch = errors.New("RTP SSRC mismatch")
)
// ValidateRTPPacket checks for a valid RTP packet and returns an error if fields are incorrect
func ValidateRTPPacket(pkt *rtp.Packet, expectedPayloadType uint8, expectedSSRC uint32) error {
if pkt.Version != 2 {
return fmt.Errorf("%w, expected: 2, actual: %d", ErrInvalidRTPVersion, pkt.Version)
}
if expectedPayloadType != 0 && pkt.PayloadType != expectedPayloadType {
return fmt.Errorf("%w, expected: %d, actual: %d", ErrRTPPayloadTypeMismatch, expectedPayloadType, pkt.PayloadType)
}
if expectedSSRC != 0 && pkt.SSRC != expectedSSRC {
return fmt.Errorf("%w, expected: %d, actual: %d", ErrRTPSSRCMismatch, expectedSSRC, pkt.SSRC)
}
return nil
}
+4 -1
View File
@@ -21,8 +21,9 @@ import (
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/livekit/rpc"
"github.com/livekit/protocol/logger"
"github.com/livekit/protocol/rpc"
"github.com/livekit/protocol/utils/guid"
"github.com/livekit/livekit-server/pkg/config"
"github.com/livekit/livekit-server/pkg/routing"
@@ -58,6 +59,7 @@ func (a *analyticsService) SendStats(_ context.Context, stats []*livekit.Analyti
}
for _, stat := range stats {
stat.Id = guid.New("AS_")
stat.AnalyticsKey = a.analyticsKey
stat.Node = a.nodeID
}
@@ -71,6 +73,7 @@ func (a *analyticsService) SendEvent(_ context.Context, event *livekit.Analytics
return
}
event.Id = guid.New("AE_")
event.NodeId = a.nodeID
event.AnalyticsKey = a.analyticsKey
if err := a.events.Send(&livekit.AnalyticsEvents{
+13
View File
@@ -45,6 +45,7 @@ var (
promTrackPublishCounter *prometheus.CounterVec
promTrackSubscribeCounter *prometheus.CounterVec
promSessionStartTime *prometheus.HistogramVec
promSessionDuration *prometheus.HistogramVec
)
func initRoomStats(nodeID string, nodeType livekit.NodeType) {
@@ -100,6 +101,13 @@ func initRoomStats(nodeID string, nodeType livekit.NodeType) {
ConstLabels: prometheus.Labels{"node_id": nodeID, "node_type": nodeType.String()},
Buckets: prometheus.ExponentialBucketsRange(100, 10000, 15),
}, []string{"protocol_version"})
promSessionDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: livekitNamespace,
Subsystem: "session",
Name: "duration_ms",
ConstLabels: prometheus.Labels{"node_id": nodeID, "node_type": nodeType.String()},
Buckets: prometheus.ExponentialBucketsRange(100, 4*60*60*1000, 15),
}, []string{"protocol_version"})
prometheus.MustRegister(promRoomCurrent)
prometheus.MustRegister(promRoomDuration)
@@ -109,6 +117,7 @@ func initRoomStats(nodeID string, nodeType livekit.NodeType) {
prometheus.MustRegister(promTrackPublishCounter)
prometheus.MustRegister(promTrackSubscribeCounter)
prometheus.MustRegister(promSessionStartTime)
prometheus.MustRegister(promSessionDuration)
}
func RoomStarted() {
@@ -186,3 +195,7 @@ func RecordTrackSubscribeFailure(err error, isUserError bool) {
func RecordSessionStartTime(protocolVersion int, d time.Duration) {
promSessionStartTime.WithLabelValues(strconv.Itoa(protocolVersion)).Observe(float64(d.Milliseconds()))
}
func RecordSessionDuration(protocolVersion int, d time.Duration) {
promSessionDuration.WithLabelValues(strconv.Itoa(protocolVersion)).Observe(float64(d.Milliseconds()))
}
+12
View File
@@ -35,6 +35,7 @@ import (
"google.golang.org/protobuf/proto"
"github.com/livekit/mediatransportutil/pkg/rtcconfig"
"github.com/livekit/protocol/auth"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
@@ -110,6 +111,7 @@ type Options struct {
Publish string
ClientInfo *livekit.ClientInfo
DisabledCodecs []webrtc.RTPCodecCapability
TokenCustomizer func(token *auth.AccessToken, grants *auth.VideoGrant)
SignalRequestInterceptor SignalRequestInterceptor
SignalResponseInterceptor SignalResponseInterceptor
}
@@ -563,6 +565,16 @@ func (c *RTCClient) SendIceCandidate(ic *webrtc.ICECandidate, target livekit.Sig
})
}
func (c *RTCClient) SetAttributes(attrs map[string]string) error {
return c.SendRequest(&livekit.SignalRequest{
Message: &livekit.SignalRequest_UpdateMetadata{
UpdateMetadata: &livekit.UpdateParticipantMetadata{
Attributes: attrs,
},
},
})
}
func (c *RTCClient) hasPrimaryEverConnected() bool {
if c.subscriberAsPrimary.Load() {
return c.subscriber.HasEverConnected()
+11 -3
View File
@@ -202,7 +202,11 @@ func createMultiNodeServer(nodeID string, port uint32) *service.LivekitServer {
// creates a client and runs against server
func createRTCClient(name string, port int, opts *testclient.Options) *testclient.RTCClient {
token := joinToken(testRoom, name)
var customizer func(token *auth.AccessToken, grants *auth.VideoGrant)
if opts != nil {
customizer = opts.TokenCustomizer
}
token := joinToken(testRoom, name, customizer)
ws, err := testclient.NewWebSocketConn(fmt.Sprintf("ws://localhost:%d", port), token, opts)
if err != nil {
panic(err)
@@ -241,12 +245,16 @@ func redisClient() *redis.Client {
})
}
func joinToken(room, name string) string {
func joinToken(room, name string, customFn func(token *auth.AccessToken, grants *auth.VideoGrant)) string {
at := auth.NewAccessToken(testApiKey, testApiSecret).
AddGrant(&auth.VideoGrant{RoomJoin: true, Room: room}).
SetIdentity(name).
SetName(name).
SetMetadata("metadata" + name)
grant := &auth.VideoGrant{RoomJoin: true, Room: room}
if customFn != nil {
customFn(at, grant)
}
at.AddGrant(grant)
t, err := at.ToJWT()
if err != nil {
panic(err)
+75
View File
@@ -229,6 +229,81 @@ func TestMultiNodeRefreshToken(t *testing.T) {
})
}
// ensure that token accurately reflects out of band updates
func TestMultiNodeUpdateAttributes(t *testing.T) {
if testing.Short() {
t.SkipNow()
return
}
_, _, finish := setupMultiNodeTest("TestMultiNodeUpdateAttributes")
defer finish()
c1 := createRTCClient("au1", defaultServerPort, &client.Options{
TokenCustomizer: func(token *auth.AccessToken, grants *auth.VideoGrant) {
token.SetAttributes(map[string]string{
"mykey": "au1",
})
},
})
c2 := createRTCClient("au2", secondServerPort, &client.Options{
TokenCustomizer: func(token *auth.AccessToken, grants *auth.VideoGrant) {
token.SetAttributes(map[string]string{
"mykey": "au2",
})
grants.SetCanUpdateOwnMetadata(true)
},
})
waitUntilConnected(t, c1, c2)
testutils.WithTimeout(t, func() string {
rc2 := c1.GetRemoteParticipant(c2.ID())
rc1 := c2.GetRemoteParticipant(c1.ID())
if rc2 == nil || rc1 == nil {
return "participants could not see each other"
}
if rc1.Attributes == nil || rc1.Attributes["mykey"] != "au1" {
return "rc1's initial attributes are incorrect"
}
if rc2.Attributes == nil || rc2.Attributes["mykey"] != "au2" {
return "rc2's initial attributes are incorrect"
}
return ""
})
// this one should not go through
_ = c1.SetAttributes(map[string]string{"mykey": "shouldnotchange"})
_ = c2.SetAttributes(map[string]string{"secondkey": "au2"})
// updates using room API should succeed
_, err := roomClient.UpdateParticipant(contextWithToken(adminRoomToken(testRoom)), &livekit.UpdateParticipantRequest{
Room: testRoom,
Identity: "au1",
Attributes: map[string]string{
"secondkey": "au1",
},
})
require.NoError(t, err)
testutils.WithTimeout(t, func() string {
rc1 := c2.GetRemoteParticipant(c1.ID())
rc2 := c1.GetRemoteParticipant(c2.ID())
if rc1.Attributes["secondkey"] != "au1" {
return "au1's attribute update failed"
}
if rc2.Attributes["secondkey"] != "au2" {
return "au2's attribute update failed"
}
if rc1.Attributes["mykey"] != "au1" {
return "au1's mykey should not change"
}
if rc2.Attributes["mykey"] != "au2" {
return "au2's mykey should not change"
}
return ""
})
}
func TestMultiNodeRevokePublishPermission(t *testing.T) {
_, _, finish := setupMultiNodeTest("TestMultiNodeRevokePublishPermission")
defer finish()
+2 -2
View File
@@ -408,12 +408,12 @@ func TestAutoCreate(t *testing.T) {
waitForServerToStart(s)
token := joinToken(testRoom, "start-before-create")
token := joinToken(testRoom, "start-before-create", nil)
_, err := testclient.NewWebSocketConn(fmt.Sprintf("ws://localhost:%d", defaultServerPort), token, nil)
require.Error(t, err)
// second join should also fail
token = joinToken(testRoom, "start-before-create-2")
token = joinToken(testRoom, "start-before-create-2", nil)
_, err = testclient.NewWebSocketConn(fmt.Sprintf("ws://localhost:%d", defaultServerPort), token, nil)
require.Error(t, err)
})
+1 -1
View File
@@ -14,4 +14,4 @@
package version
const Version = "1.6.1"
const Version = "1.6.2"