Merge remote-tracking branch 'origin/master' into raja_fr

This commit is contained in:
boks1971
2024-03-21 15:37:13 +05:30
72 changed files with 2042 additions and 1412 deletions
+8 -10
View File
@@ -17,9 +17,9 @@ name: Test
on:
workflow_dispatch:
push:
branches: [master]
branches: [ master ]
pull_request:
branches: [master]
branches: [ master ]
jobs:
test:
@@ -35,7 +35,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "1.20"
go-version: "1.21"
- name: Set up gotestfmt
run: go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@v2.4.1
@@ -56,13 +56,11 @@ jobs:
version: latest
args: build
# - name: Static Check
# uses: dominikh/staticcheck-action@v1.3.0
# with:
# checks: '["all", "-ST1000", "-ST1003", "-ST1020", "-ST1021", "-ST1022", "-SA1019"]'
# min-go-version: 1.18
# version: 2022.1.3
# install-go: false
- name: Static Check
uses: amarpal/staticcheck-action@master
with:
checks: '["all", "-ST1000", "-ST1003", "-ST1020", "-ST1021", "-ST1022", "-SA1019"]'
install-go: false
- name: Test
run: |
+7 -4
View File
@@ -18,9 +18,10 @@ name: Release to Docker
on:
workflow_dispatch:
push:
# only publish on version tags
branches:
- master # publish to 'master' tag
tags:
- 'v*.*.*'
- 'v*.*.*' # publish on version tags, updates 'latest' tag
jobs:
docker:
runs-on: ubuntu-latest
@@ -35,13 +36,14 @@ jobs:
livekit/livekit-server
# generate Docker tags based on the following events/attributes
tags: |
type=ref,event=branch
type=semver,pattern=v{{version}}
type=semver,pattern=v{{major}}.{{minor}}
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '>=1.18'
go-version: '>=1.21'
- name: Download Go modules
run: go mod download
@@ -56,6 +58,7 @@ jobs:
uses: docker/setup-buildx-action@v3
- name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
@@ -66,7 +69,7 @@ jobs:
uses: docker/build-push-action@v5
with:
context: .
push: true
push: ${{ github.event_name != 'pull_request' }}
platforms: linux/amd64,linux/arm64
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
+1 -1
View File
@@ -35,7 +35,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '>=1.18'
go-version: '>=1.21'
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v5
+7 -4
View File
@@ -51,6 +51,11 @@ https://docs.livekit.io
- Livestreaming from OBS Studio ([source](https://github.com/livekit-examples/livestream))
- [AI voice assistant using ChatGPT](https://livekit.io/kitt) ([source](https://github.com/livekit-examples/kitt))
## Ecosystem
- [Agents](https://github.com/livekit/agents): build real-time multimodal AI applications with programmable backend participants
- [Egress](https://github.com/livekit/egress): record or multi-stream rooms and export individual tracks
- [Ingress](https://github.com/livekit/ingress): ingest streams from external sources like RTMP, WHIP, HLS, or OBS Studio
## SDKs & Tools
### Client SDKs
@@ -178,14 +183,12 @@ enabling you to build automations that behave like end-users.
| JavaScript (TypeScript) | [server-sdk-js](https://github.com/livekit/server-sdk-js) | [docs](https://docs.livekit.io/server-sdk-js/) |
| Ruby | [server-sdk-ruby](https://github.com/livekit/server-sdk-ruby) | |
| Java (Kotlin) | [server-sdk-kotlin](https://github.com/livekit/server-sdk-kotlin) | |
| Python (community) | [tradablebits/livekit-server-sdk-python](https://github.com/tradablebits/livekit-server-sdk-python) | |
| Python (community) | [python-sdks](https://github.com/livekit/python-sdks) | |
| PHP (community) | [agence104/livekit-server-sdk-php](https://github.com/agence104/livekit-server-sdk-php) | |
### Ecosystem & Tools
### Tools
- [CLI](https://github.com/livekit/livekit-cli) - command line interface & load tester
- [Egress](https://github.com/livekit/egress) - export and record your rooms
- [Ingress](https://github.com/livekit/ingress) - ingest streams from RTMP / OBS Studio
- [Docker image](https://hub.docker.com/r/livekit/livekit-server)
- [Helm charts](https://github.com/livekit/livekit-helm)
+7 -4
View File
@@ -94,8 +94,10 @@ rtc:
# allow_pause: true
# # allows automatic connection fallback to TCP and TURN/TLS (if configured) when UDP has been unstable, default true
# allow_tcp_fallback: true
# # number of packets to buffer in the SFU, defaults to 500
# packet_buffer_size: 500
# # number of packets to buffer in the SFU for video, defaults to 500
# packet_buffer_size_video: 500
# # number of packets to buffer in the SFU for audio, defaults to 200
# packet_buffer_size_audio: 200
# # minimum amount of time between pli/fir rtcp packets being sent to an individual
# # producer. Increasing these times can lead to longer black screens when new participants join,
# # while reducing them can lead to higher stream bitrate.
@@ -168,8 +170,10 @@ keys:
# room:
# # allow rooms to be automatically created when participants join, defaults to true
# # auto_create: false
# # number of seconds to leave a room open when it's empty
# # number of seconds to keep the room open if no one joins
# empty_timeout: 300
# # number of seconds to keep the room open after everyone leaves
# departure_timeout: 20
# # limit number of participants that can be in a room, 0 for no limit
# max_participants: 0
# # only accept specific codecs for clients publishing to this room
@@ -309,4 +313,3 @@ keys:
# # value less or equal than 0 means no limit.
# subscription_limit_video: 0
# subscription_limit_audio: 0
+31 -30
View File
@@ -1,15 +1,15 @@
module github.com/livekit/livekit-server
go 1.20
go 1.21
require (
github.com/avast/retry-go/v4 v4.5.1
github.com/bep/debounce v1.2.1
github.com/d5/tengo/v2 v2.16.1
github.com/d5/tengo/v2 v2.17.0
github.com/dustin/go-humanize v1.0.1
github.com/elliotchance/orderedmap/v2 v2.2.0
github.com/florianl/go-tc v0.4.3
github.com/frostbyte73/core v0.0.9
github.com/frostbyte73/core v0.0.10
github.com/gammazero/deque v0.2.1
github.com/gammazero/workerpool v1.1.3
github.com/google/wire v0.6.0
@@ -18,38 +18,39 @@ require (
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/jxskiss/base62 v1.1.0
github.com/livekit/mageutil v0.0.0-20230125210925-54e8a70427c1
github.com/livekit/mediatransportutil v0.0.0-20240206082112-9bf41dcbce76
github.com/livekit/protocol v1.9.10-0.20240217202122-51aba73c0582
github.com/livekit/psrpc v0.5.3-0.20240209001357-380f59f00c58
github.com/livekit/mediatransportutil v0.0.0-20240302142739-1c3dd691a1b8
github.com/livekit/protocol v1.12.1-0.20240321094538-0d9caadf760e
github.com/livekit/psrpc v0.5.3-0.20240312110212-61ab09477c30
github.com/mackerelio/go-osstat v0.2.4
github.com/magefile/mage v1.15.0
github.com/maxbrunsfeld/counterfeiter/v6 v6.8.1
github.com/mitchellh/go-homedir v1.1.0
github.com/olekukonko/tablewriter v0.0.5
github.com/pion/dtls/v2 v2.2.10
github.com/pion/ice/v2 v2.3.13
github.com/pion/ice/v2 v2.3.14
github.com/pion/interceptor v0.1.25
github.com/pion/rtcp v1.2.13
github.com/pion/rtcp v1.2.14
github.com/pion/rtp v1.8.3
github.com/pion/sctp v1.8.12
github.com/pion/sdp/v3 v3.0.6
github.com/pion/sdp/v3 v3.0.8
github.com/pion/transport/v2 v2.2.4
github.com/pion/turn/v2 v2.1.5
github.com/pion/webrtc/v3 v3.2.28
github.com/pion/webrtc/v3 v3.2.29
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.18.0
github.com/redis/go-redis/v9 v9.4.0
github.com/prometheus/client_golang v1.19.0
github.com/redis/go-redis/v9 v9.5.1
github.com/rs/cors v1.10.1
github.com/stretchr/testify v1.8.4
github.com/stretchr/testify v1.9.0
github.com/thoas/go-funk v0.9.3
github.com/twitchtv/twirp v8.1.3+incompatible
github.com/ua-parser/uap-go v0.0.0-20240113215029-33f8e6d47f38
github.com/urfave/cli/v2 v2.27.1
github.com/urfave/negroni/v3 v3.0.0
github.com/urfave/negroni/v3 v3.1.0
go.uber.org/atomic v1.11.0
golang.org/x/exp v0.0.0-20240213143201-ec583247a57a
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20240318143956-a85f2c67cd81
golang.org/x/sync v0.6.0
google.golang.org/protobuf v1.32.0
google.golang.org/protobuf v1.33.0
gopkg.in/yaml.v3 v3.0.1
)
@@ -61,9 +62,10 @@ require (
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/eapache/channels v1.1.0 // indirect
github.com/eapache/queue v1.1.0 // indirect
github.com/go-jose/go-jose/v3 v3.0.1 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-jose/go-jose/v3 v3.0.3 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/subcommands v1.2.0 // indirect
github.com/google/uuid v1.6.0 // indirect
@@ -71,14 +73,13 @@ require (
github.com/hashicorp/go-retryablehttp v0.7.5 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/josharian/native v1.1.0 // indirect
github.com/klauspost/compress v1.17.6 // indirect
github.com/klauspost/compress v1.17.7 // indirect
github.com/klauspost/cpuid/v2 v2.2.6 // indirect
github.com/lithammer/shortuuid/v4 v4.0.0 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/mdlayher/netlink v1.7.1 // indirect
github.com/mdlayher/socket v0.4.0 // indirect
github.com/nats-io/nats.go v1.32.0 // indirect
github.com/nats-io/nats.go v1.33.1 // indirect
github.com/nats-io/nkeys v0.4.7 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/pion/datachannel v1.5.5 // indirect
@@ -89,20 +90,20 @@ require (
github.com/pion/stun v0.6.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.45.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/puzpuzpuz/xsync/v3 v3.1.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/zeebo/xxh3 v1.0.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.26.0 // indirect
golang.org/x/crypto v0.19.0 // indirect
golang.org/x/mod v0.15.0 // indirect
golang.org/x/net v0.21.0 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/mod v0.16.0 // indirect
golang.org/x/net v0.22.0 // indirect
golang.org/x/sys v0.18.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/tools v0.18.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 // indirect
google.golang.org/grpc v1.61.1 // indirect
golang.org/x/tools v0.19.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240311173647-c811ad7063a7 // indirect
google.golang.org/grpc v1.62.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)
+74 -58
View File
@@ -5,7 +5,9 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bep/debounce v1.2.1 h1:v67fRdBA9UQu2NhLFXrSg0Brw7CexQekrBwDMM8bzeY=
github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
@@ -15,8 +17,8 @@ github.com/cilium/ebpf v0.8.1/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMt
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/d5/tengo/v2 v2.16.1 h1:/N6dqiGu9toqANInZEOQMM8I06icdZnmb+81DG/lZdw=
github.com/d5/tengo/v2 v2.16.1/go.mod h1:XRGjEs5I9jYIKTxly6HCF8oiiilk5E/RYXOZ5b0DZC8=
github.com/d5/tengo/v2 v2.17.0 h1:BWUN9NoJzw48jZKiYDXDIF3QrIVZRm1uV1gTzeZ2lqM=
github.com/d5/tengo/v2 v2.17.0/go.mod h1:XRGjEs5I9jYIKTxly6HCF8oiiilk5E/RYXOZ5b0DZC8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -34,16 +36,18 @@ github.com/florianl/go-tc v0.4.3 h1:xpobG2gFNvEqbclU07zjddALSjqTQTWJkxg5/kRYDpw=
github.com/florianl/go-tc v0.4.3/go.mod h1:uvp6pIlOw7Z8hhfnT5M4+V1hHVgZWRZwwMS8Z0JsRxc=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
github.com/frostbyte73/core v0.0.9 h1:AmE9GjgGpPsWk9ZkmY3HsYUs2hf2tZt+/W6r49URBQI=
github.com/frostbyte73/core v0.0.9/go.mod h1:XsOGqrqe/VEV7+8vJ+3a8qnCIXNbKsoEiu/czs7nrcU=
github.com/frostbyte73/core v0.0.10 h1:D4DQXdPb8ICayz0n75rs4UYTXrUSdxzUfeleuNJORsU=
github.com/frostbyte73/core v0.0.10/go.mod h1:XsOGqrqe/VEV7+8vJ+3a8qnCIXNbKsoEiu/czs7nrcU=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/gammazero/deque v0.2.1 h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0=
github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU=
github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q=
github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc=
github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
@@ -56,18 +60,18 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE=
@@ -109,14 +113,15 @@ github.com/jsimonetti/rtnetlink v0.0.0-20211022192332-93da33804786 h1:N527AHMa79
github.com/jsimonetti/rtnetlink v0.0.0-20211022192332-93da33804786/go.mod h1:v4hqbTdfQngbVSZJVWUhGE/lbTFf9jb+ygmNUDQMuOs=
github.com/jxskiss/base62 v1.1.0 h1:A5zbF8v8WXx2xixnAKD2w+abC+sIzYJX+nxmhA6HWFw=
github.com/jxskiss/base62 v1.1.0/go.mod h1:HhWAlUXvxKThfOlZbcuFzsqwtF5TcqS9ru3y5GfjWAc=
github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI=
github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -125,20 +130,18 @@ github.com/lithammer/shortuuid/v4 v4.0.0 h1:QRbbVkfgNippHOS8PXDkti4NaWeyYfcBTHtw
github.com/lithammer/shortuuid/v4 v4.0.0/go.mod h1:Zs8puNcrvf2rV9rTH51ZLLcj7ZXqQI3lv67aw4KiB1Y=
github.com/livekit/mageutil v0.0.0-20230125210925-54e8a70427c1 h1:jm09419p0lqTkDaKb5iXdynYrzB84ErPPO4LbRASk58=
github.com/livekit/mageutil v0.0.0-20230125210925-54e8a70427c1/go.mod h1:Rs3MhFwutWhGwmY1VQsygw28z5bWcnEYmS1OG9OxjOQ=
github.com/livekit/mediatransportutil v0.0.0-20240206082112-9bf41dcbce76 h1:Zw88krOHni51OzDUlrduYb3m7VcsaKw06TnnDhsQpjg=
github.com/livekit/mediatransportutil v0.0.0-20240206082112-9bf41dcbce76/go.mod h1:GBzn9xL+mivI1pW+tyExcKgbc0VOc29I9yJsNcAVaAc=
github.com/livekit/protocol v1.9.10-0.20240217202122-51aba73c0582 h1:hSOSQs2pKF6TD9CEwu7+LatqfvF/LiyIbeCoUPCGRho=
github.com/livekit/protocol v1.9.10-0.20240217202122-51aba73c0582/go.mod h1:/kviHT6yTNqHdZ9QsvRuxAHf7LaBROa7qe5naT1oVrU=
github.com/livekit/psrpc v0.5.3-0.20240209001357-380f59f00c58 h1:yH55rBGLRO+ict2mu6bKZ5iPwTIrIwU1i0ydgThi4+k=
github.com/livekit/psrpc v0.5.3-0.20240209001357-380f59f00c58/go.mod h1:cQjxg1oCxYHhxxv6KJH1gSvdtCHQoRZCHgPdm5N8v2g=
github.com/livekit/mediatransportutil v0.0.0-20240302142739-1c3dd691a1b8 h1:xawydPEACNO5Ncs2LgioTjWghXQ0eUN1q1RnVUUyVnI=
github.com/livekit/mediatransportutil v0.0.0-20240302142739-1c3dd691a1b8/go.mod h1:jwKUCmObuiEDH0iiuJHaGMXwRs3RjrB4G6qqgkr/5oE=
github.com/livekit/protocol v1.12.1-0.20240321094538-0d9caadf760e h1:XR7vPLN7c/R6R87UARoBW2csVKd7RuTXwG+XsjczbT0=
github.com/livekit/protocol v1.12.1-0.20240321094538-0d9caadf760e/go.mod h1:G7Pa985GhZv2MCC3UnUocBhZfi3DsWA6WmlSkkpQYTM=
github.com/livekit/psrpc v0.5.3-0.20240312110212-61ab09477c30 h1:3GEU6vP+KLTTOEqsFKW+PgIUp+i+s0jaUqogQc/hb7M=
github.com/livekit/psrpc v0.5.3-0.20240312110212-61ab09477c30/go.mod h1:CQUBSPfYYAaevg1TNCc6/aYsa8DJH4jSRFdCeSZk5u0=
github.com/mackerelio/go-osstat v0.2.4 h1:qxGbdPkFo65PXOb/F/nhDKpF2nGmGaCFDLXoZjJTtUs=
github.com/mackerelio/go-osstat v0.2.4/go.mod h1:Zy+qzGdZs3A9cuIqmgbJvwbmLQH9dJvtio5ZjJTbdlQ=
github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=
github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/maxbrunsfeld/counterfeiter/v6 v6.8.1 h1:NicmruxkeqHjDv03SfSxqmaLuisddudfP3h5wdXFbhM=
github.com/maxbrunsfeld/counterfeiter/v6 v6.8.1/go.mod h1:eyp4DdUJAKkr9tvxR3jWhw2mDK7CWABMG5r9uyaKC7I=
github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43/go.mod h1:+t7E0lkKfbBsebllff1xdTmyJt8lH37niI6kwFk9OTo=
@@ -162,8 +165,8 @@ github.com/mdlayher/socket v0.4.0 h1:280wsy40IC9M9q1uPGcLBwXpcTQDtoGwVt+BNoITxIw
github.com/mdlayher/socket v0.4.0/go.mod h1:xxFqz5GRCUN3UEOm9CZqEJsAbe1C8OwSK46NlmWuVoc=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/nats-io/nats.go v1.32.0 h1:Bx9BZS+aXYlxW08k8Gd3yR2s73pV5XSoAQUyp1Kwvp0=
github.com/nats-io/nats.go v1.32.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nats.go v1.33.1 h1:8TxLZZ/seeEfR97qV0/Bl939tpDnt2Z2fK3HkPypj70=
github.com/nats-io/nats.go v1.33.1/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
@@ -180,13 +183,15 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/pion/datachannel v1.5.5 h1:10ef4kwdjije+M9d7Xm9im2Y3O6A6ccQb0zcqZcJew8=
github.com/pion/datachannel v1.5.5/go.mod h1:iMz+lECmfdCMqFRhXhcA/219B0SQlbpoR2V118yimL0=
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
github.com/pion/dtls/v2 v2.2.10 h1:u2Axk+FyIR1VFTPurktB+1zoEPGIW3bmyj3LEFrXjAA=
github.com/pion/dtls/v2 v2.2.10/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
github.com/pion/ice/v2 v2.3.13 h1:xOxP+4V9nSDlUaGFRf/LvAuGHDXRcjIdsbbXPK/w7c8=
github.com/pion/ice/v2 v2.3.13/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw=
github.com/pion/ice/v2 v2.3.14 h1:A7UaEmalw12Fko8YO0qguUbWyE69BnN4mDEqT7cLWQI=
github.com/pion/ice/v2 v2.3.14/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw=
github.com/pion/interceptor v0.1.25 h1:pwY9r7P6ToQ3+IF0bajN0xmk/fNw/suTgaTdlwTDmhc=
github.com/pion/interceptor v0.1.25/go.mod h1:wkbPYAak5zKsfpVDYMtEfWEy8D4zL+rpxCxPImLOg3Y=
github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
@@ -197,16 +202,16 @@ github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
github.com/pion/rtcp v1.2.10/go.mod h1:ztfEwXZNLGyF1oQDttz/ZKIBaeeg/oWbRYqzBM9TL1I=
github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4=
github.com/pion/rtcp v1.2.13 h1:+EQijuisKwm/8VBs8nWllr0bIndR7Lf7cZG200mpbNo=
github.com/pion/rtcp v1.2.13/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4=
github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE=
github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4=
github.com/pion/rtp v1.8.2/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU=
github.com/pion/rtp v1.8.3 h1:VEHxqzSVQxCkKDSHro5/4IUUG1ea+MFdqR2R3xSpNU8=
github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU=
github.com/pion/sctp v1.8.5/go.mod h1:SUFFfDpViyKejTAdwD1d/HQsCu+V/40cCs2nZIvC3s0=
github.com/pion/sctp v1.8.12 h1:2VX50pedElH+is6FI+OKyRTeN5oy4mrk2HjnGa3UCmY=
github.com/pion/sctp v1.8.12/go.mod h1:cMLT45jqw3+jiJCrtHVwfQLnfR0MGZ4rgOJwUOIqLkI=
github.com/pion/sdp/v3 v3.0.6 h1:WuDLhtuFUUVpTfus9ILC4HRyHsW6TdugjEX/QY9OiUw=
github.com/pion/sdp/v3 v3.0.6/go.mod h1:iiFWFpQO8Fy3S5ldclBkpXqmWy02ns78NOKoLLL0YQw=
github.com/pion/sdp/v3 v3.0.8 h1:yd/wkrS0nzXEAb+uwv1TL3SG/gzsTiXHVOtXtD7EKl0=
github.com/pion/sdp/v3 v3.0.8/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M=
github.com/pion/srtp/v2 v2.0.18 h1:vKpAXfawO9RtTRKZJbG4y0v1b11NZxQnxRl85kGuUlo=
github.com/pion/srtp/v2 v2.0.18/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA=
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
@@ -223,43 +228,49 @@ github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9
github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
github.com/pion/turn/v2 v2.1.5 h1:tTyy7TM3DCoX9IxTt/yHc/bThiRLyXK3T1YbNcgx9k4=
github.com/pion/turn/v2 v2.1.5/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
github.com/pion/webrtc/v3 v3.2.28 h1:ienStxZ6HcjtH2UlmnFpMM0loENiYjaX437uIUpQSKo=
github.com/pion/webrtc/v3 v3.2.28/go.mod h1:PNRCEuQlibrmuBhOTnol9j6KkIbUG11aHLEfNpUYey0=
github.com/pion/webrtc/v3 v3.2.29 h1:flXjxjlqpp3FjkpSSBKwv7UOfbUvan9+gFY6A5ZaAn4=
github.com/pion/webrtc/v3 v3.2.29/go.mod h1:M+5YSvBDPAkHHRwGXlplIFBQI5mXm6Y4byns1OpiX68=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/redis/go-redis/v9 v9.4.0 h1:Yzoz33UZw9I/mFhx4MNrB6Fk+XHO1VukNcCa1+lwyKk=
github.com/redis/go-redis/v9 v9.4.0/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
github.com/puzpuzpuz/xsync/v3 v3.1.0 h1:EewKT7/LNac5SLiEblJeUu8z5eERHrmRLnMQL2d7qX4=
github.com/puzpuzpuz/xsync/v3 v3.1.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/redis/go-redis/v9 v9.5.1 h1:H1X4D3yHPaYrkL5X06Wh6xNVM/pX0Ft4RV0vMGvLBh8=
github.com/redis/go-redis/v9 v9.5.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo=
github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8=
github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/thoas/go-funk v0.9.3 h1:7+nAEx3kn5ZJcnDm2Bh23N2yOtweO14bi//dvRtgLpw=
github.com/thoas/go-funk v0.9.3/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q=
github.com/twitchtv/twirp v8.1.3+incompatible h1:+F4TdErPgSUbMZMwp13Q/KgDVuI7HJXP61mNV3/7iuU=
@@ -268,24 +279,25 @@ github.com/ua-parser/uap-go v0.0.0-20240113215029-33f8e6d47f38 h1:F04Na0QJP9GJrw
github.com/ua-parser/uap-go v0.0.0-20240113215029-33f8e6d47f38/go.mod h1:BUbeWZiieNxAuuADTBNb3/aeje6on3DhU3rpWsQSB1E=
github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho=
github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/urfave/negroni/v3 v3.0.0 h1:Vo8CeZfu1lFR9gW8GnAb6dOGCJyijfil9j/jKKc/JhU=
github.com/urfave/negroni/v3 v3.0.0/go.mod h1:jWvnX03kcSjDBl/ShB0iHvx5uOs7mAzZXW+JvJ5XYAs=
github.com/urfave/negroni/v3 v3.1.0 h1:lzmuxGSpnJCT/ujgIAjkU3+LW3NX8alCglO/L6KjIGQ=
github.com/urfave/negroni/v3 v3.1.0/go.mod h1:jWvnX03kcSjDBl/ShB0iHvx5uOs7mAzZXW+JvJ5XYAs=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
@@ -294,17 +306,18 @@ golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIi
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE=
golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/exp v0.0.0-20240318143956-a85f2c67cd81 h1:6R2FC06FonbXQ8pK11/PDFY6N6LWlf9KlzibaCapmqc=
golang.org/x/exp v0.0.0-20240318143956-a85f2c67cd81/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -333,8 +346,8 @@ golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -386,8 +399,9 @@ golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -398,6 +412,7 @@ golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -417,16 +432,16 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 h1:FSL3lRCkhaPFxqi0s9o+V4UI2WTzAVOvkgbd4kVV4Wg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014/go.mod h1:SaPjaZGWb0lPqs6Ittu0spdfrOArqji4ZdeP5IC/9N4=
google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY=
google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240311173647-c811ad7063a7 h1:8EeVk1VKMD+GD/neyEHGmz7pFblqPjHoi+PGQIlLx2s=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240311173647-c811ad7063a7/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -435,12 +450,13 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+16 -7
View File
@@ -47,8 +47,9 @@ const (
StreamTrackerTypePacket StreamTrackerType = "packet"
StreamTrackerTypeFrame StreamTrackerType = "frame"
StatsUpdateInterval = time.Second * 10
TelemetryStatsUpdateInterval = time.Second * 30
StatsUpdateInterval = time.Second * 10
TelemetryStatsUpdateInterval = time.Second * 30
TelemetryNonMediaStatsUpdateInterval = time.Minute * 5
)
var (
@@ -76,7 +77,7 @@ type Config struct {
Region string `yaml:"region,omitempty"`
SignalRelay SignalRelayConfig `yaml:"signal_relay,omitempty"`
PSRPC rpc.PSRPCConfig `yaml:"psrpc,omitempty"`
// LogLevel is deprecated
// Deprecated: LogLevel is deprecated
LogLevel string `yaml:"log_level,omitempty"`
Logging LoggingConfig `yaml:"logging,omitempty"`
Limit LimitConfig `yaml:"limit,omitempty"`
@@ -91,8 +92,12 @@ type RTCConfig struct {
StrictACKs bool `yaml:"strict_acks,omitempty"`
// Number of packets to buffer for NACK
// Deprecated: use PacketBufferSizeVideo and PacketBufferSizeAudio
PacketBufferSize int `yaml:"packet_buffer_size,omitempty"`
// Number of packets to buffer for NACK - video
PacketBufferSizeVideo int `yaml:"packet_buffer_size_video,omitempty"`
// Number of packets to buffer for NACK - audio
PacketBufferSizeAudio int `yaml:"packet_buffer_size_audio,omitempty"`
// Throttle periods for pli/fir rtcp packets
PLIThrottle PLIThrottleConfig `yaml:"pli_throttle,omitempty"`
@@ -227,6 +232,7 @@ type RoomConfig struct {
EnabledCodecs []CodecSpec `yaml:"enabled_codecs,omitempty"`
MaxParticipants uint32 `yaml:"max_participants,omitempty"`
EmptyTimeout uint32 `yaml:"empty_timeout,omitempty"`
DepartureTimeout uint32 `yaml:"departure_timeout,omitempty"`
EnableRemoteUnmute bool `yaml:"enable_remote_unmute,omitempty"`
MaxMetadataSize uint32 `yaml:"max_metadata_size,omitempty"`
PlayoutDelay PlayoutDelayConfig `yaml:"playout_delay,omitempty"`
@@ -327,8 +333,10 @@ var DefaultConfig = Config{
ICEPortRangeEnd: 0,
STUNServers: []string{},
},
PacketBufferSize: 500,
StrictACKs: true,
PacketBufferSize: 500,
PacketBufferSizeVideo: 500,
PacketBufferSizeAudio: 200,
StrictACKs: true,
PLIThrottle: PLIThrottleConfig{
LowQuality: 500 * time.Millisecond,
MidQuality: time.Second,
@@ -475,7 +483,8 @@ var DefaultConfig = Config{
{Mime: webrtc.MimeTypeVP9},
{Mime: webrtc.MimeTypeAV1},
},
EmptyTimeout: 5 * 60,
EmptyTimeout: 5 * 60,
DepartureTimeout: 20,
},
Logging: LoggingConfig{
PionLevel: "error",
+1 -1
View File
@@ -246,7 +246,7 @@ func (r *RedisRouter) keepaliveWorker(startedChan chan error) {
for ping := range pings.Channel() {
if time.Since(time.Unix(ping.Timestamp, 0)) > statsUpdateInterval {
logger.Infow("keep alive too old, skipping", "timestamp", ping.Timestamp)
break
continue
}
r.nodeMu.Lock()
+1 -1
View File
@@ -55,7 +55,7 @@ func NewSignalClient(nodeID livekit.NodeID, bus psrpc.MessageBus, config config.
c, err := rpc.NewTypedSignalClient(
nodeID,
bus,
middleware.WithClientMetrics(prometheus.PSRPCMetricsObserver{}),
middleware.WithClientMetrics(rpc.PSRPCMetricsObserver{}),
psrpc.WithClientChannelSize(config.StreamBufferSize),
)
if err != nil {
+10 -2
View File
@@ -39,7 +39,8 @@ type WebRTCConfig struct {
}
type ReceiverConfig struct {
PacketBufferSize int
PacketBufferSizeVideo int
PacketBufferSizeAudio int
}
type RTPHeaderExtensionConfig struct {
@@ -72,6 +73,12 @@ func NewWebRTCConfig(conf *config.Config) (*WebRTCConfig, error) {
if rtcConf.PacketBufferSize == 0 {
rtcConf.PacketBufferSize = 500
}
if rtcConf.PacketBufferSizeVideo == 0 {
rtcConf.PacketBufferSizeVideo = rtcConf.PacketBufferSize
}
if rtcConf.PacketBufferSizeAudio == 0 {
rtcConf.PacketBufferSizeAudio = rtcConf.PacketBufferSize
}
// publisher configuration
publisherConfig := DirectionConfig{
@@ -129,7 +136,8 @@ func NewWebRTCConfig(conf *config.Config) (*WebRTCConfig, error) {
return &WebRTCConfig{
WebRTCConfig: *webRTCConfig,
Receiver: ReceiverConfig{
PacketBufferSize: rtcConf.PacketBufferSize,
PacketBufferSizeVideo: rtcConf.PacketBufferSizeVideo,
PacketBufferSizeAudio: rtcConf.PacketBufferSizeAudio,
},
Publisher: publisherConfig,
Subscriber: subscriberConfig,
+35 -14
View File
@@ -40,7 +40,8 @@ type DynacastManager struct {
maxSubscribedQuality map[string]livekit.VideoQuality
committedMaxSubscribedQuality map[string]livekit.VideoQuality
maxSubscribedQualityDebounce func(func())
maxSubscribedQualityDebounce func(func())
maxSubscribedQualityDebouncePending bool
qualityNotifyOpQueue *utils.OpsQueue
@@ -58,8 +59,15 @@ func NewDynacastManager(params DynacastManagerParams) *DynacastManager {
dynacastQuality: make(map[string]*DynacastQuality),
maxSubscribedQuality: make(map[string]livekit.VideoQuality),
committedMaxSubscribedQuality: make(map[string]livekit.VideoQuality),
maxSubscribedQualityDebounce: debounce.New(params.DynacastPauseDelay),
qualityNotifyOpQueue: utils.NewOpsQueue("quality-notify", 64, true),
qualityNotifyOpQueue: utils.NewOpsQueue(utils.OpsQueueParams{
Name: "quality-notify",
MinSize: 64,
FlushOnStop: true,
Logger: params.Logger,
}),
}
if params.DynacastPauseDelay > 0 {
d.maxSubscribedQualityDebounce = debounce.New(params.DynacastPauseDelay)
}
d.qualityNotifyOpQueue.Start()
return d
@@ -222,21 +230,32 @@ func (d *DynacastManager) update(force bool) {
return
}
if downgradesOnly {
d.params.Logger.Debugw("debouncing quality downgrade",
"committedMaxSubscribedQuality", d.committedMaxSubscribedQuality,
"maxSubscribedQuality", d.maxSubscribedQuality,
)
d.maxSubscribedQualityDebounce(func() {
d.update(true)
})
if downgradesOnly && d.maxSubscribedQualityDebounce != nil {
if !d.maxSubscribedQualityDebouncePending {
d.params.Logger.Debugw("debouncing quality downgrade",
"committedMaxSubscribedQuality", d.committedMaxSubscribedQuality,
"maxSubscribedQuality", d.maxSubscribedQuality,
)
d.maxSubscribedQualityDebounce(func() {
d.update(true)
})
d.maxSubscribedQualityDebouncePending = true
} else {
d.params.Logger.Debugw("quality downgrade waiting for debounce",
"committedMaxSubscribedQuality", d.committedMaxSubscribedQuality,
"maxSubscribedQuality", d.maxSubscribedQuality,
)
}
d.lock.Unlock()
return
}
}
// clear debounce on send
d.maxSubscribedQualityDebounce(func() {})
if d.maxSubscribedQualityDebounce != nil {
d.maxSubscribedQualityDebounce(func() {})
d.maxSubscribedQualityDebouncePending = false
}
d.params.Logger.Debugw("committing quality change",
"force", force,
@@ -291,9 +310,11 @@ func (d *DynacastManager) enqueueSubscribedQualityChange() {
}
}
d.params.Logger.Debugw("subscribedMaxQualityChange",
d.params.Logger.Debugw(
"subscribedMaxQualityChange",
"subscribedCodecs", subscribedCodecs,
"maxSubscribedQualities", maxSubscribedQualities)
"maxSubscribedQualities", maxSubscribedQualities,
)
d.qualityNotifyOpQueue.Enqueue(func() {
d.onSubscribedMaxQualityChange(subscribedCodecs, maxSubscribedQualities)
})
+33 -6
View File
@@ -16,8 +16,10 @@ package rtc
import (
"context"
"math"
"strings"
"sync"
"time"
"github.com/pion/rtcp"
"github.com/pion/webrtc/v3"
@@ -32,6 +34,7 @@ import (
"github.com/livekit/livekit-server/pkg/sfu/buffer"
"github.com/livekit/livekit-server/pkg/sfu/connectionquality"
"github.com/livekit/livekit-server/pkg/telemetry"
util "github.com/livekit/mediatransportutil"
)
// MediaTrack represents a WebRTC track that needs to be forwarded
@@ -47,6 +50,8 @@ type MediaTrack struct {
dynacastManager *DynacastManager
lock sync.RWMutex
rttFromXR atomic.Bool
}
type MediaTrackParams struct {
@@ -183,12 +188,14 @@ func (t *MediaTrack) UpdateCodecCid(codecs []*livekit.SimulcastCodec) {
// AddReceiver adds a new RTP receiver to the track, returns true when receiver represents a new codec
func (t *MediaTrack) AddReceiver(receiver *webrtc.RTPReceiver, track *webrtc.TrackRemote, mid string) bool {
var newCodec bool
buff, rtcpReader := t.params.BufferFactory.GetBufferPair(uint32(track.SSRC()))
ssrc := uint32(track.SSRC())
buff, rtcpReader := t.params.BufferFactory.GetBufferPair(ssrc)
if buff == nil || rtcpReader == nil {
t.params.Logger.Errorw("could not retrieve buffer pair", nil)
return newCodec
}
var lastRR uint32
rtcpReader.OnPacket(func(bytes []byte) {
pkts, err := rtcp.Unmarshal(bytes)
if err != nil {
@@ -199,9 +206,29 @@ func (t *MediaTrack) AddReceiver(receiver *webrtc.RTPReceiver, track *webrtc.Tra
for _, pkt := range pkts {
switch pkt := pkt.(type) {
case *rtcp.SourceDescription:
// do nothing for now
case *rtcp.SenderReport:
buff.SetSenderReportData(pkt.RTPTime, pkt.NTPTime)
if pkt.SSRC == uint32(track.SSRC()) {
buff.SetSenderReportData(pkt.RTPTime, pkt.NTPTime)
}
case *rtcp.ExtendedReport:
rttFromXR:
for _, report := range pkt.Reports {
if rr, ok := report.(*rtcp.DLRRReportBlock); ok {
for _, dlrrReport := range rr.Reports {
if dlrrReport.LastRR <= lastRR {
continue
}
nowNTP := util.ToNtpTime(time.Now())
nowNTP32 := uint32(nowNTP >> 16)
ntpDiff := nowNTP32 - dlrrReport.LastRR - dlrrReport.DLRR
rtt := uint32(math.Ceil(float64(ntpDiff) * 1000.0 / 65536.0))
buff.SetRTT(rtt)
t.rttFromXR.Store(true)
lastRR = dlrrReport.LastRR
break rttFromXR
}
}
}
}
}
})
@@ -257,11 +284,9 @@ func (t *MediaTrack) AddReceiver(receiver *webrtc.RTPReceiver, track *webrtc.Tra
sfu.WithStreamTrackers(),
)
newWR.OnCloseHandler(func() {
t.params.Logger.Infow("webrtc receiver closed")
t.MediaTrackReceiver.SetClosing()
t.MediaTrackReceiver.ClearReceiver(mime, false)
if t.MediaTrackReceiver.TryClose() {
t.params.Logger.Infow("mediaTrack closed")
if t.dynacastManager != nil {
t.dynacastManager.Close()
}
@@ -361,7 +386,9 @@ func (t *MediaTrack) GetConnectionScoreAndQuality() (float32, livekit.Connection
}
func (t *MediaTrack) SetRTT(rtt uint32) {
t.MediaTrackReceiver.SetRTT(rtt)
if !t.rttFromXR.Load() {
t.MediaTrackReceiver.SetRTT(rtt)
}
}
func (t *MediaTrack) HasPendingCodec() bool {
+22 -1
View File
@@ -102,6 +102,7 @@ type MediaTrackReceiver struct {
trackInfo *livekit.TrackInfo
potentialCodecs []webrtc.RTPCodecParameters
state mediaTrackReceiverState
willBeResumed bool
onSetupReceiver func(mime string)
onMediaLossFeedback func(dt *sfu.DownTrack, report *rtcp.ReceiverReport)
@@ -259,6 +260,7 @@ func (t *MediaTrackReceiver) ClearReceiver(mime string, willBeResumed bool) {
for idx, receiver := range receivers {
if strings.EqualFold(receiver.Codec().MimeType, mime) {
receivers[idx] = receivers[len(receivers)-1]
receivers[len(receivers)-1] = nil
receivers = receivers[:len(receivers)-1]
break
}
@@ -274,6 +276,8 @@ func (t *MediaTrackReceiver) ClearAllReceivers(willBeResumed bool) {
t.lock.Lock()
receivers := t.receivers
t.receivers = nil
t.willBeResumed = willBeResumed
t.lock.Unlock()
for _, r := range receivers {
@@ -481,7 +485,24 @@ func (t *MediaTrackReceiver) AddSubscriber(sub types.LocalParticipant) (types.Su
Logger: tLogger,
DisableRed: t.trackInfo.GetDisableRed() || !t.params.AudioConfig.ActiveREDEncoding,
})
return t.MediaTrackSubscriptions.AddSubscriber(sub, wr)
subTrack, err := t.MediaTrackSubscriptions.AddSubscriber(sub, wr)
// media track could have been closed while adding subscription
remove := false
willBeResumed := false
t.lock.RLock()
if t.state != mediaTrackReceiverStateOpen {
willBeResumed = t.willBeResumed
remove = true
}
t.lock.RUnlock()
if remove {
_ = t.MediaTrackSubscriptions.RemoveSubscriber(sub.ID(), willBeResumed)
return nil, ErrNotOpen
}
return subTrack, err
}
// RemoveSubscriber removes participant from subscription
+6 -2
View File
@@ -103,11 +103,14 @@ func (t *MediaTrackSubscriptions) AddSubscriber(sub types.LocalParticipant, wr *
t.subscribedTracksMu.Unlock()
var rtcpFeedback []webrtc.RTCPFeedback
var maxTrack int
switch t.params.MediaTrack.Kind() {
case livekit.TrackType_AUDIO:
rtcpFeedback = t.params.SubscriberConfig.RTCPFeedback.Audio
maxTrack = t.params.ReceiverConfig.PacketBufferSizeAudio
case livekit.TrackType_VIDEO:
rtcpFeedback = t.params.SubscriberConfig.RTCPFeedback.Video
maxTrack = t.params.ReceiverConfig.PacketBufferSizeVideo
}
codecs := wr.Codecs()
for _, c := range codecs {
@@ -130,11 +133,12 @@ func (t *MediaTrackSubscriptions) AddSubscriber(sub types.LocalParticipant, wr *
BufferFactory: sub.GetBufferFactory(),
SubID: subscriberID,
StreamID: streamID,
MaxTrack: t.params.ReceiverConfig.PacketBufferSize,
MaxTrack: maxTrack,
PlayoutDelayLimit: sub.GetPlayoutDelayConfig(),
Pacer: sub.GetPacer(),
Trailer: trailer,
Logger: LoggerWithTrack(sub.GetLogger().WithComponent(sutils.ComponentSub), trackID, t.params.IsRelayed),
RTCPWriter: sub.WriteSubscriberRTCP,
})
if err != nil {
return nil, err
@@ -187,7 +191,7 @@ func (t *MediaTrackSubscriptions) AddSubscriber(sub types.LocalParticipant, wr *
downTrack.OnMaxLayerChanged(func(dt *sfu.DownTrack, layer int32) {
if t.onSubscriberMaxQualityChange != nil {
t.onSubscriberMaxQualityChange(subscriberID, dt.Codec(), layer)
t.onSubscriberMaxQualityChange(dt.SubscriberID(), dt.Codec(), layer)
}
})
+117 -80
View File
@@ -33,6 +33,12 @@ import (
"go.uber.org/atomic"
"google.golang.org/protobuf/proto"
"github.com/livekit/mediatransportutil/pkg/twcc"
"github.com/livekit/protocol/auth"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
"github.com/livekit/protocol/utils"
"github.com/livekit/livekit-server/pkg/config"
"github.com/livekit/livekit-server/pkg/routing"
"github.com/livekit/livekit-server/pkg/rtc/supervisor"
@@ -46,11 +52,6 @@ import (
"github.com/livekit/livekit-server/pkg/telemetry"
"github.com/livekit/livekit-server/pkg/telemetry/prometheus"
sutils "github.com/livekit/livekit-server/pkg/utils"
"github.com/livekit/mediatransportutil/pkg/twcc"
"github.com/livekit/protocol/auth"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
"github.com/livekit/protocol/utils"
)
const (
@@ -171,8 +172,6 @@ type ParticipantImpl struct {
pendingTracksLock utils.RWMutex
pendingTracks map[string]*pendingTrackInfo
pendingPublishingTracks map[livekit.TrackID]*pendingTrackInfo
// migrated in tracks that have not fired need close at participant close
pendingMigratedTracks []*MediaTrack
// supported codecs
enabledPublishCodecs []*livekit.Codec
@@ -212,7 +211,7 @@ type ParticipantImpl struct {
onStateChange func(p types.LocalParticipant, state livekit.ParticipantInfo_State)
onMigrateStateChange func(p types.LocalParticipant, migrateState types.MigrateState)
onParticipantUpdate func(types.LocalParticipant)
onDataPacket func(types.LocalParticipant, *livekit.DataPacket)
onDataPacket func(types.LocalParticipant, livekit.DataPacket_Kind, *livekit.DataPacket)
migrateState atomic.Value // types.MigrateState
@@ -242,8 +241,12 @@ func NewParticipant(params ParticipantParams) (*ParticipantImpl, error) {
return nil, ErrMissingGrants
}
p := &ParticipantImpl{
params: params,
pubRTCPQueue: sutils.NewOpsQueue("pub-rtcp", 64, false),
params: params,
pubRTCPQueue: sutils.NewOpsQueue(sutils.OpsQueueParams{
Name: "pub-rtcp",
MinSize: 64,
Logger: params.Logger,
}),
pendingTracks: make(map[string]*pendingTrackInfo),
pendingPublishingTracks: make(map[livekit.TrackID]*pendingTrackInfo),
connectedAt: time.Now(),
@@ -262,7 +265,7 @@ func NewParticipant(params ParticipantParams) (*ParticipantImpl, error) {
}
p.closeReason.Store(types.ParticipantCloseReasonNone)
p.version.Store(params.InitialVersion)
p.timedVersion.Update(params.VersionGenerator.New())
p.timedVersion.Update(params.VersionGenerator.Next())
p.migrateState.Store(types.MigrateStateInit)
p.state.Store(livekit.ParticipantInfo_JOINING)
p.grants = params.Grants
@@ -465,7 +468,7 @@ func (p *ParticipantImpl) SetPermission(permission *livekit.ParticipantPermissio
if canSubscribe {
// reconcile everything
p.SubscriptionManager.queueReconcile("")
p.SubscriptionManager.ReconcileAll()
} else {
// revoke all subscriptions
for _, st := range p.SubscriptionManager.GetSubscribedTracks() {
@@ -492,26 +495,32 @@ func (p *ParticipantImpl) CanSkipBroadcast() bool {
}
func (p *ParticipantImpl) ToProtoWithVersion() (*livekit.ParticipantInfo, utils.TimedVersion) {
v := p.version.Load()
piv := p.timedVersion.Load()
if p.dirty.Swap(false) {
v = p.version.Inc()
piv = p.params.VersionGenerator.Next()
p.timedVersion.Update(&piv)
if p.dirty.Load() {
p.lock.Lock()
if p.dirty.Swap(false) {
p.version.Inc()
p.timedVersion.Update(p.params.VersionGenerator.Next())
}
p.lock.Unlock()
}
grants := p.ClaimGrants()
p.lock.RLock()
v := p.version.Load()
piv := p.timedVersion
pi := &livekit.ParticipantInfo{
Sid: string(p.params.SID),
Identity: string(p.params.Identity),
Name: p.grants.Name,
Name: grants.Name,
State: p.State(),
JoinedAt: p.ConnectedAt().Unix(),
Version: v,
Permission: p.grants.Video.ToPermission(),
Metadata: p.grants.Metadata,
Permission: grants.Video.ToPermission(),
Metadata: grants.Metadata,
Region: p.params.Region,
IsPublisher: p.IsPublisher(),
Kind: grants.GetParticipantKind(),
}
p.lock.RUnlock()
@@ -621,7 +630,7 @@ func (p *ParticipantImpl) OnParticipantUpdate(callback func(types.LocalParticipa
p.lock.Unlock()
}
func (p *ParticipantImpl) OnDataPacket(callback func(types.LocalParticipant, *livekit.DataPacket)) {
func (p *ParticipantImpl) OnDataPacket(callback func(types.LocalParticipant, livekit.DataPacket_Kind, *livekit.DataPacket)) {
p.lock.Lock()
p.onDataPacket = callback
p.lock.Unlock()
@@ -709,10 +718,12 @@ func (p *ParticipantImpl) handleMigrateTracks() {
if mt != nil {
addedTracks = append(addedTracks, mt)
} else {
p.pubLogger.Warnw("could not find migrated track", nil, "cid", cid)
p.pubLogger.Warnw("could not find migrated track, migration failed", nil, "cid", cid)
p.pendingTracksLock.Unlock()
p.IssueFullReconnect(types.ParticipantCloseReasonMigrateCodecMismatch)
return
}
}
p.pendingMigratedTracks = append(p.pendingMigratedTracks, addedTracks...)
if len(addedTracks) != 0 {
p.dirty.Store(true)
@@ -728,18 +739,6 @@ func (p *ParticipantImpl) handleMigrateTracks() {
}()
}
func (p *ParticipantImpl) removePendingMigratedTrack(mt *MediaTrack) {
p.pendingTracksLock.Lock()
for i, t := range p.pendingMigratedTracks {
if t == mt {
p.pendingMigratedTracks[i] = p.pendingMigratedTracks[len(p.pendingMigratedTracks)-1]
p.pendingMigratedTracks = p.pendingMigratedTracks[:len(p.pendingMigratedTracks)-1]
break
}
}
p.pendingTracksLock.Unlock()
}
// AddTrack is called when client intends to publish track.
// records track details and lets client know it's ok to proceed
func (p *ParticipantImpl) AddTrack(req *livekit.AddTrackRequest) {
@@ -810,14 +809,9 @@ func (p *ParticipantImpl) Close(sendLeave bool, reason types.ParticipantCloseRea
p.pendingTracksLock.Lock()
p.pendingTracks = make(map[string]*pendingTrackInfo)
pendingMigratedTracksToClose := p.pendingMigratedTracks
p.pendingMigratedTracks = p.pendingMigratedTracks[:0]
p.pendingPublishingTracks = make(map[livekit.TrackID]*pendingTrackInfo)
p.pendingTracksLock.Unlock()
for _, t := range pendingMigratedTracksToClose {
t.Close(isExpectedToResume)
}
p.UpTrackManager.Close(isExpectedToResume)
p.updateState(livekit.ParticipantInfo_DISCONNECTED)
@@ -868,22 +862,7 @@ func (p *ParticipantImpl) clearMigrationTimer() {
p.lock.Unlock()
}
func (p *ParticipantImpl) MaybeStartMigration(force bool, onStart func()) bool {
allTransportConnected := p.TransportManager.HasSubscriberEverConnected()
if p.IsPublisher() {
allTransportConnected = allTransportConnected && p.TransportManager.HasPublisherEverConnected()
}
if !force && !allTransportConnected {
return false
}
if onStart != nil {
onStart()
}
p.sendLeaveRequest(types.ParticipantCloseReasonMigrationRequested, true, false, true)
p.CloseSignalConnection(types.SignallingCloseReasonMigration)
func (p *ParticipantImpl) setupMigrationTimerLocked() {
//
// On subscriber peer connection, remote side will try ICE on both
// pre- and post-migration ICE candidates as the migrating out
@@ -895,9 +874,6 @@ func (p *ParticipantImpl) MaybeStartMigration(force bool, onStart func()) bool {
// to try and succeed. If not, close the subscriber peer connection
// and help the remote side to narrow down its ICE candidate pool.
//
p.clearMigrationTimer()
p.lock.Lock()
p.migrationTimer = time.AfterFunc(migrationWaitDuration, func() {
p.clearMigrationTimer()
@@ -916,11 +892,45 @@ func (p *ParticipantImpl) MaybeStartMigration(force bool, onStart func()) bool {
p.TransportManager.SubscriberClose()
})
}
func (p *ParticipantImpl) MaybeStartMigration(force bool, onStart func()) bool {
allTransportConnected := p.TransportManager.HasSubscriberEverConnected()
if p.IsPublisher() {
allTransportConnected = allTransportConnected && p.TransportManager.HasPublisherEverConnected()
}
if !force && !allTransportConnected {
return false
}
if onStart != nil {
onStart()
}
p.sendLeaveRequest(types.ParticipantCloseReasonMigrationRequested, true, false, true)
p.CloseSignalConnection(types.SignallingCloseReasonMigration)
p.clearMigrationTimer()
p.lock.Lock()
p.setupMigrationTimerLocked()
p.lock.Unlock()
return true
}
func (p *ParticipantImpl) NotifyMigration() {
p.lock.Lock()
defer p.lock.Unlock()
if p.migrationTimer != nil {
// already set up
return
}
p.setupMigrationTimerLocked()
}
func (p *ParticipantImpl) SetMigrateState(s types.MigrateState) {
preState := p.MigrateState()
if preState == types.MigrateStateComplete || preState == s {
@@ -928,6 +938,9 @@ func (p *ParticipantImpl) SetMigrateState(s types.MigrateState) {
}
p.params.Logger.Debugw("SetMigrateState", "state", s)
if s == types.MigrateStateComplete {
p.handleMigrateTracks()
}
p.migrateState.Store(s)
p.dirty.Store(true)
@@ -936,7 +949,7 @@ func (p *ParticipantImpl) SetMigrateState(s types.MigrateState) {
p.TransportManager.ProcessPendingPublisherOffer()
case types.MigrateStateComplete:
p.handleMigrateTracks()
p.TransportManager.ProcessPendingPublisherDataChannels()
}
@@ -1402,7 +1415,7 @@ func (p *ParticipantImpl) onSubscriberOffer(offer webrtc.SessionDescription) err
}
func (p *ParticipantImpl) removePublishedTrack(track types.MediaTrack) {
p.RemovePublishedTrack(track, false, false)
p.RemovePublishedTrack(track, false, true)
if p.ProtocolVersion().SupportsUnpublish() {
p.sendTrackUnpublished(track.ID())
} else {
@@ -1464,8 +1477,8 @@ func (p *ParticipantImpl) onDataMessage(kind livekit.DataPacket_Kind, data []byt
p.dataChannelStats.AddBytes(uint64(len(data)), false)
dp := livekit.DataPacket{}
if err := proto.Unmarshal(data, &dp); err != nil {
dp := &livekit.DataPacket{}
if err := proto.Unmarshal(data, dp); err != nil {
p.pubLogger.Warnw("could not parse data packet", err)
return
}
@@ -1473,6 +1486,12 @@ func (p *ParticipantImpl) onDataMessage(kind livekit.DataPacket_Kind, data []byt
// trust the channel that it came in as the source of truth
dp.Kind = kind
if p.Hidden() {
dp.ParticipantIdentity = ""
} else {
dp.ParticipantIdentity = string(p.params.Identity)
}
// only forward on user payloads
switch payload := dp.Value.(type) {
case *livekit.DataPacket_User:
@@ -1480,14 +1499,34 @@ func (p *ParticipantImpl) onDataMessage(kind livekit.DataPacket_Kind, data []byt
onDataPacket := p.onDataPacket
p.lock.RUnlock()
if onDataPacket != nil {
u := payload.User
if p.Hidden() {
payload.User.ParticipantSid = ""
payload.User.ParticipantIdentity = ""
u.ParticipantSid = ""
u.ParticipantIdentity = ""
} else {
payload.User.ParticipantSid = string(p.params.SID)
payload.User.ParticipantIdentity = string(p.params.Identity)
u.ParticipantSid = string(p.params.SID)
u.ParticipantIdentity = string(p.params.Identity)
}
if dp.ParticipantIdentity != "" {
u.ParticipantIdentity = dp.ParticipantIdentity
} else {
dp.ParticipantIdentity = u.ParticipantIdentity
}
if len(dp.DestinationIdentities) != 0 {
u.DestinationIdentities = dp.DestinationIdentities
} else {
dp.DestinationIdentities = u.DestinationIdentities
}
onDataPacket(p, kind, dp)
}
case *livekit.DataPacket_SipDtmf:
if p.grants.GetParticipantKind() == livekit.ParticipantInfo_SIP {
p.lock.RLock()
onDataPacket := p.onDataPacket
p.lock.RUnlock()
if onDataPacket != nil {
onDataPacket(p, kind, dp)
}
onDataPacket(p, &dp)
}
default:
p.pubLogger.Warnw("received unsupported data packet", nil, "payload", payload)
@@ -1953,9 +1992,9 @@ func (p *ParticipantImpl) mediaTrackReceived(track *webrtc.TrackRemote, rtpRecei
}
ti.MimeType = track.Codec().MimeType
if utils.NewTimedVersionFromProto(ti.Version).IsZero() {
if utils.TimedVersionFromProto(ti.Version).IsZero() {
// only assign version on a fresh publish, i. e. avoid updating version in scenarios like migration
ti.Version = p.params.VersionGenerator.New().ToProto()
ti.Version = p.params.VersionGenerator.Next().ToProto()
}
mt = p.addMediaTrack(signalCid, track.ID(), ti)
newTrack = true
@@ -1964,9 +2003,7 @@ func (p *ParticipantImpl) mediaTrackReceived(track *webrtc.TrackRemote, rtpRecei
p.pendingTracksLock.Unlock()
if mt.AddReceiver(rtpReceiver, track, mid) {
p.removePendingMigratedTrack(mt)
}
mt.AddReceiver(rtpReceiver, track, mid)
if newTrack {
go func() {
@@ -1986,7 +2023,7 @@ func (p *ParticipantImpl) addMigratedTrack(cid string, ti *livekit.TrackInfo) *M
p.pubLogger.Infow("add migrated track", "cid", cid, "trackID", ti.Sid, "track", logger.Proto(ti))
rtpReceiver := p.TransportManager.GetPublisherRTPReceiver(ti.Mid)
if rtpReceiver == nil {
p.pubLogger.Errorw("could not find receiver for migrated track", nil, "trackID", ti.Sid)
p.pubLogger.Errorw("could not find receiver for migrated track", nil, "trackID", ti.Sid, "mid", ti.Mid)
return nil
}
@@ -2104,7 +2141,7 @@ func (p *ParticipantImpl) addMediaTrack(signalCid string, sdpCid string, ti *liv
p.dirty.Store(true)
p.pubLogger.Infow("track unpublished", "trackID", ti.Sid, "track", logger.Proto(ti))
p.pubLogger.Debugw("track unpublished", "trackID", ti.Sid, "track", logger.Proto(ti))
if onTrackUnpublished := p.getOnTrackUnpublished(); onTrackUnpublished != nil {
onTrackUnpublished(p, mt)
}
@@ -2468,19 +2505,19 @@ func codecsFromMediaDescription(m *sdp.MediaDescription) (out []sdp.Codec, err e
return out, nil
}
func (p *ParticipantImpl) SendDataPacket(dp *livekit.DataPacket, data []byte) error {
func (p *ParticipantImpl) SendDataPacket(kind livekit.DataPacket_Kind, encoded []byte) error {
if p.State() != livekit.ParticipantInfo_ACTIVE {
return ErrDataChannelUnavailable
}
err := p.TransportManager.SendDataPacket(dp, data)
err := p.TransportManager.SendDataPacket(kind, encoded)
if err != nil {
if (errors.Is(err, sctp.ErrStreamClosed) || errors.Is(err, io.ErrClosedPipe)) && p.params.ReconnectOnDataChannelError {
p.params.Logger.Infow("issuing full reconnect on data channel error", "error", err)
p.IssueFullReconnect(types.ParticipantCloseReasonDataChannelError)
}
} else {
p.dataChannelStats.AddBytes(uint64(len(data)), true)
p.dataChannelStats.AddBytes(uint64(len(encoded)), true)
}
return err
}
+3
View File
@@ -25,6 +25,7 @@ import (
"go.uber.org/atomic"
"google.golang.org/protobuf/proto"
"github.com/livekit/livekit-server/pkg/sfu/buffer"
"github.com/livekit/livekit-server/pkg/telemetry/telemetryfakes"
"github.com/livekit/protocol/auth"
"github.com/livekit/protocol/livekit"
@@ -730,6 +731,8 @@ func newParticipantForTestWithOpts(identity livekit.ParticipantIdentity, opts *p
if err != nil {
panic(err)
}
ff := buffer.NewFactoryOfBufferFactory(500, 200)
rtcConf.SetBufferFactory(ff.CreateBufferFactory())
grants := &auth.ClaimGrants{
Video: &auth.VideoGrant{},
}
+1
View File
@@ -157,6 +157,7 @@ func (p *ParticipantImpl) setCodecPreferencesVideoForPublisher(offer webrtc.Sess
for i, attr := range unmatchVideo.Attributes {
if strings.Contains(attr.Value, dd.ExtensionURI) {
unmatchVideo.Attributes[i] = unmatchVideo.Attributes[len(unmatchVideo.Attributes)-1]
unmatchVideo.Attributes[len(unmatchVideo.Attributes)-1] = sdp.Attribute{}
unmatchVideo.Attributes = unmatchVideo.Attributes[:len(unmatchVideo.Attributes)-1]
break
}
+3 -3
View File
@@ -19,10 +19,11 @@ import (
"time"
"github.com/frostbyte73/core"
"github.com/livekit/livekit-server/pkg/rtc/types"
"github.com/livekit/livekit-server/pkg/telemetry"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
"github.com/livekit/livekit-server/pkg/rtc/types"
"github.com/livekit/livekit-server/pkg/telemetry"
)
const (
@@ -51,7 +52,6 @@ func NewParticipantTrafficLoad(params ParticipantTrafficLoadParams) *Participant
p := &ParticipantTrafficLoad{
params: params,
tracksStatsMedia: make(map[livekit.TrackID]*livekit.RTPStats),
closed: core.NewFuse(),
}
go p.reporter()
return p
+39 -67
View File
@@ -20,6 +20,7 @@ import (
"fmt"
"io"
"math"
"slices"
"sort"
"strings"
"sync"
@@ -47,8 +48,7 @@ import (
)
const (
DefaultEmptyTimeout = 5 * 60 // 5m
AudioLevelQuantization = 8 // ideally power of 2 to minimize float decimal
AudioLevelQuantization = 8 // ideally power of 2 to minimize float decimal
invAudioLevelQuantization = 1.0 / AudioLevelQuantization
subscriberUpdateInterval = 3 * time.Second
@@ -59,8 +59,7 @@ const (
var (
// var to allow unit test override
RoomDepartureGrace uint32 = 20
roomUpdateInterval = 5 * time.Second // frequency to update room participant counts
roomUpdateInterval = 5 * time.Second // frequency to update room participant counts
)
type broadcastOptions struct {
@@ -87,7 +86,7 @@ type Room struct {
joinedAt atomic.Int64
// time that the last participant left the room
leftAt atomic.Int64
holds atomic.Int32
holds atomic.Int32
lock sync.RWMutex
@@ -139,6 +138,7 @@ func NewRoom(
room *livekit.Room,
internal *livekit.RoomInternal,
config WebRTCConfig,
roomConfig config.RoomConfig,
audioConfig *config.AudioConfig,
serverInfo *livekit.ServerInfo,
telemetry telemetry.TelemetryService,
@@ -164,7 +164,7 @@ func NewRoom(
participantOpts: make(map[livekit.ParticipantIdentity]*ParticipantOptions),
participantRequestSources: make(map[livekit.ParticipantIdentity]routing.MessageSource),
hasPublished: make(map[livekit.ParticipantIdentity]bool),
bufferFactory: buffer.NewFactoryOfBufferFactory(config.Receiver.PacketBufferSize),
bufferFactory: buffer.NewFactoryOfBufferFactory(config.Receiver.PacketBufferSizeVideo, config.Receiver.PacketBufferSizeAudio),
batchedUpdates: make(map[livekit.ParticipantIdentity]*participantUpdate),
closed: make(chan struct{}),
trailer: []byte(utils.RandomSecret()),
@@ -172,13 +172,16 @@ func NewRoom(
disconnectSignalOnResumeNoMessagesParticipants: make(map[livekit.ParticipantIdentity]*disconnectSignalOnResumeNoMessages),
}
r.protoProxy = utils.NewProtoProxy[*livekit.Room](roomUpdateInterval, r.updateProto)
if r.protoRoom.EmptyTimeout == 0 {
r.protoRoom.EmptyTimeout = DefaultEmptyTimeout
r.protoRoom.EmptyTimeout = roomConfig.EmptyTimeout
}
if r.protoRoom.DepartureTimeout == 0 {
r.protoRoom.DepartureTimeout = roomConfig.DepartureTimeout
}
if r.protoRoom.CreationTime == 0 {
r.protoRoom.CreationTime = time.Now().Unix()
}
r.protoProxy = utils.NewProtoProxy[*livekit.Room](roomUpdateInterval, r.updateProto)
if agentClient != nil {
go func() {
@@ -707,7 +710,7 @@ func (r *Room) SyncState(participant types.LocalParticipant, state *livekit.Sync
}
func (r *Room) UpdateSubscriptionPermission(participant types.LocalParticipant, subscriptionPermission *livekit.SubscriptionPermission) error {
if err := participant.UpdateSubscriptionPermission(subscriptionPermission, utils.TimedVersion{}, r.GetParticipant, r.GetParticipantByID); err != nil {
if err := participant.UpdateSubscriptionPermission(subscriptionPermission, utils.TimedVersion(0), r.GetParticipantByID); err != nil {
return err
}
for _, track := range participant.GetPublishedTracks() {
@@ -774,7 +777,7 @@ func (r *Room) CloseIfEmpty() {
if r.FirstJoinedAt() > 0 && r.LastLeftAt() > 0 {
elapsed = time.Now().Unix() - r.LastLeftAt()
// need to give time in case participant is reconnecting
timeout = RoomDepartureGrace
timeout = r.protoRoom.DepartureTimeout
} else {
elapsed = time.Now().Unix() - r.protoRoom.CreationTime
timeout = r.protoRoom.EmptyTimeout
@@ -818,14 +821,8 @@ func (r *Room) OnParticipantChanged(f func(participant types.LocalParticipant))
r.onParticipantChanged = f
}
func (r *Room) SendDataPacket(up *livekit.UserPacket, kind livekit.DataPacket_Kind) {
dp := &livekit.DataPacket{
Kind: kind,
Value: &livekit.DataPacket_User{
User: up,
},
}
r.onDataPacket(nil, dp)
func (r *Room) SendDataPacket(dp *livekit.DataPacket, kind livekit.DataPacket_Kind) {
r.onDataPacket(nil, kind, dp)
}
func (r *Room) SetMetadata(metadata string) <-chan struct{} {
@@ -1083,8 +1080,8 @@ func (r *Room) onParticipantUpdate(p types.LocalParticipant) {
}
}
func (r *Room) onDataPacket(source types.LocalParticipant, dp *livekit.DataPacket) {
BroadcastDataPacketForRoom(r, source, dp, r.Logger)
func (r *Room) onDataPacket(source types.LocalParticipant, kind livekit.DataPacket_Kind, dp *livekit.DataPacket) {
BroadcastDataPacketForRoom(r, source, kind, dp, r.Logger)
}
func (r *Room) subscribeToExistingTracks(p types.LocalParticipant) {
@@ -1169,33 +1166,6 @@ func (r *Room) sendParticipantUpdates(updates []*participantUpdate) {
}
}
// for protocol 2, send all active speakers
func (r *Room) sendActiveSpeakers(speakers []*livekit.SpeakerInfo) {
dp := &livekit.DataPacket{
Kind: livekit.DataPacket_LOSSY,
Value: &livekit.DataPacket_Speaker{
Speaker: &livekit.ActiveSpeakerUpdate{
Speakers: speakers,
},
},
}
var dpData []byte
for _, p := range r.GetParticipants() {
if p.ProtocolVersion().HandlesDataPackets() && !p.ProtocolVersion().SupportsSpeakerChanged() {
if dpData == nil {
var err error
dpData, err = proto.Marshal(dp)
if err != nil {
r.Logger.Errorw("failed to marshal ActiveSpeaker data packet", err)
return
}
}
_ = p.SendDataPacket(dp, dpData)
}
}
}
// for protocol 3, send only changed updates
func (r *Room) sendSpeakerChanges(speakers []*livekit.SpeakerInfo) {
for _, p := range r.GetParticipants() {
@@ -1344,7 +1314,6 @@ func (r *Room) audioUpdateWorker() {
// see if an update is needed
if len(changedSpeakers) > 0 {
r.sendActiveSpeakers(activeSpeakers)
r.sendSpeakerChanges(changedSpeakers)
}
@@ -1493,18 +1462,34 @@ func (r *Room) DebugInfo() map[string]interface{} {
// ------------------------------------------------------------
func BroadcastDataPacketForRoom(r types.Room, source types.LocalParticipant, dp *livekit.DataPacket, logger logger.Logger) {
func BroadcastDataPacketForRoom(r types.Room, source types.LocalParticipant, kind livekit.DataPacket_Kind, dp *livekit.DataPacket, logger logger.Logger) {
dp.Kind = kind // backward compatibility
dest := dp.GetUser().GetDestinationSids()
var dpData []byte
destIdentities := dp.GetUser().GetDestinationIdentities()
if u := dp.GetUser(); u != nil {
if len(dp.DestinationIdentities) == 0 {
dp.DestinationIdentities = u.DestinationIdentities
} else {
u.DestinationIdentities = dp.DestinationIdentities
}
if dp.ParticipantIdentity != "" {
u.ParticipantIdentity = dp.ParticipantIdentity
} else {
dp.ParticipantIdentity = u.ParticipantIdentity
}
}
destIdentities := dp.DestinationIdentities
participants := r.GetLocalParticipants()
capacity := len(dest)
capacity := len(destIdentities)
if capacity == 0 {
capacity = len(dest)
}
if capacity == 0 {
capacity = len(participants)
}
destParticipants := make([]types.LocalParticipant, 0, capacity)
var dpData []byte
for _, op := range participants {
if op.State() != livekit.ParticipantInfo_ACTIVE {
continue
@@ -1513,20 +1498,7 @@ func BroadcastDataPacketForRoom(r types.Room, source types.LocalParticipant, dp
continue
}
if len(dest) > 0 || len(destIdentities) > 0 {
found := false
for _, dID := range dest {
if op.ID() == livekit.ParticipantID(dID) {
found = true
break
}
}
for _, dIdentity := range destIdentities {
if op.Identity() == livekit.ParticipantIdentity(dIdentity) {
found = true
break
}
}
if !found {
if !slices.Contains(dest, string(op.ID())) && !slices.Contains(destIdentities, string(op.Identity())) {
continue
}
}
@@ -1542,7 +1514,7 @@ func BroadcastDataPacketForRoom(r types.Room, source types.LocalParticipant, dp
}
utils.ParallelExec(destParticipants, dataForwardLoadBalanceThreshold, 1, func(op types.LocalParticipant) {
err := op.SendDataPacket(dp, dpData)
err := op.SendDataPacket(kind, dpData)
if err != nil && !errors.Is(err, io.ErrClosedPipe) && !errors.Is(err, sctp.ErrStreamClosed) &&
!errors.Is(err, ErrTransportFailure) && !errors.Is(err, ErrDataChannelBufferFull) {
op.GetLogger().Infow("send data packet error", "error", err)
+118 -54
View File
@@ -22,10 +22,11 @@ import (
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
"github.com/livekit/livekit-server/version"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/webhook"
"github.com/livekit/livekit-server/version"
"github.com/livekit/livekit-server/pkg/config"
"github.com/livekit/livekit-server/pkg/rtc/types"
"github.com/livekit/livekit-server/pkg/rtc/types/typesfakes"
@@ -48,8 +49,6 @@ const (
func init() {
config.InitLoggerFromConfig(&config.DefaultConfig.Logging)
// allow immediate closure in testing
RoomDepartureGrace = 1
roomUpdateInterval = defaultDelay
}
@@ -377,7 +376,7 @@ func TestRoomClosure(t *testing.T) {
rm.lock.Unlock()
rm.RemoveParticipant(p.Identity(), p.ID(), types.ParticipantCloseReasonClientRequestLeave)
time.Sleep(time.Duration(RoomDepartureGrace)*time.Second + defaultDelay)
time.Sleep(time.Duration(rm.ToProto().DepartureTimeout)*time.Second + defaultDelay)
rm.CloseIfEmpty()
require.Len(t, rm.GetParticipants(), 0)
@@ -571,65 +570,126 @@ func TestActiveSpeakers(t *testing.T) {
func TestDataChannel(t *testing.T) {
t.Parallel()
t.Run("participants should receive data", func(t *testing.T) {
rm := newRoomWithParticipants(t, testRoomOpts{num: 3})
defer rm.Close(types.ParticipantCloseReasonNone)
participants := rm.GetParticipants()
p := participants[0].(*typesfakes.FakeLocalParticipant)
const (
curAPI = iota
legacySID
legacyIdentity
)
modes := []int{
curAPI, legacySID, legacyIdentity,
}
modeNames := []string{
"cur", "legacy sid", "legacy identity",
}
packet := livekit.DataPacket{
Kind: livekit.DataPacket_RELIABLE,
Value: &livekit.DataPacket_User{
User: &livekit.UserPacket{
ParticipantSid: string(p.ID()),
Payload: []byte("message.."),
},
},
setSource := func(mode int, dp *livekit.DataPacket, p types.LocalParticipant) {
switch mode {
case curAPI:
dp.ParticipantIdentity = string(p.Identity())
case legacySID:
dp.GetUser().ParticipantSid = string(p.ID())
case legacyIdentity:
dp.GetUser().ParticipantIdentity = string(p.Identity())
}
p.OnDataPacketArgsForCall(0)(p, &packet)
}
setDest := func(mode int, dp *livekit.DataPacket, p types.LocalParticipant) {
switch mode {
case curAPI:
dp.DestinationIdentities = []string{string(p.Identity())}
case legacySID:
dp.GetUser().DestinationSids = []string{string(p.ID())}
case legacyIdentity:
dp.GetUser().DestinationIdentities = []string{string(p.Identity())}
}
}
// ensure everyone has received the packet
for _, op := range participants {
fp := op.(*typesfakes.FakeLocalParticipant)
if fp == p {
require.Zero(t, fp.SendDataPacketCallCount())
continue
}
require.Equal(t, 1, fp.SendDataPacketCallCount())
dp, _ := fp.SendDataPacketArgsForCall(0)
require.Equal(t, packet.Value, dp.Value)
t.Run("participants should receive data", func(t *testing.T) {
for _, mode := range modes {
mode := mode
t.Run(modeNames[mode], func(t *testing.T) {
rm := newRoomWithParticipants(t, testRoomOpts{num: 3})
defer rm.Close(types.ParticipantCloseReasonNone)
participants := rm.GetParticipants()
p := participants[0].(*typesfakes.FakeLocalParticipant)
packet := &livekit.DataPacket{
Kind: livekit.DataPacket_RELIABLE,
Value: &livekit.DataPacket_User{
User: &livekit.UserPacket{
Payload: []byte("message.."),
},
},
}
setSource(mode, packet, p)
packetExp := proto.Clone(packet).(*livekit.DataPacket)
if mode != legacySID {
packetExp.ParticipantIdentity = string(p.Identity())
packetExp.GetUser().ParticipantIdentity = string(p.Identity())
}
encoded, _ := proto.Marshal(packetExp)
p.OnDataPacketArgsForCall(0)(p, packet.Kind, packet)
// ensure everyone has received the packet
for _, op := range participants {
fp := op.(*typesfakes.FakeLocalParticipant)
if fp == p {
require.Zero(t, fp.SendDataPacketCallCount())
continue
}
require.Equal(t, 1, fp.SendDataPacketCallCount())
_, got := fp.SendDataPacketArgsForCall(0)
require.Equal(t, encoded, got)
}
})
}
})
t.Run("only one participant should receive the data", func(t *testing.T) {
rm := newRoomWithParticipants(t, testRoomOpts{num: 4})
defer rm.Close(types.ParticipantCloseReasonNone)
participants := rm.GetParticipants()
p := participants[0].(*typesfakes.FakeLocalParticipant)
p1 := participants[1].(*typesfakes.FakeLocalParticipant)
for _, mode := range modes {
mode := mode
t.Run(modeNames[mode], func(t *testing.T) {
rm := newRoomWithParticipants(t, testRoomOpts{num: 4})
defer rm.Close(types.ParticipantCloseReasonNone)
participants := rm.GetParticipants()
p := participants[0].(*typesfakes.FakeLocalParticipant)
p1 := participants[1].(*typesfakes.FakeLocalParticipant)
packet := livekit.DataPacket{
Kind: livekit.DataPacket_RELIABLE,
Value: &livekit.DataPacket_User{
User: &livekit.UserPacket{
ParticipantSid: string(p.ID()),
Payload: []byte("message to p1.."),
DestinationSids: []string{string(p1.ID())},
},
},
}
p.OnDataPacketArgsForCall(0)(p, &packet)
packet := &livekit.DataPacket{
Kind: livekit.DataPacket_RELIABLE,
Value: &livekit.DataPacket_User{
User: &livekit.UserPacket{
Payload: []byte("message to p1.."),
},
},
}
setSource(mode, packet, p)
setDest(mode, packet, p1)
// only p1 should receive the data
for _, op := range participants {
fp := op.(*typesfakes.FakeLocalParticipant)
if fp != p1 {
require.Zero(t, fp.SendDataPacketCallCount())
}
packetExp := proto.Clone(packet).(*livekit.DataPacket)
if mode != legacySID {
packetExp.ParticipantIdentity = string(p.Identity())
packetExp.GetUser().ParticipantIdentity = string(p.Identity())
packetExp.DestinationIdentities = []string{string(p1.Identity())}
packetExp.GetUser().DestinationIdentities = []string{string(p1.Identity())}
}
encoded, _ := proto.Marshal(packetExp)
p.OnDataPacketArgsForCall(0)(p, packet.Kind, packet)
// only p1 should receive the data
for _, op := range participants {
fp := op.(*typesfakes.FakeLocalParticipant)
if fp != p1 {
require.Zero(t, fp.SendDataPacketCallCount())
}
}
require.Equal(t, 1, p1.SendDataPacketCallCount())
_, got := p1.SendDataPacketArgsForCall(0)
require.Equal(t, encoded, got)
})
}
require.Equal(t, 1, p1.SendDataPacketCallCount())
dp, _ := p1.SendDataPacketArgsForCall(0)
require.Equal(t, packet.Value, dp.Value)
})
t.Run("publishing disallowed", func(t *testing.T) {
@@ -648,7 +708,7 @@ func TestDataChannel(t *testing.T) {
},
}
if p.CanPublishData() {
p.OnDataPacketArgsForCall(0)(p, &packet)
p.OnDataPacketArgsForCall(0)(p, packet.Kind, &packet)
}
// no one should've been sent packet
@@ -737,6 +797,10 @@ func newRoomWithParticipants(t *testing.T, opts testRoomOpts) *Room {
&livekit.Room{Name: "room"},
nil,
WebRTCConfig{},
config.RoomConfig{
EmptyTimeout: 5 * 60,
DepartureTimeout: 1,
},
&config.AudioConfig{
UpdateInterval: audioUpdateInterval,
SmoothIntervals: opts.audioSmoothIntervals,
+3 -3
View File
@@ -56,7 +56,7 @@ type SubscribedTrack struct {
versionGenerator utils.TimedVersionGenerator
settingsLock sync.Mutex
settings *livekit.UpdateTrackSettings
settingsVersion *utils.TimedVersion
settingsVersion utils.TimedVersion
bindLock sync.Mutex
bound bool
@@ -243,7 +243,7 @@ func (t *SubscribedTrack) applySettings() {
}
t.logger.Debugw("updating subscriber track settings", "settings", logger.Proto(t.settings))
t.settingsVersion = t.versionGenerator.New()
t.settingsVersion = t.versionGenerator.Next()
settingsVersion := t.settingsVersion
t.settingsLock.Unlock()
@@ -264,7 +264,7 @@ func (t *SubscribedTrack) applySettings() {
}
t.settingsLock.Lock()
if settingsVersion.Compare(t.settingsVersion) != 0 {
if settingsVersion != t.settingsVersion {
// a newer settings has superceded this one
t.settingsLock.Unlock()
return
+8 -2
View File
@@ -255,6 +255,10 @@ func (m *SubscriptionManager) WaitUntilSubscribed(timeout time.Duration) error {
return context.DeadlineExceeded
}
func (m *SubscriptionManager) ReconcileAll() {
m.queueReconcile(trackIDForReconcileSubscriptions)
}
func (m *SubscriptionManager) setDesired(trackID livekit.TrackID, desired bool) (*trackSubscription, bool) {
m.lock.RLock()
defer m.lock.RUnlock()
@@ -628,9 +632,11 @@ func (m *SubscriptionManager) handleSubscribedTrackClose(s *trackSubscription, w
var relieveFromLimits bool
switch subTrack.MediaTrack().Kind() {
case livekit.TrackType_VIDEO:
relieveFromLimits = m.params.SubscriptionLimitVideo > 0 && m.subscribedVideoCount.Dec() == m.params.SubscriptionLimitVideo-1
videoCount := m.subscribedVideoCount.Dec()
relieveFromLimits = m.params.SubscriptionLimitVideo > 0 && videoCount == m.params.SubscriptionLimitVideo-1
case livekit.TrackType_AUDIO:
relieveFromLimits = m.params.SubscriptionLimitAudio > 0 && m.subscribedAudioCount.Dec() == m.params.SubscriptionLimitAudio-1
audioCount := m.subscribedAudioCount.Dec()
relieveFromLimits = m.params.SubscriptionLimitAudio > 0 && audioCount == m.params.SubscriptionLimitAudio-1
}
// remove from subscribedTo
+1 -1
View File
@@ -45,7 +45,7 @@ func NewMockParticipant(identity livekit.ParticipantIdentity, protocol types.Pro
Identity: string(identity),
State: livekit.ParticipantInfo_JOINED,
IsPublisher: publisher,
}, utils.TimedVersion{})
}, utils.TimedVersion(0))
p.SetMetadataCalls(func(m string) {
var f func(participant types.LocalParticipant)
+56 -55
View File
@@ -35,21 +35,24 @@ import (
"github.com/pkg/errors"
"go.uber.org/atomic"
"github.com/livekit/livekit-server/pkg/config"
"github.com/livekit/livekit-server/pkg/rtc/transport"
"github.com/livekit/livekit-server/pkg/rtc/types"
lkinterceptor "github.com/livekit/livekit-server/pkg/sfu/interceptor"
"github.com/livekit/livekit-server/pkg/sfu/pacer"
"github.com/livekit/livekit-server/pkg/sfu/rtpextension"
"github.com/livekit/livekit-server/pkg/sfu/streamallocator"
sfuutils "github.com/livekit/livekit-server/pkg/sfu/utils"
"github.com/livekit/livekit-server/pkg/telemetry/prometheus"
sutils "github.com/livekit/livekit-server/pkg/utils"
lkinterceptor "github.com/livekit/mediatransportutil/pkg/interceptor"
lktwcc "github.com/livekit/mediatransportutil/pkg/twcc"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
"github.com/livekit/protocol/logger/pionlogger"
lksdp "github.com/livekit/protocol/sdp"
"github.com/livekit/livekit-server/pkg/config"
"github.com/livekit/livekit-server/pkg/rtc/transport"
"github.com/livekit/livekit-server/pkg/rtc/types"
sfuinterceptor "github.com/livekit/livekit-server/pkg/sfu/interceptor"
"github.com/livekit/livekit-server/pkg/sfu/pacer"
"github.com/livekit/livekit-server/pkg/sfu/rtpextension"
"github.com/livekit/livekit-server/pkg/sfu/streamallocator"
sfuutils "github.com/livekit/livekit-server/pkg/sfu/utils"
"github.com/livekit/livekit-server/pkg/telemetry/prometheus"
"github.com/livekit/livekit-server/pkg/utils"
sutils "github.com/livekit/livekit-server/pkg/utils"
)
const (
@@ -322,6 +325,10 @@ func newPeerConnection(params TransportParams, onBandwidthEstimator func(estimat
}
}
}
} else {
// sfu only use interceptor to send XR but don't read response from it (use buffer instead),
// so use a empty callback here
ir.Add(lkinterceptor.NewRTTFromXRFactory(func(rtt uint32) {}))
}
if len(params.SimTracks) > 0 {
f, err := NewUnhandleSimulcastInterceptorFactory(UnhandleSimulcastTracks(params.SimTracks))
@@ -361,7 +368,7 @@ func newPeerConnection(params TransportParams, onBandwidthEstimator func(estimat
}
}
// put rtx interceptor behind unhandle simulcast interceptor so it can get the correct mid & rid
ir.Add(lkinterceptor.NewRTXInfoExtractorFactory(setTWCCForVideo, func(repair, base uint32) {
ir.Add(sfuinterceptor.NewRTXInfoExtractorFactory(setTWCCForVideo, func(repair, base uint32) {
params.Logger.Debugw("rtx pair found from extension", "repair", repair, "base", base)
params.Config.BufferFactory.SetRTXPair(repair, base)
}, params.Logger))
@@ -379,10 +386,14 @@ func NewPCTransport(params TransportParams) (*PCTransport, error) {
params.Logger = logger.GetLogger()
}
t := &PCTransport{
params: params,
debouncedNegotiate: debounce.New(negotiationFrequency),
negotiationState: transport.NegotiationStateNone,
eventsQueue: sutils.NewOpsQueue("transport", 64, false),
params: params,
debouncedNegotiate: debounce.New(negotiationFrequency),
negotiationState: transport.NegotiationStateNone,
eventsQueue: sutils.NewOpsQueue(utils.OpsQueueParams{
Name: "transport",
MinSize: 64,
Logger: params.Logger,
}),
previousTrackDescription: make(map[string]*trackDescription),
canReuseTransceiver: true,
connectionDetails: types.NewICEConnectionDetails(params.Transport, params.Logger),
@@ -603,14 +614,8 @@ func (t *PCTransport) handleConnectionFailed(forceShortConn bool) {
isShort, duration = t.IsShortConnection(time.Now())
if isShort {
pair, err := t.getSelectedPair()
if err != nil {
t.params.Logger.Warnw("short ICE connection", err, "duration", duration)
} else {
t.params.Logger.Infow("short ICE connection", "pair", pair, "duration", duration)
}
t.params.Logger.Debugw("short ICE connection", "error", err, "pair", pair, "duration", duration)
}
} else {
t.params.Logger.Infow("force short ICE connection")
}
t.params.Handler.OnFailed(isShort)
@@ -795,16 +800,27 @@ func (t *PCTransport) CreateDataChannel(label string, dci *webrtc.DataChannelIni
if err != nil {
return err
}
var (
dcPtr **webrtc.DataChannel
dcReady *bool
)
switch dc.Label() {
default:
// TODO: Appears that it's never called, so not sure what needs to be done here. We just keep the DC open?
// Maybe just add "reliable" parameter instead of checking the label.
t.params.Logger.Warnw("unknown data channel label", nil, "label", dc.Label())
return nil
case ReliableDataChannel:
dcPtr = &t.reliableDC
dcReady = &t.reliableDCOpened
case LossyDataChannel:
dcPtr = &t.lossyDC
dcReady = &t.lossyDCOpened
}
dcReadyHandler := func() {
t.lock.Lock()
switch dc.Label() {
case ReliableDataChannel:
t.reliableDCOpened = true
case LossyDataChannel:
t.lossyDCOpened = true
}
*dcReady = true
t.lock.Unlock()
t.params.Logger.Debugw(dc.Label() + " data channel open")
@@ -822,30 +838,15 @@ func (t *PCTransport) CreateDataChannel(label string, dci *webrtc.DataChannelIni
}
t.lock.Lock()
switch dc.Label() {
case ReliableDataChannel:
t.reliableDC = dc
if t.params.DirectionConfig.StrictACKs {
t.reliableDC.OnOpen(dcReadyHandler)
} else {
t.reliableDC.OnDial(dcReadyHandler)
}
t.reliableDC.OnClose(dcCloseHandler)
t.reliableDC.OnError(dcErrorHandler)
case LossyDataChannel:
t.lossyDC = dc
if t.params.DirectionConfig.StrictACKs {
t.lossyDC.OnOpen(dcReadyHandler)
} else {
t.lossyDC.OnDial(dcReadyHandler)
}
t.lossyDC.OnClose(dcCloseHandler)
t.lossyDC.OnError(dcErrorHandler)
default:
t.params.Logger.Warnw("unknown data channel label", nil, "label", dc.Label())
defer t.lock.Unlock()
*dcPtr = dc
if t.params.DirectionConfig.StrictACKs {
dc.OnOpen(dcReadyHandler)
} else {
dc.OnDial(dcReadyHandler)
}
t.lock.Unlock()
dc.OnClose(dcCloseHandler)
dc.OnError(dcErrorHandler)
return nil
}
@@ -899,10 +900,10 @@ func (t *PCTransport) WriteRTCP(pkts []rtcp.Packet) error {
return t.pc.WriteRTCP(pkts)
}
func (t *PCTransport) SendDataPacket(dp *livekit.DataPacket, data []byte) error {
func (t *PCTransport) SendDataPacket(kind livekit.DataPacket_Kind, encoded []byte) error {
var dc *webrtc.DataChannel
t.lock.RLock()
if dp.Kind == livekit.DataPacket_RELIABLE {
if kind == livekit.DataPacket_RELIABLE {
dc = t.reliableDC
} else {
dc = t.lossyDC
@@ -921,7 +922,7 @@ func (t *PCTransport) SendDataPacket(dp *livekit.DataPacket, data []byte) error
return ErrDataChannelBufferFull
}
return dc.Send(data)
return dc.Send(encoded)
}
func (t *PCTransport) Close() {
+14 -10
View File
@@ -28,14 +28,15 @@ import (
"go.uber.org/atomic"
"google.golang.org/protobuf/proto"
"github.com/livekit/mediatransportutil/pkg/twcc"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
"github.com/livekit/livekit-server/pkg/config"
"github.com/livekit/livekit-server/pkg/rtc/transport"
"github.com/livekit/livekit-server/pkg/rtc/types"
"github.com/livekit/livekit-server/pkg/sfu"
"github.com/livekit/livekit-server/pkg/sfu/pacer"
"github.com/livekit/mediatransportutil/pkg/twcc"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
)
const (
@@ -240,9 +241,9 @@ func (t *TransportManager) RemoveSubscribedTrack(subTrack types.SubscribedTrack)
t.subscriber.RemoveTrackFromStreamAllocator(subTrack)
}
func (t *TransportManager) SendDataPacket(dp *livekit.DataPacket, data []byte) error {
func (t *TransportManager) SendDataPacket(kind livekit.DataPacket_Kind, encoded []byte) error {
// downstream data is sent via primary peer connection
return t.getTransport(true).SendDataPacket(dp, data)
return t.getTransport(true).SendDataPacket(kind, encoded)
}
func (t *TransportManager) createDataChannelsForSubscriber(pendingDataChannels []*livekit.DataChannelInfo) error {
@@ -523,8 +524,11 @@ func (t *TransportManager) handleConnectionFailed(isShortLived bool) {
if !t.hasRecentSignalLocked() || !signalValid {
// the failed might cause by network interrupt because signal closed or we have not seen any signal in the time window,
// so don't switch to next candidate type
t.params.Logger.Infow("ignoring prefer candidate check by ICE failure because signal connection interrupted",
"lastSignalSince", lastSignalSince, "signalValid", signalValid)
t.params.Logger.Debugw(
"ignoring prefer candidate check by ICE failure because signal connection interrupted",
"lastSignalSince", lastSignalSince,
"signalValid", signalValid,
)
t.failureCount = 0
t.lastFailure = time.Time{}
t.lock.Unlock()
@@ -572,13 +576,13 @@ func (t *TransportManager) handleConnectionFailed(isShortLived bool) {
switch preferNext {
case livekit.ICECandidateType_ICT_TCP:
t.params.Logger.Infow("prefer TCP transport on both peer connections")
t.params.Logger.Debugw("prefer TCP transport on both peer connections")
case livekit.ICECandidateType_ICT_TLS:
t.params.Logger.Infow("prefer TLS transport both peer connections")
t.params.Logger.Debugw("prefer TLS transport both peer connections")
case livekit.ICECandidateType_ICT_NONE:
t.params.Logger.Infow("allowing all transports on both peer connections")
t.params.Logger.Debugw("allowing all transports on both peer connections")
}
// irrespective of which one fails, force prefer candidate on both as the other one might
+5 -3
View File
@@ -276,7 +276,6 @@ type Participant interface {
UpdateSubscriptionPermission(
subscriptionPermission *livekit.SubscriptionPermission,
timedVersion utils.TimedVersion,
resolverByIdentity func(participantIdentity livekit.ParticipantIdentity) LocalParticipant,
resolverBySid func(participantID livekit.ParticipantID) LocalParticipant,
) error
UpdateVideoLayers(updateVideoLayers *livekit.UpdateVideoLayers) error
@@ -344,6 +343,8 @@ type LocalParticipant interface {
AddTransceiverFromTrackToSubscriber(trackLocal webrtc.TrackLocal, params AddTrackParams) (*webrtc.RTPSender, *webrtc.RTPTransceiver, error)
RemoveTrackFromSubscriber(sender *webrtc.RTPSender) error
WriteSubscriberRTCP(pkts []rtcp.Packet) error
// subscriptions
SubscribeToTrack(trackID livekit.TrackID)
UnsubscribeFromTrack(trackID livekit.TrackID)
@@ -364,7 +365,7 @@ type LocalParticipant interface {
SendJoinResponse(joinResponse *livekit.JoinResponse) error
SendParticipantUpdate(participants []*livekit.ParticipantInfo) error
SendSpeakerUpdate(speakers []*livekit.SpeakerInfo, force bool) error
SendDataPacket(packet *livekit.DataPacket, data []byte) error
SendDataPacket(kind livekit.DataPacket_Kind, encoded []byte) error
SendRoomUpdate(room *livekit.Room) error
SendConnectionQualityUpdate(update *livekit.ConnectionQualityUpdate) error
SubscriptionPermissionUpdate(publisherID livekit.ParticipantID, trackID livekit.TrackID, allowed bool)
@@ -383,7 +384,7 @@ type LocalParticipant interface {
OnTrackUnpublished(callback func(LocalParticipant, MediaTrack))
// OnParticipantUpdate - metadata or permission is updated
OnParticipantUpdate(callback func(LocalParticipant))
OnDataPacket(callback func(LocalParticipant, *livekit.DataPacket))
OnDataPacket(callback func(LocalParticipant, livekit.DataPacket_Kind, *livekit.DataPacket))
OnSubscribeStatusChanged(fn func(publisherID livekit.ParticipantID, subscribed bool))
OnClose(callback func(LocalParticipant))
OnClaimsChanged(callback func(LocalParticipant))
@@ -393,6 +394,7 @@ type LocalParticipant interface {
// session migration
MaybeStartMigration(force bool, onStart func()) bool
NotifyMigration()
SetMigrateState(s MigrateState)
MigrateState() MigrateState
SetMigrateInfo(previousOffer, previousAnswer *webrtc.SessionDescription, mediaTracks []*livekit.TrackPublishedResponse, dataChannels []*livekit.DataChannelInfo)
@@ -557,6 +557,10 @@ type FakeLocalParticipant struct {
negotiateArgsForCall []struct {
arg1 bool
}
NotifyMigrationStub func()
notifyMigrationMutex sync.RWMutex
notifyMigrationArgsForCall []struct {
}
OnClaimsChangedStub func(func(types.LocalParticipant))
onClaimsChangedMutex sync.RWMutex
onClaimsChangedArgsForCall []struct {
@@ -567,10 +571,10 @@ type FakeLocalParticipant struct {
onCloseArgsForCall []struct {
arg1 func(types.LocalParticipant)
}
OnDataPacketStub func(func(types.LocalParticipant, *livekit.DataPacket))
OnDataPacketStub func(func(types.LocalParticipant, livekit.DataPacket_Kind, *livekit.DataPacket))
onDataPacketMutex sync.RWMutex
onDataPacketArgsForCall []struct {
arg1 func(types.LocalParticipant, *livekit.DataPacket)
arg1 func(types.LocalParticipant, livekit.DataPacket_Kind, *livekit.DataPacket)
}
OnICEConfigChangedStub func(func(participant types.LocalParticipant, iceConfig *livekit.ICEConfig))
onICEConfigChangedMutex sync.RWMutex
@@ -656,10 +660,10 @@ type FakeLocalParticipant struct {
sendConnectionQualityUpdateReturnsOnCall map[int]struct {
result1 error
}
SendDataPacketStub func(*livekit.DataPacket, []byte) error
SendDataPacketStub func(livekit.DataPacket_Kind, []byte) error
sendDataPacketMutex sync.RWMutex
sendDataPacketArgsForCall []struct {
arg1 *livekit.DataPacket
arg1 livekit.DataPacket_Kind
arg2 []byte
}
sendDataPacketReturns struct {
@@ -938,13 +942,12 @@ type FakeLocalParticipant struct {
arg1 livekit.TrackID
arg2 *livekit.UpdateTrackSettings
}
UpdateSubscriptionPermissionStub func(*livekit.SubscriptionPermission, utils.TimedVersion, func(participantIdentity livekit.ParticipantIdentity) types.LocalParticipant, func(participantID livekit.ParticipantID) types.LocalParticipant) error
UpdateSubscriptionPermissionStub func(*livekit.SubscriptionPermission, utils.TimedVersion, func(participantID livekit.ParticipantID) types.LocalParticipant) error
updateSubscriptionPermissionMutex sync.RWMutex
updateSubscriptionPermissionArgsForCall []struct {
arg1 *livekit.SubscriptionPermission
arg2 utils.TimedVersion
arg3 func(participantIdentity livekit.ParticipantIdentity) types.LocalParticipant
arg4 func(participantID livekit.ParticipantID) types.LocalParticipant
arg3 func(participantID livekit.ParticipantID) types.LocalParticipant
}
updateSubscriptionPermissionReturns struct {
result1 error
@@ -980,6 +983,17 @@ type FakeLocalParticipant struct {
waitUntilSubscribedReturnsOnCall map[int]struct {
result1 error
}
WriteSubscriberRTCPStub func([]rtcp.Packet) error
writeSubscriberRTCPMutex sync.RWMutex
writeSubscriberRTCPArgsForCall []struct {
arg1 []rtcp.Packet
}
writeSubscriberRTCPReturns struct {
result1 error
}
writeSubscriberRTCPReturnsOnCall map[int]struct {
result1 error
}
invocations map[string][][]interface{}
invocationsMutex sync.RWMutex
}
@@ -3830,6 +3844,30 @@ func (fake *FakeLocalParticipant) NegotiateArgsForCall(i int) bool {
return argsForCall.arg1
}
func (fake *FakeLocalParticipant) NotifyMigration() {
fake.notifyMigrationMutex.Lock()
fake.notifyMigrationArgsForCall = append(fake.notifyMigrationArgsForCall, struct {
}{})
stub := fake.NotifyMigrationStub
fake.recordInvocation("NotifyMigration", []interface{}{})
fake.notifyMigrationMutex.Unlock()
if stub != nil {
fake.NotifyMigrationStub()
}
}
func (fake *FakeLocalParticipant) NotifyMigrationCallCount() int {
fake.notifyMigrationMutex.RLock()
defer fake.notifyMigrationMutex.RUnlock()
return len(fake.notifyMigrationArgsForCall)
}
func (fake *FakeLocalParticipant) NotifyMigrationCalls(stub func()) {
fake.notifyMigrationMutex.Lock()
defer fake.notifyMigrationMutex.Unlock()
fake.NotifyMigrationStub = stub
}
func (fake *FakeLocalParticipant) OnClaimsChanged(arg1 func(types.LocalParticipant)) {
fake.onClaimsChangedMutex.Lock()
fake.onClaimsChangedArgsForCall = append(fake.onClaimsChangedArgsForCall, struct {
@@ -3894,10 +3932,10 @@ func (fake *FakeLocalParticipant) OnCloseArgsForCall(i int) func(types.LocalPart
return argsForCall.arg1
}
func (fake *FakeLocalParticipant) OnDataPacket(arg1 func(types.LocalParticipant, *livekit.DataPacket)) {
func (fake *FakeLocalParticipant) OnDataPacket(arg1 func(types.LocalParticipant, livekit.DataPacket_Kind, *livekit.DataPacket)) {
fake.onDataPacketMutex.Lock()
fake.onDataPacketArgsForCall = append(fake.onDataPacketArgsForCall, struct {
arg1 func(types.LocalParticipant, *livekit.DataPacket)
arg1 func(types.LocalParticipant, livekit.DataPacket_Kind, *livekit.DataPacket)
}{arg1})
stub := fake.OnDataPacketStub
fake.recordInvocation("OnDataPacket", []interface{}{arg1})
@@ -3913,13 +3951,13 @@ func (fake *FakeLocalParticipant) OnDataPacketCallCount() int {
return len(fake.onDataPacketArgsForCall)
}
func (fake *FakeLocalParticipant) OnDataPacketCalls(stub func(func(types.LocalParticipant, *livekit.DataPacket))) {
func (fake *FakeLocalParticipant) OnDataPacketCalls(stub func(func(types.LocalParticipant, livekit.DataPacket_Kind, *livekit.DataPacket))) {
fake.onDataPacketMutex.Lock()
defer fake.onDataPacketMutex.Unlock()
fake.OnDataPacketStub = stub
}
func (fake *FakeLocalParticipant) OnDataPacketArgsForCall(i int) func(types.LocalParticipant, *livekit.DataPacket) {
func (fake *FakeLocalParticipant) OnDataPacketArgsForCall(i int) func(types.LocalParticipant, livekit.DataPacket_Kind, *livekit.DataPacket) {
fake.onDataPacketMutex.RLock()
defer fake.onDataPacketMutex.RUnlock()
argsForCall := fake.onDataPacketArgsForCall[i]
@@ -4423,7 +4461,7 @@ func (fake *FakeLocalParticipant) SendConnectionQualityUpdateReturnsOnCall(i int
}{result1}
}
func (fake *FakeLocalParticipant) SendDataPacket(arg1 *livekit.DataPacket, arg2 []byte) error {
func (fake *FakeLocalParticipant) SendDataPacket(arg1 livekit.DataPacket_Kind, arg2 []byte) error {
var arg2Copy []byte
if arg2 != nil {
arg2Copy = make([]byte, len(arg2))
@@ -4432,7 +4470,7 @@ func (fake *FakeLocalParticipant) SendDataPacket(arg1 *livekit.DataPacket, arg2
fake.sendDataPacketMutex.Lock()
ret, specificReturn := fake.sendDataPacketReturnsOnCall[len(fake.sendDataPacketArgsForCall)]
fake.sendDataPacketArgsForCall = append(fake.sendDataPacketArgsForCall, struct {
arg1 *livekit.DataPacket
arg1 livekit.DataPacket_Kind
arg2 []byte
}{arg1, arg2Copy})
stub := fake.SendDataPacketStub
@@ -4454,13 +4492,13 @@ func (fake *FakeLocalParticipant) SendDataPacketCallCount() int {
return len(fake.sendDataPacketArgsForCall)
}
func (fake *FakeLocalParticipant) SendDataPacketCalls(stub func(*livekit.DataPacket, []byte) error) {
func (fake *FakeLocalParticipant) SendDataPacketCalls(stub func(livekit.DataPacket_Kind, []byte) error) {
fake.sendDataPacketMutex.Lock()
defer fake.sendDataPacketMutex.Unlock()
fake.SendDataPacketStub = stub
}
func (fake *FakeLocalParticipant) SendDataPacketArgsForCall(i int) (*livekit.DataPacket, []byte) {
func (fake *FakeLocalParticipant) SendDataPacketArgsForCall(i int) (livekit.DataPacket_Kind, []byte) {
fake.sendDataPacketMutex.RLock()
defer fake.sendDataPacketMutex.RUnlock()
argsForCall := fake.sendDataPacketArgsForCall[i]
@@ -5990,21 +6028,20 @@ func (fake *FakeLocalParticipant) UpdateSubscribedTrackSettingsArgsForCall(i int
return argsForCall.arg1, argsForCall.arg2
}
func (fake *FakeLocalParticipant) UpdateSubscriptionPermission(arg1 *livekit.SubscriptionPermission, arg2 utils.TimedVersion, arg3 func(participantIdentity livekit.ParticipantIdentity) types.LocalParticipant, arg4 func(participantID livekit.ParticipantID) types.LocalParticipant) error {
func (fake *FakeLocalParticipant) UpdateSubscriptionPermission(arg1 *livekit.SubscriptionPermission, arg2 utils.TimedVersion, arg3 func(participantID livekit.ParticipantID) types.LocalParticipant) error {
fake.updateSubscriptionPermissionMutex.Lock()
ret, specificReturn := fake.updateSubscriptionPermissionReturnsOnCall[len(fake.updateSubscriptionPermissionArgsForCall)]
fake.updateSubscriptionPermissionArgsForCall = append(fake.updateSubscriptionPermissionArgsForCall, struct {
arg1 *livekit.SubscriptionPermission
arg2 utils.TimedVersion
arg3 func(participantIdentity livekit.ParticipantIdentity) types.LocalParticipant
arg4 func(participantID livekit.ParticipantID) types.LocalParticipant
}{arg1, arg2, arg3, arg4})
arg3 func(participantID livekit.ParticipantID) types.LocalParticipant
}{arg1, arg2, arg3})
stub := fake.UpdateSubscriptionPermissionStub
fakeReturns := fake.updateSubscriptionPermissionReturns
fake.recordInvocation("UpdateSubscriptionPermission", []interface{}{arg1, arg2, arg3, arg4})
fake.recordInvocation("UpdateSubscriptionPermission", []interface{}{arg1, arg2, arg3})
fake.updateSubscriptionPermissionMutex.Unlock()
if stub != nil {
return stub(arg1, arg2, arg3, arg4)
return stub(arg1, arg2, arg3)
}
if specificReturn {
return ret.result1
@@ -6018,17 +6055,17 @@ func (fake *FakeLocalParticipant) UpdateSubscriptionPermissionCallCount() int {
return len(fake.updateSubscriptionPermissionArgsForCall)
}
func (fake *FakeLocalParticipant) UpdateSubscriptionPermissionCalls(stub func(*livekit.SubscriptionPermission, utils.TimedVersion, func(participantIdentity livekit.ParticipantIdentity) types.LocalParticipant, func(participantID livekit.ParticipantID) types.LocalParticipant) error) {
func (fake *FakeLocalParticipant) UpdateSubscriptionPermissionCalls(stub func(*livekit.SubscriptionPermission, utils.TimedVersion, func(participantID livekit.ParticipantID) types.LocalParticipant) error) {
fake.updateSubscriptionPermissionMutex.Lock()
defer fake.updateSubscriptionPermissionMutex.Unlock()
fake.UpdateSubscriptionPermissionStub = stub
}
func (fake *FakeLocalParticipant) UpdateSubscriptionPermissionArgsForCall(i int) (*livekit.SubscriptionPermission, utils.TimedVersion, func(participantIdentity livekit.ParticipantIdentity) types.LocalParticipant, func(participantID livekit.ParticipantID) types.LocalParticipant) {
func (fake *FakeLocalParticipant) UpdateSubscriptionPermissionArgsForCall(i int) (*livekit.SubscriptionPermission, utils.TimedVersion, func(participantID livekit.ParticipantID) types.LocalParticipant) {
fake.updateSubscriptionPermissionMutex.RLock()
defer fake.updateSubscriptionPermissionMutex.RUnlock()
argsForCall := fake.updateSubscriptionPermissionArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
}
func (fake *FakeLocalParticipant) UpdateSubscriptionPermissionReturns(result1 error) {
@@ -6209,6 +6246,72 @@ func (fake *FakeLocalParticipant) WaitUntilSubscribedReturnsOnCall(i int, result
}{result1}
}
func (fake *FakeLocalParticipant) WriteSubscriberRTCP(arg1 []rtcp.Packet) error {
var arg1Copy []rtcp.Packet
if arg1 != nil {
arg1Copy = make([]rtcp.Packet, len(arg1))
copy(arg1Copy, arg1)
}
fake.writeSubscriberRTCPMutex.Lock()
ret, specificReturn := fake.writeSubscriberRTCPReturnsOnCall[len(fake.writeSubscriberRTCPArgsForCall)]
fake.writeSubscriberRTCPArgsForCall = append(fake.writeSubscriberRTCPArgsForCall, struct {
arg1 []rtcp.Packet
}{arg1Copy})
stub := fake.WriteSubscriberRTCPStub
fakeReturns := fake.writeSubscriberRTCPReturns
fake.recordInvocation("WriteSubscriberRTCP", []interface{}{arg1Copy})
fake.writeSubscriberRTCPMutex.Unlock()
if stub != nil {
return stub(arg1)
}
if specificReturn {
return ret.result1
}
return fakeReturns.result1
}
func (fake *FakeLocalParticipant) WriteSubscriberRTCPCallCount() int {
fake.writeSubscriberRTCPMutex.RLock()
defer fake.writeSubscriberRTCPMutex.RUnlock()
return len(fake.writeSubscriberRTCPArgsForCall)
}
func (fake *FakeLocalParticipant) WriteSubscriberRTCPCalls(stub func([]rtcp.Packet) error) {
fake.writeSubscriberRTCPMutex.Lock()
defer fake.writeSubscriberRTCPMutex.Unlock()
fake.WriteSubscriberRTCPStub = stub
}
func (fake *FakeLocalParticipant) WriteSubscriberRTCPArgsForCall(i int) []rtcp.Packet {
fake.writeSubscriberRTCPMutex.RLock()
defer fake.writeSubscriberRTCPMutex.RUnlock()
argsForCall := fake.writeSubscriberRTCPArgsForCall[i]
return argsForCall.arg1
}
func (fake *FakeLocalParticipant) WriteSubscriberRTCPReturns(result1 error) {
fake.writeSubscriberRTCPMutex.Lock()
defer fake.writeSubscriberRTCPMutex.Unlock()
fake.WriteSubscriberRTCPStub = nil
fake.writeSubscriberRTCPReturns = struct {
result1 error
}{result1}
}
func (fake *FakeLocalParticipant) WriteSubscriberRTCPReturnsOnCall(i int, result1 error) {
fake.writeSubscriberRTCPMutex.Lock()
defer fake.writeSubscriberRTCPMutex.Unlock()
fake.WriteSubscriberRTCPStub = nil
if fake.writeSubscriberRTCPReturnsOnCall == nil {
fake.writeSubscriberRTCPReturnsOnCall = make(map[int]struct {
result1 error
})
}
fake.writeSubscriberRTCPReturnsOnCall[i] = struct {
result1 error
}{result1}
}
func (fake *FakeLocalParticipant) Invocations() map[string][][]interface{} {
fake.invocationsMutex.RLock()
defer fake.invocationsMutex.RUnlock()
@@ -6324,6 +6427,8 @@ func (fake *FakeLocalParticipant) Invocations() map[string][][]interface{} {
defer fake.migrateStateMutex.RUnlock()
fake.negotiateMutex.RLock()
defer fake.negotiateMutex.RUnlock()
fake.notifyMigrationMutex.RLock()
defer fake.notifyMigrationMutex.RUnlock()
fake.onClaimsChangedMutex.RLock()
defer fake.onClaimsChangedMutex.RUnlock()
fake.onCloseMutex.RLock()
@@ -6432,6 +6537,8 @@ func (fake *FakeLocalParticipant) Invocations() map[string][][]interface{} {
defer fake.verifySubscribeParticipantInfoMutex.RUnlock()
fake.waitUntilSubscribedMutex.RLock()
defer fake.waitUntilSubscribedMutex.RUnlock()
fake.writeSubscriberRTCPMutex.RLock()
defer fake.writeSubscriberRTCPMutex.RUnlock()
copiedInvocations := map[string][][]interface{}{}
for key, value := range fake.invocations {
copiedInvocations[key] = value
+10 -12
View File
@@ -207,13 +207,12 @@ type FakeParticipant struct {
toProtoReturnsOnCall map[int]struct {
result1 *livekit.ParticipantInfo
}
UpdateSubscriptionPermissionStub func(*livekit.SubscriptionPermission, utils.TimedVersion, func(participantIdentity livekit.ParticipantIdentity) types.LocalParticipant, func(participantID livekit.ParticipantID) types.LocalParticipant) error
UpdateSubscriptionPermissionStub func(*livekit.SubscriptionPermission, utils.TimedVersion, func(participantID livekit.ParticipantID) types.LocalParticipant) error
updateSubscriptionPermissionMutex sync.RWMutex
updateSubscriptionPermissionArgsForCall []struct {
arg1 *livekit.SubscriptionPermission
arg2 utils.TimedVersion
arg3 func(participantIdentity livekit.ParticipantIdentity) types.LocalParticipant
arg4 func(participantID livekit.ParticipantID) types.LocalParticipant
arg3 func(participantID livekit.ParticipantID) types.LocalParticipant
}
updateSubscriptionPermissionReturns struct {
result1 error
@@ -1268,21 +1267,20 @@ func (fake *FakeParticipant) ToProtoReturnsOnCall(i int, result1 *livekit.Partic
}{result1}
}
func (fake *FakeParticipant) UpdateSubscriptionPermission(arg1 *livekit.SubscriptionPermission, arg2 utils.TimedVersion, arg3 func(participantIdentity livekit.ParticipantIdentity) types.LocalParticipant, arg4 func(participantID livekit.ParticipantID) types.LocalParticipant) error {
func (fake *FakeParticipant) UpdateSubscriptionPermission(arg1 *livekit.SubscriptionPermission, arg2 utils.TimedVersion, arg3 func(participantID livekit.ParticipantID) types.LocalParticipant) error {
fake.updateSubscriptionPermissionMutex.Lock()
ret, specificReturn := fake.updateSubscriptionPermissionReturnsOnCall[len(fake.updateSubscriptionPermissionArgsForCall)]
fake.updateSubscriptionPermissionArgsForCall = append(fake.updateSubscriptionPermissionArgsForCall, struct {
arg1 *livekit.SubscriptionPermission
arg2 utils.TimedVersion
arg3 func(participantIdentity livekit.ParticipantIdentity) types.LocalParticipant
arg4 func(participantID livekit.ParticipantID) types.LocalParticipant
}{arg1, arg2, arg3, arg4})
arg3 func(participantID livekit.ParticipantID) types.LocalParticipant
}{arg1, arg2, arg3})
stub := fake.UpdateSubscriptionPermissionStub
fakeReturns := fake.updateSubscriptionPermissionReturns
fake.recordInvocation("UpdateSubscriptionPermission", []interface{}{arg1, arg2, arg3, arg4})
fake.recordInvocation("UpdateSubscriptionPermission", []interface{}{arg1, arg2, arg3})
fake.updateSubscriptionPermissionMutex.Unlock()
if stub != nil {
return stub(arg1, arg2, arg3, arg4)
return stub(arg1, arg2, arg3)
}
if specificReturn {
return ret.result1
@@ -1296,17 +1294,17 @@ func (fake *FakeParticipant) UpdateSubscriptionPermissionCallCount() int {
return len(fake.updateSubscriptionPermissionArgsForCall)
}
func (fake *FakeParticipant) UpdateSubscriptionPermissionCalls(stub func(*livekit.SubscriptionPermission, utils.TimedVersion, func(participantIdentity livekit.ParticipantIdentity) types.LocalParticipant, func(participantID livekit.ParticipantID) types.LocalParticipant) error) {
func (fake *FakeParticipant) UpdateSubscriptionPermissionCalls(stub func(*livekit.SubscriptionPermission, utils.TimedVersion, func(participantID livekit.ParticipantID) types.LocalParticipant) error) {
fake.updateSubscriptionPermissionMutex.Lock()
defer fake.updateSubscriptionPermissionMutex.Unlock()
fake.UpdateSubscriptionPermissionStub = stub
}
func (fake *FakeParticipant) UpdateSubscriptionPermissionArgsForCall(i int) (*livekit.SubscriptionPermission, utils.TimedVersion, func(participantIdentity livekit.ParticipantIdentity) types.LocalParticipant, func(participantID livekit.ParticipantID) types.LocalParticipant) {
func (fake *FakeParticipant) UpdateSubscriptionPermissionArgsForCall(i int) (*livekit.SubscriptionPermission, utils.TimedVersion, func(participantID livekit.ParticipantID) types.LocalParticipant) {
fake.updateSubscriptionPermissionMutex.RLock()
defer fake.updateSubscriptionPermissionMutex.RUnlock()
argsForCall := fake.updateSubscriptionPermissionArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
}
func (fake *FakeParticipant) UpdateSubscriptionPermissionReturns(result1 error) {
+14 -16
View File
@@ -46,8 +46,8 @@ type UpTrackManager struct {
closed bool
// publishedTracks that participant is publishing
publishedTracks map[livekit.TrackID]types.MediaTrack
subscriptionPermission *livekit.SubscriptionPermission
publishedTracks map[livekit.TrackID]types.MediaTrack
subscriptionPermission *livekit.SubscriptionPermission
// subscriber permission for published tracks
subscriberPermissions map[livekit.ParticipantIdentity]*livekit.TrackPermission // subscriberIdentity => *livekit.TrackPermission
@@ -157,7 +157,6 @@ func (u *UpTrackManager) GetPublishedTracks() []types.MediaTrack {
func (u *UpTrackManager) UpdateSubscriptionPermission(
subscriptionPermission *livekit.SubscriptionPermission,
timedVersion utils.TimedVersion,
_ func(participantIdentity livekit.ParticipantIdentity) types.LocalParticipant, // TODO: separate PR to remove this argument
resolverBySid func(participantID livekit.ParticipantID) types.LocalParticipant,
) error {
u.lock.Lock()
@@ -167,25 +166,21 @@ func (u *UpTrackManager) UpdateSubscriptionPermission(
// we do not want to initialize subscriptionPermissionVersion too early since if another machine is the
// owner for the data, we'd prefer to use their TimedVersion
// ignore older version
if !timedVersion.After(&u.subscriptionPermissionVersion) {
perms := ""
if u.subscriptionPermission != nil {
perms = u.subscriptionPermission.String()
}
if !timedVersion.After(u.subscriptionPermissionVersion) {
u.params.Logger.Debugw(
"skipping older subscription permission version",
"existingValue", perms,
"existingVersion", u.subscriptionPermissionVersion.String(),
"existingValue", logger.Proto(u.subscriptionPermission),
"existingVersion", &u.subscriptionPermissionVersion,
"requestingValue", logger.Proto(subscriptionPermission),
"requestingVersion", timedVersion.String(),
"requestingVersion", &timedVersion,
)
u.lock.Unlock()
return nil
}
u.subscriptionPermissionVersion.Update(&timedVersion)
u.subscriptionPermissionVersion.Update(timedVersion)
} else {
// for requests coming from the current node, use local versions
u.subscriptionPermissionVersion.Update(u.params.VersionGenerator.New())
u.subscriptionPermissionVersion.Update(u.params.VersionGenerator.Next())
}
// store as is for use when migrating
@@ -193,7 +188,7 @@ func (u *UpTrackManager) UpdateSubscriptionPermission(
if subscriptionPermission == nil {
u.params.Logger.Debugw(
"updating subscription permission, setting to nil",
"version", u.subscriptionPermissionVersion.String(),
"version", u.subscriptionPermissionVersion,
)
// possible to get a nil when migrating
u.lock.Unlock()
@@ -203,11 +198,14 @@ func (u *UpTrackManager) UpdateSubscriptionPermission(
u.params.Logger.Debugw(
"updating subscription permission",
"permissions", logger.Proto(u.subscriptionPermission),
"version", u.subscriptionPermissionVersion.String(),
"version", u.subscriptionPermissionVersion,
)
if err := u.parseSubscriptionPermissionsLocked(subscriptionPermission, func(pID livekit.ParticipantID) types.LocalParticipant {
u.lock.Unlock()
p := resolverBySid(pID)
var p types.LocalParticipant
if resolverBySid != nil {
p = resolverBySid(pID)
}
u.lock.Lock()
return p
}); err != nil {
+15 -15
View File
@@ -49,14 +49,14 @@ func TestUpdateSubscriptionPermission(t *testing.T) {
subscriptionPermission := &livekit.SubscriptionPermission{
AllParticipants: true,
}
um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), nil, nil)
um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), nil)
require.Nil(t, um.subscriberPermissions)
// nobody is allowed to subscribe
subscriptionPermission = &livekit.SubscriptionPermission{
TrackPermissions: []*livekit.TrackPermission{},
}
um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), nil, nil)
um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), nil)
require.NotNil(t, um.subscriberPermissions)
require.Equal(t, 0, len(um.subscriberPermissions))
@@ -92,7 +92,7 @@ func TestUpdateSubscriptionPermission(t *testing.T) {
perms2,
},
}
um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), nil, sidResolver)
um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), sidResolver)
require.Equal(t, 2, len(um.subscriberPermissions))
require.EqualValues(t, perms1, um.subscriberPermissions["p1"])
require.EqualValues(t, perms2, um.subscriberPermissions["p2"])
@@ -117,7 +117,7 @@ func TestUpdateSubscriptionPermission(t *testing.T) {
perms3,
},
}
um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), nil, nil)
um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), nil)
require.Equal(t, 3, len(um.subscriberPermissions))
require.EqualValues(t, perms1, um.subscriberPermissions["p1"])
require.EqualValues(t, perms2, um.subscriberPermissions["p2"])
@@ -170,7 +170,7 @@ func TestUpdateSubscriptionPermission(t *testing.T) {
perms2,
},
}
err := um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), nil, sidResolver)
err := um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), sidResolver)
require.NoError(t, err)
require.Equal(t, 2, len(um.subscriberPermissions))
require.EqualValues(t, perms1, um.subscriberPermissions["p1"])
@@ -189,7 +189,7 @@ func TestUpdateSubscriptionPermission(t *testing.T) {
return nil
}
err = um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), nil, badSidResolver)
err = um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), badSidResolver)
require.NoError(t, err)
require.Equal(t, 2, len(um.subscriberPermissions))
require.EqualValues(t, perms1, um.subscriberPermissions["p1"])
@@ -202,17 +202,17 @@ func TestUpdateSubscriptionPermission(t *testing.T) {
v0, v1, v2 := vg.Next(), vg.Next(), vg.Next()
um.UpdateSubscriptionPermission(&livekit.SubscriptionPermission{}, v1, nil, nil)
um.UpdateSubscriptionPermission(&livekit.SubscriptionPermission{}, v1, nil)
require.Equal(t, v1.Load(), um.subscriptionPermissionVersion.Load(), "first update should be applied")
um.UpdateSubscriptionPermission(&livekit.SubscriptionPermission{}, v2, nil, nil)
um.UpdateSubscriptionPermission(&livekit.SubscriptionPermission{}, v2, nil)
require.Equal(t, v2.Load(), um.subscriptionPermissionVersion.Load(), "ordered updates should be applied")
um.UpdateSubscriptionPermission(&livekit.SubscriptionPermission{}, v0, nil, nil)
um.UpdateSubscriptionPermission(&livekit.SubscriptionPermission{}, v0, nil)
require.Equal(t, v2.Load(), um.subscriptionPermissionVersion.Load(), "out of order updates should be ignored")
um.UpdateSubscriptionPermission(&livekit.SubscriptionPermission{}, utils.TimedVersion{}, nil, nil)
require.True(t, um.subscriptionPermissionVersion.After(&v2), "zero version in updates should use next local version")
um.UpdateSubscriptionPermission(&livekit.SubscriptionPermission{}, utils.TimedVersion(0), nil)
require.True(t, um.subscriptionPermissionVersion.After(v2), "zero version in updates should use next local version")
})
}
@@ -233,7 +233,7 @@ func TestSubscriptionPermission(t *testing.T) {
subscriptionPermission := &livekit.SubscriptionPermission{
AllParticipants: true,
}
um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), nil, nil)
um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), nil)
require.True(t, um.hasPermissionLocked("audio", "p1"))
require.True(t, um.hasPermissionLocked("audio", "p2"))
@@ -241,7 +241,7 @@ func TestSubscriptionPermission(t *testing.T) {
subscriptionPermission = &livekit.SubscriptionPermission{
TrackPermissions: []*livekit.TrackPermission{},
}
um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), nil, nil)
um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), nil)
require.False(t, um.hasPermissionLocked("audio", "p1"))
require.False(t, um.hasPermissionLocked("audio", "p2"))
@@ -258,7 +258,7 @@ func TestSubscriptionPermission(t *testing.T) {
},
},
}
um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), nil, nil)
um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), nil)
require.True(t, um.hasPermissionLocked("audio", "p1"))
require.True(t, um.hasPermissionLocked("video", "p1"))
require.True(t, um.hasPermissionLocked("audio", "p2"))
@@ -293,7 +293,7 @@ func TestSubscriptionPermission(t *testing.T) {
},
},
}
um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), nil, nil)
um.UpdateSubscriptionPermission(subscriptionPermission, vg.Next(), nil)
require.True(t, um.hasPermissionLocked("audio", "p1"))
require.True(t, um.hasPermissionLocked("video", "p1"))
require.True(t, um.hasPermissionLocked("screen", "p1"))
+11 -10
View File
@@ -26,6 +26,7 @@ import (
"github.com/livekit/protocol/logger"
"github.com/livekit/livekit-server/pkg/sfu"
"github.com/livekit/livekit-server/pkg/sfu/buffer"
)
// wrapper around WebRTC receiver, overriding its ID
@@ -263,13 +264,13 @@ func (d *DummyReceiver) AddDownTrack(track sfu.TrackSender) error {
return nil
}
func (d *DummyReceiver) DeleteDownTrack(participantID livekit.ParticipantID) {
func (d *DummyReceiver) DeleteDownTrack(subscriberID livekit.ParticipantID) {
d.downtrackLock.Lock()
defer d.downtrackLock.Unlock()
if r, ok := d.receiver.Load().(sfu.TrackReceiver); ok {
r.DeleteDownTrack(participantID)
r.DeleteDownTrack(subscriberID)
} else {
delete(d.downtracks, participantID)
delete(d.downtracks, subscriberID)
}
}
@@ -323,13 +324,6 @@ func (d *DummyReceiver) GetRedReceiver() sfu.TrackReceiver {
return d
}
func (d *DummyReceiver) GetCalculatedClockRate(layer int32) uint32 {
if r, ok := d.receiver.Load().(sfu.TrackReceiver); ok {
return r.GetCalculatedClockRate(layer)
}
return 0
}
func (d *DummyReceiver) GetReferenceLayerRTPTimestamp(ts uint32, layer int32, referenceLayer int32) (uint32, error) {
if r, ok := d.receiver.Load().(sfu.TrackReceiver); ok {
return r.GetReferenceLayerRTPTimestamp(ts, layer, referenceLayer)
@@ -337,6 +331,13 @@ func (d *DummyReceiver) GetReferenceLayerRTPTimestamp(ts uint32, layer int32, re
return 0, errors.New("receiver not available")
}
func (d *DummyReceiver) GetRTCPSenderReportData(layer int32) *buffer.RTCPSenderReportData {
if r, ok := d.receiver.Load().(sfu.TrackReceiver); ok {
return r.GetRTCPSenderReportData(layer)
}
return nil
}
func (d *DummyReceiver) GetTrackStats() *livekit.RTPStats {
if r, ok := d.receiver.Load().(sfu.TrackReceiver); ok {
return r.GetTrackStats()
+4
View File
@@ -79,6 +79,9 @@ func (r *StandardRoomAllocator) CreateRoom(ctx context.Context, req *livekit.Cre
if req.EmptyTimeout > 0 {
rm.EmptyTimeout = req.EmptyTimeout
}
if req.DepartureTimeout > 0 {
rm.DepartureTimeout = req.DepartureTimeout
}
if req.MaxParticipants > 0 {
rm.MaxParticipants = req.MaxParticipants
}
@@ -157,6 +160,7 @@ func (r *StandardRoomAllocator) ValidateCreateRoom(ctx context.Context, roomName
func applyDefaultRoomConfig(room *livekit.Room, internal *livekit.RoomInternal, conf *config.RoomConfig) {
room.EmptyTimeout = conf.EmptyTimeout
room.DepartureTimeout = conf.DepartureTimeout
room.MaxParticipants = conf.MaxParticipants
for _, codec := range conf.EnabledCodecs {
room.EnabledCodecs = append(room.EnabledCodecs, &livekit.Codec{
+1
View File
@@ -42,6 +42,7 @@ func TestCreateRoom(t *testing.T) {
room, _, err := ra.CreateRoom(context.Background(), &livekit.CreateRoomRequest{Name: "myroom"})
require.NoError(t, err)
require.Equal(t, conf.Room.EmptyTimeout, room.EmptyTimeout)
require.Equal(t, conf.Room.DepartureTimeout, room.DepartureTimeout)
require.NotEmpty(t, room.EnabledCodecs)
})
+17 -11
View File
@@ -24,14 +24,13 @@ import (
"github.com/pkg/errors"
"golang.org/x/exp/maps"
"github.com/livekit/livekit-server/pkg/telemetry/prometheus"
"github.com/livekit/livekit-server/version"
"github.com/livekit/mediatransportutil/pkg/rtcconfig"
"github.com/livekit/protocol/auth"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
"github.com/livekit/protocol/rpc"
"github.com/livekit/protocol/utils"
"github.com/livekit/protocol/utils/must"
"github.com/livekit/psrpc"
"github.com/livekit/livekit-server/pkg/clientconfiguration"
@@ -40,6 +39,8 @@ import (
"github.com/livekit/livekit-server/pkg/rtc"
"github.com/livekit/livekit-server/pkg/rtc/types"
"github.com/livekit/livekit-server/pkg/telemetry"
"github.com/livekit/livekit-server/pkg/telemetry/prometheus"
"github.com/livekit/livekit-server/version"
)
const (
@@ -455,7 +456,7 @@ func (r *RoomManager) StartSession(
}
participantTopic := rpc.FormatParticipantTopic(roomName, participant.Identity())
participantServer := utils.Must(rpc.NewTypedParticipantServer(r, r.bus))
participantServer := must.Get(rpc.NewTypedParticipantServer(r, r.bus))
killParticipantServer := r.participantServers.Replace(participantTopic, participantServer)
if err := participantServer.RegisterAllParticipantTopics(participantTopic); err != nil {
killParticipantServer()
@@ -544,10 +545,10 @@ func (r *RoomManager) getOrCreateRoom(ctx context.Context, roomName livekit.Room
}
// construct ice servers
newRoom := rtc.NewRoom(ri, internal, *r.rtcConfig, &r.config.Audio, r.serverInfo, r.telemetry, r.agentClient, r.egressLauncher)
newRoom := rtc.NewRoom(ri, internal, *r.rtcConfig, r.config.Room, &r.config.Audio, r.serverInfo, r.telemetry, r.agentClient, r.egressLauncher)
roomTopic := rpc.FormatRoomTopic(roomName)
roomServer := utils.Must(rpc.NewTypedRoomServer(r, r.bus))
roomServer := must.Get(rpc.NewTypedRoomServer(r, r.bus))
killRoomServer := r.roomServers.Replace(roomTopic, roomServer)
if err := roomServer.RegisterAllRoomTopics(roomTopic); err != nil {
killRoomServer()
@@ -752,13 +753,18 @@ func (r *RoomManager) SendData(ctx context.Context, req *livekit.SendDataRequest
}
room.Logger.Debugw("api send data", "size", len(req.Data))
up := &livekit.UserPacket{
Payload: req.Data,
DestinationSids: req.DestinationSids,
room.SendDataPacket(&livekit.DataPacket{
Kind: req.Kind,
DestinationIdentities: req.DestinationIdentities,
Topic: req.Topic,
}
room.SendDataPacket(up, req.Kind)
Value: &livekit.DataPacket_User{
User: &livekit.UserPacket{
Payload: req.Data,
DestinationSids: req.DestinationSids,
DestinationIdentities: req.DestinationIdentities,
Topic: req.Topic,
},
},
}, req.Kind)
return &livekit.SendDataResponse{}, nil
}
+16 -26
View File
@@ -247,12 +247,10 @@ func (s *RTCService) ServeHTTP(w http.ResponseWriter, r *http.Request) {
pi.ID = livekit.ParticipantID(initialResponse.GetJoin().GetParticipant().GetSid())
}
var signalStats *telemetry.BytesTrackStats
if pi.ID != "" {
signalStats = telemetry.NewBytesTrackStats(
telemetry.BytesTrackIDForParticipantID(telemetry.BytesTrackTypeSignal, pi.ID),
pi.ID,
s.telemetry)
signalStats := telemetry.NewBytesSignalStats(r.Context(), s.telemetry)
if join := initialResponse.GetJoin(); join != nil {
signalStats.ResolveRoom(join.GetRoom())
signalStats.ResolveParticipant(join.GetParticipant())
}
pLogger := rtc.LoggerWithParticipant(
@@ -274,9 +272,7 @@ func (s *RTCService) ServeHTTP(w http.ResponseWriter, r *http.Request) {
cr.RequestSink.Close()
close(done)
if signalStats != nil {
signalStats.Stop()
}
signalStats.Stop()
}()
// upgrade only once the basics are good to go
@@ -303,9 +299,8 @@ func (s *RTCService) ServeHTTP(w http.ResponseWriter, r *http.Request) {
pLogger.Warnw("could not write initial response", err)
return
}
if signalStats != nil {
signalStats.AddBytes(uint64(count), true)
}
signalStats.AddBytes(uint64(count), true)
pLogger.Debugw("new client WS connected",
"connID", cr.ConnectionID,
"reconnect", pi.Reconnect,
@@ -349,20 +344,17 @@ func (s *RTCService) ServeHTTP(w http.ResponseWriter, r *http.Request) {
pLogger.Debugw("sending offer", "offer", m)
case *livekit.SignalResponse_Answer:
pLogger.Debugw("sending answer", "answer", m)
}
if pi.ID == "" && res.GetJoin() != nil {
pi.ID = livekit.ParticipantID(res.GetJoin().GetParticipant().GetSid())
signalStats = telemetry.NewBytesTrackStats(
telemetry.BytesTrackIDForParticipantID(telemetry.BytesTrackTypeSignal, pi.ID),
pi.ID,
s.telemetry)
case *livekit.SignalResponse_Join:
signalStats.ResolveRoom(m.Join.GetRoom())
signalStats.ResolveParticipant(m.Join.GetParticipant())
case *livekit.SignalResponse_RoomUpdate:
signalStats.ResolveRoom(m.RoomUpdate.GetRoom())
}
if count, err := sigConn.WriteResponse(res); err != nil {
pLogger.Warnw("error writing to websocket", err)
return
} else if signalStats != nil {
} else {
signalStats.AddBytes(uint64(count), true)
}
}
@@ -390,9 +382,7 @@ func (s *RTCService) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
return
}
if signalStats != nil {
signalStats.AddBytes(uint64(count), false)
}
signalStats.AddBytes(uint64(count), false)
switch m := req.Message.(type) {
case *livekit.SignalRequest_Ping:
@@ -405,7 +395,7 @@ func (s *RTCService) ServeHTTP(w http.ResponseWriter, r *http.Request) {
Pong: time.Now().UnixMilli(),
},
})
if perr == nil && signalStats != nil {
if perr == nil {
signalStats.AddBytes(uint64(count), true)
}
case *livekit.SignalRequest_PingReq:
@@ -417,7 +407,7 @@ func (s *RTCService) ServeHTTP(w http.ResponseWriter, r *http.Request) {
},
},
})
if perr == nil && signalStats != nil {
if perr == nil {
signalStats.AddBytes(uint64(count), true)
}
}
+1 -1
View File
@@ -63,7 +63,7 @@ func NewSignalServer(
nodeID,
&signalService{region, sessionHandler, config},
bus,
middleware.WithServerMetrics(prometheus.PSRPCMetricsObserver{}),
middleware.WithServerMetrics(rpc.PSRPCMetricsObserver{}),
psrpc.WithServerChannelSize(config.StreamBufferSize),
)
if err != nil {
+45 -15
View File
@@ -16,11 +16,13 @@ package service
import (
"context"
"fmt"
"time"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
"github.com/livekit/protocol/rpc"
"github.com/livekit/protocol/sip"
"github.com/livekit/protocol/utils"
"github.com/livekit/psrpc"
@@ -60,19 +62,34 @@ func (s *SIPService) CreateSIPTrunk(ctx context.Context, req *livekit.CreateSIPT
if s.store == nil {
return nil, ErrSIPNotConnected
}
info := &livekit.SIPTrunkInfo{
SipTrunkId: utils.NewGuid(utils.SIPTrunkPrefix),
InboundAddresses: req.InboundAddresses,
OutboundAddress: req.OutboundAddress,
OutboundNumber: req.OutboundNumber,
InboundNumbersRegex: req.InboundNumbersRegex,
InboundUsername: req.InboundUsername,
InboundPassword: req.InboundPassword,
OutboundUsername: req.OutboundUsername,
OutboundPassword: req.OutboundPassword,
if len(req.InboundNumbersRegex) != 0 {
return nil, fmt.Errorf("Trunks with InboundNumbersRegex are deprecated. Use InboundNumbers instead.")
}
// Keep ID empty, so that validation can print "<new>" instead of a non-existent ID in the error.
info := &livekit.SIPTrunkInfo{
InboundAddresses: req.InboundAddresses,
OutboundAddress: req.OutboundAddress,
OutboundNumber: req.OutboundNumber,
InboundNumbers: req.InboundNumbers,
InboundUsername: req.InboundUsername,
InboundPassword: req.InboundPassword,
OutboundUsername: req.OutboundUsername,
OutboundPassword: req.OutboundPassword,
}
// Validate all trunks including the new one first.
list, err := s.store.ListSIPTrunk(ctx)
if err != nil {
return nil, err
}
list = append(list, info)
if err = sip.ValidateTrunks(list); err != nil {
return nil, err
}
// Now we can generate ID and store.
info.SipTrunkId = utils.NewGuid(utils.SIPTrunkPrefix)
if err := s.store.StoreSIPTrunk(ctx, info); err != nil {
return nil, err
}
@@ -114,13 +131,25 @@ func (s *SIPService) CreateSIPDispatchRule(ctx context.Context, req *livekit.Cre
return nil, ErrSIPNotConnected
}
// Keep ID empty, so that validation can print "<new>" instead of a non-existent ID in the error.
info := &livekit.SIPDispatchRuleInfo{
SipDispatchRuleId: utils.NewGuid(utils.SIPDispatchRulePrefix),
Rule: req.Rule,
TrunkIds: req.TrunkIds,
HidePhoneNumber: req.HidePhoneNumber,
Rule: req.Rule,
TrunkIds: req.TrunkIds,
HidePhoneNumber: req.HidePhoneNumber,
}
// Validate all rules including the new one first.
list, err := s.store.ListSIPDispatchRule(ctx)
if err != nil {
return nil, err
}
list = append(list, info)
if err = sip.ValidateDispatchRules(list); err != nil {
return nil, err
}
// Now we can generate ID and store.
info.SipDispatchRuleId = utils.NewGuid(utils.SIPDispatchRulePrefix)
if err := s.store.StoreSIPDispatchRule(ctx, info); err != nil {
return nil, err
}
@@ -167,6 +196,7 @@ func (s *SIPService) CreateSIPParticipantWithToken(ctx context.Context, req *liv
CallTo: req.SipCallTo,
RoomName: req.RoomName,
ParticipantIdentity: req.ParticipantIdentity,
Dtmf: req.Dtmf,
WsUrl: wsUrl,
Token: token,
}
+1 -2
View File
@@ -32,7 +32,6 @@ import (
"github.com/livekit/livekit-server/pkg/routing"
"github.com/livekit/livekit-server/pkg/rtc"
"github.com/livekit/livekit-server/pkg/telemetry"
"github.com/livekit/livekit-server/pkg/telemetry/prometheus"
"github.com/livekit/protocol/auth"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
@@ -233,7 +232,7 @@ func getPSRPCConfig(config *config.Config) rpc.PSRPCConfig {
}
func getPSRPCClientParams(config rpc.PSRPCConfig, bus psrpc.MessageBus) rpc.ClientParams {
return rpc.NewClientParams(config, bus, logger.GetLogger(), prometheus.PSRPCMetricsObserver{})
return rpc.NewClientParams(config, bus, logger.GetLogger(), rpc.PSRPCMetricsObserver{})
}
func newInProcessTurnServer(conf *config.Config, authHandler turn.AuthHandler) (*turn.Server, error) {
+2 -3
View File
@@ -1,6 +1,6 @@
// Code generated by Wire. DO NOT EDIT.
//go:generate go run github.com/google/wire/cmd/wire
//go:generate go run -mod=mod github.com/google/wire/cmd/wire
//go:build !wireinject
// +build !wireinject
@@ -13,7 +13,6 @@ import (
"github.com/livekit/livekit-server/pkg/routing"
"github.com/livekit/livekit-server/pkg/rtc"
"github.com/livekit/livekit-server/pkg/telemetry"
"github.com/livekit/livekit-server/pkg/telemetry/prometheus"
"github.com/livekit/protocol/auth"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
@@ -284,7 +283,7 @@ func getPSRPCConfig(config2 *config.Config) rpc.PSRPCConfig {
}
func getPSRPCClientParams(config2 rpc.PSRPCConfig, bus psrpc.MessageBus) rpc.ClientParams {
return rpc.NewClientParams(config2, bus, logger.GetLogger(), prometheus.PSRPCMetricsObserver{})
return rpc.NewClientParams(config2, bus, logger.GetLogger(), rpc.PSRPCMetricsObserver{})
}
func newInProcessTurnServer(conf *config.Config, authHandler turn.AuthHandler) (*turn.Server, error) {
+53 -29
View File
@@ -44,6 +44,9 @@ import (
const (
ReportDelta = time.Second
InitPacketBufferSizeVideo = 300
InitPacketBufferSizeAudio = 70
)
type pendingPacket struct {
@@ -68,8 +71,8 @@ type Buffer struct {
sync.RWMutex
bucket *bucket.Bucket
nacker *nack.NackQueue
videoPool *sync.Pool
audioPool *sync.Pool
maxVideoPkts int
maxAudioPkts int
codecType webrtc.RTPCodecType
payloadType uint8
extPackets deque.Deque[*ExtPacket]
@@ -100,6 +103,7 @@ type Buffer struct {
rtpStats *RTPStatsReceiver
rrSnapshotId uint32
deltaStatsSnapshotId uint32
ppsSnapshotId uint32
lastFractionLostToReport uint8 // Last fraction lost from subscribers, should report to publisher; Audio only
@@ -126,18 +130,19 @@ type Buffer struct {
extPacketTooMuchCount atomic.Uint32
primaryBufferForRTX *Buffer
rtxPktBuf []byte
}
// NewBuffer constructs a new Buffer
func NewBuffer(ssrc uint32, vp, ap *sync.Pool) *Buffer {
func NewBuffer(ssrc uint32, maxVideoPkts, maxAudioPkts int) *Buffer {
l := logger.GetLogger() // will be reset with correct context via SetLogger
b := &Buffer{
mediaSSRC: ssrc,
videoPool: vp,
audioPool: ap,
snRangeMap: utils.NewRangeMap[uint64, uint64](100),
pliThrottle: int64(500 * time.Millisecond),
logger: l.WithComponent(sutils.ComponentPub).WithComponent(sutils.ComponentSFU),
mediaSSRC: ssrc,
maxVideoPkts: maxVideoPkts,
maxAudioPkts: maxAudioPkts,
snRangeMap: utils.NewRangeMap[uint64, uint64](100),
pliThrottle: int64(500 * time.Millisecond),
logger: l.WithComponent(sutils.ComponentPub).WithComponent(sutils.ComponentSFU),
}
b.extPackets.SetMinCapacity(7)
return b
@@ -188,6 +193,7 @@ func (b *Buffer) Bind(params webrtc.RTPParameters, codec webrtc.RTPCodecCapabili
})
b.rrSnapshotId = b.rtpStats.NewSnapshotId()
b.deltaStatsSnapshotId = b.rtpStats.NewSnapshotId()
b.ppsSnapshotId = b.rtpStats.NewSnapshotId()
b.clockRate = codec.ClockRate
b.lastReport = time.Now()
@@ -225,10 +231,10 @@ func (b *Buffer) Bind(params webrtc.RTPParameters, codec webrtc.RTPCodecCapabili
switch {
case strings.HasPrefix(b.mime, "audio/"):
b.codecType = webrtc.RTPCodecTypeAudio
b.bucket = bucket.NewBucket(b.audioPool.Get().(*[]byte))
b.bucket = bucket.NewBucket(InitPacketBufferSizeAudio)
case strings.HasPrefix(b.mime, "video/"):
b.codecType = webrtc.RTPCodecTypeVideo
b.bucket = bucket.NewBucket(b.videoPool.Get().(*[]byte))
b.bucket = bucket.NewBucket(InitPacketBufferSizeVideo)
if b.frameRateCalculator[0] == nil {
if strings.EqualFold(codec.MimeType, webrtc.MimeTypeVP8) {
b.frameRateCalculator[0] = NewFrameRateCalculatorVP8(b.clockRate, b.logger)
@@ -347,22 +353,23 @@ func (b *Buffer) writeRTX(rtxPkt *rtp.Packet) (n int, err error) {
return
}
videoPktPtr := b.videoPool.Get().(*[]byte)
defer b.videoPool.Put(videoPktPtr)
if b.rtxPktBuf == nil {
b.rtxPktBuf = make([]byte, bucket.MaxPktSize)
}
videoPkt := *rtxPkt
videoPkt.PayloadType = b.payloadType
videoPkt.SequenceNumber = binary.BigEndian.Uint16(rtxPkt.Payload[:2])
videoPkt.SSRC = b.mediaSSRC
videoPkt.Payload = rtxPkt.Payload[2:]
n, err = videoPkt.MarshalTo((*videoPktPtr))
n, err = videoPkt.MarshalTo(b.rtxPktBuf)
if err != nil {
b.logger.Errorw("could not marshal repaired packet", err, "ssrc", b.mediaSSRC, "sn", videoPkt.SequenceNumber)
return
}
b.calc((*videoPktPtr)[:n], &videoPkt, time.Now(), true)
b.calc(b.rtxPktBuf[:n], &videoPkt, time.Now(), true)
return
}
@@ -417,13 +424,6 @@ func (b *Buffer) Close() error {
defer b.Unlock()
b.closeOnce.Do(func() {
if b.bucket != nil && b.codecType == webrtc.RTPCodecTypeVideo {
b.videoPool.Put(b.bucket.Src())
}
if b.bucket != nil && b.codecType == webrtc.RTPCodecTypeAudio {
b.audioPool.Put(b.bucket.Src())
}
b.closed.Store(true)
if b.rtpStats != nil {
@@ -457,14 +457,14 @@ func (b *Buffer) SetPLIThrottle(duration int64) {
func (b *Buffer) SendPLI(force bool) {
b.RLock()
if (b.rtpStats == nil || b.rtpStats.TimeSinceLastPli() < b.pliThrottle) && !force {
b.RUnlock()
rtpStats := b.rtpStats
pliThrottle := b.pliThrottle
b.RUnlock()
if (rtpStats == nil && !force) || !rtpStats.CheckAndUpdatePli(pliThrottle, force) {
return
}
b.rtpStats.UpdatePliAndTime(1)
b.RUnlock()
b.logger.Debugw("send pli", "ssrc", b.mediaSSRC, "force", force)
pli := []rtcp.Packet{
&rtcp.PictureLossIndication{SenderSSRC: b.mediaSSRC, MediaSSRC: b.mediaSSRC},
@@ -785,6 +785,30 @@ func (b *Buffer) doReports(arrivalTime time.Time) {
if pkts != nil && b.onRtcpFeedback != nil {
b.onRtcpFeedback(pkts)
}
b.mayGrowBucket()
}
func (b *Buffer) mayGrowBucket() {
cap := b.bucket.Capacity()
maxPkts := b.maxVideoPkts
if b.codecType == webrtc.RTPCodecTypeAudio {
maxPkts = b.maxAudioPkts
}
if cap >= maxPkts {
return
}
oldCap := cap
deltaInfo := b.rtpStats.DeltaInfo(b.ppsSnapshotId)
if deltaInfo != nil && deltaInfo.Duration > 500*time.Millisecond {
pps := int(time.Duration(deltaInfo.Packets) * time.Second / deltaInfo.Duration)
for pps > cap && cap < maxPkts {
cap = b.bucket.Grow()
}
if cap > oldCap {
b.logger.Debugw("grow bucket", "from", oldCap, "to", cap, "pps", pps)
}
}
}
func (b *Buffer) buildNACKPacket() ([]rtcp.Packet, int) {
@@ -825,7 +849,7 @@ func (b *Buffer) SetSenderReportData(rtpTime uint32, ntpTime uint64) {
}
}
func (b *Buffer) GetSenderReportData() (*RTCPSenderReportData, *RTCPSenderReportData) {
func (b *Buffer) GetSenderReportData() *RTCPSenderReportData {
b.RLock()
defer b.RUnlock()
@@ -833,7 +857,7 @@ func (b *Buffer) GetSenderReportData() (*RTCPSenderReportData, *RTCPSenderReport
return b.rtpStats.GetRtcpSenderReportData()
}
return nil, nil
return nil
}
func (b *Buffer) SetLastFractionLostReport(lost uint8) {
+15 -23
View File
@@ -48,15 +48,8 @@ var opusCodec = webrtc.RTPCodecParameters{
}
func TestNack(t *testing.T) {
pool := &sync.Pool{
New: func() interface{} {
b := make([]byte, 1500)
return &b
},
}
t.Run("nack normal", func(t *testing.T) {
buff := NewBuffer(123, pool, pool)
buff := NewBuffer(123, 1, 1)
buff.codecType = webrtc.RTPCodecTypeVideo
require.NotNil(t, buff)
var wg sync.WaitGroup
@@ -101,7 +94,7 @@ func TestNack(t *testing.T) {
})
t.Run("nack with seq wrap", func(t *testing.T) {
buff := NewBuffer(123, pool, pool)
buff := NewBuffer(123, 1, 1)
buff.codecType = webrtc.RTPCodecTypeVideo
require.NotNil(t, buff)
var wg sync.WaitGroup
@@ -193,13 +186,7 @@ func TestNewBuffer(t *testing.T) {
},
},
}
pool := &sync.Pool{
New: func() interface{} {
b := make([]byte, 1500)
return &b
},
}
buff := NewBuffer(123, pool, pool)
buff := NewBuffer(123, 1, 1)
buff.codecType = webrtc.RTPCodecTypeVideo
require.NotNil(t, buff)
buff.OnRtcpFeedback(func(_ []rtcp.Packet) {})
@@ -219,13 +206,7 @@ func TestNewBuffer(t *testing.T) {
}
func TestFractionLostReport(t *testing.T) {
pool := &sync.Pool{
New: func() interface{} {
b := make([]byte, 1500)
return &b
},
}
buff := NewBuffer(123, pool, pool)
buff := NewBuffer(123, 1, 1)
require.NotNil(t, buff)
buff.codecType = webrtc.RTPCodecTypeVideo
var wg sync.WaitGroup
@@ -261,3 +242,14 @@ func TestFractionLostReport(t *testing.T) {
}
wg.Wait()
}
func BenchmarkMemcpu(b *testing.B) {
buf := make([]byte, 1500*1500*10)
buf2 := make([]byte, 1500*1500*20)
b.ResetTimer()
for i := 0; i < b.N; i++ {
copy(buf2, buf)
}
}
+16 -28
View File
@@ -19,49 +19,37 @@ import (
"sync"
"github.com/pion/transport/v2/packetio"
"github.com/livekit/mediatransportutil/pkg/bucket"
)
type FactoryOfBufferFactory struct {
videoPool *sync.Pool
audioPool *sync.Pool
trackingPacketsVideo int
trackingPacketsAudio int
}
func NewFactoryOfBufferFactory(trackingPackets int) *FactoryOfBufferFactory {
func NewFactoryOfBufferFactory(trackingPacketsVideo int, trackingPacketsAudio int) *FactoryOfBufferFactory {
return &FactoryOfBufferFactory{
videoPool: &sync.Pool{
New: func() interface{} {
b := make([]byte, trackingPackets*bucket.MaxPktSize)
return &b
},
},
audioPool: &sync.Pool{
New: func() interface{} {
b := make([]byte, bucket.MaxPktSize*200)
return &b
},
},
trackingPacketsVideo: trackingPacketsVideo,
trackingPacketsAudio: trackingPacketsAudio,
}
}
func (f *FactoryOfBufferFactory) CreateBufferFactory() *Factory {
return &Factory{
videoPool: f.videoPool,
audioPool: f.audioPool,
rtpBuffers: make(map[uint32]*Buffer),
rtcpReaders: make(map[uint32]*RTCPReader),
rtxPair: make(map[uint32]uint32),
trackingPacketsVideo: f.trackingPacketsVideo,
trackingPacketsAudio: f.trackingPacketsAudio,
rtpBuffers: make(map[uint32]*Buffer),
rtcpReaders: make(map[uint32]*RTCPReader),
rtxPair: make(map[uint32]uint32),
}
}
type Factory struct {
sync.RWMutex
videoPool *sync.Pool
audioPool *sync.Pool
rtpBuffers map[uint32]*Buffer
rtcpReaders map[uint32]*RTCPReader
rtxPair map[uint32]uint32 // repair -> base
trackingPacketsVideo int
trackingPacketsAudio int
rtpBuffers map[uint32]*Buffer
rtcpReaders map[uint32]*RTCPReader
rtxPair map[uint32]uint32 // repair -> base
}
func (f *Factory) GetOrNew(packetType packetio.BufferPacketType, ssrc uint32) io.ReadWriteCloser {
@@ -84,7 +72,7 @@ func (f *Factory) GetOrNew(packetType packetio.BufferPacketType, ssrc uint32) io
if reader, ok := f.rtpBuffers[ssrc]; ok {
return reader
}
buffer := NewBuffer(ssrc, f.videoPool, f.audioPool)
buffer := NewBuffer(ssrc, f.trackingPacketsVideo, f.trackingPacketsAudio)
f.rtpBuffers[ssrc] = buffer
for repair, base := range f.rtxPair {
if repair == ssrc {
+3 -3
View File
@@ -23,7 +23,7 @@ type FrameEntity struct {
endSeq *uint64
integrity bool
packetsConsective func(uint64, uint64) bool
pktHistory *PacketHistory
}
func (fe *FrameEntity) AddPacket(extSeq uint64, ddVal *dd.DependencyDescriptor) {
@@ -40,7 +40,7 @@ func (fe *FrameEntity) AddPacket(extSeq uint64, ddVal *dd.DependencyDescriptor)
}
if fe.startSeq != nil && fe.endSeq != nil {
if fe.packetsConsective(*fe.startSeq, *fe.endSeq) {
if fe.pktHistory.PacketsConsecutive(*fe.startSeq, *fe.endSeq) {
fe.integrity = true
}
}
@@ -179,7 +179,7 @@ func NewFrameIntegrityChecker(frameCount, packetCount int) *FrameIntegrityChecke
}
for i := range fc.frames {
fc.frames[i].packetsConsective = fc.pktHistory.PacketsConsecutive
fc.frames[i].pktHistory = fc.pktHistory
fc.frames[i].Reset()
}
return fc
+8 -5
View File
@@ -163,13 +163,16 @@ func (v *VP8) Unmarshal(payload []byte) error {
func (v *VP8) Marshal() ([]byte, error) {
buf := make([]byte, v.HeaderSize)
err := v.MarshalTo(buf)
return buf, err
n, err := v.MarshalTo(buf)
if err != nil {
return nil, err
}
return buf[:n], err
}
func (v *VP8) MarshalTo(buf []byte) error {
func (v *VP8) MarshalTo(buf []byte) (int, error) {
if len(buf) < v.HeaderSize {
return errShortPacket
return 0, errShortPacket
}
idx := 0
@@ -223,7 +226,7 @@ func (v *VP8) MarshalTo(buf []byte) error {
idx++
}
return nil
return idx, nil
}
// -------------------------------------
+65 -31
View File
@@ -19,6 +19,7 @@ import (
"sync"
"time"
"go.uber.org/zap/zapcore"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/livekit/mediatransportutil"
@@ -33,7 +34,7 @@ const (
cFirstSnapshotID = 1
cFirstPacketTimeAdjustWindow = 2 * time.Minute
cFirstPacketTimeAdjustThreshold = 5 * time.Minute
cFirstPacketTimeAdjustThreshold = 15 * time.Second
)
// -------------------------------------------------------
@@ -123,6 +124,18 @@ func (r *RTCPSenderReportData) ToString() string {
return fmt.Sprintf("ntp: %s, rtp: %d, extRtp: %d, at: %s", r.NTPTimestamp.Time().String(), r.RTPTimestamp, r.RTPTimestampExt, r.At.String())
}
func (r *RTCPSenderReportData) MarshalLogObject(e zapcore.ObjectEncoder) error {
if r == nil {
return nil
}
e.AddTime("NTPTimestamp", r.NTPTimestamp.Time())
e.AddUint32("RTPTimestamp", r.RTPTimestamp)
e.AddUint64("RTPTimestampExt", r.RTPTimestampExt)
e.AddTime("At", r.At)
return nil
}
// ------------------------------------------------------------------
type RTPStatsParams struct {
@@ -336,6 +349,18 @@ func (r *rtpStatsBase) UpdateNackProcessed(nackAckCount uint32, nackMissCount ui
r.nackRepeated += nackRepeatedCount
}
func (r *rtpStatsBase) CheckAndUpdatePli(throttle int64, force bool) bool {
r.lock.Lock()
defer r.lock.Unlock()
if !r.endTime.IsZero() || (!force && time.Now().UnixNano()-r.lastPli.UnixNano() < throttle) {
return false
}
r.updatePliLocked(1)
r.updatePliTimeLocked()
return true
}
func (r *rtpStatsBase) UpdatePliAndTime(pliCount uint32) {
r.lock.Lock()
defer r.lock.Unlock()
@@ -385,13 +410,6 @@ func (r *rtpStatsBase) LastPli() time.Time {
return r.lastPli
}
func (r *rtpStatsBase) TimeSinceLastPli() int64 {
r.lock.RLock()
defer r.lock.RUnlock()
return time.Now().UnixNano() - r.lastPli.UnixNano()
}
func (r *rtpStatsBase) UpdateLayerLockPliAndTime(pliCount uint32) {
r.lock.Lock()
defer r.lock.Unlock()
@@ -466,7 +484,7 @@ func (r *rtpStatsBase) GetRtt() uint32 {
return r.rtt
}
func (r *rtpStatsBase) maybeAdjustFirstPacketTime(ts uint32, startTS uint32) {
func (r *rtpStatsBase) maybeAdjustFirstPacketTime(srData *RTCPSenderReportData, tsOffset uint64, extStartTS uint64) {
if time.Since(r.startTime) > cFirstPacketTimeAdjustWindow {
return
}
@@ -477,7 +495,9 @@ func (r *rtpStatsBase) maybeAdjustFirstPacketTime(ts uint32, startTS uint32) {
// abnormal delay (maybe due to pacing or maybe due to queuing
// in some network element along the way), push back first time
// to an earlier instance.
samplesDiff := int32(ts - startTS)
timeSinceReceive := time.Since(srData.At)
extNowTS := srData.RTPTimestampExt - tsOffset + uint64(timeSinceReceive.Nanoseconds()*int64(r.params.ClockRate)/1e9)
samplesDiff := int64(extNowTS - extStartTS)
if samplesDiff < 0 {
// out-of-order, skip
return
@@ -487,28 +507,24 @@ func (r *rtpStatsBase) maybeAdjustFirstPacketTime(ts uint32, startTS uint32) {
timeSinceFirst := time.Since(r.firstTime)
now := r.firstTime.Add(timeSinceFirst)
firstTime := now.Add(-samplesDuration)
if firstTime.Before(r.firstTime) {
r.logger.Debugw(
"adjusting first packet time",
getFields := func() []interface{} {
return []interface{}{
"startTime", r.startTime.String(),
"nowTime", now.String(),
"before", r.firstTime.String(),
"after", firstTime.String(),
"adjustment", r.firstTime.Sub(firstTime).String(),
"nowTS", ts,
"startTS", startTS,
)
"extNowTS", extNowTS,
"extStartTS", extStartTS,
}
}
if firstTime.Before(r.firstTime) {
if r.firstTime.Sub(firstTime) > cFirstPacketTimeAdjustThreshold {
r.logger.Infow("first packet time adjustment too big, ignoring",
"startTime", r.startTime.String(),
"nowTime", now.String(),
"before", r.firstTime.String(),
"after", firstTime.String(),
"adjustment", r.firstTime.Sub(firstTime).String(),
"nowTS", ts,
"startTS", startTS,
)
r.logger.Infow("adjusting first packet time, too big, ignoring", getFields()...)
} else {
r.logger.Debugw("adjusting first packet time", getFields()...)
r.firstTime = firstTime
}
}
@@ -660,7 +676,7 @@ func (r *rtpStatsBase) toString(
str += ", rtt(ms):"
str += fmt.Sprintf("%d|%d", p.RttCurrent, p.RttMax)
str += fmt.Sprintf(", pd: %s, rd: %s", RTPDriftToString(p.PacketDrift), RTPDriftToString(p.ReportDrift))
str += fmt.Sprintf(", pd: %s, nrd: %s, rrd: %s", RTPDriftToString(p.PacketDrift), RTPDriftToString(p.ReportDrift), RTPDriftToString(p.RebasedReportDrift))
return str
}
@@ -701,7 +717,7 @@ func (r *rtpStatsBase) toProto(
jitterTime := jitter / float64(r.params.ClockRate) * 1e6
maxJitterTime := maxJitter / float64(r.params.ClockRate) * 1e6
packetDrift, reportDrift := r.getDrift(extStartTS, extHighestTS)
packetDrift, ntpReportDrift, rebasedReportDrift := r.getDrift(extStartTS, extHighestTS)
p := &livekit.RTPStats{
StartTime: timestamppb.New(r.startTime),
@@ -745,7 +761,8 @@ func (r *rtpStatsBase) toProto(
RttCurrent: r.rtt,
RttMax: r.maxRtt,
PacketDrift: packetDrift,
ReportDrift: reportDrift,
ReportDrift: ntpReportDrift,
RebasedReportDrift: rebasedReportDrift,
}
gapsPresent := false
@@ -827,7 +844,7 @@ func (r *rtpStatsBase) getAndResetSnapshot(snapshotID uint32, extStartSN uint64,
return &then, &now
}
func (r *rtpStatsBase) getDrift(extStartTS, extHighestTS uint64) (packetDrift *livekit.RTPDrift, reportDrift *livekit.RTPDrift) {
func (r *rtpStatsBase) getDrift(extStartTS, extHighestTS uint64) (packetDrift *livekit.RTPDrift, ntpReportDrift *livekit.RTPDrift, rebasedReportDrift *livekit.RTPDrift) {
if !r.firstTime.IsZero() {
elapsed := r.highestTime.Sub(r.firstTime)
rtpClockTicks := extHighestTS - extStartTS
@@ -848,11 +865,12 @@ func (r *rtpStatsBase) getDrift(extStartTS, extHighestTS uint64) (packetDrift *l
}
if r.srFirst != nil && r.srNewest != nil && r.srFirst.RTPTimestamp != r.srNewest.RTPTimestamp {
elapsed := r.srNewest.NTPTimestamp.Time().Sub(r.srFirst.NTPTimestamp.Time())
rtpClockTicks := r.srNewest.RTPTimestampExt - r.srFirst.RTPTimestampExt
elapsed := r.srNewest.NTPTimestamp.Time().Sub(r.srFirst.NTPTimestamp.Time())
driftSamples := int64(rtpClockTicks - uint64(elapsed.Nanoseconds()*int64(r.params.ClockRate)/1e9))
if elapsed.Seconds() > 0.0 {
reportDrift = &livekit.RTPDrift{
ntpReportDrift = &livekit.RTPDrift{
StartTime: timestamppb.New(r.srFirst.NTPTimestamp.Time()),
EndTime: timestamppb.New(r.srNewest.NTPTimestamp.Time()),
Duration: elapsed.Seconds(),
@@ -864,6 +882,22 @@ func (r *rtpStatsBase) getDrift(extStartTS, extHighestTS uint64) (packetDrift *l
ClockRate: float64(rtpClockTicks) / elapsed.Seconds(),
}
}
elapsed = r.srNewest.At.Sub(r.srFirst.At)
driftSamples = int64(rtpClockTicks - uint64(elapsed.Nanoseconds()*int64(r.params.ClockRate)/1e9))
if elapsed.Seconds() > 0.0 {
rebasedReportDrift = &livekit.RTPDrift{
StartTime: timestamppb.New(r.srFirst.At),
EndTime: timestamppb.New(r.srNewest.At),
Duration: elapsed.Seconds(),
StartTimestamp: r.srFirst.RTPTimestampExt,
EndTimestamp: r.srNewest.RTPTimestampExt,
RtpClockTicks: rtpClockTicks,
DriftSamples: driftSamples,
DriftMs: (float64(driftSamples) * 1000) / float64(r.params.ClockRate),
ClockRate: float64(rtpClockTicks) / elapsed.Seconds(),
}
}
}
return
}
+132 -27
View File
@@ -28,6 +28,32 @@ import (
const (
cHistorySize = 4096
// RTCP Sender Reports are re-based to SFU time base so that all subscriber side
// can have the same time base (i. e. SFU time base). To convert publisher side
// RTCP Sender Reports to SFU timebase, a propagation delay is maintained.
// propagation_delay = time_of_report_reception - ntp_timestamp_in_report
//
// Propagation delay is adapted continuously. If it falls, adapt quickly to the
// lower value as that could be the real propagation delay. If it rises, adapt slowly
// as it might be a temporary change or slow drift. See below for handling of high deltas
// which could be a result of a path change.
cPropagationDelayFallFactor = float64(0.95)
cPropagationDelayRiseFactor = float64(0.05)
// do not adapt to small OR large (outlier) changes
cPropagationDelayDeltaThresholdMin = 5 * time.Millisecond
cPropagationDelayDeltaThresholdMaxFactor = 2
// To account for path changes mid-stream, if the delta of the propagation delay is consistently higher, reset.
// Reset at whichever of the below happens later.
//
// A long term version of delta of propagation delay is maintained and delta propagation delay exceeding
// a factor of the long term version is considered a sharp increase. That will trigger the start of the
// path change condition and if it persists, propagation delay will be reset.
cPropagationDelayDeltaAdaptationFactor = float64(0.05)
cPropagationDelayDeltaHighResetNumReports = 3
cPropagationDelayDeltaHighResetWait = 10 * time.Second
)
type RTPFlowState struct {
@@ -53,6 +79,11 @@ type RTPStatsReceiver struct {
history *protoutils.Bitmap[uint64]
propagationDelay time.Duration
longTermDeltaPropagationDelay time.Duration
propagationDelayDeltaHighCount int
propagationDelayDeltaHighStartTime time.Time
clockSkewCount int
outOfOrderSsenderReportCount int
}
@@ -251,9 +282,10 @@ func (r *RTPStatsReceiver) SetRtcpSenderReportData(srData *RTCPSenderReportData)
// prevent against extreme case of anachronous sender reports
if r.srNewest != nil && r.srNewest.NTPTimestamp > srData.NTPTimestamp {
r.logger.Infow(
"received anachronous sender report",
"last", r.srNewest.ToString(),
"current", srData.ToString(),
"received sender report, anachronous, dropping",
"first", r.srFirst,
"last", r.srNewest,
"current", srData,
)
return
}
@@ -264,30 +296,33 @@ func (r *RTPStatsReceiver) SetRtcpSenderReportData(srData *RTCPSenderReportData)
if (srData.RTPTimestamp-r.srNewest.RTPTimestamp) < (1<<31) && srData.RTPTimestamp < r.srNewest.RTPTimestamp {
tsCycles += (1 << 32)
}
if tsCycles >= (1 << 32) {
if (srData.RTPTimestamp-r.srNewest.RTPTimestamp) >= (1<<31) && srData.RTPTimestamp > r.srNewest.RTPTimestamp {
tsCycles -= (1 << 32)
}
}
}
srDataCopy := *srData
srDataCopy.RTPTimestampExt = uint64(srDataCopy.RTPTimestamp) + tsCycles
r.maybeAdjustFirstPacketTime(srDataCopy.RTPTimestamp, r.timestamp.GetStart())
if r.srNewest != nil && srDataCopy.RTPTimestampExt < r.srNewest.RTPTimestampExt {
// This can happen when a track is replaced with a null and then restored -
// i.e. muting replaces with null and unmuting restores the original track.
// Under such a condition reset the sender reports to start from this point.
// Resetting will ensure sample rate calculations do not go haywire due to negative time.
// Or it could be due to bad report generation.
// In any case, ignore out-of-order reports.
if r.outOfOrderSsenderReportCount%10 == 0 {
r.logger.Infow(
"received sender report, out-of-order, resetting",
"last", r.srNewest.ToString(),
"current", srDataCopy.ToString(),
"received sender report, out-of-order, skipping",
"first", r.srFirst,
"last", r.srNewest,
"current", &srDataCopy,
"count", r.outOfOrderSsenderReportCount,
)
}
r.outOfOrderSsenderReportCount++
r.srFirst = nil
r.srNewest = nil
return
}
if r.srNewest != nil {
@@ -303,11 +338,15 @@ func (r *RTPStatsReceiver) SetRtcpSenderReportData(srData *RTCPSenderReportData)
(timeSinceFirst > 0.2 && math.Abs(float64(r.params.ClockRate)-calculatedClockRateFromFirst) > 0.2*float64(r.params.ClockRate)) {
if r.clockSkewCount%100 == 0 {
r.logger.Infow(
"clock rate skew",
"first", r.srFirst.ToString(),
"last", r.srNewest.ToString(),
"current", srDataCopy.ToString(),
"received sender report, clock skew",
"first", r.srFirst,
"last", r.srNewest,
"current", &srDataCopy,
"timeSinceFirst", timeSinceFirst,
"rtpDiffSinceFirst", rtpDiffSinceFirst,
"calculatedFirst", calculatedClockRateFromFirst,
"timeSinceLast", timeSinceLast,
"rtpDiffSinceLast", rtpDiffSinceLast,
"calculatedLast", calculatedClockRateFromLast,
"count", r.clockSkewCount,
)
@@ -316,26 +355,92 @@ func (r *RTPStatsReceiver) SetRtcpSenderReportData(srData *RTCPSenderReportData)
}
}
r.srNewest = &srDataCopy
var propagationDelay time.Duration
var deltaPropagationDelay time.Duration
getPropagationFields := func() []interface{} {
return []interface{}{
"propagationDelay", r.propagationDelay.String(),
"receivedPropagationDelay", propagationDelay.String(),
"longTermDeltaPropagationDelay", r.longTermDeltaPropagationDelay.String(),
"receivedDeltaPropagationDelay", deltaPropagationDelay.String(),
"deltaHighCount", r.propagationDelayDeltaHighCount,
"sinceDeltaHighStart", time.Since(r.propagationDelayDeltaHighStartTime).String(),
"first", r.srFirst,
"last", r.srNewest,
"current", &srDataCopy,
}
}
initPropagationDelay := func(pd time.Duration) {
r.propagationDelay = pd
r.longTermDeltaPropagationDelay = 0
r.propagationDelayDeltaHighCount = 0
r.propagationDelayDeltaHighStartTime = time.Time{}
}
ntpTime := srDataCopy.NTPTimestamp.Time()
propagationDelay = srDataCopy.At.Sub(ntpTime)
if r.srFirst == nil {
r.srFirst = &srDataCopy
initPropagationDelay(propagationDelay)
r.logger.Debugw("initializing propagation delay", getPropagationFields()...)
} else {
deltaPropagationDelay = propagationDelay - r.propagationDelay
if deltaPropagationDelay.Abs() > cPropagationDelayDeltaThresholdMin { // ignore small changes
if r.longTermDeltaPropagationDelay != 0 && deltaPropagationDelay > 0 && deltaPropagationDelay > r.longTermDeltaPropagationDelay*time.Duration(cPropagationDelayDeltaThresholdMaxFactor) {
r.logger.Debugw("sharp increase in propagation delay, skipping", getPropagationFields()...) // TODO-REMOVE
r.propagationDelayDeltaHighCount++
if r.propagationDelayDeltaHighStartTime.IsZero() {
r.propagationDelayDeltaHighStartTime = time.Now()
}
if r.propagationDelayDeltaHighCount >= cPropagationDelayDeltaHighResetNumReports && time.Since(r.propagationDelayDeltaHighStartTime) >= cPropagationDelayDeltaHighResetWait {
r.logger.Debugw("re-initializing propagation delay", append(getPropagationFields(), "newPropagationDelay", propagationDelay.String())...)
initPropagationDelay(propagationDelay)
}
} else {
r.propagationDelayDeltaHighCount = 0
r.propagationDelayDeltaHighStartTime = time.Time{}
if deltaPropagationDelay.Abs() > cPropagationDelayDeltaThresholdMin {
factor := cPropagationDelayFallFactor
if propagationDelay > r.propagationDelay {
factor = cPropagationDelayRiseFactor
}
fields := append(
getPropagationFields(),
"adjustedPropagationDelay", r.propagationDelay+time.Duration(factor*float64(propagationDelay-r.propagationDelay)),
) // TODO-REMOVE
r.logger.Debugw("adapting propagation delay", fields...) // TODO-REMOVE
r.propagationDelay += time.Duration(factor * float64(propagationDelay-r.propagationDelay))
}
}
} else {
r.propagationDelayDeltaHighCount = 0
r.propagationDelayDeltaHighStartTime = time.Time{}
}
if r.longTermDeltaPropagationDelay == 0 {
r.longTermDeltaPropagationDelay = deltaPropagationDelay
} else {
r.longTermDeltaPropagationDelay += time.Duration(cPropagationDelayDeltaAdaptationFactor * float64(deltaPropagationDelay-r.longTermDeltaPropagationDelay))
}
}
// adjust receive time to estimated propagation delay
srDataCopy.At = ntpTime.Add(r.propagationDelay)
r.srNewest = &srDataCopy
r.maybeAdjustFirstPacketTime(r.srNewest, 0, r.timestamp.GetExtendedStart())
}
func (r *RTPStatsReceiver) GetRtcpSenderReportData() (srFirst *RTCPSenderReportData, srNewest *RTCPSenderReportData) {
func (r *RTPStatsReceiver) GetRtcpSenderReportData() *RTCPSenderReportData {
r.lock.RLock()
defer r.lock.RUnlock()
if r.srFirst != nil {
srFirstCopy := *r.srFirst
srFirst = &srFirstCopy
if r.srNewest == nil {
return nil
}
if r.srNewest != nil {
srNewestCopy := *r.srNewest
srNewest = &srNewestCopy
}
return
srNewestCopy := *r.srNewest
return &srNewestCopy
}
func (r *RTPStatsReceiver) GetRtcpReceptionReport(ssrc uint32, proxyFracLost uint8, snapshotID uint32) *rtcp.ReceptionReport {
+69 -103
View File
@@ -29,6 +29,8 @@ import (
const (
cSnInfoSize = 4096
cSnInfoMask = cSnInfoSize - 1
cSenderReportInitialWait = time.Second
)
type snInfoFlag byte
@@ -157,12 +159,8 @@ type RTPStatsSender struct {
nextSenderSnapshotID uint32
senderSnapshots []senderSnapshot
clockSkewCount int
outOfOrderSenderReportCount int
metadataCacheOverflowCount int
srFeedFirst *RTCPSenderReportData
srFeedNewest *RTCPSenderReportData
clockSkewCount int
metadataCacheOverflowCount int
}
func NewRTPStatsSender(params RTPStatsParams) *RTPStatsSender {
@@ -201,15 +199,6 @@ func (r *RTPStatsSender) Seed(from *RTPStatsSender) {
r.nextSenderSnapshotID = from.nextSenderSnapshotID
r.senderSnapshots = make([]senderSnapshot, cap(from.senderSnapshots))
copy(r.senderSnapshots, from.senderSnapshots)
if from.srFeedFirst != nil {
srFeedFirst := *from.srFeedFirst
r.srFeedFirst = &srFeedFirst
}
if from.srFeedNewest != nil {
srFeedNewest := *from.srFeedNewest
r.srFeedNewest = &srFeedNewest
}
}
func (r *RTPStatsSender) NewSnapshotId() uint32 {
@@ -533,6 +522,33 @@ func (r *RTPStatsSender) UpdateFromReceiverReport(rr rtcp.ReceptionReport) (rtt
s.maxJitter = r.jitterFromRR
}
if int64(extReceivedRRSN-s.extLastRRSN) < 0 || (extReceivedRRSN-s.extLastRRSN) > (1<<15) {
timeSinceLastRR := time.Since(r.lastRRTime)
if r.lastRRTime.IsZero() {
timeSinceLastRR = time.Since(r.startTime)
}
r.logger.Infow(
"rr interval too big, skipping",
"lastRRTime", r.lastRRTime.String(),
"lastRR", r.lastRR,
"timeSinceLastRR", timeSinceLastRR.String(),
"receivedRR", rr,
"extStartSN", r.extStartSN,
"extHighestSN", r.extHighestSN,
"extStartTS", r.extStartTS,
"extHighestTS", r.extHighestTS,
"extLastRRSN", s.extLastRRSN,
"firstTime", r.firstTime.String(),
"startTime", r.startTime.String(),
"highestTime", r.highestTime.String(),
"extReceivedRRSN", extReceivedRRSN,
"packetsInInterval", extReceivedRRSN-s.extLastRRSN,
"extHighestSNFromRR", r.extHighestSNFromRR,
"packetsLostFromRR", r.packetsLostFromRR,
)
continue
}
// on every RR, calculate delta since last RR using packet metadata cache
is := r.getIntervalStats(s.extLastRRSN+1, extReceivedRRSN+1, r.extHighestSN)
eis := &s.intervalStats
@@ -583,17 +599,15 @@ func (r *RTPStatsSender) LastReceiverReportTime() time.Time {
return r.lastRRTime
}
func (r *RTPStatsSender) MaybeAdjustFirstPacketTime(srFirst *RTCPSenderReportData, srNewest *RTCPSenderReportData, ts uint32) {
func (r *RTPStatsSender) MaybeAdjustFirstPacketTime(publisherSRData *RTCPSenderReportData, tsOffset uint64) {
r.lock.Lock()
defer r.lock.Unlock()
srFirstCopy := *srFirst
r.srFeedFirst = &srFirstCopy
if !r.initialized || publisherSRData == nil {
return
}
srNewestCopy := *srNewest
r.srFeedNewest = &srNewestCopy
r.maybeAdjustFirstPacketTime(ts, uint32(r.extStartTS))
r.maybeAdjustFirstPacketTime(publisherSRData, tsOffset, r.extStartTS)
}
func (r *RTPStatsSender) GetExpectedRTPTimestamp(at time.Time) (expectedTSExt uint64, err error) {
@@ -611,116 +625,68 @@ func (r *RTPStatsSender) GetExpectedRTPTimestamp(at time.Time) (expectedTSExt ui
return
}
func (r *RTPStatsSender) GetRtcpSenderReport(ssrc uint32, calculatedClockRate uint32) *rtcp.SenderReport {
func (r *RTPStatsSender) GetRtcpSenderReport(ssrc uint32, publisherSRData *RTCPSenderReportData, tsOffset uint64) *rtcp.SenderReport {
r.lock.Lock()
defer r.lock.Unlock()
if !r.initialized {
if !r.initialized || publisherSRData == nil {
return nil
}
// construct current time based on monotonic clock
timeSinceFirst := time.Since(r.firstTime)
now := r.firstTime.Add(timeSinceFirst)
timeSincePublisherSR := time.Since(publisherSRData.At)
now := publisherSRData.At.Add(timeSincePublisherSR)
nowNTP := mediatransportutil.ToNtpTime(now)
timeSinceHighest := now.Sub(r.highestTime)
nowRTPExt := r.extHighestTS + uint64(timeSinceHighest.Nanoseconds()*int64(r.params.ClockRate)/1e9)
nowRTPExtUsingTime := nowRTPExt
nowRTP := uint32(nowRTPExt)
// It is possible that publisher is pacing at a slower rate.
// That would make `highestTS` to be lagging the RTP time stamp in the RTCP Sender Report from publisher.
// Check for that using calculated clock rate and use the later time stamp if applicable.
var nowRTPExtUsingRate uint64
if calculatedClockRate != 0 {
nowRTPExtUsingRate = r.extStartTS + uint64(float64(calculatedClockRate)*timeSinceFirst.Seconds())
if nowRTPExtUsingRate > nowRTPExt {
nowRTPExt = nowRTPExtUsingRate
nowRTP = uint32(nowRTPExt)
}
}
nowRTPExt := publisherSRData.RTPTimestampExt - tsOffset + uint64(timeSincePublisherSR.Nanoseconds()*int64(r.params.ClockRate)/1e9)
srData := &RTCPSenderReportData{
NTPTimestamp: nowNTP,
RTPTimestamp: nowRTP,
RTPTimestamp: uint32(nowRTPExt),
RTPTimestampExt: nowRTPExt,
At: now,
}
getFields := func() []interface{} {
return []interface{}{
"first", r.srFirst,
"last", r.srNewest,
"curr", srData,
"feed", publisherSRData,
"tsOffset", tsOffset,
"timeNow", time.Now().String(),
"extStartTS", r.extStartTS,
"extHighestTS", r.extHighestTS,
"highestTime", r.highestTime.String(),
"timeSinceHighest", now.Sub(r.highestTime).String(),
"firstTime", r.firstTime.String(),
"timeSinceFirst", now.Sub(r.firstTime).String(),
"timeSincePublisherSR", timeSincePublisherSR.String(),
"nowRTPExt", nowRTPExt,
}
}
if r.srNewest != nil && nowRTPExt >= r.srNewest.RTPTimestampExt {
timeSinceLastReport := nowNTP.Time().Sub(r.srNewest.NTPTimestamp.Time())
rtpDiffSinceLastReport := nowRTPExt - r.srNewest.RTPTimestampExt
windowClockRate := float64(rtpDiffSinceLastReport) / timeSinceLastReport.Seconds()
if timeSinceLastReport.Seconds() > 0.2 && math.Abs(float64(r.params.ClockRate)-windowClockRate) > 0.2*float64(r.params.ClockRate) {
if r.clockSkewCount%10 == 0 {
r.logger.Infow(
"sending sender report, clock skew",
"first", r.srFirst.ToString(),
"last", r.srNewest.ToString(),
"curr", srData.ToString(),
"firstFeed", r.srFeedFirst.ToString(),
"lastFeed", r.srFeedNewest.ToString(),
"timeNow", time.Now().String(),
"extStartTS", r.extStartTS,
"extHighestTS", r.extHighestTS,
"highestTime", r.highestTime.String(),
"timeSinceHighest", timeSinceHighest.String(),
"firstTime", r.firstTime.String(),
"timeSinceFirst", timeSinceFirst.String(),
"nowRTPExtUsingTime", nowRTPExtUsingTime,
"calculatedClockRate", calculatedClockRate,
"nowRTPExtUsingRate", nowRTPExtUsingRate,
fields := append(
getFields(),
"timeSinceLastReport", timeSinceLastReport.String(),
"rtpDiffSinceLastReport", rtpDiffSinceLastReport,
"windowClockRate", windowClockRate,
"count", r.clockSkewCount,
)
r.logger.Infow("sending sender report, clock skew", fields...)
}
r.clockSkewCount++
}
}
if r.srNewest != nil && nowRTPExt < r.srNewest.RTPTimestampExt {
// If report being generated is behind, use the time difference and
// clock rate of codec to produce next report.
//
// Current report could be behind due to the following
// - Publisher pacing
// - Due to above, report from publisher side is ahead of packet timestamps.
// Note that report will map wall clock to timestamp at capture time and happens before the pacer.
// - Pause/Mute followed by resume, some combination of events that could
// result in this module not having calculated clock rate of publisher side.
// - When the above happens, current will be generated using highestTS which could be behind.
// That could end up behind the last report's timestamp in extreme cases
if r.outOfOrderSenderReportCount%10 == 0 {
r.logger.Infow(
"sending sender report, out-of-order, repairing",
"first", r.srFirst.ToString(),
"last", r.srNewest.ToString(),
"curr", srData.ToString(),
"firstFeed", r.srFeedFirst.ToString(),
"lastFeed", r.srFeedNewest.ToString(),
"timeNow", time.Now().String(),
"extStartTS", r.extStartTS,
"extHighestTS", r.extHighestTS,
"highestTime", r.highestTime.String(),
"timeSinceHighest", timeSinceHighest.String(),
"firstTime", r.firstTime.String(),
"timeSinceFirst", timeSinceFirst.String(),
"nowRTPExtUsingTime", nowRTPExtUsingTime,
"calculatedClockRate", calculatedClockRate,
"nowRTPExtUsingRate", nowRTPExtUsingRate,
"count", r.outOfOrderSenderReportCount,
)
}
r.outOfOrderSenderReportCount++
ntpDiffSinceLast := nowNTP.Time().Sub(r.srNewest.NTPTimestamp.Time())
nowRTPExt = r.srNewest.RTPTimestampExt + uint64(ntpDiffSinceLast.Seconds()*float64(r.params.ClockRate))
nowRTP = uint32(nowRTPExt)
srData.RTPTimestamp = nowRTP
srData.RTPTimestampExt = nowRTPExt
// If report being generated is behind the last report, skip it.
// Should not happen.
r.logger.Infow("sending sender report, out-of-order, skipping", getFields()...)
return nil
}
r.srNewest = srData
@@ -731,7 +697,7 @@ func (r *RTPStatsSender) GetRtcpSenderReport(ssrc uint32, calculatedClockRate ui
return &rtcp.SenderReport{
SSRC: ssrc,
NTPTime: uint64(nowNTP),
RTPTime: nowRTP,
RTPTime: uint32(nowRTPExt),
PacketCount: uint32(r.getTotalPacketsPrimary(r.extStartSN, r.extHighestSN) + r.packetsDuplicate + r.packetsPadding),
OctetCount: uint32(r.bytes + r.bytesDuplicate + r.bytesPadding),
}
+2 -2
View File
@@ -33,7 +33,7 @@ type CodecMunger interface {
SetLast(extPkt *buffer.ExtPacket)
UpdateOffsets(extPkt *buffer.ExtPacket)
UpdateAndGet(extPkt *buffer.ExtPacket, snOutOfOrder bool, snHasGap bool, maxTemporal int32) ([]byte, error)
UpdateAndGet(extPkt *buffer.ExtPacket, snOutOfOrder bool, snHasGap bool, maxTemporal int32, outputHeader []byte) (int, int, error)
UpdateAndGetPadding(newPicture bool) ([]byte, error)
UpdateAndGetPadding(newPicture bool, outputHeader []byte) (int, error)
}
+4 -4
View File
@@ -45,10 +45,10 @@ func (n *Null) SetLast(_extPkt *buffer.ExtPacket) {
func (n *Null) UpdateOffsets(_extPkt *buffer.ExtPacket) {
}
func (n *Null) UpdateAndGet(_extPkt *buffer.ExtPacket, snOutOfOrder bool, snHasGap bool, maxTemporal int32) ([]byte, error) {
return nil, nil
func (n *Null) UpdateAndGet(_extPkt *buffer.ExtPacket, snOutOfOrder bool, snHasGap bool, maxTemporal int32, outputHeader []byte) (int, int, error) {
return 0, 0, nil
}
func (n *Null) UpdateAndGetPadding(newPicture bool) ([]byte, error) {
return nil, nil
func (n *Null) UpdateAndGetPadding(newPicture bool, outputHeader []byte) (int, error) {
return 0, nil
}
+16 -8
View File
@@ -158,10 +158,10 @@ func (v *VP8) UpdateOffsets(extPkt *buffer.ExtPacket) {
v.exemptedPictureIds = orderedmap.NewOrderedMap[int32, bool]()
}
func (v *VP8) UpdateAndGet(extPkt *buffer.ExtPacket, snOutOfOrder bool, snHasGap bool, maxTemporalLayer int32) ([]byte, error) {
func (v *VP8) UpdateAndGet(extPkt *buffer.ExtPacket, snOutOfOrder bool, snHasGap bool, maxTemporalLayer int32, outputHeader []byte) (int, int, error) {
vp8, ok := extPkt.Payload.(buffer.VP8)
if !ok {
return nil, ErrNotVP8
return 0, 0, ErrNotVP8
}
extPictureId := v.pictureIdWrapHandler.Unwrap(vp8.PictureID, vp8.M)
@@ -170,7 +170,7 @@ func (v *VP8) UpdateAndGet(extPkt *buffer.ExtPacket, snOutOfOrder bool, snHasGap
if snOutOfOrder {
pictureIdOffset, ok := v.missingPictureIds.Get(extPictureId)
if !ok {
return nil, ErrOutOfOrderVP8PictureIdCacheMiss
return 0, 0, ErrOutOfOrderVP8PictureIdCacheMiss
}
// the out-of-order picture id cannot be deleted from the cache
@@ -195,7 +195,11 @@ func (v *VP8) UpdateAndGet(extPkt *buffer.ExtPacket, snOutOfOrder bool, snHasGap
IsKeyFrame: vp8.IsKeyFrame,
HeaderSize: vp8.HeaderSize + buffer.VPxPictureIdSizeDiff(mungedPictureId > 127, vp8.M),
}
return vp8Packet.Marshal()
n, err := vp8Packet.MarshalTo(outputHeader)
if err != nil {
return 0, 0, err
}
return vp8.HeaderSize, n, nil
}
prevMaxPictureId := v.pictureIdWrapHandler.MaxPictureId()
@@ -263,7 +267,7 @@ func (v *VP8) UpdateAndGet(extPkt *buffer.ExtPacket, snOutOfOrder bool, snHasGap
v.pictureIdOffset += 1
}
return nil, ErrFilteredVP8TemporalLayer
return 0, 0, ErrFilteredVP8TemporalLayer
}
}
}
@@ -298,10 +302,14 @@ func (v *VP8) UpdateAndGet(extPkt *buffer.ExtPacket, snOutOfOrder bool, snHasGap
IsKeyFrame: vp8.IsKeyFrame,
HeaderSize: vp8.HeaderSize + buffer.VPxPictureIdSizeDiff(mungedPictureId > 127, vp8.M),
}
return vp8Packet.Marshal()
n, err := vp8Packet.MarshalTo(outputHeader)
if err != nil {
return 0, 0, err
}
return vp8.HeaderSize, n, nil
}
func (v *VP8) UpdateAndGetPadding(newPicture bool) ([]byte, error) {
func (v *VP8) UpdateAndGetPadding(newPicture bool, outputHeader []byte) (int, error) {
offset := 0
if newPicture {
offset = 1
@@ -359,7 +367,7 @@ func (v *VP8) UpdateAndGetPadding(newPicture bool) ([]byte, error) {
IsKeyFrame: true,
HeaderSize: headerSize,
}
return vp8Packet.Marshal()
return vp8Packet.MarshalTo(outputHeader)
}
// for testing only
+33 -21
View File
@@ -166,6 +166,7 @@ func TestUpdateOffsets(t *testing.T) {
func TestOutOfOrderPictureId(t *testing.T) {
v := newVP8()
buf := make([]byte, 100)
params := &testutils.TestExtPacketParams{
SequenceNumber: 23333,
@@ -189,16 +190,17 @@ func TestOutOfOrderPictureId(t *testing.T) {
}
extPkt, _ := testutils.GetTestExtPacketVP8(params, vp8)
v.SetLast(extPkt)
v.UpdateAndGet(extPkt, false, false, 2)
v.UpdateAndGet(extPkt, false, false, 2, buf)
// out-of-order sequence number not in the missing picture id cache
vp8.PictureID = 13466
extPkt, _ = testutils.GetTestExtPacketVP8(params, vp8)
codecBytes, err := v.UpdateAndGet(extPkt, true, false, 2)
nIn, nOut, err := v.UpdateAndGet(extPkt, true, false, 2, buf)
require.Error(t, err)
require.ErrorIs(t, err, ErrOutOfOrderVP8PictureIdCacheMiss)
require.Nil(t, codecBytes)
require.Equal(t, 0, nIn)
require.Equal(t, 0, nOut)
// create a hole in picture id
vp8.PictureID = 13469
@@ -221,9 +223,10 @@ func TestOutOfOrderPictureId(t *testing.T) {
}
marshalledVP8, err := expectedVP8.Marshal()
require.NoError(t, err)
codecBytes, err = v.UpdateAndGet(extPkt, false, true, 2)
nIn, nOut, err = v.UpdateAndGet(extPkt, false, true, 2, buf)
require.NoError(t, err)
require.Equal(t, marshalledVP8, codecBytes)
require.Equal(t, 6, nIn)
require.Equal(t, marshalledVP8, buf[:nOut])
// all three, the last, the current and the in-between should have been added to missing picture id cache
value, ok := v.PictureIdOffset(13467)
@@ -259,13 +262,15 @@ func TestOutOfOrderPictureId(t *testing.T) {
}
marshalledVP8, err = expectedVP8.Marshal()
require.NoError(t, err)
codecBytes, err = v.UpdateAndGet(extPkt, true, false, 2)
nIn, nOut, err = v.UpdateAndGet(extPkt, true, false, 2, buf)
require.NoError(t, err)
require.Equal(t, marshalledVP8, codecBytes)
require.Equal(t, 6, nIn)
require.Equal(t, marshalledVP8, buf[:nOut])
}
func TestTemporalLayerFiltering(t *testing.T) {
v := newVP8()
buf := make([]byte, 100)
params := &testutils.TestExtPacketParams{
SequenceNumber: 23333,
@@ -291,10 +296,11 @@ func TestTemporalLayerFiltering(t *testing.T) {
v.SetLast(extPkt)
// translate
tp, err := v.UpdateAndGet(extPkt, false, false, 0)
nIn, nOut, err := v.UpdateAndGet(extPkt, false, false, 0, buf)
require.Error(t, err)
require.ErrorIs(t, err, ErrFilteredVP8TemporalLayer)
require.Nil(t, tp)
require.Equal(t, 0, nIn)
require.Equal(t, 0, nOut)
dropped, _ := v.droppedPictureIds.Get(13467)
require.True(t, dropped)
require.EqualValues(t, 1, v.pictureIdOffset)
@@ -304,10 +310,11 @@ func TestTemporalLayerFiltering(t *testing.T) {
params.SequenceNumber = 23334
extPkt, _ = testutils.GetTestExtPacketVP8(params, vp8)
tp, err = v.UpdateAndGet(extPkt, false, false, 0)
nIn, nOut, err = v.UpdateAndGet(extPkt, false, false, 0, buf)
require.Error(t, err)
require.ErrorIs(t, err, ErrFilteredVP8TemporalLayer)
require.Nil(t, tp)
require.Equal(t, 0, nIn)
require.Equal(t, 0, nOut)
dropped, _ = v.droppedPictureIds.Get(13467)
require.True(t, dropped)
require.EqualValues(t, 1, v.pictureIdOffset)
@@ -317,10 +324,11 @@ func TestTemporalLayerFiltering(t *testing.T) {
params.SequenceNumber = 23337
extPkt, _ = testutils.GetTestExtPacketVP8(params, vp8)
tp, err = v.UpdateAndGet(extPkt, false, false, 0)
nIn, nOut, err = v.UpdateAndGet(extPkt, false, false, 0, buf)
require.Error(t, err)
require.ErrorIs(t, err, ErrFilteredVP8TemporalLayer)
require.Nil(t, tp)
require.Equal(t, 0, nIn)
require.Equal(t, 0, nOut)
dropped, _ = v.droppedPictureIds.Get(13467)
require.True(t, dropped)
require.EqualValues(t, 1, v.pictureIdOffset)
@@ -328,6 +336,7 @@ func TestTemporalLayerFiltering(t *testing.T) {
func TestGapInSequenceNumberSamePicture(t *testing.T) {
v := newVP8()
buf := make([]byte, 100)
params := &testutils.TestExtPacketParams{
SequenceNumber: 65533,
@@ -370,9 +379,10 @@ func TestGapInSequenceNumberSamePicture(t *testing.T) {
}
marshalledVP8, err := expectedVP8.Marshal()
require.NoError(t, err)
codecBytes, err := v.UpdateAndGet(extPkt, false, false, 2)
nIn, nOut, err := v.UpdateAndGet(extPkt, false, false, 2, buf)
require.NoError(t, err)
require.Equal(t, marshalledVP8, codecBytes)
require.Equal(t, 6, nIn)
require.Equal(t, marshalledVP8, buf[:nOut])
// telling there is a gap in sequence number will add pictures to missing picture cache
expectedVP8 = &buffer.VP8{
@@ -392,9 +402,10 @@ func TestGapInSequenceNumberSamePicture(t *testing.T) {
}
marshalledVP8, err = expectedVP8.Marshal()
require.NoError(t, err)
codecBytes, err = v.UpdateAndGet(extPkt, false, true, 2)
nIn, nOut, err = v.UpdateAndGet(extPkt, false, true, 2, buf)
require.NoError(t, err)
require.Equal(t, marshalledVP8, codecBytes)
require.Equal(t, 6, nIn)
require.Equal(t, marshalledVP8, buf[:nOut])
value, ok := v.PictureIdOffset(13467)
require.True(t, ok)
@@ -403,6 +414,7 @@ func TestGapInSequenceNumberSamePicture(t *testing.T) {
func TestUpdateAndGetPadding(t *testing.T) {
v := newVP8()
buf := make([]byte, 100)
params := &testutils.TestExtPacketParams{
SequenceNumber: 23333,
@@ -430,7 +442,7 @@ func TestUpdateAndGetPadding(t *testing.T) {
v.SetLast(extPkt)
// getting padding with repeat of last picture
blankBytes, err := v.UpdateAndGetPadding(false)
n, err := v.UpdateAndGetPadding(false, buf)
require.NoError(t, err)
expectedVP8 := buffer.VP8{
FirstByte: 16,
@@ -449,10 +461,10 @@ func TestUpdateAndGetPadding(t *testing.T) {
}
marshalledVP8, err := expectedVP8.Marshal()
require.NoError(t, err)
require.Equal(t, marshalledVP8, blankBytes)
require.Equal(t, marshalledVP8, buf[:n])
// getting padding with new picture
blankBytes, err = v.UpdateAndGetPadding(true)
n, err = v.UpdateAndGetPadding(true, buf)
require.NoError(t, err)
expectedVP8 = buffer.VP8{
FirstByte: 16,
@@ -471,7 +483,7 @@ func TestUpdateAndGetPadding(t *testing.T) {
}
marshalledVP8, err = expectedVP8.Marshal()
require.NoError(t, err)
require.Equal(t, marshalledVP8, blankBytes)
require.Equal(t, marshalledVP8, buf[:n])
}
func TestVP8PictureIdWrapHandler(t *testing.T) {
+2 -2
View File
@@ -23,9 +23,10 @@ import (
"github.com/pion/webrtc/v3"
"go.uber.org/atomic"
"github.com/livekit/livekit-server/pkg/sfu/buffer"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
"github.com/livekit/livekit-server/pkg/sfu/buffer"
)
const (
@@ -80,7 +81,6 @@ func NewConnectionStats(params ConnectionStatsParams) *ConnectionStats {
IncludeJitter: params.IncludeJitter,
Logger: params.Logger,
}),
done: core.NewFuse(),
}
}
+18
View File
@@ -23,6 +23,7 @@ import (
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
"github.com/livekit/protocol/utils"
"go.uber.org/zap/zapcore"
)
const (
@@ -159,6 +160,23 @@ func (w *windowStat) String() string {
)
}
// MarshalLogObject implements zapcore.ObjectMarshaler so a *windowStat can be
// emitted as a single structured object in zap log output.
// It returns nil (success) even for a nil receiver, encoding nothing in that case.
func (w *windowStat) MarshalLogObject(e zapcore.ObjectEncoder) error {
	// Nil-receiver guard: callers may log an absent window directly.
	if w == nil {
		return nil
	}
	e.AddTime("startedAt", w.startedAt)
	// Duration is stringified for readability rather than logged as raw nanoseconds.
	e.AddString("duration", w.duration.String())
	e.AddUint32("packetsExpected", w.packetsExpected)
	e.AddUint32("packetsLost", w.packetsLost)
	e.AddUint32("packetsMissing", w.packetsMissing)
	e.AddUint32("packetsOutOfOrder", w.packetsOutOfOrder)
	e.AddUint64("bytes", w.bytes)
	e.AddUint32("rttMax", w.rttMax)
	e.AddFloat64("jitterMax", w.jitterMax)
	return nil
}
// ------------------------------------------
type qualityScorerParams struct {
+141 -117
View File
@@ -59,8 +59,7 @@ type TrackSender interface {
payloadType webrtc.PayloadType,
isSVC bool,
layer int32,
srFirst *buffer.RTCPSenderReportData,
srNewest *buffer.RTCPSenderReportData,
publisherSRData *buffer.RTCPSenderReportData,
) error
HandleTrackFrameRateReport(payloadType webrtc.PayloadType, fps [][]float32) error
}
@@ -90,9 +89,9 @@ var (
ErrOutOfOrderSequenceNumberCacheMiss = errors.New("out-of-order sequence number not found in cache")
ErrPaddingOnlyPacket = errors.New("padding only packet that need not be forwarded")
ErrDuplicatePacket = errors.New("duplicate packet")
ErrSequenceNumberOffsetNotFound = errors.New("sequence number offset not found")
ErrPaddingNotOnFrameBoundary = errors.New("padding cannot send on non-frame boundary")
ErrDownTrackAlreadyBound = errors.New("already bound")
ErrPayloadOverflow = errors.New("payload overflow")
)
var (
@@ -207,6 +206,7 @@ type DowntrackParams struct {
Pacer pacer.Pacer
Logger logger.Logger
Trailer []byte
RTCPWriter func([]rtcp.Packet) error
}
// DownTrack implements TrackLocal, is the track used to write packets
@@ -275,7 +275,7 @@ type DownTrack struct {
pacer pacer.Pacer
maxLayerNotifierChMu sync.RWMutex
maxLayerNotifierCh chan struct{}
maxLayerNotifierCh chan string
maxLayerNotifierChClosed bool
keyFrameRequesterChMu sync.RWMutex
@@ -287,6 +287,8 @@ type DownTrack struct {
onMaxSubscribedLayerChanged func(dt *DownTrack, layer int32)
onRttUpdate func(dt *DownTrack, rtt uint32)
onCloseHandler func(willBeResumed bool)
createdAt int64
}
// NewDownTrack returns a DownTrack.
@@ -309,8 +311,9 @@ func NewDownTrack(params DowntrackParams) (*DownTrack, error) {
kind: kind,
codec: codecs[0].RTPCodecCapability,
pacer: params.Pacer,
maxLayerNotifierCh: make(chan struct{}, 1),
maxLayerNotifierCh: make(chan string, 1),
keyFrameRequesterCh: make(chan struct{}, 1),
createdAt: time.Now().UnixNano(),
}
d.forwarder = NewForwarder(
d.kind,
@@ -325,14 +328,6 @@ func NewDownTrack(params DowntrackParams) (*DownTrack, error) {
})
d.deltaStatsSenderSnapshotId = d.rtpStats.NewSenderSnapshotId()
if delay := params.PlayoutDelayLimit; delay.GetEnabled() {
var err error
d.playoutDelay, err = NewPlayoutDelayController(delay.GetMin(), delay.GetMax(), params.Logger, d.rtpStats)
if err != nil {
return nil, err
}
}
d.connectionStats = connectionquality.NewConnectionStats(connectionquality.ConnectionStatsParams{
MimeType: codecs[0].MimeType, // LK-TODO have to notify on codec change
IsFECEnabled: strings.EqualFold(codecs[0].MimeType, webrtc.MimeTypeOpus) && strings.Contains(strings.ToLower(codecs[0].SDPFmtpLine), "fec"),
@@ -346,6 +341,13 @@ func NewDownTrack(params DowntrackParams) (*DownTrack, error) {
})
if d.kind == webrtc.RTPCodecTypeVideo {
if delay := params.PlayoutDelayLimit; delay.GetEnabled() {
var err error
d.playoutDelay, err = NewPlayoutDelayController(delay.GetMin(), delay.GetMax(), params.Logger, d.rtpStats)
if err != nil {
return nil, err
}
}
go d.maxLayerNotifierWorker()
go d.keyFrameRequester()
}
@@ -525,7 +527,10 @@ func (d *DownTrack) Codec() webrtc.RTPCodecCapability { return d.codec }
// StreamID is the group this track belongs too. This must be unique
func (d *DownTrack) StreamID() string { return d.params.StreamID }
func (d *DownTrack) SubscriberID() livekit.ParticipantID { return d.params.SubID }
func (d *DownTrack) SubscriberID() livekit.ParticipantID {
// add `createdAt` to ensure repeated subscriptions from the same subscriber to the same publisher do not collide
return livekit.ParticipantID(fmt.Sprintf("%s:%d", d.params.SubID, d.createdAt))
}
// Sets RTP header extensions for this track
func (d *DownTrack) SetRTPHeaderExtensions(rtpHeaderExtensions []webrtc.RTPHeaderExtensionParameter) {
@@ -643,7 +648,7 @@ func (d *DownTrack) keyFrameRequester() {
}
}
func (d *DownTrack) postMaxLayerNotifierEvent() {
func (d *DownTrack) postMaxLayerNotifierEvent(event string) {
if d.kind != webrtc.RTPCodecTypeVideo {
return
}
@@ -651,27 +656,29 @@ func (d *DownTrack) postMaxLayerNotifierEvent() {
d.maxLayerNotifierChMu.RLock()
if !d.maxLayerNotifierChClosed {
select {
case d.maxLayerNotifierCh <- struct{}{}:
case d.maxLayerNotifierCh <- event:
default:
d.params.Logger.Debugw("max layer notifier channel busy", "event", event)
}
}
d.maxLayerNotifierChMu.RUnlock()
}
func (d *DownTrack) maxLayerNotifierWorker() {
more := true
for more {
_, more = <-d.maxLayerNotifierCh
for event := range d.maxLayerNotifierCh {
maxLayerSpatial := d.forwarder.GetMaxSubscribedSpatial()
d.params.Logger.Debugw("max subscribed layer processed", "layer", maxLayerSpatial, "event", event)
maxLayerSpatial := buffer.InvalidLayerSpatial
if more {
maxLayerSpatial = d.forwarder.GetMaxSubscribedSpatial()
}
if onMaxSubscribedLayerChanged := d.getOnMaxLayerChanged(); onMaxSubscribedLayerChanged != nil {
d.params.Logger.Debugw("max subscribed layer changed", "maxLayerSpatial", maxLayerSpatial)
d.params.Logger.Debugw("notifying max subscribed layer", "layer", maxLayerSpatial, "event", event)
onMaxSubscribedLayerChanged(d, maxLayerSpatial)
}
}
if onMaxSubscribedLayerChanged := d.getOnMaxLayerChanged(); onMaxSubscribedLayerChanged != nil {
d.params.Logger.Debugw("notifying max subscribed layer", "layer", buffer.InvalidLayerSpatial, "event", "close")
onMaxSubscribedLayerChanged(d, buffer.InvalidLayerSpatial)
}
}
// WriteRTP writes an RTP Packet to the DownTrack
@@ -688,25 +695,25 @@ func (d *DownTrack) WriteRTP(extPkt *buffer.ExtPacket, layer int32) error {
return err
}
var payload []byte
poolEntity := PacketFactory.Get().(*[]byte)
if len(tp.codecBytes) != 0 {
incomingVP8, ok := extPkt.Payload.(buffer.VP8)
if ok {
payload = d.translateVP8PacketTo(extPkt.Packet, &incomingVP8, tp.codecBytes, poolEntity)
}
payload := *poolEntity
shouldForward, incomingHeaderSize, outgoingHeaderSize, err := d.forwarder.TranslateCodecHeader(extPkt, &tp.rtp, payload)
if !shouldForward {
PacketFactory.Put(poolEntity)
return err
}
if payload == nil {
payload = (*poolEntity)[:len(extPkt.Packet.Payload)]
copy(payload, extPkt.Packet.Payload)
n := copy(payload[outgoingHeaderSize:], extPkt.Packet.Payload[incomingHeaderSize:])
if n != len(extPkt.Packet.Payload[incomingHeaderSize:]) {
d.params.Logger.Errorw("payload overflow", nil, "want", len(extPkt.Packet.Payload[incomingHeaderSize:]), "have", n)
PacketFactory.Put(poolEntity)
return ErrPayloadOverflow
}
payload = payload[:outgoingHeaderSize+n]
hdr, err := d.getTranslatedRTPHeader(extPkt, tp)
hdr, err := d.getTranslatedRTPHeader(extPkt, &tp)
if err != nil {
d.params.Logger.Errorw("could not get translated RTP header", err)
if poolEntity != nil {
PacketFactory.Put(poolEntity)
}
PacketFactory.Put(poolEntity)
return err
}
@@ -727,7 +734,8 @@ func (d *DownTrack) WriteRTP(extPkt *buffer.ExtPacket, layer int32) error {
tp.rtp.extTimestamp,
hdr.Marker,
int8(layer),
tp.codecBytes,
payload[:outgoingHeaderSize],
incomingHeaderSize,
tp.ddBytes,
)
}
@@ -741,7 +749,7 @@ func (d *DownTrack) WriteRTP(extPkt *buffer.ExtPacket, layer int32) error {
extSequenceNumber: tp.rtp.extSequenceNumber,
extTimestamp: tp.rtp.extTimestamp,
isKeyFrame: extPkt.KeyFrame,
tp: tp,
tp: &tp,
},
)
d.pacer.Enqueue(pacer.Packet{
@@ -903,7 +911,7 @@ func (d *DownTrack) handleMute(muted bool, changed bool) {
// Note that while publisher mute is active, subscriber changes can also happen
// and that could turn on/off layers on publisher side.
//
d.postMaxLayerNotifierEvent()
d.postMaxLayerNotifierEvent("mute")
if sal := d.getStreamAllocatorListener(); sal != nil {
sal.OnSubscriptionChanged(d)
@@ -973,7 +981,7 @@ func (d *DownTrack) CloseWithFlush(flush bool) {
d.onBindAndConnectedChange()
d.params.Logger.Debugw("closing sender", "kind", d.kind)
}
d.params.Receiver.DeleteDownTrack(d.params.SubID)
d.params.Receiver.DeleteDownTrack(d.SubscriberID())
if d.rtcpReader != nil && flush {
d.params.Logger.Debugw("downtrack close rtcp reader")
@@ -1015,7 +1023,7 @@ func (d *DownTrack) SetMaxSpatialLayer(spatialLayer int32) {
return
}
d.postMaxLayerNotifierEvent()
d.postMaxLayerNotifierEvent("max-subscribed")
if sal := d.getStreamAllocatorListener(); sal != nil {
sal.OnSubscribedLayerChanged(d, maxLayer)
@@ -1080,7 +1088,7 @@ func (d *DownTrack) UpTrackMaxTemporalLayerSeenChange(maxTemporalLayerSeen int32
}
}
func (d *DownTrack) maybeAddTransition(_bitrate int64, distance float64, pauseReason VideoPauseReason) {
func (d *DownTrack) maybeAddTransition(_ int64, distance float64, pauseReason VideoPauseReason) {
if d.kind == webrtc.RTPCodecTypeAudio {
return
}
@@ -1211,7 +1219,7 @@ func (d *DownTrack) ProvisionalAllocateGetCooperativeTransition(allowOvershoot b
transition, availableLayers, brs := d.forwarder.ProvisionalAllocateGetCooperativeTransition(allowOvershoot)
d.params.Logger.Debugw(
"stream: cooperative transition",
"transition", transition,
"transition", &transition,
"availableLayers", availableLayers,
"bitrates", brs,
)
@@ -1222,7 +1230,7 @@ func (d *DownTrack) ProvisionalAllocateGetBestWeightedTransition() VideoTransiti
transition, availableLayers, brs := d.forwarder.ProvisionalAllocateGetBestWeightedTransition()
d.params.Logger.Debugw(
"stream: best weighted transition",
"transition", transition,
"transition", &transition,
"availableLayers", availableLayers,
"bitrates", brs,
)
@@ -1295,11 +1303,8 @@ func (d *DownTrack) CreateSenderReport() *rtcp.SenderReport {
return nil
}
clockLayer := d.forwarder.CurrentLayer().Spatial
if clockLayer == buffer.InvalidLayerSpatial {
clockLayer = d.forwarder.GetReferenceLayerSpatial()
}
return d.rtpStats.GetRtcpSenderReport(d.ssrc, d.params.Receiver.GetCalculatedClockRate(clockLayer))
layer, tsOffset := d.forwarder.GetCurrentSpatialAndTSOffset()
return d.rtpStats.GetRtcpSenderReport(d.ssrc, d.params.Receiver.GetRTCPSenderReportData(layer), tsOffset)
}
func (d *DownTrack) writeBlankFrameRTP(duration float32, generation uint32) chan struct{} {
@@ -1435,20 +1440,19 @@ func (d *DownTrack) getOpusRedBlankFrame(_frameEndNeeded bool) ([]byte, error) {
}
func (d *DownTrack) getVP8BlankFrame(frameEndNeeded bool) ([]byte, error) {
blankVP8, err := d.forwarder.GetPadding(frameEndNeeded)
if err != nil {
return nil, err
}
// 8x8 key frame
// Used even when closing out a previous frame. Looks like receivers
// do not care about content (it will probably end up being an undecodable
// frame, but that should be okay as there are key frames following)
payload := make([]byte, 1000)
copy(payload[:len(blankVP8)], blankVP8)
copy(payload[len(blankVP8):], VP8KeyFrame8x8)
trailerLen := d.maybeAddTrailer(payload[len(blankVP8)+len(VP8KeyFrame8x8):])
return payload[:len(blankVP8)+len(VP8KeyFrame8x8)+trailerLen], nil
n, err := d.forwarder.GetPadding(frameEndNeeded, payload)
if err != nil {
return nil, err
}
copy(payload[n:], VP8KeyFrame8x8)
trailerLen := d.maybeAddTrailer(payload[n+len(VP8KeyFrame8x8):])
return payload[:n+len(VP8KeyFrame8x8)+trailerLen], nil
}
func (d *DownTrack) getH264BlankFrame(_frameEndNeeded bool) ([]byte, error) {
@@ -1498,12 +1502,16 @@ func (d *DownTrack) handleRTCP(bytes []byte) {
for _, pkt := range pkts {
switch p := pkt.(type) {
case *rtcp.PictureLossIndication:
numPLIs++
sendPliOnce()
if p.MediaSSRC == d.ssrc {
numPLIs++
sendPliOnce()
}
case *rtcp.FullIntraRequest:
numFIRs++
sendPliOnce()
if p.MediaSSRC == d.ssrc {
numFIRs++
sendPliOnce()
}
case *rtcp.ReceiverEstimatedMaximumBitrate:
if sal := d.getStreamAllocatorListener(); sal != nil {
@@ -1546,13 +1554,15 @@ func (d *DownTrack) handleRTCP(bytes []byte) {
}
case *rtcp.TransportLayerNack:
var nacks []uint16
for _, pair := range p.Nacks {
packetList := pair.PacketList()
numNACKs += uint32(len(packetList))
nacks = append(nacks, packetList...)
if p.MediaSSRC == d.ssrc {
var nacks []uint16
for _, pair := range p.Nacks {
packetList := pair.PacketList()
numNACKs += uint32(len(packetList))
nacks = append(nacks, packetList...)
}
go d.retransmitPackets(nacks)
}
go d.retransmitPackets(nacks)
case *rtcp.TransportLayerCC:
if p.MediaSSRC == d.ssrc {
@@ -1560,6 +1570,34 @@ func (d *DownTrack) handleRTCP(bytes []byte) {
sal.OnTransportCCFeedback(d, p)
}
}
case *rtcp.ExtendedReport:
// SFU only responds with the DLRRReport for the track has the sender SSRC, the behavior is different with
// browser's implementation, which includes all sent tracks. It is ok since all the tracks
// use the same connection, and server-sdk-go can get the rtt from the first DLRRReport
// (libwebrtc/browsers don't send XR to calculate rtt, it only responds)
var lastRR uint32
for _, report := range p.Reports {
if rr, ok := report.(*rtcp.ReceiverReferenceTimeReportBlock); ok {
lastRR = uint32(rr.NTPTimestamp >> 16)
break
}
}
if lastRR > 0 {
d.params.RTCPWriter([]rtcp.Packet{&rtcp.ExtendedReport{
SenderSSRC: d.ssrc,
Reports: []rtcp.ReportBlock{
&rtcp.DLRRReportBlock{
Reports: []rtcp.DLRRReport{{
SSRC: p.SenderSSRC,
LastRR: lastRR,
DLRR: 0, // no delay
}},
},
},
}})
}
}
}
@@ -1633,18 +1671,6 @@ func (d *DownTrack) retransmitPackets(nacks []uint16) {
if err == io.EOF {
break
}
// TODO-VP9-DEBUG-REMOVE-START
d.params.Logger.Debugw(
"NACK miss",
"isn", epm.sourceSeqNo,
"osn", epm.targetSeqNo,
"ots", epm.timestamp,
"eosn", epm.extSequenceNumber,
"eots", epm.extTimestamp,
"sid", epm.layer,
"error", err,
)
// TODO-VP9-DEBUG-REMOVE-END
nackMisses++
continue
}
@@ -1664,21 +1690,23 @@ func (d *DownTrack) retransmitPackets(nacks []uint16) {
pkt.Header.SSRC = d.ssrc
pkt.Header.PayloadType = d.payloadType
var payload []byte
poolEntity := PacketFactory.Get().(*[]byte)
if d.mime == "video/vp8" && len(pkt.Payload) > 0 && len(epm.codecBytes) != 0 {
var incomingVP8 buffer.VP8
if err = incomingVP8.Unmarshal(pkt.Payload); err != nil {
d.params.Logger.Errorw("could not unmarshal VP8 packet", err)
PacketFactory.Put(poolEntity)
continue
}
payload = d.translateVP8PacketTo(&pkt, &incomingVP8, epm.codecBytes, poolEntity)
payload := *poolEntity
if len(epm.codecBytesSlice) != 0 {
n := copy(payload, epm.codecBytesSlice)
m := copy(payload[n:], pkt.Payload[epm.numCodecBytesIn:])
payload = payload[:n+m]
} else {
copy(payload, epm.codecBytes[:epm.numCodecBytesOut])
copy(payload[epm.numCodecBytesOut:], pkt.Payload[epm.numCodecBytesIn:])
payload = payload[:int(epm.numCodecBytesOut)+len(pkt.Payload)-int(epm.numCodecBytesIn)]
}
if payload == nil {
payload = (*poolEntity)[:len(pkt.Payload)]
copy(payload, pkt.Payload)
var ddBytes []byte
if len(epm.ddBytesSlice) != 0 {
ddBytes = epm.ddBytesSlice
} else {
ddBytes = epm.ddBytes[:epm.ddBytesSize]
}
d.sendingPacket(
@@ -1694,7 +1722,7 @@ func (d *DownTrack) retransmitPackets(nacks []uint16) {
)
d.pacer.Enqueue(pacer.Packet{
Header: &pkt.Header,
Extensions: []pacer.ExtensionData{{ID: uint8(d.dependencyDescriptorExtID), Payload: epm.ddBytes}},
Extensions: []pacer.ExtensionData{{ID: uint8(d.dependencyDescriptorExtID), Payload: ddBytes}},
Payload: payload,
AbsSendTimeExtID: uint8(d.absSendTimeExtID),
TransportWideExtID: uint8(d.transportWideExtID),
@@ -1725,11 +1753,10 @@ func (d *DownTrack) retransmitPackets(nacks []uint16) {
}
func (d *DownTrack) getTranslatedRTPHeader(extPkt *buffer.ExtPacket, tp *TranslationParams) (*rtp.Header, error) {
tpRTP := tp.rtp
hdr := extPkt.Packet.Header
hdr.PayloadType = d.payloadType
hdr.Timestamp = uint32(tpRTP.extTimestamp)
hdr.SequenceNumber = uint16(tpRTP.extSequenceNumber)
hdr.Timestamp = uint32(tp.rtp.extTimestamp)
hdr.SequenceNumber = uint16(tp.rtp.extSequenceNumber)
hdr.SSRC = d.ssrc
if tp.marker {
hdr.Marker = tp.marker
@@ -1738,16 +1765,6 @@ func (d *DownTrack) getTranslatedRTPHeader(extPkt *buffer.ExtPacket, tp *Transla
return &hdr, nil
}
func (d *DownTrack) translateVP8PacketTo(pkt *rtp.Packet, incomingVP8 *buffer.VP8, translatedVP8 []byte, outbuf *[]byte) []byte {
buf := (*outbuf)[:len(pkt.Payload)+len(translatedVP8)-incomingVP8.HeaderSize]
srcPayload := pkt.Payload[incomingVP8.HeaderSize:]
dstPayload := buf[len(translatedVP8):]
copy(dstPayload, srcPayload)
copy(buf[:len(translatedVP8)], translatedVP8)
return buf
}
func (d *DownTrack) DebugInfo() map[string]interface{} {
stats := map[string]interface{}{
"LastPli": d.rtpStats.LastPli(),
@@ -1831,6 +1848,9 @@ func (d *DownTrack) onBindAndConnectedChange() {
if d.activePaddingOnMuteUpTrack.Load() {
go d.sendPaddingOnMute()
}
// kick off PLI request if allocation is pending
d.postKeyFrameRequestEvent()
}
}
@@ -1925,19 +1945,19 @@ func (d *DownTrack) HandleRTCPSenderReportData(
_payloadType webrtc.PayloadType,
isSVC bool,
layer int32,
srFirst *buffer.RTCPSenderReportData,
srNewest *buffer.RTCPSenderReportData,
publisherSRData *buffer.RTCPSenderReportData,
) error {
if (layer == d.forwarder.GetReferenceLayerSpatial() || (layer == 0 && isSVC)) && srNewest != nil {
d.rtpStats.MaybeAdjustFirstPacketTime(
srFirst,
srNewest,
srNewest.RTPTimestamp+uint32(d.forwarder.GetReferenceTimestampOffset()),
)
currentLayer, tsOffset := d.forwarder.GetCurrentSpatialAndTSOffset()
if layer == currentLayer || (layer == 0 && isSVC) {
d.handleRTCPSenderReportData(publisherSRData, tsOffset)
}
return nil
}
func (d *DownTrack) handleRTCPSenderReportData(publisherSRData *buffer.RTCPSenderReportData, tsOffset uint64) {
d.rtpStats.MaybeAdjustFirstPacketTime(publisherSRData, tsOffset)
}
func (d *DownTrack) HandleTrackFrameRateReport(_payloadType webrtc.PayloadType, _fps [][]float32) error {
return nil
}
@@ -1987,10 +2007,14 @@ func (d *DownTrack) sendingPacket(hdr *rtp.Header, payloadSize int, spmd *sendPa
if spmd.tp != nil {
if spmd.tp.isSwitching {
d.postMaxLayerNotifierEvent()
d.postMaxLayerNotifierEvent("switching")
}
if spmd.tp.isResuming {
// adjust first packet time on a resumption so that subsequent switches get a more accurate expected time stamp
currentLayer, tsOffset := d.forwarder.GetCurrentSpatialAndTSOffset()
d.handleRTCPSenderReportData(d.params.Receiver.GetRTCPSenderReportData(currentLayer), tsOffset)
if sal := d.getStreamAllocatorListener(); sal != nil {
sal.OnResume(d)
}
+88 -58
View File
@@ -25,6 +25,7 @@ import (
"github.com/pion/rtp"
"github.com/pion/webrtc/v3"
"go.uber.org/zap/zapcore"
"github.com/livekit/protocol/logger"
@@ -92,7 +93,7 @@ type VideoAllocation struct {
DistanceToDesired float64
}
func (v VideoAllocation) String() string {
func (v *VideoAllocation) String() string {
return fmt.Sprintf("VideoAllocation{pause: %s, def: %+v, bwr: %d, del: %d, bwn: %d, rates: %+v, target: %s, req: %d, max: %s, dist: %0.2f}",
v.PauseReason,
v.IsDeficient,
@@ -107,6 +108,24 @@ func (v VideoAllocation) String() string {
)
}
func (v *VideoAllocation) MarshalLogObject(e zapcore.ObjectEncoder) error {
if v == nil {
return nil
}
e.AddString("PauseReason", v.PauseReason.String())
e.AddBool("IsDeficient", v.IsDeficient)
e.AddInt64("BandwidthRquested", v.BandwidthRequested)
e.AddInt64("BandwidthDelta", v.BandwidthDelta)
e.AddInt64("BandwidthNeeded", v.BandwidthNeeded)
e.AddReflected("Bitrates", v.Bitrates)
e.AddReflected("TargetLayer", v.TargetLayer)
e.AddInt32("RequestLayerSpatial", v.RequestLayerSpatial)
e.AddReflected("MaxLayer", v.MaxLayer)
e.AddFloat64("DistanceToDesired", v.DistanceToDesired)
return nil
}
var (
VideoAllocationDefault = VideoAllocation{
PauseReason: VideoPauseReasonFeedDry, // start with no feed till feed is seen
@@ -137,18 +156,28 @@ type VideoTransition struct {
BandwidthDelta int64
}
func (v VideoTransition) String() string {
func (v *VideoTransition) String() string {
return fmt.Sprintf("VideoTransition{from: %s, to: %s, del: %d}", v.From, v.To, v.BandwidthDelta)
}
func (v *VideoTransition) MarshalLogObject(e zapcore.ObjectEncoder) error {
if v == nil {
return nil
}
e.AddReflected("From", v.From)
e.AddReflected("To", v.To)
e.AddInt64("BandwidthDelta", v.BandwidthDelta)
return nil
}
// -------------------------------------------------------------------
type TranslationParams struct {
shouldDrop bool
isResuming bool
isSwitching bool
rtp *TranslationParamsRTP
codecBytes []byte
rtp TranslationParamsRTP
ddBytes []byte
marker bool
}
@@ -248,7 +277,7 @@ func (f *Forwarder) SetMaxPublishedLayer(maxPublishedLayer int32) bool {
}
f.vls.SetMaxSeenSpatial(maxPublishedLayer)
f.logger.Debugw("setting max published layer", "maxPublishedLayer", maxPublishedLayer)
f.logger.Debugw("setting max published layer", "layer", maxPublishedLayer)
return true
}
@@ -427,7 +456,7 @@ func (f *Forwarder) PubMute(pubMuted bool) bool {
return false
}
f.logger.Debugw("setting forwarder pub mute", "pubMuted", pubMuted)
f.logger.Debugw("setting forwarder pub mute", "muted", pubMuted)
f.pubMuted = pubMuted
// resync when pub muted so that sequence numbers do not jump on unmute
@@ -527,18 +556,15 @@ func (f *Forwarder) GetMaxSubscribedSpatial() int32 {
return layer
}
func (f *Forwarder) GetReferenceLayerSpatial() int32 {
func (f *Forwarder) GetCurrentSpatialAndTSOffset() (int32, uint64) {
f.lock.RLock()
defer f.lock.RUnlock()
return f.referenceLayerSpatial
}
if f.kind == webrtc.RTPCodecTypeAudio {
return 0, f.rtpMunger.GetPinnedTSOffset()
}
func (f *Forwarder) GetReferenceTimestampOffset() uint64 {
f.lock.RLock()
defer f.lock.RUnlock()
return f.refTSOffset
return f.vls.GetCurrent().Spatial, f.rtpMunger.GetPinnedTSOffset()
}
func (f *Forwarder) isDeficientLocked() bool {
@@ -1360,7 +1386,7 @@ func (f *Forwarder) updateAllocation(alloc VideoAllocation, reason string) Video
alloc.PauseReason != f.lastAllocation.PauseReason ||
alloc.TargetLayer != f.lastAllocation.TargetLayer ||
alloc.RequestLayerSpatial != f.lastAllocation.RequestLayerSpatial {
f.logger.Debugw(fmt.Sprintf("stream allocation: %s", reason), "allocation", alloc)
f.logger.Debugw(fmt.Sprintf("stream allocation: %s", reason), "allocation", &alloc)
}
f.lastAllocation = alloc
@@ -1433,12 +1459,12 @@ func (f *Forwarder) FilterRTX(nacks []uint16) (filtered []uint16, disallowedLaye
return
}
func (f *Forwarder) GetTranslationParams(extPkt *buffer.ExtPacket, layer int32) (*TranslationParams, error) {
func (f *Forwarder) GetTranslationParams(extPkt *buffer.ExtPacket, layer int32) (TranslationParams, error) {
f.lock.Lock()
defer f.lock.Unlock()
if f.muted || f.pubMuted {
return &TranslationParams{
return TranslationParams{
shouldDrop: true,
}, nil
}
@@ -1450,7 +1476,9 @@ func (f *Forwarder) GetTranslationParams(extPkt *buffer.ExtPacket, layer int32)
return f.getTranslationParamsVideo(extPkt, layer)
}
return nil, ErrUnknownKind
return TranslationParams{
shouldDrop: true,
}, ErrUnknownKind
}
func (f *Forwarder) processSourceSwitch(extPkt *buffer.ExtPacket, layer int32) error {
@@ -1647,11 +1675,11 @@ func (f *Forwarder) processSourceSwitch(extPkt *buffer.ExtPacket, layer int32) e
}
// should be called with lock held
func (f *Forwarder) getTranslationParamsCommon(extPkt *buffer.ExtPacket, layer int32, tp *TranslationParams) (*TranslationParams, error) {
func (f *Forwarder) getTranslationParamsCommon(extPkt *buffer.ExtPacket, layer int32, tp *TranslationParams) error {
if f.lastSSRC != extPkt.Packet.SSRC {
if err := f.processSourceSwitch(extPkt, layer); err != nil {
tp.shouldDrop = true
return tp, nil
return nil
}
f.logger.Debugw("switching feed", "from", f.lastSSRC, "to", extPkt.Packet.SSRC)
f.lastSSRC = extPkt.Packet.SSRC
@@ -1661,29 +1689,34 @@ func (f *Forwarder) getTranslationParamsCommon(extPkt *buffer.ExtPacket, layer i
if err != nil {
tp.shouldDrop = true
if err == ErrPaddingOnlyPacket || err == ErrDuplicatePacket || err == ErrOutOfOrderSequenceNumberCacheMiss {
return tp, nil
return nil
}
return tp, err
return err
}
tp.rtp = tpRTP
return nil
}
// should be called with lock held
func (f *Forwarder) getTranslationParamsAudio(extPkt *buffer.ExtPacket, layer int32) (TranslationParams, error) {
tp := TranslationParams{}
if err := f.getTranslationParamsCommon(extPkt, layer, &tp); err != nil {
tp.shouldDrop = true
return tp, err
}
return tp, nil
}
// should be called with lock held
func (f *Forwarder) getTranslationParamsAudio(extPkt *buffer.ExtPacket, layer int32) (*TranslationParams, error) {
return f.getTranslationParamsCommon(extPkt, layer, &TranslationParams{})
}
// should be called with lock held
func (f *Forwarder) getTranslationParamsVideo(extPkt *buffer.ExtPacket, layer int32) (*TranslationParams, error) {
func (f *Forwarder) getTranslationParamsVideo(extPkt *buffer.ExtPacket, layer int32) (TranslationParams, error) {
maybeRollback := func(isSwitching bool) {
if isSwitching {
f.vls.Rollback()
}
}
tp := &TranslationParams{}
tp := TranslationParams{}
if !f.vls.GetTarget().IsValid() {
// stream is paused by streamallocator
tp.shouldDrop = true
@@ -1698,20 +1731,6 @@ func (f *Forwarder) getTranslationParamsVideo(extPkt *buffer.ExtPacket, layer in
if tpRTP, err := f.rtpMunger.UpdateAndGetSnTs(extPkt, result.RTPMarker); err == nil {
if tpRTP.snOrdering == SequenceNumberOrderingContiguous {
f.rtpMunger.PacketDropped(extPkt)
} else {
// TODO-VP9-DEBUG-REMOVE-START
f.logger.Debugw(
"dropping packet skipped as not contiguous",
"isn", extPkt.ExtSequenceNumber,
"its", extPkt.ExtTimestamp,
"osn", tpRTP.extSequenceNumber,
"ots", tpRTP.extTimestamp,
"payloadLen", len(extPkt.Packet.Payload),
"sid", extPkt.Spatial,
"tid", extPkt.Temporal,
"snOrdering", tpRTP.snOrdering,
)
// TODO-VP9-DEBUG-REMOVE-END
}
}
}
@@ -1746,38 +1765,49 @@ func (f *Forwarder) getTranslationParamsVideo(extPkt *buffer.ExtPacket, layer in
return tp, nil
}
_, err := f.getTranslationParamsCommon(extPkt, layer, tp)
if tp.shouldDrop || len(extPkt.Packet.Payload) == 0 {
err := f.getTranslationParamsCommon(extPkt, layer, &tp)
if tp.shouldDrop {
maybeRollback(result.IsSwitching)
return tp, err
}
return tp, nil
}
func (f *Forwarder) TranslateCodecHeader(extPkt *buffer.ExtPacket, tpr *TranslationParamsRTP, outputBuffer []byte) (bool, int, int, error) {
f.lock.Lock()
defer f.lock.Unlock()
maybeRollback := func(isSwitching bool) {
if isSwitching {
f.vls.Rollback()
}
}
// codec specific forwarding check and any needed packet munging
tl, isSwitching := f.vls.SelectTemporal(extPkt)
codecBytes, err := f.codecMunger.UpdateAndGet(
inputSize, outputSize, err := f.codecMunger.UpdateAndGet(
extPkt,
tp.rtp.snOrdering == SequenceNumberOrderingOutOfOrder,
tp.rtp.snOrdering == SequenceNumberOrderingGap,
tpr.snOrdering == SequenceNumberOrderingOutOfOrder,
tpr.snOrdering == SequenceNumberOrderingGap,
tl,
outputBuffer,
)
if err != nil {
tp.rtp = nil
tp.shouldDrop = true
if err == codecmunger.ErrFilteredVP8TemporalLayer || err == codecmunger.ErrOutOfOrderVP8PictureIdCacheMiss {
if err == codecmunger.ErrFilteredVP8TemporalLayer {
// filtered temporal layer, update sequence number offset to prevent holes
f.rtpMunger.PacketDropped(extPkt)
}
maybeRollback(result.IsSwitching || isSwitching)
return tp, nil
maybeRollback(isSwitching)
return false, 0, 0, nil
}
maybeRollback(result.IsSwitching || isSwitching)
return tp, err
maybeRollback(isSwitching)
return false, 0, 0, err
}
tp.codecBytes = codecBytes
return tp, nil
return true, inputSize, outputSize, nil
}
func (f *Forwarder) maybeStart() {
@@ -1854,11 +1884,11 @@ func (f *Forwarder) GetSnTsForBlankFrames(frameRate uint32, numPackets int) ([]S
return snts, frameEndNeeded, err
}
func (f *Forwarder) GetPadding(frameEndNeeded bool) ([]byte, error) {
func (f *Forwarder) GetPadding(frameEndNeeded bool, outputBuffer []byte) (int, error) {
f.lock.Lock()
defer f.lock.Unlock()
return f.codecMunger.UpdateAndGetPadding(!frameEndNeeded)
return f.codecMunger.UpdateAndGetPadding(!frameEndNeeded, outputBuffer)
}
func (f *Forwarder) RTPMungerDebugInfo() map[string]interface{} {
+127 -87
View File
@@ -1200,7 +1200,7 @@ func TestForwarderGetTranslationParamsMuted(t *testing.T) {
}
actualTP, err := f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.Equal(t, expectedTP, actualTP)
}
func TestForwarderGetTranslationParamsAudio(t *testing.T) {
@@ -1216,7 +1216,7 @@ func TestForwarderGetTranslationParamsAudio(t *testing.T) {
// should lock onto the first packet
expectedTP := TranslationParams{
rtp: &TranslationParamsRTP{
rtp: TranslationParamsRTP{
snOrdering: SequenceNumberOrderingContiguous,
extSequenceNumber: 23333,
extTimestamp: 0xabcdef,
@@ -1224,7 +1224,7 @@ func TestForwarderGetTranslationParamsAudio(t *testing.T) {
}
actualTP, err := f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.Equal(t, expectedTP, actualTP)
require.True(t, f.started)
require.Equal(t, f.lastSSRC, params.SSRC)
@@ -1234,7 +1234,7 @@ func TestForwarderGetTranslationParamsAudio(t *testing.T) {
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.Equal(t, expectedTP, actualTP)
// add a missing sequence number to the cache
err = f.rtpMunger.snRangeMap.ExcludeRange(23334, 23335)
@@ -1261,7 +1261,7 @@ func TestForwarderGetTranslationParamsAudio(t *testing.T) {
extPkt, _ = testutils.GetTestExtPacket(params)
expectedTP = TranslationParams{
rtp: &TranslationParamsRTP{
rtp: TranslationParamsRTP{
snOrdering: SequenceNumberOrderingOutOfOrder,
extSequenceNumber: 23334,
extTimestamp: 0xabcdef,
@@ -1269,7 +1269,7 @@ func TestForwarderGetTranslationParamsAudio(t *testing.T) {
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.Equal(t, expectedTP, actualTP)
// padding only packet in order should be dropped
params = &testutils.TestExtPacketParams{
@@ -1284,7 +1284,7 @@ func TestForwarderGetTranslationParamsAudio(t *testing.T) {
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.Equal(t, expectedTP, actualTP)
// in order packet should be forwarded
params = &testutils.TestExtPacketParams{
@@ -1296,7 +1296,7 @@ func TestForwarderGetTranslationParamsAudio(t *testing.T) {
extPkt, _ = testutils.GetTestExtPacket(params)
expectedTP = TranslationParams{
rtp: &TranslationParamsRTP{
rtp: TranslationParamsRTP{
snOrdering: SequenceNumberOrderingContiguous,
extSequenceNumber: 23336,
extTimestamp: 0xabcdef,
@@ -1304,7 +1304,7 @@ func TestForwarderGetTranslationParamsAudio(t *testing.T) {
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.Equal(t, expectedTP, actualTP)
// padding only packet after a gap should not be dropped
params = &testutils.TestExtPacketParams{
@@ -1315,7 +1315,7 @@ func TestForwarderGetTranslationParamsAudio(t *testing.T) {
extPkt, _ = testutils.GetTestExtPacket(params)
expectedTP = TranslationParams{
rtp: &TranslationParamsRTP{
rtp: TranslationParamsRTP{
snOrdering: SequenceNumberOrderingGap,
extSequenceNumber: 23338,
extTimestamp: 0xabcdef,
@@ -1323,7 +1323,7 @@ func TestForwarderGetTranslationParamsAudio(t *testing.T) {
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.Equal(t, expectedTP, actualTP)
// out-of-order should be forwarded using cache
params = &testutils.TestExtPacketParams{
@@ -1335,7 +1335,7 @@ func TestForwarderGetTranslationParamsAudio(t *testing.T) {
extPkt, _ = testutils.GetTestExtPacket(params)
expectedTP = TranslationParams{
rtp: &TranslationParamsRTP{
rtp: TranslationParamsRTP{
snOrdering: SequenceNumberOrderingOutOfOrder,
extSequenceNumber: 23335,
extTimestamp: 0xabcdef,
@@ -1343,7 +1343,7 @@ func TestForwarderGetTranslationParamsAudio(t *testing.T) {
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.Equal(t, expectedTP, actualTP)
// switching source should lock onto the new source, but sequence number should be contiguous
params = &testutils.TestExtPacketParams{
@@ -1355,7 +1355,7 @@ func TestForwarderGetTranslationParamsAudio(t *testing.T) {
extPkt, _ = testutils.GetTestExtPacket(params)
expectedTP = TranslationParams{
rtp: &TranslationParamsRTP{
rtp: TranslationParamsRTP{
snOrdering: SequenceNumberOrderingContiguous,
extSequenceNumber: 23339,
extTimestamp: 0xabcdf0,
@@ -1363,11 +1363,12 @@ func TestForwarderGetTranslationParamsAudio(t *testing.T) {
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.Equal(t, expectedTP, actualTP)
require.Equal(t, f.lastSSRC, params.SSRC)
}
func TestForwarderGetTranslationParamsVideo(t *testing.T) {
buf := make([]byte, 100)
f := newForwarder(testutils.TestVP8Codec, webrtc.RTPCodecTypeVideo)
params := &testutils.TestExtPacketParams{
@@ -1400,7 +1401,7 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
}
actualTP, err := f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.Equal(t, expectedTP, actualTP)
// although target layer matches, not a key frame, so should drop
f.vls.SetTarget(buffer.VideoLayer{
@@ -1412,7 +1413,7 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.Equal(t, expectedTP, actualTP)
// should lock onto packet (key frame)
vp8 = &buffer.VP8{
@@ -1431,6 +1432,22 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
IsKeyFrame: true,
}
extPkt, _ = testutils.GetTestExtPacketVP8(params, vp8)
expectedTP = TranslationParams{
isSwitching: true,
isResuming: true,
rtp: TranslationParamsRTP{
snOrdering: SequenceNumberOrderingContiguous,
extSequenceNumber: 23333,
extTimestamp: 0xabcdef,
},
marker: true,
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, actualTP)
require.True(t, f.started)
require.Equal(t, f.lastSSRC, params.SSRC)
expectedVP8 := &buffer.VP8{
FirstByte: 25,
I: true,
@@ -1448,22 +1465,12 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
}
marshalledVP8, err := expectedVP8.Marshal()
require.NoError(t, err)
expectedTP = TranslationParams{
isSwitching: true,
isResuming: true,
rtp: &TranslationParamsRTP{
snOrdering: SequenceNumberOrderingContiguous,
extSequenceNumber: 23333,
extTimestamp: 0xabcdef,
},
codecBytes: marshalledVP8,
marker: true,
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
shouldForward, incomingHeaderSize, outgoingHeaderSize, err := f.TranslateCodecHeader(extPkt, &actualTP.rtp, buf)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.True(t, f.started)
require.Equal(t, f.lastSSRC, params.SSRC)
require.True(t, shouldForward)
require.Equal(t, 6, incomingHeaderSize)
require.Equal(t, 6, outgoingHeaderSize)
require.Equal(t, marshalledVP8, buf[:outgoingHeaderSize])
// send a duplicate, should be dropped
expectedTP = TranslationParams{
@@ -1472,7 +1479,7 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.Equal(t, expectedTP, actualTP)
// out-of-order packet not in cache should be dropped
params = &testutils.TestExtPacketParams{
@@ -1487,7 +1494,7 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.Equal(t, expectedTP, actualTP)
// padding only packet in order should be dropped
params = &testutils.TestExtPacketParams{
@@ -1501,7 +1508,7 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.Equal(t, expectedTP, actualTP)
// in order packet should be forwarded
params = &testutils.TestExtPacketParams{
@@ -1511,6 +1518,17 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
PayloadSize: 20,
}
extPkt, _ = testutils.GetTestExtPacketVP8(params, vp8)
expectedTP = TranslationParams{
rtp: TranslationParamsRTP{
snOrdering: SequenceNumberOrderingContiguous,
extSequenceNumber: 23334,
extTimestamp: 0xabcdef,
},
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, actualTP)
expectedVP8 = &buffer.VP8{
FirstByte: 25,
I: true,
@@ -1528,17 +1546,12 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
}
marshalledVP8, err = expectedVP8.Marshal()
require.NoError(t, err)
expectedTP = TranslationParams{
rtp: &TranslationParamsRTP{
snOrdering: SequenceNumberOrderingContiguous,
extSequenceNumber: 23334,
extTimestamp: 0xabcdef,
},
codecBytes: marshalledVP8,
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
shouldForward, incomingHeaderSize, outgoingHeaderSize, err = f.TranslateCodecHeader(extPkt, &actualTP.rtp, buf)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.True(t, shouldForward)
require.Equal(t, 6, incomingHeaderSize)
require.Equal(t, 6, outgoingHeaderSize)
require.Equal(t, marshalledVP8, buf[:outgoingHeaderSize])
// temporal layer matching target, should be forwarded
params = &testutils.TestExtPacketParams{
@@ -1564,6 +1577,17 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
IsKeyFrame: true,
}
extPkt, _ = testutils.GetTestExtPacketVP8(params, vp8)
expectedTP = TranslationParams{
rtp: TranslationParamsRTP{
snOrdering: SequenceNumberOrderingContiguous,
extSequenceNumber: 23335,
extTimestamp: 0xabcdef,
},
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, actualTP)
expectedVP8 = &buffer.VP8{
FirstByte: 25,
I: true,
@@ -1581,17 +1605,12 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
}
marshalledVP8, err = expectedVP8.Marshal()
require.NoError(t, err)
expectedTP = TranslationParams{
rtp: &TranslationParamsRTP{
snOrdering: SequenceNumberOrderingContiguous,
extSequenceNumber: 23335,
extTimestamp: 0xabcdef,
},
codecBytes: marshalledVP8,
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
shouldForward, incomingHeaderSize, outgoingHeaderSize, err = f.TranslateCodecHeader(extPkt, &actualTP.rtp, buf)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.True(t, shouldForward)
require.Equal(t, 6, incomingHeaderSize)
require.Equal(t, 6, outgoingHeaderSize)
require.Equal(t, marshalledVP8, buf[:outgoingHeaderSize])
// temporal layer higher than target, should be dropped
params = &testutils.TestExtPacketParams{
@@ -1617,11 +1636,19 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
}
extPkt, _ = testutils.GetTestExtPacketVP8(params, vp8)
expectedTP = TranslationParams{
shouldDrop: true,
rtp: TranslationParamsRTP{
snOrdering: SequenceNumberOrderingContiguous,
extSequenceNumber: 23336,
extTimestamp: 0xabcdef,
},
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.Equal(t, expectedTP, actualTP)
shouldForward, incomingHeaderSize, outgoingHeaderSize, err = f.TranslateCodecHeader(extPkt, &actualTP.rtp, buf)
require.NoError(t, err)
require.False(t, shouldForward)
// RTP sequence number and VP8 picture id should be contiguous after dropping higher temporal layer picture
params = &testutils.TestExtPacketParams{
@@ -1646,6 +1673,17 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
IsKeyFrame: false,
}
extPkt, _ = testutils.GetTestExtPacketVP8(params, vp8)
expectedTP = TranslationParams{
rtp: TranslationParamsRTP{
snOrdering: SequenceNumberOrderingContiguous,
extSequenceNumber: 23336,
extTimestamp: 0xabcdef,
},
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, actualTP)
expectedVP8 = &buffer.VP8{
FirstByte: 25,
I: true,
@@ -1663,17 +1701,12 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
}
marshalledVP8, err = expectedVP8.Marshal()
require.NoError(t, err)
expectedTP = TranslationParams{
rtp: &TranslationParamsRTP{
snOrdering: SequenceNumberOrderingContiguous,
extSequenceNumber: 23336,
extTimestamp: 0xabcdef,
},
codecBytes: marshalledVP8,
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
shouldForward, incomingHeaderSize, outgoingHeaderSize, err = f.TranslateCodecHeader(extPkt, &actualTP.rtp, buf)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.True(t, shouldForward)
require.Equal(t, 6, incomingHeaderSize)
require.Equal(t, 6, outgoingHeaderSize)
require.Equal(t, marshalledVP8, buf[:outgoingHeaderSize])
// padding only packet after a gap should be forwarded
params = &testutils.TestExtPacketParams{
@@ -1684,7 +1717,7 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
extPkt, _ = testutils.GetTestExtPacket(params)
expectedTP = TranslationParams{
rtp: &TranslationParamsRTP{
rtp: TranslationParamsRTP{
snOrdering: SequenceNumberOrderingGap,
extSequenceNumber: 23338,
extTimestamp: 0xabcdef,
@@ -1692,7 +1725,7 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.Equal(t, expectedTP, actualTP)
// out-of-order should be forwarded using cache, even if it is padding only
params = &testutils.TestExtPacketParams{
@@ -1703,7 +1736,7 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
extPkt, _ = testutils.GetTestExtPacket(params)
expectedTP = TranslationParams{
rtp: &TranslationParamsRTP{
rtp: TranslationParamsRTP{
snOrdering: SequenceNumberOrderingOutOfOrder,
extSequenceNumber: 23337,
extTimestamp: 0xabcdef,
@@ -1711,7 +1744,7 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
}
actualTP, err = f.GetTranslationParams(extPkt, 0)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.Equal(t, expectedTP, actualTP)
// switching SSRC (happens for new layer or new track source)
// should lock onto the new source, but sequence number should be contiguous
@@ -1743,6 +1776,19 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
}
extPkt, _ = testutils.GetTestExtPacketVP8(params, vp8)
expectedTP = TranslationParams{
isSwitching: true,
rtp: TranslationParamsRTP{
snOrdering: SequenceNumberOrderingContiguous,
extSequenceNumber: 23339,
extTimestamp: 0xabcdf0,
},
}
actualTP, err = f.GetTranslationParams(extPkt, 1)
require.NoError(t, err)
require.Equal(t, expectedTP, actualTP)
require.Equal(t, f.lastSSRC, params.SSRC)
expectedVP8 = &buffer.VP8{
FirstByte: 25,
I: true,
@@ -1760,19 +1806,12 @@ func TestForwarderGetTranslationParamsVideo(t *testing.T) {
}
marshalledVP8, err = expectedVP8.Marshal()
require.NoError(t, err)
expectedTP = TranslationParams{
isSwitching: true,
rtp: &TranslationParamsRTP{
snOrdering: SequenceNumberOrderingContiguous,
extSequenceNumber: 23339,
extTimestamp: 0xabcdf0,
},
codecBytes: marshalledVP8,
}
actualTP, err = f.GetTranslationParams(extPkt, 1)
shouldForward, incomingHeaderSize, outgoingHeaderSize, err = f.TranslateCodecHeader(extPkt, &actualTP.rtp, buf)
require.NoError(t, err)
require.Equal(t, expectedTP, *actualTP)
require.Equal(t, f.lastSSRC, params.SSRC)
require.True(t, shouldForward)
require.Equal(t, 5, incomingHeaderSize)
require.Equal(t, 6, outgoingHeaderSize)
require.Equal(t, marshalledVP8, buf[:outgoingHeaderSize])
}
func TestForwarderGetSnTsForPadding(t *testing.T) {
@@ -1920,6 +1959,7 @@ func TestForwarderGetSnTsForBlankFrames(t *testing.T) {
}
func TestForwarderGetPaddingVP8(t *testing.T) {
buf := make([]byte, 100)
f := newForwarder(testutils.TestVP8Codec, webrtc.RTPCodecTypeVideo)
params := &testutils.TestExtPacketParams{
@@ -1970,11 +2010,11 @@ func TestForwarderGetPaddingVP8(t *testing.T) {
HeaderSize: 6,
IsKeyFrame: true,
}
blankVP8, err := f.GetPadding(true)
n, err := f.GetPadding(true, buf)
require.NoError(t, err)
marshalledVP8, err := expectedVP8.Marshal()
require.NoError(t, err)
require.Equal(t, marshalledVP8, blankVP8)
require.Equal(t, marshalledVP8, buf[:n])
// getting padding with no frame end needed, should get next picture id
expectedVP8 = buffer.VP8{
@@ -1992,9 +2032,9 @@ func TestForwarderGetPaddingVP8(t *testing.T) {
HeaderSize: 6,
IsKeyFrame: true,
}
blankVP8, err = f.GetPadding(false)
n, err = f.GetPadding(false, buf)
require.NoError(t, err)
marshalledVP8, err = expectedVP8.Marshal()
require.NoError(t, err)
require.Equal(t, marshalledVP8, blankVP8)
require.Equal(t, marshalledVP8, buf[:n])
}
+5
View File
@@ -33,6 +33,8 @@ const (
jitterLowMultiToDelay = 10
jitterHighMultiToDelay = 15
jitterHighThreshold = 15
targetDelayLogThreshold = 500
)
func (s PlayoutDelayState) String() string {
@@ -110,6 +112,9 @@ func (c *PlayoutDelayController) SetJitter(jitter uint32) {
c.lock.Unlock()
return
}
if targetDelay > targetDelayLogThreshold {
c.logger.Debugw("high playout delay", "target", targetDelay, "jitter", jitter, "nackPercent", nackPercent, "current", c.currentDelay)
}
c.currentDelay = targetDelay
c.lock.Unlock()
c.createExtData()
+8 -8
View File
@@ -84,8 +84,8 @@ type TrackReceiver interface {
GetFrameRates() [][]float32
GetTemporalLayerFpsForSpatial(layer int32) (bool, []float32)
GetCalculatedClockRate(layer int32) uint32
GetReferenceLayerRTPTimestamp(ts uint32, layer int32, referenceLayer int32) (uint32, error)
GetRTCPSenderReportData(layer int32) *buffer.RTCPSenderReportData
GetTrackStats() *livekit.RTPStats
}
@@ -350,11 +350,11 @@ func (w *WebRTCReceiver) AddUpTrack(track *webrtc.TrackRemote, buff *buffer.Buff
})
buff.OnRtcpFeedback(w.sendRTCP)
buff.OnRtcpSenderReport(func() {
srFirst, srNewest := buff.GetSenderReportData()
w.streamTrackerManager.SetRTCPSenderReportData(layer, srFirst, srNewest)
srData := buff.GetSenderReportData()
w.streamTrackerManager.SetRTCPSenderReportData(layer, srData)
w.downTrackSpreader.Broadcast(func(dt TrackSender) {
_ = dt.HandleRTCPSenderReportData(w.codec.PayloadType, w.isSVC, layer, srFirst, srNewest)
_ = dt.HandleRTCPSenderReportData(w.codec.PayloadType, w.isSVC, layer, srData)
})
})
@@ -829,14 +829,14 @@ func (w *WebRTCReceiver) GetTemporalLayerFpsForSpatial(layer int32) (bool, []flo
return b.GetTemporalLayerFpsForSpatial(layer)
}
func (w *WebRTCReceiver) GetCalculatedClockRate(layer int32) uint32 {
return w.streamTrackerManager.GetCalculatedClockRate(layer)
}
func (w *WebRTCReceiver) GetReferenceLayerRTPTimestamp(ts uint32, layer int32, referenceLayer int32) (uint32, error) {
return w.streamTrackerManager.GetReferenceLayerRTPTimestamp(ts, layer, referenceLayer)
}
func (w *WebRTCReceiver) GetRTCPSenderReportData(layer int32) *buffer.RTCPSenderReportData {
return w.streamTrackerManager.GetRTCPSenderReportData(layer)
}
// closes all track senders in parallel, returns when all are closed
func closeTrackSenders(senders []TrackSender) {
wg := sync.WaitGroup{}
+16 -7
View File
@@ -83,6 +83,7 @@ type RTPMunger struct {
extLastTS uint64
extSecondLastTS uint64
tsOffset uint64
pinnedTSOffset uint64
lastMarker bool
secondLastMarker bool
@@ -107,6 +108,7 @@ func (r *RTPMunger) DebugInfo() map[string]interface{} {
"ExtLastTS": r.extLastTS,
"ExtSecondLastTS": r.extSecondLastTS,
"TSOffset": r.tsOffset,
"PinnedTSOffset": r.pinnedTSOffset,
"LastMarker": r.lastMarker,
"SecondLastMarker": r.secondLastMarker,
}
@@ -123,6 +125,10 @@ func (r *RTPMunger) GetLast() RTPMungerState {
}
}
func (r *RTPMunger) GetPinnedTSOffset() uint64 {
return r.pinnedTSOffset
}
func (r *RTPMunger) SeedLast(state RTPMungerState) {
r.extLastSN = state.ExtLastSN
r.extSecondLastSN = state.ExtSecondLastSN
@@ -142,6 +148,8 @@ func (r *RTPMunger) SetLastSnTs(extPkt *buffer.ExtPacket) {
r.extLastTS = extPkt.ExtTimestamp
r.extSecondLastTS = extPkt.ExtTimestamp
r.tsOffset = 0
r.pinnedTSOffset = r.tsOffset
}
func (r *RTPMunger) UpdateSnTsOffsets(extPkt *buffer.ExtPacket, snAdjust uint64, tsAdjust uint64) {
@@ -151,6 +159,7 @@ func (r *RTPMunger) UpdateSnTsOffsets(extPkt *buffer.ExtPacket, snAdjust uint64,
r.updateSnOffset()
r.tsOffset = extPkt.ExtTimestamp - r.extLastTS - tsAdjust
r.pinnedTSOffset = r.tsOffset
}
func (r *RTPMunger) PacketDropped(extPkt *buffer.ExtPacket) {
@@ -180,7 +189,7 @@ func (r *RTPMunger) PacketDropped(extPkt *buffer.ExtPacket) {
r.lastMarker = r.secondLastMarker
}
func (r *RTPMunger) UpdateAndGetSnTs(extPkt *buffer.ExtPacket, marker bool) (*TranslationParamsRTP, error) {
func (r *RTPMunger) UpdateAndGetSnTs(extPkt *buffer.ExtPacket, marker bool) (TranslationParamsRTP, error) {
diff := int64(extPkt.ExtSequenceNumber - r.extHighestIncomingSN)
if (diff == 1 && len(extPkt.Packet.Payload) != 0) || diff > 1 {
// in-order - either contiguous packet with payload OR packet following a gap, may or may not have payload
@@ -210,7 +219,7 @@ func (r *RTPMunger) UpdateAndGetSnTs(extPkt *buffer.ExtPacket, marker bool) (*Tr
r.isInRtxGateRegion = false
}
return &TranslationParamsRTP{
return TranslationParamsRTP{
snOrdering: ordering,
extSequenceNumber: extMungedSN,
extTimestamp: extMungedTS,
@@ -221,7 +230,7 @@ func (r *RTPMunger) UpdateAndGetSnTs(extPkt *buffer.ExtPacket, marker bool) (*Tr
// out-of-order, look up sequence number offset cache
snOffset, err := r.snRangeMap.GetValue(extPkt.ExtSequenceNumber)
if err != nil {
return &TranslationParamsRTP{
return TranslationParamsRTP{
snOrdering: SequenceNumberOrderingOutOfOrder,
}, ErrOutOfOrderSequenceNumberCacheMiss
}
@@ -237,12 +246,12 @@ func (r *RTPMunger) UpdateAndGetSnTs(extPkt *buffer.ExtPacket, marker bool) (*Tr
"snOffsetIncoming", snOffset,
"snOffsetHighest", r.snOffset,
)
return &TranslationParamsRTP{
return TranslationParamsRTP{
snOrdering: SequenceNumberOrderingOutOfOrder,
}, ErrOutOfOrderSequenceNumberCacheMiss
}
return &TranslationParamsRTP{
return TranslationParamsRTP{
snOrdering: SequenceNumberOrderingOutOfOrder,
extSequenceNumber: extSequenceNumber,
extTimestamp: extPkt.ExtTimestamp - r.tsOffset,
@@ -259,13 +268,13 @@ func (r *RTPMunger) UpdateAndGetSnTs(extPkt *buffer.ExtPacket, marker bool) (*Tr
r.updateSnOffset()
return &TranslationParamsRTP{
return TranslationParamsRTP{
snOrdering: SequenceNumberOrderingContiguous,
}, ErrPaddingOnlyPacket
}
// can get duplicate packet due to FEC
return &TranslationParamsRTP{
return TranslationParamsRTP{
snOrdering: SequenceNumberOrderingDuplicate,
}, ErrDuplicatePacket
}
+12 -12
View File
@@ -213,7 +213,7 @@ func TestOutOfOrderSequenceNumber(t *testing.T) {
tp, err = r.UpdateAndGetSnTs(extPkt, extPkt.Packet.Marker)
require.NoError(t, err)
require.Equal(t, tpExpected, *tp)
require.Equal(t, tpExpected, tp)
params = &testutils.TestExtPacketParams{
SequenceNumber: 23332,
@@ -229,7 +229,7 @@ func TestOutOfOrderSequenceNumber(t *testing.T) {
tp, err = r.UpdateAndGetSnTs(extPkt, extPkt.Packet.Marker)
require.Error(t, err, ErrOutOfOrderSequenceNumberCacheMiss)
require.Equal(t, tpExpected, *tp)
require.Equal(t, tpExpected, tp)
}
func TestDuplicateSequenceNumber(t *testing.T) {
@@ -253,7 +253,7 @@ func TestDuplicateSequenceNumber(t *testing.T) {
tp, err := r.UpdateAndGetSnTs(extPkt, extPkt.Packet.Marker)
require.ErrorIs(t, err, ErrDuplicatePacket)
require.Equal(t, tpExpected, *tp)
require.Equal(t, tpExpected, tp)
}
func TestPaddingOnlyPacket(t *testing.T) {
@@ -275,7 +275,7 @@ func TestPaddingOnlyPacket(t *testing.T) {
tp, err := r.UpdateAndGetSnTs(extPkt, extPkt.Packet.Marker)
require.Error(t, err)
require.ErrorIs(t, err, ErrPaddingOnlyPacket)
require.Equal(t, tpExpected, *tp)
require.Equal(t, tpExpected, tp)
require.Equal(t, uint64(23333), r.extHighestIncomingSN)
require.Equal(t, uint64(23333), r.extLastSN)
snOffset, err := r.snRangeMap.GetValue(r.extHighestIncomingSN)
@@ -297,7 +297,7 @@ func TestPaddingOnlyPacket(t *testing.T) {
tp, err = r.UpdateAndGetSnTs(extPkt, extPkt.Packet.Marker)
require.NoError(t, err)
require.Equal(t, tpExpected, *tp)
require.Equal(t, tpExpected, tp)
require.Equal(t, uint64(23335), r.extHighestIncomingSN)
require.Equal(t, uint64(23334), r.extLastSN)
snOffset, err = r.snRangeMap.GetValue(r.extHighestIncomingSN)
@@ -338,7 +338,7 @@ func TestGapInSequenceNumber(t *testing.T) {
tp, err := r.UpdateAndGetSnTs(extPkt, extPkt.Packet.Marker)
require.NoError(t, err)
require.Equal(t, tpExpected, *tp)
require.Equal(t, tpExpected, tp)
require.Equal(t, uint64(65536+1), r.extHighestIncomingSN)
require.Equal(t, uint64(65536+1), r.extLastSN)
snOffset, err := r.snRangeMap.GetValue(r.extHighestIncomingSN)
@@ -367,7 +367,7 @@ func TestGapInSequenceNumber(t *testing.T) {
tp, err = r.UpdateAndGetSnTs(extPkt, extPkt.Packet.Marker)
require.ErrorIs(t, err, ErrPaddingOnlyPacket)
require.Equal(t, tpExpected, *tp)
require.Equal(t, tpExpected, tp)
require.Equal(t, uint64(65536+2), r.extHighestIncomingSN)
require.Equal(t, uint64(65536+1), r.extLastSN)
snOffset, err = r.snRangeMap.GetValue(r.extHighestIncomingSN)
@@ -391,7 +391,7 @@ func TestGapInSequenceNumber(t *testing.T) {
tp, err = r.UpdateAndGetSnTs(extPkt, extPkt.Packet.Marker)
require.NoError(t, err)
require.Equal(t, tpExpected, *tp)
require.Equal(t, tpExpected, tp)
require.Equal(t, uint64(65536+4), r.extHighestIncomingSN)
require.Equal(t, uint64(65536+3), r.extLastSN)
snOffset, err = r.snRangeMap.GetValue(r.extHighestIncomingSN)
@@ -418,7 +418,7 @@ func TestGapInSequenceNumber(t *testing.T) {
tp, err = r.UpdateAndGetSnTs(extPkt, extPkt.Packet.Marker)
require.ErrorIs(t, err, ErrPaddingOnlyPacket)
require.Equal(t, tpExpected, *tp)
require.Equal(t, tpExpected, tp)
require.Equal(t, uint64(65536+5), r.extHighestIncomingSN)
require.Equal(t, uint64(65536+3), r.extLastSN)
snOffset, err = r.snRangeMap.GetValue(r.extHighestIncomingSN)
@@ -442,7 +442,7 @@ func TestGapInSequenceNumber(t *testing.T) {
tp, err = r.UpdateAndGetSnTs(extPkt, extPkt.Packet.Marker)
require.NoError(t, err)
require.Equal(t, tpExpected, *tp)
require.Equal(t, tpExpected, tp)
require.Equal(t, uint64(65536+7), r.extHighestIncomingSN)
require.Equal(t, uint64(65536+5), r.extLastSN)
snOffset, err = r.snRangeMap.GetValue(r.extHighestIncomingSN)
@@ -475,7 +475,7 @@ func TestGapInSequenceNumber(t *testing.T) {
tp, err = r.UpdateAndGetSnTs(extPkt, extPkt.Packet.Marker)
require.NoError(t, err)
require.Equal(t, tpExpected, *tp)
require.Equal(t, tpExpected, tp)
require.Equal(t, uint64(65536+7), r.extHighestIncomingSN)
require.Equal(t, uint64(65536+5), r.extLastSN)
snOffset, err = r.snRangeMap.GetValue(r.extHighestIncomingSN)
@@ -498,7 +498,7 @@ func TestGapInSequenceNumber(t *testing.T) {
tp, err = r.UpdateAndGetSnTs(extPkt, extPkt.Packet.Marker)
require.NoError(t, err)
require.Equal(t, tpExpected, *tp)
require.Equal(t, tpExpected, tp)
require.Equal(t, uint64(65536+7), r.extHighestIncomingSN)
require.Equal(t, uint64(65536+5), r.extLastSN)
snOffset, err = r.snRangeMap.GetValue(r.extHighestIncomingSN)
+32 -12
View File
@@ -67,9 +67,14 @@ type packetMeta struct {
// Spatial layer of packet
layer int8
// Information that differs depending on the codec
codecBytes []byte
codecBytes [8]byte
numCodecBytesIn uint8
numCodecBytesOut uint8
codecBytesSlice []byte
// Dependency Descriptor of packet
ddBytes []byte
ddBytes [8]byte
ddBytesSize uint8
ddBytesSlice []byte
}
type extPacketMeta struct {
@@ -127,6 +132,7 @@ func (s *sequencer) push(
marker bool,
layer int8,
codecBytes []byte,
numCodecBytesIn int,
ddBytes []byte,
) {
s.Lock()
@@ -190,14 +196,28 @@ func (s *sequencer) push(
slot := extModifiedSNAdjusted % uint64(s.size)
s.meta[slot] = packetMeta{
sourceSeqNo: uint16(extIncomingSN),
targetSeqNo: uint16(extModifiedSN),
timestamp: uint32(extModifiedTS),
marker: marker,
layer: layer,
codecBytes: append([]byte{}, codecBytes...),
ddBytes: append([]byte{}, ddBytes...),
lastNack: s.getRefTime(packetTime), // delay retransmissions after the original transmission
sourceSeqNo: uint16(extIncomingSN),
targetSeqNo: uint16(extModifiedSN),
timestamp: uint32(extModifiedTS),
marker: marker,
layer: layer,
numCodecBytesIn: uint8(numCodecBytesIn),
lastNack: s.getRefTime(packetTime), // delay retransmissions after the original transmission
}
pm := &s.meta[slot]
pm.numCodecBytesOut = uint8(len(codecBytes))
if len(codecBytes) > len(pm.codecBytes) {
pm.codecBytesSlice = append([]byte{}, codecBytes...)
} else {
copy(pm.codecBytes[:pm.numCodecBytesOut], codecBytes)
}
pm.ddBytesSize = uint8(len(ddBytes))
if len(ddBytes) > len(pm.ddBytes) {
pm.ddBytesSlice = append([]byte{}, ddBytes...)
} else {
copy(pm.ddBytes[:pm.ddBytesSize], ddBytes)
}
if extModifiedSN > s.extHighestSN {
@@ -322,8 +342,8 @@ func (s *sequencer) getExtPacketMetas(seqNo []uint16) []extPacketMeta {
extSequenceNumber: extSN,
extTimestamp: extTS,
}
epm.codecBytes = append([]byte{}, meta.codecBytes...)
epm.ddBytes = append([]byte{}, meta.ddBytes...)
epm.codecBytesSlice = append([]byte{}, meta.codecBytesSlice...)
epm.ddBytesSlice = append([]byte{}, meta.ddBytesSlice...)
extPacketMetas = append(extPacketMetas, epm)
}
}
+133 -51
View File
@@ -29,11 +29,11 @@ func Test_sequencer(t *testing.T) {
off := uint16(15)
for i := uint64(1); i < 518; i++ {
seq.push(time.Now(), i, i+uint64(off), 123, true, 2, nil, nil)
seq.push(time.Now(), i, i+uint64(off), 123, true, 2, nil, 0, nil)
}
// send the last two out-of-order
seq.push(time.Now(), 519, 519+uint64(off), 123, false, 2, nil, nil)
seq.push(time.Now(), 518, 518+uint64(off), 123, true, 2, nil, nil)
seq.push(time.Now(), 519, 519+uint64(off), 123, false, 2, nil, 0, nil)
seq.push(time.Now(), 518, 518+uint64(off), 123, true, 2, nil, 0, nil)
req := []uint16{57, 58, 62, 63, 513, 514, 515, 516, 517}
res := seq.getExtPacketMetas(req)
@@ -63,14 +63,14 @@ func Test_sequencer(t *testing.T) {
require.Equal(t, val.extTimestamp, uint64(123))
}
seq.push(time.Now(), 521, 521+uint64(off), 123, true, 1, nil, nil)
seq.push(time.Now(), 521, 521+uint64(off), 123, true, 1, nil, 0, nil)
m := seq.getExtPacketMetas([]uint16{521 + off})
require.Equal(t, 0, len(m))
time.Sleep((ignoreRetransmission + 10) * time.Millisecond)
m = seq.getExtPacketMetas([]uint16{521 + off})
require.Equal(t, 1, len(m))
seq.push(time.Now(), 505, 505+uint64(off), 123, false, 1, nil, nil)
seq.push(time.Now(), 505, 505+uint64(off), 123, false, 1, nil, 0, nil)
m = seq.getExtPacketMetas([]uint16{505 + off})
require.Equal(t, 0, len(m))
time.Sleep((ignoreRetransmission + 10) * time.Millisecond)
@@ -87,14 +87,18 @@ func Test_sequencer_getNACKSeqNo_exclusion(t *testing.T) {
isPadding bool
}
type fields struct {
inputs []input
offset uint64
markerOdd bool
markerEven bool
codecBytesOdd []byte
codecBytesEven []byte
ddBytesOdd []byte
ddBytesEven []byte
inputs []input
offset uint64
markerOdd bool
markerEven bool
codecBytesOdd []byte
numCodecBytesInOdd int
codecBytesEven []byte
numCodecBytesInEven int
codecBytesOversized []byte
ddBytesOdd []byte
ddBytesEven []byte
ddBytesOversized []byte
}
tests := []struct {
@@ -117,13 +121,17 @@ func Test_sequencer_getNACKSeqNo_exclusion(t *testing.T) {
{65532, true},
{65534, false},
},
offset: 5,
markerOdd: true,
markerEven: false,
codecBytesOdd: []byte{1, 2, 3, 4},
codecBytesEven: []byte{5, 6, 7},
ddBytesOdd: []byte{8, 9, 10},
ddBytesEven: []byte{11, 12},
offset: 5,
markerOdd: true,
markerEven: false,
codecBytesOdd: []byte{1, 2, 3, 4},
numCodecBytesInOdd: 3,
codecBytesEven: []byte{5, 6, 7},
numCodecBytesInEven: 4,
codecBytesOversized: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
ddBytesOdd: []byte{8, 9, 10},
ddBytesEven: []byte{11, 12},
ddBytesOversized: []byte{11, 12, 13, 14, 15, 16, 17, 18, 19},
},
args: args{
seqNo: []uint16{65526 + 5, 65527 + 5, 65530 + 5, 0 /* 65531 input */, 1 /* 65532 input */, 2 /* 65533 input */, 3 /* 65534 input */},
@@ -143,10 +151,44 @@ func Test_sequencer_getNACKSeqNo_exclusion(t *testing.T) {
if i.isPadding {
n.pushPadding(i.seqNo+tt.fields.offset, i.seqNo+tt.fields.offset)
} else {
if i.seqNo%2 == 0 {
n.push(time.Now(), i.seqNo, i.seqNo+tt.fields.offset, 123, tt.fields.markerEven, 3, tt.fields.codecBytesEven, tt.fields.ddBytesEven)
if i.seqNo%5 == 0 {
n.push(
time.Now(),
i.seqNo,
i.seqNo+tt.fields.offset,
123,
tt.fields.markerOdd,
3,
tt.fields.codecBytesOversized,
len(tt.fields.codecBytesOversized),
tt.fields.ddBytesOversized,
)
} else {
n.push(time.Now(), i.seqNo, i.seqNo+tt.fields.offset, 123, tt.fields.markerOdd, 3, tt.fields.codecBytesOdd, tt.fields.ddBytesOdd)
if i.seqNo%2 == 0 {
n.push(
time.Now(),
i.seqNo,
i.seqNo+tt.fields.offset,
123,
tt.fields.markerEven,
3,
tt.fields.codecBytesEven,
tt.fields.numCodecBytesInEven,
tt.fields.ddBytesEven,
)
} else {
n.push(
time.Now(),
i.seqNo,
i.seqNo+tt.fields.offset,
123,
tt.fields.markerOdd,
3,
tt.fields.codecBytesOdd,
tt.fields.numCodecBytesInOdd,
tt.fields.ddBytesOdd,
)
}
}
}
}
@@ -156,14 +198,26 @@ func Test_sequencer_getNACKSeqNo_exclusion(t *testing.T) {
var got []uint16
for _, sn := range g {
got = append(got, sn.sourceSeqNo)
if sn.sourceSeqNo%2 == 0 {
require.Equal(t, tt.fields.markerEven, sn.marker)
require.Equal(t, tt.fields.codecBytesEven, sn.codecBytes)
require.Equal(t, tt.fields.ddBytesEven, sn.ddBytes)
} else {
if sn.sourceSeqNo%5 == 0 {
require.Equal(t, tt.fields.markerOdd, sn.marker)
require.Equal(t, tt.fields.codecBytesOdd, sn.codecBytes)
require.Equal(t, tt.fields.ddBytesOdd, sn.ddBytes)
require.Equal(t, tt.fields.codecBytesOversized, sn.codecBytesSlice)
require.Equal(t, uint8(len(tt.fields.codecBytesOversized)), sn.numCodecBytesIn)
require.Equal(t, tt.fields.ddBytesOversized, sn.ddBytesSlice)
require.Equal(t, uint8(len(tt.fields.codecBytesOversized)), sn.ddBytesSize)
} else {
if sn.sourceSeqNo%2 == 0 {
require.Equal(t, tt.fields.markerEven, sn.marker)
require.Equal(t, tt.fields.codecBytesEven, sn.codecBytes[:sn.numCodecBytesOut])
require.Equal(t, uint8(tt.fields.numCodecBytesInEven), sn.numCodecBytesIn)
require.Equal(t, tt.fields.ddBytesEven, sn.ddBytes[:sn.ddBytesSize])
require.Equal(t, uint8(len(tt.fields.ddBytesEven)), sn.ddBytesSize)
} else {
require.Equal(t, tt.fields.markerOdd, sn.marker)
require.Equal(t, tt.fields.codecBytesOdd, sn.codecBytes[:sn.numCodecBytesOut])
require.Equal(t, uint8(tt.fields.numCodecBytesInOdd), sn.numCodecBytesIn)
require.Equal(t, tt.fields.ddBytesOdd, sn.ddBytes[:sn.ddBytesSize])
require.Equal(t, uint8(len(tt.fields.ddBytesOdd)), sn.ddBytesSize)
}
}
}
if !reflect.DeepEqual(got, tt.want) {
@@ -182,14 +236,16 @@ func Test_sequencer_getNACKSeqNo_no_exclusion(t *testing.T) {
isPadding bool
}
type fields struct {
inputs []input
offset uint64
markerOdd bool
markerEven bool
codecBytesOdd []byte
codecBytesEven []byte
ddBytesOdd []byte
ddBytesEven []byte
inputs []input
offset uint64
markerOdd bool
markerEven bool
codecBytesOdd []byte
numCodecBytesInOdd int
codecBytesEven []byte
numCodecBytesInEven int
ddBytesOdd []byte
ddBytesEven []byte
}
tests := []struct {
@@ -213,13 +269,15 @@ func Test_sequencer_getNACKSeqNo_no_exclusion(t *testing.T) {
{12, false},
{13, false},
},
offset: 5,
markerOdd: true,
markerEven: false,
codecBytesOdd: []byte{1, 2, 3, 4},
codecBytesEven: []byte{5, 6, 7},
ddBytesOdd: []byte{8, 9, 10},
ddBytesEven: []byte{11, 12},
offset: 5,
markerOdd: true,
markerEven: false,
codecBytesOdd: []byte{1, 2, 3, 4},
numCodecBytesInOdd: 3,
codecBytesEven: []byte{5, 6, 7},
numCodecBytesInEven: 4,
ddBytesOdd: []byte{8, 9, 10},
ddBytesEven: []byte{11, 12},
},
args: args{
seqNo: []uint16{4 + 5, 5 + 5, 8 + 5, 9 + 5, 10 + 5, 11 + 5, 12 + 5},
@@ -238,9 +296,29 @@ func Test_sequencer_getNACKSeqNo_no_exclusion(t *testing.T) {
n.pushPadding(i.seqNo+tt.fields.offset, i.seqNo+tt.fields.offset)
} else {
if i.seqNo%2 == 0 {
n.push(time.Now(), i.seqNo, i.seqNo+tt.fields.offset, 123, tt.fields.markerEven, 3, tt.fields.codecBytesEven, tt.fields.ddBytesEven)
n.push(
time.Now(),
i.seqNo,
i.seqNo+tt.fields.offset,
123,
tt.fields.markerEven,
3,
tt.fields.codecBytesEven,
tt.fields.numCodecBytesInEven,
tt.fields.ddBytesEven,
)
} else {
n.push(time.Now(), i.seqNo, i.seqNo+tt.fields.offset, 123, tt.fields.markerOdd, 3, tt.fields.codecBytesOdd, tt.fields.ddBytesOdd)
n.push(
time.Now(),
i.seqNo,
i.seqNo+tt.fields.offset,
123,
tt.fields.markerOdd,
3,
tt.fields.codecBytesOdd,
tt.fields.numCodecBytesInOdd,
tt.fields.ddBytesOdd,
)
}
}
}
@@ -252,12 +330,16 @@ func Test_sequencer_getNACKSeqNo_no_exclusion(t *testing.T) {
got = append(got, sn.sourceSeqNo)
if sn.sourceSeqNo%2 == 0 {
require.Equal(t, tt.fields.markerEven, sn.marker)
require.Equal(t, tt.fields.codecBytesEven, sn.codecBytes)
require.Equal(t, tt.fields.ddBytesEven, sn.ddBytes)
require.Equal(t, tt.fields.codecBytesEven, sn.codecBytes[:sn.numCodecBytesOut])
require.Equal(t, uint8(tt.fields.numCodecBytesInEven), sn.numCodecBytesIn)
require.Equal(t, tt.fields.ddBytesEven, sn.ddBytes[:sn.ddBytesSize])
require.Equal(t, uint8(len(tt.fields.ddBytesEven)), sn.ddBytesSize)
} else {
require.Equal(t, tt.fields.markerOdd, sn.marker)
require.Equal(t, tt.fields.codecBytesOdd, sn.codecBytes)
require.Equal(t, tt.fields.ddBytesOdd, sn.ddBytes)
require.Equal(t, tt.fields.codecBytesOdd, sn.codecBytes[:sn.numCodecBytesOut])
require.Equal(t, uint8(tt.fields.numCodecBytesInOdd), sn.numCodecBytesIn)
require.Equal(t, tt.fields.ddBytesOdd, sn.ddBytes[:sn.ddBytesSize])
require.Equal(t, uint8(len(tt.fields.ddBytesOdd)), sn.ddBytesSize)
}
}
if !reflect.DeepEqual(got, tt.want) {
+5 -1
View File
@@ -180,7 +180,11 @@ func NewStreamAllocator(params StreamAllocatorParams) *StreamAllocator {
}),
rateMonitor: NewRateMonitor(),
videoTracks: make(map[livekit.TrackID]*Track),
eventsQueue: utils.NewOpsQueue("stream-allocator", 64, true),
eventsQueue: utils.NewOpsQueue(utils.OpsQueueParams{
Name: "stream-allocator",
MinSize: 64,
Logger: params.Logger,
}),
}
s.probeController = NewProbeController(ProbeControllerParams{
+17 -41
View File
@@ -25,17 +25,16 @@ import (
"go.uber.org/atomic"
"google.golang.org/protobuf/proto"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
"github.com/livekit/livekit-server/pkg/config"
"github.com/livekit/livekit-server/pkg/sfu/buffer"
"github.com/livekit/livekit-server/pkg/sfu/streamtracker"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
)
const (
senderReportThresholdSeconds = float64(60.0)
minDurationForClockRateCalculation = 15 * time.Second
)
// ---------------------------------------------------
@@ -51,12 +50,6 @@ type StreamTrackerManagerListener interface {
// ---------------------------------------------------
type endsSenderReport struct {
first *buffer.RTCPSenderReportData
newest *buffer.RTCPSenderReportData
lastUpdated time.Time
}
type StreamTrackerManager struct {
logger logger.Logger
trackInfo atomic.Pointer[livekit.TrackInfo]
@@ -77,7 +70,7 @@ type StreamTrackerManager struct {
paused bool
senderReportMu sync.RWMutex
senderReports [buffer.DefaultMaxLayerSpatial + 1]endsSenderReport
senderReports [buffer.DefaultMaxLayerSpatial + 1]*buffer.RTCPSenderReportData
layerOffsets [buffer.DefaultMaxLayerSpatial + 1][buffer.DefaultMaxLayerSpatial + 1]uint32
closed core.Fuse
@@ -98,7 +91,6 @@ func NewStreamTrackerManager(
maxPublishedLayer: buffer.InvalidLayerSpatial,
maxTemporalLayerSeen: buffer.InvalidLayerTemporal,
clockRate: clockRate,
closed: core.NewFuse(),
}
s.trackInfo.Store(proto.Clone(trackInfo).(*livekit.TrackInfo))
@@ -559,8 +551,8 @@ func (s *StreamTrackerManager) maxExpectedLayerFromTrackInfo() {
}
func (s *StreamTrackerManager) updateLayerOffsetLocked(ref, other int32) {
srRef := s.senderReports[ref].newest
srOther := s.senderReports[other].newest
srRef := s.senderReports[ref]
srOther := s.senderReports[other]
if srRef == nil || srRef.NTPTimestamp == 0 || srOther == nil || srOther.NTPTimestamp == 0 {
return
}
@@ -600,7 +592,7 @@ func (s *StreamTrackerManager) updateLayerOffsetLocked(ref, other int32) {
s.layerOffsets[ref][other] = offset
}
func (s *StreamTrackerManager) SetRTCPSenderReportData(layer int32, srFirst *buffer.RTCPSenderReportData, srNewest *buffer.RTCPSenderReportData) {
func (s *StreamTrackerManager) SetRTCPSenderReportData(layer int32, srData *buffer.RTCPSenderReportData) {
s.senderReportMu.Lock()
defer s.senderReportMu.Unlock()
@@ -608,9 +600,7 @@ func (s *StreamTrackerManager) SetRTCPSenderReportData(layer int32, srFirst *buf
return
}
s.senderReports[layer].first = srFirst
s.senderReports[layer].newest = srNewest
s.senderReports[layer].lastUpdated = time.Now()
s.senderReports[layer] = srData
// (re)fill offsets as necessary for received layer.
for i := int32(0); i < buffer.DefaultMaxLayerSpatial+1; i++ {
@@ -626,35 +616,21 @@ func (s *StreamTrackerManager) SetRTCPSenderReportData(layer int32, srFirst *buf
}
}
func (s *StreamTrackerManager) GetCalculatedClockRate(layer int32) uint32 {
s.senderReportMu.RLock()
defer s.senderReportMu.RUnlock()
func (s *StreamTrackerManager) GetRTCPSenderReportData(layer int32) *buffer.RTCPSenderReportData {
s.senderReportMu.Lock()
defer s.senderReportMu.Unlock()
if layer < 0 || int(layer) >= len(s.senderReports) {
// invalid layer
return 0
return nil
}
srFirst := s.senderReports[layer].first
srNewest := s.senderReports[layer].newest
if srFirst == nil || srFirst.NTPTimestamp == 0 || srNewest == nil || srNewest.NTPTimestamp == 0 || srFirst.RTPTimestamp == srNewest.RTPTimestamp {
// sender reports invalid or same
return 0
// SVC-TODO: better SVC detection
if s.isSVC {
// there is only one stream in SVC
layer = 0
}
if s.senderReports[layer].lastUpdated.IsZero() || time.Since(s.senderReports[layer].lastUpdated).Seconds() > senderReportThresholdSeconds {
// sender report updated too far back
return 0
}
tsf := srNewest.NTPTimestamp.Time().Sub(srFirst.NTPTimestamp.Time())
if tsf < minDurationForClockRateCalculation {
// not enough time has elapsed to get a stable clock rate calculation
return 0
}
rdsf := srNewest.RTPTimestampExt - srFirst.RTPTimestampExt
return uint32(float64(rdsf) / tsf.Seconds())
return s.senderReports[layer]
}
func (s *StreamTrackerManager) GetReferenceLayerRTPTimestamp(ts uint32, layer int32, referenceLayer int32) (uint32, error) {
+13 -16
View File
@@ -80,16 +80,17 @@ func (t *telemetryService) ParticipantJoined(
shouldSendEvent bool,
) {
t.enqueue(func() {
prometheus.IncrementParticipantRtcConnected(1)
prometheus.AddParticipant()
t.createWorker(
_, found := t.getOrCreateWorker(
ctx,
livekit.RoomID(room.Sid),
livekit.RoomName(room.Name),
livekit.ParticipantID(participant.Sid),
livekit.ParticipantIdentity(participant.Identity),
)
if !found {
prometheus.IncrementParticipantRtcConnected(1)
prometheus.AddParticipant()
}
if shouldSendEvent {
ev := newParticipantEvent(livekit.AnalyticsEventType_PARTICIPANT_JOINED, room, participant)
@@ -117,18 +118,14 @@ func (t *telemetryService) ParticipantActive(
})
}
worker, ok := t.getWorker(livekit.ParticipantID(participant.Sid))
if !ok {
// in case of session migration, we may not have seen a Join event take place.
// we'd need to create the worker here before being able to process events
worker = t.createWorker(
ctx,
livekit.RoomID(room.Sid),
livekit.RoomName(room.Name),
livekit.ParticipantID(participant.Sid),
livekit.ParticipantIdentity(participant.Identity),
)
worker, found := t.getOrCreateWorker(
ctx,
livekit.RoomID(room.Sid),
livekit.RoomName(room.Name),
livekit.ParticipantID(participant.Sid),
livekit.ParticipantIdentity(participant.Identity),
)
if !found {
// need to also account for participant count
prometheus.AddParticipant()
}
+2 -1
View File
@@ -23,6 +23,7 @@ import (
"github.com/livekit/livekit-server/pkg/config"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/rpc"
)
const (
@@ -108,7 +109,7 @@ func Init(nodeID string, nodeType livekit.NodeType, env string) {
initPacketStats(nodeID, nodeType, env)
initRoomStats(nodeID, nodeType, env)
initPSRPCStats(nodeID, nodeType, env)
rpc.InitPSRPCStats(prometheus.Labels{"node_id": nodeID, "node_type": nodeType.String(), "env": env})
initQualityStats(nodeID, nodeType, env)
}
-125
View File
@@ -1,125 +0,0 @@
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/livekit/protocol/livekit"
"github.com/livekit/psrpc"
"github.com/livekit/psrpc/pkg/middleware"
)
// Prometheus collectors for PSRPC instrumentation. They are populated by
// initPSRPCStats and written to by the PSRPCMetricsObserver callbacks below.
var (
	psrpcRequestTime        *prometheus.HistogramVec // unary/multi RPC latency (ms)
	psrpcStreamSendTime     *prometheus.HistogramVec // per-message stream send latency (ms)
	psrpcStreamReceiveTotal *prometheus.CounterVec   // total stream messages received
	psrpcStreamCurrent      *prometheus.GaugeVec     // currently open streams
	psrpcErrorTotal         *prometheus.CounterVec   // errors across all call kinds
)
// initPSRPCStats registers the Prometheus collectors used by
// PSRPCMetricsObserver. It must be called exactly once before any observer
// callback fires — prometheus.MustRegister panics on duplicate registration.
//
// Unary/multi RPC collectors carry a "kind" label ("rpc"/"multirpc"/"stream");
// stream-specific collectors omit it and use the shorter label set.
func initPSRPCStats(nodeID string, nodeType livekit.NodeType, env string) {
	labels := []string{"role", "kind", "service", "method"}
	streamLabels := []string{"role", "service", "method"}

	// Every collector shares the same per-node constant labels; build the
	// map once instead of repeating the literal five times.
	constLabels := prometheus.Labels{"node_id": nodeID, "node_type": nodeType.String(), "env": env}
	latencyBuckets := []float64{10, 50, 100, 300, 500, 1000, 1500, 2000, 5000, 10000}

	psrpcRequestTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace:   livekitNamespace,
		Subsystem:   "psrpc",
		Name:        "request_time_ms",
		ConstLabels: constLabels,
		Buckets:     latencyBuckets,
	}, labels)

	psrpcStreamSendTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace:   livekitNamespace,
		Subsystem:   "psrpc",
		Name:        "stream_send_time_ms",
		ConstLabels: constLabels,
		Buckets:     latencyBuckets,
	}, streamLabels)

	psrpcStreamReceiveTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace:   livekitNamespace,
		Subsystem:   "psrpc",
		Name:        "stream_receive_total",
		ConstLabels: constLabels,
	}, streamLabels)

	psrpcStreamCurrent = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace:   livekitNamespace,
		Subsystem:   "psrpc",
		Name:        "stream_count",
		ConstLabels: constLabels,
	}, streamLabels)

	psrpcErrorTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace:   livekitNamespace,
		Subsystem:   "psrpc",
		Name:        "error_total",
		ConstLabels: constLabels,
	}, labels)

	// MustRegister is variadic; one call registers everything.
	prometheus.MustRegister(
		psrpcRequestTime,
		psrpcStreamSendTime,
		psrpcStreamReceiveTotal,
		psrpcStreamCurrent,
		psrpcErrorTotal,
	)
}
// Compile-time assertion that PSRPCMetricsObserver implements
// middleware.MetricsObserver.
var _ middleware.MetricsObserver = PSRPCMetricsObserver{}

// PSRPCMetricsObserver reports PSRPC request/stream outcomes to the
// Prometheus collectors registered by initPSRPCStats. It is stateless, so a
// value receiver is used throughout.
type PSRPCMetricsObserver struct{}
// OnUnaryRequest records the outcome of a unary RPC: on failure it
// increments the error counter, on success it observes the request duration
// in milliseconds.
//
// The original branched on role == middleware.ClientRole for the success
// path, but both branches were byte-identical; the dead branching is
// collapsed here with no behavior change.
func (o PSRPCMetricsObserver) OnUnaryRequest(role middleware.MetricRole, info psrpc.RPCInfo, duration time.Duration, err error) {
	if err != nil {
		psrpcErrorTotal.WithLabelValues(role.String(), "rpc", info.Service, info.Method).Inc()
		return
	}
	psrpcRequestTime.WithLabelValues(role.String(), "rpc", info.Service, info.Method).Observe(float64(duration.Milliseconds()))
}
// OnMultiRequest records the outcome of a fan-out (multi) RPC: zero
// responses counts as an error, otherwise the total duration is observed in
// milliseconds.
//
// The original branched on role == middleware.ClientRole for the success
// path, but both branches were byte-identical; the dead branching is
// collapsed here with no behavior change. Note errorCount is accepted to
// satisfy the MetricsObserver interface but is not used — partial failures
// with at least one response are recorded as latency only.
func (o PSRPCMetricsObserver) OnMultiRequest(role middleware.MetricRole, info psrpc.RPCInfo, duration time.Duration, responseCount int, errorCount int) {
	if responseCount == 0 {
		psrpcErrorTotal.WithLabelValues(role.String(), "multirpc", info.Service, info.Method).Inc()
		return
	}
	psrpcRequestTime.WithLabelValues(role.String(), "multirpc", info.Service, info.Method).Observe(float64(duration.Milliseconds()))
}
// OnStreamSend records the result of sending one stream message: the send
// latency in milliseconds on success, or an error count on failure.
func (o PSRPCMetricsObserver) OnStreamSend(role middleware.MetricRole, info psrpc.RPCInfo, duration time.Duration, err error) {
	if err == nil {
		psrpcStreamSendTime.WithLabelValues(role.String(), info.Service, info.Method).Observe(float64(duration.Milliseconds()))
		return
	}
	psrpcErrorTotal.WithLabelValues(role.String(), "stream", info.Service, info.Method).Inc()
}
// OnStreamRecv records the result of receiving one stream message: the
// receive counter on success, or an error count on failure.
func (o PSRPCMetricsObserver) OnStreamRecv(role middleware.MetricRole, info psrpc.RPCInfo, err error) {
	if err == nil {
		psrpcStreamReceiveTotal.WithLabelValues(role.String(), info.Service, info.Method).Inc()
		return
	}
	psrpcErrorTotal.WithLabelValues(role.String(), "stream", info.Service, info.Method).Inc()
}
// OnStreamOpen increments the gauge of currently open PSRPC streams.
func (o PSRPCMetricsObserver) OnStreamOpen(role middleware.MetricRole, info psrpc.RPCInfo) {
	psrpcStreamCurrent.WithLabelValues(role.String(), info.Service, info.Method).Inc()
}
// OnStreamClose decrements the gauge of currently open PSRPC streams,
// pairing with OnStreamOpen.
func (o PSRPCMetricsObserver) OnStreamClose(role middleware.MetricRole, info psrpc.RPCInfo) {
	psrpcStreamCurrent.WithLabelValues(role.String(), info.Service, info.Method).Dec()
}
+71 -8
View File
@@ -15,9 +15,12 @@
package telemetry
import (
"context"
"fmt"
"sync"
"time"
"github.com/frostbyte73/core"
"go.uber.org/atomic"
"github.com/livekit/livekit-server/pkg/config"
@@ -53,7 +56,7 @@ type BytesTrackStats struct {
totalSendBytes, totalRecvBytes atomic.Uint64
totalSendMessages, totalRecvMessages atomic.Uint32
telemetry TelemetryService
isStopped atomic.Bool
done core.Fuse
}
func NewBytesTrackStats(trackID livekit.TrackID, pID livekit.ParticipantID, telemetry TelemetryService) *BytesTrackStats {
@@ -91,7 +94,7 @@ func (s *BytesTrackStats) GetTrafficTotals() *TrafficTotals {
}
func (s *BytesTrackStats) Stop() {
s.isStopped.Store(true)
s.done.Break()
}
func (s *BytesTrackStats) report() {
@@ -119,15 +122,75 @@ func (s *BytesTrackStats) report() {
}
func (s *BytesTrackStats) reporter() {
ticker := time.NewTicker(config.TelemetryStatsUpdateInterval)
defer ticker.Stop()
for !s.isStopped.Load() {
<-ticker.C
ticker := time.NewTicker(config.TelemetryNonMediaStatsUpdateInterval)
defer func() {
ticker.Stop()
s.report()
}()
for {
select {
case <-s.done.Watch():
return
case <-ticker.C:
s.report()
}
}
}
// -----------------------------------------------------------------------
// BytesSignalStats tracks signal-channel traffic for a participant. It
// embeds BytesTrackStats for the byte/message counters and defers starting
// the periodic reporter until both the room and the participant identity
// have been resolved (see ResolveRoom / ResolveParticipant).
type BytesSignalStats struct {
	BytesTrackStats

	// NOTE(review): storing a context in a struct is discouraged by Go
	// convention; presumably this is the signal connection's lifetime
	// context used for the telemetry join/leave calls — confirm with callers.
	ctx context.Context

	mu sync.Mutex
	ri *livekit.Room            // set once, when a room with a non-empty SID arrives
	pi *livekit.ParticipantInfo // set once, when participant info arrives
}
// NewBytesSignalStats creates signal-channel stats in a dormant state:
// counters accumulate immediately, but reporting starts only after both
// ResolveRoom and ResolveParticipant have supplied their data. The embedded
// trackID/pID are intentionally left unset until then (see maybeStart).
func NewBytesSignalStats(ctx context.Context, telemetry TelemetryService) *BytesSignalStats {
	return &BytesSignalStats{
		BytesTrackStats: BytesTrackStats{
			telemetry: telemetry,
		},
		ctx: ctx,
	}
}
// ResolveRoom records the room once a non-empty SID is available and, if the
// participant is already known, starts reporting. Later calls (or rooms
// without a SID yet) are ignored.
func (s *BytesSignalStats) ResolveRoom(ri *livekit.Room) {
	s.mu.Lock()
	defer s.mu.Unlock()

	// Already resolved, or the SID has not been assigned yet: nothing to do.
	if s.ri != nil || ri.GetSid() == "" {
		return
	}

	s.ri = ri
	s.maybeStart()
}
// ResolveParticipant records the participant info once and, if the room is
// already known, starts reporting. Later calls are ignored.
func (s *BytesSignalStats) ResolveParticipant(pi *livekit.ParticipantInfo) {
	s.mu.Lock()
	defer s.mu.Unlock()

	// Only the first resolution counts.
	if s.pi != nil {
		return
	}

	s.pi = pi
	s.maybeStart()
}
// maybeStart begins periodic reporting once both the room and the
// participant have been resolved. Callers must hold s.mu; the method is
// invoked from ResolveRoom and ResolveParticipant, so it runs at most once.
func (s *BytesSignalStats) maybeStart() {
	if s.ri == nil || s.pi == nil {
		return
	}

	// Flush counters accumulated before identity was known.
	// NOTE(review): this runs before pID/trackID are assigned below, so the
	// flush happens with their zero values — confirm this is intentional.
	s.report()

	s.pID = livekit.ParticipantID(s.pi.Sid)
	s.trackID = BytesTrackIDForParticipantID(BytesTrackTypeSignal, s.pID)
	// Synthesize a join so telemetry has a session to attribute signal
	// traffic to; the final "false" suppresses the analytics event.
	s.telemetry.ParticipantJoined(s.ctx, s.ri, s.pi, nil, nil, false)
	go s.reporter()
}
// reporter runs the embedded periodic reporter until Stop fires (it blocks
// inside BytesTrackStats.reporter), then emits a ParticipantLeft (with the
// analytics event suppressed) to close the synthetic session opened by
// maybeStart.
func (s *BytesSignalStats) reporter() {
	s.BytesTrackStats.reporter()
	s.telemetry.ParticipantLeft(s.ctx, s.ri, s.pi, false)
}
// -----------------------------------------------------------------------
+21 -9
View File
@@ -19,12 +19,13 @@ import (
"sync"
"time"
"golang.org/x/exp/maps"
"github.com/livekit/livekit-server/pkg/config"
"github.com/livekit/livekit-server/pkg/utils"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
"github.com/livekit/protocol/webhook"
"golang.org/x/exp/maps"
)
//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . TelemetryService
@@ -103,9 +104,14 @@ func NewTelemetryService(notifier webhook.QueuedNotifier, analytics AnalyticsSer
t := &telemetryService{
AnalyticsService: analytics,
notifier: notifier,
jobsQueue: utils.NewOpsQueue("telemetry", jobsQueueMinSize, true),
workers: make(map[livekit.ParticipantID]*StatsWorker),
notifier: notifier,
jobsQueue: utils.NewOpsQueue(utils.OpsQueueParams{
Name: "telemetry",
MinSize: jobsQueueMinSize,
FlushOnStop: true,
Logger: logger.GetLogger(),
}),
workers: make(map[livekit.ParticipantID]*StatsWorker),
}
t.jobsQueue.Start()
@@ -153,12 +159,19 @@ func (t *telemetryService) getWorker(participantID livekit.ParticipantID) (worke
return
}
func (t *telemetryService) createWorker(ctx context.Context,
func (t *telemetryService) getOrCreateWorker(ctx context.Context,
roomID livekit.RoomID,
roomName livekit.RoomName,
participantID livekit.ParticipantID,
participantIdentity livekit.ParticipantIdentity,
) *StatsWorker {
) (*StatsWorker, bool) {
t.lock.Lock()
defer t.lock.Unlock()
if worker, ok := t.workers[participantID]; ok {
return worker, true
}
worker := newStatsWorker(
ctx,
t,
@@ -168,11 +181,10 @@ func (t *telemetryService) createWorker(ctx context.Context,
participantIdentity,
)
t.lock.Lock()
t.workers[participantID] = worker
t.workersShadow = maps.Values(t.workers)
t.lock.Unlock()
return worker
return worker, false
}
func (t *telemetryService) cleanupWorkers() {
+15 -9
View File
@@ -19,12 +19,19 @@ import (
"sync"
"github.com/gammazero/deque"
"github.com/livekit/protocol/logger"
"github.com/livekit/protocol/utils"
)
// OpsQueueParams configures an OpsQueue (see NewOpsQueue).
type OpsQueueParams struct {
	Name        string        // queue name
	MinSize     uint          // initial capacity hint for the backing deque
	FlushOnStop bool          // when true, queued ops are drained before the queue shuts down
	Logger      logger.Logger // logger used by the queue
}
type OpsQueue struct {
name string
flushOnStop bool
params OpsQueueParams
lock sync.Mutex
ops deque.Deque[func()]
@@ -34,14 +41,13 @@ type OpsQueue struct {
isStopped bool
}
func NewOpsQueue(name string, minSize uint, flushOnStop bool) *OpsQueue {
func NewOpsQueue(params OpsQueueParams) *OpsQueue {
oq := &OpsQueue{
name: name,
flushOnStop: flushOnStop,
wake: make(chan struct{}, 1),
doneChan: make(chan struct{}),
params: params,
wake: make(chan struct{}, 1),
doneChan: make(chan struct{}),
}
oq.ops.SetMinCapacity(uint(utils.Min(bits.Len64(uint64(minSize-1)), 7)))
oq.ops.SetMinCapacity(uint(utils.Min(bits.Len64(uint64(oq.params.MinSize-1)), 7)))
return oq
}
@@ -95,7 +101,7 @@ func (oq *OpsQueue) process() {
<-oq.wake
for {
oq.lock.Lock()
if oq.isStopped && (!oq.flushOnStop || oq.ops.Len() == 0) {
if oq.isStopped && (!oq.params.FlushOnStop || oq.ops.Len() == 0) {
oq.lock.Unlock()
return
}
+5 -6
View File
@@ -679,19 +679,17 @@ func (c *RTCClient) PublishData(data []byte, kind livekit.DataPacket_Kind) error
return err
}
dp := &livekit.DataPacket{
dpData, err := proto.Marshal(&livekit.DataPacket{
Kind: kind,
Value: &livekit.DataPacket_User{
User: &livekit.UserPacket{Payload: data},
},
}
dpData, err := proto.Marshal(dp)
})
if err != nil {
return err
}
return c.publisher.SendDataPacket(dp, dpData)
return c.publisher.SendDataPacket(kind, dpData)
}
func (c *RTCClient) GetPublishedTrackIDs() []string {
@@ -732,12 +730,13 @@ func (c *RTCClient) ensurePublisherConnected() error {
}
}
func (c *RTCClient) handleDataMessage(_ livekit.DataPacket_Kind, data []byte) {
func (c *RTCClient) handleDataMessage(kind livekit.DataPacket_Kind, data []byte) {
dp := &livekit.DataPacket{}
err := proto.Unmarshal(data, dp)
if err != nil {
return
}
dp.Kind = kind
if val, ok := dp.Value.(*livekit.DataPacket_User); ok {
if c.OnDataReceived != nil {
c.OnDataReceived(val.User.Payload, val.User.ParticipantSid)