Signalling V2 protocol implementation start (#3794)

* WIP

* name

* refactor validate

* WIP

* WIP

* signal cache initial impl

* HandleConnect in room manager

* generate subscriber offer

* handle ConnectRequest as stand alone

* segmentation, reassembly

* clean up

* rearrange

* lock scope

* support metadata in connect request

* prom

* add SifTrailer to ConnectResponse

* prom for get offer error counter

* RtcInit counter

* Jie feedback

* signal client

* consolidate v1 and v2 into SignalClient

* clean up

* comment

* deps

* mage generate

* fix tests

* pass around roomName and participantIdentity

* mage generate
This commit is contained in:
Raja Subramanian
2025-07-18 00:01:21 +05:30
committed by GitHub
parent ba702a5323
commit b9a44c3fbf
32 changed files with 2300 additions and 395 deletions
+17 -17
View File
@@ -12,7 +12,7 @@ require (
github.com/elliotchance/orderedmap/v2 v2.7.0
github.com/florianl/go-tc v0.4.4
github.com/frostbyte73/core v0.1.1
github.com/gammazero/deque v1.0.0
github.com/gammazero/deque v1.1.0
github.com/gammazero/workerpool v1.1.3
github.com/google/uuid v1.6.0
github.com/google/wire v0.6.0
@@ -23,7 +23,7 @@ require (
github.com/jxskiss/base62 v1.1.0
github.com/livekit/mageutil v0.0.0-20250511045019-0f1ff63f7731
github.com/livekit/mediatransportutil v0.0.0-20250519131108-fb90f5acfded
github.com/livekit/protocol v1.39.4-0.20250716132625-924eb8b30d90
github.com/livekit/protocol v1.39.4-0.20250717134438-9922dc496733
github.com/livekit/psrpc v0.6.1-0.20250511053145-465289d72c3c
github.com/mackerelio/go-osstat v0.2.5
github.com/magefile/mage v1.15.0
@@ -36,12 +36,12 @@ require (
github.com/pion/ice/v4 v4.0.10
github.com/pion/interceptor v0.1.40
github.com/pion/rtcp v1.2.15
github.com/pion/rtp v1.8.19
github.com/pion/rtp v1.8.21
github.com/pion/sctp v1.8.39
github.com/pion/sdp/v3 v3.0.14
github.com/pion/transport/v3 v3.0.7
github.com/pion/turn/v4 v4.0.2
github.com/pion/webrtc/v4 v4.1.2
github.com/pion/webrtc/v4 v4.1.3
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.22.0
github.com/redis/go-redis/v9 v9.11.0
@@ -55,9 +55,9 @@ require (
go.uber.org/atomic v1.11.0
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b
golang.org/x/mod v0.25.0
golang.org/x/sync v0.15.0
golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc
golang.org/x/mod v0.26.0
golang.org/x/sync v0.16.0
google.golang.org/protobuf v1.36.6
gopkg.in/yaml.v3 v3.0.1
)
@@ -87,7 +87,7 @@ require (
github.com/go-jose/go-jose/v3 v3.0.4 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/cel-go v0.25.0 // indirect
github.com/google/cel-go v0.26.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/subcommands v1.2.0 // indirect
@@ -96,7 +96,7 @@ require (
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/josharian/native v1.1.0 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.11 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/lithammer/shortuuid/v4 v4.2.0 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/mdlayher/netlink v1.7.1 // indirect
@@ -130,13 +130,13 @@ require (
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/zeebo/xxh3 v1.0.2 // indirect
go.uber.org/zap/exp v0.3.0 // indirect
golang.org/x/crypto v0.39.0 // indirect
golang.org/x/net v0.41.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/text v0.26.0 // indirect
golang.org/x/tools v0.34.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
google.golang.org/grpc v1.73.0 // indirect
golang.org/x/crypto v0.40.0 // indirect
golang.org/x/net v0.42.0 // indirect
golang.org/x/sys v0.34.0 // indirect
golang.org/x/text v0.27.0 // indirect
golang.org/x/tools v0.35.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250715232539-7130f93afb79 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250715232539-7130f93afb79 // indirect
google.golang.org/grpc v1.74.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)
+44 -44
View File
@@ -72,8 +72,8 @@ github.com/frostbyte73/core v0.1.1 h1:ChhJOR7bAKOCPbA+lqDLE2cGKlCG5JXsDvvQr4YaJI
github.com/frostbyte73/core v0.1.1/go.mod h1:mhfOtR+xWAvwXiwor7jnqPMnu4fxbv1F2MwZ0BEpzZo=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34=
github.com/gammazero/deque v1.0.0/go.mod h1:iflpYvtGfM3U8S8j+sZEKIak3SAKYpA5/SQewgfXDKo=
github.com/gammazero/deque v1.1.0 h1:OyiyReBbnEG2PP0Bnv1AASLIYvyKqIFN5xfl1t8oGLo=
github.com/gammazero/deque v1.1.0/go.mod h1:JVrR+Bj1NMQbPnYclvDlvSX0nVGReLrQZ0aUMuWLctg=
github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q=
github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc=
github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY=
@@ -88,8 +88,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/cel-go v0.25.0 h1:jsFw9Fhn+3y2kBbltZR4VEz5xKkcIFRPDnuEzAGv5GY=
github.com/google/cel-go v0.25.0/go.mod h1:hjEb6r5SuOSlhCHmFoLzu8HGCERvIsDAbxDAyNU/MmI=
github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -146,8 +146,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.2.11 h1:0OwqZRYI2rFrjS4kvkDnqJkKHdHaRnCm68/DY4OxRzU=
github.com/klauspost/cpuid/v2 v2.2.11/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
@@ -167,8 +167,8 @@ github.com/livekit/mageutil v0.0.0-20250511045019-0f1ff63f7731 h1:9x+U2HGLrSw5AT
github.com/livekit/mageutil v0.0.0-20250511045019-0f1ff63f7731/go.mod h1:Rs3MhFwutWhGwmY1VQsygw28z5bWcnEYmS1OG9OxjOQ=
github.com/livekit/mediatransportutil v0.0.0-20250519131108-fb90f5acfded h1:ylZPdnlX1RW9Z15SD4mp87vT2D2shsk0hpLJwSPcq3g=
github.com/livekit/mediatransportutil v0.0.0-20250519131108-fb90f5acfded/go.mod h1:mSNtYzSf6iY9xM3UX42VEI+STHvMgHmrYzEHPcdhB8A=
github.com/livekit/protocol v1.39.4-0.20250716132625-924eb8b30d90 h1:bHJac9MxR/vSaMp8x/IKeSITZ7s9ZWAItqQfALdeL6U=
github.com/livekit/protocol v1.39.4-0.20250716132625-924eb8b30d90/go.mod h1:6l+zgRJZ9sY96LM7DA3EMcKQC5zsVyZVP73c+9wgvCA=
github.com/livekit/protocol v1.39.4-0.20250717134438-9922dc496733 h1:4Oe8IsNJC8JP5O8K+mhlNfdbNhJPEutoVTtJo5qxg8s=
github.com/livekit/protocol v1.39.4-0.20250717134438-9922dc496733/go.mod h1:6l+zgRJZ9sY96LM7DA3EMcKQC5zsVyZVP73c+9wgvCA=
github.com/livekit/psrpc v0.6.1-0.20250511053145-465289d72c3c h1:WwEr0YBejYbKzk8LSaO9h8h0G9MnE7shyDu8yXQWmEc=
github.com/livekit/psrpc v0.6.1-0.20250511053145-465289d72c3c/go.mod h1:kmD+AZPkWu0MaXIMv57jhNlbiSZZ/Jx4bzlxBDVmJes=
github.com/mackerelio/go-osstat v0.2.5 h1:+MqTbZUhoIt4m8qzkVoXUJg1EuifwlAJSk4Yl2GXh+o=
@@ -246,8 +246,8 @@ github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c=
github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
github.com/pion/rtp v1.8.21 h1:3yrOwmZFyUpcIosNcWRpQaU+UXIJ6yxLuJ8Bx0mw37Y=
github.com/pion/rtp v1.8.21/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
github.com/pion/sdp/v3 v3.0.14 h1:1h7gBr9FhOWH5GjWWY5lcw/U85MtdcibTyt/o6RxRUI=
@@ -260,8 +260,8 @@ github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=
github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U=
github.com/pion/webrtc/v4 v4.1.3 h1:YZ67Boj9X/hk190jJZ8+HFGQ6DqSZ/fYP3sLAZv7c3c=
github.com/pion/webrtc/v4 v4.1.3/go.mod h1:rsq+zQ82ryfR9vbb0L1umPJ6Ogq7zm8mcn9fcGnxomM=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -332,16 +332,16 @@ github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -359,18 +359,18 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o=
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc h1:TS73t7x3KarrNd5qAipmspBDS1rkMcgVG/fS1aRb4Rc=
golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -393,8 +393,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -403,8 +403,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -438,8 +438,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@@ -455,8 +455,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -465,18 +465,18 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
google.golang.org/genproto/googleapis/api v0.0.0-20250715232539-7130f93afb79 h1:iOye66xuaAK0WnkPuhQPUFy8eJcmwUXqGGP3om6IxX8=
google.golang.org/genproto/googleapis/api v0.0.0-20250715232539-7130f93afb79/go.mod h1:HKJDgKsFUnv5VAGeQjz8kxcgDP0HoE0iZNp0OdZNlhE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250715232539-7130f93afb79 h1:1ZwqphdOdWYXsUHgMpU/101nCtf/kSp9hOrcvFsnl10=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250715232539-7130f93afb79/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.74.0 h1:sxRSkyLxlceWQiqDofxDot3d4u7DyoHPc7SBXMj8gGY=
google.golang.org/grpc v1.74.0/go.mod h1:NZUaK8dAMUfzhK6uxZ+9511LtOrk73UGWOFoNvz7z+s=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+17 -5
View File
@@ -153,8 +153,21 @@ type StartParticipantSignalResults struct {
type MessageRouter interface {
// CreateRoom starts an rtc room
CreateRoom(ctx context.Context, req *livekit.CreateRoomRequest) (res *livekit.Room, err error)
// StartParticipantSignal participant signal connection is ready to start
StartParticipantSignal(ctx context.Context, roomName livekit.RoomName, pi ParticipantInit) (res StartParticipantSignalResults, err error)
StartParticipantSignal(
ctx context.Context,
roomName livekit.RoomName,
pi ParticipantInit,
) (res StartParticipantSignalResults, err error)
// HandleParticipantConnectRequest handles connection request from participant
HandleParticipantConnectRequest(
ctx context.Context,
roomName livekit.RoomName,
participantIdentity livekit.ParticipantIdentity,
rscr *rpc.RelaySignalv2ConnectRequest,
) (resp *rpc.RelaySignalv2ConnectResponse, err error)
}
func CreateRouter(
@@ -229,10 +242,9 @@ func (pi *ParticipantInit) ToStartSession(roomName livekit.RoomName, connectionI
}
ss := &livekit.StartSession{
RoomName: string(roomName),
Identity: string(pi.Identity),
Name: string(pi.Name),
// connection id is to allow the RTC node to identify where to route the message back to
RoomName: string(roomName),
Identity: string(pi.Identity),
Name: string(pi.Name),
ConnectionId: string(connectionID),
Reconnect: pi.Reconnect,
ReconnectReason: pi.ReconnectReason,
+31 -1
View File
@@ -24,6 +24,7 @@ import (
"github.com/livekit/livekit-server/pkg/config"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
"github.com/livekit/protocol/rpc"
)
var _ Router = (*LocalRouter)(nil)
@@ -110,7 +111,8 @@ func (r *LocalRouter) StartParticipantSignal(ctx context.Context, roomName livek
func (r *LocalRouter) StartParticipantSignalWithNodeID(ctx context.Context, roomName livekit.RoomName, pi ParticipantInit, nodeID livekit.NodeID) (res StartParticipantSignalResults, err error) {
connectionID, reqSink, resSource, err := r.signalClient.StartParticipantSignal(ctx, roomName, pi, nodeID)
if err != nil {
logger.Errorw("could not handle new participant", err,
logger.Errorw(
"could not handle new participant", err,
"room", roomName,
"participant", pi.Identity,
"connID", connectionID,
@@ -126,6 +128,34 @@ func (r *LocalRouter) StartParticipantSignalWithNodeID(ctx context.Context, room
return
}
func (r *LocalRouter) HandleParticipantConnectRequest(
ctx context.Context,
roomName livekit.RoomName,
participantIdentity livekit.ParticipantIdentity,
rscr *rpc.RelaySignalv2ConnectRequest,
) (*rpc.RelaySignalv2ConnectResponse, error) {
return r.HandleParticipantConnectRequestWithNodeID(ctx, roomName, participantIdentity, rscr, r.currentNode.NodeID())
}
func (r *LocalRouter) HandleParticipantConnectRequestWithNodeID(
ctx context.Context,
roomName livekit.RoomName,
participantIdentity livekit.ParticipantIdentity,
rscr *rpc.RelaySignalv2ConnectRequest,
nodeID livekit.NodeID,
) (*rpc.RelaySignalv2ConnectResponse, error) {
resp, err := r.signalClient.HandleParticipantConnectRequest(ctx, roomName, participantIdentity, nodeID, rscr)
if err != nil {
logger.Errorw(
"could not handle new participant", err,
"room", roomName,
"participant", participantIdentity,
// SIGNALLING-V2-TODO "connID", connectionID,
)
}
return resp, err
}
func (r *LocalRouter) Start() error {
if r.isStarted.Swap(true) {
return nil
+21
View File
@@ -169,6 +169,27 @@ func (r *RedisRouter) StartParticipantSignal(ctx context.Context, roomName livek
return r.StartParticipantSignalWithNodeID(ctx, roomName, pi, livekit.NodeID(rtcNode.Id))
}
// HandleParticipantConnectRequest sends participant connect request to the RTC node for the room
func (r *RedisRouter) HandleParticipantConnectRequest(
ctx context.Context,
roomName livekit.RoomName,
participantIdentity livekit.ParticipantIdentity,
rscr *rpc.RelaySignalv2ConnectRequest,
) (*rpc.RelaySignalv2ConnectResponse, error) {
rtcNode, err := r.GetNodeForRoom(ctx, roomName)
if err != nil {
return nil, err
}
return r.HandleParticipantConnectRequestWithNodeID(
ctx,
roomName,
participantIdentity,
rscr,
livekit.NodeID(rtcNode.Id),
)
}
func (r *RedisRouter) Start() error {
if r.isStarted.Swap(true) {
return nil
+86
View File
@@ -7,6 +7,7 @@ import (
"github.com/livekit/livekit-server/pkg/routing"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/rpc"
)
type FakeRouter struct {
@@ -64,6 +65,22 @@ type FakeRouter struct {
getRegionReturnsOnCall map[int]struct {
result1 string
}
HandleParticipantConnectRequestStub func(context.Context, livekit.RoomName, livekit.ParticipantIdentity, *rpc.RelaySignalv2ConnectRequest) (*rpc.RelaySignalv2ConnectResponse, error)
handleParticipantConnectRequestMutex sync.RWMutex
handleParticipantConnectRequestArgsForCall []struct {
arg1 context.Context
arg2 livekit.RoomName
arg3 livekit.ParticipantIdentity
arg4 *rpc.RelaySignalv2ConnectRequest
}
handleParticipantConnectRequestReturns struct {
result1 *rpc.RelaySignalv2ConnectResponse
result2 error
}
handleParticipantConnectRequestReturnsOnCall map[int]struct {
result1 *rpc.RelaySignalv2ConnectResponse
result2 error
}
ListNodesStub func() ([]*livekit.Node, error)
listNodesMutex sync.RWMutex
listNodesArgsForCall []struct {
@@ -421,6 +438,73 @@ func (fake *FakeRouter) GetRegionReturnsOnCall(i int, result1 string) {
}{result1}
}
func (fake *FakeRouter) HandleParticipantConnectRequest(arg1 context.Context, arg2 livekit.RoomName, arg3 livekit.ParticipantIdentity, arg4 *rpc.RelaySignalv2ConnectRequest) (*rpc.RelaySignalv2ConnectResponse, error) {
fake.handleParticipantConnectRequestMutex.Lock()
ret, specificReturn := fake.handleParticipantConnectRequestReturnsOnCall[len(fake.handleParticipantConnectRequestArgsForCall)]
fake.handleParticipantConnectRequestArgsForCall = append(fake.handleParticipantConnectRequestArgsForCall, struct {
arg1 context.Context
arg2 livekit.RoomName
arg3 livekit.ParticipantIdentity
arg4 *rpc.RelaySignalv2ConnectRequest
}{arg1, arg2, arg3, arg4})
stub := fake.HandleParticipantConnectRequestStub
fakeReturns := fake.handleParticipantConnectRequestReturns
fake.recordInvocation("HandleParticipantConnectRequest", []interface{}{arg1, arg2, arg3, arg4})
fake.handleParticipantConnectRequestMutex.Unlock()
if stub != nil {
return stub(arg1, arg2, arg3, arg4)
}
if specificReturn {
return ret.result1, ret.result2
}
return fakeReturns.result1, fakeReturns.result2
}
func (fake *FakeRouter) HandleParticipantConnectRequestCallCount() int {
fake.handleParticipantConnectRequestMutex.RLock()
defer fake.handleParticipantConnectRequestMutex.RUnlock()
return len(fake.handleParticipantConnectRequestArgsForCall)
}
func (fake *FakeRouter) HandleParticipantConnectRequestCalls(stub func(context.Context, livekit.RoomName, livekit.ParticipantIdentity, *rpc.RelaySignalv2ConnectRequest) (*rpc.RelaySignalv2ConnectResponse, error)) {
fake.handleParticipantConnectRequestMutex.Lock()
defer fake.handleParticipantConnectRequestMutex.Unlock()
fake.HandleParticipantConnectRequestStub = stub
}
func (fake *FakeRouter) HandleParticipantConnectRequestArgsForCall(i int) (context.Context, livekit.RoomName, livekit.ParticipantIdentity, *rpc.RelaySignalv2ConnectRequest) {
fake.handleParticipantConnectRequestMutex.RLock()
defer fake.handleParticipantConnectRequestMutex.RUnlock()
argsForCall := fake.handleParticipantConnectRequestArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4
}
func (fake *FakeRouter) HandleParticipantConnectRequestReturns(result1 *rpc.RelaySignalv2ConnectResponse, result2 error) {
fake.handleParticipantConnectRequestMutex.Lock()
defer fake.handleParticipantConnectRequestMutex.Unlock()
fake.HandleParticipantConnectRequestStub = nil
fake.handleParticipantConnectRequestReturns = struct {
result1 *rpc.RelaySignalv2ConnectResponse
result2 error
}{result1, result2}
}
func (fake *FakeRouter) HandleParticipantConnectRequestReturnsOnCall(i int, result1 *rpc.RelaySignalv2ConnectResponse, result2 error) {
fake.handleParticipantConnectRequestMutex.Lock()
defer fake.handleParticipantConnectRequestMutex.Unlock()
fake.HandleParticipantConnectRequestStub = nil
if fake.handleParticipantConnectRequestReturnsOnCall == nil {
fake.handleParticipantConnectRequestReturnsOnCall = make(map[int]struct {
result1 *rpc.RelaySignalv2ConnectResponse
result2 error
})
}
fake.handleParticipantConnectRequestReturnsOnCall[i] = struct {
result1 *rpc.RelaySignalv2ConnectResponse
result2 error
}{result1, result2}
}
func (fake *FakeRouter) ListNodes() ([]*livekit.Node, error) {
fake.listNodesMutex.Lock()
ret, specificReturn := fake.listNodesReturnsOnCall[len(fake.listNodesArgsForCall)]
@@ -855,6 +939,8 @@ func (fake *FakeRouter) Invocations() map[string][][]interface{} {
defer fake.getNodeForRoomMutex.RUnlock()
fake.getRegionMutex.RLock()
defer fake.getRegionMutex.RUnlock()
fake.handleParticipantConnectRequestMutex.RLock()
defer fake.handleParticipantConnectRequestMutex.RUnlock()
fake.listNodesMutex.RLock()
defer fake.listNodesMutex.RUnlock()
fake.registerNodeMutex.RLock()
@@ -7,6 +7,7 @@ import (
"github.com/livekit/livekit-server/pkg/routing"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/rpc"
)
type FakeSignalClient struct {
@@ -20,6 +21,23 @@ type FakeSignalClient struct {
activeCountReturnsOnCall map[int]struct {
result1 int
}
HandleParticipantConnectRequestStub func(context.Context, livekit.RoomName, livekit.ParticipantIdentity, livekit.NodeID, *rpc.RelaySignalv2ConnectRequest) (*rpc.RelaySignalv2ConnectResponse, error)
handleParticipantConnectRequestMutex sync.RWMutex
handleParticipantConnectRequestArgsForCall []struct {
arg1 context.Context
arg2 livekit.RoomName
arg3 livekit.ParticipantIdentity
arg4 livekit.NodeID
arg5 *rpc.RelaySignalv2ConnectRequest
}
handleParticipantConnectRequestReturns struct {
result1 *rpc.RelaySignalv2ConnectResponse
result2 error
}
handleParticipantConnectRequestReturnsOnCall map[int]struct {
result1 *rpc.RelaySignalv2ConnectResponse
result2 error
}
StartParticipantSignalStub func(context.Context, livekit.RoomName, routing.ParticipantInit, livekit.NodeID) (livekit.ConnectionID, routing.MessageSink, routing.MessageSource, error)
startParticipantSignalMutex sync.RWMutex
startParticipantSignalArgsForCall []struct {
@@ -97,6 +115,74 @@ func (fake *FakeSignalClient) ActiveCountReturnsOnCall(i int, result1 int) {
}{result1}
}
// HandleParticipantConnectRequest records the invocation and its arguments,
// then returns the configured result. Precedence: an installed stub wins,
// then a per-call override set via ReturnsOnCall, then the default returns.
// Code generated by counterfeiter; the mutex is released before the stub is
// invoked so a stub may call back into the fake without deadlocking.
func (fake *FakeSignalClient) HandleParticipantConnectRequest(arg1 context.Context, arg2 livekit.RoomName, arg3 livekit.ParticipantIdentity, arg4 livekit.NodeID, arg5 *rpc.RelaySignalv2ConnectRequest) (*rpc.RelaySignalv2ConnectResponse, error) {
	fake.handleParticipantConnectRequestMutex.Lock()
	// look up a per-call override keyed by the index of this call
	ret, specificReturn := fake.handleParticipantConnectRequestReturnsOnCall[len(fake.handleParticipantConnectRequestArgsForCall)]
	fake.handleParticipantConnectRequestArgsForCall = append(fake.handleParticipantConnectRequestArgsForCall, struct {
		arg1 context.Context
		arg2 livekit.RoomName
		arg3 livekit.ParticipantIdentity
		arg4 livekit.NodeID
		arg5 *rpc.RelaySignalv2ConnectRequest
	}{arg1, arg2, arg3, arg4, arg5})
	// snapshot stub and default returns while holding the lock
	stub := fake.HandleParticipantConnectRequestStub
	fakeReturns := fake.handleParticipantConnectRequestReturns
	fake.recordInvocation("HandleParticipantConnectRequest", []interface{}{arg1, arg2, arg3, arg4, arg5})
	fake.handleParticipantConnectRequestMutex.Unlock()
	if stub != nil {
		return stub(arg1, arg2, arg3, arg4, arg5)
	}
	if specificReturn {
		return ret.result1, ret.result2
	}
	return fakeReturns.result1, fakeReturns.result2
}
// HandleParticipantConnectRequestCallCount returns how many times
// HandleParticipantConnectRequest has been invoked on this fake.
func (fake *FakeSignalClient) HandleParticipantConnectRequestCallCount() int {
	fake.handleParticipantConnectRequestMutex.RLock()
	defer fake.handleParticipantConnectRequestMutex.RUnlock()
	return len(fake.handleParticipantConnectRequestArgsForCall)
}
// HandleParticipantConnectRequestCalls installs stub as the implementation
// used by subsequent calls to HandleParticipantConnectRequest.
func (fake *FakeSignalClient) HandleParticipantConnectRequestCalls(stub func(context.Context, livekit.RoomName, livekit.ParticipantIdentity, livekit.NodeID, *rpc.RelaySignalv2ConnectRequest) (*rpc.RelaySignalv2ConnectResponse, error)) {
	fake.handleParticipantConnectRequestMutex.Lock()
	defer fake.handleParticipantConnectRequestMutex.Unlock()
	fake.HandleParticipantConnectRequestStub = stub
}
// HandleParticipantConnectRequestArgsForCall returns the arguments of the
// i-th (zero-based) recorded call; indexing past the recorded calls panics.
func (fake *FakeSignalClient) HandleParticipantConnectRequestArgsForCall(i int) (context.Context, livekit.RoomName, livekit.ParticipantIdentity, livekit.NodeID, *rpc.RelaySignalv2ConnectRequest) {
	fake.handleParticipantConnectRequestMutex.RLock()
	defer fake.handleParticipantConnectRequestMutex.RUnlock()
	argsForCall := fake.handleParticipantConnectRequestArgsForCall[i]
	return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5
}
// HandleParticipantConnectRequestReturns fixes the values returned by any
// call that has no per-call override; it also clears any installed stub.
func (fake *FakeSignalClient) HandleParticipantConnectRequestReturns(result1 *rpc.RelaySignalv2ConnectResponse, result2 error) {
	fake.handleParticipantConnectRequestMutex.Lock()
	defer fake.handleParticipantConnectRequestMutex.Unlock()
	fake.HandleParticipantConnectRequestStub = nil
	fake.handleParticipantConnectRequestReturns = struct {
		result1 *rpc.RelaySignalv2ConnectResponse
		result2 error
	}{result1, result2}
}
// HandleParticipantConnectRequestReturnsOnCall sets the values returned by
// the i-th (zero-based) call, lazily allocating the per-call override map;
// it also clears any installed stub.
func (fake *FakeSignalClient) HandleParticipantConnectRequestReturnsOnCall(i int, result1 *rpc.RelaySignalv2ConnectResponse, result2 error) {
	fake.handleParticipantConnectRequestMutex.Lock()
	defer fake.handleParticipantConnectRequestMutex.Unlock()
	fake.HandleParticipantConnectRequestStub = nil
	if fake.handleParticipantConnectRequestReturnsOnCall == nil {
		fake.handleParticipantConnectRequestReturnsOnCall = make(map[int]struct {
			result1 *rpc.RelaySignalv2ConnectResponse
			result2 error
		})
	}
	fake.handleParticipantConnectRequestReturnsOnCall[i] = struct {
		result1 *rpc.RelaySignalv2ConnectResponse
		result2 error
	}{result1, result2}
}
func (fake *FakeSignalClient) StartParticipantSignal(arg1 context.Context, arg2 livekit.RoomName, arg3 routing.ParticipantInit, arg4 livekit.NodeID) (livekit.ConnectionID, routing.MessageSink, routing.MessageSource, error) {
fake.startParticipantSignalMutex.Lock()
ret, specificReturn := fake.startParticipantSignalReturnsOnCall[len(fake.startParticipantSignalArgsForCall)]
@@ -175,6 +261,8 @@ func (fake *FakeSignalClient) Invocations() map[string][][]interface{} {
defer fake.invocationsMutex.RUnlock()
fake.activeCountMutex.RLock()
defer fake.activeCountMutex.RUnlock()
fake.handleParticipantConnectRequestMutex.RLock()
defer fake.handleParticipantConnectRequestMutex.RUnlock()
fake.startParticipantSignalMutex.RLock()
defer fake.startParticipantSignalMutex.RUnlock()
copiedInvocations := map[string][][]interface{}{}
+57 -8
View File
@@ -41,17 +41,26 @@ var ErrSignalMessageDropped = errors.New("signal message dropped")
// SignalClient relays signalling traffic from this node to the node hosting
// a participant's RTC session.
type SignalClient interface {
	// ActiveCount reports the number of currently active signal relays.
	ActiveCount() int
	// StartParticipantSignal opens a bidirectional relay signal stream for a
	// participant and returns the connection id plus the sink/source used to
	// exchange messages with the remote node.
	StartParticipantSignal(ctx context.Context, roomName livekit.RoomName, pi ParticipantInit, nodeID livekit.NodeID) (connectionID livekit.ConnectionID, reqSink MessageSink, resSource MessageSource, err error)
	// HandleParticipantConnectRequest relays a signalling v2 connect request
	// to the given node and returns the node's connect response.
	HandleParticipantConnectRequest(
		ctx context.Context,
		roomName livekit.RoomName,
		participantIdentity livekit.ParticipantIdentity,
		nodeID livekit.NodeID,
		rscr *rpc.RelaySignalv2ConnectRequest,
	) (*rpc.RelaySignalv2ConnectResponse, error)
}
type signalClient struct {
nodeID livekit.NodeID
config config.SignalRelayConfig
client rpc.TypedSignalClient
active atomic.Int32
nodeID livekit.NodeID
config config.SignalRelayConfig
client rpc.TypedSignalClient
clientv2 rpc.TypedSignalv2Client
active atomic.Int32
}
func NewSignalClient(nodeID livekit.NodeID, bus psrpc.MessageBus, config config.SignalRelayConfig) (SignalClient, error) {
c, err := rpc.NewTypedSignalClient(
client, err := rpc.NewTypedSignalClient(
nodeID,
bus,
middleware.WithClientMetrics(rpc.PSRPCMetricsObserver{}),
@@ -61,10 +70,20 @@ func NewSignalClient(nodeID livekit.NodeID, bus psrpc.MessageBus, config config.
return nil, err
}
clientv2, err := rpc.NewTypedSignalv2Client(
nodeID,
bus,
middleware.WithClientMetrics(rpc.PSRPCMetricsObserver{}),
)
if err != nil {
return nil, err
}
return &signalClient{
nodeID: nodeID,
config: config,
client: c,
nodeID: nodeID,
config: config,
client: client,
clientv2: clientv2,
}, nil
}
@@ -142,6 +161,28 @@ func (r *signalClient) StartParticipantSignal(
return connectionID, sink, resChan, nil
}
// HandleParticipantConnectRequest relays a signalling v2 connect request to
// the node identified by nodeID and returns its response.
func (r *signalClient) HandleParticipantConnectRequest(
	ctx context.Context,
	roomName livekit.RoomName,
	participantIdentity livekit.ParticipantIdentity,
	nodeID livekit.NodeID,
	rscr *rpc.RelaySignalv2ConnectRequest,
) (*rpc.RelaySignalv2ConnectResponse, error) {
	utils.GetLogger(ctx).WithValues(
		"room", roomName,
		"participant", participantIdentity,
		"reqNodeID", nodeID,
		// SIGNALLING-V2-TODO "connID", connectionID,
		"connectRequest", logger.Proto(rscr),
	).Debugw("handling participant connect request")

	return r.clientv2.RelaySignalv2Connect(ctx, nodeID, rscr)
}
// ------------------------------
type signalRequestMessageWriter struct{}
func (e signalRequestMessageWriter) Write(seq uint64, close bool, msgs []proto.Message) *rpc.RelaySignalRequest {
@@ -156,6 +197,8 @@ func (e signalRequestMessageWriter) Write(seq uint64, close bool, msgs []proto.M
return r
}
// -------------------------------
type signalResponseMessageReader struct{}
func (e signalResponseMessageReader) Read(rm *rpc.RelaySignalResponse) ([]proto.Message, error) {
@@ -166,6 +209,8 @@ func (e signalResponseMessageReader) Read(rm *rpc.RelaySignalResponse) ([]proto.
return msgs, nil
}
// -----------------------------------------
type RelaySignalMessage interface {
proto.Message
GetSeq() uint64
@@ -212,6 +257,8 @@ func CopySignalStreamToMessageChannel[SendType, RecvType RelaySignalMessage](
return stream.Err()
}
// ----------------------------------------
type signalMessageReader[SendType, RecvType RelaySignalMessage] struct {
seq uint64
reader SignalMessageReader[RecvType]
@@ -239,6 +286,8 @@ func (r *signalMessageReader[SendType, RecvType]) Read(msg RecvType) ([]proto.Me
return res, nil
}
// ----------------------------------------
type SignalSinkParams[SendType, RecvType RelaySignalMessage] struct {
Stream psrpc.Stream[SendType, RecvType]
Logger logger.Logger
+25 -23
View File
@@ -197,6 +197,7 @@ type ParticipantParams struct {
DisableSenderReportPassThrough bool
MetricConfig metric.MetricConfig
UseOneShotSignallingMode bool
SynchronousLocalCandidatesMode bool
EnableMetrics bool
DataChannelMaxBufferedAmount uint64
DatachannelSlowThreshold int
@@ -1778,29 +1779,30 @@ func (p *ParticipantImpl) setupTransportManager() error {
params := TransportManagerParams{
// primary connection does not change, canSubscribe can change if permission was updated
// after the participant has joined
SubscriberAsPrimary: subscriberAsPrimary,
Config: p.params.Config,
Twcc: p.twcc,
ProtocolVersion: p.params.ProtocolVersion,
CongestionControlConfig: p.params.CongestionControlConfig,
EnabledPublishCodecs: p.enabledPublishCodecs,
EnabledSubscribeCodecs: p.enabledSubscribeCodecs,
SimTracks: p.params.SimTracks,
ClientInfo: p.params.ClientInfo,
Migration: p.params.Migration,
AllowTCPFallback: p.params.AllowTCPFallback,
TCPFallbackRTTThreshold: p.params.TCPFallbackRTTThreshold,
AllowUDPUnstableFallback: p.params.AllowUDPUnstableFallback,
TURNSEnabled: p.params.TURNSEnabled,
AllowPlayoutDelay: p.params.PlayoutDelay.GetEnabled(),
DataChannelMaxBufferedAmount: p.params.DataChannelMaxBufferedAmount,
DatachannelSlowThreshold: p.params.DatachannelSlowThreshold,
Logger: p.params.Logger.WithComponent(sutils.ComponentTransport),
PublisherHandler: pth,
SubscriberHandler: sth,
DataChannelStats: p.dataChannelStats,
UseOneShotSignallingMode: p.params.UseOneShotSignallingMode,
FireOnTrackBySdp: p.params.FireOnTrackBySdp,
SubscriberAsPrimary: subscriberAsPrimary,
Config: p.params.Config,
Twcc: p.twcc,
ProtocolVersion: p.params.ProtocolVersion,
CongestionControlConfig: p.params.CongestionControlConfig,
EnabledPublishCodecs: p.enabledPublishCodecs,
EnabledSubscribeCodecs: p.enabledSubscribeCodecs,
SimTracks: p.params.SimTracks,
ClientInfo: p.params.ClientInfo,
Migration: p.params.Migration,
AllowTCPFallback: p.params.AllowTCPFallback,
TCPFallbackRTTThreshold: p.params.TCPFallbackRTTThreshold,
AllowUDPUnstableFallback: p.params.AllowUDPUnstableFallback,
TURNSEnabled: p.params.TURNSEnabled,
AllowPlayoutDelay: p.params.PlayoutDelay.GetEnabled(),
DataChannelMaxBufferedAmount: p.params.DataChannelMaxBufferedAmount,
DatachannelSlowThreshold: p.params.DatachannelSlowThreshold,
Logger: p.params.Logger.WithComponent(sutils.ComponentTransport),
PublisherHandler: pth,
SubscriberHandler: sth,
DataChannelStats: p.dataChannelStats,
UseOneShotSignallingMode: p.params.UseOneShotSignallingMode,
SynchronousLocalCandidatesMode: p.params.SynchronousLocalCandidatesMode,
FireOnTrackBySdp: p.params.FireOnTrackBySdp,
}
if p.params.SyncStreams && p.params.PlayoutDelay.GetEnabled() && p.params.ClientInfo.isFirefox() {
// we will disable playout delay for Firefox if the user is expecting
+234 -16
View File
@@ -143,7 +143,7 @@ type Room struct {
userPacketDeduper *UserPacketDeduper
dataMessagecache *utils.TimeSizeCache[types.DataMessageCache]
dataMessageCache *utils.TimeSizeCache[types.DataMessageCache]
}
type ParticipantOptions struct {
@@ -273,7 +273,7 @@ func NewRoom(
disconnectSignalOnResumeParticipants: make(map[livekit.ParticipantIdentity]time.Time),
disconnectSignalOnResumeNoMessagesParticipants: make(map[livekit.ParticipantIdentity]*disconnectSignalOnResumeNoMessages),
userPacketDeduper: NewUserPacketDeduper(),
dataMessagecache: utils.NewTimeSizeCache[types.DataMessageCache](utils.TimeSizeCacheParams{
dataMessageCache: utils.NewTimeSizeCache[types.DataMessageCache](utils.TimeSizeCacheParams{
TTL: dataMessageCacheTTL,
MaxSize: dataMessageCacheSize,
}),
@@ -426,7 +426,12 @@ func (r *Room) Release() {
r.holds.Dec()
}
func (r *Room) Join(participant types.LocalParticipant, requestSource routing.MessageSource, opts *ParticipantOptions, iceServers []*livekit.ICEServer) error {
func (r *Room) Join(
participant types.LocalParticipant,
requestSource routing.MessageSource,
opts *ParticipantOptions,
iceServers []*livekit.ICEServer,
) error {
r.lock.Lock()
defer r.lock.Unlock()
@@ -464,7 +469,7 @@ func (r *Room) Join(participant types.LocalParticipant, requestSource routing.Me
defer onStateChangeMu.Unlock()
if state := p.State(); state == livekit.ParticipantInfo_ACTIVE {
// subscribe participant to existing published tracks
r.subscribeToExistingTracks(p)
r.subscribeToExistingTracks(p, false)
connectTime := time.Since(p.ConnectedAt())
meta := &livekit.AnalyticsClientMeta{
@@ -503,7 +508,7 @@ func (r *Room) Join(participant types.LocalParticipant, requestSource routing.Me
}
})
participant.OnSubscriberReady(func(p types.LocalParticipant) {
r.subscribeToExistingTracks(p)
r.subscribeToExistingTracks(p, false)
})
// it's important to set this before connection, we don't want to miss out on any published tracks
participant.OnTrackPublished(r.onTrackPublished)
@@ -550,7 +555,8 @@ func (r *Room) Join(participant types.LocalParticipant, requestSource routing.Me
r.launchTargetAgents(maps.Values(r.agentDispatches), participant, livekit.JobType_JT_PARTICIPANT)
r.logger.Debugw("new participant joined",
r.logger.Debugw(
"new participant joined",
"pID", participant.ID(),
"participant", participant.Identity(),
"clientInfo", logger.Proto(participant.GetClientInfo()),
@@ -591,7 +597,7 @@ func (r *Room) Join(participant types.LocalParticipant, requestSource routing.Me
// initiates sub connection as primary
if participant.ProtocolVersion().SupportFastStart() {
go func() {
r.subscribeToExistingTracks(participant)
r.subscribeToExistingTracks(participant, false)
participant.Negotiate(true)
}()
} else {
@@ -604,6 +610,193 @@ func (r *Room) Join(participant types.LocalParticipant, requestSource routing.Me
return nil
}
// SIGNALLING-V2-TODO: consolidate common parts between this function and Join()

// Joinv2 admits a participant to the room via the signalling v2 flow and
// returns the ConnectResponse to send back to the client, including the
// generated subscriber offer. Room state mutation happens inside the inner
// closure under r.lock; the offer generation and initial track subscription
// happen after the lock is released.
func (r *Room) Joinv2(
	participant types.LocalParticipant,
	opts *ParticipantOptions,
	iceServers []*livekit.ICEServer,
) (*livekit.ConnectResponse, error) {
	connectResponse, err := func() (*livekit.ConnectResponse, error) {
		r.lock.Lock()
		defer r.lock.Unlock()

		if r.IsClosed() {
			return nil, ErrRoomClosed
		}

		if r.participants[participant.Identity()] != nil {
			return nil, ErrAlreadyJoined
		}

		// enforce the room participant cap; dependent participants do not count
		if r.protoRoom.MaxParticipants > 0 && !participant.IsDependent() {
			numParticipants := uint32(0)
			for _, p := range r.participants {
				if !p.IsDependent() {
					numParticipants++
				}
			}
			if numParticipants >= r.protoRoom.MaxParticipants {
				return nil, ErrMaxParticipantsExceeded
			}
		}

		// the first non-dependent participant marks the room's join time
		if r.FirstJoinedAt() == 0 && !participant.IsDependent() {
			r.joinedAt.Store(time.Now().Unix())
		}

		// serializes the state-change handling below across callbacks
		var onStateChangeMu sync.Mutex
		participant.OnStateChange(func(p types.LocalParticipant) {
			if r.onParticipantChanged != nil {
				r.onParticipantChanged(p)
			}
			r.broadcastParticipantState(p, broadcastOptions{skipSource: true})

			onStateChangeMu.Lock()
			defer onStateChangeMu.Unlock()
			if state := p.State(); state == livekit.ParticipantInfo_ACTIVE {
				// subscribe participant to existing published tracks
				r.subscribeToExistingTracks(p, false)

				connectTime := time.Since(p.ConnectedAt())
				meta := &livekit.AnalyticsClientMeta{
					ClientConnectTime: uint32(connectTime.Milliseconds()),
				}
				// report the first known ICE connection type for telemetry
				infos := p.GetICEConnectionInfo()
				var connectionType roomobs.ConnectionType
				for _, info := range infos {
					if info.Type != types.ICEConnectionTypeUnknown {
						meta.ConnectionType = info.Type.String()
						connectionType = info.Type.ReporterType()
						break
					}
				}
				r.telemetry.ParticipantActive(context.Background(),
					r.ToProto(),
					p.ToProto(),
					meta,
					false,
				)
				participant.GetReporter().Tx(func(tx roomobs.ParticipantSessionTx) {
					tx.ReportClientConnectTime(uint16(connectTime.Milliseconds()))
					tx.ReportConnectResult(roomobs.ConnectionResultSuccess)
					tx.ReportConnectionType(connectionType)
				})

				fields := append(
					connectionDetailsFields(infos),
					"clientInfo", logger.Proto(sutils.ClientInfoWithoutAddress(p.GetClientInfo())),
				)
				p.GetLogger().Infow("participant active", fields...)
			} else if state == livekit.ParticipantInfo_DISCONNECTED {
				// remove participant from room
				go r.RemoveParticipant(p.Identity(), p.ID(), p.CloseReason())
			}
		})
		participant.OnSubscriberReady(func(p types.LocalParticipant) {
			r.subscribeToExistingTracks(p, false)
		})

		// it's important to set this before connection, we don't want to miss out on any published tracks
		participant.OnTrackPublished(r.onTrackPublished)
		participant.OnTrackUpdated(r.onTrackUpdated)
		participant.OnTrackUnpublished(r.onTrackUnpublished)
		participant.OnParticipantUpdate(r.onParticipantUpdate)
		participant.OnDataPacket(r.onDataPacket)
		participant.OnDataMessage(r.onDataMessage)
		participant.OnMetrics(r.onMetrics)
		participant.OnSubscribeStatusChanged(func(publisherID livekit.ParticipantID, subscribed bool) {
			if subscribed {
				pub := r.GetParticipantByID(publisherID)
				if pub != nil && pub.State() == livekit.ParticipantInfo_ACTIVE {
					// when a participant subscribes to another participant,
					// send speaker update if the subscribed to participant is active.
					level, active := pub.GetAudioLevel()
					if active {
						_ = participant.SendSpeakerUpdate([]*livekit.SpeakerInfo{
							{
								Sid:    string(pub.ID()),
								Level:  float32(level),
								Active: active,
							},
						}, false)
					}

					if cq := pub.GetConnectionQuality(); cq != nil {
						update := &livekit.ConnectionQualityUpdate{}
						update.Updates = append(update.Updates, cq)
						_ = participant.SendConnectionQualityUpdate(update)
					}
				}
			} else {
				// no longer subscribed to the publisher, clear speaker status
				_ = participant.SendSpeakerUpdate([]*livekit.SpeakerInfo{
					{
						Sid:    string(publisherID),
						Level:  0,
						Active: false,
					},
				}, true)
			}
		})

		r.launchTargetAgents(maps.Values(r.agentDispatches), participant, livekit.JobType_JT_PARTICIPANT)

		r.logger.Debugw(
			"new participant joined",
			"pID", participant.ID(),
			"participant", participant.Identity(),
			"clientInfo", logger.Proto(participant.GetClientInfo()),
			"options", opts,
			"numParticipants", len(r.participants),
		)

		if participant.IsRecorder() && !r.protoRoom.ActiveRecording {
			r.protoRoom.ActiveRecording = true
			r.protoProxy.MarkDirty(true)
		} else {
			r.protoProxy.MarkDirty(false)
		}

		r.participants[participant.Identity()] = participant
		r.participantOpts[participant.Identity()] = opts

		if r.onParticipantChanged != nil {
			r.onParticipantChanged(participant)
		}

		// evict the participant if it has not verified its connection in time
		time.AfterFunc(time.Minute, func() {
			if !participant.Verify() {
				r.RemoveParticipant(participant.Identity(), participant.ID(), types.ParticipantCloseReasonJoinTimeout)
			}
		})

		connectResponse := r.createConnectResponseLocked(participant, iceServers)
		participant.SetMigrateState(types.MigrateStateComplete)
		return connectResponse, nil
	}()
	if err != nil {
		// connectResponse is nil on every error path of the closure
		return connectResponse, err
	}

	// SIGNALLING-V2-TODO
	// 1. process published audio_tracks
	// 2. process published video_tracks
	// 3. HandleOffer and get answer (publisher)
	r.subscribeToExistingTracks(participant, true)
	offer, err := participant.GetOffer()
	if err != nil {
		participant.GetLogger().Warnw("could not get offer", err)
		prometheus.ServiceOperationCounter.WithLabelValues("participant_join", "error", "get_subscriber_offer").Add(1)
		return nil, err
	}

	connectResponse.SubscriberSdp = ToProtoSessionDescription(offer, 0) // SIGNALLING-V2-TODO - need to proper offerId?
	prometheus.ServiceOperationCounter.WithLabelValues("participant_join", "success", "").Add(1)
	return connectResponse, nil
}
func (r *Room) ReplaceParticipantRequestSource(identity livekit.ParticipantIdentity, reqSource routing.MessageSource) {
r.lock.Lock()
if rs, ok := r.participantRequestSources[identity]; ok {
@@ -790,7 +983,7 @@ func (r *Room) UpdateSubscriptions(
// handle subscription changes
for _, trackID := range trackIDs {
if subscribe {
participant.SubscribeToTrack(trackID)
participant.SubscribeToTrack(trackID, false)
} else {
participant.UnsubscribeFromTrack(trackID)
}
@@ -799,7 +992,7 @@ func (r *Room) UpdateSubscriptions(
for _, pt := range participantTracks {
for _, trackID := range livekit.StringsAsIDs[livekit.TrackID](pt.TrackSids) {
if subscribe {
participant.SubscribeToTrack(trackID)
participant.SubscribeToTrack(trackID, false)
} else {
participant.UnsubscribeFromTrack(trackID)
}
@@ -1193,6 +1386,30 @@ func (r *Room) createJoinResponseLocked(participant types.LocalParticipant, iceS
}
}
// createConnectResponseLocked builds the signalling v2 ConnectResponse for a
// newly joined participant. Caller must hold r.lock.
func (r *Room) createConnectResponseLocked(
	participant types.LocalParticipant,
	iceServers []*livekit.ICEServer,
) *livekit.ConnectResponse {
	iceConfig := participant.GetICEConfig()
	// a non-NONE candidate-type preference on either transport means an ICE
	// fallback preference is in effect
	hasICEFallback := iceConfig.GetPreferencePublisher() != livekit.ICECandidateType_ICT_NONE || iceConfig.GetPreferenceSubscriber() != livekit.ICECandidateType_ICT_NONE
	return &livekit.ConnectResponse{
		Room:        r.ToProto(),
		Participant: participant.ToProto(),
		OtherParticipants: GetOtherParticipantInfo(
			participant,
			false, // isMigratingIn
			toParticipants(maps.Values(r.participants)),
			false, // skipSubscriberBroadcast
		),
		IceServers:           iceServers,
		ClientConfiguration:  participant.GetClientConfiguration(),
		ServerInfo:           r.serverInfo,
		SifTrailer:           r.trailer,
		EnabledPublishCodecs: participant.GetEnabledPublishCodecs(),
		// fast publish only when publishing is permitted and no ICE fallback
		// preference is set
		FastPublish: participant.CanPublish() && !hasICEFallback,
	}
}
// a ParticipantImpl in the room added a new track, subscribe other participants to it
func (r *Room) onTrackPublished(participant types.LocalParticipant, track types.MediaTrack) {
// publish participant update, since track state is changed
@@ -1213,13 +1430,14 @@ func (r *Room) onTrackPublished(participant types.LocalParticipant, track types.
continue
}
r.logger.Debugw("subscribing to new track",
r.logger.Debugw(
"subscribing to new track",
"participant", existingParticipant.Identity(),
"pID", existingParticipant.ID(),
"publisher", participant.Identity(),
"publisherID", participant.ID(),
"trackID", track.ID())
existingParticipant.SubscribeToTrack(track.ID())
existingParticipant.SubscribeToTrack(track.ID(), false)
}
onParticipantChanged := r.onParticipantChanged
r.lock.RUnlock()
@@ -1307,7 +1525,7 @@ func (r *Room) onDataPacket(source types.LocalParticipant, kind livekit.DataPack
r.logger.Errorw("failed to marshal data packet for cache", err, "participant", source.Identity(), "seq", dp.GetSequence())
return
}
r.dataMessagecache.Add(&types.DataMessageCache{
r.dataMessageCache.Add(&types.DataMessageCache{
SenderID: source.ID(),
Seq: dp.GetSequence(),
Data: data,
@@ -1325,7 +1543,7 @@ func (r *Room) onMetrics(source types.Participant, dp *livekit.DataPacket) {
BroadcastMetricsForRoom(r, source, dp, r.logger)
}
func (r *Room) subscribeToExistingTracks(p types.LocalParticipant) {
func (r *Room) subscribeToExistingTracks(p types.LocalParticipant, isSync bool) {
r.lock.RLock()
shouldSubscribe := r.autoSubscribe(p)
r.lock.RUnlock()
@@ -1343,7 +1561,7 @@ func (r *Room) subscribeToExistingTracks(p types.LocalParticipant) {
// subscribe to all
for _, track := range op.GetPublishedTracks() {
trackIDs = append(trackIDs, track.ID())
p.SubscribeToTrack(track.ID())
p.SubscribeToTrack(track.ID(), isSync)
}
}
if len(trackIDs) > 0 {
@@ -1447,7 +1665,7 @@ func (r *Room) changeUpdateWorker() {
SendParticipantUpdates(maps.Values(updatesMap), r.GetParticipants())
case <-cleanDataMessageTicker.C:
r.dataMessagecache.Prune()
r.dataMessageCache.Prune()
}
}
}
@@ -1723,7 +1941,7 @@ func (r *Room) IsDataMessageUserPacketDuplicate(up *livekit.UserPacket) bool {
func (r *Room) GetCachedReliableDataMessage(seqs map[livekit.ParticipantID]uint32) []*types.DataMessageCache {
msgs := make([]*types.DataMessageCache, 0, len(seqs)*10)
for _, msg := range r.dataMessagecache.Get() {
for _, msg := range r.dataMessageCache.Get() {
seq, ok := seqs[msg.SenderID]
if ok && msg.Seq >= seq {
msgs = append(msgs, msg)
+108
View File
@@ -0,0 +1,108 @@
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rtc
import (
"math/rand"
"sync"
"github.com/gammazero/deque"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
)
// SignalCacheParams configures a SignalCache.
type SignalCacheParams struct {
	Logger         logger.Logger
	FirstMessageId uint32 // should be used for testing only
}
// SignalCache buffers outgoing signalling v2 server messages, stamping each
// one with a monotonically increasing message id so that unacknowledged
// messages can be replayed from a given id.
type SignalCache struct {
	params SignalCacheParams

	lock      sync.Mutex
	messageId uint32 // id assigned to the next cached message
	messages  deque.Deque[*livekit.Signalv2ServerMessage]
}
// NewSignalCache creates a signal cache. When FirstMessageId is unset, the
// starting message id is a random value in [1, 256].
func NewSignalCache(params SignalCacheParams) *SignalCache {
	firstId := params.FirstMessageId
	if firstId == 0 {
		firstId = uint32(rand.Intn(1<<8) + 1)
	}

	c := &SignalCache{
		params:    params,
		messageId: firstId,
	}
	c.messages.SetBaseCap(16)
	return c
}
// Add caches a single message, stamping it with the next message id and the
// given last processed remote message id.
func (s *SignalCache) Add(msg *livekit.Signalv2ServerMessage, lastRemoteId uint32) {
	s.AddBatch([]*livekit.Signalv2ServerMessage{msg}, lastRemoteId)
}
// AddBatch caches a batch of messages in order, stamping each with the next
// sequential message id and the given last processed remote message id.
func (s *SignalCache) AddBatch(msgs []*livekit.Signalv2ServerMessage, lastRemoteId uint32) {
	s.lock.Lock()
	defer s.lock.Unlock()

	for idx := range msgs {
		m := msgs[idx]
		m.LastProcessedRemoteMessageId = lastRemoteId
		m.MessageId = s.messageId
		s.messageId++
		s.messages.PushBack(m)
	}
}
// Clear evicts all cached messages with message id <= till.
func (s *SignalCache) Clear(till uint32) {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.clearLocked(till)
}
// clearLocked pops messages from the front of the deque while their id is
// <= till. Caller must hold s.lock.
func (s *SignalCache) clearLocked(till uint32) {
	for s.messages.Len() != 0 && s.messages.Front().GetMessageId() <= till {
		s.messages.PopFront()
	}
}
// GetFromFront returns all cached messages in order, oldest first; returns
// nil when the cache is empty.
func (s *SignalCache) GetFromFront() []*livekit.Signalv2ServerMessage {
	s.lock.Lock()
	defer s.lock.Unlock()

	return s.getFromFrontLocked()
}
// getFromFrontLocked snapshots all cached messages, oldest first, or nil when
// the cache is empty. Caller must hold s.lock.
func (s *SignalCache) getFromFrontLocked() []*livekit.Signalv2ServerMessage {
	if s.messages.Len() == 0 {
		return nil
	}

	msgs := make([]*livekit.Signalv2ServerMessage, 0, s.messages.Len())
	for msg := range s.messages.Iter() {
		msgs = append(msgs, msg)
	}
	return msgs
}
// ClearAndGetFrom evicts all cached messages with id < from and returns the
// remaining messages (those with id >= from), oldest first.
func (s *SignalCache) ClearAndGetFrom(from uint32) []*livekit.Signalv2ServerMessage {
	s.lock.Lock()
	defer s.lock.Unlock()

	// guard the till = from-1 computation: with from == 0 the uint32
	// subtraction wraps to MaxUint32 and would evict the entire cache
	// instead of returning everything
	if from != 0 {
		s.clearLocked(from - 1)
	}
	return s.getFromFrontLocked()
}
+75
View File
@@ -0,0 +1,75 @@
package rtc
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/livekit/protocol/livekit"
)
// TestSignalCache exercises add, clear, and fetch behaviour of SignalCache
// across both single Add() and batched AddBatch() paths.
func TestSignalCache(t *testing.T) {
	firstMessageId := uint32(10)
	cache := NewSignalCache(SignalCacheParams{
		FirstMessageId: firstMessageId,
	})

	// element types are implied by the slice literal (gofmt -s form)
	inputMessages := []*livekit.Signalv2ServerMessage{
		{
			Message: &livekit.Signalv2ServerMessage_ConnectResponse{},
		},
		// SIGNALLING-V2-TODO: replace with other kinds of messages when more types are added
		{
			Message: &livekit.Signalv2ServerMessage_ConnectResponse{},
		},
		{
			Message: &livekit.Signalv2ServerMessage_ConnectResponse{},
		},
		{
			Message: &livekit.Signalv2ServerMessage_ConnectResponse{},
		},
	}

	// Add() - add one message at a time
	for _, inputMessage := range inputMessages {
		cache.Add(inputMessage, 2345)
	}

	// get all messages in cache
	outputMessages := cache.GetFromFront()
	require.Equal(t, inputMessages, outputMessages)

	// clear one and get again
	cache.Clear(firstMessageId)
	outputMessages = cache.GetFromFront()
	require.Equal(t, inputMessages[1:], outputMessages)

	// clearing already evicted messages should not clear anything
	cache.Clear(firstMessageId) // firstMessageId has been cleared already at this point
	outputMessages = cache.GetFromFront()
	require.Equal(t, inputMessages[1:], outputMessages)

	// clear some and get rest in one go
	outputMessages = cache.ClearAndGetFrom(firstMessageId + 3)
	require.Len(t, outputMessages, 1)
	require.Equal(t, inputMessages[3:], outputMessages)

	// getting again should return the same messages, as they should still be in the cache
	outputMessages = cache.GetFromFront()
	require.Equal(t, inputMessages[3:], outputMessages)

	// clearing all and getting should return nil
	require.Nil(t, cache.ClearAndGetFrom(firstMessageId+uint32(len(inputMessages))))

	// getting again should return nil as the cache is fully cleared above
	require.Nil(t, cache.GetFromFront())

	// AddBatch() - add all messages at once
	cache.AddBatch(inputMessages, 4567)

	// get all messages in cache
	outputMessages = cache.GetFromFront()
	require.Equal(t, inputMessages, outputMessages)
}
+144
View File
@@ -0,0 +1,144 @@
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rtc
import (
"math/rand"
"sync"
"time"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
)
const (
defaultMaxFragmentSize = 8192
)
// reassembly tracks the received fragments of one in-flight packet.
type reassembly struct {
	startedAt   time.Time           // arrival time of the first fragment; intended for pruning stale entries
	fragments   []*livekit.Fragment // one slot per fragment, indexed by FragmentNumber - 1
	isCorrupted bool                // set on inconsistent input; the packet is abandoned
}
// SignalFragmentParams configures a SignalFragment.
type SignalFragmentParams struct {
	Logger          logger.Logger
	MaxFragmentSize int    // maximum payload bytes per fragment; defaults to defaultMaxFragmentSize when 0
	FirstPacketId   uint32 // should be used for testing only
}
// SignalFragment segments signalling payloads that exceed the maximum
// fragment size and reassembles received fragments back into payloads.
type SignalFragment struct {
	params SignalFragmentParams

	lock         sync.Mutex
	packetId     uint32                 // id stamped on fragments of the next segmented packet
	reassemblies map[uint32]*reassembly // in-progress packets keyed by packet id
}
// NewSignalFragment creates a SignalFragment, filling in defaults for any
// unset params. When FirstPacketId is unset, the starting packet id is a
// random value in [1, 256].
func NewSignalFragment(params SignalFragmentParams) *SignalFragment {
	if params.MaxFragmentSize == 0 {
		params.MaxFragmentSize = defaultMaxFragmentSize
	}

	firstId := params.FirstPacketId
	if firstId == 0 {
		firstId = uint32(rand.Intn(1<<8) + 1)
	}

	return &SignalFragment{
		params:       params,
		packetId:     firstId,
		reassemblies: make(map[uint32]*reassembly),
	}
}
// Segment splits data into fragments of at most MaxFragmentSize bytes, all
// stamped with the same packet id and 1-based fragment numbers. Returns nil
// when data already fits in a single fragment (no segmentation needed).
// The returned fragments alias data; callers must not mutate it afterwards.
func (s *SignalFragment) Segment(data []byte) []*livekit.Fragment {
	s.lock.Lock()
	defer s.lock.Unlock()

	if len(data) <= s.params.MaxFragmentSize {
		return nil
	}

	totalSize := uint32(len(data))
	numFragments := uint32((len(data) + s.params.MaxFragmentSize - 1) / s.params.MaxFragmentSize)
	fragments := make([]*livekit.Fragment, 0, numFragments)
	fragmentNumber := uint32(1)
	consumed := 0
	for consumed < len(data) {
		fragmentSize := min(len(data)-consumed, s.params.MaxFragmentSize)
		fragments = append(fragments, &livekit.Fragment{
			PacketId:       s.packetId,
			FragmentNumber: fragmentNumber,
			NumFragments:   numFragments,
			FragmentSize:   uint32(fragmentSize),
			TotalSize:      totalSize,
			Data:           data[consumed : consumed+fragmentSize],
		})
		fragmentNumber++
		consumed += fragmentSize
	}

	// advance the packet id so the next segmented packet does not collide
	// with this one in the receiver's reassembly map (mirrors how
	// SignalCache advances messageId per cached message)
	s.packetId++

	return fragments
}
// Reassemble accepts one fragment and returns the fully reassembled packet
// payload once all fragments of the packet have arrived, or nil while the
// packet is incomplete or has been marked corrupted. A corrupted packet's
// state is retained until pruned (see TODO below).
func (s *SignalFragment) Reassemble(fragment *livekit.Fragment) []byte {
	s.lock.Lock()
	defer s.lock.Unlock()

	re, ok := s.reassemblies[fragment.PacketId]
	if !ok {
		re = &reassembly{
			startedAt: time.Now(),
			fragments: make([]*livekit.Fragment, fragment.NumFragments),
		}
		s.reassemblies[fragment.PacketId] = re
	}

	// fragment numbers are 1-based; FragmentNumber == 0 previously passed the
	// upper-bound check and wrapped to a huge index in fragments[n-1], causing
	// an out-of-range panic on malformed input - treat it as corruption
	if fragment.FragmentNumber < 1 || int(fragment.FragmentNumber) > len(re.fragments) {
		re.isCorrupted = true
	} else if int(fragment.FragmentSize) != len(fragment.Data) {
		re.isCorrupted = true // runt packet, data size of blob does not match fragment size
	} else {
		re.fragments[fragment.FragmentNumber-1] = fragment
	}

	if re.isCorrupted {
		return nil
	}

	// try to reassemble
	expectedTotalSize := uint32(0)
	totalSize := 0
	for _, fr := range re.fragments {
		if fr == nil {
			return nil // not received all fragments of packet yet
		}
		expectedTotalSize = fr.TotalSize // can read this from any fragment of packet
		totalSize += len(fr.Data)
	}

	if expectedTotalSize != 0 && uint32(totalSize) != expectedTotalSize {
		re.isCorrupted = true
		return nil
	}

	data := make([]byte, 0, expectedTotalSize)
	for _, fr := range re.fragments {
		data = append(data, fr.Data...)
	}

	// packet fully reassembled - drop its tracking state so completed packets
	// do not accumulate in the map
	delete(s.reassemblies, fragment.PacketId)

	return data
}
// SIGNALLING-V2-TODO: need a prune worker to handle stale re-assemblies
+125
View File
@@ -0,0 +1,125 @@
package rtc
import (
"testing"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
"github.com/livekit/protocol/livekit"
)
// TestSignalFragment exercises segmentation of a marshalled signalling message
// into size-bounded fragments and their reassembly back into the original
// payload, including out-of-order delivery and corruption handling.
func TestSignalFragment(t *testing.T) {
	inputMessage := &livekit.Signalv2ServerEnvelope{
		ServerMessages: []*livekit.Signalv2ServerMessage{
			{
				Message: &livekit.Signalv2ServerMessage_ConnectResponse{
					ConnectResponse: &livekit.ConnectResponse{
						SifTrailer: []byte("abcdefghijklmnopqrstuvwxyz0123456789"),
					},
				},
			},
			{
				Message: &livekit.Signalv2ServerMessage_ConnectResponse{
					ConnectResponse: &livekit.ConnectResponse{
						SifTrailer: []byte("0123456789abcdefghijklmnopqrstuvwxyz0123456789"),
					},
				},
			},
			{
				Message: &livekit.Signalv2ServerMessage_ConnectResponse{
					ConnectResponse: &livekit.ConnectResponse{
						SifTrailer: []byte("ABCDEFGHIJKLMNOPQRSTabcdefghijklmnopqrstuvwxyz0123456789"),
					},
				},
			},
		},
	}

	t.Run("no segmentation needed", func(t *testing.T) {
		// fragment size limit far larger than the payload - Segment should decline
		sr := NewSignalFragment(SignalFragmentParams{
			MaxFragmentSize: 5_000_000,
		})

		marshalled, err := proto.Marshal(inputMessage)
		require.NoError(t, err)
		require.Nil(t, sr.Segment(marshalled))
	})

	t.Run("segmentation + reassembly", func(t *testing.T) {
		maxFragmentSize := 5
		sr := NewSignalFragment(SignalFragmentParams{
			MaxFragmentSize: maxFragmentSize,
		})

		marshalled, err := proto.Marshal(inputMessage)
		require.NoError(t, err)

		expectedNumFragments := (len(marshalled) + maxFragmentSize - 1) / maxFragmentSize
		fragments := sr.Segment(marshalled)
		require.NotZero(t, len(fragments))
		require.Equal(t, uint32(len(marshalled)), fragments[0].TotalSize)

		var reassembled []byte
		for idx, fragment := range fragments {
			require.Equal(t, uint32(idx+1), fragment.FragmentNumber)
			require.NotZero(t, fragment.FragmentSize)
			require.Equal(t, uint32(expectedNumFragments), fragment.NumFragments)
			require.Equal(t, fragment.FragmentSize, uint32(len(fragment.Data)))

			// only the final fragment completes the packet; interim calls return nil
			reassembled = sr.Reassemble(fragment)
		}
		require.Equal(t, marshalled, reassembled)
	})

	t.Run("runt", func(t *testing.T) {
		maxFragmentSize := 5
		sr := NewSignalFragment(SignalFragmentParams{
			MaxFragmentSize: maxFragmentSize,
		})

		marshalled, err := proto.Marshal(inputMessage)
		require.NoError(t, err)

		fragments := sr.Segment(marshalled)

		var reassembled []byte
		for idx, fragment := range fragments {
			// do not send one packet into re-assembly initially, re-assembly should not succeed
			if idx == 0 {
				continue
			}

			reassembled = sr.Reassemble(fragment)
		}
		require.Zero(t, len(reassembled))

		// submit 1st fragment and ensure reassembly completes
		reassembled = sr.Reassemble(fragments[0])
		require.Equal(t, marshalled, reassembled)
	})

	t.Run("corrupted", func(t *testing.T) {
		maxFragmentSize := 5
		sr := NewSignalFragment(SignalFragmentParams{
			MaxFragmentSize: maxFragmentSize,
		})

		marshalled, err := proto.Marshal(inputMessage)
		require.NoError(t, err)

		fragments := sr.Segment(marshalled)

		var reassembled []byte
		for idx, fragment := range fragments {
			// corrupt a fragment, re-assembly should fail
			if idx == 0 {
				fragment.FragmentSize += 1
			}

			reassembled = sr.Reassemble(fragment)
		}
		require.Zero(t, len(reassembled))
	})
}
+2 -2
View File
@@ -139,8 +139,8 @@ func (m *SubscriptionManager) isClosed() bool {
}
}
func (m *SubscriptionManager) SubscribeToTrack(trackID livekit.TrackID) {
if m.params.UseOneShotSignallingMode {
func (m *SubscriptionManager) SubscribeToTrack(trackID livekit.TrackID, isSync bool) {
if m.params.UseOneShotSignallingMode || isSync {
m.subscribeSynchronous(trackID)
return
}
+9 -9
View File
@@ -67,7 +67,7 @@ func TestSubscribe(t *testing.T) {
}
})
sm.SubscribeToTrack("track")
sm.SubscribeToTrack("track", false)
s := sm.subscriptions["track"]
require.True(t, s.isDesired())
require.Eventually(t, func() bool {
@@ -127,7 +127,7 @@ func TestSubscribe(t *testing.T) {
failed.Store(true)
}
sm.SubscribeToTrack("track")
sm.SubscribeToTrack("track", false)
s := sm.subscriptions["track"]
require.Eventually(t, func() bool {
return !s.getHasPermission()
@@ -168,7 +168,7 @@ func TestSubscribe(t *testing.T) {
failed.Store(true)
}
sm.SubscribeToTrack("track")
sm.SubscribeToTrack("track", false)
s := sm.subscriptions["track"]
require.Eventually(t, func() bool {
return !s.needsSubscribe()
@@ -271,8 +271,8 @@ func TestSubscribeStatusChanged(t *testing.T) {
}
})
sm.SubscribeToTrack("track1")
sm.SubscribeToTrack("track2")
sm.SubscribeToTrack("track1", false)
sm.SubscribeToTrack("track2", false)
s1 := sm.subscriptions["track1"]
s2 := sm.subscriptions["track2"]
require.Eventually(t, func() bool {
@@ -332,7 +332,7 @@ func TestUpdateSettingsBeforeSubscription(t *testing.T) {
}
sm.UpdateSubscribedTrackSettings("track", settings)
sm.SubscribeToTrack("track")
sm.SubscribeToTrack("track", false)
s := sm.subscriptions["track"]
require.Eventually(t, func() bool {
@@ -376,7 +376,7 @@ func TestSubscriptionLimits(t *testing.T) {
}
})
sm.SubscribeToTrack("track")
sm.SubscribeToTrack("track", false)
s := sm.subscriptions["track"]
require.True(t, s.isDesired())
require.Eventually(t, func() bool {
@@ -405,7 +405,7 @@ func TestSubscriptionLimits(t *testing.T) {
require.Equal(t, 1, tm.TrackSubscribedCallCount())
// reach subscription limit, subscribe pending
sm.SubscribeToTrack("track2")
sm.SubscribeToTrack("track2", false)
s2 := sm.subscriptions["track2"]
time.Sleep(subscriptionTimeout * 2)
require.True(t, s2.needsSubscribe())
@@ -437,7 +437,7 @@ func TestSubscriptionLimits(t *testing.T) {
}, subSettleTimeout, subCheckInterval, "track was not bound")
// subscribe to track1 again, which should pending
sm.SubscribeToTrack("track")
sm.SubscribeToTrack("track", false)
s = sm.subscriptions["track"]
require.True(t, s.isDesired())
time.Sleep(subscriptionTimeout * 2)
+86 -41
View File
@@ -87,19 +87,19 @@ const (
)
var (
ErrNoICETransport = errors.New("no ICE transport")
ErrIceRestartWithoutLocalSDP = errors.New("ICE restart without local SDP settled")
ErrIceRestartOnClosedPeerConnection = errors.New("ICE restart on closed peer connection")
ErrNoTransceiver = errors.New("no transceiver")
ErrNoSender = errors.New("no sender")
ErrMidNotFound = errors.New("mid not found")
ErrNotSynchronousPeerConnectionMode = errors.New("not using synchronous peer connection mode")
ErrNoRemoteDescription = errors.New("no remote description")
ErrNoLocalDescription = errors.New("no local description")
ErrInvalidSDPFragment = errors.New("invalid sdp fragment")
ErrNoBundleMid = errors.New("could not get bundle mid")
ErrMidMismatch = errors.New("media mid does not match bundle mid")
ErrICECredentialMismatch = errors.New("ice credential mismatch")
ErrNoICETransport = errors.New("no ICE transport")
ErrIceRestartWithoutLocalSDP = errors.New("ICE restart without local SDP settled")
ErrIceRestartOnClosedPeerConnection = errors.New("ICE restart on closed peer connection")
ErrNoTransceiver = errors.New("no transceiver")
ErrNoSender = errors.New("no sender")
ErrMidNotFound = errors.New("mid not found")
ErrNotSynchronousLocalCandidatesMode = errors.New("not using synchronous local candidates mode")
ErrNoRemoteDescription = errors.New("no remote description")
ErrNoLocalDescription = errors.New("no local description")
ErrInvalidSDPFragment = errors.New("invalid sdp fragment")
ErrNoBundleMid = errors.New("could not get bundle mid")
ErrMidMismatch = errors.New("media mid does not match bundle mid")
ErrICECredentialMismatch = errors.New("ice credential mismatch")
)
// -------------------------------------------------------------------------
@@ -258,24 +258,25 @@ type PCTransport struct {
}
type TransportParams struct {
Handler transport.Handler
ProtocolVersion types.ProtocolVersion
Config *WebRTCConfig
Twcc *lktwcc.Responder
DirectionConfig DirectionConfig
CongestionControlConfig config.CongestionControlConfig
EnabledCodecs []*livekit.Codec
Logger logger.Logger
Transport livekit.SignalTarget
SimTracks map[uint32]SimulcastTrackInfo
ClientInfo ClientInfo
IsOfferer bool
IsSendSide bool
AllowPlayoutDelay bool
UseOneShotSignallingMode bool
FireOnTrackBySdp bool
DataChannelMaxBufferedAmount uint64
DatachannelSlowThreshold int
Handler transport.Handler
ProtocolVersion types.ProtocolVersion
Config *WebRTCConfig
Twcc *lktwcc.Responder
DirectionConfig DirectionConfig
CongestionControlConfig config.CongestionControlConfig
EnabledCodecs []*livekit.Codec
Logger logger.Logger
Transport livekit.SignalTarget
SimTracks map[uint32]SimulcastTrackInfo
ClientInfo ClientInfo
IsOfferer bool
IsSendSide bool
AllowPlayoutDelay bool
UseOneShotSignallingMode bool
SynchronousLocalCandidatesMode bool
FireOnTrackBySdp bool
DataChannelMaxBufferedAmount uint64
DatachannelSlowThreshold int
// for development test
DatachannelMaxReceiverBufferSize int
@@ -485,7 +486,7 @@ func NewPCTransport(params TransportParams) (*PCTransport, error) {
canReuseTransceiver: true,
connectionDetails: types.NewICEConnectionDetails(params.Transport, params.Logger),
lastNegotiate: time.Now(),
activeOfferId: uint32(rand.Intn(1 << 8)),
activeOfferId: uint32(rand.Intn(1<<8) + 1),
}
bwe, err := t.createPeerConnection()
@@ -546,7 +547,7 @@ func (t *PCTransport) createPeerConnection() (cc.BandwidthEstimator, error) {
}
t.pc = pc
if !t.params.UseOneShotSignallingMode {
if !t.params.UseOneShotSignallingMode && !t.params.SynchronousLocalCandidatesMode {
// one shot signalling mode gathers all candidates and sends in answer
t.pc.OnICEGatheringStateChange(t.onICEGatheringStateChange)
t.pc.OnICECandidate(t.onICECandidateTrickle)
@@ -1292,7 +1293,8 @@ func (t *PCTransport) clearConnTimer() {
}
func (t *PCTransport) HandleRemoteDescription(sd webrtc.SessionDescription, remoteId uint32) error {
if t.params.UseOneShotSignallingMode {
if t.params.UseOneShotSignallingMode || t.params.SynchronousLocalCandidatesMode {
// SIGNALLING-V2-TODO: need to support filtering candidates for transport fallback
// add remote candidates to ICE connection details
parsed, err := sd.Unmarshal()
if err == nil {
@@ -1341,8 +1343,8 @@ func (t *PCTransport) HandleRemoteDescription(sd webrtc.SessionDescription, remo
}
func (t *PCTransport) GetAnswer() (webrtc.SessionDescription, error) {
if !t.params.UseOneShotSignallingMode {
return webrtc.SessionDescription{}, ErrNotSynchronousPeerConnectionMode
if !t.params.UseOneShotSignallingMode && !t.params.SynchronousLocalCandidatesMode {
return webrtc.SessionDescription{}, ErrNotSynchronousLocalCandidatesMode
}
prd := t.pc.PendingRemoteDescription()
@@ -1388,6 +1390,49 @@ func (t *PCTransport) GetAnswer() (webrtc.SessionDescription, error) {
return *cld, nil
}
// GetOffer creates a local offer, waits for ICE candidate gathering to
// complete and returns the offer with all gathered candidates included
// inline (no trickle). Only available in synchronous local candidates mode;
// gathered local candidates are also recorded in the ICE connection details.
func (t *PCTransport) GetOffer() (webrtc.SessionDescription, error) {
	if !t.params.SynchronousLocalCandidatesMode {
		return webrtc.SessionDescription{}, ErrNotSynchronousLocalCandidatesMode
	}

	offer, err := t.pc.CreateOffer(nil)
	if err != nil {
		return webrtc.SessionDescription{}, err
	}

	if err = t.pc.SetLocalDescription(offer); err != nil {
		return webrtc.SessionDescription{}, err
	}

	// wait for gathering to complete to include all candidates in the offer
	<-webrtc.GatheringCompletePromise(t.pc)

	pld := t.pc.PendingLocalDescription()
	if pld == nil {
		// guard against a nil dereference if the peer connection was closed or
		// the local description was settled concurrently
		return webrtc.SessionDescription{}, ErrNoLocalDescription
	}

	// add local candidates to ICE connection details
	parsed, err := pld.Unmarshal()
	if err == nil {
		addLocalICECandidates := func(attrs []sdp.Attribute) {
			for _, a := range attrs {
				if a.IsICECandidate() {
					c, err := ice.UnmarshalCandidate(a.Value)
					if err != nil {
						continue
					}
					t.connectionDetails.AddLocalICECandidate(c, false, false)
				}
			}
		}
		addLocalICECandidates(parsed.Attributes)
		for _, m := range parsed.MediaDescriptions {
			addLocalICECandidates(m.Attributes)
		}
	}

	return *pld, nil
}
func (t *PCTransport) GetICESessionUfrag() (string, error) {
cld := t.pc.CurrentLocalDescription()
if cld == nil {
@@ -1410,7 +1455,7 @@ func (t *PCTransport) GetICESessionUfrag() (string, error) {
// Handles SDP Fragment for ICE Trickle in WHIP
func (t *PCTransport) HandleICETrickleSDPFragment(sdpFragment string) error {
if !t.params.UseOneShotSignallingMode {
return ErrNotSynchronousPeerConnectionMode
return ErrNotSynchronousLocalCandidatesMode
}
parsedFragment := &lksdp.SDPFragment{}
@@ -1498,7 +1543,7 @@ func (t *PCTransport) HandleICETrickleSDPFragment(sdpFragment string) error {
// Handles SDP Fragment for ICE Restart in WHIP
func (t *PCTransport) HandleICERestartSDPFragment(sdpFragment string) (string, error) {
if !t.params.UseOneShotSignallingMode {
return "", ErrNotSynchronousPeerConnectionMode
return "", ErrNotSynchronousLocalCandidatesMode
}
parsedFragment := &lksdp.SDPFragment{}
@@ -1628,7 +1673,7 @@ func (t *PCTransport) Negotiate(force bool) {
// no op to cancel pending negotiation
})
t.debouncePending = false
t.updateLastNeogitateLocked()
t.updateLastNegotiateLocked()
postEvent = true
} else {
@@ -1642,7 +1687,7 @@ func (t *PCTransport) Negotiate(force bool) {
t.debouncedNegotiate.Add(func() {
t.lock.Lock()
t.debouncePending = false
t.updateLastNeogitateLocked()
t.updateLastNegotiateLocked()
t.lock.Unlock()
t.postEvent(event{
@@ -1661,7 +1706,7 @@ func (t *PCTransport) Negotiate(force bool) {
}
}
func (t *PCTransport) updateLastNeogitateLocked() {
func (t *PCTransport) updateLastNegotiateLocked() {
if now := time.Now(); now.After(t.lastNegotiate) {
t.lastNegotiate = now
}
+58 -51
View File
@@ -83,29 +83,30 @@ func (h TransportManagerPublisherTransportHandler) OnAnswer(sd webrtc.SessionDes
// -------------------------------
type TransportManagerParams struct {
SubscriberAsPrimary bool
Config *WebRTCConfig
Twcc *twcc.Responder
ProtocolVersion types.ProtocolVersion
CongestionControlConfig config.CongestionControlConfig
EnabledSubscribeCodecs []*livekit.Codec
EnabledPublishCodecs []*livekit.Codec
SimTracks map[uint32]SimulcastTrackInfo
ClientInfo ClientInfo
Migration bool
AllowTCPFallback bool
TCPFallbackRTTThreshold int
AllowUDPUnstableFallback bool
TURNSEnabled bool
AllowPlayoutDelay bool
DataChannelMaxBufferedAmount uint64
DatachannelSlowThreshold int
Logger logger.Logger
PublisherHandler transport.Handler
SubscriberHandler transport.Handler
DataChannelStats *telemetry.BytesTrackStats
UseOneShotSignallingMode bool
FireOnTrackBySdp bool
SubscriberAsPrimary bool
Config *WebRTCConfig
Twcc *twcc.Responder
ProtocolVersion types.ProtocolVersion
CongestionControlConfig config.CongestionControlConfig
EnabledSubscribeCodecs []*livekit.Codec
EnabledPublishCodecs []*livekit.Codec
SimTracks map[uint32]SimulcastTrackInfo
ClientInfo ClientInfo
Migration bool
AllowTCPFallback bool
TCPFallbackRTTThreshold int
AllowUDPUnstableFallback bool
TURNSEnabled bool
AllowPlayoutDelay bool
DataChannelMaxBufferedAmount uint64
DatachannelSlowThreshold int
Logger logger.Logger
PublisherHandler transport.Handler
SubscriberHandler transport.Handler
DataChannelStats *telemetry.BytesTrackStats
UseOneShotSignallingMode bool
SynchronousLocalCandidatesMode bool
FireOnTrackBySdp bool
}
type TransportManager struct {
@@ -150,21 +151,22 @@ func NewTransportManager(params TransportManagerParams) (*TransportManager, erro
lgr := LoggerWithPCTarget(params.Logger, livekit.SignalTarget_PUBLISHER)
publisher, err := NewPCTransport(TransportParams{
ProtocolVersion: params.ProtocolVersion,
Config: params.Config,
Twcc: params.Twcc,
DirectionConfig: params.Config.Publisher,
CongestionControlConfig: params.CongestionControlConfig,
EnabledCodecs: params.EnabledPublishCodecs,
Logger: lgr,
SimTracks: params.SimTracks,
ClientInfo: params.ClientInfo,
Transport: livekit.SignalTarget_PUBLISHER,
Handler: TransportManagerPublisherTransportHandler{TransportManagerTransportHandler{params.PublisherHandler, t, lgr}},
UseOneShotSignallingMode: params.UseOneShotSignallingMode,
DataChannelMaxBufferedAmount: params.DataChannelMaxBufferedAmount,
DatachannelSlowThreshold: params.DatachannelSlowThreshold,
FireOnTrackBySdp: params.FireOnTrackBySdp,
ProtocolVersion: params.ProtocolVersion,
Config: params.Config,
Twcc: params.Twcc,
DirectionConfig: params.Config.Publisher,
CongestionControlConfig: params.CongestionControlConfig,
EnabledCodecs: params.EnabledPublishCodecs,
Logger: lgr,
SimTracks: params.SimTracks,
ClientInfo: params.ClientInfo,
Transport: livekit.SignalTarget_PUBLISHER,
Handler: TransportManagerPublisherTransportHandler{TransportManagerTransportHandler{params.PublisherHandler, t, lgr}},
UseOneShotSignallingMode: params.UseOneShotSignallingMode,
SynchronousLocalCandidatesMode: params.SynchronousLocalCandidatesMode,
DataChannelMaxBufferedAmount: params.DataChannelMaxBufferedAmount,
DatachannelSlowThreshold: params.DatachannelSlowThreshold,
FireOnTrackBySdp: params.FireOnTrackBySdp,
})
if err != nil {
return nil, err
@@ -173,19 +175,20 @@ func NewTransportManager(params TransportManagerParams) (*TransportManager, erro
lgr = LoggerWithPCTarget(params.Logger, livekit.SignalTarget_SUBSCRIBER)
subscriber, err := NewPCTransport(TransportParams{
ProtocolVersion: params.ProtocolVersion,
Config: params.Config,
DirectionConfig: params.Config.Subscriber,
CongestionControlConfig: params.CongestionControlConfig,
EnabledCodecs: params.EnabledSubscribeCodecs,
Logger: lgr,
ClientInfo: params.ClientInfo,
IsOfferer: true,
IsSendSide: true,
AllowPlayoutDelay: params.AllowPlayoutDelay,
DatachannelSlowThreshold: params.DatachannelSlowThreshold,
Transport: livekit.SignalTarget_SUBSCRIBER,
Handler: TransportManagerTransportHandler{params.SubscriberHandler, t, lgr},
ProtocolVersion: params.ProtocolVersion,
Config: params.Config,
DirectionConfig: params.Config.Subscriber,
CongestionControlConfig: params.CongestionControlConfig,
EnabledCodecs: params.EnabledSubscribeCodecs,
Logger: lgr,
ClientInfo: params.ClientInfo,
IsOfferer: true,
IsSendSide: true,
AllowPlayoutDelay: params.AllowPlayoutDelay,
DatachannelSlowThreshold: params.DatachannelSlowThreshold,
Transport: livekit.SignalTarget_SUBSCRIBER,
Handler: TransportManagerTransportHandler{params.SubscriberHandler, t, lgr},
SynchronousLocalCandidatesMode: params.SynchronousLocalCandidatesMode,
})
if err != nil {
return nil, err
@@ -493,6 +496,10 @@ func (t *TransportManager) HandleAnswer(answer webrtc.SessionDescription, answer
t.subscriber.HandleRemoteDescription(answer, answerId)
}
func (t *TransportManager) GetOffer() (webrtc.SessionDescription, error) {
return t.subscriber.GetOffer()
}
// AddICECandidate adds candidates for remote peer
func (t *TransportManager) AddICECandidate(candidate webrtc.ICECandidateInit, target livekit.SignalTarget) {
switch target {
+2 -1
View File
@@ -407,6 +407,7 @@ type LocalParticipant interface {
SetTrackMuted(trackID livekit.TrackID, muted bool, fromAdmin bool) *livekit.TrackInfo
HandleAnswer(sdp webrtc.SessionDescription, answerId uint32)
GetOffer() (webrtc.SessionDescription, error)
Negotiate(force bool)
ICERestart(iceConfig *livekit.ICEConfig)
AddTrackLocal(trackLocal webrtc.TrackLocal, params AddTrackParams) (*webrtc.RTPSender, *webrtc.RTPTransceiver, error)
@@ -416,7 +417,7 @@ type LocalParticipant interface {
WriteSubscriberRTCP(pkts []rtcp.Packet) error
// subscriptions
SubscribeToTrack(trackID livekit.TrackID)
SubscribeToTrack(trackID livekit.TrackID, isSync bool)
UnsubscribeFromTrack(trackID livekit.TrackID)
UpdateSubscribedTrackSettings(trackID livekit.TrackID, settings *livekit.UpdateTrackSettings)
GetSubscribedTracks() []SubscribedTrack
@@ -360,6 +360,18 @@ type FakeLocalParticipant struct {
getLoggerResolverReturnsOnCall map[int]struct {
result1 logger.DeferredFieldResolver
}
GetOfferStub func() (webrtc.SessionDescription, error)
getOfferMutex sync.RWMutex
getOfferArgsForCall []struct {
}
getOfferReturns struct {
result1 webrtc.SessionDescription
result2 error
}
getOfferReturnsOnCall map[int]struct {
result1 webrtc.SessionDescription
result2 error
}
GetPacerStub func() pacer.Pacer
getPacerMutex sync.RWMutex
getPacerArgsForCall []struct {
@@ -1082,10 +1094,11 @@ type FakeLocalParticipant struct {
stopAndGetSubscribedTracksForwarderStateReturnsOnCall map[int]struct {
result1 map[livekit.TrackID]*livekit.RTPForwarderState
}
SubscribeToTrackStub func(livekit.TrackID)
SubscribeToTrackStub func(livekit.TrackID, bool)
subscribeToTrackMutex sync.RWMutex
subscribeToTrackArgsForCall []struct {
arg1 livekit.TrackID
arg2 bool
}
SubscriberAsPrimaryStub func() bool
subscriberAsPrimaryMutex sync.RWMutex
@@ -3072,6 +3085,62 @@ func (fake *FakeLocalParticipant) GetLoggerResolverReturnsOnCall(i int, result1
}{result1}
}
func (fake *FakeLocalParticipant) GetOffer() (webrtc.SessionDescription, error) {
fake.getOfferMutex.Lock()
ret, specificReturn := fake.getOfferReturnsOnCall[len(fake.getOfferArgsForCall)]
fake.getOfferArgsForCall = append(fake.getOfferArgsForCall, struct {
}{})
stub := fake.GetOfferStub
fakeReturns := fake.getOfferReturns
fake.recordInvocation("GetOffer", []interface{}{})
fake.getOfferMutex.Unlock()
if stub != nil {
return stub()
}
if specificReturn {
return ret.result1, ret.result2
}
return fakeReturns.result1, fakeReturns.result2
}
func (fake *FakeLocalParticipant) GetOfferCallCount() int {
fake.getOfferMutex.RLock()
defer fake.getOfferMutex.RUnlock()
return len(fake.getOfferArgsForCall)
}
func (fake *FakeLocalParticipant) GetOfferCalls(stub func() (webrtc.SessionDescription, error)) {
fake.getOfferMutex.Lock()
defer fake.getOfferMutex.Unlock()
fake.GetOfferStub = stub
}
func (fake *FakeLocalParticipant) GetOfferReturns(result1 webrtc.SessionDescription, result2 error) {
fake.getOfferMutex.Lock()
defer fake.getOfferMutex.Unlock()
fake.GetOfferStub = nil
fake.getOfferReturns = struct {
result1 webrtc.SessionDescription
result2 error
}{result1, result2}
}
func (fake *FakeLocalParticipant) GetOfferReturnsOnCall(i int, result1 webrtc.SessionDescription, result2 error) {
fake.getOfferMutex.Lock()
defer fake.getOfferMutex.Unlock()
fake.GetOfferStub = nil
if fake.getOfferReturnsOnCall == nil {
fake.getOfferReturnsOnCall = make(map[int]struct {
result1 webrtc.SessionDescription
result2 error
})
}
fake.getOfferReturnsOnCall[i] = struct {
result1 webrtc.SessionDescription
result2 error
}{result1, result2}
}
func (fake *FakeLocalParticipant) GetPacer() pacer.Pacer {
fake.getPacerMutex.Lock()
ret, specificReturn := fake.getPacerReturnsOnCall[len(fake.getPacerArgsForCall)]
@@ -7069,16 +7138,17 @@ func (fake *FakeLocalParticipant) StopAndGetSubscribedTracksForwarderStateReturn
}{result1}
}
func (fake *FakeLocalParticipant) SubscribeToTrack(arg1 livekit.TrackID) {
func (fake *FakeLocalParticipant) SubscribeToTrack(arg1 livekit.TrackID, arg2 bool) {
fake.subscribeToTrackMutex.Lock()
fake.subscribeToTrackArgsForCall = append(fake.subscribeToTrackArgsForCall, struct {
arg1 livekit.TrackID
}{arg1})
arg2 bool
}{arg1, arg2})
stub := fake.SubscribeToTrackStub
fake.recordInvocation("SubscribeToTrack", []interface{}{arg1})
fake.recordInvocation("SubscribeToTrack", []interface{}{arg1, arg2})
fake.subscribeToTrackMutex.Unlock()
if stub != nil {
fake.SubscribeToTrackStub(arg1)
fake.SubscribeToTrackStub(arg1, arg2)
}
}
@@ -7088,17 +7158,17 @@ func (fake *FakeLocalParticipant) SubscribeToTrackCallCount() int {
return len(fake.subscribeToTrackArgsForCall)
}
func (fake *FakeLocalParticipant) SubscribeToTrackCalls(stub func(livekit.TrackID)) {
func (fake *FakeLocalParticipant) SubscribeToTrackCalls(stub func(livekit.TrackID, bool)) {
fake.subscribeToTrackMutex.Lock()
defer fake.subscribeToTrackMutex.Unlock()
fake.SubscribeToTrackStub = stub
}
func (fake *FakeLocalParticipant) SubscribeToTrackArgsForCall(i int) livekit.TrackID {
func (fake *FakeLocalParticipant) SubscribeToTrackArgsForCall(i int) (livekit.TrackID, bool) {
fake.subscribeToTrackMutex.RLock()
defer fake.subscribeToTrackMutex.RUnlock()
argsForCall := fake.subscribeToTrackArgsForCall[i]
return argsForCall.arg1
return argsForCall.arg1, argsForCall.arg2
}
func (fake *FakeLocalParticipant) SubscriberAsPrimary() bool {
@@ -8401,6 +8471,8 @@ func (fake *FakeLocalParticipant) Invocations() map[string][][]interface{} {
defer fake.getLoggerMutex.RUnlock()
fake.getLoggerResolverMutex.RLock()
defer fake.getLoggerResolverMutex.RUnlock()
fake.getOfferMutex.RLock()
defer fake.getOfferMutex.RUnlock()
fake.getPacerMutex.RLock()
defer fake.getPacerMutex.RUnlock()
fake.getPendingTrackMutex.RLock()
+18
View File
@@ -216,3 +216,21 @@ func MaybeTruncateIP(addr string) string {
return addr[:len(addr)-3] + "..."
}
// FragmentsAsSignalv2ServerMessages wraps each fragment of a segmented packet
// in its own Signalv2ServerMessage so the set can be sent over the signalling
// stream.
func FragmentsAsSignalv2ServerMessages(fragments []*livekit.Fragment) []*livekit.Signalv2ServerMessage {
	msgs := make([]*livekit.Signalv2ServerMessage, 0, len(fragments))
	for _, fragment := range fragments {
		msgs = append(msgs, &livekit.Signalv2ServerMessage{
			Message: &livekit.Signalv2ServerMessage_Fragment{
				Fragment: fragment,
			},
		})
	}
	return msgs
}
// MakeSignalv2ServerEnvelope packages a batch of server messages into a single
// envelope for transmission.
func MakeSignalv2ServerEnvelope(msgs []*livekit.Signalv2ServerMessage) *livekit.Signalv2ServerEnvelope {
	envelope := &livekit.Signalv2ServerEnvelope{}
	envelope.ServerMessages = msgs
	return envelope
}
+1
View File
@@ -28,6 +28,7 @@ var (
ErrNameExceedsLimits = psrpc.NewErrorf(psrpc.InvalidArgument, "name length exceeds limits")
ErrMetadataExceedsLimits = psrpc.NewErrorf(psrpc.InvalidArgument, "metadata size exceeds limits")
ErrAttributeExceedsLimits = psrpc.NewErrorf(psrpc.InvalidArgument, "attribute size exceeds limits")
ErrNoRoomName = psrpc.NewErrorf(psrpc.InvalidArgument, "no room name")
ErrRoomNameExceedsLimits = psrpc.NewErrorf(psrpc.InvalidArgument, "room name length exceeds limits")
ErrParticipantIdentityExceedsLimits = psrpc.NewErrorf(psrpc.InvalidArgument, "participant identity length exceeds limits")
ErrDestinationSameAsSourceRoom = psrpc.NewErrorf(psrpc.InvalidArgument, "destination room cannot be the same as source room")
+197 -2
View File
@@ -333,7 +333,8 @@ func (r *RoomManager) StartSession(
return errors.New("could not restart closed participant")
}
participant.GetLogger().Infow("resuming RTC session",
participant.GetLogger().Infow(
"resuming RTC session",
"nodeID", r.currentNode.NodeID(),
"participantInit", &pi,
"numParticipants", room.GetParticipantCount(),
@@ -393,7 +394,8 @@ func (r *RoomManager) StartSession(
sid,
false,
)
pLogger.Infow("starting RTC session",
pLogger.Infow(
"starting RTC session",
"room", room.Name(),
"nodeID", r.currentNode.NodeID(),
"numParticipants", room.GetParticipantCount(),
@@ -561,6 +563,197 @@ func (r *RoomManager) StartSession(
return nil
}
// SIGNALLING-V2-TODO: consolidate common parts between this and `StartSession`

// HandleConnect services a signalling v2 connect request: it resolves (or
// creates) the target room, builds a new local participant from the request's
// grants and connection settings, joins it to the room via Joinv2, persists
// participant/room state and wires lifecycle callbacks, then returns the
// ConnectResponse to be relayed back to the client.
func (r *RoomManager) HandleConnect(
	ctx context.Context,
	grants *auth.ClaimGrants,
	rscr *rpc.RelaySignalv2ConnectRequest,
) (*rpc.RelaySignalv2ConnectResponse, error) {
	sessionStartTime := time.Now()

	createRoom := rscr.CreateRoom
	room, err := r.getOrCreateRoom(ctx, createRoom)
	if err != nil {
		return nil, err
	}
	defer room.Release()

	protoRoom, roomInternal := room.ToProto(), room.Internal()

	participantIdentity := livekit.ParticipantIdentity(grants.Identity)

	// should not error out, error is logged in iceServersForParticipant even if it fails
	// since this is used for TURN server credentials, we don't want to fail the request even if there's no TURN for the session
	apiKey, _, _ := r.getFirstKeyPair()

	/* SIGNALLING-V2-TODO - v2 should not have ICERestart, but leaving reminder here (similar location as the `StartSession` check on this path) */

	sid := livekit.ParticipantID(guid.New(utils.ParticipantPrefix))
	pLogger := rtc.LoggerWithParticipant(
		rtc.LoggerWithRoom(logger.GetLogger(), room.Name(), room.ID()),
		participantIdentity,
		sid,
		false,
	)
	pLogger.Infow(
		"starting RTC session",
		"room", room.Name(),
		"nodeID", r.currentNode.NodeID(),
		"numParticipants", room.GetParticipantCount(),
		"grants", grants,
		"connectRequest", logger.Proto(rscr),
	)

	// NOTE(review): assumes rscr.ConnectRequest and its ConnectionSettings are
	// non-nil - presumably validated before this is invoked; confirm upstream.
	clientInfo := rscr.ConnectRequest.ClientInfo
	clientConf := r.clientConfManager.GetConfiguration(clientInfo)

	// copy the node-wide RTC config so per-session tweaks below do not leak
	rtcConf := *r.rtcConfig
	rtcConf.SetBufferFactory(room.GetBufferFactory())
	if rscr.ConnectRequest.ConnectionSettings.DisableIceLite {
		rtcConf.SettingEngine.SetLite(false)
	}

	// default allow forceTCP
	allowFallback := true
	if r.config.RTC.AllowTCPFallback != nil {
		allowFallback = *r.config.RTC.AllowTCPFallback
	}

	// default do not force full reconnect on a publication error
	reconnectOnPublicationError := false
	if r.config.RTC.ReconnectOnPublicationError != nil {
		reconnectOnPublicationError = *r.config.RTC.ReconnectOnPublicationError
	}

	// default do not force full reconnect on a subscription error
	reconnectOnSubscriptionError := false
	if r.config.RTC.ReconnectOnSubscriptionError != nil {
		reconnectOnSubscriptionError = *r.config.RTC.ReconnectOnSubscriptionError
	}

	// default do not force full reconnect on a data channel error
	reconnectOnDataChannelError := false
	if r.config.RTC.ReconnectOnDataChannelError != nil {
		reconnectOnDataChannelError = *r.config.RTC.ReconnectOnDataChannelError
	}

	subscriberAllowPause := r.config.RTC.CongestionControl.AllowPause
	if rscr.ConnectRequest.ConnectionSettings.SubscriberAllowPause != nil {
		subscriberAllowPause = *rscr.ConnectRequest.ConnectionSettings.SubscriberAllowPause
	}

	participant, err := rtc.NewParticipant(rtc.ParticipantParams{
		Identity:                livekit.ParticipantName(grants.Name) == "" && false || participantIdentity, // see note below
		Name:                    livekit.ParticipantName(grants.Name),
		SID:                     sid,
		Config:                  &rtcConf,
		AudioConfig:             r.config.Audio,
		VideoConfig:             r.config.Video,
		LimitConfig:             r.config.Limit,
		ProtocolVersion:         types.ProtocolVersion(clientInfo.Protocol),
		SessionStartTime:        sessionStartTime,
		Telemetry:               r.telemetry,
		Trailer:                 room.Trailer(),
		PLIThrottleConfig:       r.config.RTC.PLIThrottle,
		CongestionControlConfig: r.config.RTC.CongestionControl,
		PublishEnabledCodecs:    protoRoom.EnabledCodecs,
		SubscribeEnabledCodecs:  protoRoom.EnabledCodecs,
		Grants:                  grants,
		Logger:                  pLogger,
		Reporter:                roomobs.NewNoopParticipantSessionReporter(),
		ClientConf:              clientConf,
		ClientInfo:              rtc.ClientInfo{ClientInfo: clientInfo},
		// SIGNALLING-V2-TODO Region: pi.Region,
		AdaptiveStream:   rscr.ConnectRequest.ConnectionSettings.AdaptiveStream,
		AllowTCPFallback: allowFallback,
		TURNSEnabled:     r.config.IsTURNSEnabled(),
		ParticipantHelper: &roomManagerParticipantHelper{
			room:                     room,
			codecRegressionThreshold: r.config.Video.CodecRegressionThreshold,
		},
		ReconnectOnPublicationError:  reconnectOnPublicationError,
		ReconnectOnSubscriptionError: reconnectOnSubscriptionError,
		ReconnectOnDataChannelError:  reconnectOnDataChannelError,
		VersionGenerator:             r.versionGenerator,
		SubscriberAllowPause:         subscriberAllowPause,
		SubscriptionLimitAudio:       r.config.Limit.SubscriptionLimitAudio,
		SubscriptionLimitVideo:       r.config.Limit.SubscriptionLimitVideo,
		PlayoutDelay:                 roomInternal.GetPlayoutDelay(),
		SyncStreams:                  roomInternal.GetSyncStreams(),
		ForwardStats:                 r.forwardStats,
		MetricConfig:                 r.config.Metric,
		DataChannelMaxBufferedAmount: r.config.RTC.DataChannelMaxBufferedAmount,
		DatachannelSlowThreshold:     r.config.RTC.DatachannelSlowThreshold,
		// v2 signalling delivers candidates inline and fires OnTrack from SDP
		FireOnTrackBySdp:               true,
		SynchronousLocalCandidatesMode: true,
	})
	if err != nil {
		return nil, err
	}
	iceConfig := r.setIceConfig(room.Name(), participant)

	// join room
	opts := rtc.ParticipantOptions{
		AutoSubscribe: rscr.ConnectRequest.ConnectionSettings.AutoSubscribe,
	}
	iceServers := r.iceServersForParticipant(
		apiKey,
		participant,
		iceConfig.PreferenceSubscriber == livekit.ICECandidateType_ICT_TLS,
	)
	connectResponse, err := room.Joinv2(participant, &opts, iceServers)
	if err != nil {
		pLogger.Errorw("could not join room", err)
		_ = participant.Close(true, types.ParticipantCloseReasonJoinFailed, false)
		return nil, err
	}
	if err = r.roomStore.StoreParticipant(ctx, room.Name(), participant.ToProto()); err != nil {
		pLogger.Errorw("could not store participant", err)
	}

	// persists the room only while it is live and the participant counts toward
	// the visible participant total
	persistRoomForParticipantCount := func(proto *livekit.Room) {
		if !participant.Hidden() && !room.IsClosed() {
			err = r.roomStore.StoreRoom(ctx, proto, room.Internal())
			if err != nil {
				logger.Errorw("could not store room", err)
			}
		}
	}
	// update room store with new numParticipants
	persistRoomForParticipantCount(room.ToProto())

	clientMeta := &livekit.AnalyticsClientMeta{
		Region: r.currentNode.Region(),
		Node:   string(r.currentNode.NodeID()),
	}
	r.telemetry.ParticipantJoined(ctx, protoRoom, participant.ToProto(), clientInfo, clientMeta, true)
	participant.OnClose(func(p types.LocalParticipant) {
		if err := r.roomStore.DeleteParticipant(ctx, room.Name(), p.Identity()); err != nil {
			pLogger.Errorw("could not delete participant", err)
		}

		// update room store with new numParticipants
		proto := room.ToProto()
		persistRoomForParticipantCount(proto)

		r.telemetry.ParticipantLeft(ctx, proto, p.ToProto(), true)
	})
	participant.OnClaimsChanged(func(participant types.LocalParticipant) {
		pLogger.Debugw("refreshing client token after claims change")
		if err := r.refreshToken(participant); err != nil {
			pLogger.Errorw("could not refresh token", err)
		}
	})
	participant.OnICEConfigChanged(func(participant types.LocalParticipant, iceConfig *livekit.ICEConfig) {
		r.iceConfigCache.Put(iceConfigCacheKey{room.Name(), participant.Identity()}, iceConfig)
	})

	return &rpc.RelaySignalv2ConnectResponse{
		ConnectResponse: connectResponse,
	}, nil
}
// create the actual room object, to be used on RTC node
func (r *RoomManager) getOrCreateRoom(ctx context.Context, createRoom *livekit.CreateRoomRequest) (*rtc.Room, error) {
roomName := livekit.RoomName(createRoom.Name)
@@ -1041,6 +1234,8 @@ func iceServerForStunServers(servers []string) *livekit.ICEServer {
return iceServer
}
// ------------------------------------
type roomManagerParticipantHelper struct {
room *rtc.Room
codecRegressionThreshold int
+30 -30
View File
@@ -201,7 +201,7 @@ func (s *RTCRestService) validateCreate(r *http.Request) (*createRequest, int, e
func (s *RTCRestService) handleCreate(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Content-type") != "application/sdp" {
handleError("Create", w, r, http.StatusBadRequest, fmt.Errorf("unsupported content-type: %s", r.Header.Get("Content-type")))
s.handleError("Create", w, r, http.StatusBadRequest, fmt.Errorf("unsupported content-type: %s", r.Header.Get("Content-type")))
return
}
@@ -209,25 +209,25 @@ func (s *RTCRestService) handleCreate(w http.ResponseWriter, r *http.Request) {
req, status, err := s.validateCreate(r)
if err != nil {
handleError("Create", w, r, status, err)
s.handleError("Create", w, r, status, err)
return
}
if err := s.roomAllocator.SelectRoomNode(r.Context(), req.RoomName, ""); err != nil {
handleError("Create", w, r, http.StatusInternalServerError, err)
s.handleError("Create", w, r, http.StatusInternalServerError, err)
return
}
rtcNode, err := s.router.GetNodeForRoom(r.Context(), req.RoomName)
if err != nil {
handleError("Create", w, r, http.StatusInternalServerError, err)
s.handleError("Create", w, r, http.StatusInternalServerError, err)
return
}
connID := livekit.ConnectionID(guid.New("CO_"))
starSession, err := req.ParticipantInit.ToStartSession(req.RoomName, connID)
if err != nil {
handleError("Create", w, r, http.StatusInternalServerError, err)
s.handleError("Create", w, r, http.StatusInternalServerError, err)
return
}
@@ -244,7 +244,7 @@ func (s *RTCRestService) handleCreate(w http.ResponseWriter, r *http.Request) {
SubscribedParticipantTracks: subscribedParticipantTracks,
})
if err != nil {
handleError("Create", w, r, http.StatusServiceUnavailable, err)
s.handleError("Create", w, r, http.StatusServiceUnavailable, err)
return
}
@@ -331,20 +331,20 @@ func (s *RTCRestService) iceTrickle(
if errors.As(err, &pe) {
switch pe.Code() {
case psrpc.NotFound:
handleError("Patch", w, r, http.StatusNotFound, errors.New(pe.Error()))
s.handleError("Patch", w, r, http.StatusNotFound, errors.New(pe.Error()))
case psrpc.InvalidArgument:
switch pe.Error() {
case rtc.ErrInvalidSDPFragment.Error(), rtc.ErrMidMismatch.Error(), rtc.ErrICECredentialMismatch.Error():
handleError("Patch", w, r, http.StatusBadRequest, errors.New(pe.Error()))
s.handleError("Patch", w, r, http.StatusBadRequest, errors.New(pe.Error()))
default:
handleError("Patch", w, r, http.StatusInternalServerError, errors.New(pe.Error()))
s.handleError("Patch", w, r, http.StatusInternalServerError, errors.New(pe.Error()))
}
default:
handleError("Patch", w, r, http.StatusInternalServerError, errors.New(pe.Error()))
s.handleError("Patch", w, r, http.StatusInternalServerError, errors.New(pe.Error()))
}
} else {
handleError("Patch", w, r, http.StatusInternalServerError, nil)
s.handleError("Patch", w, r, http.StatusInternalServerError, nil)
}
return
}
@@ -383,20 +383,20 @@ func (s *RTCRestService) iceRestart(
if errors.As(err, &pe) {
switch pe.Code() {
case psrpc.NotFound:
handleError("Patch", w, r, http.StatusNotFound, errors.New(pe.Error()))
s.handleError("Patch", w, r, http.StatusNotFound, errors.New(pe.Error()))
case psrpc.InvalidArgument:
switch pe.Error() {
case rtc.ErrInvalidSDPFragment.Error():
handleError("Patch", w, r, http.StatusBadRequest, errors.New(pe.Error()))
s.handleError("Patch", w, r, http.StatusBadRequest, errors.New(pe.Error()))
default:
handleError("Patch", w, r, http.StatusInternalServerError, errors.New(pe.Error()))
s.handleError("Patch", w, r, http.StatusInternalServerError, errors.New(pe.Error()))
}
default:
handleError("Patch", w, r, http.StatusInternalServerError, errors.New(pe.Error()))
s.handleError("Patch", w, r, http.StatusInternalServerError, errors.New(pe.Error()))
}
} else {
handleError("Patch", w, r, http.StatusInternalServerError, nil)
s.handleError("Patch", w, r, http.StatusInternalServerError, nil)
}
return
}
@@ -419,7 +419,7 @@ func (s *RTCRestService) iceRestart(
func (s *RTCRestService) handleParticipantPatch(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Content-type") != "application/trickle-ice-sdpfrag" {
handleError("Patch", w, r, http.StatusBadRequest, fmt.Errorf("unsupported content-type: %s", r.Header.Get("Content-type")))
s.handleError("Patch", w, r, http.StatusBadRequest, fmt.Errorf("unsupported content-type: %s", r.Header.Get("Content-type")))
return
}
@@ -428,38 +428,38 @@ func (s *RTCRestService) handleParticipantPatch(w http.ResponseWriter, r *http.R
// https://www.rfc-editor.org/rfc/rfc9725.html#name-http-patch-request-usage
ifMatch := r.Header.Get("If-Match")
if ifMatch == "" {
handleError("Patch", w, r, http.StatusPreconditionRequired, errors.New("missing entity tag"))
s.handleError("Patch", w, r, http.StatusPreconditionRequired, errors.New("missing entity tag"))
return
}
claims := GetGrants(r.Context())
if claims == nil || claims.Video == nil {
handleError("Patch", w, r, http.StatusUnauthorized, rtc.ErrPermissionDenied)
s.handleError("Patch", w, r, http.StatusUnauthorized, rtc.ErrPermissionDenied)
return
}
roomName, err := EnsureJoinPermission(r.Context())
if err != nil {
handleError("Patch", w, r, http.StatusUnauthorized, err)
s.handleError("Patch", w, r, http.StatusUnauthorized, err)
return
}
if roomName == "" {
handleError("Patch", w, r, http.StatusUnauthorized, errors.New("room name cannot be empty"))
s.handleError("Patch", w, r, http.StatusUnauthorized, errors.New("room name cannot be empty"))
return
}
if claims.Identity == "" {
handleError("Patch", w, r, http.StatusUnauthorized, errors.New("participant identity cannot be empty"))
s.handleError("Patch", w, r, http.StatusUnauthorized, errors.New("participant identity cannot be empty"))
return
}
pID := livekit.ParticipantID(r.PathValue("participant_id"))
if pID == "" {
handleError("Patch", w, r, http.StatusUnauthorized, errors.New("participant ID cannot be empty"))
s.handleError("Patch", w, r, http.StatusUnauthorized, errors.New("participant ID cannot be empty"))
return
}
sdpFragmentBytes, err := ioutil.ReadAll(r.Body)
if err != nil {
handleError("Patch", w, r, http.StatusBadRequest, fmt.Errorf("body does not have SDP fragment: %s", err))
s.handleError("Patch", w, r, http.StatusBadRequest, fmt.Errorf("body does not have SDP fragment: %s", err))
}
sdpFragment := string(sdpFragmentBytes)
@@ -473,21 +473,21 @@ func (s *RTCRestService) handleParticipantPatch(w http.ResponseWriter, r *http.R
func (s *RTCRestService) handleParticipantDelete(w http.ResponseWriter, r *http.Request) {
claims := GetGrants(r.Context())
if claims == nil || claims.Video == nil {
handleError("Delete", w, r, http.StatusUnauthorized, rtc.ErrPermissionDenied)
s.handleError("Delete", w, r, http.StatusUnauthorized, rtc.ErrPermissionDenied)
return
}
roomName, err := EnsureJoinPermission(r.Context())
if err != nil {
handleError("Delete", w, r, http.StatusUnauthorized, err)
s.handleError("Delete", w, r, http.StatusUnauthorized, err)
return
}
if roomName == "" {
handleError("Delete", w, r, http.StatusUnauthorized, errors.New("room name cannot be empty"))
s.handleError("Delete", w, r, http.StatusUnauthorized, errors.New("room name cannot be empty"))
return
}
if claims.Identity == "" {
handleError("Delete", w, r, http.StatusUnauthorized, errors.New("participant identity cannot be empty"))
s.handleError("Delete", w, r, http.StatusUnauthorized, errors.New("participant identity cannot be empty"))
return
}
@@ -501,7 +501,7 @@ func (s *RTCRestService) handleParticipantDelete(w http.ResponseWriter, r *http.
},
)
if err != nil {
handleError("Delete", w, r, http.StatusNotFound, err)
s.handleError("Delete", w, r, http.StatusNotFound, err)
return
}
@@ -515,7 +515,7 @@ func (s *RTCRestService) handleParticipantDelete(w http.ResponseWriter, r *http.
w.WriteHeader(http.StatusOK)
}
func handleError(method string, w http.ResponseWriter, r *http.Request, status int, err error) {
func (s *RTCRestService) handleError(method string, w http.ResponseWriter, r *http.Request, status int, err error) {
sutils.GetLogger(r.Context()).Warnw(
fmt.Sprintf("API RTCRest.%s", method), err,
"status", status,
+45 -123
View File
@@ -37,7 +37,6 @@ import (
"github.com/livekit/livekit-server/pkg/config"
"github.com/livekit/livekit-server/pkg/routing"
"github.com/livekit/livekit-server/pkg/routing/selector"
"github.com/livekit/livekit-server/pkg/rtc"
"github.com/livekit/livekit-server/pkg/telemetry"
"github.com/livekit/livekit-server/pkg/telemetry/prometheus"
@@ -87,12 +86,13 @@ func NewRTCService(
}
func (s *RTCService) SetupRoutes(mux *http.ServeMux) {
mux.Handle("/rtc", s)
mux.HandleFunc("/rtc/validate", s.validate)
}
func (s *RTCService) validate(w http.ResponseWriter, r *http.Request) {
log := utils.GetLogger(r.Context())
_, _, code, err := s.validateInternal(log, r, true)
lgr := utils.GetLogger(r.Context())
_, _, code, err := s.validateInternal(lgr, r, true)
if err != nil {
HandleError(w, r, code, err)
return
@@ -112,144 +112,66 @@ func decodeAttributes(str string) (map[string]string, error) {
return attrs, nil
}
func (s *RTCService) validateInternal(log logger.Logger, r *http.Request, strict bool) (livekit.RoomName, routing.ParticipantInit, int, error) {
claims := GetGrants(r.Context())
var pi routing.ParticipantInit
func (s *RTCService) validateInternal(lgr logger.Logger, r *http.Request, strict bool) (livekit.RoomName, routing.ParticipantInit, int, error) {
var params ValidateConnectRequestParams
params.publish = r.FormValue("publish")
// require a claim
if claims == nil || claims.Video == nil {
return "", pi, http.StatusUnauthorized, rtc.ErrPermissionDenied
}
onlyName, err := EnsureJoinPermission(r.Context())
if err != nil {
return "", pi, http.StatusUnauthorized, err
}
if claims.Identity == "" {
return "", pi, http.StatusBadRequest, ErrIdentityEmpty
}
if !s.config.Limit.CheckParticipantIdentityLength(claims.Identity) {
return "", pi, http.StatusBadRequest, fmt.Errorf("%w: max length %d", ErrParticipantIdentityExceedsLimits, s.config.Limit.MaxParticipantIdentityLength)
}
if claims.RoomConfig != nil {
if err := claims.RoomConfig.CheckCredentials(); err != nil {
logger.Warnw("credentials found in token", nil)
// TODO(dz): in a future version, we'll reject these connections
}
}
roomName := livekit.RoomName(r.FormValue("room"))
reconnectParam := r.FormValue("reconnect")
reconnectReason, _ := strconv.Atoi(r.FormValue("reconnect_reason")) // 0 means unknown reason
autoSubParam := r.FormValue("auto_subscribe")
publishParam := r.FormValue("publish")
adaptiveStreamParam := r.FormValue("adaptive_stream")
participantID := r.FormValue("sid")
subscriberAllowPauseParam := r.FormValue("subscriber_allow_pause")
disableICELite := r.FormValue("disable_ice_lite")
attributesStr := r.FormValue("attributes")
if onlyName != "" {
roomName = onlyName
}
if !s.config.Limit.CheckRoomNameLength(string(roomName)) {
return "", pi, http.StatusBadRequest, fmt.Errorf("%w: max length %d", ErrRoomNameExceedsLimits, s.config.Limit.MaxRoomNameLength)
}
// this is new connection for existing participant - with publish only permissions
if publishParam != "" {
// Make sure grant has GetCanPublish set,
if !claims.Video.GetCanPublish() {
return "", routing.ParticipantInit{}, http.StatusUnauthorized, rtc.ErrPermissionDenied
}
// Make sure by default subscribe is off
claims.Video.SetCanSubscribe(false)
claims.Identity += "#" + publishParam
}
// room allocator validations
err = s.roomAllocator.ValidateCreateRoom(r.Context(), roomName)
if err != nil {
if errors.Is(err, ErrRoomNotFound) {
return "", pi, http.StatusNotFound, err
} else {
return "", pi, http.StatusInternalServerError, err
}
}
region := ""
if router, ok := s.router.(routing.Router); ok {
region = router.GetRegion()
if foundNode, err := router.GetNodeForRoom(r.Context(), roomName); err == nil {
if selector.LimitsReached(s.limits, foundNode.Stats) {
return "", pi, http.StatusServiceUnavailable, rtc.ErrLimitExceeded
}
}
}
createRequest := &livekit.CreateRoomRequest{
Name: string(roomName),
RoomPreset: claims.RoomPreset,
}
SetRoomConfiguration(createRequest, claims.GetRoomConfiguration())
// Add extra attributes to the participant
if attributesStr != "" {
// Make sure grant has GetCanUpdateOwnMetadata set
if !claims.Video.GetCanUpdateOwnMetadata() {
return "", routing.ParticipantInit{}, http.StatusUnauthorized, rtc.ErrPermissionDenied
}
attrs, err := decodeAttributes(attributesStr)
attributesStrParam := r.FormValue("attributes")
if attributesStrParam != "" {
attrs, err := decodeAttributes(attributesStrParam)
if err != nil {
if strict {
return "", pi, http.StatusBadRequest, errors.New("cannot decode attributes")
return "", routing.ParticipantInit{}, http.StatusBadRequest, errors.New("cannot decode attributes")
}
log.Debugw("failed to decode attributes", "error", err)
lgr.Debugw("failed to decode attributes", "error", err)
// attrs will be empty here, so just proceed
}
if len(attrs) != 0 && claims.Attributes == nil {
claims.Attributes = make(map[string]string, len(attrs))
}
for k, v := range attrs {
if v == "" {
continue // do not allow deleting existing attributes
}
claims.Attributes[k] = v
}
params.attributes = attrs
}
pi = routing.ParticipantInit{
Reconnect: boolValue(reconnectParam),
ReconnectReason: livekit.ReconnectReason(reconnectReason),
Identity: livekit.ParticipantIdentity(claims.Identity),
Name: livekit.ParticipantName(claims.Name),
AutoSubscribe: true,
Client: ParseClientInfo(r),
Grants: claims,
Region: region,
CreateRoom: createRequest,
res, code, err := ValidateConnectRequest(
lgr,
r,
s.limits,
params,
s.router,
s.roomAllocator,
)
if err != nil {
return res.roomName, routing.ParticipantInit{}, code, err
}
pi := routing.ParticipantInit{
Reconnect: boolValue(r.FormValue("reconnect")),
Identity: livekit.ParticipantIdentity(res.grants.Identity),
Name: livekit.ParticipantName(res.grants.Name),
Client: ParseClientInfo(r),
Grants: res.grants,
Region: res.region,
CreateRoom: res.createRoomRequest,
AutoSubscribe: true,
AdaptiveStream: boolValue(r.FormValue("adaptive_stream")),
DisableICELite: boolValue(r.FormValue("disable_ice_lite")),
}
reconnectReason, _ := strconv.Atoi(r.FormValue("reconnect_reason")) // 0 means unknown reason
pi.ReconnectReason = livekit.ReconnectReason(reconnectReason)
if pi.Reconnect {
pi.ID = livekit.ParticipantID(participantID)
pi.ID = livekit.ParticipantID(r.FormValue("sid"))
}
if autoSubParam != "" {
pi.AutoSubscribe = boolValue(autoSubParam)
}
if adaptiveStreamParam != "" {
pi.AdaptiveStream = boolValue(adaptiveStreamParam)
if autoSubscribe := r.FormValue("auto_subscribe"); autoSubscribe != "" {
pi.AutoSubscribe = boolValue(autoSubscribe)
}
subscriberAllowPauseParam := r.FormValue("subscriber_allow_pause")
if subscriberAllowPauseParam != "" {
subscriberAllowPause := boolValue(subscriberAllowPauseParam)
pi.SubscriberAllowPause = &subscriberAllowPause
}
if disableICELite != "" {
pi.DisableICELite = boolValue(disableICELite)
}
return roomName, pi, http.StatusOK, nil
return res.roomName, pi, code, err
}
func (s *RTCService) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+163
View File
@@ -0,0 +1,163 @@
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package service
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"github.com/livekit/livekit-server/pkg/config"
"github.com/livekit/livekit-server/pkg/routing"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
"github.com/livekit/protocol/rpc"
"google.golang.org/protobuf/proto"
)
const (
	// HTTP path for the signalling v2 connect endpoint.
	cRTCv2Path = "/rtc/v2"
)

// RTCv2Service serves the signalling v2 HTTP endpoint: it validates
// incoming ConnectRequests and relays them to the RTC node that owns
// the room.
type RTCv2Service struct {
	// NOTE(review): embedded http.Handler is never assigned here; routes are
	// registered via HandleFunc in SetupRoutes, so ServeHTTP through the
	// embedded field would nil-panic — confirm the embedding is intentional.
	http.Handler

	limits        config.LimitConfig // connection/name length limits from server config
	roomAllocator RoomAllocator      // validates room creation and selects room nodes
	router        routing.MessageRouter
}
// NewRTCv2Service creates the HTTP service backing the signalling v2
// endpoint, wired to the given allocator and message router.
func NewRTCv2Service(
	config *config.Config,
	roomAllocator RoomAllocator,
	router routing.MessageRouter,
) *RTCv2Service {
	svc := &RTCv2Service{}
	svc.limits = config.Limit
	svc.roomAllocator = roomAllocator
	svc.router = router
	return svc
}
// SetupRoutes registers the signalling v2 endpoint on mux.
// Uses a Go 1.22+ method-qualified pattern so only POST is accepted.
func (s *RTCv2Service) SetupRoutes(mux *http.ServeMux) {
	mux.HandleFunc("POST "+cRTCv2Path, s.handlePost)
}
// validateInternal validates a signalling v2 ConnectRequest against the
// request's auth claims and server limits, and builds the relay request
// that is forwarded to the RTC node.
//
// Returns the resolved room name, the participant identity taken from the
// validated grants, the relay connect request (validated grants serialized
// as JSON plus the room-create request), an HTTP status code, and an error.
// On error the room name / relay request are zero values and the status
// code indicates the failure class chosen by ValidateConnectRequest.
func (s *RTCv2Service) validateInternal(
	lgr logger.Logger,
	r *http.Request,
	connectRequest *livekit.ConnectRequest,
) (livekit.RoomName, livekit.ParticipantIdentity, *rpc.RelaySignalv2ConnectRequest, int, error) {
	// Metadata/attributes from the proto request participate in claim
	// validation (e.g. require canUpdateOwnMetadata), mirroring the v1
	// query-parameter path.
	params := ValidateConnectRequestParams{
		metadata:   connectRequest.Metadata,
		attributes: connectRequest.ParticipantAttributes,
	}

	res, code, err := ValidateConnectRequest(
		lgr,
		r,
		s.limits,
		params,
		s.router,
		s.roomAllocator,
	)
	if err != nil {
		return "", "", nil, code, err
	}

	// Grants travel to the RTC node as JSON inside the relay request.
	grantsJson, err := json.Marshal(res.grants)
	if err != nil {
		return "", "", nil, http.StatusInternalServerError, err
	}

	// Enrich client-reported info with details derived from the HTTP request
	// (e.g. address / user agent) before relaying.
	AugmentClientInfo(connectRequest.ClientInfo, r)

	return res.roomName,
		livekit.ParticipantIdentity(res.grants.Identity),
		&rpc.RelaySignalv2ConnectRequest{
			GrantsJson:     string(grantsJson),
			CreateRoom:     res.createRoomRequest,
			ConnectRequest: connectRequest,
		},
		code,
		err
}
// handlePost is the signalling v2 HTTP entry point. It decodes a protobuf
// Signalv2ClientMessage from the request body and dispatches on the message
// type:
//
//   - ConnectRequest: validated, a room node is selected, and the request is
//     relayed to the RTC node; the ConnectResponse is returned as protobuf.
//   - Envelope: ConnectRequest is not allowed inside an envelope; other
//     messages are currently unhandled (logged only) and answered with 200.
//
// Errors are reported as JSON via HandleErrorJson.
func (s *RTCv2Service) handlePost(w http.ResponseWriter, r *http.Request) {
	if r.Header.Get("Content-type") != "application/x-protobuf" {
		HandleErrorJson(w, r, http.StatusBadRequest, fmt.Errorf("unsupported content-type: %s", r.Header.Get("Content-type")))
		return
	}

	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		HandleErrorJson(w, r, http.StatusBadRequest, fmt.Errorf("could not read request body: %s", err))
		return
	}

	clientMessage := &livekit.Signalv2ClientMessage{}
	err = proto.Unmarshal(body, clientMessage)
	if err != nil {
		HandleErrorJson(w, r, http.StatusBadRequest, fmt.Errorf("could not unmarshal request: %s", err))
		return
	}

	switch msg := clientMessage.GetMessage().(type) {
	case *livekit.Signalv2ClientMessage_ConnectRequest:
		roomName, participantIdentity, rscr, code, err := s.validateInternal(logger.GetLogger(), r, msg.ConnectRequest)
		if err != nil {
			HandleErrorJson(w, r, code, err)
			return
		}

		if err := s.roomAllocator.SelectRoomNode(r.Context(), roomName, ""); err != nil {
			HandleErrorJson(w, r, http.StatusInternalServerError, err)
			return
		}

		resp, err := s.router.HandleParticipantConnectRequest(r.Context(), roomName, participantIdentity, rscr)
		if err != nil {
			HandleErrorJson(w, r, http.StatusInternalServerError, err)
			return
		}

		serverMessage := &livekit.Signalv2ServerMessage{
			Message: &livekit.Signalv2ServerMessage_ConnectResponse{
				ConnectResponse: resp.ConnectResponse,
			},
		}
		marshalled, err := proto.Marshal(serverMessage)
		if err != nil {
			HandleErrorJson(w, r, http.StatusInternalServerError, err)
			return
		}

		w.Header().Add("Content-type", "application/x-protobuf")
		// Write implies a 200 status; return here so the trailing
		// WriteHeader below is not called a second time (it would log a
		// "superfluous response.WriteHeader call" on every successful connect).
		_, _ = w.Write(marshalled)
		return
	case *livekit.Signalv2ClientMessage_Envelope:
		for _, cm := range msg.Envelope.ClientMessages {
			switch oneOf := cm.GetMessage().(type) {
			case *livekit.Signalv2ClientMessage_ConnectRequest:
				logger.Errorw("should not get ConnectRequest in envelope", nil)
			default:
				logger.Debugw("unhandled message", "message", oneOf)
			}
		}
	default:
		// Unknown top-level message: keep the existing 200 response but
		// leave a trace for debugging.
		logger.Debugw("unhandled client message", "message", msg)
	}

	w.WriteHeader(http.StatusOK)
}
+11 -1
View File
@@ -49,6 +49,7 @@ type LivekitServer struct {
config *config.Config
ioService *IOInfoService
rtcService *RTCService
rtcv2Service *RTCv2Service
rtcRestService *RTCRestService
agentService *AgentService
httpServer *http.Server
@@ -56,6 +57,7 @@ type LivekitServer struct {
router routing.Router
roomManager *RoomManager
signalServer *SignalServer
signalv2Server *Signalv2Server
turnServer *turn.Server
currentNode routing.LocalNode
running atomic.Bool
@@ -71,12 +73,14 @@ func NewLivekitServer(conf *config.Config,
sipService *SIPService,
ioService *IOInfoService,
rtcService *RTCService,
rtcv2Service *RTCv2Service,
rtcRestService *RTCRestService,
agentService *AgentService,
keyProvider auth.KeyProvider,
router routing.Router,
roomManager *RoomManager,
signalServer *SignalServer,
signalv2Server *Signalv2Server,
turnServer *turn.Server,
currentNode routing.LocalNode,
) (s *LivekitServer, err error) {
@@ -84,11 +88,13 @@ func NewLivekitServer(conf *config.Config,
config: conf,
ioService: ioService,
rtcService: rtcService,
rtcv2Service: rtcv2Service,
rtcRestService: rtcRestService,
agentService: agentService,
router: router,
roomManager: roomManager,
signalServer: signalServer,
signalv2Server: signalv2Server,
// turn server starts automatically
turnServer: turnServer,
currentNode: currentNode,
@@ -143,8 +149,8 @@ func NewLivekitServer(conf *config.Config,
xtwirp.RegisterServer(mux, egressServer)
xtwirp.RegisterServer(mux, ingressServer)
xtwirp.RegisterServer(mux, sipServer)
mux.Handle("/rtc", rtcService)
rtcService.SetupRoutes(mux)
rtcv2Service.SetupRoutes(mux)
rtcRestService.SetupRoutes(mux)
mux.Handle("/agent", agentService)
mux.HandleFunc("/", s.defaultHandler)
@@ -275,6 +281,10 @@ func (s *LivekitServer) Start() error {
return err
}
if err := s.signalv2Server.Start(); err != nil {
return err
}
httpGroup := &errgroup.Group{}
for _, ln := range listeners {
l := ln
@@ -0,0 +1,200 @@
// Code generated by counterfeiter. DO NOT EDIT.
package servicefakes
import (
"context"
"sync"
"github.com/livekit/livekit-server/pkg/service"
"github.com/livekit/protocol/auth"
"github.com/livekit/protocol/logger"
"github.com/livekit/protocol/rpc"
)
type FakeConnectHandler struct {
HandleConnectStub func(context.Context, logger.Logger, *auth.ClaimGrants, *rpc.RelaySignalv2ConnectRequest) (*rpc.RelaySignalv2ConnectResponse, error)
handleConnectMutex sync.RWMutex
handleConnectArgsForCall []struct {
arg1 context.Context
arg2 logger.Logger
arg3 *auth.ClaimGrants
arg4 *rpc.RelaySignalv2ConnectRequest
}
handleConnectReturns struct {
result1 *rpc.RelaySignalv2ConnectResponse
result2 error
}
handleConnectReturnsOnCall map[int]struct {
result1 *rpc.RelaySignalv2ConnectResponse
result2 error
}
LoggerStub func(context.Context) logger.Logger
loggerMutex sync.RWMutex
loggerArgsForCall []struct {
arg1 context.Context
}
loggerReturns struct {
result1 logger.Logger
}
loggerReturnsOnCall map[int]struct {
result1 logger.Logger
}
invocations map[string][][]interface{}
invocationsMutex sync.RWMutex
}
func (fake *FakeConnectHandler) HandleConnect(arg1 context.Context, arg2 logger.Logger, arg3 *auth.ClaimGrants, arg4 *rpc.RelaySignalv2ConnectRequest) (*rpc.RelaySignalv2ConnectResponse, error) {
fake.handleConnectMutex.Lock()
ret, specificReturn := fake.handleConnectReturnsOnCall[len(fake.handleConnectArgsForCall)]
fake.handleConnectArgsForCall = append(fake.handleConnectArgsForCall, struct {
arg1 context.Context
arg2 logger.Logger
arg3 *auth.ClaimGrants
arg4 *rpc.RelaySignalv2ConnectRequest
}{arg1, arg2, arg3, arg4})
stub := fake.HandleConnectStub
fakeReturns := fake.handleConnectReturns
fake.recordInvocation("HandleConnect", []interface{}{arg1, arg2, arg3, arg4})
fake.handleConnectMutex.Unlock()
if stub != nil {
return stub(arg1, arg2, arg3, arg4)
}
if specificReturn {
return ret.result1, ret.result2
}
return fakeReturns.result1, fakeReturns.result2
}
func (fake *FakeConnectHandler) HandleConnectCallCount() int {
fake.handleConnectMutex.RLock()
defer fake.handleConnectMutex.RUnlock()
return len(fake.handleConnectArgsForCall)
}
func (fake *FakeConnectHandler) HandleConnectCalls(stub func(context.Context, logger.Logger, *auth.ClaimGrants, *rpc.RelaySignalv2ConnectRequest) (*rpc.RelaySignalv2ConnectResponse, error)) {
fake.handleConnectMutex.Lock()
defer fake.handleConnectMutex.Unlock()
fake.HandleConnectStub = stub
}
func (fake *FakeConnectHandler) HandleConnectArgsForCall(i int) (context.Context, logger.Logger, *auth.ClaimGrants, *rpc.RelaySignalv2ConnectRequest) {
fake.handleConnectMutex.RLock()
defer fake.handleConnectMutex.RUnlock()
argsForCall := fake.handleConnectArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4
}
func (fake *FakeConnectHandler) HandleConnectReturns(result1 *rpc.RelaySignalv2ConnectResponse, result2 error) {
fake.handleConnectMutex.Lock()
defer fake.handleConnectMutex.Unlock()
fake.HandleConnectStub = nil
fake.handleConnectReturns = struct {
result1 *rpc.RelaySignalv2ConnectResponse
result2 error
}{result1, result2}
}
func (fake *FakeConnectHandler) HandleConnectReturnsOnCall(i int, result1 *rpc.RelaySignalv2ConnectResponse, result2 error) {
fake.handleConnectMutex.Lock()
defer fake.handleConnectMutex.Unlock()
fake.HandleConnectStub = nil
if fake.handleConnectReturnsOnCall == nil {
fake.handleConnectReturnsOnCall = make(map[int]struct {
result1 *rpc.RelaySignalv2ConnectResponse
result2 error
})
}
fake.handleConnectReturnsOnCall[i] = struct {
result1 *rpc.RelaySignalv2ConnectResponse
result2 error
}{result1, result2}
}
func (fake *FakeConnectHandler) Logger(arg1 context.Context) logger.Logger {
fake.loggerMutex.Lock()
ret, specificReturn := fake.loggerReturnsOnCall[len(fake.loggerArgsForCall)]
fake.loggerArgsForCall = append(fake.loggerArgsForCall, struct {
arg1 context.Context
}{arg1})
stub := fake.LoggerStub
fakeReturns := fake.loggerReturns
fake.recordInvocation("Logger", []interface{}{arg1})
fake.loggerMutex.Unlock()
if stub != nil {
return stub(arg1)
}
if specificReturn {
return ret.result1
}
return fakeReturns.result1
}
func (fake *FakeConnectHandler) LoggerCallCount() int {
fake.loggerMutex.RLock()
defer fake.loggerMutex.RUnlock()
return len(fake.loggerArgsForCall)
}
func (fake *FakeConnectHandler) LoggerCalls(stub func(context.Context) logger.Logger) {
fake.loggerMutex.Lock()
defer fake.loggerMutex.Unlock()
fake.LoggerStub = stub
}
func (fake *FakeConnectHandler) LoggerArgsForCall(i int) context.Context {
fake.loggerMutex.RLock()
defer fake.loggerMutex.RUnlock()
argsForCall := fake.loggerArgsForCall[i]
return argsForCall.arg1
}
func (fake *FakeConnectHandler) LoggerReturns(result1 logger.Logger) {
fake.loggerMutex.Lock()
defer fake.loggerMutex.Unlock()
fake.LoggerStub = nil
fake.loggerReturns = struct {
result1 logger.Logger
}{result1}
}
func (fake *FakeConnectHandler) LoggerReturnsOnCall(i int, result1 logger.Logger) {
fake.loggerMutex.Lock()
defer fake.loggerMutex.Unlock()
fake.LoggerStub = nil
if fake.loggerReturnsOnCall == nil {
fake.loggerReturnsOnCall = make(map[int]struct {
result1 logger.Logger
})
}
fake.loggerReturnsOnCall[i] = struct {
result1 logger.Logger
}{result1}
}
func (fake *FakeConnectHandler) Invocations() map[string][][]interface{} {
fake.invocationsMutex.RLock()
defer fake.invocationsMutex.RUnlock()
fake.handleConnectMutex.RLock()
defer fake.handleConnectMutex.RUnlock()
fake.loggerMutex.RLock()
defer fake.loggerMutex.RUnlock()
copiedInvocations := map[string][][]interface{}{}
for key, value := range fake.invocations {
copiedInvocations[key] = value
}
return copiedInvocations
}
func (fake *FakeConnectHandler) recordInvocation(key string, args []interface{}) {
fake.invocationsMutex.Lock()
defer fake.invocationsMutex.Unlock()
if fake.invocations == nil {
fake.invocations = map[string][][]interface{}{}
}
if fake.invocations[key] == nil {
fake.invocations[key] = [][]interface{}{}
}
fake.invocations[key] = append(fake.invocations[key], args)
}
var _ service.ConnectHandler = new(FakeConnectHandler)
+155
View File
@@ -0,0 +1,155 @@
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package service
import (
"context"
"encoding/json"
"github.com/livekit/livekit-server/pkg/routing"
"github.com/livekit/livekit-server/pkg/telemetry/prometheus"
"github.com/livekit/livekit-server/pkg/utils"
"github.com/livekit/protocol/auth"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
"github.com/livekit/protocol/rpc"
"github.com/livekit/psrpc"
"github.com/livekit/psrpc/pkg/middleware"
)
//counterfeiter:generate . ConnectHandler

// ConnectHandler processes relayed signalling v2 connect requests on the
// RTC node. A fake implementation is generated by counterfeiter for tests.
type ConnectHandler interface {
	// Logger returns the logger to use for a given request context.
	Logger(ctx context.Context) logger.Logger

	// HandleConnect handles a relayed connect request using the already
	// deserialized claim grants and returns the connect response to relay
	// back to the signalling edge.
	HandleConnect(
		ctx context.Context,
		lgr logger.Logger,
		grants *auth.ClaimGrants,
		rscr *rpc.RelaySignalv2ConnectRequest,
	) (*rpc.RelaySignalv2ConnectResponse, error)
}

// Signalv2Server hosts the RPC server side of the signalling v2 relay,
// subscribed to this node's topics.
type Signalv2Server struct {
	server rpc.TypedSignalv2Server
	nodeID livekit.NodeID // topic this node registers under
}
// NewSignalv2Server builds the typed signalling v2 RPC server on the given
// message bus, serving requests for nodeID with the supplied connect handler.
func NewSignalv2Server(
	nodeID livekit.NodeID,
	region string,
	bus psrpc.MessageBus,
	connectHandler ConnectHandler,
) (*Signalv2Server, error) {
	svc := &signalv2Service{
		region:         region,
		connectHandler: connectHandler,
	}
	typedServer, err := rpc.NewTypedSignalv2Server(
		nodeID,
		svc,
		bus,
		middleware.WithServerMetrics(rpc.PSRPCMetricsObserver{}),
	)
	if err != nil {
		return nil, err
	}

	return &Signalv2Server{
		server: typedServer,
		nodeID: nodeID,
	}, nil
}
// Start registers this node's topics on the message bus so signalling
// edges can relay v2 connect requests to it.
func (s *Signalv2Server) Start() error {
	logger.Debugw("starting relay signal v2 server", "topic", s.nodeID)
	return s.server.RegisterAllNodeTopics(s.nodeID)
}

// Stop shuts down the underlying RPC server.
// Receiver renamed r -> s for consistency with Start (Go convention:
// one receiver name per type).
func (s *Signalv2Server) Stop() {
	s.server.Kill()
}
// -------------------------------------------------
// NewDefaultSignalv2Server wires the signalling v2 server with the default
// handler backed by the local node, router, and room manager.
func NewDefaultSignalv2Server(
	currentNode routing.LocalNode,
	bus psrpc.MessageBus,
	router routing.Router,
	roomManager *RoomManager,
) (*Signalv2Server, error) {
	handler := &defaultSignalv2Handler{
		currentNode: currentNode,
		router:      router,
		roomManager: roomManager,
	}
	return NewSignalv2Server(currentNode.NodeID(), currentNode.Region(), bus, handler)
}
// -------------------------------------------------
// defaultSignalv2Handler is the production ConnectHandler: it verifies the
// request landed on the correct RTC node and forwards it to the room manager.
type defaultSignalv2Handler struct {
	currentNode routing.LocalNode
	router      routing.Router
	roomManager *RoomManager
}

// Logger returns the request-scoped logger stored on the context.
func (s *defaultSignalv2Handler) Logger(ctx context.Context) logger.Logger {
	return utils.GetLogger(ctx)
}

// HandleConnect processes a relayed v2 connect request on this RTC node.
// It rejects the request if routing resolves the room to a different node,
// otherwise hands off to RoomManager.HandleConnect.
func (s *defaultSignalv2Handler) HandleConnect(
	ctx context.Context,
	lgr logger.Logger,
	grants *auth.ClaimGrants,
	rscr *rpc.RelaySignalv2ConnectRequest,
) (*rpc.RelaySignalv2ConnectResponse, error) {
	prometheus.IncrementParticipantRtcInit(1)

	// NOTE(review): rscr.CreateRoom is dereferenced without a nil check —
	// confirm the signalling edge always populates it.
	rtcNode, err := s.router.GetNodeForRoom(ctx, livekit.RoomName(rscr.CreateRoom.Name))
	if err != nil {
		return nil, err
	}

	// Guard against stale routing: the relay must target the node that
	// currently owns the room.
	if livekit.NodeID(rtcNode.Id) != s.currentNode.NodeID() {
		err = routing.ErrIncorrectRTCNode
		lgr.Errorw(
			"called participant on incorrect node", err,
			"rtcNode", rtcNode,
		)
		return nil, err
	}

	return s.roomManager.HandleConnect(ctx, grants, rscr)
}
// ------------------------------------------
type signalv2Service struct {
region string
connectHandler ConnectHandler
}
// RelaySignalv2Connect implements the typed signal v2 RPC: it decodes the
// participant's claim grants from the request and forwards the connect
// request to the configured ConnectHandler, logging any failure.
func (r *signalv2Service) RelaySignalv2Connect(ctx context.Context, rscr *rpc.RelaySignalv2ConnectRequest) (*rpc.RelaySignalv2ConnectResponse, error) {
	var grants auth.ClaimGrants
	if err := json.Unmarshal([]byte(rscr.GrantsJson), &grants); err != nil {
		return nil, err
	}

	connLogger := r.connectHandler.Logger(ctx).WithValues(
		"room", grants.Video.Room,
		"participant", grants.Identity,
		// SIGNALLING-V2-TODO - maybe add a connection ID to rscr for tracking/debugging only
	)

	resp, err := r.connectHandler.HandleConnect(ctx, connLogger, &grants, rscr)
	if err != nil {
		connLogger.Errorw("could not handle new participant", err)
	}
	return resp, err
}
// ------------------------------------------
+156 -5
View File
@@ -16,19 +16,27 @@ package service
import (
"context"
"encoding/json"
"errors"
"fmt"
"net"
"net/http"
"regexp"
"strconv"
"strings"
"github.com/livekit/livekit-server/pkg/config"
"github.com/livekit/livekit-server/pkg/routing"
"github.com/livekit/livekit-server/pkg/routing/selector"
"github.com/livekit/livekit-server/pkg/rtc"
"github.com/livekit/livekit-server/pkg/utils"
"github.com/livekit/protocol/auth"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
"github.com/ua-parser/uap-go/uaparser"
)
func HandleError(w http.ResponseWriter, r *http.Request, status int, err error, keysAndValues ...interface{}) {
func handleError(w http.ResponseWriter, r *http.Request, status int, err error, keysAndValues ...interface{}) {
keysAndValues = append(keysAndValues, "status", status)
if r != nil && r.URL != nil {
keysAndValues = append(keysAndValues, "method", r.Method, "path", r.URL.Path)
@@ -37,9 +45,22 @@ func HandleError(w http.ResponseWriter, r *http.Request, status int, err error,
utils.GetLogger(r.Context()).WithCallDepth(1).Warnw("error handling request", err, keysAndValues...)
}
w.WriteHeader(status)
}
// HandleError logs the request failure, writes the HTTP status, and sends
// the error text as a plain response body.
func HandleError(w http.ResponseWriter, r *http.Request, status int, err error, keysAndValues ...interface{}) {
	handleError(w, r, status, err, keysAndValues...)
	body := []byte(err.Error())
	// best-effort: a failed response-body write is not actionable here
	_, _ = w.Write(body)
}
// HandleErrorJson logs the request failure, writes the HTTP status, and sends
// a JSON body of the form {"error": "..."}.
//
// NOTE(review): handleError has already called WriteHeader by the time this
// body is encoded, so a Content-Type header can no longer be set here and
// clients see the server default — restructure handleError if an explicit
// "application/json" header is required.
func HandleErrorJson(w http.ResponseWriter, r *http.Request, status int, err error, keysAndValues ...interface{}) {
	handleError(w, r, status, err, keysAndValues...)
	// Best-effort write: status and logging already happened, and a failed
	// body write is not actionable, so the encode error is discarded
	// explicitly rather than silently dropped.
	_ = json.NewEncoder(w).Encode(struct {
		Error string `json:"error"`
	}{
		Error: err.Error(),
	})
}
// boolValue reports whether a query-parameter string represents true;
// only "1" and "true" are accepted.
func boolValue(s string) bool {
	switch s {
	case "1", "true":
		return true
	default:
		return false
	}
}
@@ -127,14 +148,20 @@ func ParseClientInfo(r *http.Request) *livekit.ClientInfo {
ci.DeviceModel = values.Get("device_model")
ci.Network = values.Get("network")
// get real address (forwarded http header) - check Cloudflare headers first, fall back to X-Forwarded-For
ci.Address = GetClientIP(r)
AugmentClientInfo(ci, r)
return ci
}
func AugmentClientInfo(ci *livekit.ClientInfo, req *http.Request) {
ci.Address = GetClientIP(req)
// attempt to parse types for SDKs that support browser as a platform
if ci.Sdk == livekit.ClientInfo_JS ||
ci.Sdk == livekit.ClientInfo_REACT_NATIVE ||
ci.Sdk == livekit.ClientInfo_FLUTTER ||
ci.Sdk == livekit.ClientInfo_UNITY {
client := uaparser.NewFromSaved().Parse(r.UserAgent())
client := uaparser.NewFromSaved().Parse(req.UserAgent())
if ci.Browser == "" {
ci.Browser = client.UserAgent.Family
ci.BrowserVersion = client.UserAgent.ToVersionString()
@@ -152,6 +179,130 @@ func ParseClientInfo(r *http.Request) *livekit.ClientInfo {
ci.DeviceModel = model
}
}
return ci
}
// ValidateConnectRequestParams carries the client-supplied connect options
// that ValidateConnectRequest checks against the token's claims.
type ValidateConnectRequestParams struct {
	roomName   livekit.RoomName  // room requested by the client; token room takes precedence
	publish    string            // non-empty for a publish-only secondary connection
	metadata   string            // participant metadata to apply (requires update permission)
	attributes map[string]string // extra participant attributes (requires update permission)
}
// ValidateConnectRequestResult is the validated outcome of a connect request:
// the resolved room, the (possibly mutated) grants, and the room-create
// request derived from the token's room configuration.
type ValidateConnectRequestResult struct {
	roomName          livekit.RoomName
	grants            *auth.ClaimGrants
	region            string // region reported by the router, when available
	createRoomRequest *livekit.CreateRoomRequest
}
// ValidateConnectRequest validates a participant connect request against the
// token claims attached to r's context, limit configuration, and room
// allocator, returning the validated result, an HTTP status code, and an
// error. On success the status is http.StatusOK.
//
// Note: this function mutates the claims held in the request context
// (identity suffix for publish-only connections, subscribe permission,
// metadata, attributes) before returning them in the result.
func ValidateConnectRequest(
	lgr logger.Logger,
	r *http.Request,
	limitConfig config.LimitConfig,
	params ValidateConnectRequestParams,
	router routing.MessageRouter,
	roomAllocator RoomAllocator,
) (ValidateConnectRequestResult, int, error) {
	var res ValidateConnectRequestResult

	// require a claim
	claims := GetGrants(r.Context())
	if claims == nil || claims.Video == nil {
		return res, http.StatusUnauthorized, rtc.ErrPermissionDenied
	}

	// join permission also yields the room name embedded in the token, if any
	roomNameInToken, err := EnsureJoinPermission(r.Context())
	if err != nil {
		return res, http.StatusUnauthorized, err
	}

	if claims.Identity == "" {
		return res, http.StatusBadRequest, ErrIdentityEmpty
	}
	if !limitConfig.CheckParticipantIdentityLength(claims.Identity) {
		return res, http.StatusBadRequest, fmt.Errorf("%w: max length %d", ErrParticipantIdentityExceedsLimits, limitConfig.MaxParticipantIdentityLength)
	}
	if claims.RoomConfig != nil {
		if err := claims.RoomConfig.CheckCredentials(); err != nil {
			lgr.Warnw("credentials found in token", nil)
			// TODO(dz): in a future version, we'll reject these connections
		}
	}

	// the token's room name, when present, overrides the client-supplied one
	res.roomName = params.roomName
	if roomNameInToken != "" {
		res.roomName = roomNameInToken
	}
	if res.roomName == "" {
		return res, http.StatusBadRequest, ErrNoRoomName
	}
	if !limitConfig.CheckRoomNameLength(string(res.roomName)) {
		return res, http.StatusBadRequest, fmt.Errorf("%w: max length %d", ErrRoomNameExceedsLimits, limitConfig.MaxRoomNameLength)
	}

	// this is new connection for existing participant - with publish only permissions
	if params.publish != "" {
		// Make sure grant has GetCanPublish set,
		if !claims.Video.GetCanPublish() {
			return res, http.StatusUnauthorized, rtc.ErrPermissionDenied
		}
		// Make sure by default subscribe is off
		claims.Video.SetCanSubscribe(false)
		// suffix distinguishes the publish-only connection from the primary one
		claims.Identity += "#" + params.publish
	}

	// room allocator validations
	err = roomAllocator.ValidateCreateRoom(r.Context(), res.roomName)
	if err != nil {
		if errors.Is(err, ErrRoomNotFound) {
			return res, http.StatusNotFound, err
		} else {
			return res, http.StatusInternalServerError, err
		}
	}

	// when a full router is available, check the target node's capacity limits
	if router, ok := router.(routing.Router); ok {
		res.region = router.GetRegion()
		if foundNode, err := router.GetNodeForRoom(r.Context(), res.roomName); err == nil {
			if selector.LimitsReached(limitConfig, foundNode.Stats) {
				return res, http.StatusServiceUnavailable, rtc.ErrLimitExceeded
			}
		}
	}

	// build the create-room request from the token's preset/configuration
	createRequest := &livekit.CreateRoomRequest{
		Name:       string(res.roomName),
		RoomPreset: claims.RoomPreset,
	}
	SetRoomConfiguration(createRequest, claims.GetRoomConfiguration())
	res.createRoomRequest = createRequest

	if len(params.metadata) != 0 {
		// Make sure grant has GetCanUpdateOwnMetadata set
		if !claims.Video.GetCanUpdateOwnMetadata() {
			return res, http.StatusUnauthorized, rtc.ErrPermissionDenied
		}
		claims.Metadata = params.metadata
	}

	// Add extra attributes to the participant
	if len(params.attributes) != 0 {
		// Make sure grant has GetCanUpdateOwnMetadata set
		if !claims.Video.GetCanUpdateOwnMetadata() {
			return res, http.StatusUnauthorized, rtc.ErrPermissionDenied
		}
		if claims.Attributes == nil {
			claims.Attributes = make(map[string]string, len(params.attributes))
		}
		for k, v := range params.attributes {
			if v == "" {
				continue // do not allow deleting existing attributes
			}
			claims.Attributes[k] = v
		}
	}

	res.grants = claims
	return res, http.StatusOK, nil
}
+2
View File
@@ -77,6 +77,7 @@ func InitializeServer(conf *config.Config, currentNode routing.LocalNode) (*Live
NewRoomAllocator,
NewRoomService,
NewRTCService,
NewRTCv2Service,
NewRTCRestService,
NewAgentService,
NewAgentDispatchService,
@@ -84,6 +85,7 @@ func InitializeServer(conf *config.Config, currentNode routing.LocalNode) (*Live
getAgentStore,
getSignalRelayConfig,
NewDefaultSignalServer,
NewDefaultSignalv2Server,
routing.NewSignalClient,
getRoomConfig,
routing.NewRoomManagerClient,
+13 -8
View File
@@ -89,23 +89,23 @@ func InitializeServer(conf *config.Config, currentNode routing.LocalNode) (*Live
}
rtcEgressLauncher := NewEgressLauncher(egressClient, ioInfoService, objectStore)
topicFormatter := rpc.NewTopicFormatter()
roomClient, err := rpc.NewTypedRoomClient(clientParams)
v, err := rpc.NewTypedRoomClient(clientParams)
if err != nil {
return nil, err
}
participantClient, err := rpc.NewTypedParticipantClient(clientParams)
v2, err := rpc.NewTypedParticipantClient(clientParams)
if err != nil {
return nil, err
}
roomService, err := NewRoomService(limitConfig, apiConfig, router, roomAllocator, objectStore, rtcEgressLauncher, topicFormatter, roomClient, participantClient)
roomService, err := NewRoomService(limitConfig, apiConfig, router, roomAllocator, objectStore, rtcEgressLauncher, topicFormatter, v, v2)
if err != nil {
return nil, err
}
agentDispatchInternalClient, err := rpc.NewTypedAgentDispatchInternalClient(clientParams)
v3, err := rpc.NewTypedAgentDispatchInternalClient(clientParams)
if err != nil {
return nil, err
}
agentDispatchService := NewAgentDispatchService(agentDispatchInternalClient, topicFormatter, roomAllocator, router)
agentDispatchService := NewAgentDispatchService(v3, topicFormatter, roomAllocator, router)
egressService := NewEgressService(egressClient, rtcEgressLauncher, ioInfoService, roomService)
ingressConfig := getIngressConfig(conf)
ingressClient, err := rpc.NewIngressClient(clientParams)
@@ -120,11 +120,12 @@ func InitializeServer(conf *config.Config, currentNode routing.LocalNode) (*Live
}
sipService := NewSIPService(sipConfig, nodeID, messageBus, sipClient, sipStore, roomService, telemetryService)
rtcService := NewRTCService(conf, roomAllocator, router, telemetryService)
rtcRestParticipantClient, err := rpc.NewTypedRTCRestParticipantClient(clientParams)
rtCv2Service := NewRTCv2Service(conf, roomAllocator, router)
v4, err := rpc.NewTypedRTCRestParticipantClient(clientParams)
if err != nil {
return nil, err
}
serviceRTCRestService, err := NewRTCRestService(conf, router, roomAllocator, clientParams, topicFormatter, rtcRestParticipantClient)
serviceRTCRestService, err := NewRTCRestService(conf, router, roomAllocator, clientParams, topicFormatter, v4)
if err != nil {
return nil, err
}
@@ -148,12 +149,16 @@ func InitializeServer(conf *config.Config, currentNode routing.LocalNode) (*Live
if err != nil {
return nil, err
}
signalv2Server, err := NewDefaultSignalv2Server(currentNode, messageBus, router, roomManager)
if err != nil {
return nil, err
}
authHandler := getTURNAuthHandlerFunc(turnAuthHandler)
server, err := newInProcessTurnServer(conf, authHandler)
if err != nil {
return nil, err
}
livekitServer, err := NewLivekitServer(conf, roomService, agentDispatchService, egressService, ingressService, sipService, ioInfoService, rtcService, serviceRTCRestService, agentService, keyProvider, router, roomManager, signalServer, server, currentNode)
livekitServer, err := NewLivekitServer(conf, roomService, agentDispatchService, egressService, ingressService, sipService, ioInfoService, rtcService, rtCv2Service, serviceRTCRestService, agentService, keyProvider, router, roomManager, signalServer, signalv2Server, server, currentNode)
if err != nil {
return nil, err
}