feat: SubscriptionManager to consolidate subscription handling (#1317)

Added a new manager to handle all subscription needs, implemented using the reconciler pattern. The goals are:

improve subscription resilience by separating desired state and current state
reduce complexity of synchronous processing
better detect failures with the ability to trigger full reconnect
This commit is contained in:
David Zhao
2023-01-24 23:06:16 -08:00
committed by GitHub
parent c696626fe8
commit cd6b8b80b9
31 changed files with 2625 additions and 2208 deletions
+1 -1
View File
@@ -18,7 +18,7 @@ require (
github.com/jxskiss/base62 v1.1.0
github.com/livekit/mageutil v0.0.0-20221221221243-f361fbe40290
github.com/livekit/mediatransportutil v0.0.0-20230111071722-904079e94a7c
github.com/livekit/protocol v1.3.3-0.20230123192015-7c8172cb6ec0
github.com/livekit/protocol v1.3.3-0.20230124045313-d208f342983a
github.com/livekit/psrpc v0.2.4
github.com/livekit/rtcscore-go v0.0.0-20220815072451-20ee10ae1995
github.com/mackerelio/go-osstat v0.2.3
+2 -4
View File
@@ -233,10 +233,8 @@ github.com/livekit/mageutil v0.0.0-20221221221243-f361fbe40290 h1:ZVsQUuUOM9G7O3
github.com/livekit/mageutil v0.0.0-20221221221243-f361fbe40290/go.mod h1:Rs3MhFwutWhGwmY1VQsygw28z5bWcnEYmS1OG9OxjOQ=
github.com/livekit/mediatransportutil v0.0.0-20230111071722-904079e94a7c h1:wdzwTJjCpzy2FDmwdyVVGVa4+U9iv3E4Jy9qUDe/ubw=
github.com/livekit/mediatransportutil v0.0.0-20230111071722-904079e94a7c/go.mod h1:1Dlx20JPoIKGP45eo+yuj0HjeE25zmyeX/EWHiPCjFw=
github.com/livekit/protocol v1.3.2 h1:3goGWbB5HFRb3tMjog8KP0nvZL1Fy6zut3W1psBzqE4=
github.com/livekit/protocol v1.3.2/go.mod h1:gwCG03nKlHlC9hTjL4pXQpn783ALhmbyhq65UZxqbb8=
github.com/livekit/protocol v1.3.3-0.20230123192015-7c8172cb6ec0 h1:l5VHOZj/vfKKWCp2dWpKnR9vp8USLCNd/ajQsj5FJVo=
github.com/livekit/protocol v1.3.3-0.20230123192015-7c8172cb6ec0/go.mod h1:gwCG03nKlHlC9hTjL4pXQpn783ALhmbyhq65UZxqbb8=
github.com/livekit/protocol v1.3.3-0.20230124045313-d208f342983a h1:j30ZpR5TN1XRd4bPiO1xqGfYVVNxqh6QtI6bBSlpk4U=
github.com/livekit/protocol v1.3.3-0.20230124045313-d208f342983a/go.mod h1:gwCG03nKlHlC9hTjL4pXQpn783ALhmbyhq65UZxqbb8=
github.com/livekit/psrpc v0.2.4 h1:Fdxq56uJAIpRHCTgJsvp7ozw51dKtUmD3nxSXq9pCLs=
github.com/livekit/psrpc v0.2.4/go.mod h1:+nJvbKx9DCZ6PSAsMHJPRAKjmRJ5WiyyhEmbKYqMKto=
github.com/livekit/rtcscore-go v0.0.0-20220815072451-20ee10ae1995 h1:vOaY2qvfLihDyeZtnGGN1Law9wRrw8BMGCr1TygTvMw=
+8 -2
View File
@@ -8,10 +8,16 @@ var (
ErrMaxParticipantsExceeded = errors.New("room has exceeded its max participants")
ErrLimitExceeded = errors.New("node has exceeded its configured limit")
ErrAlreadyJoined = errors.New("a participant with the same identity is already in the room")
ErrUnexpectedOffer = errors.New("expected answer SDP, received offer")
ErrDataChannelUnavailable = errors.New("data channel is not available")
ErrCannotSubscribe = errors.New("participant does not have permission to subscribe")
ErrEmptyIdentity = errors.New("participant identity cannot be empty")
ErrEmptyParticipantID = errors.New("participant ID cannot be empty")
ErrMissingGrants = errors.New("VideoGrant is missing")
// Track subscription related
ErrPublisherNotConnected = errors.New("publisher is not connected")
ErrNoTrackPermission = errors.New("participant is not allowed to subscribe to this track")
ErrNoSubscribePermission = errors.New("participant is not given permission to subscribe to tracks")
ErrTrackNotFound = errors.New("track cannot be found")
ErrTrackNotAttached = errors.New("track is not yet attached")
ErrTrackNotBound = errors.New("track not bound")
)
+7 -78
View File
@@ -115,10 +115,6 @@ func NewMediaTrackReceiver(params MediaTrackReceiverParams) *MediaTrackReceiver
Logger: params.Logger,
})
t.MediaTrackSubscriptions.OnDownTrackCreated(t.onDownTrackCreated)
t.MediaTrackSubscriptions.OnSubscriptionOperationComplete(func(sub types.LocalParticipant) {
t.removePendingSubscribeOp(sub.ID())
sub.ClearInProgressAndProcessSubscriptionRequestsQueue(t.ID())
})
if t.trackInfo.Muted {
t.SetMuted(true)
@@ -405,47 +401,12 @@ func (t *MediaTrackReceiver) AddOnClose(f func()) {
t.lock.Unlock()
}
func (t *MediaTrackReceiver) addPendingSubscribeOp(subscriberID livekit.ParticipantID) {
t.lock.Lock()
if c, ok := t.pendingSubscribeOp[subscriberID]; !ok {
t.pendingSubscribeOp[subscriberID] = 1
} else {
t.pendingSubscribeOp[subscriberID] = c + 1
}
t.lock.Unlock()
}
func (t *MediaTrackReceiver) removePendingSubscribeOp(subscriberID livekit.ParticipantID) {
t.lock.Lock()
if c, ok := t.pendingSubscribeOp[subscriberID]; ok {
t.pendingSubscribeOp[subscriberID] = c - 1
if t.pendingSubscribeOp[subscriberID] == 0 {
delete(t.pendingSubscribeOp, subscriberID)
}
}
t.lock.Unlock()
}
// AddSubscriber subscribes sub to current mediaTrack
func (t *MediaTrackReceiver) AddSubscriber(sub types.LocalParticipant) error {
if sub.EnqueueSubscribeTrack(t.ID(), t.params.MediaTrack, t.params.IsRelayed, t.addSubscriber) {
t.addPendingSubscribeOp(sub.ID())
}
return nil
}
func (t *MediaTrackReceiver) addSubscriber(sub types.LocalParticipant) (err error) {
defer func() {
if err != nil {
t.removePendingSubscribeOp(sub.ID())
}
}()
func (t *MediaTrackReceiver) AddSubscriber(sub types.LocalParticipant) (types.SubscribedTrack, error) {
t.lock.RLock()
if t.state != mediaTrackReceiverStateOpen {
t.lock.RUnlock()
err = ErrNotOpen
return
return nil, ErrNotOpen
}
receivers := t.receiversShadow
@@ -455,8 +416,7 @@ func (t *MediaTrackReceiver) addSubscriber(sub types.LocalParticipant) (err erro
if len(receivers) == 0 {
// cannot add, no receiver
err = ErrNoReceiver
return
return nil, ErrNoReceiver
}
for _, receiver := range receivers {
@@ -473,7 +433,6 @@ func (t *MediaTrackReceiver) addSubscriber(sub types.LocalParticipant) (err erro
}
}
// using DownTrack from ion-sfu
streamId := string(t.PublisherID())
if sub.ProtocolVersion().SupportsPackedStreamId() {
// when possible, pack both IDs in streamID to allow new streams to be generated
@@ -482,44 +441,21 @@ func (t *MediaTrackReceiver) addSubscriber(sub types.LocalParticipant) (err erro
}
tLogger := LoggerWithTrack(sub.GetLogger(), t.ID(), t.params.IsRelayed)
err = t.MediaTrackSubscriptions.AddSubscriber(sub, NewWrappedReceiver(WrappedReceiverParams{
wrParams := NewWrappedReceiver(WrappedReceiverParams{
Receivers: receivers,
TrackID: t.ID(),
StreamId: streamId,
UpstreamCodecs: potentialCodecs,
Logger: tLogger,
DisableRed: t.trackInfo.GetDisableRed(),
}))
if err != nil {
return
}
return nil
})
return t.MediaTrackSubscriptions.AddSubscriber(sub, wrParams)
}
// RemoveSubscriber removes participant from subscription
// stop all forwarders to the client
func (t *MediaTrackReceiver) RemoveSubscriber(subscriberID livekit.ParticipantID, willBeResumed bool) {
subTrack := t.getSubscribedTrack(subscriberID)
if subTrack == nil {
return
}
sub := subTrack.Subscriber()
if sub.EnqueueUnsubscribeTrack(subTrack.ID(), t.params.MediaTrack, t.params.IsRelayed, willBeResumed, t.removeSubscriber) {
t.addPendingSubscribeOp(sub.ID())
}
}
func (t *MediaTrackReceiver) removeSubscriber(subscriberID livekit.ParticipantID, willBeResumed bool) (err error) {
defer func() {
if err != nil {
t.removePendingSubscribeOp(subscriberID)
}
}()
err = t.MediaTrackSubscriptions.RemoveSubscriber(subscriberID, willBeResumed)
return
_ = t.MediaTrackSubscriptions.RemoveSubscriber(subscriberID, willBeResumed)
}
func (t *MediaTrackReceiver) removeAllSubscribersForMime(mime string, willBeResumed bool) {
@@ -529,13 +465,6 @@ func (t *MediaTrackReceiver) removeAllSubscribersForMime(mime string, willBeResu
}
}
func (t *MediaTrackReceiver) IsSubscribed() bool {
t.lock.RLock()
defer t.lock.RUnlock()
return t.MediaTrackSubscriptions.GetNumSubscribers() != 0 || len(t.pendingSubscribeOp) != 0
}
func (t *MediaTrackReceiver) RevokeDisallowedSubscribers(allowedSubscriberIdentities []livekit.ParticipantIdentity) []livekit.ParticipantIdentity {
var revokedSubscriberIdentities []livekit.ParticipantIdentity
+16 -63
View File
@@ -1,13 +1,11 @@
package rtc
import (
"context"
"errors"
"sync"
"github.com/pion/rtcp"
"github.com/pion/webrtc/v3"
"github.com/pion/webrtc/v3/pkg/rtcerr"
"go.uber.org/atomic"
"github.com/livekit/protocol/livekit"
@@ -30,9 +28,8 @@ type MediaTrackSubscriptions struct {
subscribedTracksMu sync.RWMutex
subscribedTracks map[livekit.ParticipantID]types.SubscribedTrack
onDownTrackCreated func(downTrack *sfu.DownTrack)
onSubscriptionOperationComplete func(sub types.LocalParticipant)
onSubscriberMaxQualityChange func(subscriberID livekit.ParticipantID, codec webrtc.RTPCodecCapability, layer int32)
onDownTrackCreated func(downTrack *sfu.DownTrack)
onSubscriberMaxQualityChange func(subscriberID livekit.ParticipantID, codec webrtc.RTPCodecCapability, layer int32)
}
type MediaTrackSubscriptionsParams struct {
@@ -58,10 +55,6 @@ func (t *MediaTrackSubscriptions) OnDownTrackCreated(f func(downTrack *sfu.DownT
t.onDownTrackCreated = f
}
func (t *MediaTrackSubscriptions) OnSubscriptionOperationComplete(f func(sub types.LocalParticipant)) {
t.onSubscriptionOperationComplete = f
}
func (t *MediaTrackSubscriptions) OnSubscriberMaxQualityChange(f func(subscriberID livekit.ParticipantID, codec webrtc.RTPCodecCapability, layer int32)) {
t.onSubscriberMaxQualityChange = f
}
@@ -82,7 +75,7 @@ func (t *MediaTrackSubscriptions) IsSubscriber(subID livekit.ParticipantID) bool
}
// AddSubscriber subscribes sub to current mediaTrack
func (t *MediaTrackSubscriptions) AddSubscriber(sub types.LocalParticipant, wr *WrappedReceiver) error {
func (t *MediaTrackSubscriptions) AddSubscriber(sub types.LocalParticipant, wr *WrappedReceiver) (types.SubscribedTrack, error) {
trackID := t.params.MediaTrack.ID()
subscriberID := sub.ID()
@@ -90,7 +83,7 @@ func (t *MediaTrackSubscriptions) AddSubscriber(sub types.LocalParticipant, wr *
t.subscribedTracksMu.Lock()
if _, ok := t.subscribedTracks[subscriberID]; ok {
t.subscribedTracksMu.Unlock()
return errAlreadySubscribed
return nil, errAlreadySubscribed
}
t.subscribedTracksMu.Unlock()
@@ -114,7 +107,7 @@ func (t *MediaTrackSubscriptions) AddSubscriber(sub types.LocalParticipant, wr *
LoggerWithTrack(sub.GetLogger(), trackID, t.params.IsRelayed),
)
if err != nil {
return err
return nil, err
}
if t.onDownTrackCreated != nil {
@@ -221,12 +214,12 @@ func (t *MediaTrackSubscriptions) AddSubscriber(sub types.LocalParticipant, wr *
//
sender, transceiver, err = sub.AddTrackToSubscriber(downTrack, addTrackParams)
if err != nil {
return err
return nil, err
}
} else {
sender, transceiver, err = sub.AddTransceiverFromTrackToSubscriber(downTrack, addTrackParams)
if err != nil {
return err
return nil, err
}
}
}
@@ -235,32 +228,24 @@ func (t *MediaTrackSubscriptions) AddSubscriber(sub types.LocalParticipant, wr *
// NOTE: safety net, if somehow a cached transceiver is re-used by a different track
sub.UncacheDownTrack(transceiver)
// negotiation isn't required if we've replaced track
subTrack.SetNeedsNegotiation(!replacedTrack)
subTrack.SetRTPSender(sender)
sendParameters := sender.GetParameters()
downTrack.SetRTPHeaderExtensions(sendParameters.HeaderExtensions)
downTrack.SetTransceiver(transceiver)
downTrack.OnCloseHandler(func(willBeResumed bool) {
go t.downTrackClosed(sub, subTrack, willBeResumed, sender)
go t.downTrackClosed(sub, willBeResumed)
})
t.subscribedTracksMu.Lock()
t.subscribedTracks[subscriberID] = subTrack
t.subscribedTracksMu.Unlock()
// since sub will lock, run it in a goroutine to avoid deadlocks
go func() {
sub.AddSubscribedTrack(subTrack, t.params.MediaTrack)
if !replacedTrack {
sub.Negotiate(false)
}
if t.onSubscriptionOperationComplete != nil {
t.onSubscriptionOperationComplete(sub)
}
}()
return nil
return subTrack, nil
}
// RemoveSubscriber removes participant from subscription
@@ -378,47 +363,15 @@ func (t *MediaTrackSubscriptions) DebugInfo() []map[string]interface{} {
func (t *MediaTrackSubscriptions) downTrackClosed(
sub types.LocalParticipant,
subTrack types.SubscribedTrack,
willBeResumed bool,
sender *webrtc.RTPSender,
) {
defer func() {
if t.onSubscriptionOperationComplete != nil {
t.onSubscriptionOperationComplete(sub)
}
}()
subscriberID := sub.ID()
t.subscribedTracksMu.Lock()
subTrack := t.subscribedTracks[subscriberID]
delete(t.subscribedTracks, subscriberID)
t.subscribedTracksMu.Unlock()
if !willBeResumed {
t.params.Telemetry.TrackUnsubscribed(context.Background(), subscriberID, t.params.MediaTrack.ToProto())
if sender != nil {
sub.GetLogger().Debugw("removing PeerConnection track",
"publisher", subTrack.PublisherIdentity(),
"publisherID", subTrack.PublisherID(),
"kind", t.params.MediaTrack.Kind(),
)
if err := sub.RemoveTrackFromSubscriber(sender); err != nil {
if _, ok := err.(*rtcerr.InvalidStateError); !ok {
// most of these are safe to ignore, since the track state might have already
// been set to Inactive
sub.GetLogger().Debugw("could not remove remoteTrack from forwarder",
"error", err,
"publisher", subTrack.PublisherIdentity(),
"publisherID", subTrack.PublisherID(),
)
}
}
}
}
sub.RemoveSubscribedTrack(subTrack, t.params.MediaTrack)
if !willBeResumed {
sub.Negotiate(false)
if subTrack != nil {
subTrack.Close(willBeResumed)
}
}
+54 -360
View File
@@ -50,20 +50,6 @@ type downTrackState struct {
downTrack sfu.DownTrackState
}
type SubscribeRequestType int
const (
SubscribeRequestTypeRemove SubscribeRequestType = iota
SubscribeRequestTypeAdd
)
type SubscribeRequest struct {
requestType SubscribeRequestType
willBeResumed bool
addCb func(sub types.LocalParticipant) error
removeCb func(subscriberID livekit.ParticipantID, willBeResumed bool) error
}
type ParticipantParams struct {
Identity livekit.ParticipantIdentity
Name livekit.ParticipantName
@@ -92,6 +78,7 @@ type ParticipantParams struct {
ReconnectOnPublicationError bool
ReconnectOnSubscriptionError bool
VersionGenerator utils2.TimedVersionGenerator
TrackResolver types.MediaTrackResolver
DisableDynacast bool
}
@@ -124,15 +111,10 @@ type ParticipantImpl struct {
*TransportManager
*UpTrackManager
*SubscriptionManager
// tracks the current participant is subscribed to
subscribedTracks map[livekit.TrackID]types.SubscribedTrack
// track settings of tracks the current participant is subscribed to
subscribedTracksSettings map[livekit.TrackID]*livekit.UpdateTrackSettings
// keeps track of disallowed tracks
// tracks and participants that this participant isn't allowed to subscribe to
disallowedSubscriptions map[livekit.TrackID]livekit.ParticipantID // trackID -> publisherID
// keeps track of other publishers ids that we are subscribed to
subscribedTo map[livekit.ParticipantID]struct{}
// keeps track of unpublished tracks in order to reuse trackID
unpublishedTracks []*livekit.TrackInfo
@@ -159,7 +141,6 @@ type ParticipantImpl struct {
onStateChange func(p types.LocalParticipant, oldState livekit.ParticipantInfo_State)
onParticipantUpdate func(types.LocalParticipant)
onDataPacket func(types.LocalParticipant, *livekit.DataPacket)
onSubscribedTo func(types.LocalParticipant, livekit.ParticipantID)
migrateState atomic.Value // types.MigrateState
@@ -169,10 +150,6 @@ type ParticipantImpl struct {
cachedDownTracks map[livekit.TrackID]*downTrackState
subscriptionInProgress map[livekit.TrackID]bool
subscriptionRequestsQueue map[livekit.TrackID][]SubscribeRequest
trackPublisherVersion map[livekit.TrackID]uint32
supervisor *supervisor.ParticipantSupervisor
}
@@ -187,19 +164,13 @@ func NewParticipant(params ParticipantParams) (*ParticipantImpl, error) {
return nil, ErrMissingGrants
}
p := &ParticipantImpl{
params: params,
rtcpCh: make(chan []rtcp.Packet, 100),
pendingTracks: make(map[string]*pendingTrackInfo),
subscribedTracks: make(map[livekit.TrackID]types.SubscribedTrack),
subscribedTracksSettings: make(map[livekit.TrackID]*livekit.UpdateTrackSettings),
disallowedSubscriptions: make(map[livekit.TrackID]livekit.ParticipantID),
subscribedTo: make(map[livekit.ParticipantID]struct{}),
connectedAt: time.Now(),
rttUpdatedAt: time.Now(),
cachedDownTracks: make(map[livekit.TrackID]*downTrackState),
subscriptionInProgress: make(map[livekit.TrackID]bool),
subscriptionRequestsQueue: make(map[livekit.TrackID][]SubscribeRequest),
trackPublisherVersion: make(map[livekit.TrackID]uint32),
params: params,
rtcpCh: make(chan []rtcp.Packet, 100),
pendingTracks: make(map[string]*pendingTrackInfo),
disallowedSubscriptions: make(map[livekit.TrackID]livekit.ParticipantID),
connectedAt: time.Now(),
rttUpdatedAt: time.Now(),
cachedDownTracks: make(map[livekit.TrackID]*downTrackState),
dataChannelStats: telemetry.NewBytesTrackStats(
telemetry.BytesTrackIDForParticipantID(telemetry.BytesTrackTypeData, params.SID),
params.SID,
@@ -213,7 +184,6 @@ func NewParticipant(params ParticipantParams) (*ParticipantImpl, error) {
p.SetResponseSink(params.Sink)
p.supervisor.OnPublicationError(p.onPublicationError)
p.supervisor.OnSubscriptionError(p.onSubscriptionError)
var err error
// keep last participants and when updates were sent
@@ -227,6 +197,7 @@ func NewParticipant(params ParticipantParams) (*ParticipantImpl, error) {
}
p.setupUpTrackManager()
p.setupSubscriptionManager()
return p, nil
}
@@ -275,18 +246,12 @@ func (p *ParticipantImpl) IsDisconnected() bool {
func (p *ParticipantImpl) IsIdle() bool {
// check if there are any published tracks that are subscribed
for _, t := range p.GetPublishedTracks() {
if t.IsSubscribed() {
if t.GetNumSubscribers() > 0 {
return false
}
}
p.lock.RLock()
defer p.lock.RUnlock()
// check if participant is subscribed to any tracks
if len(p.subscribedTracks) != 0 || len(p.subscriptionInProgress) != 0 || len(p.subscriptionRequestsQueue) != 0 {
return false
}
return true
return !p.SubscriptionManager.HasSubscriptions()
}
func (p *ParticipantImpl) ConnectedAt() time.Time {
@@ -380,6 +345,7 @@ func (p *ParticipantImpl) SetPermission(permission *livekit.ParticipantPermissio
video.Recorder = permission.Recorder
canPublish := video.GetCanPublish()
canSubscribe := video.GetCanSubscribe()
onParticipantUpdate := p.onParticipantUpdate
onClaimsChanged := p.onClaimsChanged
p.lock.Unlock()
@@ -396,6 +362,16 @@ func (p *ParticipantImpl) SetPermission(permission *livekit.ParticipantPermissio
}
}
}
if canSubscribe {
// reconcile everything
p.SubscriptionManager.queueReconcile("")
} else {
// revoke all subscriptions
for _, st := range p.GetSubscribedTracks() {
st.MediaTrack().RemoveSubscriber(p.ID(), false)
}
}
// update isPublisher attribute
p.isPublisher.Store(canPublish && p.TransportManager.IsPublisherEstablished())
@@ -460,12 +436,6 @@ func (p *ParticipantImpl) OnDataPacket(callback func(types.LocalParticipant, *li
p.lock.Unlock()
}
func (p *ParticipantImpl) OnSubscribedTo(callback func(types.LocalParticipant, livekit.ParticipantID)) {
p.lock.Lock()
p.onSubscribedTo = callback
p.lock.Unlock()
}
func (p *ParticipantImpl) OnClose(callback func(types.LocalParticipant, map[livekit.TrackID]livekit.ParticipantID)) {
p.lock.Lock()
p.onClose = callback
@@ -665,12 +635,6 @@ func (p *ParticipantImpl) Close(sendLeave bool, reason types.ParticipantCloseRea
for trackID, publisherID := range p.disallowedSubscriptions {
disallowedSubscriptions[trackID] = publisherID
}
// remove all down tracks
downTracksToClose := make([]*sfu.DownTrack, 0, len(p.subscribedTracks))
for _, st := range p.subscribedTracks {
downTracksToClose = append(downTracksToClose, st.DownTrack())
}
p.lock.Unlock()
p.updateState(livekit.ParticipantInfo_DISCONNECTED)
@@ -684,13 +648,10 @@ func (p *ParticipantImpl) Close(sendLeave bool, reason types.ParticipantCloseRea
onClose(p, disallowedSubscriptions)
}
// Close peer connections without blocking participant close. If peer connections are gathering candidates
// Close peer connections without blocking participant Close. If peer connections are gathering candidates
// Close will block.
go func() {
for _, dt := range downTracksToClose {
dt.CloseWithFlush(sendLeave)
}
p.SubscriptionManager.Close(!sendLeave)
p.TransportManager.Close()
}()
@@ -698,6 +659,10 @@ func (p *ParticipantImpl) Close(sendLeave bool, reason types.ParticipantCloseRea
return nil
}
func (p *ParticipantImpl) IsClosed() bool {
return p.isClosed.Load()
}
// Negotiate subscriber SDP with client, if force is true, will cancel pending
// negotiate task and negotiate immediately
func (p *ParticipantImpl) Negotiate(force bool) {
@@ -755,16 +720,7 @@ func (p *ParticipantImpl) MaybeStartMigration(force bool, onStart func()) bool {
// DownTrack close has checks to handle the case of closing before bind.
// So, an `Unbind` before close would bypass that logic.
//
p.lock.Lock()
downTracksToClose := make([]*sfu.DownTrack, 0, len(p.subscribedTracks))
for _, st := range p.subscribedTracks {
downTracksToClose = append(downTracksToClose, st.DownTrack())
}
p.lock.Unlock()
for _, dt := range downTracksToClose {
dt.CloseWithFlush(false)
}
p.SubscriptionManager.Close(true)
p.TransportManager.SubscriberClose()
})
@@ -847,9 +803,9 @@ func (p *ParticipantImpl) GetConnectionQuality() *livekit.ConnectionQualityInfo
totalScore += score
}
p.lock.RLock()
subscriberScores := make(map[livekit.TrackID]float32, len(p.subscribedTracks))
for _, subTrack := range p.subscribedTracks {
subscribedTracks := p.SubscriptionManager.GetSubscribedTracks()
subscriberScores := make(map[livekit.TrackID]float32, len(subscribedTracks))
for _, subTrack := range subscribedTracks {
if subTrack.IsMuted() || subTrack.MediaTrack().IsMuted() {
continue
}
@@ -858,7 +814,6 @@ func (p *ParticipantImpl) GetConnectionQuality() *livekit.ConnectionQualityInfo
totalScore += score
numTracks++
}
p.lock.RUnlock()
avgScore := float32(5.0)
if numTracks > 0 {
@@ -872,25 +827,6 @@ func (p *ParticipantImpl) GetConnectionQuality() *livekit.ConnectionQualityInfo
}
}
func (p *ParticipantImpl) GetSubscribedParticipants() []livekit.ParticipantID {
p.lock.RLock()
defer p.lock.RUnlock()
var participantIDs []livekit.ParticipantID
for pID := range p.subscribedTo {
participantIDs = append(participantIDs, pID)
}
return participantIDs
}
func (p *ParticipantImpl) IsSubscribedTo(participantID livekit.ParticipantID) bool {
p.lock.RLock()
defer p.lock.RUnlock()
_, ok := p.subscribedTo[participantID]
return ok
}
func (p *ParticipantImpl) IsPublisher() bool {
return p.isPublisher.Load()
}
@@ -930,23 +866,6 @@ func (p *ParticipantImpl) IsRecorder() bool {
return p.grants.Video.Recorder
}
func (p *ParticipantImpl) UpdateSubscribedTrackSettings(trackID livekit.TrackID, settings *livekit.UpdateTrackSettings) error {
p.lock.Lock()
p.subscribedTracksSettings[trackID] = settings
subTrack := p.subscribedTracks[trackID]
if subTrack == nil {
// will get set when subscribed track is added
p.lock.Unlock()
p.params.Logger.Infow("could not find subscribed track", "trackID", trackID)
return nil
}
p.lock.Unlock()
subTrack.UpdateSubscriberSettings(settings)
return nil
}
func (p *ParticipantImpl) VerifySubscribeParticipantInfo(pID livekit.ParticipantID, version uint32) {
if !p.IsReady() {
// we have not sent a JoinResponse yet. metadata would be covered in JoinResponse
@@ -963,133 +882,23 @@ func (p *ParticipantImpl) VerifySubscribeParticipantInfo(pID livekit.Participant
}
}
// AddSubscribedTrack adds a track to the participant's subscribed list
func (p *ParticipantImpl) AddSubscribedTrack(subTrack types.SubscribedTrack, sourceTrack types.MediaTrack) {
p.lock.Lock()
if v, ok := p.trackPublisherVersion[subTrack.ID()]; ok && v > subTrack.PublisherVersion() {
p.supervisor.SetSubscribedTrack(subTrack.ID(), subTrack, sourceTrack)
p.lock.Unlock()
p.params.Logger.Debugw("ignoring add subscribedTrack from older version",
"current", v,
"requesting", subTrack.PublisherVersion(),
"trackID", subTrack.ID(),
)
return
}
p.params.Logger.Debugw("added subscribedTrack",
"publisherID", subTrack.PublisherID(),
"publisherIdentity", subTrack.PublisherIdentity(),
"trackID", subTrack.ID())
p.trackPublisherVersion[subTrack.ID()] = subTrack.PublisherVersion()
onSubscribedTo := p.onSubscribedTo
p.subscribedTracks[subTrack.ID()] = subTrack
// TODO: I believe we have yet to negotiate with the subscriber, so it's a bit early to consider the track been
// successfully subscribed to
// ideally we consider it's successful after the participant has answered that offer OR we've received the
// first receiver report
// will move the telemetry options when this is implemented
p.supervisor.SetSubscribedTrack(subTrack.ID(), subTrack, sourceTrack)
p.params.Telemetry.TrackSubscribed(context.Background(), p.ID(), sourceTrack.ToProto(), &livekit.ParticipantInfo{
Identity: string(subTrack.PublisherIdentity()),
Sid: string(subTrack.PublisherID()),
})
settings := p.subscribedTracksSettings[subTrack.ID()]
p.lock.Unlock()
// onTrackSubscribed handles post-processing after a track is subscribed
func (p *ParticipantImpl) onTrackSubscribed(subTrack types.SubscribedTrack) {
if p.params.ClientInfo.FireTrackByRTPPacket() {
subTrack.DownTrack().SetActivePaddingOnMuteUpTrack()
}
subTrack.OnBind(func() {
subTrack.AddOnBind(func() {
if p.TransportManager.HasSubscriberEverConnected() {
subTrack.DownTrack().SetConnected()
}
p.TransportManager.AddSubscribedTrack(subTrack)
})
if settings != nil {
subTrack.UpdateSubscriberSettings(settings)
}
publisherID := subTrack.PublisherID()
p.lock.Lock()
_, isAlreadySubscribed := p.subscribedTo[publisherID]
p.subscribedTo[publisherID] = struct{}{}
p.lock.Unlock()
if !isAlreadySubscribed && onSubscribedTo != nil {
onSubscribedTo(p, publisherID)
}
}
// RemoveSubscribedTrack removes a track to the participant's subscribed list
func (p *ParticipantImpl) RemoveSubscribedTrack(subTrack types.SubscribedTrack, sourceTrack types.MediaTrack) {
p.lock.Lock()
if v, ok := p.trackPublisherVersion[subTrack.ID()]; ok && v > subTrack.PublisherVersion() {
p.supervisor.ClearSubscribedTrack(subTrack.ID(), subTrack, sourceTrack)
p.lock.Unlock()
p.params.Logger.Debugw("ignoring remove subscribedTrack from older version",
"current", v,
"requesting", subTrack.PublisherVersion(),
"trackID", subTrack.ID(),
)
return
}
p.params.Logger.Debugw("removed subscribedTrack",
"publisherID", subTrack.PublisherID(),
"publisherIdentity", subTrack.PublisherIdentity(),
"trackID", subTrack.ID(), "kind", subTrack.DownTrack().Kind())
p.trackPublisherVersion[subTrack.ID()] = subTrack.PublisherVersion()
delete(p.subscribedTracks, subTrack.ID())
p.supervisor.ClearSubscribedTrack(subTrack.ID(), subTrack, sourceTrack)
// remove from subscribed map
numRemaining := 0
for _, st := range p.subscribedTracks {
if st.PublisherID() == subTrack.PublisherID() {
numRemaining++
}
}
//
// NOTE
// subscribedTracksSettings should not be deleted on removal as it is needed if corresponding publisher migrated
// LK-TODO: find a way to clean these up
//
if numRemaining == 0 {
delete(p.subscribedTo, subTrack.PublisherID())
}
p.lock.Unlock()
// onTrackUnsubscribed handles post-processing after a track is unsubscribed
func (p *ParticipantImpl) onTrackUnsubscribed(subTrack types.SubscribedTrack) {
p.TransportManager.RemoveSubscribedTrack(subTrack)
if numRemaining == 0 {
//
// When a participant leaves OR
// this participant unsubscribes from all tracks of another participant,
// have to send speaker update indicating that the participant speaker is no longer active
// so that clients can clean up their speaker state for the leaving/unsubscribed participant
//
if p.ProtocolVersion().SupportsSpeakerChanged() {
_ = p.writeMessage(&livekit.SignalResponse{
Message: &livekit.SignalResponse_SpeakersChanged{
SpeakersChanged: &livekit.SpeakersChanged{
Speakers: []*livekit.SpeakerInfo{
{
Sid: string(subTrack.PublisherID()),
Level: 0,
Active: false,
},
},
},
},
})
}
}
}
func (p *ParticipantImpl) SubscriptionPermissionUpdate(publisherID livekit.ParticipantID, trackID livekit.TrackID, allowed bool) {
@@ -1224,6 +1033,18 @@ func (p *ParticipantImpl) setupUpTrackManager() {
p.UpTrackManager.OnUpTrackManagerClose(p.onUpTrackManagerClose)
}
func (p *ParticipantImpl) setupSubscriptionManager() {
p.SubscriptionManager = NewSubscriptionManager(SubscriptionManagerParams{
Participant: p,
Logger: p.params.Logger,
TrackResolver: p.params.TrackResolver,
Telemetry: p.params.Telemetry,
OnTrackSubscribed: p.onTrackSubscribed,
OnTrackUnsubscribed: p.onTrackUnsubscribed,
OnSubcriptionError: p.onSubscriptionError,
})
}
func (p *ParticipantImpl) updateState(state livekit.ParticipantInfo_State) {
oldState := p.State()
if state == oldState {
@@ -1399,8 +1220,9 @@ func (p *ParticipantImpl) subscriberRTCPWorker() {
var srs []rtcp.Packet
var sd []rtcp.SourceDescriptionChunk
subscribedTracks := p.SubscriptionManager.GetSubscribedTracks()
p.lock.RLock()
for _, subTrack := range p.subscribedTracks {
for _, subTrack := range subscribedTracks {
sr := subTrack.DownTrack().CreateSenderReport()
chunks := subTrack.DownTrack().CreateSourceDescriptionChunks()
if sr == nil || chunks == nil {
@@ -2010,30 +1832,9 @@ func (p *ParticipantImpl) DebugInfo() map[string]interface{} {
info["UpTrackManager"] = p.UpTrackManager.DebugInfo()
subscribedTrackInfo := make(map[livekit.TrackID]interface{})
p.lock.RLock()
for _, track := range p.subscribedTracks {
dt := track.DownTrack().DebugInfo()
dt["SubMuted"] = track.IsMuted()
subscribedTrackInfo[track.ID()] = dt
}
p.lock.RUnlock()
info["SubscribedTracks"] = subscribedTrackInfo
return info
}
func (p *ParticipantImpl) GetSubscribedTracks() []types.SubscribedTrack {
p.lock.RLock()
defer p.lock.RUnlock()
tracks := make([]types.SubscribedTrack, 0, len(p.subscribedTracks))
for _, t := range p.subscribedTracks {
tracks = append(tracks, t)
}
return tracks
}
func (p *ParticipantImpl) postRtcp(pkts []rtcp.Packet) {
select {
case p.rtcpCh <- pkts:
@@ -2043,10 +1844,7 @@ func (p *ParticipantImpl) postRtcp(pkts []rtcp.Packet) {
}
func (p *ParticipantImpl) setDowntracksConnected() {
p.lock.RLock()
defer p.lock.RUnlock()
for _, t := range p.subscribedTracks {
for _, t := range p.GetSubscribedTracks() {
if dt := t.DownTrack(); dt != nil {
dt.SetConnected()
}
@@ -2119,110 +1917,6 @@ func (p *ParticipantImpl) onAnyTransportNegotiationFailed() {
p.issueFullReconnect(types.ParticipantCloseReasonNegotiateFailed)
}
func (p *ParticipantImpl) EnqueueSubscribeTrack(trackID livekit.TrackID, sourceTrack types.MediaTrack, isRelayed bool, f func(sub types.LocalParticipant) error) bool {
// do not queue subscription if participant is already closed/disconnected
if p.isClosed.Load() || p.IsDisconnected() {
return false
}
p.params.Logger.Debugw("queuing subscribe", "trackID", trackID, "relayed", isRelayed)
p.supervisor.UpdateSubscription(trackID, true, sourceTrack)
p.params.Telemetry.TrackSubscribeRequested(
context.Background(),
p.ID(),
sourceTrack.ToProto(),
&livekit.ParticipantInfo{
Sid: string(sourceTrack.PublisherID()),
Identity: string(sourceTrack.PublisherIdentity()),
},
)
p.lock.Lock()
p.subscriptionRequestsQueue[trackID] = append(p.subscriptionRequestsQueue[trackID], SubscribeRequest{
requestType: SubscribeRequestTypeAdd,
addCb: f,
})
p.lock.Unlock()
go p.ProcessSubscriptionRequestsQueue(trackID)
return true
}
func (p *ParticipantImpl) EnqueueUnsubscribeTrack(
trackID livekit.TrackID,
sourceTrack types.MediaTrack,
isRelayed bool,
willBeResumed bool,
f func(subscriberID livekit.ParticipantID, willBeResumed bool) error,
) bool {
p.params.Logger.Debugw("queuing unsubscribe", "trackID", trackID, "relayed", isRelayed)
p.supervisor.UpdateSubscription(trackID, false, sourceTrack)
p.lock.Lock()
p.subscriptionRequestsQueue[trackID] = append(p.subscriptionRequestsQueue[trackID], SubscribeRequest{
requestType: SubscribeRequestTypeRemove,
willBeResumed: willBeResumed,
removeCb: f,
})
p.lock.Unlock()
go p.ProcessSubscriptionRequestsQueue(trackID)
return true
}
// ProcessSubscriptionRequestsQueue pops and executes the next queued subscribe/unsubscribe
// request for trackID. At most one request per track is in flight at a time: the
// subscriptionInProgress flag blocks re-entry and is cleared (resuming the queue) via
// ClearInProgressAndProcessSubscriptionRequestsQueue — immediately on error here, or by
// whatever completion path the callbacks arrange on success.
func (p *ParticipantImpl) ProcessSubscriptionRequestsQueue(trackID livekit.TrackID) {
	p.lock.Lock()
	// bail if a request is already in flight for this track or nothing is queued
	if p.subscriptionInProgress[trackID] || len(p.subscriptionRequestsQueue[trackID]) == 0 {
		p.lock.Unlock()
		return
	}
	// pop the head of the queue; drop the map entry when the queue drains
	request := p.subscriptionRequestsQueue[trackID][0]
	p.subscriptionRequestsQueue[trackID] = p.subscriptionRequestsQueue[trackID][1:]
	if len(p.subscriptionRequestsQueue[trackID]) == 0 {
		delete(p.subscriptionRequestsQueue, trackID)
	}
	p.subscriptionInProgress[trackID] = true
	p.lock.Unlock()
	// execute the request outside the lock; callbacks may take arbitrary time
	switch request.requestType {
	case SubscribeRequestTypeAdd:
		err := request.addCb(p)
		if err != nil {
			if err != errAlreadySubscribed {
				p.params.Logger.Errorw("error adding subscriber", err, "trackID", trackID)
			}
			// process pending request even if adding errors out
			p.ClearInProgressAndProcessSubscriptionRequestsQueue(trackID)
		}
	case SubscribeRequestTypeRemove:
		err := request.removeCb(p.ID(), request.willBeResumed)
		if err != nil {
			p.ClearInProgressAndProcessSubscriptionRequestsQueue(trackID)
		}
	default:
		p.params.Logger.Warnw("unknown request type", nil,
			"requestType", request.requestType)
		// let the queue move forward
		p.ClearInProgressAndProcessSubscriptionRequestsQueue(trackID)
	}
}
// ClearInProgressAndProcessSubscriptionRequestsQueue clears the in-flight flag for
// trackID and resumes processing of any remaining queued requests for that track.
// Called when the current request has completed or failed.
func (p *ParticipantImpl) ClearInProgressAndProcessSubscriptionRequestsQueue(trackID livekit.TrackID) {
	p.lock.Lock()
	delete(p.subscriptionInProgress, trackID)
	p.lock.Unlock()
	// spawned after releasing the lock; ProcessSubscriptionRequestsQueue re-acquires it
	go p.ProcessSubscriptionRequestsQueue(trackID)
}
func (p *ParticipantImpl) UpdateSubscribedQuality(nodeID livekit.NodeID, trackID livekit.TrackID, maxQualities []types.SubscribedCodecQuality) error {
track := p.GetPublishedTrack(trackID)
if track == nil {
+98 -83
View File
@@ -13,6 +13,7 @@ import (
"google.golang.org/protobuf/proto"
"github.com/livekit/livekit-server/pkg/sfu/connectionquality"
"github.com/livekit/livekit-server/pkg/utils"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
@@ -49,6 +50,7 @@ type Room struct {
serverInfo *livekit.ServerInfo
telemetry telemetry.TelemetryService
egressLauncher EgressLauncher
changeNotifier *utils.ChangeNotifierManager
// map of identity -> Participant
participants map[livekit.ParticipantIdentity]types.LocalParticipant
@@ -92,6 +94,7 @@ func NewRoom(
audioConfig: audioConfig,
telemetry: telemetry,
egressLauncher: egressLauncher,
changeNotifier: utils.NewChangeNotifierManager(),
serverInfo: serverInfo,
participants: make(map[livekit.ParticipantIdentity]types.LocalParticipant),
participantOpts: make(map[livekit.ParticipantIdentity]*ParticipantOptions),
@@ -134,7 +137,7 @@ func (r *Room) GetParticipant(identity livekit.ParticipantIdentity) types.LocalP
return r.participants[identity]
}
func (r *Room) GetParticipantBySid(participantID livekit.ParticipantID) types.LocalParticipant {
func (r *Room) GetParticipantByID(participantID livekit.ParticipantID) types.LocalParticipant {
r.lock.RLock()
defer r.lock.RUnlock()
@@ -281,26 +284,38 @@ func (r *Room) Join(participant types.LocalParticipant, opts *ParticipantOptions
participant.OnTrackUpdated(r.onTrackUpdated)
participant.OnParticipantUpdate(r.onParticipantUpdate)
participant.OnDataPacket(r.onDataPacket)
participant.OnSubscribedTo(func(p types.LocalParticipant, publisherID livekit.ParticipantID) {
go func() {
// when a participant subscribes to another participant,
// send speaker update if the subscribed to participant is active.
speakers := r.GetActiveSpeakers()
for _, speaker := range speakers {
if livekit.ParticipantID(speaker.Sid) == publisherID {
_ = p.SendSpeakerUpdate(speakers)
break
}
}
// send connection quality of subscribed to participant
pub := r.GetParticipantBySid(publisherID)
participant.OnSubscribeStatusChanged(func(publisherID livekit.ParticipantID, subscribed bool) {
if subscribed {
pub := r.GetParticipantByID(publisherID)
if pub != nil && pub.State() == livekit.ParticipantInfo_ACTIVE {
// when a participant subscribes to another participant,
// send speaker update if the subscribed to participant is active.
level, active := pub.GetAudioLevel()
if active {
_ = participant.SendSpeakerUpdate([]*livekit.SpeakerInfo{
{
Sid: string(pub.ID()),
Level: float32(level),
Active: active,
},
})
}
update := &livekit.ConnectionQualityUpdate{}
update.Updates = append(update.Updates, pub.GetConnectionQuality())
_ = p.SendConnectionQualityUpdate(update)
_ = participant.SendConnectionQualityUpdate(update)
}
}()
} else {
// no longer subscribed to the publisher, clear speaker status
_ = participant.SendSpeakerUpdate([]*livekit.SpeakerInfo{
{
Sid: string(publisherID),
Level: 0,
Active: false,
},
})
}
})
r.Logger.Infow("new participant joined",
"pID", participant.ID(),
@@ -410,6 +425,11 @@ func (r *Room) RemoveParticipant(identity livekit.ParticipantIdentity, pID livek
return
}
// clean up notifiers, participant isn't coming back
for _, track := range p.GetPublishedTracks() {
r.changeNotifier.RemoveNotifier(string(track.ID()), true)
}
// send broadcast only if it's not already closed
sendUpdates := !p.IsDisconnected()
@@ -418,7 +438,7 @@ func (r *Room) RemoveParticipant(identity livekit.ParticipantIdentity, pID livek
p.OnStateChange(nil)
p.OnParticipantUpdate(nil)
p.OnDataPacket(nil)
p.OnSubscribedTo(nil)
p.OnSubscribeStatusChanged(nil)
// close participant as well
r.Logger.Debugw("closing participant for removal", "pID", p.ID(), "participant", p.Identity())
@@ -443,41 +463,40 @@ func (r *Room) UpdateSubscriptions(
trackIDs []livekit.TrackID,
participantTracks []*livekit.ParticipantTracks,
subscribe bool,
) error {
) {
// find all matching tracks
trackPublishers := make(map[livekit.TrackID]types.Participant)
publisherIDs := make(map[livekit.TrackID]livekit.ParticipantID)
publisherIdentities := make(map[livekit.TrackID]livekit.ParticipantIdentity)
participants := r.GetParticipants()
for _, trackID := range trackIDs {
for _, p := range participants {
track := p.GetPublishedTrack(trackID)
if track != nil {
trackPublishers[trackID] = p
publisherIDs[trackID] = p.ID()
publisherIdentities[trackID] = p.Identity()
break
}
}
}
for _, pt := range participantTracks {
p := r.GetParticipantBySid(livekit.ParticipantID(pt.ParticipantSid))
if p == nil {
continue
}
pub := r.GetParticipantByID(livekit.ParticipantID(pt.ParticipantSid))
for _, trackID := range livekit.StringsAsTrackIDs(pt.TrackSids) {
trackPublishers[trackID] = p
publisherIDs[trackID] = livekit.ParticipantID(pt.ParticipantSid)
if pub != nil {
publisherIdentities[trackID] = pub.Identity()
}
}
}
// handle subscription changes
for trackID, publisher := range trackPublishers {
for trackID, publisherID := range publisherIDs {
if subscribe {
if _, err := publisher.AddSubscriber(participant, types.AddSubscriberParams{TrackIDs: []livekit.TrackID{trackID}}); err != nil {
return err
}
participant.SubscribeToTrack(trackID, publisherIdentities[trackID], publisherID)
} else {
publisher.RemoveSubscriber(participant, trackID, false)
participant.UnsubscribeFromTrack(trackID)
}
}
return nil
}
func (r *Room) SyncState(participant types.LocalParticipant, state *livekit.SyncState) error {
@@ -485,41 +504,50 @@ func (r *Room) SyncState(participant types.LocalParticipant, state *livekit.Sync
}
func (r *Room) UpdateSubscriptionPermission(participant types.LocalParticipant, subscriptionPermission *livekit.SubscriptionPermission) error {
return participant.UpdateSubscriptionPermission(subscriptionPermission, nil, r.GetParticipant, r.GetParticipantBySid)
if err := participant.UpdateSubscriptionPermission(subscriptionPermission, nil, r.GetParticipant, r.GetParticipantByID); err != nil {
return err
}
for _, track := range participant.GetPublishedTracks() {
notifier := r.changeNotifier.GetNotifier(string(track.ID()))
if notifier != nil {
notifier.NotifyChanged()
}
}
return nil
}
func (r *Room) RemoveDisallowedSubscriptions(sub types.LocalParticipant, disallowedSubscriptions map[livekit.TrackID]livekit.ParticipantID) {
for trackID, publisherID := range disallowedSubscriptions {
pub := r.GetParticipantBySid(publisherID)
pub := r.GetParticipantByID(publisherID)
if pub == nil {
continue
}
pub.RemoveSubscriber(sub, trackID, false)
}
}
func (r *Room) SetParticipantPermission(participant types.LocalParticipant, permission *livekit.ParticipantPermission) error {
hadCanSubscribe := participant.CanSubscribe()
participant.SetPermission(permission)
// when subscribe perms are given, trigger autosub
if !hadCanSubscribe && participant.CanSubscribe() {
if participant.State() == livekit.ParticipantInfo_ACTIVE {
if r.subscribeToExistingTracks(participant) == 0 {
// start negotiating even if there are other media tracks to subscribe
// we'll need to set the participant up to receive data
participant.Negotiate(false)
}
track := pub.GetPublishedTrack(trackID)
if track != nil {
track.RemoveSubscriber(sub.ID(), false)
}
}
return nil
}
func (r *Room) UpdateVideoLayers(participant types.Participant, updateVideoLayers *livekit.UpdateVideoLayers) error {
return participant.UpdateVideoLayers(updateVideoLayers)
}
func (r *Room) ResolveMediaTrackForSubscriber(subIdentity livekit.ParticipantIdentity, publisherID livekit.ParticipantID, trackID livekit.TrackID) (types.MediaResolverResult, error) {
res := types.MediaResolverResult{}
pub := r.GetParticipantByID(publisherID)
if pub == nil {
return res, ErrPublisherNotConnected
}
res.Track = pub.GetPublishedTrack(trackID)
res.TrackChangeNotifier = r.changeNotifier.GetOrCreateNotifier(string(trackID))
res.HasPermission = pub.HasPermission(trackID, subIdentity)
return res, nil
}
func (r *Room) IsClosed() bool {
select {
case <-r.closed:
@@ -680,10 +708,6 @@ func (r *Room) SimulateScenario(participant types.LocalParticipant, simulateScen
// checks if participant should be autosubscribed to new tracks, assumes lock is already acquired
func (r *Room) autoSubscribe(participant types.LocalParticipant) bool {
if !participant.CanSubscribe() {
return false
}
opts := r.participantOpts[participant.Identity()]
// default to true if no options are set
if opts != nil && !opts.AutoSubscribe {
@@ -718,7 +742,7 @@ func (r *Room) createJoinResponseLocked(participant types.LocalParticipant, iceS
}
}
// a ParticipantImpl in the room added a new remoteTrack, subscribe other participants to it
// a ParticipantImpl in the room added a new track, subscribe other participants to it
func (r *Room) onTrackPublished(participant types.LocalParticipant, track types.MediaTrack) {
// publish participant update, since track state is changed
r.broadcastParticipantState(participant, broadcastOptions{skipSource: true})
@@ -744,20 +768,19 @@ func (r *Room) onTrackPublished(participant types.LocalParticipant, track types.
"publisher", participant.Identity(),
"publisherID", participant.ID(),
"trackID", track.ID())
if _, err := participant.AddSubscriber(existingParticipant, types.AddSubscriberParams{TrackIDs: []livekit.TrackID{track.ID()}}); err != nil {
r.Logger.Errorw("could not subscribe to remoteTrack", err,
"participant", existingParticipant.Identity(),
"pID", existingParticipant.ID(),
"publisher", participant.Identity(),
"publisherID", participant.ID(),
"trackID", track.ID())
}
existingParticipant.SubscribeToTrack(track.ID(), participant.Identity(), participant.ID())
}
onParticipantChanged := r.onParticipantChanged
r.lock.RUnlock()
if onParticipantChanged != nil {
onParticipantChanged(participant)
}
if r.onParticipantChanged != nil {
r.onParticipantChanged(participant)
notifier := r.changeNotifier.GetNotifier(string(track.ID()))
if notifier != nil {
notifier.NotifyChanged()
}
r.lock.RUnlock()
// auto track egress
if r.internal != nil && r.internal.TrackEgress != nil {
@@ -830,15 +853,15 @@ func (r *Room) onDataPacket(source types.LocalParticipant, dp *livekit.DataPacke
}
}
func (r *Room) subscribeToExistingTracks(p types.LocalParticipant) int {
func (r *Room) subscribeToExistingTracks(p types.LocalParticipant) {
r.lock.RLock()
shouldSubscribe := r.autoSubscribe(p)
r.lock.RUnlock()
if !shouldSubscribe {
return 0
return
}
tracksAdded := 0
var trackIDs []livekit.TrackID
for _, op := range r.GetParticipants() {
if p.ID() == op.ID() {
// don't send to itself
@@ -846,22 +869,14 @@ func (r *Room) subscribeToExistingTracks(p types.LocalParticipant) int {
}
// subscribe to all
n, err := op.AddSubscriber(p, types.AddSubscriberParams{AllTracks: true})
if err != nil {
// TODO: log error? or disconnect?
r.Logger.Errorw("could not subscribe to publisher", err,
"participant", p.Identity(),
"pID", p.ID(),
"publisher", op.Identity(),
"publisherID", op.ID(),
)
for _, track := range op.GetPublishedTracks() {
trackIDs = append(trackIDs, track.ID())
p.SubscribeToTrack(track.ID(), op.Identity(), op.ID())
}
tracksAdded += n
}
if tracksAdded > 0 {
r.Logger.Debugw("subscribed participants to existing tracks", "trackID", tracksAdded)
if len(trackIDs) > 0 {
r.Logger.Debugw("subscribed participant to existing tracks", "trackID", trackIDs)
}
return tracksAdded
}
// broadcast an update about participant p
+18 -29
View File
@@ -96,17 +96,15 @@ func TestRoomJoin(t *testing.T) {
stateChangeCB(p, livekit.ParticipantInfo_JOINED)
// it should become a subscriber when connectivity changes
numTracks := 0
for _, op := range rm.GetParticipants() {
if p == op {
continue
}
mockP := op.(*typesfakes.FakeLocalParticipant)
require.NotZero(t, mockP.AddSubscriberCallCount())
// last call should be to add the newest participant
sub, params := mockP.AddSubscriberArgsForCall(mockP.AddSubscriberCallCount() - 1)
require.Equal(t, p, sub)
require.Equal(t, types.AddSubscriberParams{AllTracks: true}, params)
numTracks += len(op.GetPublishedTracks())
}
require.Equal(t, numTracks, p.SubscribeToTrackCallCount())
})
t.Run("participant state change is broadcasted to others", func(t *testing.T) {
@@ -385,11 +383,9 @@ func TestNewTrack(t *testing.T) {
trackCB := pub.OnTrackPublishedArgsForCall(0)
require.NotNil(t, trackCB)
trackCB(pub, track)
// only p1 should've been called
require.Equal(t, 1, pub.AddSubscriberCallCount())
sub, params := pub.AddSubscriberArgsForCall(pub.AddSubscriberCallCount() - 1)
require.Equal(t, p1, sub)
require.Equal(t, types.AddSubscriberParams{TrackIDs: []livekit.TrackID{track.ID()}}, params)
// only p1 should've been subscribed to
require.Equal(t, 0, p0.SubscribeToTrackCallCount())
require.Equal(t, 1, p1.SubscribeToTrackCallCount())
})
}
@@ -635,29 +631,18 @@ func TestHiddenParticipants(t *testing.T) {
})
t.Run("hidden participant subscribes to tracks", func(t *testing.T) {
rm := newRoomWithParticipants(t, testRoomOpts{num: 2, numHidden: 1})
p := newMockParticipant("new", types.CurrentProtocol, false, true)
rm := newRoomWithParticipants(t, testRoomOpts{num: 2})
hidden := newMockParticipant("hidden", types.CurrentProtocol, true, false)
err := rm.Join(p, &ParticipantOptions{AutoSubscribe: true}, iceServersForRoom)
err := rm.Join(hidden, &ParticipantOptions{AutoSubscribe: true}, iceServersForRoom)
require.NoError(t, err)
stateChangeCB := p.OnStateChangeArgsForCall(0)
stateChangeCB := hidden.OnStateChangeArgsForCall(0)
require.NotNil(t, stateChangeCB)
p.StateReturns(livekit.ParticipantInfo_ACTIVE)
stateChangeCB(p, livekit.ParticipantInfo_JOINED)
hidden.StateReturns(livekit.ParticipantInfo_ACTIVE)
stateChangeCB(hidden, livekit.ParticipantInfo_JOINED)
// it should become a subscriber when connectivity changes
for _, op := range rm.GetParticipants() {
if p == op {
continue
}
mockP := op.(*typesfakes.FakeLocalParticipant)
require.NotZero(t, mockP.AddSubscriberCallCount())
// last call should be to add the newest participant
sub, params := mockP.AddSubscriberArgsForCall(mockP.AddSubscriberCallCount() - 1)
require.Equal(t, p, sub)
require.Equal(t, types.AddSubscriberParams{AllTracks: true}, params)
}
require.Equal(t, 2, hidden.SubscribeToTrackCallCount())
})
}
@@ -708,6 +693,10 @@ func newRoomWithParticipants(t *testing.T, opts testRoomOpts) *Room {
require.NoError(t, err)
participant.StateReturns(livekit.ParticipantInfo_ACTIVE)
participant.IsReadyReturns(true)
// each participant has a track
participant.GetPublishedTracksReturns([]types.MediaTrack{
&typesfakes.FakeMediaTrack{},
})
}
return rm
}
+9 -29
View File
@@ -26,37 +26,17 @@ func HandleParticipantSignal(room types.Room, participant types.LocalParticipant
case *livekit.SignalRequest_Mute:
participant.SetTrackMuted(livekit.TrackID(msg.Mute.Sid), msg.Mute.Muted, false)
case *livekit.SignalRequest_Subscription:
var err error
// always allow unsubscribe
if participant.CanSubscribe() || !msg.Subscription.Subscribe {
updateErr := room.UpdateSubscriptions(
participant,
livekit.StringsAsTrackIDs(msg.Subscription.TrackSids),
msg.Subscription.ParticipantTracks,
msg.Subscription.Subscribe,
)
if updateErr != nil {
err = updateErr
}
} else {
err = ErrCannotSubscribe
}
if err != nil {
pLogger.Warnw("could not update subscription", err,
"trackID", msg.Subscription.TrackSids,
"subscribe", msg.Subscription.Subscribe)
} else {
pLogger.Infow("updated subscription",
"trackID", msg.Subscription.TrackSids,
"subscribe", msg.Subscription.Subscribe)
}
// allow participant to indicate their interest in the subscription
// permission check happens later in SubscriptionManager
room.UpdateSubscriptions(
participant,
livekit.StringsAsTrackIDs(msg.Subscription.TrackSids),
msg.Subscription.ParticipantTracks,
msg.Subscription.Subscribe,
)
case *livekit.SignalRequest_TrackSetting:
for _, sid := range livekit.StringsAsTrackIDs(msg.TrackSetting.TrackSids) {
err := participant.UpdateSubscribedTrackSettings(sid, msg.TrackSetting)
if err != nil {
pLogger.Errorw("failed to update subscribed track settings", err, "trackID", sid)
continue
}
participant.UpdateSubscribedTrackSettings(sid, msg.TrackSetting)
}
case *livekit.SignalRequest_Leave:
pLogger.Infow("client leaving room")
+59 -14
View File
@@ -1,6 +1,7 @@
package rtc
import (
"sync"
"time"
"github.com/bep/debounce"
@@ -30,14 +31,18 @@ type SubscribedTrackParams struct {
}
type SubscribedTrack struct {
params SubscribedTrackParams
subMuted atomic.Bool
pubMuted atomic.Bool
settings atomic.Pointer[livekit.UpdateTrackSettings]
logger logger.Logger
params SubscribedTrackParams
subMuted atomic.Bool
pubMuted atomic.Bool
settings atomic.Pointer[livekit.UpdateTrackSettings]
logger logger.Logger
sender atomic.Pointer[webrtc.RTPSender]
needsNegotiation atomic.Bool
onBind atomic.Value // func()
bound atomic.Bool
bindLock sync.Mutex
onBindCallbacks []func()
onClose atomic.Value // func(bool)
bound atomic.Bool
debouncer func(func())
}
@@ -56,28 +61,52 @@ func NewSubscribedTrack(params SubscribedTrackParams) *SubscribedTrack {
return s
}
func (t *SubscribedTrack) OnBind(f func()) {
t.onBind.Store(f)
func (t *SubscribedTrack) AddOnBind(f func()) {
t.bindLock.Lock()
bound := t.bound.Load()
if !bound {
t.onBindCallbacks = append(t.onBindCallbacks, f)
}
t.bindLock.Unlock()
t.maybeOnBind()
if bound {
// fire immediately, do not need to persist since bind is a one time event
go f()
}
}
// for DownTrack callback to notify us that it's bound
func (t *SubscribedTrack) Bound() {
t.bindLock.Lock()
t.bound.Store(true)
callbacks := t.onBindCallbacks
t.onBindCallbacks = nil
t.bindLock.Unlock()
if !t.params.AdaptiveStream {
t.params.DownTrack.SetMaxSpatialLayer(
buffer.VideoQualityToSpatialLayer(livekit.VideoQuality_HIGH, t.params.MediaTrack.ToProto()),
)
}
t.maybeOnBind()
for _, cb := range callbacks {
go cb()
}
}
func (t *SubscribedTrack) maybeOnBind() {
if onBind := t.onBind.Load(); onBind != nil && t.bound.Load() {
go onBind.(func())()
// for DownTrack callback to notify us that it's closed
func (t *SubscribedTrack) Close(willBeResumed bool) {
if onClose := t.onClose.Load(); onClose != nil {
go onClose.(func(bool))(willBeResumed)
}
}
// OnClose sets the callback invoked (asynchronously, via Close) when this
// subscribed track is closed; the bool argument is willBeResumed.
func (t *SubscribedTrack) OnClose(f func(bool)) {
	t.onClose.Store(f)
}
// IsBound reports whether the DownTrack has signaled that it is bound (see Bound).
func (t *SubscribedTrack) IsBound() bool {
	return t.bound.Load()
}
// ID returns the track ID of the underlying DownTrack.
func (t *SubscribedTrack) ID() livekit.TrackID {
	return livekit.TrackID(t.params.DownTrack.ID())
}
@@ -167,6 +196,22 @@ func (t *SubscribedTrack) UpdateVideoLayer() {
}
}
// NeedsNegotiation reports whether this track still requires a negotiation round
// (as recorded by SetNeedsNegotiation).
func (t *SubscribedTrack) NeedsNegotiation() bool {
	return t.needsNegotiation.Load()
}
// SetNeedsNegotiation records whether this track still requires a negotiation round.
func (t *SubscribedTrack) SetNeedsNegotiation(needs bool) {
	t.needsNegotiation.Store(needs)
}
// RTPSender returns the RTPSender associated with this subscription, or nil if not set.
func (t *SubscribedTrack) RTPSender() *webrtc.RTPSender {
	return t.sender.Load()
}
// SetRTPSender stores the RTPSender used to deliver this track to the subscriber.
func (t *SubscribedTrack) SetRTPSender(sender *webrtc.RTPSender) {
	t.sender.Store(sender)
}
func (t *SubscribedTrack) updateDownTrackMute() {
muted := t.subMuted.Load() || t.pubMuted.Load()
t.DownTrack().Mute(muted)
+755
View File
@@ -0,0 +1,755 @@
/*
* Copyright 2023 LiveKit, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rtc
import (
"context"
"sync"
"sync/atomic"
"time"
"github.com/pion/webrtc/v3/pkg/rtcerr"
"github.com/livekit/livekit-server/pkg/rtc/types"
"github.com/livekit/livekit-server/pkg/sfu"
"github.com/livekit/livekit-server/pkg/telemetry"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
)
// using var instead of const to override in tests
var (
	// how often reconcileWorker sweeps all subscriptions
	reconcileInterval = 3 * time.Second
	// amount of time to give up if a track or publisher isn't found
	notFoundTimeout = 5 * time.Second
	// amount of time to try otherwise before flagging subscription as failed
	subscriptionTimeout = 10 * time.Second
)
// SubscriptionManagerParams configures a SubscriptionManager.
type SubscriptionManagerParams struct {
	Logger      logger.Logger
	Participant types.LocalParticipant
	// TrackResolver looks up the published track, its change notifier, and permission state
	TrackResolver types.MediaTrackResolver
	// OnTrackSubscribed fires after a subscription has been fulfilled
	OnTrackSubscribed func(subTrack types.SubscribedTrack)
	// OnTrackUnsubscribed fires after a subscribed track closes
	OnTrackUnsubscribed func(subTrack types.SubscribedTrack)
	// OnSubcriptionError fires when a subscription cannot be fulfilled within timeout
	// (NOTE: name misspelled — "Subcription" — kept for compatibility with callers)
	OnSubcriptionError func(trackID livekit.TrackID)
	Telemetry          telemetry.TelemetryService
}
// SubscriptionManager manages a participant's subscriptions using a reconciler:
// desired state is recorded per track and a background worker drives the actual
// subscription toward it.
type SubscriptionManager struct {
	params SubscriptionManagerParams
	lock   sync.RWMutex
	// desired/current state per trackID — the reconciler's source of truth
	subscriptions map[livekit.TrackID]*trackSubscription
	// publisherID -> set of trackIDs currently subscribed to from that publisher
	subscribedTo map[livekit.ParticipantID]map[livekit.TrackID]struct{}
	// buffered request channel for immediate reconciliation of a single track
	reconcileCh chan livekit.TrackID
	// closeCh is closed by Close(); doneCh is closed when reconcileWorker exits
	closeCh chan struct{}
	doneCh  chan struct{}
	onSubscribeStatusChanged func(publisherID livekit.ParticipantID, subscribed bool)
}
// NewSubscriptionManager constructs a SubscriptionManager and launches its
// background reconcile worker.
func NewSubscriptionManager(params SubscriptionManagerParams) *SubscriptionManager {
	manager := &SubscriptionManager{
		params:        params,
		subscriptions: make(map[livekit.TrackID]*trackSubscription),
		subscribedTo:  make(map[livekit.ParticipantID]map[livekit.TrackID]struct{}),
		reconcileCh:   make(chan livekit.TrackID, 10),
		closeCh:       make(chan struct{}),
		doneCh:        make(chan struct{}),
	}

	go manager.reconcileWorker()

	return manager
}
// Close shuts down the manager: stops the reconcile worker, waits for it to exit,
// then closes every active DownTrack. When willBeResumed is true, DownTracks are
// closed without flushing so the session can be resumed later. Safe to call more
// than once; subsequent calls return immediately.
func (m *SubscriptionManager) Close(willBeResumed bool) {
	m.lock.Lock()
	if m.isClosed() {
		m.lock.Unlock()
		return
	}
	close(m.closeCh)
	m.lock.Unlock()
	// wait for reconcileWorker to exit before tearing down tracks
	<-m.doneCh
	subTracks := m.GetSubscribedTracks()
	downTracksToClose := make([]*sfu.DownTrack, 0, len(subTracks))
	for _, st := range subTracks {
		dt := st.DownTrack()
		// nil check exists primarily for tests
		if dt != nil {
			downTracksToClose = append(downTracksToClose, st.DownTrack())
		}
	}
	for _, dt := range downTracksToClose {
		dt.CloseWithFlush(!willBeResumed)
	}
}
// isClosed reports whether Close has been called (closeCh has been closed).
func (m *SubscriptionManager) isClosed() bool {
	select {
	case <-m.closeCh:
		return true
	default:
	}
	return false
}
// SubscribeToTrack records the desire to subscribe to trackID published by the given
// publisher and queues a reconcile. Idempotent: repeated calls simply re-queue a
// reconcile, which lets a retry pick up a track that has since become available.
func (m *SubscriptionManager) SubscribeToTrack(trackID livekit.TrackID, publisherIdentity livekit.ParticipantIdentity, publisherID livekit.ParticipantID) {
	m.lock.Lock()
	sub, ok := m.subscriptions[trackID]
	if !ok {
		sub = newTrackSubscription(m.params.Participant.ID(), trackID)
		m.subscriptions[trackID] = sub
	}
	m.lock.Unlock()
	sub.setPublisher(publisherIdentity, publisherID)
	// setDesired returns true only when desired state actually changed — log once
	if sub.setDesired(true) {
		m.params.Logger.Infow("subscribing to track",
			"trackID", trackID,
			"publisherID", publisherID,
			"publisherIdentity", publisherIdentity,
		)
	}
	// always reconcile, since SubscribeToTrack could be called when the track is ready
	m.queueReconcile(trackID)
}
// UnsubscribeFromTrack records the desire to stop being subscribed to trackID and
// queues a reconcile to carry it out. A no-op if the track was never subscribed to
// or is already marked as undesired.
func (m *SubscriptionManager) UnsubscribeFromTrack(trackID livekit.TrackID) {
	m.lock.Lock()
	sub := m.subscriptions[trackID]
	m.lock.Unlock()
	if sub == nil {
		return
	}

	// setDesired returns true only when desired state actually changed
	if !sub.setDesired(false) {
		return
	}

	m.params.Logger.Infow("unsubscribing from track",
		"trackID", trackID,
		"publisherID", sub.getPublisherID(),
		"publisherIdentity", sub.getPublisherIdentity(),
	)
	m.queueReconcile(trackID)
}
// GetSubscribedTracks returns the SubscribedTrack for every subscription that is
// currently fulfilled (i.e. has an attached track).
func (m *SubscriptionManager) GetSubscribedTracks() []types.SubscribedTrack {
	m.lock.RLock()
	defer m.lock.RUnlock()

	tracks := make([]types.SubscribedTrack, 0, len(m.subscriptions))
	for _, sub := range m.subscriptions {
		if subTrack := sub.getSubscribedTrack(); subTrack != nil {
			tracks = append(tracks, subTrack)
		}
	}
	return tracks
}
// HasSubscriptions reports whether any subscription is currently desired,
// regardless of whether it has been fulfilled yet.
func (m *SubscriptionManager) HasSubscriptions() bool {
	m.lock.RLock()
	defer m.lock.RUnlock()

	for _, sub := range m.subscriptions {
		if sub.isDesired() {
			return true
		}
	}
	return false
}
// GetSubscribedParticipants returns the IDs of all publishers this participant is
// currently subscribed to (nil when there are none).
func (m *SubscriptionManager) GetSubscribedParticipants() []livekit.ParticipantID {
	m.lock.RLock()
	defer m.lock.RUnlock()

	var ids []livekit.ParticipantID
	for publisherID := range m.subscribedTo {
		ids = append(ids, publisherID)
	}
	return ids
}
// IsSubscribedTo reports whether this participant has at least one active
// subscription to a track published by participantID.
func (m *SubscriptionManager) IsSubscribedTo(participantID livekit.ParticipantID) bool {
	m.lock.RLock()
	defer m.lock.RUnlock()

	_, found := m.subscribedTo[participantID]
	return found
}
// UpdateSubscribedTrackSettings stores the client's track settings for trackID.
// A placeholder subscription is created if none exists yet, so settings received
// before the subscribe request are not lost.
func (m *SubscriptionManager) UpdateSubscribedTrackSettings(trackID livekit.TrackID, settings *livekit.UpdateTrackSettings) {
	m.lock.Lock()
	sub := m.subscriptions[trackID]
	if sub == nil {
		sub = newTrackSubscription(m.params.Participant.ID(), trackID)
		m.subscriptions[trackID] = sub
	}
	m.lock.Unlock()

	sub.setSettings(settings)
}
// OnSubscribeStatusChanged registers a callback notified when this participant
// subscribes to or fully unsubscribes from another participant. It fires only once
// per publisher: subscribing to multiple tracks from the same publisher produces a
// single notification.
func (m *SubscriptionManager) OnSubscribeStatusChanged(fn func(publisherID livekit.ParticipantID, subscribed bool)) {
	m.lock.Lock()
	defer m.lock.Unlock()
	m.onSubscribeStatusChanged = fn
}
// WaitUntilSubscribed polls until every desired subscription has been fulfilled,
// returning nil on success or context.DeadlineExceeded if they are not all
// satisfied within timeout.
func (m *SubscriptionManager) WaitUntilSubscribed(timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		pending := false
		m.lock.RLock()
		for _, sub := range m.subscriptions {
			if sub.needsSubscribe() {
				pending = true
				break
			}
		}
		m.lock.RUnlock()

		if !pending {
			return nil
		}
		time.Sleep(50 * time.Millisecond)
	}

	return context.DeadlineExceeded
}
// canReconcile reports whether reconciliation should proceed: it must stop once
// the manager is closed or the participant is closed/disconnected.
func (m *SubscriptionManager) canReconcile() bool {
	participant := m.params.Participant
	return !m.isClosed() && !participant.IsClosed() && !participant.IsDisconnected()
}
// reconcileSubscriptions sweeps all subscriptions and reconciles every one that
// has outstanding work (subscribe, unsubscribe, or bind pending). The candidates
// are collected under the read lock, then reconciled outside it.
func (m *SubscriptionManager) reconcileSubscriptions() {
	m.lock.RLock()
	pending := make([]*trackSubscription, 0, len(m.subscriptions))
	for _, sub := range m.subscriptions {
		if sub.needsSubscribe() || sub.needsUnsubscribe() || sub.needsBind() {
			pending = append(pending, sub)
		}
	}
	m.lock.RUnlock()

	for _, sub := range pending {
		m.reconcileSubscription(sub)
	}
}
// reconcileSubscription drives a single subscription toward its desired state.
// It attempts subscribe/unsubscribe as needed and escalates via telemetry and the
// OnSubcriptionError callback when attempts keep failing past their timeouts.
func (m *SubscriptionManager) reconcileSubscription(s *trackSubscription) {
	if !m.canReconcile() {
		return
	}
	if s.needsSubscribe() {
		// emit the telemetry "requested" event only on the very first attempt
		if s.numAttempts.Load() == 0 {
			m.params.Telemetry.TrackSubscribeRequested(
				context.Background(),
				m.params.Participant.ID(),
				&livekit.TrackInfo{
					Sid: string(s.trackID),
				},
				&livekit.ParticipantInfo{
					Sid:      string(s.getPublisherID()),
					Identity: string(s.getPublisherIdentity()),
				},
			)
		}
		if err := m.subscribe(s); err != nil {
			s.recordAttempt(false)
			switch err {
			case ErrNoTrackPermission, ErrNoSubscribePermission:
				// retry permission errors forever, since it's outside of our control and publisher could
				// grant any time
				// however, we'll still log an event to reflect this in telemetry
				if s.durationSinceStart() > subscriptionTimeout {
					s.maybeRecordError(m.params.Telemetry, m.params.Participant.ID(), err, true)
				}
			case ErrPublisherNotConnected, ErrTrackNotFound:
				// publisher left or track was unpublished, if after timeout, we'd unsubscribe
				// from it. this is the *only* case we'd change desired state
				if s.durationSinceStart() > notFoundTimeout {
					s.maybeRecordError(m.params.Telemetry, m.params.Participant.ID(), err, true)
					m.params.Logger.Infow("unsubscribing track since track isn't available",
						"trackID", s.trackID,
						"publisherID", s.getPublisherID(),
						"publisherIdentity", s.getPublisherIdentity(),
					)
					s.setDesired(false)
					m.queueReconcile(s.trackID)
				}
			default:
				// all other errors
				m.params.Logger.Warnw("failed to subscribe", err,
					"attempt", s.numAttempts.Load(),
					"trackID", s.trackID,
				)
				// after the timeout, surface the failure so the caller can e.g. trigger a full reconnect
				if s.durationSinceStart() > subscriptionTimeout {
					s.maybeRecordError(m.params.Telemetry, m.params.Participant.ID(), err, false)
					m.params.OnSubcriptionError(s.trackID)
				}
			}
		} else {
			s.recordAttempt(true)
		}
		return
	}
	if s.needsUnsubscribe() {
		if err := m.unsubscribe(s); err != nil {
			m.params.Logger.Errorw("failed to unsubscribe", err,
				"trackID", s.trackID,
			)
		} else {
			// successfully unsubscribed, remove from map
			m.lock.Lock()
			if !s.isDesired() {
				delete(m.subscriptions, s.trackID)
			}
			m.lock.Unlock()
		}
		return
	}
	if s.needsBind() {
		// check bound status, notify error callback if it's not bound
		if s.durationSinceStart() > subscriptionTimeout {
			m.params.Logger.Errorw("track not bound after timeout", nil,
				"trackID", s.trackID,
				"publisherID", s.getPublisherID(),
				"publisherIdentity", s.getPublisherIdentity(),
			)
			s.maybeRecordError(m.params.Telemetry, m.params.Participant.ID(), ErrTrackNotBound, false)
			m.params.OnSubcriptionError(s.trackID)
		}
	}
}
// queueReconcile triggers an immediate reconciliation; when trackID is empty (or
// unknown), the worker reconciles all subscriptions instead. The send is
// non-blocking by design: if the channel is full, the periodic ticker sweep will
// pick up the work.
func (m *SubscriptionManager) queueReconcile(trackID livekit.TrackID) {
	select {
	case m.reconcileCh <- trackID:
	default:
		// queue is full, will reconcile based on timer
	}
}
// reconcileWorker is the manager's background loop. It reconciles a single
// subscription when one is queued on reconcileCh (falling back to a full sweep if
// the queued trackID is unknown/empty), sweeps everything on every
// reconcileInterval tick, and exits — closing doneCh — when closeCh is closed.
func (m *SubscriptionManager) reconcileWorker() {
	reconcileTicker := time.NewTicker(reconcileInterval)
	defer reconcileTicker.Stop()
	// signal Close() that the worker has fully exited
	defer close(m.doneCh)
	for {
		select {
		case <-m.closeCh:
			return
		case <-reconcileTicker.C:
			m.reconcileSubscriptions()
		case trackID := <-m.reconcileCh:
			m.lock.RLock()
			s := m.subscriptions[trackID]
			m.lock.RUnlock()
			if s != nil {
				m.reconcileSubscription(s)
			} else {
				// unknown or empty trackID: reconcile everything
				m.reconcileSubscriptions()
			}
		}
	}
}
// subscribe performs one subscribe attempt for s: resolves the track through the
// configured resolver, registers for track-change notifications, verifies
// permission, attaches a DownTrack via AddSubscriber, and records the publisher in
// the subscribedTo map. Errors are returned for reconcileSubscription to classify
// and retry.
func (m *SubscriptionManager) subscribe(s *trackSubscription) error {
	s.startAttempt()
	if !m.params.Participant.CanSubscribe() {
		return ErrNoSubscribePermission
	}
	res, err := m.params.TrackResolver(m.params.Participant.Identity(), s.publisherID, s.trackID)
	if err != nil {
		return err
	}
	if res.TrackChangeNotifier != nil && s.setChangeNotifier(res.TrackChangeNotifier) {
		// set callback only when we haven't done it before
		// we set the observer before checking for existence of track, so that we may get notified when track becomes
		// available
		res.TrackChangeNotifier.AddObserver(string(m.params.Participant.ID()), func() {
			m.queueReconcile(s.trackID)
		})
	}
	track := res.Track
	if track == nil {
		return ErrTrackNotFound
	}
	// since hasPermission defaults to true, we will want to send a message to the client the first time
	// that we discover permissions were denied
	permChanged := s.setHasPermission(res.HasPermission)
	if permChanged {
		m.params.Participant.SubscriptionPermissionUpdate(s.getPublisherID(), s.trackID, res.HasPermission)
	}
	if !res.HasPermission {
		return ErrNoTrackPermission
	}
	subTrack, err := track.AddSubscriber(m.params.Participant)
	if err != nil && err != errAlreadySubscribed {
		// ignore already subscribed error
		return err
	}
	// BUGFIX: AddSubscriber yields no SubscribedTrack when it returns
	// errAlreadySubscribed; the previous code dereferenced subTrack unconditionally
	// and would panic in that case. Only wire up callbacks when a track was attached.
	if err == nil && subTrack != nil {
		subTrack.OnClose(func(willBeResumed bool) {
			m.handleSubscribedTrackClose(s, willBeResumed)
		})
		subTrack.AddOnBind(func() {
			s.setBound()
			s.maybeRecordSuccess(m.params.Telemetry, m.params.Participant.ID())
		})
		s.setSubscribedTrack(subTrack)
		if subTrack.NeedsNegotiation() {
			m.params.Participant.Negotiate(false)
		}
		go m.params.OnTrackSubscribed(subTrack)
	}
	// mark the participant as someone we've subscribed to
	firstSubscribe := false
	publisherID := s.getPublisherID()
	m.lock.Lock()
	pTracks := m.subscribedTo[publisherID]
	changedCB := m.onSubscribeStatusChanged
	if pTracks == nil {
		pTracks = make(map[livekit.TrackID]struct{})
		m.subscribedTo[publisherID] = pTracks
		firstSubscribe = true
	}
	pTracks[s.trackID] = struct{}{}
	m.lock.Unlock()
	// fire the per-publisher status callback only on the first subscribed track
	if changedCB != nil && firstSubscribe {
		go changedCB(publisherID, true)
	}
	return nil
}
// unsubscribe detaches this participant from the media track backing the
// subscription. It is a no-op when there is no active subscribed track.
func (m *SubscriptionManager) unsubscribe(s *trackSubscription) error {
	subTrack := s.getSubscribedTrack()
	if subTrack == nil {
		// nothing to do; already unsubscribed
		return nil
	}
	subTrack.MediaTrack().RemoveSubscriber(m.params.Participant.ID(), false)
	return nil
}
// DownTrack closing is how the publisher signifies that the subscription is no longer fulfilled
// this could be due to a few reasons:
// - subscriber-initiated unsubscribe
// - UpTrack was closed
// - publisher revoked permissions for the participant
// After bookkeeping and telemetry, the track is queued for reconciliation so
// the desired state (if still subscribed) can be re-established.
func (m *SubscriptionManager) handleSubscribedTrackClose(s *trackSubscription, willBeResumed bool) {
	m.params.Logger.Debugw("subscribed track closed",
		"trackID", s.trackID,
		"publisherID", s.getPublisherID(),
		"publisherIdentity", s.getPublisherIdentity(),
		"willBeResumed", willBeResumed,
	)
	subTrack := s.getSubscribedTrack()
	if subTrack == nil {
		// already cleaned up
		return
	}
	// remove from subscribedTo
	publisherID := s.getPublisherID()
	lastSubscription := false
	m.lock.Lock()
	changedCB := m.onSubscribeStatusChanged
	pTracks := m.subscribedTo[publisherID]
	if pTracks != nil {
		delete(pTracks, s.trackID)
		if len(pTracks) == 0 {
			// no remaining subscriptions to this publisher
			delete(m.subscribedTo, publisherID)
			lastSubscription = true
		}
	}
	m.lock.Unlock()
	if changedCB != nil && lastSubscription {
		// fire status change outside the lock, asynchronously
		go changedCB(publisherID, false)
	}
	// clear the close handler before detaching to avoid re-entry
	subTrack.OnClose(nil)
	s.setSubscribedTrack(nil)
	go m.params.OnTrackUnsubscribed(subTrack)
	if !willBeResumed {
		m.params.Telemetry.TrackUnsubscribed(
			context.Background(),
			m.params.Participant.ID(),
			&livekit.TrackInfo{Sid: string(s.trackID), Type: subTrack.MediaTrack().Kind()},
			!m.params.Participant.IsClosed(),
		)
		// also remove the RTP sender from the subscriber PeerConnection
		sender := subTrack.RTPSender()
		if sender != nil {
			m.params.Logger.Debugw("removing PeerConnection track",
				"publisher", subTrack.PublisherIdentity(),
				"publisherID", subTrack.PublisherID(),
				"kind", subTrack.MediaTrack().Kind(),
			)
			if err := m.params.Participant.RemoveTrackFromSubscriber(sender); err != nil {
				if _, ok := err.(*rtcerr.InvalidStateError); !ok {
					// most of these are safe to ignore, since the track state might have already
					// been set to Inactive
					m.params.Logger.Debugw("could not remove remoteTrack from forwarder",
						"error", err,
						"publisher", subTrack.PublisherIdentity(),
						"publisherID", subTrack.PublisherID(),
					)
				}
			}
		}
	}
	if !willBeResumed {
		m.params.Participant.Negotiate(false)
	}
	// re-reconcile: if the subscription is still desired, it will be re-established
	m.queueReconcile(s.trackID)
}
// trackSubscription holds the desired and current state of one track
// subscription for a single subscriber. The reconciler compares desired vs.
// actual state to drive subscribe/unsubscribe operations.
type trackSubscription struct {
	subscriberID livekit.ParticipantID
	trackID      livekit.TrackID

	lock sync.RWMutex
	// desired indicates whether the subscriber wants this track
	desired           bool
	publisherID       livekit.ParticipantID
	publisherIdentity livekit.ParticipantIdentity
	// settings are applied to the subscribed track once available
	settings *livekit.UpdateTrackSettings
	// changeNotifier delivers updates about the publisher's track
	changeNotifier types.ChangeNotifier
	// hasPermission defaults to true until a resolver says otherwise
	hasPermission   bool
	subscribedTrack types.SubscribedTrack
	// eventSent ensures success/failure telemetry fires at most once
	eventSent   atomic.Bool
	numAttempts atomic.Int32
	// bound is set when the client's answer confirms the track
	bound bool
	// subStartedAt records when the first subscribe attempt began
	subStartedAt atomic.Pointer[time.Time]
}
// newTrackSubscription creates a subscription record for trackID on behalf of
// subscriberID. Permission defaults to allowed until a resolver reports otherwise.
func newTrackSubscription(subscriberID livekit.ParticipantID, trackID livekit.TrackID) *trackSubscription {
	sub := &trackSubscription{
		subscriberID: subscriberID,
		trackID:      trackID,
	}
	// default allow
	sub.hasPermission = true
	return sub
}
// setPublisher records the identity and ID of the participant publishing this track.
func (s *trackSubscription) setPublisher(publisherIdentity livekit.ParticipantIdentity, publisherID livekit.ParticipantID) {
	s.lock.Lock()
	s.publisherID = publisherID
	s.publisherIdentity = publisherIdentity
	s.lock.Unlock()
}
// getPublisherID returns the publisher's participant ID under a read lock.
func (s *trackSubscription) getPublisherID() livekit.ParticipantID {
	s.lock.RLock()
	id := s.publisherID
	s.lock.RUnlock()
	return id
}
// getPublisherIdentity returns the publisher's identity under a read lock.
func (s *trackSubscription) getPublisherIdentity() livekit.ParticipantIdentity {
	s.lock.RLock()
	identity := s.publisherIdentity
	s.lock.RUnlock()
	return identity
}
// setDesired updates whether this subscription should exist, returning true
// only when the value actually changed.
func (s *trackSubscription) setDesired(desired bool) bool {
	s.lock.Lock()
	defer s.lock.Unlock()
	if desired == s.desired {
		return false
	}
	s.desired = desired
	if desired {
		return true
	}
	// when no longer desired, we no longer care about change notifications
	if notifier := s.changeNotifier; notifier != nil {
		notifier.RemoveObserver(string(s.subscriberID))
		s.changeNotifier = nil
	}
	return true
}
// setHasPermission records the latest permission state and reports whether it changed.
func (s *trackSubscription) setHasPermission(perm bool) bool {
	s.lock.Lock()
	defer s.lock.Unlock()
	changed := s.hasPermission != perm
	if changed {
		s.hasPermission = perm
	}
	return changed
}
// getHasPermission reports whether the subscriber is currently permitted to
// subscribe to this track.
func (s *trackSubscription) getHasPermission() bool {
	s.lock.RLock()
	perm := s.hasPermission
	s.lock.RUnlock()
	return perm
}
// isDesired reports whether the subscriber still wants this track.
func (s *trackSubscription) isDesired() bool {
	s.lock.RLock()
	desired := s.desired
	s.lock.RUnlock()
	return desired
}
// setSubscribedTrack attaches (or clears, when nil) the fulfilled subscription,
// resetting the bound flag and re-applying any previously stored settings.
func (s *trackSubscription) setSubscribedTrack(track types.SubscribedTrack) {
	s.lock.Lock()
	s.subscribedTrack = track
	s.bound = false
	pending := s.settings
	s.lock.Unlock()

	// apply settings outside the lock
	if track != nil && pending != nil {
		track.UpdateSubscriberSettings(pending)
	}
}
// getSubscribedTrack returns the currently attached subscribed track, if any.
func (s *trackSubscription) getSubscribedTrack() types.SubscribedTrack {
	s.lock.RLock()
	track := s.subscribedTrack
	s.lock.RUnlock()
	return track
}
// setChangeNotifier swaps in a new change notifier, detaching this subscriber
// from any previous one. Returns false when notifier is already current (no-op).
func (s *trackSubscription) setChangeNotifier(notifier types.ChangeNotifier) bool {
	s.lock.Lock()
	if notifier == s.changeNotifier {
		s.lock.Unlock()
		return false
	}
	prev := s.changeNotifier
	s.changeNotifier = notifier
	s.lock.Unlock()

	// detach from the old notifier outside the lock
	if prev != nil {
		prev.RemoveObserver(string(s.subscriberID))
	}
	return true
}
// setSettings stores the subscriber's track settings, applying them
// immediately when a subscribed track is already attached.
func (s *trackSubscription) setSettings(settings *livekit.UpdateTrackSettings) {
	s.lock.Lock()
	s.settings = settings
	target := s.subscribedTrack
	s.lock.Unlock()
	if target == nil {
		// settings are retained and applied on a future setSubscribedTrack
		return
	}
	target.UpdateSubscriberSettings(settings)
}
// setBound marks the subscription as bound - when we've received the client's answer.
func (s *trackSubscription) setBound() {
	s.lock.Lock()
	s.bound = true
	s.lock.Unlock()
}
// startAttempt stamps the time of the very first subscribe attempt so that
// total time-to-subscribe can be measured later.
func (s *trackSubscription) startAttempt() {
	if s.numAttempts.Load() != 0 {
		return
	}
	now := time.Now()
	s.subStartedAt.Store(&now)
}
// recordAttempt tallies a failed subscribe attempt, or resets the counter on success.
func (s *trackSubscription) recordAttempt(success bool) {
	if success {
		s.numAttempts.Store(0)
		return
	}
	s.numAttempts.Add(1)
}
// maybeRecordError emits a subscribe-failure telemetry event, at most once per
// subscription lifetime (shared guard with maybeRecordSuccess).
func (s *trackSubscription) maybeRecordError(ts telemetry.TelemetryService, pID livekit.ParticipantID, err error, isUserError bool) {
	if !s.eventSent.Swap(true) {
		ts.TrackSubscribeFailed(context.Background(), pID, s.trackID, err, isUserError)
	}
}
// maybeRecordSuccess emits a subscribed telemetry event, at most once per
// subscription lifetime, once the media track is fully resolved.
func (s *trackSubscription) maybeRecordSuccess(ts telemetry.TelemetryService, pID livekit.ParticipantID) {
	subTrack := s.getSubscribedTrack()
	if subTrack == nil {
		return
	}
	mediaTrack := subTrack.MediaTrack()
	if mediaTrack == nil {
		return
	}
	if s.eventSent.Swap(true) {
		// an event (success or failure) was already reported
		return
	}
	publisher := &livekit.ParticipantInfo{
		Sid:      string(subTrack.PublisherID()),
		Identity: string(subTrack.PublisherIdentity()),
	}
	ts.TrackSubscribed(context.Background(), pID, mediaTrack.ToProto(), publisher)
}
// durationSinceStart reports how long ago the first subscribe attempt began,
// or zero when no attempt has started.
func (s *trackSubscription) durationSinceStart() time.Duration {
	started := s.subStartedAt.Load()
	if started == nil {
		return 0
	}
	return time.Since(*started)
}
// needsSubscribe reports whether the track is desired but not yet subscribed.
func (s *trackSubscription) needsSubscribe() bool {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return s.subscribedTrack == nil && s.desired
}
// needsUnsubscribe reports whether a subscribed track lingers after desire was dropped.
func (s *trackSubscription) needsUnsubscribe() bool {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return s.subscribedTrack != nil && !s.desired
}
// needsBind reports whether a desired, subscribed track is still waiting for
// the client's answer to bind it.
func (s *trackSubscription) needsBind() bool {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return !s.bound && s.desired && s.subscribedTrack != nil
}
+393
View File
@@ -0,0 +1,393 @@
/*
* Copyright 2023 LiveKit, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rtc
import (
"sync"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/livekit/livekit-server/pkg/rtc/types"
"github.com/livekit/livekit-server/pkg/rtc/types/typesfakes"
"github.com/livekit/livekit-server/pkg/telemetry/telemetryfakes"
"github.com/livekit/livekit-server/pkg/utils"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
)
// Shorten the manager's reconcile/timeout intervals so tests settle quickly.
func init() {
	reconcileInterval = 50 * time.Millisecond
	notFoundTimeout = 200 * time.Millisecond
	subscriptionTimeout = 200 * time.Millisecond
}
const (
	// subSettleTimeout bounds how long tests wait for async subscription state to settle
	subSettleTimeout = 300 * time.Millisecond
	// subCheckInterval is the polling interval for require.Eventually checks
	subCheckInterval = 10 * time.Millisecond
)
// TestSubscribe exercises SubscriptionManager's subscribe reconciliation:
// the happy path (including resubscribe after close), permission denial with
// later recovery, and publisher departure.
func TestSubscribe(t *testing.T) {
	t.Run("happy path subscribe", func(t *testing.T) {
		sm := newTestSubscriptionManager(t)
		defer sm.Close(false)
		resolver := newTestResolver(true, nil)
		sm.params.TrackResolver = resolver.Resolve
		subCount := atomic.Int32{}
		failed := atomic.Bool{}
		sm.params.OnTrackSubscribed = func(subTrack types.SubscribedTrack) {
			subCount.Add(1)
		}
		sm.params.OnSubcriptionError = func(trackID livekit.TrackID) {
			failed.Store(true)
		}
		numParticipantSubscribed := atomic.Int32{}
		numParticipantUnsubscribed := atomic.Int32{}
		sm.OnSubscribeStatusChanged(func(pubID livekit.ParticipantID, subscribed bool) {
			if subscribed {
				numParticipantSubscribed.Add(1)
			} else {
				numParticipantUnsubscribed.Add(1)
			}
		})
		sm.SubscribeToTrack("track", "pub", "pubID")
		s := sm.subscriptions["track"]
		// desired state is set immediately; actual subscription is async
		require.True(t, s.isDesired())
		require.Nil(t, s.getSubscribedTrack())
		require.Eventually(t, func() bool {
			return subCount.Load() == 1
		}, subSettleTimeout, subCheckInterval, "track was not subscribed")
		require.NotNil(t, s.getSubscribedTrack())
		require.Len(t, sm.GetSubscribedTracks(), 1)
		require.Len(t, sm.GetSubscribedParticipants(), 1)
		require.Equal(t, "pubID", string(sm.GetSubscribedParticipants()[0]))
		// ensure telemetry events are sent
		tm := sm.params.Telemetry.(*telemetryfakes.FakeTelemetryService)
		require.Equal(t, 1, tm.TrackSubscribeRequestedCallCount())
		// ensure bound
		setTestSubscribedTrackBound(t, s.getSubscribedTrack())
		require.Eventually(t, func() bool {
			return !s.needsBind()
		}, subSettleTimeout, subCheckInterval, "track was not bound")
		// telemetry event should have been sent
		require.Equal(t, 1, tm.TrackSubscribedCallCount())
		// wait past the not-found window; no error callback should have fired
		time.Sleep(notFoundTimeout)
		require.False(t, failed.Load())
		// ensure its resilience after being closed
		setTestSubscribedTrackClosed(t, s.getSubscribedTrack(), false)
		require.True(t, s.needsSubscribe())
		require.Eventually(t, func() bool {
			return s.isDesired() && !s.needsSubscribe()
		}, subSettleTimeout, subCheckInterval, "track was not resubscribed")
		// was subscribed twice, unsubscribed once (due to close)
		require.Equal(t, int32(2), numParticipantSubscribed.Load())
		require.Equal(t, int32(1), numParticipantUnsubscribed.Load())
	})
	t.Run("no track permission", func(t *testing.T) {
		sm := newTestSubscriptionManager(t)
		defer sm.Close(false)
		resolver := newTestResolver(false, nil)
		sm.params.TrackResolver = resolver.Resolve
		failed := atomic.Bool{}
		sm.params.OnSubcriptionError = func(trackID livekit.TrackID) {
			failed.Store(true)
		}
		sm.SubscribeToTrack("track", "pub", "pubID")
		s := sm.subscriptions["track"]
		require.Eventually(t, func() bool {
			return !s.getHasPermission()
		}, subSettleTimeout, subCheckInterval, "should not have permission to subscribe")
		time.Sleep(subscriptionTimeout)
		// should not have called failed callbacks, isDesired remains unchanged
		require.True(t, s.isDesired())
		require.False(t, failed.Load())
		require.True(t, s.needsSubscribe())
		require.Len(t, sm.GetSubscribedTracks(), 0)
		// trackSubscribed telemetry not sent
		tm := sm.params.Telemetry.(*telemetryfakes.FakeTelemetryService)
		require.Equal(t, 1, tm.TrackSubscribeRequestedCallCount())
		require.Equal(t, 0, tm.TrackSubscribedCallCount())
		// give permissions now; reconciler should pick up the change
		resolver.lock.Lock()
		resolver.hasPermission = true
		resolver.lock.Unlock()
		require.Eventually(t, func() bool {
			return !s.needsSubscribe()
		}, subSettleTimeout, subCheckInterval, "should be subscribed")
		require.Len(t, sm.GetSubscribedTracks(), 1)
	})
	t.Run("publisher left", func(t *testing.T) {
		sm := newTestSubscriptionManager(t)
		defer sm.Close(false)
		resolver := newTestResolver(true, nil)
		sm.params.TrackResolver = resolver.Resolve
		failed := atomic.Bool{}
		sm.params.OnSubcriptionError = func(trackID livekit.TrackID) {
			failed.Store(true)
		}
		sm.SubscribeToTrack("track", "pub", "pubID")
		s := sm.subscriptions["track"]
		require.Eventually(t, func() bool {
			return !s.needsSubscribe()
		}, subSettleTimeout, subCheckInterval, "should be subscribed")
		// once the track can no longer be resolved, a close should end desire
		resolver.lock.Lock()
		resolver.err = ErrTrackNotFound
		resolver.lock.Unlock()
		// publisher triggers close
		setTestSubscribedTrackClosed(t, s.getSubscribedTrack(), false)
		require.Eventually(t, func() bool {
			return !s.isDesired()
		}, subSettleTimeout, subCheckInterval, "isDesired not set to false")
	})
}
// TestUnsubscribe wires up an already-subscribed trackSubscription by hand,
// then verifies UnsubscribeFromTrack tears everything down: subscription map,
// change-notifier observers, and unsubscribe telemetry.
func TestUnsubscribe(t *testing.T) {
	sm := newTestSubscriptionManager(t)
	defer sm.Close(false)
	unsubCount := atomic.Int32{}
	sm.params.OnTrackUnsubscribed = func(subTrack types.SubscribedTrack) {
		unsubCount.Add(1)
	}
	resolver := newTestResolver(true, nil)
	s := &trackSubscription{
		trackID:           "track",
		desired:           true,
		subscriberID:      sm.params.Participant.ID(),
		publisherID:       "pubID",
		publisherIdentity: "pub",
		hasPermission:     true,
		bound:             true,
	}
	// a bunch of unfortunate manual wiring
	res, err := resolver.Resolve("sub", s.publisherID, s.trackID)
	require.NoError(t, err)
	res.TrackChangeNotifier.AddObserver(string(sm.params.Participant.ID()), func() {})
	s.changeNotifier = res.TrackChangeNotifier
	st, err := res.Track.AddSubscriber(sm.params.Participant)
	require.NoError(t, err)
	s.subscribedTrack = st
	st.OnClose(func(willBeResumed bool) {
		sm.handleSubscribedTrackClose(s, willBeResumed)
	})
	// make RemoveSubscriber drive the close callback, as the real track would
	res.Track.(*typesfakes.FakeMediaTrack).RemoveSubscriberStub = func(pID livekit.ParticipantID, willBeResumed bool) {
		setTestSubscribedTrackClosed(t, st, willBeResumed)
	}
	sm.lock.Lock()
	sm.subscriptions["track"] = s
	sm.lock.Unlock()
	require.False(t, s.needsSubscribe())
	require.False(t, s.needsUnsubscribe())
	// unsubscribe
	sm.UnsubscribeFromTrack("track")
	require.False(t, s.isDesired())
	require.Eventually(t, func() bool {
		return !s.needsUnsubscribe()
	}, subSettleTimeout, subCheckInterval, "track was not unsubscribed")
	// no traces should be left
	require.Len(t, sm.GetSubscribedTracks(), 0)
	sm.lock.RLock()
	require.Len(t, sm.subscriptions, 0)
	sm.lock.RUnlock()
	require.False(t, res.TrackChangeNotifier.HasObservers())
	tm := sm.params.Telemetry.(*telemetryfakes.FakeTelemetryService)
	require.Equal(t, 1, tm.TrackUnsubscribedCallCount())
}
// TestSubscribeStatusChanged verifies the per-publisher status callback fires
// only on the first subscription to a publisher and only after the last one
// is removed, not on every track.
func TestSubscribeStatusChanged(t *testing.T) {
	sm := newTestSubscriptionManager(t)
	defer sm.Close(false)
	resolver := newTestResolver(true, nil)
	sm.params.TrackResolver = resolver.Resolve
	numParticipantSubscribed := atomic.Int32{}
	numParticipantUnsubscribed := atomic.Int32{}
	sm.OnSubscribeStatusChanged(func(pubID livekit.ParticipantID, subscribed bool) {
		if subscribed {
			numParticipantSubscribed.Add(1)
		} else {
			numParticipantUnsubscribed.Add(1)
		}
	})
	// two tracks from the same publisher
	sm.SubscribeToTrack("track1", "pub", "pubID")
	sm.SubscribeToTrack("track2", "pub", "pubID")
	s1 := sm.subscriptions["track1"]
	s2 := sm.subscriptions["track2"]
	require.Eventually(t, func() bool {
		return !s1.needsSubscribe() && !s2.needsSubscribe()
	}, subSettleTimeout, subCheckInterval, "track1 and track2 should be subscribed")
	// wire fake close/remove plumbing so unsubscribe flows like the real thing
	st1 := s1.getSubscribedTrack()
	st1.OnClose(func(willBeResumed bool) {
		sm.handleSubscribedTrackClose(s1, willBeResumed)
	})
	st2 := s2.getSubscribedTrack()
	st2.OnClose(func(willBeResumed bool) {
		sm.handleSubscribedTrackClose(s2, willBeResumed)
	})
	st1.MediaTrack().(*typesfakes.FakeMediaTrack).RemoveSubscriberStub = func(pID livekit.ParticipantID, willBeResumed bool) {
		setTestSubscribedTrackClosed(t, st1, willBeResumed)
	}
	st2.MediaTrack().(*typesfakes.FakeMediaTrack).RemoveSubscriberStub = func(pID livekit.ParticipantID, willBeResumed bool) {
		setTestSubscribedTrackClosed(t, st2, willBeResumed)
	}
	require.Equal(t, int32(1), numParticipantSubscribed.Load())
	require.Equal(t, int32(0), numParticipantUnsubscribed.Load())
	// now unsubscribe track2, no event should be fired
	sm.UnsubscribeFromTrack("track2")
	require.Eventually(t, func() bool {
		return !s2.needsUnsubscribe()
	}, subSettleTimeout, subCheckInterval, "track2 should be unsubscribed")
	require.Equal(t, int32(0), numParticipantUnsubscribed.Load())
	// unsubscribe track1, expect event
	sm.UnsubscribeFromTrack("track1")
	require.Eventually(t, func() bool {
		return !s1.needsUnsubscribe()
	}, subSettleTimeout, subCheckInterval, "track1 should be unsubscribed")
	require.Equal(t, int32(1), numParticipantUnsubscribed.Load())
}
// clients may send update subscribed settings prior to subscription events coming through
// settings should be persisted and used when the subscription does take place.
func TestUpdateSettingsBeforeSubscription(t *testing.T) {
	sm := newTestSubscriptionManager(t)
	defer sm.Close(false)
	resolver := newTestResolver(true, nil)
	sm.params.TrackResolver = resolver.Resolve
	settings := &livekit.UpdateTrackSettings{
		Disabled: true,
		Width:    100,
		Height:   100,
	}
	// settings arrive before the subscribe request
	sm.UpdateSubscribedTrackSettings("track", settings)
	sm.SubscribeToTrack("track", "pub", "pubID")
	s := sm.subscriptions["track"]
	require.Eventually(t, func() bool {
		return !s.needsSubscribe()
	}, subSettleTimeout, subCheckInterval, "track should be subscribed")
	// stored settings should have been applied exactly once on attach
	st := s.getSubscribedTrack().(*typesfakes.FakeSubscribedTrack)
	require.Equal(t, 1, st.UpdateSubscriberSettingsCallCount())
	applied := st.UpdateSubscriberSettingsArgsForCall(0)
	require.Equal(t, settings.Disabled, applied.Disabled)
	require.Equal(t, settings.Width, applied.Width)
	require.Equal(t, settings.Height, applied.Height)
}
// newTestSubscriptionManager builds a SubscriptionManager around a fake local
// participant with subscribe permission, no-op callbacks, a resolver that
// always reports not-found (tests override it), and a fake telemetry service.
func newTestSubscriptionManager(t *testing.T) *SubscriptionManager {
	p := &typesfakes.FakeLocalParticipant{}
	p.CanSubscribeReturns(true)
	p.IDReturns("subID")
	p.IdentityReturns("sub")
	return NewSubscriptionManager(SubscriptionManagerParams{
		Participant:         p,
		Logger:              logger.GetLogger(),
		OnTrackSubscribed:   func(subTrack types.SubscribedTrack) {},
		OnTrackUnsubscribed: func(subTrack types.SubscribedTrack) {},
		OnSubcriptionError:  func(trackID livekit.TrackID) {},
		TrackResolver: func(identity livekit.ParticipantIdentity, pID livekit.ParticipantID, trackID livekit.TrackID) (types.MediaResolverResult, error) {
			return types.MediaResolverResult{}, ErrTrackNotFound
		},
		Telemetry: &telemetryfakes.FakeTelemetryService{},
	})
}
// testResolver is a configurable stand-in for the room's track resolver,
// fabricating fake tracks while letting tests toggle permission and errors.
type testResolver struct {
	lock sync.Mutex
	// hasPermission is reported in MediaResolverResult.HasPermission
	hasPermission bool
	// err, when set, fails every Resolve call
	err error
}
// newTestResolver builds a testResolver with the given permission and error state.
func newTestResolver(hasPermission bool, err error) *testResolver {
	r := &testResolver{err: err}
	r.hasPermission = hasPermission
	return r
}
// Resolve satisfies the TrackResolver signature: it either returns the
// configured error, or fabricates a fake media track whose AddSubscriber
// yields a matching fake subscribed track.
func (t *testResolver) Resolve(identity livekit.ParticipantIdentity, pID livekit.ParticipantID, trackID livekit.TrackID) (types.MediaResolverResult, error) {
	t.lock.Lock()
	defer t.lock.Unlock()
	if t.err != nil {
		return types.MediaResolverResult{}, t.err
	}

	subTrack := &typesfakes.FakeSubscribedTrack{}
	subTrack.IDReturns(trackID)
	subTrack.PublisherIDReturns(pID)
	subTrack.PublisherIdentityReturns(identity)

	mediaTrack := &typesfakes.FakeMediaTrack{}
	mediaTrack.AddSubscriberReturns(subTrack, nil)
	subTrack.MediaTrackReturns(mediaTrack)

	res := types.MediaResolverResult{
		Track:               mediaTrack,
		TrackChangeNotifier: utils.NewChangeNotifier(),
		HasPermission:       t.hasPermission,
	}
	return res, nil
}
// setTestSubscribedTrackBound fires every OnBind callback registered on the
// fake subscribed track, simulating the client's answer binding the track.
func setTestSubscribedTrackBound(t *testing.T, st types.SubscribedTrack) {
	fst, ok := st.(*typesfakes.FakeSubscribedTrack)
	require.True(t, ok)
	n := fst.AddOnBindCallCount()
	for i := 0; i < n; i++ {
		fst.AddOnBindArgsForCall(i)()
	}
}
// setTestSubscribedTrackClosed invokes the first registered OnClose callback,
// simulating a publisher-side close of the fake subscribed track.
func setTestSubscribedTrackClosed(t *testing.T, st types.SubscribedTrack, willBeResumed bool) {
	fst, ok := st.(*typesfakes.FakeSubscribedTrack)
	require.True(t, ok)
	closeFn := fst.OnCloseArgsForCall(0)
	closeFn(willBeResumed)
}
+3 -106
View File
@@ -30,19 +30,16 @@ type ParticipantSupervisor struct {
lock sync.RWMutex
isPublisherConnected bool
publications map[livekit.TrackID]*trackMonitor
subscriptions map[livekit.TrackID]*trackMonitor
isStopped atomic.Bool
onPublicationError func(trackID livekit.TrackID)
onSubscriptionError func(trackID livekit.TrackID)
onPublicationError func(trackID livekit.TrackID)
}
func NewParticipantSupervisor(params ParticipantSupervisorParams) *ParticipantSupervisor {
p := &ParticipantSupervisor{
params: params,
publications: make(map[livekit.TrackID]*trackMonitor),
subscriptions: make(map[livekit.TrackID]*trackMonitor),
params: params,
publications: make(map[livekit.TrackID]*trackMonitor),
}
go p.checkState()
@@ -68,20 +65,6 @@ func (p *ParticipantSupervisor) getOnPublicationError() func(trackID livekit.Tra
return p.onPublicationError
}
func (p *ParticipantSupervisor) OnSubscriptionError(f func(trackID livekit.TrackID)) {
p.lock.Lock()
defer p.lock.Unlock()
p.onSubscriptionError = f
}
func (p *ParticipantSupervisor) getOnSubscriptionError() func(trackID livekit.TrackID) {
p.lock.RLock()
defer p.lock.RUnlock()
return p.onSubscriptionError
}
func (p *ParticipantSupervisor) SetPublisherPeerConnectionConnected(isConnected bool) {
p.lock.Lock()
p.isPublisherConnected = isConnected
@@ -139,55 +122,6 @@ func (p *ParticipantSupervisor) ClearPublishedTrack(trackID livekit.TrackID, pub
p.lock.RUnlock()
}
func (p *ParticipantSupervisor) UpdateSubscription(trackID livekit.TrackID, isSubscribe bool, sourceTrack types.MediaTrack) {
p.lock.Lock()
sm, ok := p.subscriptions[trackID]
if !ok {
sm = &trackMonitor{
opMon: NewSubscriptionMonitor(SubscriptionMonitorParams{TrackID: trackID, Logger: p.params.Logger}),
}
p.subscriptions[trackID] = sm
}
sm.opMon.PostEvent(
types.OperationMonitorEventUpdateSubscription,
SubscriptionOpParams{
SourceTrack: sourceTrack,
IsSubscribe: isSubscribe,
},
)
p.lock.Unlock()
}
func (p *ParticipantSupervisor) SetSubscribedTrack(trackID livekit.TrackID, subTrack types.SubscribedTrack, sourceTrack types.MediaTrack) {
p.lock.RLock()
sm, ok := p.subscriptions[trackID]
if ok {
sm.opMon.PostEvent(
types.OperationMonitorEventSetSubscribedTrack,
UpdateSubscribedTrackParams{
SourceTrack: sourceTrack,
SubscribedTrack: subTrack,
},
)
}
p.lock.RUnlock()
}
func (p *ParticipantSupervisor) ClearSubscribedTrack(trackID livekit.TrackID, subTrack types.SubscribedTrack, sourceTrack types.MediaTrack) {
p.lock.RLock()
sm, ok := p.subscriptions[trackID]
if ok {
sm.opMon.PostEvent(
types.OperationMonitorEventClearSubscribedTrack,
UpdateSubscribedTrackParams{
SourceTrack: sourceTrack,
SubscribedTrack: subTrack,
},
)
}
p.lock.RUnlock()
}
func (p *ParticipantSupervisor) checkState() {
ticker := time.NewTicker(monitorInterval)
defer ticker.Stop()
@@ -196,7 +130,6 @@ func (p *ParticipantSupervisor) checkState() {
<-ticker.C
p.checkPublications()
p.checkSubscriptions()
}
}
@@ -235,39 +168,3 @@ func (p *ParticipantSupervisor) checkPublications() {
}
}
}
func (p *ParticipantSupervisor) checkSubscriptions() {
var erroredSubscriptions []livekit.TrackID
var removableSubscriptions []livekit.TrackID
p.lock.RLock()
for trackID, sm := range p.subscriptions {
if err := sm.opMon.Check(); err != nil {
if sm.err == nil {
p.params.Logger.Errorw("supervisor error on subscription", err, "trackID", trackID)
sm.err = err
erroredSubscriptions = append(erroredSubscriptions, trackID)
}
} else {
if sm.err != nil {
p.params.Logger.Infow("supervisor subscription recovered", "trackID", trackID)
sm.err = err
}
if sm.opMon.IsIdle() {
removableSubscriptions = append(removableSubscriptions, trackID)
}
}
}
p.lock.RUnlock()
p.lock.Lock()
for _, trackID := range removableSubscriptions {
delete(p.subscriptions, trackID)
}
p.lock.Unlock()
if onSubscriptionError := p.getOnSubscriptionError(); onSubscriptionError != nil {
for _, trackID := range erroredSubscriptions {
onSubscriptionError(trackID)
}
}
}
-189
View File
@@ -1,189 +0,0 @@
package supervisor
import (
"errors"
"sync"
"time"
"github.com/gammazero/deque"
"github.com/livekit/livekit-server/pkg/rtc/types"
"github.com/livekit/livekit-server/pkg/telemetry/prometheus"
"github.com/livekit/protocol/livekit"
"github.com/livekit/protocol/logger"
)
const (
transitionWaitDuration = 20 * time.Second
)
var (
errSubscribeTimeout = errors.New("subscribe time out")
errUnsubscribeTimeout = errors.New("unsubscribe time out")
)
type transition struct {
isSubscribe bool
at time.Time
}
type subscriptionOps struct {
desiredTransitions deque.Deque
subscribedTrack types.SubscribedTrack
}
type SubscriptionOpParams struct {
SourceTrack types.MediaTrack
IsSubscribe bool
}
type UpdateSubscribedTrackParams struct {
SourceTrack types.MediaTrack
SubscribedTrack types.SubscribedTrack
}
type SubscriptionMonitorParams struct {
TrackID livekit.TrackID
Logger logger.Logger
}
type SubscriptionMonitor struct {
params SubscriptionMonitorParams
lock sync.RWMutex
subscriptionOpsBySource map[types.MediaTrack]*subscriptionOps
}
func NewSubscriptionMonitor(params SubscriptionMonitorParams) *SubscriptionMonitor {
s := &SubscriptionMonitor{
params: params,
subscriptionOpsBySource: make(map[types.MediaTrack]*subscriptionOps),
}
return s
}
func (s *SubscriptionMonitor) PostEvent(ome types.OperationMonitorEvent, omd types.OperationMonitorData) {
switch ome {
case types.OperationMonitorEventUpdateSubscription:
s.updateSubscription(omd.(SubscriptionOpParams))
case types.OperationMonitorEventSetSubscribedTrack:
s.setSubscribedTrack(omd.(UpdateSubscribedTrackParams))
case types.OperationMonitorEventClearSubscribedTrack:
s.clearSubscribedTrack(omd.(UpdateSubscribedTrackParams))
}
}
func (s *SubscriptionMonitor) updateSubscription(params SubscriptionOpParams) {
if params.IsSubscribe {
prometheus.AddSubscribeAttempt(params.SourceTrack.Kind().String())
}
s.lock.Lock()
so := s.getOrCreateSubscriptionOpsForSource(params.SourceTrack)
so.desiredTransitions.PushBack(
&transition{
isSubscribe: params.IsSubscribe,
at: time.Now(),
},
)
s.update()
s.lock.Unlock()
}
func (s *SubscriptionMonitor) setSubscribedTrack(params UpdateSubscribedTrackParams) {
s.lock.Lock()
so := s.getOrCreateSubscriptionOpsForSource(params.SourceTrack)
so.subscribedTrack = params.SubscribedTrack
s.update()
s.lock.Unlock()
}
func (s *SubscriptionMonitor) clearSubscribedTrack(params UpdateSubscribedTrackParams) {
s.lock.Lock()
so := s.getOrCreateSubscriptionOpsForSource(params.SourceTrack)
if so.subscribedTrack == params.SubscribedTrack {
so.subscribedTrack = nil
} else {
s.params.Logger.Errorw("supervisor: mismatched subscribed track on clear", nil, "trackID", s.params.TrackID)
}
s.update()
s.lock.Unlock()
}
func (s *SubscriptionMonitor) Check() error {
s.lock.Lock()
defer s.lock.Unlock()
for _, so := range s.subscriptionOpsBySource {
var tx *transition
if so.desiredTransitions.Len() > 0 {
tx = so.desiredTransitions.Front().(*transition)
}
if tx == nil {
continue
}
if time.Since(tx.at) > transitionWaitDuration {
// timed out waiting for transition
if tx.isSubscribe {
return errSubscribeTimeout
} else {
return errUnsubscribeTimeout
}
}
}
// give more time for transition to happen
return nil
}
func (s *SubscriptionMonitor) IsIdle() bool {
s.lock.RLock()
defer s.lock.RUnlock()
return len(s.subscriptionOpsBySource) == 0
}
func (s *SubscriptionMonitor) getOrCreateSubscriptionOpsForSource(sourceTrack types.MediaTrack) *subscriptionOps {
so := s.subscriptionOpsBySource[sourceTrack]
if so == nil {
so = &subscriptionOps{}
so.desiredTransitions.SetMinCapacity(4)
s.subscriptionOpsBySource[sourceTrack] = so
}
return so
}
func (s *SubscriptionMonitor) update() {
for sourceTrack, so := range s.subscriptionOpsBySource {
for {
var tx *transition
if so.desiredTransitions.Len() > 0 {
tx = so.desiredTransitions.PopFront().(*transition)
}
if tx == nil {
break
}
if (tx.isSubscribe && so.subscribedTrack == nil) || (!tx.isSubscribe && so.subscribedTrack != nil) {
// put it back as the condition is not satisfied
so.desiredTransitions.PushFront(tx)
break
}
if tx.isSubscribe && so.subscribedTrack != nil {
prometheus.AddSubscribeSuccess(so.subscribedTrack.MediaTrack().Kind().String())
}
if so.desiredTransitions.Len() == 0 && so.subscribedTrack == nil {
delete(s.subscriptionOpsBySource, sourceTrack)
}
}
}
}
+1 -1
View File
@@ -1762,7 +1762,7 @@ func (t *PCTransport) handleICERestart(e *event) error {
return t.doICERestart()
}
// configure subscriber tranceiver for audio stereo and nack
// configure subscriber transceiver for audio stereo and nack
func configureAudioTransceiver(tr *webrtc.RTPTransceiver, stereo bool, nack bool) {
sender := tr.Sender()
if sender == nil {
+39 -39
View File
@@ -173,6 +173,7 @@ func (p ParticipantCloseReason) ToDisconnectReason() livekit.DisconnectReason {
type Participant interface {
ID() livekit.ParticipantID
Identity() livekit.ParticipantIdentity
State() livekit.ParticipantInfo_State
ToProto() *livekit.ParticipantInfo
@@ -183,8 +184,9 @@ type Participant interface {
GetPublishedTracks() []MediaTrack
RemovePublishedTrack(track MediaTrack, willBeResumed bool, shouldClose bool)
AddSubscriber(op LocalParticipant, params AddSubscriberParams) (int, error)
RemoveSubscriber(op LocalParticipant, trackID livekit.TrackID, resume bool)
// HasPermission checks permission of the subscriber by identity. Returns true if subscriber is allowed to subscribe
// to the track with trackID
HasPermission(trackID livekit.TrackID, subIdentity livekit.ParticipantIdentity) bool
// permissions
Hidden() bool
@@ -232,7 +234,7 @@ type LocalParticipant interface {
GetAdaptiveStream() bool
ProtocolVersion() ProtocolVersion
ConnectedAt() time.Time
State() livekit.ParticipantInfo_State
IsClosed() bool
IsReady() bool
IsDisconnected() bool
IsIdle() bool
@@ -265,11 +267,14 @@ type LocalParticipant interface {
RemoveTrackFromSubscriber(sender *webrtc.RTPSender) error
// subscriptions
AddSubscribedTrack(st SubscribedTrack, sourceTrack MediaTrack)
RemoveSubscribedTrack(st SubscribedTrack, sourceTrack MediaTrack)
UpdateSubscribedTrackSettings(trackID livekit.TrackID, settings *livekit.UpdateTrackSettings) error
SubscribeToTrack(trackID livekit.TrackID, publisherIdentity livekit.ParticipantIdentity, publisherID livekit.ParticipantID)
UnsubscribeFromTrack(trackID livekit.TrackID)
UpdateSubscribedTrackSettings(trackID livekit.TrackID, settings *livekit.UpdateTrackSettings)
GetSubscribedTracks() []SubscribedTrack
VerifySubscribeParticipantInfo(pID livekit.ParticipantID, version uint32)
// WaitUntilSubscribed waits until all subscriptions have been settled, or if the timeout
// has been reached. If the timeout expires, it will return an error.
WaitUntilSubscribed(timeout time.Duration) error
// returns list of participant identities that the current participant is subscribed to
GetSubscribedParticipants() []livekit.ParticipantID
@@ -299,7 +304,7 @@ type LocalParticipant interface {
// OnParticipantUpdate - metadata or permission is updated
OnParticipantUpdate(callback func(LocalParticipant))
OnDataPacket(callback func(LocalParticipant, *livekit.DataPacket))
OnSubscribedTo(callback func(LocalParticipant, livekit.ParticipantID))
OnSubscribeStatusChanged(fn func(publisherID livekit.ParticipantID, subscribed bool))
OnClose(callback func(LocalParticipant, map[livekit.TrackID]livekit.ParticipantID))
OnClaimsChanged(callback func(LocalParticipant))
OnReceiverReport(dt *sfu.DownTrack, report *rtcp.ReceiverReport)
@@ -316,17 +321,6 @@ type LocalParticipant interface {
UncacheDownTrack(rtpTransceiver *webrtc.RTPTransceiver)
GetCachedDownTrack(trackID livekit.TrackID) (*webrtc.RTPTransceiver, sfu.DownTrackState)
EnqueueSubscribeTrack(trackID livekit.TrackID, sourceTrack MediaTrack, isRelayed bool, f func(sub LocalParticipant) error) bool
EnqueueUnsubscribeTrack(
trackID livekit.TrackID,
sourceTrack MediaTrack,
isRelayed bool,
willBeResumed bool,
f func(subscriberID livekit.ParticipantID, willBeResumed bool) error,
) bool
ProcessSubscriptionRequestsQueue(trackID livekit.TrackID)
ClearInProgressAndProcessSubscriptionRequestsQueue(trackID livekit.TrackID)
SetICEConfig(iceConfig *livekit.ICEConfig)
OnICEConfigChanged(callback func(participant LocalParticipant, iceConfig *livekit.ICEConfig))
@@ -341,12 +335,12 @@ type Room interface {
Name() livekit.RoomName
ID() livekit.RoomID
RemoveParticipant(identity livekit.ParticipantIdentity, pID livekit.ParticipantID, reason ParticipantCloseReason)
UpdateSubscriptions(participant LocalParticipant, trackIDs []livekit.TrackID, participantTracks []*livekit.ParticipantTracks, subscribe bool) error
UpdateSubscriptions(participant LocalParticipant, trackIDs []livekit.TrackID, participantTracks []*livekit.ParticipantTracks, subscribe bool)
UpdateSubscriptionPermission(participant LocalParticipant, permissions *livekit.SubscriptionPermission) error
SyncState(participant LocalParticipant, state *livekit.SyncState) error
SimulateScenario(participant LocalParticipant, scenario *livekit.SimulateScenario) error
SetParticipantPermission(participant LocalParticipant, permission *livekit.ParticipantPermission) error
UpdateVideoLayers(participant Participant, updateVideoLayers *livekit.UpdateVideoLayers) error
ResolveMediaTrackForSubscriber(subIdentity livekit.ParticipantIdentity, publisherID livekit.ParticipantID, trackID livekit.TrackID) (MediaResolverResult, error)
}
// MediaTrack represents a media track
@@ -376,13 +370,12 @@ type MediaTrack interface {
AddOnClose(func())
// subscribers
AddSubscriber(participant LocalParticipant) error
AddSubscriber(participant LocalParticipant) (SubscribedTrack, error)
RemoveSubscriber(participantID livekit.ParticipantID, willBeResumed bool)
IsSubscriber(subID livekit.ParticipantID) bool
RevokeDisallowedSubscribers(allowedSubscriberIdentities []livekit.ParticipantIdentity) []livekit.ParticipantIdentity
GetAllSubscribers() []livekit.ParticipantID
GetNumSubscribers() int
IsSubscribed() bool
// returns quality information that's appropriate for width & height
GetQualityForDimension(width, height uint32) livekit.VideoQuality
@@ -412,11 +405,12 @@ type LocalMediaTrack interface {
NotifySubscriberNodeMediaLoss(nodeID livekit.NodeID, fractionalLoss uint8)
}
// MediaTrack is the main interface representing a track published to the room
//
//counterfeiter:generate . SubscribedTrack
type SubscribedTrack interface {
OnBind(f func())
AddOnBind(f func())
IsBound() bool
Close(willBeResumed bool)
OnClose(f func(willBeResumed bool))
ID() livekit.TrackID
PublisherID() livekit.ParticipantID
PublisherIdentity() livekit.ParticipantIdentity
@@ -426,23 +420,37 @@ type SubscribedTrack interface {
Subscriber() LocalParticipant
DownTrack() *sfu.DownTrack
MediaTrack() MediaTrack
RTPSender() *webrtc.RTPSender
IsMuted() bool
SetPublisherMuted(muted bool)
UpdateSubscriberSettings(settings *livekit.UpdateTrackSettings)
// selects appropriate video layer according to subscriber preferences
UpdateVideoLayer()
NeedsNegotiation() bool
}
//
type ChangeNotifier interface {
AddObserver(key string, onChanged func())
RemoveObserver(key string)
HasObservers() bool
NotifyChanged()
}
type MediaResolverResult struct {
TrackChangeNotifier ChangeNotifier
Track MediaTrack
// is permission given to the requesting participant
HasPermission bool
}
// MediaTrackResolver locates a specific media track for a subscriber
type MediaTrackResolver func(livekit.ParticipantIdentity, livekit.ParticipantID, livekit.TrackID) (MediaResolverResult, error)
// Supervisor/operation monitor related definitions
//
type OperationMonitorEvent int
const (
OperationMonitorEventUpdateSubscription OperationMonitorEvent = iota
OperationMonitorEventSetSubscribedTrack
OperationMonitorEventClearSubscribedTrack
OperationMonitorEventPublisherPeerConnectionConnected
OperationMonitorEventPublisherPeerConnectionConnected OperationMonitorEvent = iota
OperationMonitorEventAddPendingPublication
OperationMonitorEventSetPublicationMute
OperationMonitorEventSetPublishedTrack
@@ -451,12 +459,6 @@ const (
func (o OperationMonitorEvent) String() string {
switch o {
case OperationMonitorEventUpdateSubscription:
return "UPDATE_SUBSCRIPTION"
case OperationMonitorEventSetSubscribedTrack:
return "SET_SUBSCRIBED_TRACK"
case OperationMonitorEventClearSubscribedTrack:
return "CLEAR_SUBSCRIBED_TRACK"
case OperationMonitorEventPublisherPeerConnectionConnected:
return "PUBLISHER_PEER_CONNECTION_CONNECTED"
case OperationMonitorEventAddPendingPublication:
@@ -480,9 +482,7 @@ type OperationMonitor interface {
IsIdle() bool
}
//
// SignalDeduper related definitions
//
type SignalDeduper interface {
Dedupe(participantKey livekit.ParticipantKey, req *livekit.SignalRequest) bool
ParticipantClosed(participantKey livekit.ParticipantKey)
@@ -15,16 +15,18 @@ type FakeLocalMediaTrack struct {
addOnCloseArgsForCall []struct {
arg1 func()
}
AddSubscriberStub func(types.LocalParticipant) error
AddSubscriberStub func(types.LocalParticipant) (types.SubscribedTrack, error)
addSubscriberMutex sync.RWMutex
addSubscriberArgsForCall []struct {
arg1 types.LocalParticipant
}
addSubscriberReturns struct {
result1 error
result1 types.SubscribedTrack
result2 error
}
addSubscriberReturnsOnCall map[int]struct {
result1 error
result1 types.SubscribedTrack
result2 error
}
ClearAllReceiversStub func(bool)
clearAllReceiversMutex sync.RWMutex
@@ -144,16 +146,6 @@ type FakeLocalMediaTrack struct {
isSimulcastReturnsOnCall map[int]struct {
result1 bool
}
IsSubscribedStub func() bool
isSubscribedMutex sync.RWMutex
isSubscribedArgsForCall []struct {
}
isSubscribedReturns struct {
result1 bool
}
isSubscribedReturnsOnCall map[int]struct {
result1 bool
}
IsSubscriberStub func(livekit.ParticipantID) bool
isSubscriberMutex sync.RWMutex
isSubscriberArgsForCall []struct {
@@ -339,7 +331,7 @@ func (fake *FakeLocalMediaTrack) AddOnCloseArgsForCall(i int) func() {
return argsForCall.arg1
}
func (fake *FakeLocalMediaTrack) AddSubscriber(arg1 types.LocalParticipant) error {
func (fake *FakeLocalMediaTrack) AddSubscriber(arg1 types.LocalParticipant) (types.SubscribedTrack, error) {
fake.addSubscriberMutex.Lock()
ret, specificReturn := fake.addSubscriberReturnsOnCall[len(fake.addSubscriberArgsForCall)]
fake.addSubscriberArgsForCall = append(fake.addSubscriberArgsForCall, struct {
@@ -353,9 +345,9 @@ func (fake *FakeLocalMediaTrack) AddSubscriber(arg1 types.LocalParticipant) erro
return stub(arg1)
}
if specificReturn {
return ret.result1
return ret.result1, ret.result2
}
return fakeReturns.result1
return fakeReturns.result1, fakeReturns.result2
}
func (fake *FakeLocalMediaTrack) AddSubscriberCallCount() int {
@@ -364,7 +356,7 @@ func (fake *FakeLocalMediaTrack) AddSubscriberCallCount() int {
return len(fake.addSubscriberArgsForCall)
}
func (fake *FakeLocalMediaTrack) AddSubscriberCalls(stub func(types.LocalParticipant) error) {
func (fake *FakeLocalMediaTrack) AddSubscriberCalls(stub func(types.LocalParticipant) (types.SubscribedTrack, error)) {
fake.addSubscriberMutex.Lock()
defer fake.addSubscriberMutex.Unlock()
fake.AddSubscriberStub = stub
@@ -377,27 +369,30 @@ func (fake *FakeLocalMediaTrack) AddSubscriberArgsForCall(i int) types.LocalPart
return argsForCall.arg1
}
func (fake *FakeLocalMediaTrack) AddSubscriberReturns(result1 error) {
func (fake *FakeLocalMediaTrack) AddSubscriberReturns(result1 types.SubscribedTrack, result2 error) {
fake.addSubscriberMutex.Lock()
defer fake.addSubscriberMutex.Unlock()
fake.AddSubscriberStub = nil
fake.addSubscriberReturns = struct {
result1 error
}{result1}
result1 types.SubscribedTrack
result2 error
}{result1, result2}
}
func (fake *FakeLocalMediaTrack) AddSubscriberReturnsOnCall(i int, result1 error) {
func (fake *FakeLocalMediaTrack) AddSubscriberReturnsOnCall(i int, result1 types.SubscribedTrack, result2 error) {
fake.addSubscriberMutex.Lock()
defer fake.addSubscriberMutex.Unlock()
fake.AddSubscriberStub = nil
if fake.addSubscriberReturnsOnCall == nil {
fake.addSubscriberReturnsOnCall = make(map[int]struct {
result1 error
result1 types.SubscribedTrack
result2 error
})
}
fake.addSubscriberReturnsOnCall[i] = struct {
result1 error
}{result1}
result1 types.SubscribedTrack
result2 error
}{result1, result2}
}
func (fake *FakeLocalMediaTrack) ClearAllReceivers(arg1 bool) {
@@ -1024,59 +1019,6 @@ func (fake *FakeLocalMediaTrack) IsSimulcastReturnsOnCall(i int, result1 bool) {
}{result1}
}
func (fake *FakeLocalMediaTrack) IsSubscribed() bool {
fake.isSubscribedMutex.Lock()
ret, specificReturn := fake.isSubscribedReturnsOnCall[len(fake.isSubscribedArgsForCall)]
fake.isSubscribedArgsForCall = append(fake.isSubscribedArgsForCall, struct {
}{})
stub := fake.IsSubscribedStub
fakeReturns := fake.isSubscribedReturns
fake.recordInvocation("IsSubscribed", []interface{}{})
fake.isSubscribedMutex.Unlock()
if stub != nil {
return stub()
}
if specificReturn {
return ret.result1
}
return fakeReturns.result1
}
func (fake *FakeLocalMediaTrack) IsSubscribedCallCount() int {
fake.isSubscribedMutex.RLock()
defer fake.isSubscribedMutex.RUnlock()
return len(fake.isSubscribedArgsForCall)
}
func (fake *FakeLocalMediaTrack) IsSubscribedCalls(stub func() bool) {
fake.isSubscribedMutex.Lock()
defer fake.isSubscribedMutex.Unlock()
fake.IsSubscribedStub = stub
}
func (fake *FakeLocalMediaTrack) IsSubscribedReturns(result1 bool) {
fake.isSubscribedMutex.Lock()
defer fake.isSubscribedMutex.Unlock()
fake.IsSubscribedStub = nil
fake.isSubscribedReturns = struct {
result1 bool
}{result1}
}
func (fake *FakeLocalMediaTrack) IsSubscribedReturnsOnCall(i int, result1 bool) {
fake.isSubscribedMutex.Lock()
defer fake.isSubscribedMutex.Unlock()
fake.IsSubscribedStub = nil
if fake.isSubscribedReturnsOnCall == nil {
fake.isSubscribedReturnsOnCall = make(map[int]struct {
result1 bool
})
}
fake.isSubscribedReturnsOnCall[i] = struct {
result1 bool
}{result1}
}
func (fake *FakeLocalMediaTrack) IsSubscriber(arg1 livekit.ParticipantID) bool {
fake.isSubscriberMutex.Lock()
ret, specificReturn := fake.isSubscriberReturnsOnCall[len(fake.isSubscriberArgsForCall)]
@@ -1941,8 +1883,6 @@ func (fake *FakeLocalMediaTrack) Invocations() map[string][][]interface{} {
defer fake.isMutedMutex.RUnlock()
fake.isSimulcastMutex.RLock()
defer fake.isSimulcastMutex.RUnlock()
fake.isSubscribedMutex.RLock()
defer fake.isSubscribedMutex.RUnlock()
fake.isSubscriberMutex.RLock()
defer fake.isSubscriberMutex.RUnlock()
fake.kindMutex.RLock()
File diff suppressed because it is too large Load Diff
+19 -79
View File
@@ -15,16 +15,18 @@ type FakeMediaTrack struct {
addOnCloseArgsForCall []struct {
arg1 func()
}
AddSubscriberStub func(types.LocalParticipant) error
AddSubscriberStub func(types.LocalParticipant) (types.SubscribedTrack, error)
addSubscriberMutex sync.RWMutex
addSubscriberArgsForCall []struct {
arg1 types.LocalParticipant
}
addSubscriberReturns struct {
result1 error
result1 types.SubscribedTrack
result2 error
}
addSubscriberReturnsOnCall map[int]struct {
result1 error
result1 types.SubscribedTrack
result2 error
}
ClearAllReceiversStub func(bool)
clearAllReceiversMutex sync.RWMutex
@@ -111,16 +113,6 @@ type FakeMediaTrack struct {
isSimulcastReturnsOnCall map[int]struct {
result1 bool
}
IsSubscribedStub func() bool
isSubscribedMutex sync.RWMutex
isSubscribedArgsForCall []struct {
}
isSubscribedReturns struct {
result1 bool
}
isSubscribedReturnsOnCall map[int]struct {
result1 bool
}
IsSubscriberStub func(livekit.ParticipantID) bool
isSubscriberMutex sync.RWMutex
isSubscriberArgsForCall []struct {
@@ -275,7 +267,7 @@ func (fake *FakeMediaTrack) AddOnCloseArgsForCall(i int) func() {
return argsForCall.arg1
}
func (fake *FakeMediaTrack) AddSubscriber(arg1 types.LocalParticipant) error {
func (fake *FakeMediaTrack) AddSubscriber(arg1 types.LocalParticipant) (types.SubscribedTrack, error) {
fake.addSubscriberMutex.Lock()
ret, specificReturn := fake.addSubscriberReturnsOnCall[len(fake.addSubscriberArgsForCall)]
fake.addSubscriberArgsForCall = append(fake.addSubscriberArgsForCall, struct {
@@ -289,9 +281,9 @@ func (fake *FakeMediaTrack) AddSubscriber(arg1 types.LocalParticipant) error {
return stub(arg1)
}
if specificReturn {
return ret.result1
return ret.result1, ret.result2
}
return fakeReturns.result1
return fakeReturns.result1, fakeReturns.result2
}
func (fake *FakeMediaTrack) AddSubscriberCallCount() int {
@@ -300,7 +292,7 @@ func (fake *FakeMediaTrack) AddSubscriberCallCount() int {
return len(fake.addSubscriberArgsForCall)
}
func (fake *FakeMediaTrack) AddSubscriberCalls(stub func(types.LocalParticipant) error) {
func (fake *FakeMediaTrack) AddSubscriberCalls(stub func(types.LocalParticipant) (types.SubscribedTrack, error)) {
fake.addSubscriberMutex.Lock()
defer fake.addSubscriberMutex.Unlock()
fake.AddSubscriberStub = stub
@@ -313,27 +305,30 @@ func (fake *FakeMediaTrack) AddSubscriberArgsForCall(i int) types.LocalParticipa
return argsForCall.arg1
}
func (fake *FakeMediaTrack) AddSubscriberReturns(result1 error) {
func (fake *FakeMediaTrack) AddSubscriberReturns(result1 types.SubscribedTrack, result2 error) {
fake.addSubscriberMutex.Lock()
defer fake.addSubscriberMutex.Unlock()
fake.AddSubscriberStub = nil
fake.addSubscriberReturns = struct {
result1 error
}{result1}
result1 types.SubscribedTrack
result2 error
}{result1, result2}
}
func (fake *FakeMediaTrack) AddSubscriberReturnsOnCall(i int, result1 error) {
func (fake *FakeMediaTrack) AddSubscriberReturnsOnCall(i int, result1 types.SubscribedTrack, result2 error) {
fake.addSubscriberMutex.Lock()
defer fake.addSubscriberMutex.Unlock()
fake.AddSubscriberStub = nil
if fake.addSubscriberReturnsOnCall == nil {
fake.addSubscriberReturnsOnCall = make(map[int]struct {
result1 error
result1 types.SubscribedTrack
result2 error
})
}
fake.addSubscriberReturnsOnCall[i] = struct {
result1 error
}{result1}
result1 types.SubscribedTrack
result2 error
}{result1, result2}
}
func (fake *FakeMediaTrack) ClearAllReceivers(arg1 bool) {
@@ -790,59 +785,6 @@ func (fake *FakeMediaTrack) IsSimulcastReturnsOnCall(i int, result1 bool) {
}{result1}
}
func (fake *FakeMediaTrack) IsSubscribed() bool {
fake.isSubscribedMutex.Lock()
ret, specificReturn := fake.isSubscribedReturnsOnCall[len(fake.isSubscribedArgsForCall)]
fake.isSubscribedArgsForCall = append(fake.isSubscribedArgsForCall, struct {
}{})
stub := fake.IsSubscribedStub
fakeReturns := fake.isSubscribedReturns
fake.recordInvocation("IsSubscribed", []interface{}{})
fake.isSubscribedMutex.Unlock()
if stub != nil {
return stub()
}
if specificReturn {
return ret.result1
}
return fakeReturns.result1
}
func (fake *FakeMediaTrack) IsSubscribedCallCount() int {
fake.isSubscribedMutex.RLock()
defer fake.isSubscribedMutex.RUnlock()
return len(fake.isSubscribedArgsForCall)
}
func (fake *FakeMediaTrack) IsSubscribedCalls(stub func() bool) {
fake.isSubscribedMutex.Lock()
defer fake.isSubscribedMutex.Unlock()
fake.IsSubscribedStub = stub
}
func (fake *FakeMediaTrack) IsSubscribedReturns(result1 bool) {
fake.isSubscribedMutex.Lock()
defer fake.isSubscribedMutex.Unlock()
fake.IsSubscribedStub = nil
fake.isSubscribedReturns = struct {
result1 bool
}{result1}
}
func (fake *FakeMediaTrack) IsSubscribedReturnsOnCall(i int, result1 bool) {
fake.isSubscribedMutex.Lock()
defer fake.isSubscribedMutex.Unlock()
fake.IsSubscribedStub = nil
if fake.isSubscribedReturnsOnCall == nil {
fake.isSubscribedReturnsOnCall = make(map[int]struct {
result1 bool
})
}
fake.isSubscribedReturnsOnCall[i] = struct {
result1 bool
}{result1}
}
func (fake *FakeMediaTrack) IsSubscriber(arg1 livekit.ParticipantID) bool {
fake.isSubscriberMutex.Lock()
ret, specificReturn := fake.isSubscriberReturnsOnCall[len(fake.isSubscriberArgsForCall)]
@@ -1521,8 +1463,6 @@ func (fake *FakeMediaTrack) Invocations() map[string][][]interface{} {
defer fake.isMutedMutex.RUnlock()
fake.isSimulcastMutex.RLock()
defer fake.isSimulcastMutex.RUnlock()
fake.isSubscribedMutex.RLock()
defer fake.isSubscribedMutex.RUnlock()
fake.isSubscriberMutex.RLock()
defer fake.isSubscriberMutex.RUnlock()
fake.kindMutex.RLock()
+141 -124
View File
@@ -9,20 +9,6 @@ import (
)
type FakeParticipant struct {
AddSubscriberStub func(types.LocalParticipant, types.AddSubscriberParams) (int, error)
addSubscriberMutex sync.RWMutex
addSubscriberArgsForCall []struct {
arg1 types.LocalParticipant
arg2 types.AddSubscriberParams
}
addSubscriberReturns struct {
result1 int
result2 error
}
addSubscriberReturnsOnCall map[int]struct {
result1 int
result2 error
}
CloseStub func(bool, types.ParticipantCloseReason) error
closeMutex sync.RWMutex
closeArgsForCall []struct {
@@ -66,6 +52,18 @@ type FakeParticipant struct {
getPublishedTracksReturnsOnCall map[int]struct {
result1 []types.MediaTrack
}
HasPermissionStub func(livekit.TrackID, livekit.ParticipantIdentity) bool
hasPermissionMutex sync.RWMutex
hasPermissionArgsForCall []struct {
arg1 livekit.TrackID
arg2 livekit.ParticipantIdentity
}
hasPermissionReturns struct {
result1 bool
}
hasPermissionReturnsOnCall map[int]struct {
result1 bool
}
HiddenStub func() bool
hiddenMutex sync.RWMutex
hiddenArgsForCall []struct {
@@ -113,13 +111,6 @@ type FakeParticipant struct {
arg2 bool
arg3 bool
}
RemoveSubscriberStub func(types.LocalParticipant, livekit.TrackID, bool)
removeSubscriberMutex sync.RWMutex
removeSubscriberArgsForCall []struct {
arg1 types.LocalParticipant
arg2 livekit.TrackID
arg3 bool
}
SetMetadataStub func(string)
setMetadataMutex sync.RWMutex
setMetadataArgsForCall []struct {
@@ -134,6 +125,16 @@ type FakeParticipant struct {
startMutex sync.RWMutex
startArgsForCall []struct {
}
StateStub func() livekit.ParticipantInfo_State
stateMutex sync.RWMutex
stateArgsForCall []struct {
}
stateReturns struct {
result1 livekit.ParticipantInfo_State
}
stateReturnsOnCall map[int]struct {
result1 livekit.ParticipantInfo_State
}
SubscriptionPermissionStub func() (*livekit.SubscriptionPermission, *livekit.TimedVersion)
subscriptionPermissionMutex sync.RWMutex
subscriptionPermissionArgsForCall []struct {
@@ -185,71 +186,6 @@ type FakeParticipant struct {
invocationsMutex sync.RWMutex
}
func (fake *FakeParticipant) AddSubscriber(arg1 types.LocalParticipant, arg2 types.AddSubscriberParams) (int, error) {
fake.addSubscriberMutex.Lock()
ret, specificReturn := fake.addSubscriberReturnsOnCall[len(fake.addSubscriberArgsForCall)]
fake.addSubscriberArgsForCall = append(fake.addSubscriberArgsForCall, struct {
arg1 types.LocalParticipant
arg2 types.AddSubscriberParams
}{arg1, arg2})
stub := fake.AddSubscriberStub
fakeReturns := fake.addSubscriberReturns
fake.recordInvocation("AddSubscriber", []interface{}{arg1, arg2})
fake.addSubscriberMutex.Unlock()
if stub != nil {
return stub(arg1, arg2)
}
if specificReturn {
return ret.result1, ret.result2
}
return fakeReturns.result1, fakeReturns.result2
}
func (fake *FakeParticipant) AddSubscriberCallCount() int {
fake.addSubscriberMutex.RLock()
defer fake.addSubscriberMutex.RUnlock()
return len(fake.addSubscriberArgsForCall)
}
func (fake *FakeParticipant) AddSubscriberCalls(stub func(types.LocalParticipant, types.AddSubscriberParams) (int, error)) {
fake.addSubscriberMutex.Lock()
defer fake.addSubscriberMutex.Unlock()
fake.AddSubscriberStub = stub
}
func (fake *FakeParticipant) AddSubscriberArgsForCall(i int) (types.LocalParticipant, types.AddSubscriberParams) {
fake.addSubscriberMutex.RLock()
defer fake.addSubscriberMutex.RUnlock()
argsForCall := fake.addSubscriberArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2
}
func (fake *FakeParticipant) AddSubscriberReturns(result1 int, result2 error) {
fake.addSubscriberMutex.Lock()
defer fake.addSubscriberMutex.Unlock()
fake.AddSubscriberStub = nil
fake.addSubscriberReturns = struct {
result1 int
result2 error
}{result1, result2}
}
func (fake *FakeParticipant) AddSubscriberReturnsOnCall(i int, result1 int, result2 error) {
fake.addSubscriberMutex.Lock()
defer fake.addSubscriberMutex.Unlock()
fake.AddSubscriberStub = nil
if fake.addSubscriberReturnsOnCall == nil {
fake.addSubscriberReturnsOnCall = make(map[int]struct {
result1 int
result2 error
})
}
fake.addSubscriberReturnsOnCall[i] = struct {
result1 int
result2 error
}{result1, result2}
}
func (fake *FakeParticipant) Close(arg1 bool, arg2 types.ParticipantCloseReason) error {
fake.closeMutex.Lock()
ret, specificReturn := fake.closeReturnsOnCall[len(fake.closeArgsForCall)]
@@ -479,6 +415,68 @@ func (fake *FakeParticipant) GetPublishedTracksReturnsOnCall(i int, result1 []ty
}{result1}
}
func (fake *FakeParticipant) HasPermission(arg1 livekit.TrackID, arg2 livekit.ParticipantIdentity) bool {
fake.hasPermissionMutex.Lock()
ret, specificReturn := fake.hasPermissionReturnsOnCall[len(fake.hasPermissionArgsForCall)]
fake.hasPermissionArgsForCall = append(fake.hasPermissionArgsForCall, struct {
arg1 livekit.TrackID
arg2 livekit.ParticipantIdentity
}{arg1, arg2})
stub := fake.HasPermissionStub
fakeReturns := fake.hasPermissionReturns
fake.recordInvocation("HasPermission", []interface{}{arg1, arg2})
fake.hasPermissionMutex.Unlock()
if stub != nil {
return stub(arg1, arg2)
}
if specificReturn {
return ret.result1
}
return fakeReturns.result1
}
func (fake *FakeParticipant) HasPermissionCallCount() int {
fake.hasPermissionMutex.RLock()
defer fake.hasPermissionMutex.RUnlock()
return len(fake.hasPermissionArgsForCall)
}
func (fake *FakeParticipant) HasPermissionCalls(stub func(livekit.TrackID, livekit.ParticipantIdentity) bool) {
fake.hasPermissionMutex.Lock()
defer fake.hasPermissionMutex.Unlock()
fake.HasPermissionStub = stub
}
func (fake *FakeParticipant) HasPermissionArgsForCall(i int) (livekit.TrackID, livekit.ParticipantIdentity) {
fake.hasPermissionMutex.RLock()
defer fake.hasPermissionMutex.RUnlock()
argsForCall := fake.hasPermissionArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2
}
func (fake *FakeParticipant) HasPermissionReturns(result1 bool) {
fake.hasPermissionMutex.Lock()
defer fake.hasPermissionMutex.Unlock()
fake.HasPermissionStub = nil
fake.hasPermissionReturns = struct {
result1 bool
}{result1}
}
func (fake *FakeParticipant) HasPermissionReturnsOnCall(i int, result1 bool) {
fake.hasPermissionMutex.Lock()
defer fake.hasPermissionMutex.Unlock()
fake.HasPermissionStub = nil
if fake.hasPermissionReturnsOnCall == nil {
fake.hasPermissionReturnsOnCall = make(map[int]struct {
result1 bool
})
}
fake.hasPermissionReturnsOnCall[i] = struct {
result1 bool
}{result1}
}
func (fake *FakeParticipant) Hidden() bool {
fake.hiddenMutex.Lock()
ret, specificReturn := fake.hiddenReturnsOnCall[len(fake.hiddenArgsForCall)]
@@ -725,40 +723,6 @@ func (fake *FakeParticipant) RemovePublishedTrackArgsForCall(i int) (types.Media
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
}
func (fake *FakeParticipant) RemoveSubscriber(arg1 types.LocalParticipant, arg2 livekit.TrackID, arg3 bool) {
fake.removeSubscriberMutex.Lock()
fake.removeSubscriberArgsForCall = append(fake.removeSubscriberArgsForCall, struct {
arg1 types.LocalParticipant
arg2 livekit.TrackID
arg3 bool
}{arg1, arg2, arg3})
stub := fake.RemoveSubscriberStub
fake.recordInvocation("RemoveSubscriber", []interface{}{arg1, arg2, arg3})
fake.removeSubscriberMutex.Unlock()
if stub != nil {
fake.RemoveSubscriberStub(arg1, arg2, arg3)
}
}
func (fake *FakeParticipant) RemoveSubscriberCallCount() int {
fake.removeSubscriberMutex.RLock()
defer fake.removeSubscriberMutex.RUnlock()
return len(fake.removeSubscriberArgsForCall)
}
func (fake *FakeParticipant) RemoveSubscriberCalls(stub func(types.LocalParticipant, livekit.TrackID, bool)) {
fake.removeSubscriberMutex.Lock()
defer fake.removeSubscriberMutex.Unlock()
fake.RemoveSubscriberStub = stub
}
func (fake *FakeParticipant) RemoveSubscriberArgsForCall(i int) (types.LocalParticipant, livekit.TrackID, bool) {
fake.removeSubscriberMutex.RLock()
defer fake.removeSubscriberMutex.RUnlock()
argsForCall := fake.removeSubscriberArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
}
func (fake *FakeParticipant) SetMetadata(arg1 string) {
fake.setMetadataMutex.Lock()
fake.setMetadataArgsForCall = append(fake.setMetadataArgsForCall, struct {
@@ -847,6 +811,59 @@ func (fake *FakeParticipant) StartCalls(stub func()) {
fake.StartStub = stub
}
func (fake *FakeParticipant) State() livekit.ParticipantInfo_State {
fake.stateMutex.Lock()
ret, specificReturn := fake.stateReturnsOnCall[len(fake.stateArgsForCall)]
fake.stateArgsForCall = append(fake.stateArgsForCall, struct {
}{})
stub := fake.StateStub
fakeReturns := fake.stateReturns
fake.recordInvocation("State", []interface{}{})
fake.stateMutex.Unlock()
if stub != nil {
return stub()
}
if specificReturn {
return ret.result1
}
return fakeReturns.result1
}
func (fake *FakeParticipant) StateCallCount() int {
fake.stateMutex.RLock()
defer fake.stateMutex.RUnlock()
return len(fake.stateArgsForCall)
}
func (fake *FakeParticipant) StateCalls(stub func() livekit.ParticipantInfo_State) {
fake.stateMutex.Lock()
defer fake.stateMutex.Unlock()
fake.StateStub = stub
}
func (fake *FakeParticipant) StateReturns(result1 livekit.ParticipantInfo_State) {
fake.stateMutex.Lock()
defer fake.stateMutex.Unlock()
fake.StateStub = nil
fake.stateReturns = struct {
result1 livekit.ParticipantInfo_State
}{result1}
}
func (fake *FakeParticipant) StateReturnsOnCall(i int, result1 livekit.ParticipantInfo_State) {
fake.stateMutex.Lock()
defer fake.stateMutex.Unlock()
fake.StateStub = nil
if fake.stateReturnsOnCall == nil {
fake.stateReturnsOnCall = make(map[int]struct {
result1 livekit.ParticipantInfo_State
})
}
fake.stateReturnsOnCall[i] = struct {
result1 livekit.ParticipantInfo_State
}{result1}
}
func (fake *FakeParticipant) SubscriptionPermission() (*livekit.SubscriptionPermission, *livekit.TimedVersion) {
fake.subscriptionPermissionMutex.Lock()
ret, specificReturn := fake.subscriptionPermissionReturnsOnCall[len(fake.subscriptionPermissionArgsForCall)]
@@ -1084,8 +1101,6 @@ func (fake *FakeParticipant) UpdateVideoLayersReturnsOnCall(i int, result1 error
func (fake *FakeParticipant) Invocations() map[string][][]interface{} {
fake.invocationsMutex.RLock()
defer fake.invocationsMutex.RUnlock()
fake.addSubscriberMutex.RLock()
defer fake.addSubscriberMutex.RUnlock()
fake.closeMutex.RLock()
defer fake.closeMutex.RUnlock()
fake.debugInfoMutex.RLock()
@@ -1094,6 +1109,8 @@ func (fake *FakeParticipant) Invocations() map[string][][]interface{} {
defer fake.getPublishedTrackMutex.RUnlock()
fake.getPublishedTracksMutex.RLock()
defer fake.getPublishedTracksMutex.RUnlock()
fake.hasPermissionMutex.RLock()
defer fake.hasPermissionMutex.RUnlock()
fake.hiddenMutex.RLock()
defer fake.hiddenMutex.RUnlock()
fake.iDMutex.RLock()
@@ -1104,14 +1121,14 @@ func (fake *FakeParticipant) Invocations() map[string][][]interface{} {
defer fake.isRecorderMutex.RUnlock()
fake.removePublishedTrackMutex.RLock()
defer fake.removePublishedTrackMutex.RUnlock()
fake.removeSubscriberMutex.RLock()
defer fake.removeSubscriberMutex.RUnlock()
fake.setMetadataMutex.RLock()
defer fake.setMetadataMutex.RUnlock()
fake.setNameMutex.RLock()
defer fake.setNameMutex.RUnlock()
fake.startMutex.RLock()
defer fake.startMutex.RUnlock()
fake.stateMutex.RLock()
defer fake.stateMutex.RUnlock()
fake.subscriptionPermissionMutex.RLock()
defer fake.subscriptionPermissionMutex.RUnlock()
fake.toProtoMutex.RLock()
+66 -94
View File
@@ -36,17 +36,20 @@ type FakeRoom struct {
arg2 livekit.ParticipantID
arg3 types.ParticipantCloseReason
}
SetParticipantPermissionStub func(types.LocalParticipant, *livekit.ParticipantPermission) error
setParticipantPermissionMutex sync.RWMutex
setParticipantPermissionArgsForCall []struct {
arg1 types.LocalParticipant
arg2 *livekit.ParticipantPermission
ResolveMediaTrackForSubscriberStub func(livekit.ParticipantIdentity, livekit.ParticipantID, livekit.TrackID) (types.MediaResolverResult, error)
resolveMediaTrackForSubscriberMutex sync.RWMutex
resolveMediaTrackForSubscriberArgsForCall []struct {
arg1 livekit.ParticipantIdentity
arg2 livekit.ParticipantID
arg3 livekit.TrackID
}
setParticipantPermissionReturns struct {
result1 error
resolveMediaTrackForSubscriberReturns struct {
result1 types.MediaResolverResult
result2 error
}
setParticipantPermissionReturnsOnCall map[int]struct {
result1 error
resolveMediaTrackForSubscriberReturnsOnCall map[int]struct {
result1 types.MediaResolverResult
result2 error
}
SimulateScenarioStub func(types.LocalParticipant, *livekit.SimulateScenario) error
simulateScenarioMutex sync.RWMutex
@@ -84,7 +87,7 @@ type FakeRoom struct {
updateSubscriptionPermissionReturnsOnCall map[int]struct {
result1 error
}
UpdateSubscriptionsStub func(types.LocalParticipant, []livekit.TrackID, []*livekit.ParticipantTracks, bool) error
UpdateSubscriptionsStub func(types.LocalParticipant, []livekit.TrackID, []*livekit.ParticipantTracks, bool)
updateSubscriptionsMutex sync.RWMutex
updateSubscriptionsArgsForCall []struct {
arg1 types.LocalParticipant
@@ -92,12 +95,6 @@ type FakeRoom struct {
arg3 []*livekit.ParticipantTracks
arg4 bool
}
updateSubscriptionsReturns struct {
result1 error
}
updateSubscriptionsReturnsOnCall map[int]struct {
result1 error
}
UpdateVideoLayersStub func(types.Participant, *livekit.UpdateVideoLayers) error
updateVideoLayersMutex sync.RWMutex
updateVideoLayersArgsForCall []struct {
@@ -254,66 +251,70 @@ func (fake *FakeRoom) RemoveParticipantArgsForCall(i int) (livekit.ParticipantId
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
}
func (fake *FakeRoom) SetParticipantPermission(arg1 types.LocalParticipant, arg2 *livekit.ParticipantPermission) error {
fake.setParticipantPermissionMutex.Lock()
ret, specificReturn := fake.setParticipantPermissionReturnsOnCall[len(fake.setParticipantPermissionArgsForCall)]
fake.setParticipantPermissionArgsForCall = append(fake.setParticipantPermissionArgsForCall, struct {
arg1 types.LocalParticipant
arg2 *livekit.ParticipantPermission
}{arg1, arg2})
stub := fake.SetParticipantPermissionStub
fakeReturns := fake.setParticipantPermissionReturns
fake.recordInvocation("SetParticipantPermission", []interface{}{arg1, arg2})
fake.setParticipantPermissionMutex.Unlock()
func (fake *FakeRoom) ResolveMediaTrackForSubscriber(arg1 livekit.ParticipantIdentity, arg2 livekit.ParticipantID, arg3 livekit.TrackID) (types.MediaResolverResult, error) {
fake.resolveMediaTrackForSubscriberMutex.Lock()
ret, specificReturn := fake.resolveMediaTrackForSubscriberReturnsOnCall[len(fake.resolveMediaTrackForSubscriberArgsForCall)]
fake.resolveMediaTrackForSubscriberArgsForCall = append(fake.resolveMediaTrackForSubscriberArgsForCall, struct {
arg1 livekit.ParticipantIdentity
arg2 livekit.ParticipantID
arg3 livekit.TrackID
}{arg1, arg2, arg3})
stub := fake.ResolveMediaTrackForSubscriberStub
fakeReturns := fake.resolveMediaTrackForSubscriberReturns
fake.recordInvocation("ResolveMediaTrackForSubscriber", []interface{}{arg1, arg2, arg3})
fake.resolveMediaTrackForSubscriberMutex.Unlock()
if stub != nil {
return stub(arg1, arg2)
return stub(arg1, arg2, arg3)
}
if specificReturn {
return ret.result1
return ret.result1, ret.result2
}
return fakeReturns.result1
return fakeReturns.result1, fakeReturns.result2
}
func (fake *FakeRoom) SetParticipantPermissionCallCount() int {
fake.setParticipantPermissionMutex.RLock()
defer fake.setParticipantPermissionMutex.RUnlock()
return len(fake.setParticipantPermissionArgsForCall)
func (fake *FakeRoom) ResolveMediaTrackForSubscriberCallCount() int {
fake.resolveMediaTrackForSubscriberMutex.RLock()
defer fake.resolveMediaTrackForSubscriberMutex.RUnlock()
return len(fake.resolveMediaTrackForSubscriberArgsForCall)
}
func (fake *FakeRoom) SetParticipantPermissionCalls(stub func(types.LocalParticipant, *livekit.ParticipantPermission) error) {
fake.setParticipantPermissionMutex.Lock()
defer fake.setParticipantPermissionMutex.Unlock()
fake.SetParticipantPermissionStub = stub
func (fake *FakeRoom) ResolveMediaTrackForSubscriberCalls(stub func(livekit.ParticipantIdentity, livekit.ParticipantID, livekit.TrackID) (types.MediaResolverResult, error)) {
fake.resolveMediaTrackForSubscriberMutex.Lock()
defer fake.resolveMediaTrackForSubscriberMutex.Unlock()
fake.ResolveMediaTrackForSubscriberStub = stub
}
func (fake *FakeRoom) SetParticipantPermissionArgsForCall(i int) (types.LocalParticipant, *livekit.ParticipantPermission) {
fake.setParticipantPermissionMutex.RLock()
defer fake.setParticipantPermissionMutex.RUnlock()
argsForCall := fake.setParticipantPermissionArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2
func (fake *FakeRoom) ResolveMediaTrackForSubscriberArgsForCall(i int) (livekit.ParticipantIdentity, livekit.ParticipantID, livekit.TrackID) {
fake.resolveMediaTrackForSubscriberMutex.RLock()
defer fake.resolveMediaTrackForSubscriberMutex.RUnlock()
argsForCall := fake.resolveMediaTrackForSubscriberArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
}
func (fake *FakeRoom) SetParticipantPermissionReturns(result1 error) {
fake.setParticipantPermissionMutex.Lock()
defer fake.setParticipantPermissionMutex.Unlock()
fake.SetParticipantPermissionStub = nil
fake.setParticipantPermissionReturns = struct {
result1 error
}{result1}
func (fake *FakeRoom) ResolveMediaTrackForSubscriberReturns(result1 types.MediaResolverResult, result2 error) {
fake.resolveMediaTrackForSubscriberMutex.Lock()
defer fake.resolveMediaTrackForSubscriberMutex.Unlock()
fake.ResolveMediaTrackForSubscriberStub = nil
fake.resolveMediaTrackForSubscriberReturns = struct {
result1 types.MediaResolverResult
result2 error
}{result1, result2}
}
func (fake *FakeRoom) SetParticipantPermissionReturnsOnCall(i int, result1 error) {
fake.setParticipantPermissionMutex.Lock()
defer fake.setParticipantPermissionMutex.Unlock()
fake.SetParticipantPermissionStub = nil
if fake.setParticipantPermissionReturnsOnCall == nil {
fake.setParticipantPermissionReturnsOnCall = make(map[int]struct {
result1 error
func (fake *FakeRoom) ResolveMediaTrackForSubscriberReturnsOnCall(i int, result1 types.MediaResolverResult, result2 error) {
fake.resolveMediaTrackForSubscriberMutex.Lock()
defer fake.resolveMediaTrackForSubscriberMutex.Unlock()
fake.ResolveMediaTrackForSubscriberStub = nil
if fake.resolveMediaTrackForSubscriberReturnsOnCall == nil {
fake.resolveMediaTrackForSubscriberReturnsOnCall = make(map[int]struct {
result1 types.MediaResolverResult
result2 error
})
}
fake.setParticipantPermissionReturnsOnCall[i] = struct {
result1 error
}{result1}
fake.resolveMediaTrackForSubscriberReturnsOnCall[i] = struct {
result1 types.MediaResolverResult
result2 error
}{result1, result2}
}
func (fake *FakeRoom) SimulateScenario(arg1 types.LocalParticipant, arg2 *livekit.SimulateScenario) error {
@@ -502,7 +503,7 @@ func (fake *FakeRoom) UpdateSubscriptionPermissionReturnsOnCall(i int, result1 e
}{result1}
}
func (fake *FakeRoom) UpdateSubscriptions(arg1 types.LocalParticipant, arg2 []livekit.TrackID, arg3 []*livekit.ParticipantTracks, arg4 bool) error {
func (fake *FakeRoom) UpdateSubscriptions(arg1 types.LocalParticipant, arg2 []livekit.TrackID, arg3 []*livekit.ParticipantTracks, arg4 bool) {
var arg2Copy []livekit.TrackID
if arg2 != nil {
arg2Copy = make([]livekit.TrackID, len(arg2))
@@ -514,7 +515,6 @@ func (fake *FakeRoom) UpdateSubscriptions(arg1 types.LocalParticipant, arg2 []li
copy(arg3Copy, arg3)
}
fake.updateSubscriptionsMutex.Lock()
ret, specificReturn := fake.updateSubscriptionsReturnsOnCall[len(fake.updateSubscriptionsArgsForCall)]
fake.updateSubscriptionsArgsForCall = append(fake.updateSubscriptionsArgsForCall, struct {
arg1 types.LocalParticipant
arg2 []livekit.TrackID
@@ -522,16 +522,11 @@ func (fake *FakeRoom) UpdateSubscriptions(arg1 types.LocalParticipant, arg2 []li
arg4 bool
}{arg1, arg2Copy, arg3Copy, arg4})
stub := fake.UpdateSubscriptionsStub
fakeReturns := fake.updateSubscriptionsReturns
fake.recordInvocation("UpdateSubscriptions", []interface{}{arg1, arg2Copy, arg3Copy, arg4})
fake.updateSubscriptionsMutex.Unlock()
if stub != nil {
return stub(arg1, arg2, arg3, arg4)
fake.UpdateSubscriptionsStub(arg1, arg2, arg3, arg4)
}
if specificReturn {
return ret.result1
}
return fakeReturns.result1
}
func (fake *FakeRoom) UpdateSubscriptionsCallCount() int {
@@ -540,7 +535,7 @@ func (fake *FakeRoom) UpdateSubscriptionsCallCount() int {
return len(fake.updateSubscriptionsArgsForCall)
}
func (fake *FakeRoom) UpdateSubscriptionsCalls(stub func(types.LocalParticipant, []livekit.TrackID, []*livekit.ParticipantTracks, bool) error) {
func (fake *FakeRoom) UpdateSubscriptionsCalls(stub func(types.LocalParticipant, []livekit.TrackID, []*livekit.ParticipantTracks, bool)) {
fake.updateSubscriptionsMutex.Lock()
defer fake.updateSubscriptionsMutex.Unlock()
fake.UpdateSubscriptionsStub = stub
@@ -553,29 +548,6 @@ func (fake *FakeRoom) UpdateSubscriptionsArgsForCall(i int) (types.LocalParticip
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4
}
func (fake *FakeRoom) UpdateSubscriptionsReturns(result1 error) {
fake.updateSubscriptionsMutex.Lock()
defer fake.updateSubscriptionsMutex.Unlock()
fake.UpdateSubscriptionsStub = nil
fake.updateSubscriptionsReturns = struct {
result1 error
}{result1}
}
func (fake *FakeRoom) UpdateSubscriptionsReturnsOnCall(i int, result1 error) {
fake.updateSubscriptionsMutex.Lock()
defer fake.updateSubscriptionsMutex.Unlock()
fake.UpdateSubscriptionsStub = nil
if fake.updateSubscriptionsReturnsOnCall == nil {
fake.updateSubscriptionsReturnsOnCall = make(map[int]struct {
result1 error
})
}
fake.updateSubscriptionsReturnsOnCall[i] = struct {
result1 error
}{result1}
}
func (fake *FakeRoom) UpdateVideoLayers(arg1 types.Participant, arg2 *livekit.UpdateVideoLayers) error {
fake.updateVideoLayersMutex.Lock()
ret, specificReturn := fake.updateVideoLayersReturnsOnCall[len(fake.updateVideoLayersArgsForCall)]
@@ -647,8 +619,8 @@ func (fake *FakeRoom) Invocations() map[string][][]interface{} {
defer fake.nameMutex.RUnlock()
fake.removeParticipantMutex.RLock()
defer fake.removeParticipantMutex.RUnlock()
fake.setParticipantPermissionMutex.RLock()
defer fake.setParticipantPermissionMutex.RUnlock()
fake.resolveMediaTrackForSubscriberMutex.RLock()
defer fake.resolveMediaTrackForSubscriberMutex.RUnlock()
fake.simulateScenarioMutex.RLock()
defer fake.simulateScenarioMutex.RUnlock()
fake.syncStateMutex.RLock()
+301 -27
View File
@@ -7,9 +7,20 @@ import (
"github.com/livekit/livekit-server/pkg/rtc/types"
"github.com/livekit/livekit-server/pkg/sfu"
"github.com/livekit/protocol/livekit"
webrtc "github.com/pion/webrtc/v3"
)
type FakeSubscribedTrack struct {
AddOnBindStub func(func())
addOnBindMutex sync.RWMutex
addOnBindArgsForCall []struct {
arg1 func()
}
CloseStub func(bool)
closeMutex sync.RWMutex
closeArgsForCall []struct {
arg1 bool
}
DownTrackStub func() *sfu.DownTrack
downTrackMutex sync.RWMutex
downTrackArgsForCall []struct {
@@ -30,6 +41,16 @@ type FakeSubscribedTrack struct {
iDReturnsOnCall map[int]struct {
result1 livekit.TrackID
}
IsBoundStub func() bool
isBoundMutex sync.RWMutex
isBoundArgsForCall []struct {
}
isBoundReturns struct {
result1 bool
}
isBoundReturnsOnCall map[int]struct {
result1 bool
}
IsMutedStub func() bool
isMutedMutex sync.RWMutex
isMutedArgsForCall []struct {
@@ -50,10 +71,20 @@ type FakeSubscribedTrack struct {
mediaTrackReturnsOnCall map[int]struct {
result1 types.MediaTrack
}
OnBindStub func(func())
onBindMutex sync.RWMutex
onBindArgsForCall []struct {
arg1 func()
NeedsNegotiationStub func() bool
needsNegotiationMutex sync.RWMutex
needsNegotiationArgsForCall []struct {
}
needsNegotiationReturns struct {
result1 bool
}
needsNegotiationReturnsOnCall map[int]struct {
result1 bool
}
OnCloseStub func(func(willBeResumed bool))
onCloseMutex sync.RWMutex
onCloseArgsForCall []struct {
arg1 func(willBeResumed bool)
}
PublisherIDStub func() livekit.ParticipantID
publisherIDMutex sync.RWMutex
@@ -85,6 +116,16 @@ type FakeSubscribedTrack struct {
publisherVersionReturnsOnCall map[int]struct {
result1 uint32
}
RTPSenderStub func() *webrtc.RTPSender
rTPSenderMutex sync.RWMutex
rTPSenderArgsForCall []struct {
}
rTPSenderReturns struct {
result1 *webrtc.RTPSender
}
rTPSenderReturnsOnCall map[int]struct {
result1 *webrtc.RTPSender
}
SetPublisherMutedStub func(bool)
setPublisherMutedMutex sync.RWMutex
setPublisherMutedArgsForCall []struct {
@@ -133,6 +174,70 @@ type FakeSubscribedTrack struct {
invocationsMutex sync.RWMutex
}
func (fake *FakeSubscribedTrack) AddOnBind(arg1 func()) {
fake.addOnBindMutex.Lock()
fake.addOnBindArgsForCall = append(fake.addOnBindArgsForCall, struct {
arg1 func()
}{arg1})
stub := fake.AddOnBindStub
fake.recordInvocation("AddOnBind", []interface{}{arg1})
fake.addOnBindMutex.Unlock()
if stub != nil {
fake.AddOnBindStub(arg1)
}
}
func (fake *FakeSubscribedTrack) AddOnBindCallCount() int {
fake.addOnBindMutex.RLock()
defer fake.addOnBindMutex.RUnlock()
return len(fake.addOnBindArgsForCall)
}
func (fake *FakeSubscribedTrack) AddOnBindCalls(stub func(func())) {
fake.addOnBindMutex.Lock()
defer fake.addOnBindMutex.Unlock()
fake.AddOnBindStub = stub
}
func (fake *FakeSubscribedTrack) AddOnBindArgsForCall(i int) func() {
fake.addOnBindMutex.RLock()
defer fake.addOnBindMutex.RUnlock()
argsForCall := fake.addOnBindArgsForCall[i]
return argsForCall.arg1
}
func (fake *FakeSubscribedTrack) Close(arg1 bool) {
fake.closeMutex.Lock()
fake.closeArgsForCall = append(fake.closeArgsForCall, struct {
arg1 bool
}{arg1})
stub := fake.CloseStub
fake.recordInvocation("Close", []interface{}{arg1})
fake.closeMutex.Unlock()
if stub != nil {
fake.CloseStub(arg1)
}
}
func (fake *FakeSubscribedTrack) CloseCallCount() int {
fake.closeMutex.RLock()
defer fake.closeMutex.RUnlock()
return len(fake.closeArgsForCall)
}
func (fake *FakeSubscribedTrack) CloseCalls(stub func(bool)) {
fake.closeMutex.Lock()
defer fake.closeMutex.Unlock()
fake.CloseStub = stub
}
func (fake *FakeSubscribedTrack) CloseArgsForCall(i int) bool {
fake.closeMutex.RLock()
defer fake.closeMutex.RUnlock()
argsForCall := fake.closeArgsForCall[i]
return argsForCall.arg1
}
func (fake *FakeSubscribedTrack) DownTrack() *sfu.DownTrack {
fake.downTrackMutex.Lock()
ret, specificReturn := fake.downTrackReturnsOnCall[len(fake.downTrackArgsForCall)]
@@ -239,6 +344,59 @@ func (fake *FakeSubscribedTrack) IDReturnsOnCall(i int, result1 livekit.TrackID)
}{result1}
}
func (fake *FakeSubscribedTrack) IsBound() bool {
fake.isBoundMutex.Lock()
ret, specificReturn := fake.isBoundReturnsOnCall[len(fake.isBoundArgsForCall)]
fake.isBoundArgsForCall = append(fake.isBoundArgsForCall, struct {
}{})
stub := fake.IsBoundStub
fakeReturns := fake.isBoundReturns
fake.recordInvocation("IsBound", []interface{}{})
fake.isBoundMutex.Unlock()
if stub != nil {
return stub()
}
if specificReturn {
return ret.result1
}
return fakeReturns.result1
}
func (fake *FakeSubscribedTrack) IsBoundCallCount() int {
fake.isBoundMutex.RLock()
defer fake.isBoundMutex.RUnlock()
return len(fake.isBoundArgsForCall)
}
func (fake *FakeSubscribedTrack) IsBoundCalls(stub func() bool) {
fake.isBoundMutex.Lock()
defer fake.isBoundMutex.Unlock()
fake.IsBoundStub = stub
}
func (fake *FakeSubscribedTrack) IsBoundReturns(result1 bool) {
fake.isBoundMutex.Lock()
defer fake.isBoundMutex.Unlock()
fake.IsBoundStub = nil
fake.isBoundReturns = struct {
result1 bool
}{result1}
}
func (fake *FakeSubscribedTrack) IsBoundReturnsOnCall(i int, result1 bool) {
fake.isBoundMutex.Lock()
defer fake.isBoundMutex.Unlock()
fake.IsBoundStub = nil
if fake.isBoundReturnsOnCall == nil {
fake.isBoundReturnsOnCall = make(map[int]struct {
result1 bool
})
}
fake.isBoundReturnsOnCall[i] = struct {
result1 bool
}{result1}
}
func (fake *FakeSubscribedTrack) IsMuted() bool {
fake.isMutedMutex.Lock()
ret, specificReturn := fake.isMutedReturnsOnCall[len(fake.isMutedArgsForCall)]
@@ -345,35 +503,88 @@ func (fake *FakeSubscribedTrack) MediaTrackReturnsOnCall(i int, result1 types.Me
}{result1}
}
func (fake *FakeSubscribedTrack) OnBind(arg1 func()) {
fake.onBindMutex.Lock()
fake.onBindArgsForCall = append(fake.onBindArgsForCall, struct {
arg1 func()
}{arg1})
stub := fake.OnBindStub
fake.recordInvocation("OnBind", []interface{}{arg1})
fake.onBindMutex.Unlock()
func (fake *FakeSubscribedTrack) NeedsNegotiation() bool {
fake.needsNegotiationMutex.Lock()
ret, specificReturn := fake.needsNegotiationReturnsOnCall[len(fake.needsNegotiationArgsForCall)]
fake.needsNegotiationArgsForCall = append(fake.needsNegotiationArgsForCall, struct {
}{})
stub := fake.NeedsNegotiationStub
fakeReturns := fake.needsNegotiationReturns
fake.recordInvocation("NeedsNegotiation", []interface{}{})
fake.needsNegotiationMutex.Unlock()
if stub != nil {
fake.OnBindStub(arg1)
return stub()
}
if specificReturn {
return ret.result1
}
return fakeReturns.result1
}
func (fake *FakeSubscribedTrack) NeedsNegotiationCallCount() int {
fake.needsNegotiationMutex.RLock()
defer fake.needsNegotiationMutex.RUnlock()
return len(fake.needsNegotiationArgsForCall)
}
func (fake *FakeSubscribedTrack) NeedsNegotiationCalls(stub func() bool) {
fake.needsNegotiationMutex.Lock()
defer fake.needsNegotiationMutex.Unlock()
fake.NeedsNegotiationStub = stub
}
func (fake *FakeSubscribedTrack) NeedsNegotiationReturns(result1 bool) {
fake.needsNegotiationMutex.Lock()
defer fake.needsNegotiationMutex.Unlock()
fake.NeedsNegotiationStub = nil
fake.needsNegotiationReturns = struct {
result1 bool
}{result1}
}
func (fake *FakeSubscribedTrack) NeedsNegotiationReturnsOnCall(i int, result1 bool) {
fake.needsNegotiationMutex.Lock()
defer fake.needsNegotiationMutex.Unlock()
fake.NeedsNegotiationStub = nil
if fake.needsNegotiationReturnsOnCall == nil {
fake.needsNegotiationReturnsOnCall = make(map[int]struct {
result1 bool
})
}
fake.needsNegotiationReturnsOnCall[i] = struct {
result1 bool
}{result1}
}
func (fake *FakeSubscribedTrack) OnClose(arg1 func(willBeResumed bool)) {
fake.onCloseMutex.Lock()
fake.onCloseArgsForCall = append(fake.onCloseArgsForCall, struct {
arg1 func(willBeResumed bool)
}{arg1})
stub := fake.OnCloseStub
fake.recordInvocation("OnClose", []interface{}{arg1})
fake.onCloseMutex.Unlock()
if stub != nil {
fake.OnCloseStub(arg1)
}
}
func (fake *FakeSubscribedTrack) OnBindCallCount() int {
fake.onBindMutex.RLock()
defer fake.onBindMutex.RUnlock()
return len(fake.onBindArgsForCall)
func (fake *FakeSubscribedTrack) OnCloseCallCount() int {
fake.onCloseMutex.RLock()
defer fake.onCloseMutex.RUnlock()
return len(fake.onCloseArgsForCall)
}
func (fake *FakeSubscribedTrack) OnBindCalls(stub func(func())) {
fake.onBindMutex.Lock()
defer fake.onBindMutex.Unlock()
fake.OnBindStub = stub
func (fake *FakeSubscribedTrack) OnCloseCalls(stub func(func(willBeResumed bool))) {
fake.onCloseMutex.Lock()
defer fake.onCloseMutex.Unlock()
fake.OnCloseStub = stub
}
func (fake *FakeSubscribedTrack) OnBindArgsForCall(i int) func() {
fake.onBindMutex.RLock()
defer fake.onBindMutex.RUnlock()
argsForCall := fake.onBindArgsForCall[i]
func (fake *FakeSubscribedTrack) OnCloseArgsForCall(i int) func(willBeResumed bool) {
fake.onCloseMutex.RLock()
defer fake.onCloseMutex.RUnlock()
argsForCall := fake.onCloseArgsForCall[i]
return argsForCall.arg1
}
@@ -536,6 +747,59 @@ func (fake *FakeSubscribedTrack) PublisherVersionReturnsOnCall(i int, result1 ui
}{result1}
}
func (fake *FakeSubscribedTrack) RTPSender() *webrtc.RTPSender {
fake.rTPSenderMutex.Lock()
ret, specificReturn := fake.rTPSenderReturnsOnCall[len(fake.rTPSenderArgsForCall)]
fake.rTPSenderArgsForCall = append(fake.rTPSenderArgsForCall, struct {
}{})
stub := fake.RTPSenderStub
fakeReturns := fake.rTPSenderReturns
fake.recordInvocation("RTPSender", []interface{}{})
fake.rTPSenderMutex.Unlock()
if stub != nil {
return stub()
}
if specificReturn {
return ret.result1
}
return fakeReturns.result1
}
func (fake *FakeSubscribedTrack) RTPSenderCallCount() int {
fake.rTPSenderMutex.RLock()
defer fake.rTPSenderMutex.RUnlock()
return len(fake.rTPSenderArgsForCall)
}
func (fake *FakeSubscribedTrack) RTPSenderCalls(stub func() *webrtc.RTPSender) {
fake.rTPSenderMutex.Lock()
defer fake.rTPSenderMutex.Unlock()
fake.RTPSenderStub = stub
}
func (fake *FakeSubscribedTrack) RTPSenderReturns(result1 *webrtc.RTPSender) {
fake.rTPSenderMutex.Lock()
defer fake.rTPSenderMutex.Unlock()
fake.RTPSenderStub = nil
fake.rTPSenderReturns = struct {
result1 *webrtc.RTPSender
}{result1}
}
func (fake *FakeSubscribedTrack) RTPSenderReturnsOnCall(i int, result1 *webrtc.RTPSender) {
fake.rTPSenderMutex.Lock()
defer fake.rTPSenderMutex.Unlock()
fake.RTPSenderStub = nil
if fake.rTPSenderReturnsOnCall == nil {
fake.rTPSenderReturnsOnCall = make(map[int]struct {
result1 *webrtc.RTPSender
})
}
fake.rTPSenderReturnsOnCall[i] = struct {
result1 *webrtc.RTPSender
}{result1}
}
func (fake *FakeSubscribedTrack) SetPublisherMuted(arg1 bool) {
fake.setPublisherMutedMutex.Lock()
fake.setPublisherMutedArgsForCall = append(fake.setPublisherMutedArgsForCall, struct {
@@ -786,22 +1050,32 @@ func (fake *FakeSubscribedTrack) UpdateVideoLayerCalls(stub func()) {
func (fake *FakeSubscribedTrack) Invocations() map[string][][]interface{} {
fake.invocationsMutex.RLock()
defer fake.invocationsMutex.RUnlock()
fake.addOnBindMutex.RLock()
defer fake.addOnBindMutex.RUnlock()
fake.closeMutex.RLock()
defer fake.closeMutex.RUnlock()
fake.downTrackMutex.RLock()
defer fake.downTrackMutex.RUnlock()
fake.iDMutex.RLock()
defer fake.iDMutex.RUnlock()
fake.isBoundMutex.RLock()
defer fake.isBoundMutex.RUnlock()
fake.isMutedMutex.RLock()
defer fake.isMutedMutex.RUnlock()
fake.mediaTrackMutex.RLock()
defer fake.mediaTrackMutex.RUnlock()
fake.onBindMutex.RLock()
defer fake.onBindMutex.RUnlock()
fake.needsNegotiationMutex.RLock()
defer fake.needsNegotiationMutex.RUnlock()
fake.onCloseMutex.RLock()
defer fake.onCloseMutex.RUnlock()
fake.publisherIDMutex.RLock()
defer fake.publisherIDMutex.RUnlock()
fake.publisherIdentityMutex.RLock()
defer fake.publisherIdentityMutex.RUnlock()
fake.publisherVersionMutex.RLock()
defer fake.publisherVersionMutex.RUnlock()
fake.rTPSenderMutex.RLock()
defer fake.rTPSenderMutex.RUnlock()
fake.setPublisherMutedMutex.RLock()
defer fake.setPublisherMutedMutex.RUnlock()
fake.subscriberMutex.RLock()
+12 -218
View File
@@ -33,10 +33,6 @@ type UpTrackManager struct {
subscriptionPermissionVersion *utils.TimedVersion
// subscriber permission for published tracks
subscriberPermissions map[livekit.ParticipantIdentity]*livekit.TrackPermission // subscriberIdentity => *livekit.TrackPermission
// keeps tracks of track specific subscribers who are awaiting permission
pendingSubscriptions map[livekit.TrackID][]livekit.ParticipantIdentity // trackID => []subscriberIdentity
opsQueue *utils.OpsQueue
lock sync.RWMutex
@@ -47,20 +43,15 @@ type UpTrackManager struct {
func NewUpTrackManager(params UpTrackManagerParams) *UpTrackManager {
return &UpTrackManager{
params: params,
publishedTracks: make(map[livekit.TrackID]types.MediaTrack),
pendingSubscriptions: make(map[livekit.TrackID][]livekit.ParticipantIdentity),
opsQueue: utils.NewOpsQueue(params.Logger, "utm", 20),
params: params,
publishedTracks: make(map[livekit.TrackID]types.MediaTrack),
}
}
func (u *UpTrackManager) Start() {
u.opsQueue.Start()
}
func (u *UpTrackManager) Close(willBeResumed bool) {
u.opsQueue.Stop()
u.lock.Lock()
u.closed = true
notify := len(u.publishedTracks) == 0
@@ -96,68 +87,6 @@ func (u *UpTrackManager) OnPublishedTrackUpdated(f func(track types.MediaTrack,
u.onTrackUpdated = f
}
// AddSubscriber subscribes op to all publishedTracks
func (u *UpTrackManager) AddSubscriber(sub types.LocalParticipant, params types.AddSubscriberParams) (int, error) {
u.lock.Lock()
defer u.lock.Unlock()
var tracks []types.MediaTrack
if params.AllTracks {
for _, t := range u.publishedTracks {
tracks = append(tracks, t)
}
} else {
for _, trackID := range params.TrackIDs {
track := u.getPublishedTrackLocked(trackID)
if track == nil {
continue
}
tracks = append(tracks, track)
}
}
if len(tracks) == 0 {
return 0, nil
}
var trackIDs []livekit.TrackID
for _, track := range tracks {
trackIDs = append(trackIDs, track.ID())
}
sub.GetLogger().Debugw("subscribing to tracks",
"trackID", trackIDs)
n := 0
for _, track := range tracks {
trackID := track.ID()
subscriberIdentity := sub.Identity()
if !u.hasPermissionLocked(trackID, subscriberIdentity) {
u.maybeAddPendingSubscriptionLocked(trackID, subscriberIdentity, sub, nil)
continue
}
if err := track.AddSubscriber(sub); err != nil {
return n, err
}
n += 1
u.maybeRemovePendingSubscriptionLocked(trackID, sub, true, true)
}
return n, nil
}
func (u *UpTrackManager) RemoveSubscriber(sub types.LocalParticipant, trackID livekit.TrackID, willBeResumed bool) {
u.lock.Lock()
defer u.lock.Unlock()
track := u.getPublishedTrackLocked(trackID)
if track != nil {
track.RemoveSubscriber(sub.ID(), willBeResumed)
}
u.maybeRemovePendingSubscriptionLocked(trackID, sub, false, false)
}
func (u *UpTrackManager) SetPublishedTrackMuted(trackID livekit.TrackID, muted bool) types.MediaTrack {
u.lock.RLock()
track := u.publishedTracks[trackID]
@@ -271,7 +200,6 @@ func (u *UpTrackManager) UpdateSubscriptionPermission(
}
u.lock.Unlock()
u.processPendingSubscriptions(resolverByIdentity)
u.maybeRevokeSubscriptions(resolverByIdentity)
return nil
@@ -288,6 +216,13 @@ func (u *UpTrackManager) SubscriptionPermission() (*livekit.SubscriptionPermissi
return u.subscriptionPermission, u.subscriptionPermissionVersion.ToProto()
}
func (u *UpTrackManager) HasPermission(trackID livekit.TrackID, subIdentity livekit.ParticipantIdentity) bool {
u.lock.RLock()
defer u.lock.RUnlock()
return u.hasPermissionLocked(trackID, subIdentity)
}
func (u *UpTrackManager) UpdateVideoLayers(updateVideoLayers *livekit.UpdateVideoLayers) error {
track := u.GetPublishedTrack(livekit.TrackID(updateVideoLayers.TrackSid))
if track == nil {
@@ -318,7 +253,6 @@ func (u *UpTrackManager) AddPublishedTrack(track types.MediaTrack) {
u.lock.Lock()
trackID := track.ID()
delete(u.publishedTracks, trackID)
delete(u.pendingSubscriptions, trackID)
// not modifying subscription permissions, will get reset on next update from participant
if u.closed && len(u.publishedTracks) == 0 {
@@ -424,6 +358,8 @@ func (u *UpTrackManager) hasPermissionLocked(trackID livekit.TrackID, subscriber
return false
}
// returns a list of participants that are allowed to subscribe to the track. if nil is returned, it means everyone is
// allowed to subscribe to this track
func (u *UpTrackManager) getAllowedSubscribersLocked(trackID livekit.TrackID) []livekit.ParticipantIdentity {
if u.subscriberPermissions == nil {
return nil
@@ -447,145 +383,6 @@ func (u *UpTrackManager) getAllowedSubscribersLocked(trackID livekit.TrackID) []
return allowed
}
func (u *UpTrackManager) maybeAddPendingSubscriptionLocked(
trackID livekit.TrackID,
subscriberIdentity livekit.ParticipantIdentity,
sub types.LocalParticipant,
resolver func(participantIdentity livekit.ParticipantIdentity) types.LocalParticipant,
) {
pending := u.pendingSubscriptions[trackID]
for _, identity := range pending {
if identity == subscriberIdentity {
// already pending
return
}
}
u.pendingSubscriptions[trackID] = append(u.pendingSubscriptions[trackID], subscriberIdentity)
u.params.Logger.Debugw("adding pending subscription", "subscriberIdentity", subscriberIdentity, "trackID", trackID)
u.opsQueue.Enqueue(func() {
if sub == nil {
if resolver == nil {
u.params.Logger.Warnw("no resolver", nil)
} else {
sub = resolver(subscriberIdentity)
}
}
if sub != nil {
sub.SubscriptionPermissionUpdate(u.params.SID, trackID, false)
} else {
u.params.Logger.Warnw("could not send subscription permission update, no subscriber", nil, "subscriberIdentity", subscriberIdentity)
}
})
}
func (u *UpTrackManager) maybeRemovePendingSubscriptionLocked(trackID livekit.TrackID, sub types.LocalParticipant, sendUpdate bool, forceUpdate bool) {
subscriberIdentity := sub.Identity()
found := false
pending := u.pendingSubscriptions[trackID]
n := len(pending)
for idx, identity := range pending {
if identity == subscriberIdentity {
found = true
u.pendingSubscriptions[trackID][idx] = u.pendingSubscriptions[trackID][n-1]
u.pendingSubscriptions[trackID] = u.pendingSubscriptions[trackID][:n-1]
break
}
}
if len(u.pendingSubscriptions[trackID]) == 0 {
delete(u.pendingSubscriptions, trackID)
}
if sendUpdate && (forceUpdate || found) {
if found {
u.params.Logger.Debugw("removing pending subscription", "subscriberID", sub.ID(), "trackID", trackID)
}
u.opsQueue.Enqueue(func() {
sub.SubscriptionPermissionUpdate(u.params.SID, trackID, true)
})
}
}
// creates subscriptions for tracks if permissions have been granted
func (u *UpTrackManager) processPendingSubscriptions(resolver func(participantIdentity livekit.ParticipantIdentity) types.LocalParticipant) {
type ResolvedInfo struct {
sub types.LocalParticipant
state livekit.ParticipantInfo_State
}
// gather all identites that need resolving
resolvedInfos := make(map[livekit.ParticipantIdentity]*ResolvedInfo)
u.lock.RLock()
for trackID, pending := range u.pendingSubscriptions {
track := u.getPublishedTrackLocked(trackID)
if track == nil {
// published track is gone
continue
}
for _, identity := range pending {
resolvedInfos[identity] = nil
}
}
u.lock.RUnlock()
for identity := range resolvedInfos {
sub := resolver(identity)
if sub != nil {
resolvedInfos[identity] = &ResolvedInfo{
sub: sub,
state: sub.State(),
}
}
}
// check for subscriptions that can be reinstated
u.lock.Lock()
updatedPendingSubscriptions := make(map[livekit.TrackID][]livekit.ParticipantIdentity)
for trackID, pending := range u.pendingSubscriptions {
track := u.getPublishedTrackLocked(trackID)
if track == nil {
// published track is gone
continue
}
var updatedPending []livekit.ParticipantIdentity
for _, identity := range pending {
resolvedInfo := resolvedInfos[identity]
if resolvedInfo == nil || resolvedInfo.sub == nil || resolvedInfo.state == livekit.ParticipantInfo_DISCONNECTED {
// do not keep this pending subscription as subscriber may be gone
continue
}
if !u.hasPermissionLocked(trackID, identity) {
updatedPending = append(updatedPending, identity)
continue
}
sub := resolvedInfo.sub
if err := track.AddSubscriber(sub); err != nil {
u.params.Logger.Errorw("error reinstating subscription", err, "subscirberID", sub.ID(), "trackID", trackID)
// keep it in pending on error in case the error is transient
updatedPending = append(updatedPending, identity)
continue
}
u.params.Logger.Debugw("reinstating subscription", "subscriberID", sub.ID(), "trackID", trackID)
u.opsQueue.Enqueue(func() {
sub.SubscriptionPermissionUpdate(u.params.SID, trackID, true)
})
}
updatedPendingSubscriptions[trackID] = updatedPending
}
u.pendingSubscriptions = updatedPendingSubscriptions
u.lock.Unlock()
}
func (u *UpTrackManager) maybeRevokeSubscriptions(resolver func(participantIdentity livekit.ParticipantIdentity) types.LocalParticipant) {
u.lock.Lock()
defer u.lock.Unlock()
@@ -597,10 +394,7 @@ func (u *UpTrackManager) maybeRevokeSubscriptions(resolver func(participantIdent
continue
}
revoked := track.RevokeDisallowedSubscribers(allowed)
for _, subIdentity := range revoked {
u.maybeAddPendingSubscriptionLocked(trackID, subIdentity, nil, resolver)
}
track.RevokeDisallowedSubscribers(allowed)
}
}
+1 -1
View File
@@ -49,7 +49,7 @@ func NewWrappedReceiver(params WrappedReceiverParams) *WrappedReceiver {
PayloadType: 111,
})
} else if !params.DisableRed && strings.EqualFold(codecs[0].MimeType, webrtc.MimeTypeOpus) {
// if upstream is opus only and red eanbled, add red to match clients that supoort red
// if upstream is opus only and red enabled, add red to match clients that support red
codecs = append(codecs, webrtc.RTPCodecParameters{
RTPCodecCapability: redCodecCapability,
PayloadType: 63,
+5 -11
View File
@@ -323,7 +323,7 @@ func (r *RoomManager) StartSession(
AllowTCPFallback: allowFallback,
TURNSEnabled: r.config.IsTURNSEnabled(),
GetParticipantInfo: func(pID livekit.ParticipantID) *livekit.ParticipantInfo {
if p := room.GetParticipantBySid(pID); p != nil {
if p := room.GetParticipantByID(pID); p != nil {
return p.ToProto()
}
return nil
@@ -331,6 +331,7 @@ func (r *RoomManager) StartSession(
ReconnectOnPublicationError: reconnectOnPublicationError,
ReconnectOnSubscriptionError: reconnectOnSubscriptionError,
VersionGenerator: r.versionGenerator,
TrackResolver: room.ResolveMediaTrackForSubscriber,
})
if err != nil {
return err
@@ -585,10 +586,7 @@ func (r *RoomManager) handleRTCMessage(ctx context.Context, roomName livekit.Roo
participant.SetMetadata(rm.UpdateParticipant.Metadata)
}
if rm.UpdateParticipant.Permission != nil {
err := room.SetParticipantPermission(participant, rm.UpdateParticipant.Permission)
if err != nil {
pLogger.Errorw("could not update permissions", err)
}
participant.SetPermission(rm.UpdateParticipant.Permission)
}
case *livekit.RTCNodeMessage_DeleteRoom:
room.Logger.Infow("deleting room")
@@ -601,16 +599,12 @@ func (r *RoomManager) handleRTCMessage(ctx context.Context, roomName livekit.Roo
return
}
pLogger.Debugw("updating participant subscriptions")
if err := room.UpdateSubscriptions(
room.UpdateSubscriptions(
participant,
livekit.StringsAsTrackIDs(rm.UpdateSubscriptions.TrackSids),
rm.UpdateSubscriptions.ParticipantTracks,
rm.UpdateSubscriptions.Subscribe,
); err != nil {
pLogger.Warnw("could not update subscription", err,
"tracks", rm.UpdateSubscriptions.TrackSids,
"subscribe", rm.UpdateSubscriptions.Subscribe)
}
)
case *livekit.RTCNodeMessage_SendData:
pLogger.Debugw("api send data", "size", len(rm.SendData.Data))
up := &livekit.UserPacket{
+32 -4
View File
@@ -246,6 +246,8 @@ func (t *telemetryService) TrackSubscribeRequested(
publisher *livekit.ParticipantInfo,
) {
t.enqueue(func() {
prometheus.RecordTrackSubscribeAttempt()
room := t.getRoomDetails(participantID)
ev := newTrackEvent(livekit.AnalyticsEventType_TRACK_SUBSCRIBE_REQUESTED, room, participantID, track)
ev.Publisher = publisher
@@ -260,7 +262,7 @@ func (t *telemetryService) TrackSubscribed(
publisher *livekit.ParticipantInfo,
) {
t.enqueue(func() {
prometheus.AddSubscribedTrack(track.Type.String())
prometheus.RecordTrackSubscribeSuccess(track.Type.String())
room := t.getRoomDetails(participantID)
ev := newTrackEvent(livekit.AnalyticsEventType_TRACK_SUBSCRIBED, room, participantID, track)
@@ -269,12 +271,38 @@ func (t *telemetryService) TrackSubscribed(
})
}
func (t *telemetryService) TrackUnsubscribed(ctx context.Context, participantID livekit.ParticipantID, track *livekit.TrackInfo) {
func (t *telemetryService) TrackSubscribeFailed(
ctx context.Context,
participantID livekit.ParticipantID,
trackID livekit.TrackID,
err error,
isUserError bool,
) {
t.enqueue(func() {
prometheus.SubSubscribedTrack(track.Type.String())
prometheus.RecordTrackSubscribeFailure(err, isUserError)
room := t.getRoomDetails(participantID)
t.SendEvent(ctx, newTrackEvent(livekit.AnalyticsEventType_TRACK_UNSUBSCRIBED, room, participantID, track))
ev := newTrackEvent(livekit.AnalyticsEventType_TRACK_SUBSCRIBE_FAILED, room, participantID, &livekit.TrackInfo{
Sid: string(trackID),
})
ev.Error = err.Error()
t.SendEvent(ctx, ev)
})
}
func (t *telemetryService) TrackUnsubscribed(
ctx context.Context,
participantID livekit.ParticipantID,
track *livekit.TrackInfo,
shouldSendEvent bool,
) {
t.enqueue(func() {
prometheus.RecordTrackUnsubscribed(track.Type.String())
if shouldSendEvent {
room := t.getRoomDetails(participantID)
t.SendEvent(ctx, newTrackEvent(livekit.AnalyticsEventType_TRACK_UNSUBSCRIBED, room, participantID, track))
}
})
}
+5 -5
View File
@@ -156,10 +156,10 @@ func GetUpdatedNodeStats(prev *livekit.NodeStats, prevAverage *livekit.NodeStats
stats := &livekit.NodeStats{
StartedAt: prev.StartedAt,
UpdatedAt: updatedAt,
NumRooms: roomTotal.Load(),
NumClients: participantTotal.Load(),
NumTracksIn: trackPublishedTotal.Load(),
NumTracksOut: trackSubscribedTotal.Load(),
NumRooms: roomCurrent.Load(),
NumClients: participantCurrent.Load(),
NumTracksIn: trackPublishedCurrent.Load(),
NumTracksOut: trackSubscribedCurrent.Load(),
NumTrackPublishAttempts: trackPublishAttemptsNow,
NumTrackPublishSuccess: trackPublishSuccessNow,
NumTrackSubscribeAttempts: trackSubscribeAttemptsNow,
@@ -222,7 +222,7 @@ func GetUpdatedNodeStats(prev *livekit.NodeStats, prevAverage *livekit.NodeStats
if packetTotal == 0 {
stats.SysPacketsDroppedPctPerSec = 0
} else {
stats.SysPacketsDroppedPctPerSec = float32(stats.SysPacketsDroppedPerSec) / float32(packetTotal)
stats.SysPacketsDroppedPctPerSec = stats.SysPacketsDroppedPerSec / packetTotal
}
promSysDroppedPacketPctGauge.Set(float64(stats.SysPacketsDroppedPctPerSec))
}
+60 -48
View File
@@ -10,26 +10,29 @@ import (
)
var (
roomTotal atomic.Int32
participantTotal atomic.Int32
trackPublishedTotal atomic.Int32
trackSubscribedTotal atomic.Int32
roomCurrent atomic.Int32
participantCurrent atomic.Int32
trackPublishedCurrent atomic.Int32
trackSubscribedCurrent atomic.Int32
trackPublishAttempts atomic.Int32
trackPublishSuccess atomic.Int32
trackSubscribeAttempts atomic.Int32
trackSubscribeSuccess atomic.Int32
// count the number of failures that are due to user error (permissions, track doesn't exist), so we could compute
// success rate by subtracting this from total attempts
trackSubscribeUserError atomic.Int32
promRoomTotal prometheus.Gauge
promRoomDuration prometheus.Histogram
promParticipantTotal prometheus.Gauge
promTrackPublishedTotal *prometheus.GaugeVec
promTrackSubscribedTotal *prometheus.GaugeVec
promTrackPublishCounter *prometheus.CounterVec
promTrackSubscribeCounter *prometheus.CounterVec
promRoomCurrent prometheus.Gauge
promRoomDuration prometheus.Histogram
promParticipantCurrent prometheus.Gauge
promTrackPublishedCurrent *prometheus.GaugeVec
promTrackSubscribedCurrent *prometheus.GaugeVec
promTrackPublishCounter *prometheus.CounterVec
promTrackSubscribeCounter *prometheus.CounterVec
)
func initRoomStats(nodeID string, nodeType livekit.NodeType) {
promRoomTotal = prometheus.NewGauge(prometheus.GaugeOpts{
promRoomCurrent = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: livekitNamespace,
Subsystem: "room",
Name: "total",
@@ -44,19 +47,19 @@ func initRoomStats(nodeID string, nodeType livekit.NodeType) {
5, 10, 60, 5 * 60, 10 * 60, 30 * 60, 60 * 60, 2 * 60 * 60, 5 * 60 * 60, 10 * 60 * 60,
},
})
promParticipantTotal = prometheus.NewGauge(prometheus.GaugeOpts{
promParticipantCurrent = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: livekitNamespace,
Subsystem: "participant",
Name: "total",
ConstLabels: prometheus.Labels{"node_id": nodeID, "node_type": nodeType.String()},
})
promTrackPublishedTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{
promTrackPublishedCurrent = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: livekitNamespace,
Subsystem: "track",
Name: "published_total",
ConstLabels: prometheus.Labels{"node_id": nodeID, "node_type": nodeType.String()},
}, []string{"kind"})
promTrackSubscribedTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{
promTrackSubscribedCurrent = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: livekitNamespace,
Subsystem: "track",
Name: "subscribed_total",
@@ -73,48 +76,48 @@ func initRoomStats(nodeID string, nodeType livekit.NodeType) {
Subsystem: "track",
Name: "subscribe_counter",
ConstLabels: prometheus.Labels{"node_id": nodeID, "node_type": nodeType.String()},
}, []string{"kind", "state"})
}, []string{"state", "error"})
prometheus.MustRegister(promRoomTotal)
prometheus.MustRegister(promRoomCurrent)
prometheus.MustRegister(promRoomDuration)
prometheus.MustRegister(promParticipantTotal)
prometheus.MustRegister(promTrackPublishedTotal)
prometheus.MustRegister(promTrackSubscribedTotal)
prometheus.MustRegister(promParticipantCurrent)
prometheus.MustRegister(promTrackPublishedCurrent)
prometheus.MustRegister(promTrackSubscribedCurrent)
prometheus.MustRegister(promTrackPublishCounter)
prometheus.MustRegister(promTrackSubscribeCounter)
}
func RoomStarted() {
promRoomTotal.Add(1)
roomTotal.Inc()
promRoomCurrent.Add(1)
roomCurrent.Inc()
}
// RoomEnded records the room's lifetime in the duration histogram (only when a
// valid start time is known) and decrements the active-room gauges.
// Fix: drops the leftover legacy promRoomTotal/roomTotal decrements that
// duplicated the renamed Current counters.
func RoomEnded(startedAt time.Time) {
	if !startedAt.IsZero() {
		// Duration.Seconds() is the idiomatic form of the float division.
		promRoomDuration.Observe(time.Since(startedAt).Seconds())
	}
	promRoomCurrent.Sub(1)
	roomCurrent.Dec()
}
func AddParticipant() {
promParticipantTotal.Add(1)
participantTotal.Inc()
promParticipantCurrent.Add(1)
participantCurrent.Inc()
}
func SubParticipant() {
promParticipantTotal.Sub(1)
participantTotal.Dec()
promParticipantCurrent.Sub(1)
participantCurrent.Dec()
}
func AddPublishedTrack(kind string) {
promTrackPublishedTotal.WithLabelValues(kind).Add(1)
trackPublishedTotal.Inc()
promTrackPublishedCurrent.WithLabelValues(kind).Add(1)
trackPublishedCurrent.Inc()
}
func SubPublishedTrack(kind string) {
promTrackPublishedTotal.WithLabelValues(kind).Sub(1)
trackPublishedTotal.Dec()
promTrackPublishedCurrent.WithLabelValues(kind).Sub(1)
trackPublishedCurrent.Dec()
}
func AddPublishAttempt(kind string) {
@@ -127,22 +130,31 @@ func AddPublishSuccess(kind string) {
promTrackPublishCounter.WithLabelValues(kind, "success").Inc()
}
func AddSubscribedTrack(kind string) {
promTrackSubscribedTotal.WithLabelValues(kind).Add(1)
trackSubscribedTotal.Inc()
}
func RecordTrackSubscribeSuccess(kind string) {
// modify both current and total counters
promTrackSubscribedCurrent.WithLabelValues(kind).Add(1)
trackSubscribedCurrent.Inc()
func SubSubscribedTrack(kind string) {
promTrackSubscribedTotal.WithLabelValues(kind).Sub(1)
trackSubscribedTotal.Dec()
}
func AddSubscribeAttempt(kind string) {
trackSubscribeAttempts.Inc()
promTrackSubscribeCounter.WithLabelValues(kind, "attempt").Inc()
}
func AddSubscribeSuccess(kind string) {
promTrackSubscribeCounter.WithLabelValues("success", "").Inc()
trackSubscribeSuccess.Inc()
promTrackSubscribeCounter.WithLabelValues(kind, "success").Inc()
}
func RecordTrackUnsubscribed(kind string) {
// unsubscribed modifies current counter, but we leave the total values alone since they
// are used to compute rate
promTrackSubscribedCurrent.WithLabelValues(kind).Sub(1)
trackSubscribedCurrent.Dec()
}
func RecordTrackSubscribeAttempt() {
trackSubscribeAttempts.Inc()
promTrackSubscribeCounter.WithLabelValues("attempt", "").Inc()
}
// RecordTrackSubscribeFailure counts a failed subscription attempt. The error
// text is recorded as the "error" label on the subscribe counter so failure
// causes can be broken out in metrics dashboards.
//
// NOTE(review): using err.Error() as a label value risks unbounded label
// cardinality if callers pass errors embedding variable data (IDs, addresses);
// confirm the caller set produces a bounded set of error strings.
//
// User-caused failures (bad permissions, nonexistent track) are tallied
// separately so a true success rate can be computed from total attempts.
func RecordTrackSubscribeFailure(err error, isUserError bool) {
	// Guard against a nil error so a careless caller cannot panic the
	// metrics path; an empty error label is recorded instead.
	reason := ""
	if err != nil {
		reason = err.Error()
	}
	promTrackSubscribeCounter.WithLabelValues("failure", reason).Inc()
	if isUserError {
		trackSubscribeUserError.Inc()
	}
}
@@ -135,6 +135,15 @@ type FakeTelemetryService struct {
arg1 telemetry.StatsKey
arg2 *livekit.AnalyticsStat
}
TrackSubscribeFailedStub func(context.Context, livekit.ParticipantID, livekit.TrackID, error, bool)
trackSubscribeFailedMutex sync.RWMutex
trackSubscribeFailedArgsForCall []struct {
arg1 context.Context
arg2 livekit.ParticipantID
arg3 livekit.TrackID
arg4 error
arg5 bool
}
TrackSubscribeRequestedStub func(context.Context, livekit.ParticipantID, *livekit.TrackInfo, *livekit.ParticipantInfo)
trackSubscribeRequestedMutex sync.RWMutex
trackSubscribeRequestedArgsForCall []struct {
@@ -167,12 +176,13 @@ type FakeTelemetryService struct {
arg4 *livekit.TrackInfo
arg5 bool
}
TrackUnsubscribedStub func(context.Context, livekit.ParticipantID, *livekit.TrackInfo)
TrackUnsubscribedStub func(context.Context, livekit.ParticipantID, *livekit.TrackInfo, bool)
trackUnsubscribedMutex sync.RWMutex
trackUnsubscribedArgsForCall []struct {
arg1 context.Context
arg2 livekit.ParticipantID
arg3 *livekit.TrackInfo
arg4 bool
}
invocations map[string][][]interface{}
invocationsMutex sync.RWMutex
@@ -787,6 +797,42 @@ func (fake *FakeTelemetryService) TrackStatsArgsForCall(i int) (telemetry.StatsK
return argsForCall.arg1, argsForCall.arg2
}
// TrackSubscribeFailed records the call's arguments for later inspection and
// invokes the configured stub, if any (counterfeiter-style fake).
// Fix: the stub was snapshotted under the lock but the original then re-read
// the TrackSubscribeFailedStub FIELD outside the lock when calling it, racing
// a concurrent TrackSubscribeFailedCalls; invoke the snapshot instead.
func (fake *FakeTelemetryService) TrackSubscribeFailed(arg1 context.Context, arg2 livekit.ParticipantID, arg3 livekit.TrackID, arg4 error, arg5 bool) {
	fake.trackSubscribeFailedMutex.Lock()
	fake.trackSubscribeFailedArgsForCall = append(fake.trackSubscribeFailedArgsForCall, struct {
		arg1 context.Context
		arg2 livekit.ParticipantID
		arg3 livekit.TrackID
		arg4 error
		arg5 bool
	}{arg1, arg2, arg3, arg4, arg5})
	stub := fake.TrackSubscribeFailedStub
	fake.recordInvocation("TrackSubscribeFailed", []interface{}{arg1, arg2, arg3, arg4, arg5})
	fake.trackSubscribeFailedMutex.Unlock()
	if stub != nil {
		// Call the lock-protected snapshot, not the field.
		stub(arg1, arg2, arg3, arg4, arg5)
	}
}
// TrackSubscribeFailedCallCount reports how many times the fake's
// TrackSubscribeFailed method has been invoked.
func (fake *FakeTelemetryService) TrackSubscribeFailedCallCount() int {
	fake.trackSubscribeFailedMutex.RLock()
	n := len(fake.trackSubscribeFailedArgsForCall)
	fake.trackSubscribeFailedMutex.RUnlock()
	return n
}
// TrackSubscribeFailedCalls installs stub as the behavior of the fake's
// TrackSubscribeFailed method.
func (fake *FakeTelemetryService) TrackSubscribeFailedCalls(stub func(context.Context, livekit.ParticipantID, livekit.TrackID, error, bool)) {
	fake.trackSubscribeFailedMutex.Lock()
	fake.TrackSubscribeFailedStub = stub
	fake.trackSubscribeFailedMutex.Unlock()
}
// TrackSubscribeFailedArgsForCall returns the arguments of the i-th recorded
// invocation of TrackSubscribeFailed. Panics if i is out of range, matching
// counterfeiter convention.
func (fake *FakeTelemetryService) TrackSubscribeFailedArgsForCall(i int) (context.Context, livekit.ParticipantID, livekit.TrackID, error, bool) {
	fake.trackSubscribeFailedMutex.RLock()
	defer fake.trackSubscribeFailedMutex.RUnlock()
	call := fake.trackSubscribeFailedArgsForCall[i]
	return call.arg1, call.arg2, call.arg3, call.arg4, call.arg5
}
func (fake *FakeTelemetryService) TrackSubscribeRequested(arg1 context.Context, arg2 livekit.ParticipantID, arg3 *livekit.TrackInfo, arg4 *livekit.ParticipantInfo) {
fake.trackSubscribeRequestedMutex.Lock()
fake.trackSubscribeRequestedArgsForCall = append(fake.trackSubscribeRequestedArgsForCall, struct {
@@ -927,18 +973,19 @@ func (fake *FakeTelemetryService) TrackUnpublishedArgsForCall(i int) (context.Co
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5
}
func (fake *FakeTelemetryService) TrackUnsubscribed(arg1 context.Context, arg2 livekit.ParticipantID, arg3 *livekit.TrackInfo) {
func (fake *FakeTelemetryService) TrackUnsubscribed(arg1 context.Context, arg2 livekit.ParticipantID, arg3 *livekit.TrackInfo, arg4 bool) {
fake.trackUnsubscribedMutex.Lock()
fake.trackUnsubscribedArgsForCall = append(fake.trackUnsubscribedArgsForCall, struct {
arg1 context.Context
arg2 livekit.ParticipantID
arg3 *livekit.TrackInfo
}{arg1, arg2, arg3})
arg4 bool
}{arg1, arg2, arg3, arg4})
stub := fake.TrackUnsubscribedStub
fake.recordInvocation("TrackUnsubscribed", []interface{}{arg1, arg2, arg3})
fake.recordInvocation("TrackUnsubscribed", []interface{}{arg1, arg2, arg3, arg4})
fake.trackUnsubscribedMutex.Unlock()
if stub != nil {
fake.TrackUnsubscribedStub(arg1, arg2, arg3)
fake.TrackUnsubscribedStub(arg1, arg2, arg3, arg4)
}
}
@@ -948,17 +995,17 @@ func (fake *FakeTelemetryService) TrackUnsubscribedCallCount() int {
return len(fake.trackUnsubscribedArgsForCall)
}
func (fake *FakeTelemetryService) TrackUnsubscribedCalls(stub func(context.Context, livekit.ParticipantID, *livekit.TrackInfo)) {
func (fake *FakeTelemetryService) TrackUnsubscribedCalls(stub func(context.Context, livekit.ParticipantID, *livekit.TrackInfo, bool)) {
fake.trackUnsubscribedMutex.Lock()
defer fake.trackUnsubscribedMutex.Unlock()
fake.TrackUnsubscribedStub = stub
}
func (fake *FakeTelemetryService) TrackUnsubscribedArgsForCall(i int) (context.Context, livekit.ParticipantID, *livekit.TrackInfo) {
func (fake *FakeTelemetryService) TrackUnsubscribedArgsForCall(i int) (context.Context, livekit.ParticipantID, *livekit.TrackInfo, bool) {
fake.trackUnsubscribedMutex.RLock()
defer fake.trackUnsubscribedMutex.RUnlock()
argsForCall := fake.trackUnsubscribedArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4
}
func (fake *FakeTelemetryService) Invocations() map[string][][]interface{} {
@@ -1000,6 +1047,8 @@ func (fake *FakeTelemetryService) Invocations() map[string][][]interface{} {
defer fake.trackPublishedUpdateMutex.RUnlock()
fake.trackStatsMutex.RLock()
defer fake.trackStatsMutex.RUnlock()
fake.trackSubscribeFailedMutex.RLock()
defer fake.trackSubscribeFailedMutex.RUnlock()
fake.trackSubscribeRequestedMutex.RLock()
defer fake.trackSubscribeRequestedMutex.RUnlock()
fake.trackSubscribedMutex.RLock()
+3 -1
View File
@@ -40,7 +40,9 @@ type TelemetryService interface {
// TrackSubscribed - a participant subscribed to a track successfully
TrackSubscribed(ctx context.Context, participantID livekit.ParticipantID, track *livekit.TrackInfo, publisher *livekit.ParticipantInfo)
// TrackUnsubscribed - a participant unsubscribed from a track successfully
TrackUnsubscribed(ctx context.Context, participantID livekit.ParticipantID, track *livekit.TrackInfo)
TrackUnsubscribed(ctx context.Context, participantID livekit.ParticipantID, track *livekit.TrackInfo, shouldSendEvent bool)
// TrackSubscribeFailed - failure to subscribe to a track
TrackSubscribeFailed(ctx context.Context, participantID livekit.ParticipantID, trackID livekit.TrackID, err error, isUserError bool)
// TrackMuted - the publisher has muted the Track
TrackMuted(ctx context.Context, participantID livekit.ParticipantID, track *livekit.TrackInfo)
// TrackUnmuted - the publisher has muted the Track
+114
View File
@@ -0,0 +1,114 @@
/*
* Copyright 2023 LiveKit, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package utils
import (
"sync"
)
// ChangeNotifier fans out change notifications to a set of observers, each
// registered under a string key so it can later be replaced or removed.
type ChangeNotifier struct {
	lock      sync.Mutex
	observers map[string]func()
}

// NewChangeNotifier returns a ChangeNotifier ready for use.
func NewChangeNotifier() *ChangeNotifier {
	return &ChangeNotifier{observers: make(map[string]func())}
}

// AddObserver registers onChanged under key, replacing any observer
// previously stored under the same key.
func (n *ChangeNotifier) AddObserver(key string, onChanged func()) {
	n.lock.Lock()
	n.observers[key] = onChanged
	n.lock.Unlock()
}

// RemoveObserver unregisters the observer stored under key, if any.
func (n *ChangeNotifier) RemoveObserver(key string) {
	n.lock.Lock()
	delete(n.observers, key)
	n.lock.Unlock()
}

// HasObservers reports whether at least one observer is registered.
func (n *ChangeNotifier) HasObservers() bool {
	n.lock.Lock()
	count := len(n.observers)
	n.lock.Unlock()
	return count > 0
}

// NotifyChanged invokes every currently registered observer. The observer set
// is snapshotted under the lock, then the callbacks run sequentially on a
// fresh goroutine so the caller is never blocked by observer work.
func (n *ChangeNotifier) NotifyChanged() {
	n.lock.Lock()
	snapshot := make([]func(), 0, len(n.observers))
	for _, onChanged := range n.observers {
		snapshot = append(snapshot, onChanged)
	}
	n.lock.Unlock()

	if len(snapshot) == 0 {
		return
	}
	go func() {
		for _, onChanged := range snapshot {
			onChanged()
		}
	}()
}
// ChangeNotifierManager owns a keyed collection of ChangeNotifiers, creating
// them on demand and retiring them when no longer needed.
type ChangeNotifierManager struct {
	lock      sync.Mutex
	notifiers map[string]*ChangeNotifier
}

// NewChangeNotifierManager returns an empty manager.
func NewChangeNotifierManager() *ChangeNotifierManager {
	return &ChangeNotifierManager{notifiers: make(map[string]*ChangeNotifier)}
}

// GetNotifier returns the notifier stored under key, or nil when none exists.
func (m *ChangeNotifierManager) GetNotifier(key string) *ChangeNotifier {
	m.lock.Lock()
	defer m.lock.Unlock()
	return m.notifiers[key]
}

// GetOrCreateNotifier returns the notifier under key, lazily creating and
// storing a new one when none exists yet.
func (m *ChangeNotifierManager) GetOrCreateNotifier(key string) *ChangeNotifier {
	m.lock.Lock()
	defer m.lock.Unlock()
	notifier, ok := m.notifiers[key]
	if !ok {
		notifier = NewChangeNotifier()
		m.notifiers[key] = notifier
	}
	return notifier
}

// RemoveNotifier deletes the notifier stored under key. Unless force is set,
// a notifier that still has observers is left in place.
func (m *ChangeNotifierManager) RemoveNotifier(key string, force bool) {
	m.lock.Lock()
	defer m.lock.Unlock()
	notifier, ok := m.notifiers[key]
	if ok && (force || !notifier.HasObservers()) {
		delete(m.notifiers, key)
	}
}